id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (string, 1 class)
---|---|---|
11306609
|
<reponame>bchopson/blazeutils<gh_stars>0
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
import six
from blazeutils.spreadsheets import workbook_to_reader, XlwtHelper, http_headers, xlsx_to_reader
from blazeutils.testing import emits_deprecation
class TestWorkbookToReader(object):
@pytest.mark.skipif(not six.PY2, reason="xlwt only works on Python 2")
def test_xlwt_to_reader(self):
import xlwt
write_wb = xlwt.Workbook()
ws = write_wb.add_sheet('Foo')
ws.write(0, 0, 'bar')
wb = workbook_to_reader(write_wb)
sh = wb.sheet_by_name('Foo')
assert sh.cell_value(rowx=0, colx=0) == 'bar'
class TestXlsxToReader(object):
def test_xlsx_to_reader(self):
import xlsxwriter
write_wb = xlsxwriter.Workbook()
ws = write_wb.add_worksheet('Foo')
ws.write(0, 0, 'bar')
wb = xlsx_to_reader(write_wb)
sh = wb.sheet_by_name('Foo')
assert sh.cell_value(rowx=0, colx=0) == 'bar'
class TestWriter(object):
@emits_deprecation('XlwtHelper has been renamed to Writer')
@pytest.mark.skipif(not six.PY2, reason="xlwt only works on Python 2")
def test_xlwt_helper_deprecation(self):
XlwtHelper()
class TestHttpHeaders(object):
def test_xls_filename(self):
expect = {
'Content-Type': 'application/vnd.ms-excel',
'Content-Disposition': 'attachment; filename=foo.xls'
}
assert http_headers('foo.xls', randomize=False) == expect
def test_xlsx_filename(self):
expect = {
'Content-Type': 'application/vnd.openxmlformats-officedocument'
'.spreadsheetml.sheet',
'Content-Disposition': 'attachment; filename=foo.xlsx'
}
assert http_headers('foo.xlsx', randomize=False) == expect
def test_randomize(self):
content_dispo = http_headers('foo.xlsx')['Content-Disposition']
_, filename = content_dispo.split('=')
intpart = filename.replace('foo-', '').replace('.xlsx', '')
assert int(intpart) >= 1000000
|
StarcoderdataPython
|
1627437
|
<reponame>wyf2017/WSMCnet<filename>main.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import torch
import logging
logging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_setting():
import argparse
parser = argparse.ArgumentParser(description='Supervised Stereo Matching by pytorch')
parser.add_argument('--mode', default='train',
help='mode of execution [train/finetune/val/submission]')
# arguments of datasets
parser.add_argument('--datas_train', default='k2015-tr, k2012-tr',
help='datasets for training')
parser.add_argument('--datas_val', default='k2015-val, k2012-val',
help='datasets for validation')
parser.add_argument('--dir_datas_train', default='/media/qjc/D/data/kitti/',
help='dirpath of datasets for training')
parser.add_argument('--dir_datas_val', default='/media/qjc/D/data/kitti/',
help='dirpath of datasets for validation')
parser.add_argument('--bn', type=int, default=1,
help='batch size')
parser.add_argument('--crop_width', type=int, default=768,
help='width of crop_size')
parser.add_argument('--crop_height', type=int, default=384,
help='height of crop_size')
# arguments of model
parser.add_argument('--arch', default='WSMCnet',
help='select arch of model')
parser.add_argument('--maxdisp', type=int ,default=192,
help='maximum disparity')
parser.add_argument('--loadmodel', default=None,
help='path of pretrained weight')
# arguments of optimizer
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--lr_epoch0', type=int, default=10,
help='first epoch of learning rate decay')
parser.add_argument('--lr_stride', type=int, default=10,
help='epoch stride of learning rate decay')
parser.add_argument('--lr_delay', type=float, default=0.1,
help='learning rate decay factor')
parser.add_argument('--beta1', type=float, default=0.9,
help='beta1 of Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999,
help='beta2 of Adam optimizer')
parser.add_argument('--freq_optim', type=int, default=1,
help='frequency of optimizer updates')
# arguments for training
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train')
parser.add_argument('--nloop', type=int, default=1,
help='number of loops over the dataset per epoch')
parser.add_argument('--freq_print', type=int, default=20,
help='frequency of printing information')
# other arguments
parser.add_argument('--dir_save', default='./trained/',
help='dirpath for saving results (weights/submissions)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# parser arguments
args = parser.parse_args()
# add arguments
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.beta = (args.beta1, args.beta2)
args.crop_size = (args.crop_width, args.crop_height)
# log arguments
items = sorted(args.__dict__.items())
msg = 'The arguments are set as follows: \n'
msg += '\n'.join([' [%s]: %s' % (k, str(v)) for k, v in items])
logger.info(msg)
return args
# program entry
if __name__ == '__main__':
# get setting
args = get_setting()
# set gpu id used
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# set manual seed
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# execute stereo program
import stereo
if(args.mode.lower() in ['train', 'finetune']):
stereo.train_val(args)
elif(args.mode.lower() in ['val', 'validation']):
stereo.val(args)
elif(args.mode.lower() in ['sub', 'submission']):
stereo.submission(args)
else:
logger.error('unsupported mode [ %s ]' % args.mode)
|
StarcoderdataPython
|
8183716
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 06:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Summoner_V3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_icon_id', models.IntegerField(default=0)),
('name', models.CharField(max_length=40)),
('summoner_level', models.IntegerField(default=0)),
('revision_date', models.IntegerField(default=0)),
('summoner_id', models.IntegerField(default=0)),
('account_id', models.IntegerField(default=0)),
],
),
]
|
StarcoderdataPython
|
4828831
|
<reponame>ericmbernier/ericbernier-blog-posts<filename>flask_rest_api/tests/integration/test_teams_resource.py
from football_api.resources.teams_resource import TEAMS_ENDPOINT
def test_teams_post(client):
new_team_json = {"name": "<NAME>", "abbreviation": "HOU"}
response = client.post(TEAMS_ENDPOINT, json=new_team_json)
assert response.status_code == 201
def test_teams_post_error(client):
duplicate_team_json = {"name": "<NAME>", "abbreviation": "SEA"}
response = client.post(TEAMS_ENDPOINT, json=duplicate_team_json)
assert response.status_code == 500
def test_get_all_teams(client):
response = client.get(TEAMS_ENDPOINT)
assert response.status_code == 200
assert len(response.json) > 1
def test_get_single_team(client):
response = client.get(f"{TEAMS_ENDPOINT}/1")
assert response.status_code == 200
assert response.json["abbreviation"] == "CAR"
def test_get_team_not_found(client):
response = client.get(f"{TEAMS_ENDPOINT}/99")
assert response.status_code == 404
|
StarcoderdataPython
|
8108030
|
import requests
def boxscoretraditionalv2(GameID,StartPeriod,EndPeriod,StartRange,EndRange,RangeType):
url = "https://stats.nba.com/stats/boxscoretraditionalv2/"
querystring = {"GameID":GameID,
"StartPeriod":StartPeriod,
"EndPeriod":EndPeriod,
"StartRange":StartRange,
"EndRange":EndRange,
"RangeType":RangeType
}
headers = {
'User-Agent': "Postman/7.13.0",
}
response = requests.request("GET", url, headers=headers, params=querystring)
return response.json()
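# A minimal usage sketch: the GameID and range values below are hypothetical
# examples of the parameters the stats.nba.com endpoint expects.
if __name__ == "__main__":
    box = boxscoretraditionalv2(GameID="0021900001", StartPeriod=1, EndPeriod=10,
                                StartRange=0, EndRange=28800, RangeType=2)
    print(box.keys())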
|
StarcoderdataPython
|
4818904
|
<reponame>aws-samples/medical-text-sentence-relevance-bert
#download data needed for the sentence embeddings
import nltk
from sentence_transformers import models
nltk.download('punkt')
nltk.download('stopwords')
from sentence_transformers import SentenceTransformer
#SentenceTransformer('emilyalsentzer/Bio_ClinicalBERT')
models.Transformer('emilyalsentzer/Bio_ClinicalBERT')
|
StarcoderdataPython
|
8161012
|
<reponame>netvigator/auctions
# Generated by Django 2.2.10 on 2020-05-03 23:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('keepers', '0032_auto_20200322_0505'),
]
operations = [
migrations.AlterField(
model_name='keeper',
name='cLocation',
field=models.CharField(max_length=58, verbose_name='location'),
),
]
|
StarcoderdataPython
|
4985858
|
<reponame>mikiereed/fantaSheets
"""
Django settings for fantaSheets project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from django.contrib.messages import constants as messages
from pathlib import Path
import django_heroku
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
os.path.join(__file__, os.pardir))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# stored in environment variable
SECRET_KEY = os.getenv('SECRET_KEY', 'CHANGE ME!')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base_pages.apps.BasePagesConfig',
'football.apps.FootballConfig',
'import_export',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fantaSheets.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fantaSheets.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'fantaSheets/static')
]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Media Folder Settings
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Activate Django-Heroku
django_heroku.settings(locals())
# Authentication
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'fantasheetsdb',
'USER': 'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
}
}
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# django-import-export
IMPORT_EXPORT_USE_TRANSACTIONS = True
|
StarcoderdataPython
|
216553
|
from .imprint_view import ImprintView
from .imprint_revision_view import ImprintRevisionView
from .imprint_sbs_view import ImprintSideBySideView
from .imprint_actions import (
archive_imprint,
restore_imprint,
delete_imprint,
expand_imprint_translation_id,
)
|
StarcoderdataPython
|
5136272
|
<reponame>liminspace/dju-image<gh_stars>1-10
import os
import cStringIO
from PIL import Image
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase
from dju_image import settings as dju_settings
from dju_image.image import (adjust_image, image_get_format, is_image, optimize_png_file,
set_uploaded_file_content_type_and_file_ext)
from tests.tests.tools import (create_test_image, get_img_file, save_img_file,
safe_change_dju_settings, CleanTmpDirMixin)
class ImageCase(TestCase):
def assertImageSizeIs(self, f_img, size, msg=None):
f_img.seek(0)
img = Image.open(f_img)
if img.size != size:
raise self.failureException(msg)
def assertImageFormat(self, f_img, img_format, msg=None):
f_img.seek(0)
img = Image.open(f_img)
if img.format.lower() != img_format.lower():
raise self.failureException(msg)
class TestAdjustImage(ImageCase):
def setUp(self):
self.img_200x200 = create_test_image(200, 200)
self.img_300x300 = create_test_image(300, 300)
self.img_400x200 = create_test_image(400, 200)
self.img_200x400 = create_test_image(200, 400)
def make_files_for_images(self):
self.f_200x200_jpeg = get_img_file(self.img_200x200)
self.f_300x300_jpeg = get_img_file(self.img_300x300)
self.f_400x200_jpeg = get_img_file(self.img_400x200)
self.f_200x400_jpeg = get_img_file(self.img_200x400)
self.f_200x200_png = get_img_file(self.img_200x200, img_format='PNG')
self.f_300x300_png = get_img_file(self.img_300x300, img_format='PNG')
self.f_400x200_png = get_img_file(self.img_400x200, img_format='PNG')
self.f_200x400_png = get_img_file(self.img_200x400, img_format='PNG')
def test_size_not_fill_not_stretch(self):
self.make_files_for_images()
self.assertFalse(adjust_image(self.f_200x200_png, (200, 200)))
self.assertFalse(adjust_image(self.f_300x300_png, (300, 400)))
self.assertTrue(adjust_image(self.f_400x200_jpeg, (200, 200)))
self.assertImageSizeIs(self.f_400x200_jpeg, (200, 100))
self.assertTrue(adjust_image(self.f_200x400_jpeg, (200, 200)))
self.assertImageSizeIs(self.f_200x400_jpeg, (100, 200))
def test_size_not_fill_stretch(self):
self.make_files_for_images()
self.assertTrue(adjust_image(self.f_200x200_jpeg, (200, 200), stretch=True))
self.assertFalse(adjust_image(self.f_300x300_jpeg, (300, 400), stretch=True, force_jpeg_save=False))
self.assertTrue(adjust_image(self.f_400x200_jpeg, (500, 500), stretch=True))
self.assertImageSizeIs(self.f_400x200_jpeg, (500, 250))
def test_size_fill_not_stretch(self):
self.make_files_for_images()
self.assertFalse(adjust_image(self.f_200x200_png, (200, 200), fill=True))
self.assertTrue(adjust_image(self.f_400x200_jpeg, (100, 100), fill=True))
self.assertImageSizeIs(self.f_400x200_jpeg, (100, 100))
self.assertTrue(adjust_image(self.f_200x400_jpeg, (400, 500), fill=True))
self.assertImageSizeIs(self.f_200x400_jpeg, (200, 250))
self.assertTrue(adjust_image(self.f_300x300_jpeg, (150, 100), fill=True))
self.assertImageSizeIs(self.f_300x300_jpeg, (150, 100))
def test_size_fill_stretch(self):
self.make_files_for_images()
self.assertFalse(adjust_image(self.f_200x200_png, (200, 200), fill=True, stretch=True))
self.assertTrue(adjust_image(self.f_300x300_jpeg, (400, 350), fill=True, stretch=True))
self.assertImageSizeIs(self.f_300x300_jpeg, (400, 350))
def test_format(self):
self.make_files_for_images()
self.assertTrue(adjust_image(self.f_200x200_jpeg, (200, 200), new_format='PNG'))
self.assertImageFormat(self.f_200x200_jpeg, 'PNG')
def test_autosize(self):
self.make_files_for_images()
self.assertTrue(adjust_image(self.f_200x200_jpeg, (150, None), fill=True, stretch=True))
self.assertImageSizeIs(self.f_200x200_jpeg, (150, 150))
self.assertTrue(adjust_image(self.f_200x400_jpeg, (None, 300), fill=True, stretch=True))
self.assertImageSizeIs(self.f_200x400_jpeg, (150, 300))
def test_uploaded_file(self):
self.make_files_for_images()
uf = UploadedFile(file=self.f_200x200_jpeg, name='200x200.jpeg', content_type='image/jpeg',
size=len(self.f_200x200_jpeg.getvalue()))
self.assertTrue(adjust_image(uf, (120, 120), new_format='PNG'))
self.assertImageSizeIs(uf.file, (120, 120))
self.assertEqual(uf.content_type, 'image/png')
def test_new_image(self):
self.make_files_for_images()
self.assertIsInstance(adjust_image(self.f_200x200_jpeg, return_new_image=True),
(cStringIO.InputType, cStringIO.OutputType))
def test_cmyk_to_rgb(self):
def tests():
img_200x200_cmyk = create_test_image(200, 200, c='CMYK')
f_200x200_jpeg_cmyk = get_img_file(img_200x200_cmyk)
t = adjust_image(f_200x200_jpeg_cmyk, return_new_image=True)
self.assertIsInstance(t, (cStringIO.InputType, cStringIO.OutputType))
self.assertEqual(Image.open(t).mode, 'RGB')
img_200x200_p = create_test_image(200, 200, c='P')
f_200x200_png_p = get_img_file(img_200x200_p, img_format='PNG')
t = adjust_image(f_200x200_png_p, new_format='JPEG', return_new_image=True)
self.assertIsInstance(t, (cStringIO.InputType, cStringIO.OutputType))
self.assertEqual(Image.open(t).mode, 'RGB')
with safe_change_dju_settings():
dju_settings.DJU_IMG_CONVERT_JPEG_TO_RGB = True
tests()
with safe_change_dju_settings():
dju_settings.DJU_IMG_CONVERT_JPEG_TO_RGB = False
tests()
def test_adjust_image_invalid_new_format(self):
self.make_files_for_images()
with self.assertRaises(RuntimeError):
adjust_image(adjust_image(self.f_200x200_jpeg, new_format='test'))
def test_set_uploaded_file_content_type_and_file_ext_error(self):
with self.assertRaises(RuntimeError):
set_uploaded_file_content_type_and_file_ext(cStringIO.StringIO(), img_format='test')
class TestImageGetFormat(TestCase):
def setUp(self):
self.img_jpeg = get_img_file(create_test_image(100, 100))
self.img_png = get_img_file(create_test_image(100, 100), img_format='PNG')
self.img_gif = get_img_file(create_test_image(100, 100), img_format='GIF')
def test_format(self):
self.assertEqual(image_get_format(self.img_jpeg), 'jpeg')
self.assertEqual(image_get_format(self.img_png), 'png')
self.assertEqual(image_get_format(self.img_gif), 'gif')
def test_bad_format(self):
self.assertIsNone(image_get_format(cStringIO.StringIO('x' * 1000)))
class TestIsImage(TestCase):
def setUp(self):
self.img_jpeg = get_img_file(create_test_image(100, 100))
self.img_png = get_img_file(create_test_image(100, 100), img_format='PNG')
self.img_gif = get_img_file(create_test_image(100, 100), img_format='GIF')
def test_check(self):
self.assertTrue(is_image(self.img_jpeg, ('JpEg', 'PnG', 'GIF')))
self.assertFalse(is_image(self.img_jpeg, ('PnG', 'GIF')))
uf = UploadedFile(file=self.img_jpeg, name='test.jpeg', content_type='empty',
size=len(self.img_jpeg.getvalue()))
self.assertTrue(is_image(uf, ('jpeg',), set_content_type=False))
self.assertEqual(uf.content_type, 'empty')
is_image(uf, ('jpeg',))
self.assertEqual(uf.content_type, 'image/jpeg')
class OptimizePNGFile(ImageCase, CleanTmpDirMixin):
def setUp(self):
super(OptimizePNGFile, self).setUp()
self._clean_tmp_dir()
self.png1 = create_test_image(200, 300)
self.png1_fn = save_img_file('test1.png', self.png1, img_format='PNG')
self.png1_f = get_img_file(self.png1, img_format='PNG')
def tearDown(self):
super(OptimizePNGFile, self).tearDown()
self._clean_tmp_dir()
def test_pass_path_to_files(self):
o_fn = os.path.join(settings.TMP_DIR, 'test1_result.png')
self.assertTrue(optimize_png_file(self.png1_fn, o_fn))
self.assertTrue(os.path.exists(o_fn))
self.assertImageFormat(open(o_fn, 'rb'), 'PNG')
self.assertTrue(os.path.getsize(self.png1_fn) > (os.path.getsize(o_fn) * 0.2))
def test_pass_path_to_files_without_o(self):
original_size = os.path.getsize(self.png1_fn)
self.assertTrue(optimize_png_file(self.png1_fn))
self.assertTrue(os.path.exists(self.png1_fn))
self.assertImageFormat(open(self.png1_fn, 'rb'), 'PNG')
self.assertTrue(original_size > (os.path.getsize(self.png1_fn) * 0.2))
def test_pass_rw_object(self):
o = cStringIO.StringIO()
self.assertTrue(optimize_png_file(self.png1_f, o))
self.assertImageFormat(o, 'PNG')
self.png1_f.seek(0, os.SEEK_END)
o.seek(0, os.SEEK_END)
self.assertTrue(self.png1_f.tell() > (o.tell() * 0.2))
def test_pass_rw_object_withou_o(self):
self.png1_f.seek(0, os.SEEK_END)
original_size = self.png1_f.tell()
self.assertTrue(optimize_png_file(self.png1_f))
self.assertImageFormat(self.png1_f, 'PNG')
self.png1_f.seek(0, os.SEEK_END)
self.assertTrue(original_size > (self.png1_f.tell() * 0.2))
|
StarcoderdataPython
|
5162822
|
import unittest
from datetime import datetime
from parameterized import parameterized
import pytz
import lusid
import lusid.models as models
from utilities import InstrumentLoader
from utilities import TestDataUtilities
class Valuation(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create a configured API client
api_client = TestDataUtilities.api_client()
# Setup required LUSID APIs
cls.transaction_portfolios_api = lusid.TransactionPortfoliosApi(api_client)
cls.portfolios_api = lusid.PortfoliosApi(api_client)
cls.instruments_api = lusid.InstrumentsApi(api_client)
cls.aggregation_api = lusid.AggregationApi(api_client)
cls.quotes_api = lusid.QuotesApi(api_client)
cls.recipes_api = lusid.ConfigurationRecipeApi(api_client)
instrument_loader = InstrumentLoader(cls.instruments_api)
cls.instrument_ids = instrument_loader.load_instruments()
# Setup test data from utilities
cls.test_data_utilities = TestDataUtilities(cls.transaction_portfolios_api)
# Set test parameters
cls.effective_date = datetime(2019, 4, 15, tzinfo=pytz.utc)
cls.portfolio_code = cls.test_data_utilities.create_transaction_portfolio(
TestDataUtilities.tutorials_scope
)
# Setup test portfolio
cls.setup_portfolio(cls.effective_date, cls.portfolio_code)
@classmethod
def tearDownClass(cls):
# Delete portfolio once tests are concluded
cls.portfolios_api.delete_portfolio(
TestDataUtilities.tutorials_scope,
cls.portfolio_code
)
@classmethod
def setup_portfolio(cls, effective_date, portfolio_code) -> None:
"""
Sets up instrument, quotes and portfolio data from TestDataUtilities
:param datetime effective_date: The portfolio creation date
:param str portfolio_code: The code of the test portfolio
:return: None
"""
transactions = [
cls.test_data_utilities.build_transaction_request(
instrument_id=cls.instrument_ids[0],
units=100,
price=101,
currency="GBP",
trade_date=effective_date,
transaction_type="StockIn",
),
cls.test_data_utilities.build_transaction_request(
instrument_id=cls.instrument_ids[1],
units=100,
price=102,
currency="GBP",
trade_date=effective_date,
transaction_type="StockIn",
),
cls.test_data_utilities.build_transaction_request(
instrument_id=cls.instrument_ids[2],
units=100,
price=103,
currency="GBP",
trade_date=effective_date,
transaction_type="StockIn",
),
]
cls.transaction_portfolios_api.upsert_transactions(
scope=TestDataUtilities.tutorials_scope,
code=portfolio_code,
transaction_request=transactions,
)
prices = [
(cls.instrument_ids[0], 100),
(cls.instrument_ids[1], 200),
(cls.instrument_ids[2], 300),
]
requests = [
models.UpsertQuoteRequest(
quote_id=models.QuoteId(
models.QuoteSeriesId(
provider="Lusid",
instrument_id=price[0],
instrument_id_type="LusidInstrumentId",
quote_type="Price",
field="mid",
),
effective_at=effective_date,
),
metric_value=models.MetricValue(value=price[1], unit="GBP"),
)
for price in prices
]
cls.quotes_api.upsert_quotes(
TestDataUtilities.tutorials_scope,
request_body={
"quote" + str(request_number): requests[request_number]
for request_number in range(len(requests))
},
)
def create_configuration_recipe(
self, recipe_scope, recipe_code
) -> lusid.models.ConfigurationRecipe:
"""
Creates a configuration recipe that can be used inline or upserted
:param str recipe_scope: The scope for the configuration recipe
:param str recipe_code: The code of the configuration recipe
:return: ConfigurationRecipe
"""
return models.ConfigurationRecipe(
scope=recipe_scope,
code=recipe_code,
market=models.MarketContext(
market_rules=[],
suppliers=models.MarketContextSuppliers(equity="Lusid"),
options=models.MarketOptions(
default_supplier="Lusid",
default_instrument_code_type="LusidInstrumentId",
default_scope=TestDataUtilities.tutorials_scope,
),
),
)
def upsert_recipe_request(self, configuration_recipe) -> None:
"""
Structures a recipe request and upserts it into LUSID
:param ConfigurationRecipe configuration_recipe: Recipe configuration
:return: None
"""
upsert_recipe_request = models.UpsertRecipeRequest(configuration_recipe)
self.recipes_api.upsert_configuration_recipe(upsert_recipe_request)
@parameterized.expand(
[
[
"Test valuation with an aggregation request using an already upserted recipe",
None,
"TestRecipes",
"SimpleQuotes",
],
]
)
def test_aggregation(self, _, in_line_recipe, recipe_scope, recipe_code) -> None:
"""
General valuation/aggregation test
"""
# create recipe (provides model parameters, locations to use in resolving market data, etc.)
# and push it into LUSID. This only needs to happen once, on the first run or whenever the recipe is updated.
recipe = self.create_configuration_recipe(recipe_scope, recipe_code)
self.upsert_recipe_request(recipe)
# Set valuation result key
valuation_key = "Sum(Holding/default/PV)"
# create valuation request
valuation_request = models.ValuationRequest(
recipe_id=models.ResourceId(scope=recipe_scope, code=recipe_code),
metrics=[
models.AggregateSpec("Instrument/default/Name", "Value"),
models.AggregateSpec("Holding/default/PV", "Proportion"),
models.AggregateSpec("Holding/default/PV", "Sum"),
],
group_by=["Instrument/default/Name"],
valuation_schedule=models.ValuationSchedule(effective_at=self.effective_date),
portfolio_entity_ids=[
models.PortfolioEntityId(
scope=TestDataUtilities.tutorials_scope,
code=self.portfolio_code)
]
)
# Complete aggregation
aggregation = self.aggregation_api.get_valuation(
valuation_request=valuation_request
)
# Asserts
self.assertEqual(len(aggregation.data), 3)
self.assertEqual(aggregation.data[0][valuation_key], 10000)
self.assertEqual(aggregation.data[1][valuation_key], 20000)
self.assertEqual(aggregation.data[2][valuation_key], 30000)
|
StarcoderdataPython
|
11322164
|
<filename>combat_tracker.py
class Tracker:
def __init__(self, hp, ar, sk):
self.max_health = int(hp)
self.max_armor = int(ar)
self.max_soak = int(sk)
self.current_health = self.max_health
self.current_armor = self.max_armor
self.current_soak = self.max_soak
def __repr__(self):
self.str_rep = 'stats: H[{}/{}] A[{}/{}] S[{}/{}]\n'.format(self.current_health, self.max_health,
self.current_armor, self.max_armor,
self.current_soak, self.max_soak)
if self.current_health <= 0 and self.current_health > -10:
self.str_rep += 'IMMOBILIZED!\n'
if self.current_health <= -10:
self.str_rep += 'DEAD!\n'
return self.str_rep
# Takes care of the damage, armor, and soak calculations.
def damage(self, dam, soak_dam=0):
if soak_dam <= 0:
soak_dam = dam
self.current_soak -= soak_dam
while self.current_soak <= 0:
if self.current_armor > 0:
self.current_armor -= 1
self.current_soak += self.max_soak
if dam > self.current_armor:
self.current_health -= dam - self.current_armor
# Returns a tuple containing the max values of each stat.
def get_max_stats(self):
return (self.max_health, self.max_armor, self.max_soak)
# Returns a tuple of the current value of each stat.
def get_stats(self):
return (self.current_health, self.current_armor, self.current_soak)
# Increases current_health by the value passed to amount.
def heal(self, amount):
if amount < 0:
amount = 0
self.current_health += amount
if self.current_health > self.max_health:
self.current_health = self.max_health
# Sets the current stats values to the max_values.
def reset_stats(self):
self.current_health = self.max_health
self.current_armor = self.max_armor
self.current_soak = self.max_soak
# Sets the current stats to the values of the arguments,
# and prevents the current values from exceeding the max_values.
def set_current_stats(self, hp, ar, sk):
self.current_health = hp
self.current_armor = ar
self.current_soak = sk
if self.current_health > self.max_health:
self.current_health = self.max_health
if self.current_armor > self.max_armor:
self.current_armor = self.max_armor
if self.current_soak > self.max_soak:
self.current_soak = self.max_soak
def set_max_stats(self, hp, ar, sk):
self.max_health = hp
self.max_armor = ar
self.max_soak = sk
self.current_health = self.max_health
self.current_armor = self.max_armor
self.current_soak = self.max_soak
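# A minimal usage sketch of the tracker; the stat values are arbitrary and only
# demonstrate the damage/heal flow described in the comments above.
if __name__ == "__main__":
    goblin = Tracker(hp=10, ar=2, sk=3)
    goblin.damage(5)        # soak absorbs first, then armor is chipped, then health drops
    goblin.heal(2)          # healing is capped at max_health
    print(goblin)           # prints the H[x/max] A[x/max] S[x/max] summary
    goblin.reset_stats()    # restores every current stat to its max value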
|
StarcoderdataPython
|
377666
|
<filename>pdblib/artist.py
from construct import Struct, Int8ul, Int16ul, Int32ul, Const, Tell, this
from .piostring import OffsetPioString
ARTIST_ENTRY_MAGIC = 0x60
Artist = Struct(
"entry_start" / Tell,
"magic" / Const(ARTIST_ENTRY_MAGIC, Int16ul),
"index_shift" / Int16ul,
"id" / Int32ul,
"unknown" / Int8ul, # always 0x03, maybe an unindexed empty string
"name_idx" / Int8ul,
"name" / OffsetPioString(this.name_idx)
)
|
StarcoderdataPython
|
11360700
|
<filename>scripts/hoi4/hoi4/__init__.py
import sys
sys.path.append("../..")
import hoi4.load
import hoi4.unitstats
|
StarcoderdataPython
|
5049881
|
<reponame>GrowingData/hyper-model<gh_stars>10-100
import logging
import click
import pandas as pd
from typing import Dict, List
from xgboost import XGBClassifier
from hypermodel import hml
from hypermodel.features import one_hot_encode
from hypermodel.hml.model_container import ModelContainer
#from titanic.tragic_titanic_config import titanic_model_container, build_feature_matrix
from titanic.pipeline.tragic_titanic_training_pipeline import (
FEATURE_COLUMNS,
TARGET_COLUMN,
)
from hypermodel.platform.local.services import LocalServices
from titanic.tragic_titanic_config import (
DB_LOCATION,
DB_TABLE,
DB_TRAINING_TABLE,
DB_TESTING_TABLE,
TRAINING_CSV_LOCATION,
TESTING_CSV_LOCATION)
@hml.op()
@hml.pass_context
def create_training(ctx):
logging.info(f"Entering transform:create_training")
services: LocalServices = ctx.obj["services"]
services.warehouse.import_csv(TRAINING_CSV_LOCATION,DB_LOCATION, DB_TRAINING_TABLE)
logging.info(f"Wrote training set to {DB_TRAINING_TABLE}. Success!")
@hml.op()
@hml.pass_context
def create_test(ctx):
logging.info(f"Entering transform:create_test")
services: LocalServices = ctx.obj["services"]
services.warehouse.import_csv(TESTING_CSV_LOCATION, DB_LOCATION, DB_TESTING_TABLE)
logging.info(f"Wrote test set to {DB_TESTING_TABLE}. Success!")
@hml.op()
@hml.pass_context
def train_model(ctx):
logging.info(f"Entering training:train_model")
services: LocalServices = ctx.obj["services"]
model_container=get_model_container(ctx)
app: HmlApp = ctx.obj["app"]
# training_df = services.warehouse.dataframe_from_table(
# services.config.warehouse_dataset, BQ_TABLE_TRAINING
# )
# training_df.to_csv("/mnt/c/data/crashed/training.csv")
training_df = pd.read_csv(TRAINING_CSV_LOCATION)
logging.info("Got Training DataFrame!")
test_df = pd.read_csv(TESTING_CSV_LOCATION)
# Find all our unique values for categorical features and
# distribution information for other features
model_container.analyze_distributions(training_df)
# Train the model
model = train(model_container, training_df)
# Let out container know about the trained model
model_container.bind_model(model)
# Run some evaluation against the model
evaluate_model(model_container, test_df)
# Publish this version of the model & data analysis
ref = model_container.publish()
# Create a merge request for this model to be deployed
#model_container.create_merge_request(ref, description="My new model")
return
def get_model_container(ctx):
models: Dict[str, ModelContainer] = ctx.obj["models"]
model_container = models[MODEL_NAME]
return model_container
def train(model_container, data_frame):
logging.info(f"training: {model_container.name}: train")
feature_matrix = build_feature_matrix(model_container, data_frame)
targets = data_frame[model_container.target]
classifier = XGBClassifier()
model = classifier.fit(feature_matrix, targets, verbose=True)
return model
def evaluate_model(model_container, data_frame):
logging.info(f"training: {model_container.name}: evaluate_model")
test_feature_matrix = build_feature_matrix(model_container, data_frame)
test_targets = data_frame[model_container.target]
# Evaluate the model against the training data to get an idea of where we are at
test_predictions = [v for v in model_container.model.predict(test_feature_matrix)]
correct = 0
for i in range(0, len(test_predictions)):
if test_predictions[i] == test_targets[i]:
correct += 1
pc_correct = int(100 * correct / len(test_predictions))
logging.info(f"Got {correct} out of {len(test_predictions)} ({pc_correct}%)")
|
StarcoderdataPython
|
11337816
|
<reponame>denova-com/blockchain-backup<gh_stars>0
'''
Special settings for blockchain_backup project.
If you have installed from source, then
create a secret key and enter it between
the single quote marks in place of
THE SECRET KEY
Copyright 2018-2020 DeNova
Last modified: 2020-10-24
'''
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
|
StarcoderdataPython
|
1813156
|
import string
import logging
NORTH = 0
EAST = 1
WEST = 2
SOUTH = 3
def parse(data):
ret = ''
steps = 0
data = data.splitlines()
x = data[0].index('|')
y = 0
direction = SOUTH
while True:
steps += 1
assert direction in (NORTH, EAST, WEST, SOUTH)
if direction == NORTH:
y -= 1
elif direction == EAST:
x += 1
elif direction == WEST:
x -= 1
elif direction == SOUTH:
y += 1
c = data[y][x]
logging.debug('C: (%d, %d) %s', x, y, c)
if c in string.ascii_uppercase:
ret += c
# Change direction
elif c == '+':
if direction in (NORTH, SOUTH):
if data[y][x-1] != ' ':
direction = WEST
logging.debug('WEST')
elif data[y][x+1] != ' ':
direction = EAST
logging.debug('EAST')
else:
logging.error('Unknown direction: %d %d', x, y)
raise Exception('Unknown direction')
elif direction in (EAST, WEST):
if data[y+1][x] != ' ':
direction = SOUTH
logging.debug('SOUTH')
elif data[y-1][x] != ' ':
direction = NORTH
logging.debug('NORTH')
else:
logging.error('Unknown direction: %d %d', x, y)
raise Exception('Unknown direction')
elif c == ' ':
break
return ret, steps
def part1(data):
return data[0]
def part2(data):
return data[1]
|
StarcoderdataPython
|
1944277
|
<filename>src/gluonts/nursery/torch_arsgls_rbpf/utils/local_seed.py
import contextlib
import hashlib
import numpy as np
import torch
@contextlib.contextmanager
def local_seed(seed):
state_np = np.random.get_state()
state_torch = torch.random.get_rng_state()
np.random.seed(seed)
torch.random.manual_seed(seed)
try:
yield
finally:
np.random.set_state(state_np)
torch.random.set_rng_state(state_torch)
def seed_from(*args):
"""
Hashes any arguments to create a unique seed. This is useful for reproducibility,
e.g. when generating the same data for different models (which increment the seed differently).
-------------
Example usage:
with local_seed(seed_from("train", 10)):
x = torch.randn(2)
y = np.random.randn(2)
z = torch.randn(2) # here we have re-stored the "global" seed.
"""
m = hashlib.sha256()
for arg in args:
m.update(str(arg).encode())
h = int.from_bytes(m.digest(), "big")
seed = h % (2 ** 32 - 1)
return seed
|
StarcoderdataPython
|
12859697
|
<filename>my_ner.py
import os
from torch.utils.data import Dataset,DataLoader
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
def build_corpus(split, make_vocab=True, data_dir="data"):
"""读取数据"""
assert split in ['train', 'dev', 'test']
word_lists = []
tag_lists = []
with open(os.path.join(data_dir, split+".char.bmes"), 'r', encoding='utf-8') as f:
word_list = []
tag_list = []
for line in f:
if line != '\n':
word, tag = line.strip('\n').split()
word_list.append(word)
tag_list.append(tag)
else:
word_lists.append(word_list)
tag_lists.append(tag_list)
word_list = []
tag_list = []
word_lists = sorted(word_lists, key=lambda x: len(x), reverse=True)
tag_lists = sorted(tag_lists, key=lambda x: len(x), reverse=True)
# If make_vocab is True, also return word2id and tag2id
if make_vocab:
word2id = build_map(word_lists)
tag2id = build_map(tag_lists)
word2id['<UNK>'] = len(word2id)
word2id['<PAD>'] = len(word2id)
tag2id['<PAD>'] = len(tag2id)
return word_lists, tag_lists, word2id, tag2id
else:
return word_lists, tag_lists
def build_map(lists):
maps = {}
for list_ in lists:
for e in list_:
if e not in maps:
maps[e] = len(maps)
return maps
class MyDataset(Dataset):
def __init__(self,datas,tags,word_2_index,tag_2_index):
self.datas = datas
self.tags = tags
self.word_2_index = word_2_index
self.tag_2_index = tag_2_index
def __getitem__(self,index):
data = self.datas[index]
tag = self.tags[index]
data_index = [self.word_2_index.get(i,self.word_2_index["<UNK>"]) for i in data]
tag_index = [self.tag_2_index[i] for i in tag]
return data_index,tag_index
def __len__(self):
assert len(self.datas) == len(self.tags)
return len(self.datas)
def batch_data_pro(self,batch_datas):
global device
data , tag = [],[]
da_len = []
for da,ta in batch_datas:
data.append(da)
tag.append(ta)
da_len.append(len(da))
max_len = max(da_len)
data = [i + [self.word_2_index["<PAD>"]] * (max_len - len(i)) for i in data]
tag = [i + [self.tag_2_index["<PAD>"]] * (max_len - len(i)) for i in tag]
data = torch.tensor(data,dtype=torch.long,device = device)
tag = torch.tensor(tag,dtype=torch.long,device = device)
return data , tag, da_len
class MyModel(nn.Module):
def __init__(self,embedding_num,hidden_num,corpus_num,bi,class_num,pad_index):
super().__init__()
self.embedding_num = embedding_num
self.hidden_num = hidden_num
self.corpus_num = corpus_num
self.bi = bi
self.embedding = nn.Embedding(corpus_num,embedding_num)
self.lstm = nn.LSTM(embedding_num,hidden_num,batch_first=True,bidirectional=bi)
if bi:
self.classifier = nn.Linear(hidden_num*2,class_num)
else:
self.classifier = nn.Linear(hidden_num, class_num)
self.cross_loss = nn.CrossEntropyLoss(ignore_index=pad_index)
def forward(self,data_index,data_len , tag_index=None):
em = self.embedding(data_index)
pack = nn.utils.rnn.pack_padded_sequence(em,data_len,batch_first=True)
output,_ = self.lstm(pack)
output,lens = nn.utils.rnn.pad_packed_sequence(output,batch_first=True)
pre = self.classifier(output)
self.pre = torch.argmax(pre, dim=-1).reshape(-1)
if tag_index is not None:
loss = self.cross_loss(pre.reshape(-1,pre.shape[-1]),tag_index.reshape(-1))
return loss
if __name__ == "__main__":
device = "cuda:0" if torch.cuda.is_available() else "cpu"
train_word_lists, train_tag_lists, word_2_index, tag_2_index = build_corpus("train")
dev_word_lists, dev_tag_lists = build_corpus("dev", make_vocab=False)
test_word_lists, test_tag_lists = build_corpus("test", make_vocab=False)
corpus_num = len(word_2_index)
class_num = len(tag_2_index)
train_batch_size = 5
dev_batch_size = len(dev_word_lists)
epoch = 100
lr = 0.001
embedding_num = 128
hidden_num = 129
bi = True
train_dataset = MyDataset(train_word_lists,train_tag_lists,word_2_index, tag_2_index)
train_dataloader = DataLoader(train_dataset,batch_size=train_batch_size,shuffle=False,collate_fn=train_dataset.batch_data_pro)
dev_dataset = MyDataset(dev_word_lists, dev_tag_lists, word_2_index, tag_2_index)
dev_dataloader = DataLoader(dev_dataset, batch_size=dev_batch_size, shuffle=False,collate_fn=dev_dataset.batch_data_pro)
model = MyModel(embedding_num,hidden_num,corpus_num,bi,class_num,word_2_index["<PAD>"])
model = model.to(device)
opt = torch.optim.Adam(model.parameters(),lr = lr)
for e in range(epoch):
model.train()
for data , tag, da_len in train_dataloader:
loss = model.forward(data,da_len,tag)
loss.backward()
opt.step()
opt.zero_grad()
model.eval()  # F1, accuracy, precision, recall
for dev_data , dev_tag, dev_da_len in dev_dataloader:
test_loss = model.forward(dev_data,dev_da_len,dev_tag)
score = f1_score(dev_tag.reshape(-1).cpu().numpy(),model.pre.cpu().numpy(),average="micro")
print(score)
break
|
StarcoderdataPython
|
6612922
|
#!/usr/bin/env python
"""reducer.py"""
from operator import itemgetter
import sys
batch_current = 0
metric_value_min = 0
metric_value_max = 0
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
line = line.rstrip()
# parse the input we got from mapper.py
batch_id_current, metric_value, metric_selected = line.split('\t')
print('batch_id: {}\t value: {}\t metric: {}'.format(batch_id_current, metric_value, metric_selected))
|
StarcoderdataPython
|
8171723
|
<filename>functions/cut.py
import pygame
def CortarImagen (image, x, y, eX, eY):
info=image.get_rect()
an_image = info[2]
al_image = info[3]
an_corte = int(an_image/eX)
al_corte = int(al_image/eY)
cuadro = image.subsurface(x*an_corte,y*al_corte, an_corte, al_corte)
return cuadro
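# A minimal usage sketch: "spritesheet.png" is a hypothetical 4x4 sprite sheet;
# a display surface is created so the cropped cell can be blitted.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((64, 64))
    sheet = pygame.image.load("spritesheet.png")
    frame = CortarImagen(sheet, 1, 2, 4, 4)  # cell at column 1, row 2 of a 4x4 grid
    screen.blit(frame, (0, 0))
    pygame.display.flip()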
|
StarcoderdataPython
|
8179638
|
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=wrong-import-position
# pylint: disable=pointless-string-statement
# pylint: disable=pointless-statement
# pylint: disable=invalid-name
# type: ignore[attr-defined]
# https://github.com/python/mypy/issues/5858
"""This file includes the python code of squash_and_merge.rst."""
"""Authorize a Dataset Client Instance"""
from tensorbay import GAS
# Please visit `https://gas.graviti.cn/tensorbay/developer` to get the AccessKey.
gas = GAS("<YOUR_ACCESSKEY>")
dataset_client = gas.create_dataset("<DATASET_NAME>")
dataset_client.create_draft("draft-1")
dataset_client.commit("commit-1")
dataset_client.create_branch("dev")
dataset_client.create_draft("draft-2")
dataset_client.commit("commit-2")
dataset_client.create_draft("draft-3")
dataset_client.commit("commit-3")
dataset_client.checkout("main")
dataset_client.create_draft("draft-4")
dataset_client.commit("commit-4")
""""""
"""Create Job"""
job = dataset_client.squash_and_merge.create_job(
draft_title="draft-5",
source_branch_name="dev",
target_branch_name="main",
draft_description="draft_description",
strategy="override",
)
""""""
"""Checkout First"""
job = dataset_client.squash_and_merge.create_job(
draft_title="draft-5",
source_branch_name="dev",
draft_description="draft_description",
strategy="override",
)
""""""
"""Get, List and Delete"""
job = dataset_client.squash_and_merge.get_job("jobId")
dataset_client.squash_and_merge.delete_job("jobId")
job = dataset_client.squash_and_merge.list_jobs()[0]
""""""
"""Get Job Info"""
job.status
job.result
job.error_message
job.arguments
""""""
"""Update Job"""
job.update()
job.update(until_complete=True)
""""""
"""Abort and Retry Job"""
job.abort()
job.retry()
""""""
|
StarcoderdataPython
|
5015710
|
from lexer.Word import *
class Type(Word):
def __init__(self, s, tag):
super(Type, self).__init__(s, tag)
def numeric(self, p):
if p == CHAR or p == NUM or p == REAL:
return True
return False
def max(self, p1, p2):
if not self.numeric(p1) or not self.numeric(p2):
return None
elif p1 == REAL or p2 == REAL:
return REAL
elif p1 == NUM or p2 == NUM:
return NUM
else:
return CHAR
NUM = Type("NUM", Tag.BASIC)
REAL = Type("REAL", Tag.BASIC)
CHAR = Type("CHAR", Tag.BASIC)
BOOL = Type("BOOL", Tag.BASIC)
|
StarcoderdataPython
|
3466652
|
alien_color = "green"
if(alien_color == "green"):
print("Hey, you just got 5 points")
elif(alien_color == "yellow"):
print("Hey, you just got 10 points")
elif(alien_color == "red"):
print("Hey, you just got 15 points")
alien_color = "yellow"
if(alien_color == "green"):
print("Hey, you just got 5 points")
elif(alien_color == "yellow"):
print("Hey, you just got 10 points")
elif(alien_color == "red"):
print("Hey, you just got 15 points")
alien_color = "red"
if(alien_color == "green"):
print("Hey, you just got 5 points")
elif(alien_color == "yellow"):
print("Hey, you just got 10 points")
elif(alien_color == "red"):
print("Hey, you just got 15 points")
|
StarcoderdataPython
|
9643475
|
<reponame>richardGaoPy/NetSpider
# -*- coding:utf-8 -*-
"""
base.py
~~~~~~~
"""
import re
import datetime
import json
import requests
from torndb import Connection
from tornado.gen import coroutine
from tornado.concurrent import run_on_executor
from tornado.escape import to_unicode, url_escape
from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup
# import time
# import random
# from zh_auth import search_xsrf
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
mysql_config = {
}
class ZHBase(object):
def __init__(self):
self.db = Connection(host=mysql_config.get('host', '127.0.0.1'), database=mysql_config.get('database', 'test'),
user=mysql_config.get('user', 'root'), password=mysql_config.get('password', ''))
self.executor = ThreadPoolExecutor(max_workers=4)
# self.requests = requests.Session()
self.headers = {
'Host': 'www.zhihu.com',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'http://www.zhihu.com/topics',
}
class GetTopics(ZHBase):
def __init__(self, login):
ZHBase.__init__(self)
self.p_father = re.compile(r'<li data-id="(\d+)"><a href="(.*?)">(.*?)</a></li>')
self.father_topic_uri = 'http://www.zhihu.com/topics'
self.requests = login.requests
def get_father_topics(self):
try:
result = self.requests.get(url=self.father_topic_uri)
except RuntimeError as e:
print 'curl father topic failed!' # Write logging
print e
if result.status_code != 200:
print 'requests status code is {}'.format(result.status_code)
return
return self.p_father.findall(result.content)
def save_father_topics(self):
now = str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
for topic in self.get_father_topics():
sql = u'INSERT INTO topics (data_id, topic_name, user_counts, is_deleted, last_update) ' \
u'VALUES (%d, "%s", %d, %d, "%s")' % (int(topic[0]), to_unicode(topic[2]), 0, 0, now)
print sql
try:
self.db.execute(sql)
print 'save {} success'.format(str(topic))
except RuntimeError as e:
print 'save failed : {}'.format(str(e))
class GetSubclassTopics(ZHBase):
def __init__(self, login):
ZHBase.__init__(self)
self.uri = 'http://www.zhihu.com/node/TopicsPlazzaListV2'
self.requests = login.requests
self.get_xsrf = login.get_xsrf()
def get_father_info(self):
sql = 'SELECT data_id FROM topics'
try:
return self.db.query(sql)
except IOError as e:
return []
def get_subclass_topics(self):
topics = self.get_father_info()
if len(topics) == 0:
return
r = self.requests.get(url='http://www.zhihu.com/topics', headers=self.headers)
if r.status_code != 200:
print 'request topics failed!'
return
# with open('topics_home.html', 'wb') as fd:
# fd.write(r.content)
# fd.close()
p = re.compile(r'"user_hash":"(.*?)"', re.M)
user_hash = p.findall(r.content)
if len(user_hash) == 0:
print 'get user hash failed!'
return
hash_id = user_hash[0]
xsrf = self.get_xsrf
print hash_id
for topic in topics[:2]:
print 'now get topic number {}`s subclass'.format(str(topic))
offset = 0
while True:
fd = open('subtopics.txt', 'a')
# uri = self.uri + '?_xsrf={}&method=next&'.format(xsrf) + params
uri = self.uri + '?' + 'method=next¶ms=%7B%22topic_id%22%3A' + str(topic.get('data_id')) + \
'%2C%22offset%22%3A' + str(offset) + '%2C%22hash_id%22%3A%22' + str(hash_id) + '%22%7D&_xsrf=' + str(xsrf)
r = self.requests.post(url=uri, data={}, headers=self.headers)
if '"r":0' not in r.content:
print 'curl subclass topics failed!'
return
contents = json.loads(r.content.replace('\\n', '').replace('\\"', '').replace('\\/', '/')).get('msg')
if len(contents) == 0:
break
for div in contents:
now = str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
soup = BeautifulSoup(div, "lxml")
try:
item = soup.find(attrs={'class': 'blk'})
name = item.a.strong.get_text()
description = item.p.get_text()
topic_id = item.a.get('href')[7:]
sql = 'INSERT INTO subclass_topics (topic_id, sub_topic_name, description, user_counts, ' \
'is_self, is_deleted, last_update, father_topic_id_id) VALUES ' \
'("%s", "%s", "%s", 0, 0, 0, "%s", 1)' % (topic_id, name, description, now)
print sql
try:
self.db.execute(sql)
except:
print 'failed!'
continue
except RuntimeError as e:
# print soup.find(attrs={'class': 'blk'})
print 'failed in {}'.format(str(div))
continue
print '\n\n'
# with open('sub_topics' + str(offset) + '.html', 'wb') as fd:
# fd.write(str(contents))
fd.close()
offset += 20
class GetSubclassFans(ZHBase):
def __init__(self, uri, login):
ZHBase.__init__(self)
self.topic_uri = uri
self.requests = login.requests
self.get_xsrf = login.get_xsrf()
def get_topic_fans(self):
# get start mi-id.
url = self.topic_uri + '/followers'
r = self.requests.get(url, headers=self.headers)
if r.status_code != 200:
print 'get users failed!'
return
soup = BeautifulSoup(r.content, 'lxml')
first_user = soup.find(attrs={'class': 'zm-person-item'})
mi_id = first_user.get('id', None)
if mi_id:
account = first_user.h2.a.get('href', '')
name = first_user.h2.a.get_text()
print mi_id, account, name
with open('mark_location.txt', 'a') as fd:
fd.write('this loop begin at : \n{}'.format(str((mi_id, account, name))))
fd.write('\n')
else:
return
# begin loop to get users
offset = 0
_xsrf = self.get_xsrf
n = 0
while n < 11:
print 'begin test.'
post_data = {
'offset': offset,
'_xsrf': _xsrf,
'start': mi_id[3:]
}
r = self.requests.post(url=url, data=post_data, headers=self.headers)
if r.status_code != 200 and '"r":0' not in r.content:
print r.content
print r.status_code
return
soup_test = BeautifulSoup(r.content.replace('\\n', '').replace('\\"', '').replace('\\/', '/'), 'lxml')
users_item = soup_test.find_all(attrs={'class': 'zm-person-item'})
users_list = list()
if users_item:
for user_item in users_item:
# p = re.compile(r'mi-(\d+)')
mi_id = str(user_item.get('id', None))
user = user_item.find(attrs={'class': 'zm-list-content-title'})
account = user.a.get('href', None)
name = user.a.get_text()
print (mi_id, account, name.decode('raw_unicode_escape'))
users_list.append((mi_id, account, name.decode('raw_unicode_escape')))
self.save_users(users_list)
# time.sleep(random.randint(20, 30))
offset += 20
n += 1
# @coroutine
def save_users(self, users):
yield self.execute_sql(users)
@run_on_executor
def execute_sql(self, users):
now = str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
for user in users:
try:
sql = 'INSERT INTO users (user_number, user_account, user_name, last_update, is_queried, is_deleted) ' \
'VALUES ("%s", "%s", "%s", "%s", 0, 0)' % (user[0], user[1], user[2], now)
self.db.execute(sql)
print sql
except:
with open('mark_location_failed.txt', 'a') as fd:
fd.write(str(user) + '\n')
fd.close()
class GetUserInfo(ZHBase):
def __init__(self, login):
ZHBase.__init__(self)
self.requests = login.requests
def get_uid_account(self):
sql = 'SELECT id, user_account FROM users'
for row in self.db.query(sql):
sql = 'UPDATE users SET is_queried=1 WHERE id=%d' % int(row.get('id'))
self.db.execute(sql)
self.get_user_info(row.get('id'), row.get('user_account'))
@coroutine
def get_user_info(self, uid, account):
yield self.get_personal_information(uid, account)
# @run_on_executor
def save_user_info(self, info):
print '*' * 32
if info:
now = str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
sql = 'INSERT INTO info (sex, fans_number, follows_number, social_contact, location_by_self, abstract, ' \
'employment, domicile, educational, last_update, user_id_id) VALUES (%s, %s, %s, "%s", "%s", "%s", ' \
'"%s", "%s", "%s", "%s", %s)' % (info.get('sex', 2), info.get('fans', 0), info.get('follows', 0),
info.get('social_contact', ''), info.get('location_by_self', ''), info.get('description', ''),
info.get('employment', ''), info.get('domicile', ''), info.get('education', ''), now, info.get('user_id'))
print sql
self.db.execute(sql)
@coroutine
def get_personal_information(self, id, user):
uri = 'http://www.zhihu.com' + str(user)
print uri
r = self.requests.get(uri, headers=self.headers)
if r.status_code != 200:
print 'get {} failed!'.format(uri)
return
soup = BeautifulSoup(r.content, "lxml")
# get people main info
main_info = soup.find(attrs={"class": 'zm-profile-header-main'})
attention_info = soup.find(attrs={"class": 'zm-profile-side-following zg-clear'})
# sex info
sex = 2
if main_info.find(attrs={'class': 'icon icon-profile-male'}):
sex = 1
elif main_info.find(attrs={'class': 'icon icon-profile-female'}):
sex = 0
else:
pass
print 'sex : {}'.format(sex)
# social contact
contact = ''
contact_info = main_info.find(attrs={'class': 'zm-profile-header-user-weibo'})
if contact_info:
p = re.compile(r'href="(.*?)"', re.M)
contact_info = str(contact_info)
temp = p.findall(contact_info)
if len(temp) == 0:
pass
else:
contact = temp[0]
print 'social contact(sina) : {}'.format(contact)
# people's domicile
domicile = ''
domicile_info = main_info.find(attrs={'class': 'location item'})
if domicile_info:
domicile = domicile_info.get('title', '')
print 'domicile : {}'.format(domicile)
# location by self
location_self = ''
location_by_self = main_info.find(attrs={'class': 'business item'})
if location_by_self:
location_self = location_by_self.get('title', '')
print 'location by self : {}'.format(location_self)
# industry or employment - position
industry = ''
employment = ''
position = ''
employment_item = main_info.find(attrs={'class': 'employment item'})
if employment_item:
employment = employment_item.get('title', '')
position_item = main_info.find(attrs={'class': 'position item'})
if position_item:
position = position_item.get('title', '')
if True:
industry = str(employment) + ' - ' + str(position)
print 'employment : {}'.format(industry)
# occupations
# occupations = u'Now not need.'
# print 'occupations : {}'.format(occupations)
# education
education_info = ''
education = ''
education_extra = ''
education_item = main_info.find(attrs={'class': 'education item'})
if education_item:
education = education_item.get('title', '')
education_extra_item = main_info.find(attrs={'class': 'education-extra item'})
if education_extra_item:
education_extra = education_extra_item.get('title', '')
if True:
education_info = str(education) + ' - ' + str(education_extra)
print 'education information : {}'.format(education_info)
# description
description = ''
description_info = main_info.find(attrs={'class': 'fold-item'})
if description_info:
description = description_info.span.get_text()
print 'description : {}'.format(description)
# fans follows numbers
fans = 0
follows = 0
if attention_info:
p = re.compile(r'<strong>(\d+)</strong>', re.M)
numbers = p.findall(str(attention_info))
if len(numbers) == 2:
fans = numbers[0]
follows = numbers[1]
print 'fans number : {}'.format(fans)
print 'follows number : {}'.format(follows)
profile_info = {'user_id': id, 'sex': sex, 'social_contact': contact, 'domicile': domicile,
'location_by_self': location_self, 'employment': industry, 'education': education_info,
'description': description, 'fans': fans, 'follows': follows}
self.save_user_info(profile_info)
print profile_info
# if __name__ == '__main__':
# if login():
# print 'Spider start.'
# topics = GetTopics()
# topics.save_father_topics()
#
# sub_topics = GetSubclassTopics()
# sub_topics.get_subclass_topics()
#
# user_info = GetUserInfo()
# user_info.get_uid_account()
#
# get_users = GetSubclassFans('http://www.zhihu.com/topic/19550517')
# get_users.get_topic_fans()
|
StarcoderdataPython
|
4949626
|
#!/usr/bin/python3
import logging
from jujuna.helper import connect_juju, log_traceback, ApperrorTimeout, wait_until
from juju.errors import JujuError
from websockets import ConnectionClosed
# create logger
log = logging.getLogger('jujuna.deploy')
async def deploy(
bundle_file,
ctrl_name='',
model_name='',
wait=False,
endpoint='',
username='',
password='',
cacert='',
error_timeout=None,
**kwargs
):
"""Deploy a local juju bundle.
Handles deployment of a bundle file to the current or selected model.
Connection requires juju client configs to be present locally or specification of credentials:
endpoint (e.g. 127.0.0.1:17070), username, password, and model uuid as model_name.
:param bundle_file: juju bundle file
:param ctrl_name: juju controller
:param model_name: juju model name or uuid
:param wait: boolean
:param endpoint: string
:param username: string
:param password: string
:param cacert: string
"""
ret = 0
log.info('Reading bundle: {}'.format(bundle_file.name))
entity_url = 'local:' + bundle_file.name.replace('/bundle.yaml', '')
controller, model = await connect_juju(
ctrl_name,
model_name,
endpoint=endpoint,
username=username,
password=password,
cacert=cacert
)
try:
# Deploy a bundle
log.info("Deploy: {}".format(entity_url))
deployed_apps = await model.deploy(
entity_url
)
if not isinstance(deployed_apps, list):
deployed_apps = [deployed_apps]
if wait:
await wait_until(
model,
deployed_apps,
log,
loop=model.loop,
error_timeout=error_timeout
)
else:
log.info('{} - Apps: {}'.format(
'DEPLOYED',
len(deployed_apps)
))
except ApperrorTimeout:
ret = 124
log.error('FAILED - Application too long in error state')
except ConnectionClosed as e:
ret = 1
log.error('FAILED - Connection closed')
log_traceback(e)
except JujuError as e:
ret = 1
log.error('JujuError during deploy')
log_traceback(e)
finally:
# Disconnect from the api server and cleanup.
await model.disconnect()
await controller.disconnect()
return ret
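# Usage sketch (illustrative, not part of the original module): deploying a local
# bundle from a script. The bundle path and the empty controller/model names are
# placeholders, not values taken from jujuna itself.
if __name__ == '__main__':
    import asyncio
    import sys

    async def _example():
        with open('mybundle/bundle.yaml') as bundle_file:
            return await deploy(bundle_file, ctrl_name='', model_name='', wait=True)

    sys.exit(asyncio.run(_example()))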
|
StarcoderdataPython
|
11299839
|
# Demonstration of the lower() method
nome_cidade = 'rIo DE jaNeirO'
print(nome_cidade.lower())
|
StarcoderdataPython
|
4828569
|
<reponame>dizcology/python-aiplatform<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .types.automl_image_classification import AutoMlImageClassification
from .types.automl_image_classification import AutoMlImageClassificationInputs
from .types.automl_image_classification import AutoMlImageClassificationMetadata
from .types.automl_image_object_detection import AutoMlImageObjectDetection
from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs
from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata
from .types.automl_image_segmentation import AutoMlImageSegmentation
from .types.automl_image_segmentation import AutoMlImageSegmentationInputs
from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata
from .types.automl_tables import AutoMlTables
from .types.automl_tables import AutoMlTablesInputs
from .types.automl_tables import AutoMlTablesMetadata
from .types.automl_text_classification import AutoMlTextClassification
from .types.automl_text_classification import AutoMlTextClassificationInputs
from .types.automl_text_extraction import AutoMlTextExtraction
from .types.automl_text_extraction import AutoMlTextExtractionInputs
from .types.automl_text_sentiment import AutoMlTextSentiment
from .types.automl_text_sentiment import AutoMlTextSentimentInputs
from .types.automl_video_action_recognition import AutoMlVideoActionRecognition
from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs
from .types.automl_video_classification import AutoMlVideoClassification
from .types.automl_video_classification import AutoMlVideoClassificationInputs
from .types.automl_video_object_tracking import AutoMlVideoObjectTracking
from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs
from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig
__all__ = (
"AutoMlImageClassification",
"AutoMlImageClassificationInputs",
"AutoMlImageClassificationMetadata",
"AutoMlImageObjectDetection",
"AutoMlImageObjectDetectionInputs",
"AutoMlImageObjectDetectionMetadata",
"AutoMlImageSegmentation",
"AutoMlImageSegmentationInputs",
"AutoMlImageSegmentationMetadata",
"AutoMlTables",
"AutoMlTablesInputs",
"AutoMlTablesMetadata",
"AutoMlTextClassification",
"AutoMlTextClassificationInputs",
"AutoMlTextExtraction",
"AutoMlTextExtractionInputs",
"AutoMlTextSentiment",
"AutoMlTextSentimentInputs",
"AutoMlVideoActionRecognition",
"AutoMlVideoActionRecognitionInputs",
"AutoMlVideoClassification",
"AutoMlVideoClassificationInputs",
"AutoMlVideoObjectTracking",
"AutoMlVideoObjectTrackingInputs",
"ExportEvaluatedDataItemsConfig",
)
|
StarcoderdataPython
|
58587
|
<reponame>matslindh/codingchallenges<filename>adventofcode2017/19.py
def explore(lines):
y = 0
x = lines[0].index('|')
dx = 0
dy = 1
answer = ''
steps = 0
while True:
x += dx
y += dy
if lines[y][x] == '+':
if x < (len(lines[y]) - 1) and lines[y][x+1].strip() and dx != -1:
dx = 1
dy = 0
elif y < (len(lines) - 1) and x < len(lines[y+1]) and lines[y+1][x].strip() and dy != -1:
dx = 0
dy = 1
elif y > 0 and x < len(lines[y-1]) and lines[y-1][x].strip() and dy != 1:
dx = 0
dy = -1
elif x > 0 and lines[y][x-1].strip() and dx != 1:
dx = -1
dy = 0
elif lines[y][x] == ' ':
break
elif lines[y][x] not in ('-', '|'):
answer += lines[y][x]
steps += 1
return answer, steps + 1
def test_explore():
assert ('ABCDEF', 38) == explore(open("input/dec19_test").readlines())
if __name__ == "__main__":
print(explore(open("input/dec19").readlines()))
|
StarcoderdataPython
|
9705314
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Oracle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "<NAME> (Oracle A-Team)"
__copyright__ = "Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved."
__version__ = "1.0.0.0"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
from oc_provision_wrappers import commerce_setup_helper
import logging
logger = logging.getLogger(__name__)
json_key = 'ATGPATCH_install'
def post_install_cmds(configData, full_path):
if json_key in configData:
jsonData = configData[json_key]
else:
        logger.error(json_key + " config data missing from json. will not install")
return
logging.info("executing post patch install tasks")
INSTALL_DIR = jsonData['dynamoRoot']
INSTALL_OWNER = jsonData['installOwner']
# fix manifest
patch_manifest(INSTALL_DIR, INSTALL_OWNER)
# fix crs after patch
patch_crs(configData, full_path)
def patch_manifest(INSTALL_DIR, INSTALL_OWNER):
# fix missing manifest entry that cause jps-config.xml to not get pulled into standalone ears
MANIFEST_TO_UPDATE = INSTALL_DIR + "/home/META-INF/MANIFEST.MF"
fixJPScmd = "\"" + "echo >> " + MANIFEST_TO_UPDATE + \
"; echo 'Name: security' >> " + MANIFEST_TO_UPDATE + \
"; echo 'ATG-Assembler-Import-File: true' >> " + MANIFEST_TO_UPDATE + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, fixJPScmd)
def patch_crs(configData, full_path):
"""
patch1 has a bug where it does not update CRS with new taglibs. Patch it here.
"""
atginstall_json_key = 'ATG_install'
if atginstall_json_key not in configData:
return
jsonData = configData[atginstall_json_key]
INSTALL_DIR = jsonData['dynamoRoot']
INSTALL_OWNER = jsonData['installOwner']
INSTALL_CRS = jsonData['install_crs']
if INSTALL_CRS:
# If patch1 is installed, these are not updated. fix it.
cpCmd = "\"" + "cp " + INSTALL_DIR + "/DAS/taglib/dspjspTaglib/1.0/lib/dspjspTaglib1_0.jar " + INSTALL_DIR + "/CommerceReferenceStore/Store/Storefront/j2ee-apps/Storefront/store.war/WEB-INF/lib" + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, cpCmd)
cpCmd = "\"" + "cp " + INSTALL_DIR + "/DAS/taglib/dspjspTaglib/1.0/lib/dspjspTaglib1_0.jar " + INSTALL_DIR + "/CommerceReferenceStore/Store/Storefront/j2ee-apps/Storefront/storedocroot.war/WEB-INF/lib" + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, cpCmd)
cpCmd = "\"" + "cp " + INSTALL_DIR + "/DAS/taglib/dspjspTaglib/1.0/lib/dspjspTaglib1_0.jar " + INSTALL_DIR + "/CommerceReferenceStore/Store/Fluoroscope/j2ee-apps/Fluoroscope/fluoroscope.war/WEB-INF/lib" + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, cpCmd)
cpCmd = "\"" + "cp " + INSTALL_DIR + "/DAS/taglib/dspjspTaglib/1.0/lib/dspjspTaglib1_0.jar " + INSTALL_DIR + "/CommerceReferenceStore/Store/DCS-CSR/j2ee-apps/DCS-CSR/CSRHelper.war/WEB-INF/lib" + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, cpCmd)
cpCmd = "\"" + "cp " + INSTALL_DIR + "/DAS/taglib/dspjspTaglib/1.0/lib/dspjspTaglib1_0.jar " + INSTALL_DIR + "/CommerceReferenceStore/Store/EStore/Versioned/j2ee-apps/Versioned/store-merchandising.war/WEB-INF/lib" + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, cpCmd)
|
StarcoderdataPython
|
9681656
|
<gh_stars>0
from django.db import models
import datetime
import feedparser
from dateutil import parser
from time import mktime
from django.db import transaction
from dateutil.tz import *
from math import *
from topia.termextract import extract
from constants.app_constants import *
from common.stemming import *
# Create your models here.
class Site(models.Model):
name= models.CharField(max_length=200)
webfeedlink=models.CharField(max_length=400, verbose_name='web feed link')
softdelete=models.BooleanField(default=True)
addedon = models.DateTimeField()
last_published_article_time=models.DateTimeField(verbose_name='last published article time')
def _get_all_articles_published_after_last_sync_time(self):
print self.webfeedlink
parsedData=feedparser.parse(self.webfeedlink)
articles=[]
last_published_article_time= self.last_published_article_time
image_title=""
image_url=""
image_href=""
try:
image=parsedData.feed.image
if "title" in image:
image_title=image["title"]
if "link" in image:
image_url=image["link"]
if "href" in image:
image_href=image["href"]
except AttributeError:
# no image attribute
pass
list_category = PCategory.get_categories()
dict_features = Features.get_fetaures()
#count=0
for entry in parsedData.entries:
article=Article()
article.image_url=image_url
article.image_href=image_href
article.image_title=image_title
article.header=entry.title
article.link=entry.link
article.count=0
article.description=entry.description.encode('utf-8')
if hasattr(entry, 'published'):
article.published=parser.parse(entry.published)
elif hasattr(entry, 'created'):
article.published=parser.parse(entry.created)
elif hasattr(entry, 'updated'):
article.published=parser.parse(entry.updated)
else:
current_date_time = datetime.datetime.now()
article.published = datetime.datetime(current_date_time.year,current_date_time.month,current_date_time.day, current_date_time.hour, current_date_time.minute,current_date_time.second, tzinfo=tzutc())
article.siteid=self
if not article.published.tzinfo or not article.published.tzinfo.utcoffset:
temp_date = article.published
article.published = datetime.datetime(temp_date.year,temp_date.month,temp_date.day, temp_date.hour, temp_date.minute,temp_date.second, tzinfo=tzutc())
if article.published>self.last_published_article_time:
article.get_pcategory(list_category, dict_features)
articles.append(article)
if article.published>last_published_article_time:
last_published_article_time=article.published
current_date_time = datetime.datetime.now()
article.addedon = datetime.datetime(current_date_time.year,current_date_time.month,current_date_time.day, current_date_time.hour, current_date_time.minute,current_date_time.second, tzinfo=tzutc())
#count=count+1
#changing site last article published time
self.last_published_article_time=last_published_article_time
return articles
def save_articles_published_after_last_sync_time(self):
articles=self._get_all_articles_published_after_last_sync_time()
with transaction.atomic():
self.save()
print "check"
Article.objects.bulk_create(articles)
def fetch_articles(self,offset_from_latest=0,no_of_articles=10):
query=Article.objects.filter(siteid = self).order_by("-published")[offset_from_latest:no_of_articles]
articles=[]
for article in query:
articles.append(article)
return articles
class SiteProbability(models.Model):
siteid=models.ForeignKey(Site)
probability=models.FloatField()
class PCategory(models.Model):
name=models.CharField(max_length=100)
@staticmethod
def get_categories():
query=PCategory.objects.all()
list_category=[]
for element in query:
list_category.append(element)
return list_category
class Features(models.Model):
name=models.CharField(max_length=200)
pcategory=models.ForeignKey(PCategory)
probability=models.FloatField()
@staticmethod
def get_fetaures():
query= Features.objects.all()
dict_features={}
for element in query:
feature_name = element.name
if feature_name in dict_features:
dict_features[feature_name].append(element)
else:
dict_features[feature_name]= [element]
return dict_features
class Article(models.Model):
header=models.CharField(max_length=200)
description=models.CharField(max_length=1000000)
author = models.CharField(max_length=50,null=True)
siteid= models.ForeignKey(Site)
pcategoryid=models.ForeignKey(PCategory,verbose_name='primary category id')
image_url= models.CharField(max_length=300, null=True)
image_href= models.CharField(max_length=300, null=True)
image_title=models.CharField(max_length=300, null=True)
link= models.CharField(max_length=300, null=True)
addedon=models.DateTimeField()
published = models.DateTimeField()
count = models.IntegerField()
@staticmethod
def delete_articles():
count=Article.objects.count()
if count>MAX_NO_OF_ARTICLES_ALLOWED:
objects_to_keep=Article.objects.order_by("-published")[:MAX_NO_OF_ARTICLES_ALLOWED]
Article.objects.exclude(pk__in=objects_to_keep).delete()
@staticmethod
def increase_count(id):
print "increased_count"
entry=Article.objects.get(id=id)
print entry.count
entry.count=entry.count+1
entry.save()
entry=Article.objects.get(id=id)
print entry.count
print "increased"
@staticmethod
def get_spotlight_articles(count):
query=Article.objects.select_related().order_by("-count")[:count]
articles=[]
for article in query:
articles.append(article)
return articles
@staticmethod
def fetch_articles(offset_from_latest=0,no_of_articles=10,category="",type=TYPE_ALL):
start_point = offset_from_latest-1
end_point = start_point + no_of_articles
if type==TYPE_ALL:
if category==CATEGORY_ALL:
if start_point<0:
query=Article.objects.select_related().order_by("-published")[:end_point]
else:
query=Article.objects.select_related().order_by("-published")[start_point:end_point]
else:
if start_point<0:
query=Article.objects.filter(pcategoryid__name__contains=category).select_related().order_by("-published")[:end_point]
else:
query=Article.objects.filter(pcategoryid__name__contains=category).select_related().order_by("-published")[start_point:end_point]
elif type==TYPE_POPULAR:
if category==CATEGORY_ALL:
if start_point<0:
query=Article.objects.select_related().order_by("-count")[:end_point]
else:
query=Article.objects.select_related().order_by("-count")[start_point:end_point]
else:
if start_point<0:
query=Article.objects.filter(pcategoryid__name__contains=category).select_related().order_by("-count")[:end_point]
else:
query=Article.objects.filter(pcategoryid__name__contains=category).select_related().order_by("-count")[start_point:end_point]
articles=[]
for article in query:
articles.append(article)
return articles
def getImportaantFeatures(self):
extractor=extract.TermExtractor()
extractor.filter=extract.permissiveFilter
key_word_for_desc=extractor(self.description)
dict_important_features={}
for element in key_word_for_desc:
word = stem_word(element[0])
if len(word)!=0:
dict_important_features[word]=element[1]
#print str(dict_important_features)
return dict_important_features
def get_pcategory(self,list_pcategory,dict_features):
important_features=self.getImportaantFeatures()
dict_scores_for_category={}
vocab_of_document=1
vocab_of_keywords = 0
for key in important_features.keys():
vocab_of_document=vocab_of_document+important_features[key]
for feature in important_features.keys():
if feature in dict_features:
print feature
vocab_of_keywords=vocab_of_keywords+important_features[feature]
for element in dict_features[feature]:
category= element.pcategory.name
score_value = element.probability
score_value=score_value+DELTA
log_value = important_features[feature]*log(score_value,10)
if category in dict_scores_for_category:
dict_scores_for_category[category]=dict_scores_for_category[category]+log_value
else:
dict_scores_for_category[category]=log_value
print dict_scores_for_category
min_value = log(MIN_LIMIT,10)
min_key_word_count = RATIO_KEY_WORD_REQUIRED
print min_value
p_category_key=None
value_count_key_word = float(vocab_of_keywords)/vocab_of_document
for key in dict_scores_for_category.keys():
if dict_scores_for_category[key]>=min_value and value_count_key_word>=min_key_word_count :
min_value = dict_scores_for_category[key]
p_category_key = key
if p_category_key==None:
p_category_key=OTHER_KEY
#print p_category_key
for element in list_pcategory:
if element.name.lower().strip()==p_category_key.lower().strip():
self.pcategoryid=element
|
StarcoderdataPython
|
6685038
|
<filename>chromepass/chrome_passwords.py
import os
import sqlite3
import platform
from chromepass import ChromeLinux, ChromeWindows, ChromeMac
class Chromepass:
""" class Chromepass to get the passwords from the database file
Usage:
chpass = Chromepass()
chpass.get_passwords()
"""
def __init__(self):
self.conn = None
self.cursor = None
self.results = list()
if platform.system() == 'Windows':
self.os = ChromeWindows()
elif platform.system() == 'Linux':
self.os = ChromeLinux()
elif platform.system() == 'Darwin':
self.os = ChromeMac()
self.connect_to_database_file()
def __del__(self):
"""destructor"""
self.close_connection()
def close_connection(self):
""" closes the db connection
"""
if self.conn:
self.conn.close()
def connect_to_database_file(self):
""" connect to the database file
"""
if os.path.exists(self.os.tmp_login_db_path):
self.conn = sqlite3.connect(self.os.tmp_login_db_path)
self.cursor = self.conn.cursor()
else:
print("File does not exist: {}".format(self.os.tmp_login_db_path))
def get_passwords(self):
""" get passwords from database file
:return: list containing account information (url, username, password)
"""
try:
self.cursor.execute('SELECT action_url, username_value, password_value FROM logins')
data = self.cursor.fetchall()
if len(data) > 0:
for result in data:
url = result[0]
username = result[1]
encrypted_password = result[2]
# decrypt the password
password = self.os.decrypt_password(encrypted_password=encrypted_password)
if password:
account_details = dict()
account_details['url'] = url
account_details['username'] = username
account_details['password'] = password
self.results.append(account_details)
return self.results
else:
print('No results returned from sql query')
return self.results
except sqlite3.OperationalError as ex:
print('SQL Error {}'.format(ex))
return self.results
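# Usage sketch (illustrative), mirroring the example in the class docstring:
# recover the stored accounts and print them one per line.
if __name__ == '__main__':
    chpass = Chromepass()
    for account in chpass.get_passwords():
        print('{url}\t{username}\t{password}'.format(**account))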
|
StarcoderdataPython
|
6037
|
""" Unit tests for ``wheezy.templates.utils``.
"""
import unittest
class FindAllBalancedTestCase(unittest.TestCase):
""" Test the ``find_all_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``([`` return.
"""
from wheezy.template.utils import find_all_balanced
assert 0 == find_all_balanced('test([', 0)
assert 3 == find_all_balanced('test([', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 4 == find_all_balanced('test(a, b', 4)
assert 4 == find_all_balanced('test[a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test(a, b)', 4)
assert 13 == find_all_balanced('test(a, b)[0]', 4)
assert 12 == find_all_balanced('test(a, b())', 4)
assert 17 == find_all_balanced('test(a, b())[0]()', 4)
class FindBalancedTestCase(unittest.TestCase):
""" Test the ``find_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``start_sep`` return.
"""
from wheezy.template.utils import find_balanced
assert 0 == find_balanced('test(', 0)
assert 3 == find_balanced('test(', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_balanced
assert 4 == find_balanced('test(a, b', 4)
assert 4 == find_balanced('test(a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test(a, b)', 4)
assert 12 == find_balanced('test(a, b())', 4)
|
StarcoderdataPython
|
91418
|
from abc import ABC, abstractmethod
from contextlib import contextmanager
from queue import Empty, Full, LifoQueue, Queue
from typing import Callable, Generic, List, Optional, TypeVar
from .errors import Invalid, UnableToCreateValidObject, Unmanaged
from .factory import Factory
T = TypeVar("T")
class Pool(Generic[T]):
"""A managed pool of `T` objects"""
def __init__(
self,
factory: Factory[T],
queue_cls: Callable[[], Queue] = LifoQueue,
maxsize: Optional[int] = None,
eager: bool = False,
):
self._factory = factory
self._queue_cls = queue_cls
self.maxsize = maxsize
self.eager = eager
self._items: List[T] = []
self._available_items: Queue[T] = self._queue_cls(maxsize=self.maxsize or 0)
if self.eager:
self.fill()
def acquire(self) -> T:
"""Acquire a new item from the pool"""
try:
item = self._available_items.get(block=False)
except Empty:
return self._create()
if item not in self._items:
raise Unmanaged(item)
try:
if not self._factory.validate(item):
raise Invalid(item)
return item
except Invalid:
self._destroy(item)
return self.acquire()
except Exception as e:
self._destroy(item)
raise e
def release(self, item: T) -> None:
"""Release an item back to the pool"""
if item not in self._items:
raise Unmanaged(item)
if self._factory.validate(item):
self._available_items.put(item)
else:
self._destroy(item)
def empty(self) -> bool:
"""Returns True if there are no items in the pool"""
return len(self._items) == 0
def full(self) -> bool:
return self.maxsize is not None and len(self._items) >= self.maxsize
    def busy(self) -> bool:
        """Returns True if no items are currently available to acquire"""
        return self._available_items.empty()
    def idle(self) -> bool:
        """Returns True if all managed items are waiting in the pool"""
        return self._available_items.full()
def drain(self) -> None:
while not self.empty():
item = self._available_items.get()
self._destroy(item)
def fill(self) -> None:
if self.maxsize is None:
return
while not self.full():
item = self._create()
self.release(item)
@contextmanager
def item(self):
item = self.acquire()
try:
yield item
finally:
self.release(item)
def _create(self) -> T:
if self.full():
raise Full()
item = self._factory.create()
self._items.append(item)
if not self._factory.validate(item):
self._destroy(item)
raise UnableToCreateValidObject()
return item
def _destroy(self, item: T) -> None:
if item not in self._items:
raise Unmanaged(item)
self._items.remove(item)
self._factory.destroy(item)
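# Usage sketch (illustrative, not part of the module): a trivial dict-based factory
# and a bounded pool. It assumes Factory can be subclassed by implementing the
# create/validate/destroy methods that Pool relies on above; the _ConnFactory name
# and its dict "connections" are made up for the example.
class _ConnFactory(Factory[dict]):
    def create(self) -> dict:
        return {"open": True}

    def validate(self, item: dict) -> bool:
        return item.get("open", False)

    def destroy(self, item: dict) -> None:
        item["open"] = False


def _pool_example() -> None:
    pool: Pool[dict] = Pool(_ConnFactory(), maxsize=2)
    with pool.item() as conn:   # acquire, then release automatically
        assert conn["open"]
    pool.drain()                # destroy every item the pool created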
|
StarcoderdataPython
|
8198221
|
<filename>TIPE/dessiner_avion.py
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 10:22:39 2018
@author: <NAME>
"""
import tkinter

k = 50   # pixel size of one cell
n = 10   # grid size; assumed value, the original fragment never defines n
# cell states to display; assumed placeholder, the original fragment never defines c
c = [[0 for x in range(n)] for y in range(n)]
dico = {-1: "red", 2: "yellow", 1: "blue", 0: "white"}
couleur = [[0 for x in range(n)] for y in range(n)]
fenetre = tkinter.Tk()
canvas = tkinter.Canvas(fenetre, width=k*n, height=k*n, highlightthickness=0)
canvas.pack()
for x in range(n):
for y in range(n):
couleur[y][x]=canvas.create_rectangle((x*k, y*k,(x+1)*k, (y+1)*k), outline="gray", fill="red")
for x in range(n):
for y in range(n):
canvas.itemconfig(couleur[x][y], fill=dico[c[x][y]])
fenetre.mainloop()
|
StarcoderdataPython
|
1640008
|
<reponame>masaharu-kato-lab/firefly_algorithm
import copy
from itertools import chain, product
import functools
import numpy as np #type:ignore
import nd_equation
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union, Optional
Node = Tuple[int, int]
class PatternedPermutation:
def __init__(self, nodes:List[Node], pattern:Union[str, tuple]):
self.nodes = nodes
self.pattern = pattern
if not pattern: raise RuntimeError('Pattern is empty.')
def chain_patterned_permutations(_perms:Iterable[PatternedPermutation]) -> PatternedPermutation:
perms = list(_perms)
return PatternedPermutation(
[*chain.from_iterable(p.nodes for p in perms)],
tuple(p.pattern for p in perms)
)
class Builder:
def __init__(self, *,
methods_func_dof:Dict[Any, Tuple[Callable, int]], # dict(method, tuple(method lambda, degree of freedom))
clusters_nodes:List[List[Node]]
):
self.clusters_nodes = clusters_nodes
self.dof_of_methods = {name:func_dof[1] for name, func_dof in methods_func_dof.items()}
self.func_of_methods = {name:func_dof[0] for name, func_dof in methods_func_dof.items()}
self.methods = methods_func_dof.keys()
self.n_cluster = len(clusters_nodes)
def build_with_dof(self, total_number:int) -> List[PatternedPermutation]:
return self.build_with_multiple_pattern(self.calc_number_of_pattern(total_number))
def build_with_multiple_pattern(self, number_of_pattern:Dict[Any, int]) -> List[PatternedPermutation]:
return [*chain.from_iterable((self.build_with_pattern(pattern) for _ in range(number)) for pattern, number in number_of_pattern.items())]
def build_with_pattern(self, pattern:List[Any]) -> PatternedPermutation:
return chain_patterned_permutations(
(self.func_of_methods[pattern[i]])(nodes)
for i, nodes in enumerate(self.clusters_nodes)
)
    # dof = degree of freedom (= number of random constructions)
def calc_number_of_pattern(self, total_number:int) -> Dict[Any, int]:
dof_count:Dict[int, int] = {}
dof_of_patterns = {}
# count each dof
for pattern in product(self.methods, repeat = self.n_cluster):
dof = sum([self.dof_of_methods[method] for method in pattern])
dof_of_patterns[pattern] = dof
if dof in dof_count:
dof_count[dof] += 1
else:
dof_count[dof] = 1
# convert list from dict
l_dof_count = [0] * (max(dof_count.keys()) + 1)
for dof, count in dof_count.items(): l_dof_count[dof] = count
# transpose right side to constant of left side on equation
l_dof_count[0] -= total_number
# solve equation
unit_val = nd_equation.solve(coefs = l_dof_count, prec = 0.00001, init = 1)
        # calc number (population) of each pattern
number_of_patterns = {}
for pattern, dof in dof_of_patterns.items():
number_of_patterns[pattern] = round(unit_val ** dof)
# adjust total number
c_total_number = sum(number_of_patterns.values())
if c_total_number != total_number:
# print('total number adjustment ({} to {})'.format(c_total_number, total_number))
max_number_pattern = max(number_of_patterns, key=number_of_patterns.get)
number_of_patterns[max_number_pattern] += total_number - c_total_number
# check
if not (sum(number_of_patterns.values()) == total_number and all(number >= 1 for number in number_of_patterns.values())):
raise RuntimeError('Invalid number of patterns.')
return number_of_patterns
def build_greedy(nodes:List[Node], dist:Callable, dist_compare:Callable, nn_n_random:int = 0, start_node:Optional[Node] = None) -> PatternedPermutation:
ordered_nodes = []
remain_nodes = copy.copy(nodes)
last_node = start_node
for i_itr in range(len(nodes)):
if i_itr < nn_n_random:
target_id = np.random.choice(range(len(remain_nodes)))
else:
if last_node is None:
raise RuntimeError('Start node required.')
dists = [dist(last_node, node) for node in remain_nodes]
min_dist = min(dists, key=functools.cmp_to_key(dist_compare))
min_ids = [i for i, dist in enumerate(dists) if dist == min_dist]
if len(min_ids) > 1:
raise RuntimeError('Same distance values.')
target_id = min_ids[0]
# if len(min_ids) > 1: print("choice one from multiple.")
# target_id = np.random.choice(min_ids) if len(min_ids) > 1 else min_ids[0]
last_node = remain_nodes[target_id]
ordered_nodes.append(last_node)
remain_nodes.pop(target_id)
if remain_nodes: raise RuntimeError('Remain nodes is not empty.')
return PatternedPermutation(ordered_nodes, 'G')
def build_randomly(nodes:List[Node]) -> PatternedPermutation:
return PatternedPermutation([nodes[i] for i in np.random.permutation(len(nodes))], 'R')
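# Usage sketch (illustrative, not part of the module): one random and one greedy
# permutation over a toy node set. The Euclidean distance and the comparison
# callable below are assumptions; the real project wires in its own dist functions.
if __name__ == '__main__':
    toy_nodes = [(0, 0), (3, 4), (6, 8), (1, 1)]

    def _euclid(a: Node, b: Node) -> float:
        return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5

    def _cmp(x: float, y: float) -> int:
        return (x > y) - (x < y)

    print(build_randomly(toy_nodes).nodes)
    print(build_greedy(toy_nodes, _euclid, _cmp, start_node=(0, 0)).nodes)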
|
StarcoderdataPython
|
159125
|
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set
from Utils.Data.Dictionary.MappingDictionary import *
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle, GeneratedFeatureOnlyPickle
from Utils.Data.Features.MappedFeatures import MappedFeatureTweetLanguage
def top_popular_language(dataset_id: str, top_n: int = 5):
# if is_test_or_val_set(dataset_id):
# dataset_id = get_train_set_id_from_test_or_val_set(dataset_id)
#
# dataframe = TweetFeatureIsLanguage(dataset_id).load_or_create()
#
# popularity_list = [(dataframe[column].sum(), dataframe[column]) for column in dataframe.columns]
#
# popularity_list = sorted(popularity_list, key=lambda x: x[0], reverse=True)
#
# selected_column = [tuple[1] for tuple in popularity_list][: top_n]
#
# selected_column_id = [col.name.split("_")[2] for col in selected_column]
#
# return selected_column_id
return [0, 1, 2, 10]
class TweetFeatureIsLanguage(GeneratedFeatureOnlyPickle):
def __init__(self, dataset_id: str, selected_languages: list = []):
super().__init__("tweet_is_language_x", dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/is_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/is_language/{self.feature_name}.csv.gz")
self.selected_languages = selected_languages
def load_feature(self):
dataframe = super().load_feature()
top_pop_list = []
if len(self.selected_languages) > 0:
selected_columns = ["is_language_" + str(language) for language in self.selected_languages]
return dataframe[selected_columns]
else:
return dataframe
def create_feature(self):
# Load the languages
languages = MappingLanguageDictionary().load_or_create().values()
# Load the language column
language_feature = MappedFeatureTweetLanguage(self.dataset_id)
language_df = language_feature.load_or_create()
# Creating the dataframe
dataframe = pd.DataFrame()
# Populating the dataframe
for language in languages:
dataframe[f"is_language_{language}"] = (language_df[language_feature.feature_name] == language)
self.save_feature(dataframe)
|
StarcoderdataPython
|
3550297
|
""" Effects Modules"""
__all__ = [
'brighbreathing',
'generic',
'rainbow',
'runner',
'static'
]
|
StarcoderdataPython
|
8116787
|
<reponame>zjZSTU/LightWeightCNN<filename>py/lib/test/test_fire.py
# -*- coding: utf-8 -*-
"""
@date: 2020/4/26 3:49 PM
@file: test_fire.py
@author: zj
@description:
"""
import torch
from models.squeezenet.fire import Fire
def test():
x = torch.randn((1, 3, 28, 28))
model = Fire(3, 10, 5, 5)
outputs = model(x)
assert len(outputs.shape) == 4
|
StarcoderdataPython
|
3577058
|
<gh_stars>1-10
# Copyright 2011 Gilt Groupe, INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mothership.serverinfo
Collects and displays information about servers
"""
# import some modules
import mothership
import sys
import mothership.kv
from mothership.mothership_models import *
from sqlalchemy import or_, desc, MetaData
# list_servers takes a parameter to search by and, optionally a tag
# then prints all servers it finds in the db
def list_servers(cfg, listby=None, lookfor=None):
"""
[description]
prints a list of all servers found for a given parameter
[parameter info]
required:
cfg: the config object. useful everywhere
listby: what parameter to list by (vlan, site_id, etc.)
lookfor: filter to apply to the parameter
(ie. to look in vlan 105, listby=vlan lookfor=105)
[return value]
no explicit return
"""
# list servers by vlan
if listby == 'vlan':
if lookfor == None:
print "you must supply a name with the -n flag"
sys.exit(1)
else:
for serv, net in cfg.dbsess.query(Server, Network).\
filter(Network.ip!=None).\
filter(Network.vlan==lookfor).\
filter(Server.id==Network.server_id).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by site_id
elif listby == 'site_id':
if lookfor == None:
print "you must supply a name with the -n flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.site_id==lookfor).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by tag
elif listby == 'tag':
if lookfor == None:
print "you must supply a tag with the -r flag"
sys.exit(1)
else:
servers_primary = []
for server in cfg.dbsess.query(Server).\
filter(Server.tag==lookfor).\
order_by(Server.hostname):
servers_primary.append(server)
servers_kv = []
kvs = mothership.kv.collect(cfg, None, key='tag')
for i in kvs:
namespace,key = str(i).split(' ')
if key == "tag="+lookfor:
servers_kv.append(i.hostname+"."+i.realm+"."+i.site_id)
else:
pass
if servers_primary:
for serv in servers_primary:
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
elif servers_kv:
for serv in servers_kv:
print serv
else:
pass
# list servers by realm
elif listby == 'realm':
if lookfor == None:
print "you must supply a realm with the -R flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.realm==lookfor).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by manufacturer
elif listby == 'manufacturer':
if lookfor == None:
print "you must supply a manufacturer with the -m flag"
sys.exit(1)
else:
search_string = '%' + lookfor + '%'
for serv, hw in cfg.dbsess.query(Server, Hardware).\
filter(Hardware.manufacturer.like(search_string)).\
filter(Server.hw_tag==Hardware.hw_tag).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by model name
elif listby == 'model':
if lookfor == None:
print "you must supply a model with the -M flag"
sys.exit(1)
else:
search_string = '%' + lookfor + '%'
for serv, hw in cfg.dbsess.query(Server, Hardware).\
filter(Hardware.model.like(search_string)).\
filter(Server.hw_tag==Hardware.hw_tag).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by cores
elif listby == 'cores':
        if lookfor is None or not lookfor.isdigit():
print "you must supply a number with the -C flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.cores==lookfor).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by ram
elif listby == 'ram':
        if lookfor is None or not lookfor.isdigit():
print "you must supply a number with the -a flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.ram==lookfor).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by disk
elif listby == 'disk':
        if lookfor is None or not lookfor.isdigit():
print "you must supply a number with the -d flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.disk==lookfor).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by hw_tag
elif listby == 'hw_tag':
if lookfor == None:
print "you must supply a hardware tag with the -H flag"
sys.exit(1)
else:
for serv in cfg.dbsess.query(Server).\
filter(Server.hw_tag==lookfor):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by virtual
elif listby == 'virtual':
for serv in cfg.dbsess.query(Server).\
filter(Server.virtual==True).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by physical
elif listby == 'physical':
for serv in cfg.dbsess.query(Server).\
filter(Server.virtual==False).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list servers by name
elif listby == 'name':
if lookfor == None:
print "you must supply a name with the -n flag"
sys.exit(1)
else:
search_string = '%' + lookfor + '%'
for serv in cfg.dbsess.query(Server).\
filter(Server.hostname.like(search_string)).\
order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
# list all servers by default
else:
for serv in cfg.dbsess.query(Server).order_by(Server.hostname):
print "%s.%s.%s" % (serv.hostname, serv.realm, serv.site_id)
|
StarcoderdataPython
|
11310265
|
[
{
'date': '2012-01-01',
'description': 'Nytårsdag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2012-04-05',
'description': 'Skærtorsdag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-04-06',
'description': 'Langfredag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-04-08',
'description': 'Påskedag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-04-09',
'description': 'Anden påskedag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-05-04',
'description': 'Store bededag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-05-17',
'description': 'Kristi himmelfartsdag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-05-27',
'description': 'Pinsedag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-05-28',
'description': 'Anden pinsedag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2012-06-05',
'description': 'Grundlovsdag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2012-12-25',
'description': 'Juledag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2012-12-26',
'description': 'Anden juledag',
'locale': 'da-DK',
'notes': '',
'region': '',
'type': 'NRF'
}
]
|
StarcoderdataPython
|
1751979
|
import os
from parlai.utils.misc import warn_once
from parlai.utils.io import PathManager
from pytorch_pretrained_bert import BertTokenizer
from parlai.agents.bert_ranker.bert_dictionary import BertDictionaryAgent
from .helpers import VOCAB_PATH, MODEL_FOLDER, download
class IndoBertDictionaryAgent(BertDictionaryAgent):
def __init__(self, opt):
super().__init__(opt)
# initialize from vocab path
warn_once(
"WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored"
)
download(opt["datapath"])
vocab_path = PathManager.get_local_path(
os.path.join(opt["datapath"], "models", MODEL_FOLDER, VOCAB_PATH)
)
self.tokenizer = BertTokenizer.from_pretrained(vocab_path)
self.start_token = "[CLS]"
self.end_token = "[SEP]"
self.null_token = "[PAD]"
self.start_idx = self.tokenizer.convert_tokens_to_ids(["[CLS]"])[
0
] # should be 101
self.end_idx = self.tokenizer.convert_tokens_to_ids(["[SEP]"])[
0
] # should be 102
self.pad_idx = self.tokenizer.convert_tokens_to_ids(["[PAD]"])[0] # should be 0
# set tok2ind for special tokens
self.tok2ind[self.start_token] = self.start_idx
self.tok2ind[self.end_token] = self.end_idx
self.tok2ind[self.null_token] = self.pad_idx
# set ind2tok for special tokens
self.ind2tok[self.start_idx] = self.start_token
self.ind2tok[self.end_idx] = self.end_token
self.ind2tok[self.pad_idx] = self.null_token
|
StarcoderdataPython
|
6683291
|
'''annotate_rnp - add information to the output from RNPxl
=======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python RNP Proteomics
Purpose
-------
This script takes the xlsx output from RNPxl and annotates the table with useful information for downstream analyses.
The following columns are added:
- master_protein(s): The master protein(s) for the peptide. See below
for how this is derived
- master_uniprot_id(s): The uniprot id(s) for the master protein(s)
- protein_description(s): Description(s) for the master protein(s)
- protein_length(s): The length(s) of the master protein(s)
- position_in_peptide: The most-likely position of the cross-link in the peptide
- position_in_protein(s): The most-likely position of the cross-link in the protein
- window_13-17: 13, 15 & 17 amino acid windows around the cross-link
position. See below for further notes on these windows
- crap_protein: Is the protein in the cRAP database of common
proteomics proteins, e.g keratin
If a log file is requested (--log), basic statistics are collected and
written to the log file
Fasta description format
------------------------
The source of the protein (SwissProt or TrEMBL) is derived from the
protein fasta description, with SwissProt proteins starting 'sp' and
TrEMBL 'tr'. Furthermore, the description column is derived from the
fasta description too. For this reason the fasta databases must be
correctly formatted as in the examples below. This is the standard
format for fasta files from uniprot.
format:
Three-level identifier followed by protein description:
>[sp|tr]|[Uniprot id]|[Protein name] [Description]
examples:
>sp|P27361|MK03_HUMAN Mitogen-activated protein kinase 3 OS=Homo sapiens GN=MAPK3 PE=1 SV=4
>tr|F8W1T5|F8W1T5_HUMAN GTPase RhebL1 (Fragment) OS=Homo sapiens GN=RHEBL1 PE=4 SV=1
Deriving the master proteins
----------------------------
Matching peptides to their source proteins (protein inference) is a
common task in proteomics and there are many possible
approaches. Ultimately, the aim is usually to identify the most likely
source protein since taking all possible sources makes downstream
analyses very complex. Here we use the parsimonious approach to
identify a minimal set of proteins which explains all peptides
observed. In essence, the approach is as follows:
- start with list of all peptides
- sort proteins by the number of peptides observed
- take the protein(s) with the most peptides and remove these from the peptides list
- continue through the sorted proteins, removing peptides, until the
peptides list is empty
Additionally, we prioritise matches to SwissProt proteins over TrEMBL
proteins. SwissProt proteins have been manually curated and should not
contain any redundant proteins, truncated sequences etc. On the other
hand, the set of TrEMBL proteins will certainly contain proteins
which are redundant with respect to the SwissProt proteins as well as
truncated proteins. It is useful to include the TrEMBL proteins to
catch peptides which are from a protein or isoform which has not been
curated into SwissProt yet. However, where a SwissProt match is found,
any TrEMBL match can safely be ignored. Here, for all peptides with
matched to both SwissProt and TrEMBL proteins, we remove all the
TrEMBL matches.
In some instances, it is not possible to assign a single protein to a
peptide. In these cases, the proteins names, uniprot ids, descriptions
and lengths are ';' separated in the outfile.
13, 15 & 17 amino acid windows
------------------------------
For motif analysis, 13, 15 & 17 amino acid windows around the most
likely cross-link position are provided. Where the cross-link
position is too close to the start or end of the protein for the
window, e.g. the cross-link is at position 6 - not possible to extract a
window from -1 -> 12, the value "protein_too_short_for_window" is
given. Where there is more than one master protein, the window is
provided only where the amino acid sequence is the same for all master
proteins. Where the sequences diverge, the value
"different_sequences_for_the_proteins" is given. Care must be taken
with any motif analysis since the cross-link site is hard to identify
so the windows may not be well centered. Furthermore, since MS is bias
towards particular peptides, care must be taken to provide a suitable
background set of windows. For example, random windows from the fasta
could simply yield motifs which are enriched in MS analyses
Usage
-----
By default, the outfile will be created in the same directory with the
suffix _annotated.tsv. You can change the outfile name by specifying
the option --outfile
python annotate_rnp.py --infile=RNP.xlsx --fasta-db=Human_201701.fasta
--fasta-crap-db=RAP_FullIdentifiers.fasta --outfile=RNP_annotated.xlsx
--logfile=RNP_annotation.log
Command line options
--------------------
'''
import argparse
import collections
import copy
import os
import re
import sys
import pandas as pd
import proteomics.fasta as fasta
def getMotifWindow(positions, proteins, length):
''' Extract amino acid sequences of (length) n from (proteins),
centered at (positions)'''
assert length % 2 == 1, 'motif window must be an odd length'
assert len(positions) == len(proteins), "must have as many positions as proteins"
windows = set()
for position, protein_seq in zip(positions, proteins):
buffer = ((length - 1) / 2)
windows.add(protein_seq[int(position-buffer): int(position+buffer) + 1])
if min([len(x) for x in windows]) != length:
return "protein_too_short_for_window"
if len(windows) == 1:
return list(windows)[0]
else:
return "different_sequences_for_the_proteins"
def writeSectionHeader(logfile, section_header):
#underliner = "".join(("-",)*len(section_header))
section_blocker = ("======================================="
"=======================================")
underliner1 = ("----------------------------------------"
"----------------------------------------")
logfile.write("\n%s\n%s\n" % (section_blocker, section_header))
logfile.write("%s\n" % underliner1)
return section_blocker
def main(argv=sys.argv):
parser = argparse.ArgumentParser(
argv, usage=__doc__)
optional = parser.add_argument_group('optional arguments')
required = parser.add_argument_group('required arguments')
required.add_argument('-i', '--infile', dest="infile", required=True,
help="")
required.add_argument('-f', '--fasta-db', dest="fasta_db", required=True,
help="")
required.add_argument('-fc', '--fasta-crap-db', dest="fasta_crap_db",
required=True, help="")
optional.add_argument('-o', '--outfile', dest="outfile", default=None,
help="")
optional.add_argument('-l', '--logfile', dest="logfile", default=os.devnull,
help="")
args = vars(parser.parse_args())
if args['outfile'] is None:
args['outfile'] = args['infile'].replace(".xlsx", "_annotated.tsv")
logfile = open(args['logfile'], 'w')
logfile.write("Logfile for annotate_rnp.py\n\n")
section_blocker = writeSectionHeader(logfile, "Script arguments:")
for key, value in args.items():
logfile.write("%s: %s\n" % (key, value))
logfile.write("%s\n\n" % section_blocker)
# read the data into a dataframe
rnp_df = pd.read_excel(args['infile'])
# add some basic annotations
rnp_df['tr_only'] = [x.count("sp|") == 0 for x in rnp_df['Proteins']]
rnp_df['matches'] = [len(x.split(",")) for x in rnp_df['Proteins']]
#(1) Get the mappings between peptide and proteins
pep2pro = collections.defaultdict(lambda: collections.defaultdict(set))
pep2allpro = collections.defaultdict(set)
pro2pep = collections.defaultdict(set)
top_level_proteins = set()
initial_proteins = set()
# (1.1) extract the initial mappings between proteins and peptides
for row_ix, row_values in rnp_df[['Proteins', 'Peptide']].iterrows():
proteins = row_values['Proteins'].split(",")
peptide = row_values['Peptide']
if peptide in pep2pro:
assert pep2allpro[peptide] == proteins, (
"The same peptide is observed more than once with different proteins!")
pep2allpro[peptide] = proteins
for protein in proteins:
initial_proteins.add(protein)
pro2pep[protein].add(peptide)
if protein.split("|")[0] == "sp":
protein_level = 1
top_level_proteins.add(protein)
elif protein.split("|")[0] == "tr":
protein_level = 2
else:
raise ValueError("Protein does not appear to be either"
"SwissProt(sp) or TrEMBL(tr)")
pep2pro[peptide][protein_level].add(protein)
section_blocker = writeSectionHeader(logfile, "Initial file stats")
logfile.write("# initial peptides: %i\n" % len(pep2pro))
logfile.write("# initial proteins: %i\n" % len(pro2pep))
logfile.write("# initial SwissProt proteins: %i\n" % len(top_level_proteins))
logfile.write("# initial TrEMBL proteins: %i\n" % (
len(pro2pep)-len(top_level_proteins)))
logfile.write("%s\n\n" % section_blocker)
# (1.2) find the peptides with only TrEMBL protein matches and
# 'upgrade' these TrEMBL proteins to being equivalent to SwissProt
tr_only_peptides = set([x for x in pep2pro.keys() if len(pep2pro[x][1])==0])
set_upgraded = set()
for peptide in tr_only_peptides:
upgraded = pep2pro[peptide][2]
set_upgraded.update(upgraded)
top_level_proteins.update(upgraded)
pep2pro[peptide][2] = pep2pro[peptide][2].difference(set(upgraded))
pep2pro[peptide][1] = pep2pro[peptide][1].union(set(upgraded))
section_blocker = writeSectionHeader(
logfile, "Deciding which TrEMBL proteins to retain:")
logfile.write("# peptides with only TrEMBL matches: %i\n" % (
len(tr_only_peptides)))
logfile.write("# TrEMBL proteins retained as no SwissProt matches for "
"peptide: %i\n" % (len(set_upgraded)))
logfile.write("%s\n\n" % section_blocker)
    # (1.3) Use a parsimonious approach to identify the minimum number
# of proteins required to cover all the peptides:
# Start from the protein(s) with the most peptides and mark these as covered.
# Continue with remaining proteins in order of peptides per protein
# until all peptides are covered
retained_proteins = []
peptides = copy.deepcopy(set(pep2pro.keys()))
peptide_counts = {}
tmppro2pep = copy.deepcopy(pro2pep)
new_top_level_proteins = copy.deepcopy(top_level_proteins)
new_pep2pro = collections.defaultdict(set)
peptide_count = max(map(len, tmppro2pep.values()))
section_blocker = writeSectionHeader(
logfile, ("Parsimonious method to identify minimal set of proteins"
" to account for all peptides"))
while True:
# (1.3.1) If all peptides covered or the maximum peptides per
# protein = 0, break.
if len(peptides) == 0 or peptide_count == 0:
logfile.write("All peptides are now accounted for\n")
break
peptide_count -= 1
top_proteins = set()
top_score = 0
#(1.3.2) Find the proteins with the highest number of peptide matches
for protein in new_top_level_proteins:
if len(tmppro2pep[protein]) == top_score:
top_proteins.add(protein)
elif len(tmppro2pep[protein]) > top_score:
top_score = len(tmppro2pep[protein])
top_proteins = set((protein,))
logfile.write("%i remaining protein(s) with %i peptides\n" % (
len(top_proteins), top_score))
# (1.3.3) Remove the top proteins and the associated peptides
for top_protein in top_proteins:
new_top_level_proteins.remove(top_protein)
retained_proteins.append(top_protein)
for peptide in pro2pep[top_protein]:
new_pep2pro[peptide].add(top_protein)
if peptide in peptides:
peptides.remove(peptide)
for protein in pep2pro[peptide][1]:
if protein == top_protein:
continue
if peptide in tmppro2pep[protein]:
tmppro2pep[protein].remove(peptide)
logfile.write("\n%i proteins retained\n" % len(retained_proteins))
#logfile.write("\n".join([",".join(map(str, (x, len(tmppro2pep[x]), len(pro2pep[x]))))
# for x in new_top_level_proteins]))
logfile.write("%i SwissProt proteins retained\n" % len(
[x for x in retained_proteins if x.split("|")[0] == 'sp']))
logfile.write("%i TrEMBL proteins retained\n" % len(
[x for x in retained_proteins if x.split("|")[0] == 'tr']))
logfile.write("\nNote: If not all SwissProt proteins were retained, this means\n"
"these proteins only included peptides which were observed\n"
"in other proteins which had a greater number of peptides\n")
logfile.write("%s\n\n" % section_blocker)
section_blocker = writeSectionHeader(logfile, "proteins per peptide:")
counts = collections.Counter([len(x) for x in new_pep2pro.values()])
sum_counts = sum(counts.values())
for k, v in counts.items():
logfile.write("%i peptide(s) (%.2f %%) have %i master protein(s)\n" % (
v, (100 * v)/sum_counts, k))
logfile.write("%s\n\n" % section_blocker)
# Check all the peptides are covered
assert set(pep2pro.keys()).difference(set(new_pep2pro.keys())) == set()
# add the top protein and uniprot id annotations
rnp_df['master_protein(s)'] = [";".join(new_pep2pro[protein]) for protein in rnp_df['Peptide']]
rnp_df['master_uniprot_id(s)'] = [";".join([protein_id.split("|")[1] for protein_id in new_pep2pro[protein]])
for protein in rnp_df['Peptide']]
# (1.4) Build dictionaries to map from protein id to protein
# sequence and description using the fasta database
crap_proteins = set()
protein2description = {
entry.title.split(" ")[0]: " ".join(entry.title.split(" ")[1:])
for entry in fasta.FastaIterator(open(args['fasta_db']))}
protein2seq = {
entry.title.split(" ")[0]:entry.sequence
for entry in fasta.FastaIterator(open(args['fasta_db']))}
for entry in fasta.FastaIterator(open(args['fasta_crap_db'])):
protein2seq[entry.title.split(" ")[0]] = entry.sequence
protein2description[entry.title.split(" ")[0]] = entry.title.split(" ")[0]
crap_proteins.add(entry.title.split(" ")[0])
# (1.5) derive further annotations
protein_lengths = []
protein_descriptions = []
crap_protein = []
position_in_peptide = []
position_in_protein = []
motif_13 = []
motif_15 = []
motif_17 = []
for ix, row in rnp_df.iterrows():
peptide = row['Best localization(s)']
proteins = row['master_protein(s)'].split(";")
protein_lengths.append(";".join(map(str, [len(protein2seq[x]) for x in proteins])))
protein_descriptions.append(";".join([protein2description[x] for x in proteins]))
# (1.5.1) does peptide match a cRAP protein?
crap = 0
for protein in proteins:
if protein in crap_proteins:
crap = 1
break
crap_protein.append(crap)
        # (1.5.2) Find crosslink position in protein and extract 13,
# 15 & 17 aa windows around the crosslink position
if row['Best localization(s)']!='nan' and row['Best loc score']>0:
peptide_positions = [re.search(peptide.upper(), protein2seq[x]).start() for
x in proteins]
crosslink_position = None
for ix, aa in enumerate(peptide):
if aa == aa.lower():
crosslink_position = ix
assert crosslink_position is not None, (
"no crosslinked position was found(!): %s" % peptide)
position_in_peptide.append(crosslink_position + 1)
protein_positions = [crosslink_position + x for x in peptide_positions]
position_in_protein.append(
";".join(map(str, [x + 1 for x in protein_positions])))
motif_13.append(
getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 13))
motif_15.append(
getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 15))
motif_17.append(
getMotifWindow(protein_positions, [protein2seq[x] for x in proteins], 17))
else:
position_in_peptide.append("no_crosslink")
position_in_protein.append("no_crosslink")
motif_13.append("no_crosslink")
motif_15.append("no_crosslink")
motif_17.append("no_crosslink")
rnp_df['protein_length(s)'] = protein_lengths
rnp_df['protein_description(s)'] = protein_descriptions
rnp_df['crap_protein'] = crap_protein
rnp_df['position_in_peptide'] = position_in_peptide
rnp_df['position_in_protein(s)'] = position_in_protein
rnp_df['window_13'] = motif_13
rnp_df['window_15'] = motif_15
rnp_df['window_17'] = motif_17
new_column_order = [
"Best localization(s)",
"RNA",
"master_protein(s)",
"master_uniprot_id(s)",
'protein_description(s)',
'protein_length(s)',
'position_in_peptide',
'position_in_protein(s)',
'window_13', 'window_15', 'window_17',
'crap_protein',
"Peptide",
"Proteins"]
new_column_order.extend([x for x in rnp_df.columns if x not in new_column_order])
final_rnp_df = rnp_df[new_column_order]
final_rnp_df.to_csv(args['outfile'], index=False, sep="\t")
os.chmod(args['outfile'], 0o666)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
5000938
|
import tensorflow as tf
import time
from easytransfer.utils.hooks import avgloss_logger_hook
import whale as wh
import os
class WhaleEstimator(object):
def __init__(self, model_fn, model_dir, num_model_replica,
num_accumulated_batches, keep_checkpoint_max, save_checkpoints_steps,
task_index=0):
self._build_model_fn = model_fn
self.model_dir = model_dir
self.num_model_replica = num_model_replica
self.num_accumulated_batches = num_accumulated_batches
self.keep_checkpoint_max = keep_checkpoint_max
self.save_checkpoints_steps = save_checkpoints_steps
        assert self.save_checkpoints_steps is not None, "save_checkpoints_steps must not be None"
self.task_index = task_index
def get_session(self, sess):
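        # Unwrap the MonitoredTrainingSession wrappers to reach the raw tf.Session,
        # which tf.train.Saver.save() expects.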
session = sess
while type(session).__name__ != 'Session':
# pylint: disable=W0212
session = session._sess
return session
def train(self, input_fn, max_steps):
# row = num_gpus / num_stages
#cluster = wh.cluster(layout={"row": self.num_model_replica})
cluster = wh.cluster()
tf.logging.info('cluster {}'.format(cluster))
with cluster:
with wh.replica():
dataset = input_fn()
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
results = iterator.get_next()
wh.current_scope_as_default()
total_loss, train_op = self._build_model_fn(results, None, "train", None)
summary_writer = tf.summary.FileWriter(os.path.join(self.model_dir, "train_suammary_output"))
saver = tf.train.Saver(max_to_keep=self.keep_checkpoint_max, var_list=tf.trainable_variables())
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
intra_op_parallelism_threads=1024,
inter_op_parallelism_threads=1024,
gpu_options=tf.GPUOptions(allow_growth=True,
force_gpu_compatible=True,
per_process_gpu_memory_fraction=1.0))
avgloss_hook = avgloss_logger_hook(max_steps,
total_loss,
self.model_dir,
100)
hooks = [tf.train.StopAtStepHook(last_step=max_steps), avgloss_hook]
with tf.train.MonitoredTrainingSession(
hooks=hooks,
config=session_config) as sess:
starttime = time.time()
while not sess.should_stop():
train_loss, _, step = sess.run([total_loss, train_op, tf.train.get_or_create_global_step()])
if step % 100 == 0:
endtime = time.time()
tf.logging.info("loss = {}, step = {} ({} sec)".format(train_loss, step, endtime - starttime))
starttime = time.time()
if step % 100 == 0 and self.task_index == 0:
train_loss_summary = tf.Summary()
train_loss_summary.value.add(tag='train_loss', simple_value=train_loss)
summary_writer.add_summary(train_loss_summary, global_step=step)
summary_writer.flush()
if step % self.save_checkpoints_steps == 0:
saver.save(self.get_session(sess), os.path.join(self.model_dir,'model.ckpt'), global_step=step)
summary_writer.close()
def evaluate(self):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def export_savedmodel(self):
raise NotImplementedError
|
StarcoderdataPython
|
4983563
|
<reponame>WafflePersonThing/rust-bindings
import asyncio
import atexit
import logging
import os
import os.path as op
import sys
import time
import __main__ as main
from IPython import get_ipython
from IPython.terminal.pt_inputhooks import register
try:
from .pydatoviz import App, colormap, demo
except ImportError:
raise ImportError(
"Unable to load the shared library, make sure to run in your terminal:\n"
"`export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/build`")
# Logging
# -------------------------------------------------------------------------------------------------
# Set a null handler on the root logger
logger = logging.getLogger('datoviz')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
_logger_fmt = '%(asctime)s.%(msecs)03d %(levelname)s %(caller)s %(message)s'
_logger_date_fmt = '%H:%M:%S'
class _Formatter(logging.Formatter):
def format(self, record):
# Only keep the first character in the level name.
record.levelname = record.levelname[0]
filename = op.splitext(op.basename(record.pathname))[0]
record.caller = '{:>18s}:{:04d}:'.format(filename, record.lineno).ljust(22)
message = super(_Formatter, self).format(record)
color_code = {'D': '90', 'I': '0', 'W': '33',
'E': '31'}.get(record.levelname, '7')
message = '\33[%sm%s\33[0m' % (color_code, message)
return message
def add_default_handler(level='INFO', logger=logger):
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = _Formatter(fmt=_logger_fmt, datefmt=_logger_date_fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
# DEBUG
add_default_handler('DEBUG')
# Globals
# -------------------------------------------------------------------------------------------------
_APP = None
_EXITING = False
_EVENT_LOOP_INTEGRATION = False
_ASYNCIO_LOOP = None
# Main functions
# -------------------------------------------------------------------------------------------------
def app(*args, **kwargs):
global _APP
if _APP is None:
_APP = App(*args, **kwargs)
assert _APP
return _APP
def canvas(*args, **kwargs):
return app().gpu().canvas(*args, **kwargs)
@atexit.register
def destroy():
global _APP, _EXITING
_EXITING = True
if _APP:
_APP.destroy()
_APP = None
# IPython event loop integration
# -------------------------------------------------------------------------------------------------
def inputhook(context):
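    # IPython prompt_toolkit input hook: keep rendering Datoviz frames while the
    # terminal is idle waiting for user input, so canvases stay responsive.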
global _APP, _EXITING, _EVENT_LOOP_INTEGRATION
if _EXITING:
return
if _APP is None:
logger.debug("automatically creating a Datoviz app")
_APP = app()
assert _APP
_EVENT_LOOP_INTEGRATION = True
while not context.input_is_ready():
_APP.next_frame()
# HACK: prevent the app.is_running flag to be reset to False at the end of next_frame()
_APP._set_running(True)
time.sleep(0.005)
def enable_ipython():
ipython = get_ipython()
if ipython:
logger.info("Enabling Datoviz IPython event loop integration")
app()._set_running(True)
ipython.magic('%gui datoviz')
def in_ipython():
try:
return __IPYTHON__
except NameError:
return False
def is_interactive():
if not in_ipython():
return hasattr(sys, 'ps1')
else:
if '-i' in sys.argv:
return True
# return sys.__stdin__.isatty()
# return hasattr(sys, 'ps1')
return not hasattr(main, '__file__')
# print(f"In IPython: {in_ipython()}, is interactive: {is_interactive()}")
register('datoviz', inputhook)
# Event loops
# -------------------------------------------------------------------------------------------------
def run_asyncio(n_frames=0, **kwargs):
# TODO: support kwargs options (autorun)
global _ASYNCIO_LOOP
if _ASYNCIO_LOOP is None:
_ASYNCIO_LOOP = asyncio.get_event_loop()
async def _event_loop():
logger.debug("start datoviz asyncio event loop")
i = 0
while app().next_frame() and (n_frames == 0 or i < n_frames):
await asyncio.sleep(0.005)
i += 1
task = _ASYNCIO_LOOP.create_task(_event_loop())
try:
_ASYNCIO_LOOP.run_until_complete(task)
except asyncio.CancelledError:
pass
def do_async(task):
global _ASYNCIO_LOOP
if _ASYNCIO_LOOP is None:
_ASYNCIO_LOOP = asyncio.get_event_loop()
_ASYNCIO_LOOP.create_task(task)
def run_native(n_frames=0, **kwargs):
logger.debug("start datoviz native event loop")
app().run(n_frames, **kwargs)
def run(n_frames=0, event_loop=None, **kwargs):
event_loop = event_loop or 'native'
if event_loop == 'ipython' or is_interactive():
enable_ipython()
elif event_loop == 'native':
run_native(n_frames, **kwargs)
elif event_loop == 'asyncio':
run_asyncio(n_frames)
|
StarcoderdataPython
|
12814414
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
import json
from typing import Text
from unittest import TestCase
import requests
from cornode import BadApiResponse, InvalidUri, TryteString
from cornode.adapter import HttpAdapter, MockAdapter, resolve_adapter
from mock import Mock, patch
from six import BytesIO, text_type
class ResolveAdapterTestCase(TestCase):
"""
Unit tests for :py:func:`resolve_adapter`.
"""
def test_adapter_instance(self):
"""
Resolving an adapter instance.
"""
adapter = MockAdapter()
self.assertIs(resolve_adapter(adapter), adapter)
def test_http(self):
"""
Resolving a valid ``http://`` URI.
"""
adapter = resolve_adapter('http://localhost:14265/')
self.assertIsInstance(adapter, HttpAdapter)
def test_https(self):
"""
Resolving a valid ``https://`` URI.
"""
adapter = resolve_adapter('https://localhost:14265/')
self.assertIsInstance(adapter, HttpAdapter)
def test_missing_protocol(self):
"""
The URI does not include a protocol.
"""
with self.assertRaises(InvalidUri):
resolve_adapter('localhost:14265')
def test_unknown_protocol(self):
"""
The URI references a protocol that has no associated adapter.
"""
with self.assertRaises(InvalidUri):
resolve_adapter('foobar://localhost:14265')
def create_http_response(content, status=200):
# type: (Text, int) -> requests.Response
"""
Creates an HTTP Response object for a test.
References:
- :py:meth:`requests.adapters.HTTPAdapter.build_response`
"""
response = requests.Response()
response.encoding = 'utf-8'
response.status_code = status
response.raw = BytesIO(content.encode('utf-8'))
return response
class HttpAdapterTestCase(TestCase):
def test_http(self):
"""
Configuring HttpAdapter using a valid ``http://`` URI.
"""
uri = 'http://localhost:14265/'
adapter = HttpAdapter(uri)
self.assertEqual(adapter.node_url, uri)
def test_https(self):
"""
Configuring HttpAdapter using a valid ``https://`` URI.
"""
uri = 'https://localhost:14265/'
adapter = HttpAdapter(uri)
self.assertEqual(adapter.node_url, uri)
def test_ipv4_address(self):
"""
Configuring an HttpAdapter using an IPv4 address.
"""
uri = 'http://127.0.0.1:8080/'
adapter = HttpAdapter(uri)
self.assertEqual(adapter.node_url, uri)
def test_configure_error_missing_protocol(self):
"""
Forgetting to add the protocol to the URI.
"""
with self.assertRaises(InvalidUri):
HttpAdapter.configure('localhost:14265')
def test_configure_error_invalid_protocol(self):
"""
Attempting to configure HttpAdapter with unsupported protocol.
"""
with self.assertRaises(InvalidUri):
HttpAdapter.configure('ftp://localhost:14265/')
def test_configure_error_empty_host(self):
"""
Attempting to configure HttpAdapter with empty host.
"""
with self.assertRaises(InvalidUri):
HttpAdapter.configure('http://:14265')
def test_configure_error_non_numeric_port(self):
"""
Attempting to configure HttpAdapter with non-numeric port.
"""
with self.assertRaises(InvalidUri):
HttpAdapter.configure('http://localhost:cornode/')
def test_configure_error_udp(self):
"""
UDP is not a valid protocol for ``HttpAdapter``.
"""
with self.assertRaises(InvalidUri):
HttpAdapter.configure('udp://localhost:14265')
def test_success_response(self):
"""
Simulates sending a command to the node and getting a success
response.
"""
adapter = HttpAdapter('http://localhost:14265')
expected_result = {
'message': 'Hello, cornode!',
}
mocked_response = create_http_response(json.dumps(expected_result))
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
result = adapter.send_request({'command': 'helloWorld'})
self.assertEqual(result, expected_result)
def test_error_response(self):
"""
Simulates sending a command to the node and getting an error
response.
"""
adapter = HttpAdapter('http://localhost:14265')
error_message = 'Command \u0027helloWorld\u0027 is unknown'
mocked_response = create_http_response(
status = 400,
content = json.dumps({
'error': error_message,
'duration': 42,
}),
)
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'400 response from node: {error}'.format(error=error_message),
)
def test_exception_response(self):
"""
Simulates sending a command to the node and getting an exception
response.
"""
adapter = HttpAdapter('http://localhost:14265')
error_message = 'java.lang.ArrayIndexOutOfBoundsException: 4'
mocked_response = create_http_response(
status = 500,
content = json.dumps({
'exception': error_message,
'duration': 16,
}),
)
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'500 response from node: {error}'.format(error=error_message),
)
def test_non_200_status(self):
"""
The node sends back a non-200 response that we don't know how to
handle.
"""
adapter = HttpAdapter('http://localhost')
decoded_response = {'message': 'Request limit exceeded.'}
mocked_response = create_http_response(
status = 429,
content = json.dumps(decoded_response),
)
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'429 response from node: {decoded}'.format(decoded=decoded_response),
)
def test_empty_response(self):
"""
The response is empty.
"""
adapter = HttpAdapter('http://localhost:14265')
mocked_response = create_http_response('')
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'Empty 200 response from node.',
)
def test_non_json_response(self):
"""
The response is not JSON.
"""
adapter = HttpAdapter('http://localhost:14265')
invalid_response = 'EHLO cornodetoken.com' # Erm...
mocked_response = create_http_response(invalid_response)
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'Non-JSON 200 response from node: ' + invalid_response,
)
def test_non_object_response(self):
"""
The response is valid JSON, but it's not an object.
"""
adapter = HttpAdapter('http://localhost:14265')
invalid_response = ['message', 'Hello, cornode!']
mocked_response = create_http_response(json.dumps(invalid_response))
mocked_sender = Mock(return_value=mocked_response)
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
with self.assertRaises(BadApiResponse) as context:
adapter.send_request({'command': 'helloWorld'})
self.assertEqual(
text_type(context.exception),
'Malformed 200 response from node: {response!r}'.format(
response = invalid_response,
),
)
# noinspection SpellCheckingInspection
@staticmethod
def test_trytes_in_request():
"""
Sending a request that includes trytes.
"""
adapter = HttpAdapter('http://localhost:14265')
# Response is not important for this test; we just need to make
# sure that the request is converted correctly.
mocked_sender = Mock(return_value=create_http_response('{}'))
# noinspection PyUnresolvedReferences
with patch.object(adapter, '_send_http_request', mocked_sender):
adapter.send_request({
'command': 'helloWorld',
'trytes': [
TryteString(b'RBTC9D9DCDQAEASBYBCCKBFA'),
TryteString(
b'CCPCBDVC9DTCEAKDXC9D9DEARCWCPCBDVCTCEAHDWCTCEAKDCDFD9DSCSA',
),
],
})
mocked_sender.assert_called_once_with(
url = adapter.node_url,
payload = json.dumps({
'command': 'helloWorld',
# Tryte sequences are converted to strings for transport.
'trytes': [
'RBTC9D9DCDQAEASBYBCCKBFA',
'CCPCBDVC9DTCEAKDXC9D9DEARCWCPCBDVCTCEAHDWCTCEAKDCDFD9DSCSA',
],
}),
headers = {
'Content-type': 'application/json',
},
)
|
StarcoderdataPython
|
1721307
|
class Solution:
def frequencySort(self, s):
freqs = {}
for c in s:
if c not in freqs:
freqs[c] = 1
else:
freqs[c] += 1
ans = ""
for k, v in sorted(freqs.items(), key=lambda x: x[1]):
ans += k * v
return ans[::-1]
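# Illustrative usage (not part of the original snippet):
# Solution().frequencySort("tree") -> "eert" (or "eetr"; characters are ordered by
# descending frequency, ties in arbitrary order).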
|
StarcoderdataPython
|
6457709
|
<reponame>lebrice/RoBO
import unittest
import numpy as np
from robo import initial_design
class TestInitialDesign(unittest.TestCase):
def setUp(self):
self.n_dim = 4
self.lower = np.zeros([self.n_dim])
self.upper = np.ones([self.n_dim])
self.n_points = 10
def test_init_random_uniform(self):
X = initial_design.init_random_uniform(self.lower, self.upper, self.n_points)
assert X.shape == (self.n_points, self.n_dim)
assert np.all(np.min(X, axis=0) >= 0)
assert np.all(np.max(X, axis=0) <= 1)
def test_init_random_normal(self):
X = initial_design.init_random_normal(self.lower, self.upper, self.n_points)
assert X.shape == (self.n_points, self.n_dim)
assert np.all(np.min(X, axis=0) >= 0)
assert np.all(np.max(X, axis=0) <= 1)
def test_init_grid(self):
X = initial_design.init_grid(self.lower, self.upper, self.n_points)
assert X.shape == (self.n_points ** self.n_dim, self.n_dim)
assert np.all(np.min(X, axis=0) >= 0)
assert np.all(np.max(X, axis=0) <= 1)
def test_init_latin_hypercube_sampling(self):
X = initial_design.init_latin_hypercube_sampling(self.lower, self.upper, self.n_points)
assert X.shape == (self.n_points, self.n_dim)
assert np.all(np.min(X, axis=0) >= 0)
assert np.all(np.max(X, axis=0) <= 1)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
6611934
|
# This is a scriptified version of the example here:
# https://github.com/kkaris/Network_Evaluation_Tools/blob/master/Network%20Evaluation%20Examples/Network%20Evaluation%20Example.ipynb
import argparse as ap
try:
from network_evaluation_tools import data_import_tools as dit
except ImportError: # Importing two times solves the ImportError apparently
from network_evaluation_tools import data_import_tools as dit
from network_evaluation_tools import network_evaluation_functions as nef
from network_evaluation_tools import network_propagation as prop
import pandas as pd
import numpy as np
def main(args):
input_network_file = args.infile # Input gene interaction set
gene_set_file = args.diseasefile
outname = args.outname
n_cores = args.cores
is_verbose = args.verbose
large = True
if args.size == 's':
large = False
# Load network (We choose a smaller network here for the example's sake)
network = dit.load_network_file(input_network_file, verbose=is_verbose)
# Load gene sets for analysis
genesets = dit.load_node_sets(gene_set_file)
# Calculate geneset sub-sample rate
genesets_p = nef.calculate_p(network, genesets)
# Determine optimal alpha for network (can also be done automatically by next step)
alpha = prop.calculate_alpha(network)
# print alpha
# Calculate network kernel for propagation
kernel = nef.construct_prop_kernel(network, alpha=alpha, verbose=is_verbose)
# Might want to tweak values here to speed up calculation
# Calculate the AUPRC values for each gene set
if large:
AUPRC_values = nef.large_network_AUPRC_wrapper(kernel, genesets, genesets_p, n=30, cores=n_cores, verbose=is_verbose)
else:
AUPRC_values = nef.small_network_AUPRC_wrapper(kernel, genesets, genesets_p, n=30, cores=n_cores, verbose=is_verbose)
# Construct null networks and calculate the AUPRC of the gene sets of the null networks
# We can use the AUPRC wrapper function for this
null_AUPRCs = []
for i in range(10):
shuffNet = nef.shuffle_network(network, max_tries_n=10, verbose=is_verbose)
shuffNet_kernel = nef.construct_prop_kernel(shuffNet, alpha=alpha, verbose=is_verbose)
if large:
shuffNet_AUPRCs = nef.large_network_AUPRC_wrapper(shuffNet_kernel, genesets, genesets_p, n=30, cores=n_cores, verbose=is_verbose)
else:
shuffNet_AUPRCs = nef.small_network_AUPRC_wrapper(shuffNet_kernel, genesets, genesets_p, n=30, cores=n_cores, verbose=is_verbose)
null_AUPRCs.append(shuffNet_AUPRCs)
        print('shuffNet {} AUPRCs calculated'.format(i + 1))
# Construct table of null AUPRCs
null_AUPRCs_table = pd.concat(null_AUPRCs, axis=1)
null_AUPRCs_table.columns = ['shuffNet'+repr(i+1) for i in range(len(null_AUPRCs))]
# Calculate performance metric of gene sets; This is the Z-score
network_performance = nef.calculate_network_performance_score(AUPRC_values, null_AUPRCs_table, verbose=is_verbose)
network_performance.name = 'Test Network'
network_performance.to_csv(outname+'_performance_score.csv', sep='\t')
# Calculate network performance gain over median null AUPRC;
network_perf_gain = nef.calculate_network_performance_gain(AUPRC_values, null_AUPRCs_table, verbose=is_verbose)
network_perf_gain.name = 'Test Network'
network_perf_gain.to_csv(outname+'_performance_gain.csv', sep='\t')
# # Rank network on average performance across gene sets vs performance on same gene sets in previous network set
# all_network_performance = pd.read_csv(outname+'.csv', index_col=0, sep='\t')
# all_network_performance_filt = pd.concat([network_performance, all_network_performance.ix[network_performance.index]], axis=1)
# network_performance_rank_table = all_network_performance_filt.rank(axis=1, ascending=False)
# network_performance_rankings = network_performance_rank_table['Test Network']
#
# # Rank network on average performance gain across gene sets vs performance gain on same gene sets in previous network set
# all_network_perf_gain = pd.read_csv(outname+'_Gain.csv', index_col=0, sep='\t')
# all_network_perf_gain_filt = pd.concat([network_perf_gain, all_network_perf_gain.ix[network_perf_gain.index]], axis=1)
# network_perf_gain_rank_table = all_network_perf_gain_filt.rank(axis=1, ascending=False)
# network_perf_gain_rankings = network_perf_gain_rank_table['Test Network']
#
# # Network Performance
# network_performance_metric_ranks = pd.concat([network_performance, network_performance_rankings, network_perf_gain, network_perf_gain_rankings], axis=1)
# network_performance_metric_ranks.columns = ['Network Performance', 'Network Performance Rank', 'Network Performance Gain', 'Network Performance Gain Rank']
# network_performance_metric_ranks.sort_values(by=['Network Performance Rank', 'Network Performance', 'Network Performance Gain Rank', 'Network Performance Gain'],
# ascending=[True, False, True, False])
# Construct network summary table
network_summary = {}
network_summary['Nodes'] = int(len(network.nodes()))
network_summary['Edges'] = int(len(network.edges()))
    network_summary['Avg Node Degree'] = np.mean(list(dict(network.degree()).values()))
network_summary['Edge Density'] = 2*network_summary['Edges'] / float((network_summary['Nodes']*(network_summary['Nodes']-1)))
# network_summary['Avg Network Performance Rank'] = network_performance_rankings.mean()
# network_summary['Avg Network Performance Rank, Rank'] = int(network_performance_rank_table.mean().rank().ix['Test Network'])
# network_summary['Avg Network Performance Gain Rank'] = network_perf_gain_rankings.mean()
# network_summary['Avg Network Performance Gain Rank, Rank'] = int(network_perf_gain_rank_table.mean().rank().ix['Test Network'])
with open(outname+'_summary', 'w') as f:
for item in ['Nodes', 'Edges', 'Avg Node Degree', 'Edge Density']:
f.write(item+':\t'+repr(network_summary[item])+'\n')
if __name__ == '__main__':
parser = ap.ArgumentParser()
parser.add_argument('-i', '--infile', required=True, help='Gene interaction data set in sif file format.')
parser.add_argument('-s', '--diseasefile', required=True,
help='Tab separated data file with disease name in first column and disease related genes in the rest of the columns.')
parser.add_argument('-c', '--cores', type=int, help='Number of cores to use. (Default: n=4)')
parser.add_argument('-n', '--size', help='Size of network. s (small): size <= 250k; l (large): size >= 250k (Default: small)')
parser.add_argument('-o', '--outname',
help='File name for output. Default: Network_Performance/Network_Performance_Gain.')
    parser.add_argument('-v', '--verbose', help='More cowbell! (Default: False)', action='store_true')
args = parser.parse_args()
if not args.outname:
args.outname = 'Network_Performance'
if not args.cores:
args.cores = 4
if not args.size:
args.size = 's'
main(args)
|
StarcoderdataPython
|
11368053
|
<gh_stars>0
"""Test of envloader"""
import unittest
import test_initialize
import envloader
class TestEnvloader(unittest.TestCase):
def setUp(self):
test_initialize.init()
def test_load(self):
env = envloader.load(PREFIX='HOME')
self.assertIsNotNone(env.HOME)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4854748
|
from urllib.parse import urljoin
from http import HTTPStatus
import requests
import pytest
from pyembeddedfhir.fhir_runner import FHIRFlavor, FHIRRunner
@pytest.fixture(
scope="session",
params=[FHIRFlavor.HAPI, FHIRFlavor.MICROSOFT],
)
def running_fhir_url(request):
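    # Session-scoped fixture: start an embedded FHIR server of the requested
    # flavor, yield its base URL, and shut it down when the with-block exits.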
flavor = request.param
with FHIRRunner(flavor, host_ip="127.0.0.1") as running_fhir:
host_port = running_fhir.host_port
path = running_fhir.path
yield urljoin(f"http://127.0.0.1:{host_port}", path)
def test_metadata_returns_ok(running_fhir_url: str):
metadata_url = urljoin(running_fhir_url, "metadata")
r = requests.get(metadata_url)
assert r.status_code == HTTPStatus.OK
|
StarcoderdataPython
|
9671753
|
<gh_stars>1-10
"""
Build cutadapt.
Cython is run when
* no pre-generated C sources are found,
* or the pre-generated C sources are out of date,
* or when --cython is given on the command line.
"""
import sys
import os.path
from distutils.core import setup, Extension
from distutils.version import LooseVersion
from cutadapt import __version__
MIN_CYTHON_VERSION = '0.17'
if sys.version_info < (2, 6):
sys.stdout.write("At least Python 2.6 is required.\n")
sys.exit(1)
def out_of_date(extensions):
"""
Check whether any pyx source is newer than the corresponding generated
C source or whether any C source is missing.
"""
for extension in extensions:
for pyx in extension.sources:
path, ext = os.path.splitext(pyx)
if ext not in ('.pyx', '.py'):
continue
csource = path + ('.cpp' if extension.language == 'c++' else '.c')
if not os.path.exists(csource) or (
os.path.getmtime(pyx) > os.path.getmtime(csource)):
return True
return False
def no_cythonize(extensions, **_ignore):
"""
Change file extensions from .pyx to .c or .cpp.
Copied from Cython documentation
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
return extensions
def cythonize_if_necessary(extensions):
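    # Decide whether to run Cython: forced via a --cython flag, required because a
    # generated C source is missing or stale, or skipped (fall back to the .c files).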
if '--cython' in sys.argv:
sys.argv.remove('--cython')
elif out_of_date(extensions):
sys.stdout.write('At least one C source file is missing or out of date.\n')
else:
return no_cythonize(extensions)
try:
from Cython import __version__ as cyversion
except ImportError:
sys.stdout.write(
"ERROR: Cython is not installed. Install at least Cython version " +
str(MIN_CYTHON_VERSION) + " to continue.\n")
sys.exit(1)
if LooseVersion(cyversion) < LooseVersion(MIN_CYTHON_VERSION):
sys.stdout.write(
"ERROR: Your Cython is at version '" + str(cyversion) +
"', but at least version " + str(MIN_CYTHON_VERSION) + " is required.\n")
sys.exit(1)
from Cython.Build import cythonize
return cythonize(extensions)
extensions = [
Extension('cutadapt._align', sources=['cutadapt/_align.pyx']),
Extension('cutadapt._qualtrim', sources=['cutadapt/_qualtrim.pyx']),
Extension('cutadapt._seqio', sources=['cutadapt/_seqio.pyx']),
]
extensions = cythonize_if_necessary(extensions)
setup(
name = 'cutadapt',
version = __version__,
author = '<NAME>',
author_email = '<EMAIL>',
url = 'http://code.google.com/p/cutadapt/',
description = 'trim adapters from high-throughput sequencing reads',
license = 'MIT',
ext_modules = extensions,
packages = ['cutadapt', 'cutadapt.scripts'],
scripts = ['bin/cutadapt'],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Cython",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Bio-Informatics"
]
)
|
StarcoderdataPython
|
9692055
|
<filename>TopQuarkAnalysis/TopEventProducers/python/tqafEventContent_cff.py
import FWCore.ParameterSet.Config as cms
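# Each entry below is an EDM output 'keep' statement of the form
# keep <className>_<moduleLabel>_<productInstanceName>_<processName>, with '*' wildcards.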
tqafEventContent = [
## genEvt
'keep *_decaySubset_*_*',
'keep *_initSubset_*_*',
'keep *_genEvt_*_*',
## extras for event selection
'keep *_kinFitTtSemiLepEventSelection_*_*',
'keep *_findTtSemiLepSignalSelMVA_*_*',
## hypotheses & event structure
'keep *_ttSemiLepHyp*_*_*',
'keep *_ttSemiLepEvent_*_*',
'keep *_ttFullLepHyp*_*_*',
'keep *_ttFullLepEvent_*_*',
'keep *_ttFullHadHyp*_*_*',
'keep *_ttFullHadEvent_*_*'
]
|
StarcoderdataPython
|
63304
|
<gh_stars>1-10
from configparser import ConfigParser
config = ConfigParser()
config.add_section('system')
config.add_section('linkedin')
config.add_section('profiles_data')
config.add_section('profiles_data_by_name')
print("Welcome to the configuration process.")
linkedin_username = ""
while linkedin_username == "":
print("Insert linkedin username.")
print("> ", end="")
linkedin_username = input()
config.set('linkedin', 'username', linkedin_username)
linkedin_password = ""
while linkedin_password == "":
print("Insert linkedin password.")
print("> ", end="")
linkedin_password = input()
config.set('linkedin', 'password', linkedin_password)
print("Insert the name of the .txt file that contains people profile urls.")
print("Notice: It doesn't matter if it doesn't exist right now.")
print("Leave blank for default option (profiles_data.txt)")
print("> ", end="")
input_file_name = input()
input_file_name = input_file_name if not input_file_name == "" else "profiles_data.txt"
config.set('profiles_data', 'input_file_name', input_file_name)
with open(input_file_name, "w"):
pass
print("Insert the name of the .xlsx file that will contain the results of the scraping by profile url.")
print("Leave blank for default option (results_profiles.xlsx)")
print("> ", end="")
output_file_name = input()
output_file_name = output_file_name if not output_file_name == "" else "results_profiles.xlsx"
config.set('profiles_data', 'output_file_name', output_file_name)
print("Do you want to append to it the timestamp in order to prevent to overwrite past results?")
print("Y for yes, N for no")
print("Leave blank for default option (Y)")
print("> ", end="")
append_timestamp = input()
append_timestamp = append_timestamp if not append_timestamp == "" else "Y"
config.set('profiles_data', 'append_timestamp', append_timestamp)
with open('config.ini', 'w') as f:
config.write(f)
print("")
print("Configuration completed. You can now do scraping.")
print("To scrape profile by url: execute do_scraping.py")
print("To search profiles by name: execute search_profiles_by_name.py")
|
StarcoderdataPython
|
1915621
|
from multiprocessing import Process
def say_hello(name='world'):
print("Hello, %s" % name)
p = Process(target=say_hello)
p.start()
p.join()
|
StarcoderdataPython
|
3264620
|
import time
from typing import Any, Dict, List, Union
from enum import Enum
from pydantic import parse_obj_as
import six
import prefect
from prefect import Task
from prefect.exceptions import PrefectException
from prefect.tasks.databricks.databricks_hook import DatabricksHook
from prefect.tasks.databricks.models import (
AccessControlRequest,
AccessControlRequestForGroup,
AccessControlRequestForUser,
JobTaskSettings,
)
from prefect.utilities.tasks import defaults_from_attrs
def _deep_string_coerce(content, json_path="json"):
"""
Coerces content or all values of content if it is a dict to a string. The
function will throw if content contains non-string or non-numeric types.
The reason why we have this function is because the `self.json` field must be a
dict with only string values. This is because `render_template` will fail
for numerical values.
"""
if isinstance(content, six.string_types):
return content
elif isinstance(content, six.integer_types + (float,)):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif content is None:
return content
elif isinstance(content, (list, tuple)):
return [
_deep_string_coerce(content=item, json_path=f"{json_path}[{i}]")
for i, item in enumerate(content)
]
elif isinstance(content, dict):
return {
key: _deep_string_coerce(content=value, json_path=f"{json_path}[{key}]")
for key, value in list(content.items())
}
elif isinstance(content, Enum):
return str(content.value)
raise ValueError(
f"Type {type(content)} used for parameter {json_path} is not a number or a string"
)
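# Illustrative example (not from the original source): _deep_string_coerce turns every
# numeric leaf into a string while leaving the structure intact, e.g.
#   _deep_string_coerce({"new_cluster": {"num_workers": 2}, "timeout_seconds": 0})
#   == {"new_cluster": {"num_workers": "2"}, "timeout_seconds": "0"}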
def _handle_databricks_task_execution(task, hook, log, submitted_run_id):
"""
Handles the Databricks + Prefect lifecycle logic for a Databricks task
Args:
- task (prefect.Task) : Prefect task being handled
- hook (prefect.tasks.databricks.databricks_hook.DatabricksHook): Databricks Hook
- log (logger): Prefect logging instance
- submitted_run_id (str): run ID returned after submitting or running Databricks job
"""
log.info("Run submitted with run_id: %s", submitted_run_id)
run_page_url = hook.get_run_page_url(submitted_run_id)
log.info("Run submitted with config : %s", task.json)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
while True:
run_state = hook.get_run_state(submitted_run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info("%s completed successfully.", task.name)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
return
else:
error_message = "{t} failed with terminal state: {s}".format(
t=task.name, s=run_state
)
raise PrefectException(error_message)
else:
log.info("%s in run state: %s", task.name, run_state)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
log.info("Sleeping for %s seconds.", task.polling_period_seconds)
time.sleep(task.polling_period_seconds)
class DatabricksSubmitRun(Task):
"""
Submits a Spark job run to Databricks using the
`api/2.0/jobs/runs/submit
<https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_
API endpoint.
There are two ways to instantiate this task.
In the first way, you can take the JSON payload that you typically use
to call the `api/2.0/jobs/runs/submit` endpoint and pass it directly
to our `DatabricksSubmitRun` task through the `json` parameter.
For example:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
    from prefect.tasks.databricks import DatabricksSubmitRun
json = {
'new_cluster': {
'spark_version': '2.1.0-db3-scala2.11',
'num_workers': 2
},
'notebook_task': {
'notebook_path': '/Users/<EMAIL>/PrepareData',
},
}
with Flow("my flow") as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksSubmitRun(json=json)
notebook_run(databricks_conn_secret=conn)
```
Another way to accomplish the same thing is to use the named parameters
of the `DatabricksSubmitRun` directly. Note that there is exactly
one named parameter for each top level parameter in the `runs/submit`
endpoint. In this method, your code would look like this:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
    from prefect.tasks.databricks import DatabricksSubmitRun
new_cluster = {
'spark_version': '2.1.0-db3-scala2.11',
'num_workers': 2
}
notebook_task = {
'notebook_path': '/Users/<EMAIL>/PrepareData',
}
with Flow("my flow") as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksSubmitRun(
new_cluster=new_cluster,
notebook_task=notebook_task)
notebook_run(databricks_conn_secret=conn)
```
In the case where both the json parameter **AND** the named parameters
are provided, they will be merged together. If there are conflicts during the merge,
the named parameters will take precedence and override the top level `json` keys.
This task requires a Databricks connection to be specified as a Prefect secret and can
be passed to the task like so:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
from prefect.tasks.databricks import DatabricksSubmitRun
with Flow("my flow") as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksSubmitRun(json=...)
notebook_run(databricks_conn_secret=conn)
```
Currently the named parameters that `DatabricksSubmitRun` task supports are
- `spark_jar_task`
- `notebook_task`
- `new_cluster`
- `existing_cluster_id`
- `libraries`
- `run_name`
- `timeout_seconds`
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks Connection
String. Structure must be a string of valid JSON. To use token based authentication, provide
the key `token` in the string for the connection and create the key `host`.
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
See documentation of the `DatabricksSubmitRun` Task to see how to pass in the connection
string using `PrefectSecret`.
- json (dict, optional): A JSON object containing API parameters which will be passed
directly to the `api/2.0/jobs/runs/submit` endpoint. The other named parameters
(i.e. `spark_jar_task`, `notebook_task`..) to this task will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
For more information about templating see :ref:`jinja-templating`.
https://docs.databricks.com/api/latest/jobs.html#runs-submit
- spark_jar_task (dict, optional): The main class and parameters for the JAR task. Note that
the actual JAR is specified in the `libraries`.
*EITHER* `spark_jar_task` *OR* `notebook_task` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobssparkjartask
- notebook_task (dict, optional): The notebook path and parameters for the notebook task.
*EITHER* `spark_jar_task` *OR* `notebook_task` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobsnotebooktask
- new_cluster (dict, optional): Specs for a new cluster on which this task will be run.
*EITHER* `new_cluster` *OR* `existing_cluster_id` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster
- existing_cluster_id (str, optional): ID for existing cluster on which to run this task.
*EITHER* `new_cluster` *OR* `existing_cluster_id` should be specified.
This field will be templated.
- libraries (list of dicts, optional): Libraries which this run will use.
This field will be templated.
https://docs.databricks.com/api/latest/libraries.html#managedlibrarieslibrary
- run_name (str, optional): The run name used for this task.
By default this will be set to the Prefect `task_id`. This `task_id` is a
required parameter of the superclass `Task`.
This field will be templated.
- timeout_seconds (int, optional): The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
- polling_period_seconds (int, optional): Controls the rate which we poll for the result of
this run. By default the task will poll every 30 seconds.
- databricks_retry_limit (int, optional): Amount of times retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
databricks_conn_secret: dict = None,
json: dict = None,
spark_jar_task: dict = None,
notebook_task: dict = None,
new_cluster: dict = None,
existing_cluster_id: str = None,
libraries: List[Dict] = None,
run_name: str = None,
timeout_seconds: int = None,
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: float = 1,
**kwargs,
) -> None:
self.databricks_conn_secret = databricks_conn_secret
self.json = json or {}
self.spark_jar_task = spark_jar_task
self.notebook_task = notebook_task
self.new_cluster = new_cluster
self.existing_cluster_id = existing_cluster_id
self.libraries = libraries
self.run_name = run_name
self.timeout_seconds = timeout_seconds
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
super().__init__(**kwargs)
@staticmethod
def _get_hook(
databricks_conn_secret, databricks_retry_limit, databricks_retry_delay
):
return DatabricksHook(
databricks_conn_secret,
retry_limit=databricks_retry_limit,
retry_delay=databricks_retry_delay,
)
@defaults_from_attrs(
"databricks_conn_secret",
"json",
"spark_jar_task",
"notebook_task",
"new_cluster",
"existing_cluster_id",
"libraries",
"run_name",
"timeout_seconds",
"polling_period_seconds",
"databricks_retry_limit",
"databricks_retry_delay",
)
def run(
self,
databricks_conn_secret: dict = None,
json: dict = None,
spark_jar_task: dict = None,
notebook_task: dict = None,
new_cluster: dict = None,
existing_cluster_id: str = None,
libraries: List[Dict] = None,
run_name: str = None,
timeout_seconds: int = None,
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: float = 1,
) -> str:
"""
Task run method.
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks Connection
String. Structure must be a string of valid JSON. To use token based authentication, provide
the key `token` in the string for the connection and create the key `host`.
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
See documentation of the `DatabricksSubmitRun` Task to see how to pass in the connection
string using `PrefectSecret`.
- json (dict, optional): A JSON object containing API parameters which will be passed
directly to the `api/2.0/jobs/runs/submit` endpoint. The other named parameters
(i.e. `spark_jar_task`, `notebook_task`..) to this task will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
For more information about templating see :ref:`jinja-templating`.
https://docs.databricks.com/api/latest/jobs.html#runs-submit
- spark_jar_task (dict, optional): The main class and parameters for the JAR task. Note that
the actual JAR is specified in the `libraries`.
*EITHER* `spark_jar_task` *OR* `notebook_task` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobssparkjartask
- notebook_task (dict, optional): The notebook path and parameters for the notebook task.
*EITHER* `spark_jar_task` *OR* `notebook_task` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobsnotebooktask
- new_cluster (dict, optional): Specs for a new cluster on which this task will be run.
*EITHER* `new_cluster` *OR* `existing_cluster_id` should be specified.
This field will be templated.
https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster
- existing_cluster_id (str, optional): ID for existing cluster on which to run this task.
*EITHER* `new_cluster` *OR* `existing_cluster_id` should be specified.
This field will be templated.
- libraries (list of dicts, optional): Libraries which this run will use.
This field will be templated.
https://docs.databricks.com/api/latest/libraries.html#managedlibrarieslibrary
- run_name (str, optional): The run name used for this task.
By default this will be set to the Prefect `task_id`. This `task_id` is a
required parameter of the superclass `Task`.
This field will be templated.
- timeout_seconds (int, optional): The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
- polling_period_seconds (int, optional): Controls the rate which we poll for the result of
this run. By default the task will poll every 30 seconds.
- databricks_retry_limit (int, optional): Amount of times retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
Returns:
- run_id (str): Run id of the submitted run
"""
assert (
databricks_conn_secret
), "A databricks connection string must be supplied as a dictionary or through Prefect Secrets"
assert isinstance(
databricks_conn_secret, dict
), "`databricks_conn_secret` must be supplied as a valid dictionary."
self.databricks_conn_secret = databricks_conn_secret
if json:
self.json = json
if polling_period_seconds:
self.polling_period_seconds = polling_period_seconds
# Initialize Databricks Connections
hook = self._get_hook(
databricks_conn_secret, databricks_retry_limit, databricks_retry_delay
)
if spark_jar_task is not None:
self.json["spark_jar_task"] = spark_jar_task
if notebook_task is not None:
self.json["notebook_task"] = notebook_task
if new_cluster is not None:
self.json["new_cluster"] = new_cluster
if existing_cluster_id is not None:
self.json["existing_cluster_id"] = existing_cluster_id
if libraries is not None:
self.json["libraries"] = libraries
if run_name is not None:
self.json["run_name"] = run_name
if timeout_seconds is not None:
self.json["timeout_seconds"] = timeout_seconds
if "run_name" not in self.json:
self.json["run_name"] = run_name or "Run Submitted by Prefect"
# Validate the dictionary to a valid JSON object
self.json = _deep_string_coerce(self.json)
# Submit the job
submitted_run_id = hook.submit_run(self.json)
_handle_databricks_task_execution(self, hook, self.logger, submitted_run_id)
return submitted_run_id
class DatabricksRunNow(Task):
"""
Runs an existing Spark job run to Databricks using the
`api/2.1/jobs/run-now
<https://docs.databricks.com/api/latest/jobs.html#run-now>`_
API endpoint.
There are two ways to instantiate this task.
In the first way, you can take the JSON payload that you typically use
to call the `api/2.1/jobs/run-now` endpoint and pass it directly
to our `DatabricksRunNow` task through the `json` parameter.
For example:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
from prefect.tasks.databricks import DatabricksRunNow
json = {
"job_id": 42,
"notebook_params": {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
}
with Flow("my flow") as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksRunNow(json=json)
notebook_run(databricks_conn_secret=conn)
```
Another way to accomplish the same thing is to use the named parameters
of the `DatabricksRunNow` task directly. Note that there is exactly
one named parameter for each top level parameter in the `run-now`
endpoint. In this method, your code would look like this:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
from prefect.tasks.databricks import DatabricksRunNow
job_id=42
notebook_params = {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
python_params = ["<NAME>", "42"]
spark_submit_params = ["--class", "org.apache.spark.examples.SparkPi"]
jar_params = ["<NAME>","35"]
with Flow("my flow') as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksRunNow(
notebook_params=notebook_params,
python_params=python_params,
spark_submit_params=spark_submit_params,
jar_params=jar_params
)
notebook_run(databricks_conn_secret=conn)
```
In the case where both the json parameter **AND** the named parameters
are provided, they will be merged together. If there are conflicts during the merge,
the named parameters will take precedence and override the top level `json` keys.
This task requires a Databricks connection to be specified as a Prefect secret and can
be passed to the task like so:
```
from prefect import Flow
from prefect.tasks.secrets import PrefectSecret
from prefect.tasks.databricks import DatabricksRunNow
with Flow("my flow") as flow:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
notebook_run = DatabricksRunNow(json=...)
notebook_run(databricks_conn_secret=conn)
```
Currently the named parameters that `DatabricksRunNow` task supports are
- `job_id`
- `json`
- `notebook_params`
- `python_params`
- `spark_submit_params`
- `jar_params`
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks Connection
String. Structure must be a string of valid JSON. To use token based authentication, provide
the key `token` in the string for the connection and create the key `host`.
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
See documentation of the `DatabricksSubmitRun` Task to see how to pass in the connection
string using `PrefectSecret`.
- job_id (str, optional): The job_id of the existing Databricks job.
https://docs.databricks.com/api/latest/jobs.html#run-now
- json (dict, optional): A JSON object containing API parameters which will be passed
directly to the `api/2.0/jobs/run-now` endpoint. The other named parameters
(i.e. `notebook_params`, `spark_submit_params`..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
https://docs.databricks.com/api/latest/jobs.html#run-now
- notebook_params (dict, optional): A dict from keys to values for jobs with notebook task,
e.g. "notebook_params": {"name": "<NAME>", "age": "35"}.
The map is passed to the notebook and will be accessible through the
dbutils.widgets.get function. See Widgets for more information.
If not specified upon run-now, the triggered run will use the
job’s base parameters. notebook_params cannot be
specified in conjunction with jar_params. The json representation
of this field (i.e. {"notebook_params":{"name":"<NAME>","age":"35"}})
cannot exceed 10,000 bytes.
https://docs.databricks.com/user-guide/notebooks/widgets.html
- python_params (list[str], optional): A list of parameters for jobs with python tasks,
e.g. "python_params": ["<NAME>", "35"].
The parameters will be passed to python file as command line parameters.
If specified upon run-now, it would overwrite the parameters specified in
job setting.
The json representation of this field (i.e. {"python_params":["<NAME>","35"]})
cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
- spark_submit_params (list[str], optional): A list of parameters for jobs with spark submit
task, e.g. "spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"].
The parameters will be passed to spark-submit script as command line parameters.
If specified upon run-now, it would overwrite the parameters specified
in job setting.
The json representation of this field cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
- jar_params (list[str], optional): A list of parameters for jobs with JAR tasks,
e.g. "jar_params": ["<NAME>", "35"]. The parameters will be used to invoke the main
function of the main class specified in the Spark JAR task. If not specified upon
run-now, it will default to an empty list. jar_params cannot be specified in conjunction
with notebook_params. The JSON representation of this field (i.e.
{"jar_params":["<NAME>","35"]}) cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
- timeout_seconds (int, optional): The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
- polling_period_seconds (int, optional): Controls the rate which we poll for the result of
this run. By default the task will poll every 30 seconds.
- databricks_retry_limit (int, optional): Amount of times retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
databricks_conn_secret: dict = None,
job_id: str = None,
json: dict = None,
notebook_params: dict = None,
python_params: List[str] = None,
spark_submit_params: List[str] = None,
jar_params: List[str] = None,
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: float = 1,
**kwargs,
) -> None:
self.databricks_conn_secret = databricks_conn_secret
self.json = json or {}
self.job_id = job_id
self.notebook_params = notebook_params
self.python_params = python_params
self.spark_submit_params = spark_submit_params
self.jar_params = jar_params
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
super().__init__(**kwargs)
@staticmethod
def _get_hook(
databricks_conn_secret, databricks_retry_limit, databricks_retry_delay
):
return DatabricksHook(
databricks_conn_secret,
retry_limit=databricks_retry_limit,
retry_delay=databricks_retry_delay,
)
@defaults_from_attrs(
"databricks_conn_secret",
"job_id",
"json",
"notebook_params",
"python_params",
"spark_submit_params",
"jar_params",
"polling_period_seconds",
"databricks_retry_limit",
"databricks_retry_delay",
)
def run(
self,
databricks_conn_secret: dict = None,
job_id: str = None,
json: dict = None,
notebook_params: dict = None,
python_params: List[str] = None,
spark_submit_params: List[str] = None,
jar_params: List[str] = None,
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: float = 1,
) -> str:
"""
Task run method.
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks
Connection String. Structure must be a string of valid JSON. To use token based
authentication, provide the key `token` in the string for the connection and create the
key `host`.
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
See documentation of the `DatabricksSubmitRun` Task to see how to pass in the connection
string using `PrefectSecret`.
- job_id (str, optional): The job_id of the existing Databricks job.
https://docs.databricks.com/api/latest/jobs.html#run-now
- json (dict, optional): A JSON object containing API parameters which will be passed
directly to the `api/2.0/jobs/run-now` endpoint. The other named parameters
(i.e. `notebook_params`, `spark_submit_params`..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
https://docs.databricks.com/api/latest/jobs.html#run-now
- notebook_params (dict, optional): A dict from keys to values for jobs with notebook task,
e.g. "notebook_params": {"name": "<NAME>", "age": "35"}.
The map is passed to the notebook and will be accessible through the
dbutils.widgets.get function. See Widgets for more information.
If not specified upon run-now, the triggered run will use the
job’s base parameters. notebook_params cannot be
specified in conjunction with jar_params. The json representation
of this field (i.e. {"notebook_params":{"name":"<NAME>","age":"35"}})
cannot exceed 10,000 bytes.
https://docs.databricks.com/user-guide/notebooks/widgets.html
- python_params (list[str], optional): A list of parameters for jobs with python tasks,
e.g. "python_params": ["<NAME>", "35"].
The parameters will be passed to python file as command line parameters.
If specified upon run-now, it would overwrite the parameters specified in
job setting.
The json representation of this field (i.e. {"python_params":["<NAME>","35"]})
cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
- spark_submit_params (list[str], optional): A list of parameters for jobs with spark submit
task, e.g. "spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"].
The parameters will be passed to spark-submit script as command line parameters.
If specified upon run-now, it would overwrite the parameters specified
in job setting.
The json representation of this field cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
- jar_params (list[str], optional): A list of parameters for jobs with JAR tasks,
e.g. "jar_params": ["<NAME>", "35"]. The parameters will be used to invoke the main
function of the main class specified in the Spark JAR task. If not specified upon
run-now, it will default to an empty list. jar_params cannot be specified in conjunction
with notebook_params. The JSON representation of this field (i.e.
{"jar_params":["<NAME>","35"]}) cannot exceed 10,000 bytes.
https://docs.databricks.com/api/latest/jobs.html#run-now
            - polling_period_seconds (int, optional): Controls the rate at which we poll for the result of
                this run. By default the task will poll every 30 seconds.
            - databricks_retry_limit (int, optional): Number of times to retry if the Databricks backend is
                unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
Returns:
- run_id (str): Run id of the submitted run
"""
assert (
databricks_conn_secret
), "A databricks connection string must be supplied as a dictionary or through Prefect Secrets"
assert isinstance(
databricks_conn_secret, dict
), "`databricks_conn_secret` must be supplied as a valid dictionary."
self.databricks_conn_secret = databricks_conn_secret
# Initialize Databricks Connections
hook = self._get_hook(
databricks_conn_secret, databricks_retry_limit, databricks_retry_delay
)
run_now_json = json or {}
        # Necessary because `_handle_databricks_task_execution` reads
        # `polling_period_seconds` off of the task instance
if polling_period_seconds:
self.polling_period_seconds = polling_period_seconds
if job_id is not None:
run_now_json["job_id"] = job_id
if notebook_params is not None:
merged = run_now_json.setdefault("notebook_params", {})
merged.update(notebook_params)
run_now_json["notebook_params"] = merged
if python_params is not None:
run_now_json["python_params"] = python_params
if spark_submit_params is not None:
run_now_json["spark_submit_params"] = spark_submit_params
if jar_params is not None:
run_now_json["jar_params"] = jar_params
# Validate the dictionary to a valid JSON object
self.json = _deep_string_coerce(run_now_json)
# Submit the job
submitted_run_id = hook.run_now(self.json)
_handle_databricks_task_execution(self, hook, self.logger, submitted_run_id)
return submitted_run_id
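# A minimal usage sketch for the run-now task defined above (not part of the
# library itself): the job id and notebook parameter below are placeholders,
# and a Prefect Secret named DATABRICKS_CONNECTION_STRING is assumed to exist.
# Named parameters such as `notebook_params` are merged into the run-now JSON
# and take precedence over conflicting top-level keys, as documented above.
#
#     from prefect import Flow
#     from prefect.tasks.secrets import PrefectSecret
#     from prefect.tasks.databricks import DatabricksRunNow
#
#     run_now = DatabricksRunNow(job_id="64")
#     with Flow("databricks-run-now") as flow:
#         conn = PrefectSecret("DATABRICKS_CONNECTION_STRING")
#         run_now(databricks_conn_secret=conn, notebook_params={"dry_run": "true"})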
class DatabricksSubmitMultitaskRun(Task):
"""
Creates and triggers a one-time run via the Databricks submit run API endpoint. Supports
the execution of multiple Databricks tasks within the Databricks job run. Note: Databricks
tasks are distinct from Prefect tasks. All tasks configured will run as a single Prefect task.
For more information about the arguments of this task, refer to the [Databricks
submit run API documentation]
(https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit)
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks Connection
String. Structure must be a string of valid JSON. To use token based authentication, provide
the key `token` in the string for the connection and create the key `host`.
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
        - tasks (List[JobTaskSettings]): A list containing the Databricks task configuration. Should
            contain configuration for at least one task.
- timeout_seconds (int, optional): An optional timeout applied to each run of this job.
The default behavior is to have no timeout.
- run_name (str, optional): An optional name for the run.
The default value is "Job run created by Prefect flow run {flow_run_name}".
- idempotency_token (str, optional): An optional token that can be used to guarantee
the idempotency of job run requests. Defaults to the flow run ID.
- access_control_list (List[AccessControlRequest]): List of permissions to set on the job.
        - polling_period_seconds (int, optional): Controls the rate at which we poll for the result of
            this run. By default the task will poll every 30 seconds.
        - databricks_retry_limit (int, optional): Number of times to retry if the Databricks backend is
            unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
Examples:
Trigger an ad-hoc multitask run
```
from prefect import Flow
from prefect.tasks.databricks import DatabricksSubmitMultitaskRun
from prefect.tasks.databricks.models import (
AccessControlRequestForUser,
AutoScale,
AwsAttributes,
AwsAvailability,
CanManage,
JobTaskSettings,
Library,
NewCluster,
NotebookTask,
SparkJarTask,
TaskDependency,
)
submit_multitask_run = DatabricksSubmitMultitaskRun(
tasks=[
JobTaskSettings(
task_key="Sessionize",
description="Extracts session data from events",
existing_cluster_id="0923-164208-meows279",
spark_jar_task=SparkJarTask(
main_class_name="com.databricks.Sessionize",
parameters=["--data", "dbfs:/path/to/data.json"],
),
libraries=[Library(jar="dbfs:/mnt/databricks/Sessionize.jar")],
timeout_seconds=86400,
),
JobTaskSettings(
task_key="Orders_Ingest",
description="Ingests order data",
existing_cluster_id="0923-164208-meows279",
spark_jar_task=SparkJarTask(
main_class_name="com.databricks.OrdersIngest",
parameters=["--data", "dbfs:/path/to/order-data.json"],
),
libraries=[Library(jar="dbfs:/mnt/databricks/OrderIngest.jar")],
timeout_seconds=86400,
),
JobTaskSettings(
task_key="Match",
description="Matches orders with user sessions",
depends_on=[
TaskDependency(task_key="Orders_Ingest"),
TaskDependency(task_key="Sessionize"),
],
new_cluster=NewCluster(
spark_version="7.3.x-scala2.12",
node_type_id="i3.xlarge",
spark_conf={"spark.speculation": True},
aws_attributes=AwsAttributes(
availability=AwsAvailability.SPOT, zone_id="us-west-2a"
),
autoscale=AutoScale(min_workers=2, max_workers=16),
),
notebook_task=NotebookTask(
notebook_path="/Users/<EMAIL>/Match",
base_parameters={"name": "<NAME>", "age": "35"},
),
timeout_seconds=86400,
),
],
run_name="A multitask job run",
timeout_seconds=86400,
access_control_list=[
AccessControlRequestForUser(
user_name="<EMAIL>", permission_level=CanManage.CAN_MANAGE
)
],
)
with Flow("my flow") as f:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
submit_multitask_run(databricks_conn_secret=conn)
```
"""
def __init__(
self,
databricks_conn_secret: dict = None,
tasks: List[JobTaskSettings] = None,
run_name: str = None,
timeout_seconds: int = None,
idempotency_token: str = None,
access_control_list: List[
Union[AccessControlRequestForUser, AccessControlRequestForGroup]
] = None,
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: float = 1,
**kwargs,
):
self.databricks_conn_secret = databricks_conn_secret
self.tasks = tasks
self.run_name = run_name
self.timeout_seconds = timeout_seconds
self.idempotency_token = idempotency_token
self.access_control_list = access_control_list
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
super().__init__(**kwargs)
@staticmethod
def convert_dict_to_kwargs(input: Dict[str, Any]) -> Dict[str, Any]:
"""
Method to convert a dict that matches the structure of the Databricks API call into the required
object types for the task input
Args:
- input (Dict): A dictionary representing the input to the task
Returns:
- A dictionary with values that match the input types of the class
Example:
Use a JSON-like dict as input
```
from prefect import Flow
from prefect.tasks.databricks import DatabricksSubmitMultitaskRun
submit_multitask_run = DatabricksSubmitMultitaskRun()
databricks_kwargs = DatabricksSubmitMultitaskRun.convert_dict_to_kwargs({
"tasks": [
{
"task_key": "Sessionize",
"description": "Extracts session data from events",
"depends_on": [],
"existing_cluster_id": "0923-164208-meows279",
"spark_jar_task": {
"main_class_name": "com.databricks.Sessionize",
"parameters": ["--data", "dbfs:/path/to/data.json"],
},
"libraries": [{"jar": "dbfs:/mnt/databricks/Sessionize.jar"}],
"timeout_seconds": 86400,
},
{
"task_key": "Orders_Ingest",
"description": "Ingests order data",
"depends_on": [],
"existing_cluster_id": "0923-164208-meows279",
"spark_jar_task": {
"main_class_name": "com.databricks.OrdersIngest",
"parameters": ["--data", "dbfs:/path/to/order-data.json"],
},
"libraries": [{"jar": "dbfs:/mnt/databricks/OrderIngest.jar"}],
"timeout_seconds": 86400,
},
{
"task_key": "Match",
"description": "Matches orders with user sessions",
"depends_on": [
{"task_key": "Orders_Ingest"},
{"task_key": "Sessionize"},
],
"new_cluster": {
"spark_version": "7.3.x-scala2.12",
"node_type_id": "i3.xlarge",
"spark_conf": {"spark.speculation": True},
"aws_attributes": {
"availability": "SPOT",
"zone_id": "us-west-2a",
},
"autoscale": {"min_workers": 2, "max_workers": 16},
},
"notebook_task": {
"notebook_path": "/Users/<EMAIL>/Match",
"base_parameters": {"name": "<NAME>", "age": "35"},
},
"timeout_seconds": 86400,
},
],
"run_name": "A multitask job run",
"timeout_seconds": 86400,
"access_control_list": [
{
"user_name": "<EMAIL>",
"permission_level": "CAN_MANAGE",
}
],
})
with Flow("my flow") as f:
conn = PrefectSecret('DATABRICKS_CONNECTION_STRING')
submit_multitask_run(**databricks_kwargs, databricks_conn_secret=conn)
```
"""
kwargs = {**input, "tasks": parse_obj_as(List[JobTaskSettings], input["tasks"])}
if input.get("access_control_list"):
kwargs["access_control_list"] = parse_obj_as(
List[AccessControlRequest],
input["access_control_list"],
)
return kwargs
@defaults_from_attrs(
"databricks_conn_secret",
"tasks",
"run_name",
"timeout_seconds",
"idempotency_token",
"access_control_list",
"polling_period_seconds",
"databricks_retry_limit",
"databricks_retry_delay",
)
def run(
self,
databricks_conn_secret: dict = None,
tasks: List[JobTaskSettings] = None,
run_name: str = None,
timeout_seconds: int = None,
idempotency_token: str = None,
access_control_list: List[AccessControlRequest] = None,
polling_period_seconds: int = None,
databricks_retry_limit: int = None,
databricks_retry_delay: float = None,
):
"""
Task run method. Any values passed here will overwrite the values used when initializing the
task.
Args:
- databricks_conn_secret (dict, optional): Dictionary representation of the Databricks
Connection String. Structure must be a string of valid JSON. To use token based
authentication, provide the key `token` in the string for the connection and create
the key `host`. `PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "login": "ghijklmn", "password": "<PASSWORD>"}'`
OR
`PREFECT__CONTEXT__SECRETS__DATABRICKS_CONNECTION_STRING=
'{"host": "abcdef.xyz", "token": "<KEY>"}'`
            - tasks (List[JobTaskSettings]): A list containing the Databricks task configuration. Should
                contain configuration for at least one task.
- timeout_seconds (int, optional): An optional timeout applied to each run of this job.
The default behavior is to have no timeout.
- run_name (str, optional): An optional name for the run.
The default value is "Job run created by Prefect flow run {flow_run_name}".
- idempotency_token (str, optional): An optional token that can be used to guarantee
the idempotency of job run requests. Defaults to the flow run ID.
- access_control_list (List[AccessControlRequest]): List of permissions to set on the job.
            - polling_period_seconds (int, optional): Controls the rate at which we poll for the result of
                this run. By default the task will poll every 30 seconds.
            - databricks_retry_limit (int, optional): Number of times to retry if the Databricks backend is
                unreachable. Its value must be greater than or equal to 1.
- databricks_retry_delay (float, optional): Number of seconds to wait between retries (it
might be a floating point number).
Returns:
- run_id (str): Run id of the submitted run
"""
if databricks_conn_secret is None or not isinstance(
databricks_conn_secret, dict
):
raise ValueError(
"Databricks connection info must be supplied as a dictionary."
)
if tasks is None or len(tasks) < 1:
raise ValueError("Please supply at least one Databricks task to be run.")
run_name = (
run_name
or f"Job run created by Prefect flow run {prefect.context.flow_run_name}"
)
# Ensures that multiple job runs are not created on retries
idempotency_token = idempotency_token or prefect.context.flow_run_id
# Set polling_period_seconds on task because _handle_databricks_task_execution expects it
if polling_period_seconds:
self.polling_period_seconds = polling_period_seconds
databricks_client = DatabricksHook(
databricks_conn_secret,
retry_limit=databricks_retry_limit,
retry_delay=databricks_retry_delay,
)
# Set json on task instance because _handle_databricks_task_execution expects it
self.json = _deep_string_coerce(
dict(
tasks=[task.dict() for task in tasks],
run_name=run_name,
timeout_seconds=timeout_seconds,
idempotency_token=idempotency_token,
access_control_list=[
entry.json() for entry in access_control_list or []
],
)
)
submitted_run_id = databricks_client.submit_multi_task_run(self.json)
_handle_databricks_task_execution(
self, databricks_client, self.logger, submitted_run_id
)
return submitted_run_id
|
StarcoderdataPython
|
9614206
|
<filename>tools/eqget/AUG.py<gh_stars>10-100
import h5py
import numpy as np
import sys
try:
sys.path.append('/afs/ipp/aug/ads-diags/common/python/lib')
from sf2equ_20200525 import EQU
import mapeq_20200507 as meq
AVAILABLE = True
except:
AVAILABLE = False
def isAvailable():
"""
Returns ``True`` if this module can be used to fetch equilibrium data
on this system.
"""
global AVAILABLE
return AVAILABLE
def getLUKE(shot, time, npsi=80, ntheta=80, filename=None):
"""
Returns magnetic equilibrium data for the given time of the specified
AUG shot. If ``filename`` is provided, the data is also saved to the
named LUKE equilibrium data file.
    The returned 2D arrays have shape (ntheta, npsi).

    :param shot:     ASDEX Upgrade shot number to fetch equilibrium data for.
    :param time:     Time to fetch equilibrium data for.
    :param npsi:     Number of radial grid points (in normalized poloidal flux).
    :param ntheta:   Number of poloidal angle grid points.
    :param filename: If given, name of the LUKE equilibrium file to store the data in.
"""
equ = EQU(shot)
# Radial grid (in normalized poloidal flux)
rhop = np.linspace(0, 1, npsi+1)[1:]
# Poloidal angle
theta = np.linspace(0, 2*np.pi, ntheta)
# Flux surface (R, Z) coordinates
R, Z = meq.rhoTheta2rz(equ, rhop, theta, t_in=time, coord_in='rho_pol')
R = R[0,:]
Z = Z[0,:]
# Poloidal flux psi
psi = meq.rho2rho(equ, rhop, t_in=time, coord_in='rho_pol', coord_out='Psi')[0,:]
# Calculate aspect ratio and normalize poloidal flux
tidx = meq.get_nearest_index(equ.time, [time])[0][0]
Rp = equ.Rmag[tidx]
Zp = equ.Zmag[tidx]
a = R[0,-1]-Rp
ieps = Rp / a
psi_apRp = psi / ieps
# Magnetic field components
Br, Bz, Bphi = meq.rz2brzt(equ, r_in=R.flatten(), z_in=Z.flatten(), t_in=time)
Br = Br[0,:].reshape(R.shape)
Bz = Bz[0,:].reshape(R.shape)
Bphi = Bphi[0,:].reshape(R.shape)
equil = {
'id': 'ASDEX Upgrade #{} t={:.4f}s'.format(shot, time),
'Rp': np.array([Rp]), 'Zp': np.array([Zp]),
'psi_apRp': psi_apRp,
'theta': theta,
'ptx': R-Rp, 'pty': Z-Zp,
'ptBx': Br, 'ptBy': Bz, 'ptBPHI': Bphi
}
if filename:
with h5py.File(filename, 'w') as f:
f.create_group('equil')
for key in equil.keys():
f['equil/{}'.format(key)] = equil[key]
return equil
def getVolume(shot, time, filename=None):
"""
Returns the plasma volume enclosed by a given flux surface.
"""
    equ = EQU(shot)
    tidx = meq.get_nearest_index(equ.time, [time])[0][0]
data = {'psiN': equ.psiN[tidx,:], 'vol': equ.vol[tidx,:]}
if filename:
np.savez(filename, **data)
return data
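# Minimal usage sketch (only works on a system where isAvailable() returns
# True, i.e. the AUG shotfile libraries can be imported; the shot number and
# time below are placeholders):
#
#     if isAvailable():
#         equil = getLUKE(shot=30554, time=3.0, filename='AUG_30554_t3p0.h5')
#         vol   = getVolume(shot=30554, time=3.0, filename='AUG_30554_vol.npz')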
|
StarcoderdataPython
|
3352746
|
"""
bbofuser: apps.v1api.views
FILE: patients
Created: 8/16/15 11:21 PM
"""
__author__ = '<NAME>:@ekivemark'
import json
import requests
from collections import OrderedDict
from oauth2_provider.decorators import protected_resource
from xml.dom import minidom
from xml.etree import ElementTree as ET
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core import serializers
from django.core.urlresolvers import reverse
from django.http import (HttpResponseRedirect,
HttpResponse,
JsonResponse, )
from django.utils.safestring import mark_safe
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from ..models import Crosswalk
from apps.v1api.utils import (get_format,
etree_to_dict,
xml_str_to_json_str,
get_url_query_string,
concat_string,
build_params)
from fhir.utils import kickout_404
from bbapi.utils import FhirServerUrl
@protected_resource()
def get_patient(request, Access_Mode=None, *args, **kwargs):
"""
Display Patient Profile
:param request:
:param Access_Mode = [None], Open
:param args:
:param kwargs:
:return:
"""
# Access_Mode = None = Do Crosswalk using Request.user
# Access_Mode = OPEN = use kwargs['patient_id']
if settings.DEBUG:
print("Request.GET :", request.GET)
print("Access_Mode :", Access_Mode)
print("KWargs :", kwargs)
print("Args :", args)
if Access_Mode == "OPEN" and kwargs['patient_id']!="":
# Lookup using patient_id for fhir_url_id
key = kwargs['patient_id'].strip()
else:
# DONE: Setup Patient API so that ID is not required
# DONE: Do CrossWalk Lookup to get Patient ID
if settings.DEBUG:
print("Request User Beneficiary(Patient):", request.user)
try:
xwalk = Crosswalk.objects.get(user=request.user.id)
except Crosswalk.DoesNotExist:
reason = "Unable to find Patient ID for user:%s[%s]" % (request.user,
request.user.id)
messages.error(request, reason)
return kickout_404(reason)
# return HttpResponseRedirect(reverse('api:v1:home'))
if xwalk.fhir_url_id == "":
err_msg = ['Crosswalk lookup failed: Sorry, We were unable to find',
'your record', ]
exit_message = concat_string("",
msg=err_msg,
delimiter=" ",
last=".")
messages.error(request, exit_message)
return kickout_404(exit_message)
# return HttpResponseRedirect(reverse('api:v1:home'))
key = xwalk.fhir_url_id.strip()
if settings.DEBUG:
print("Crosswalk :", xwalk)
print("GUID :", xwalk.guid)
print("FHIR :", xwalk.fhir)
print("FHIR URL ID :", key)
# We will deal internally in JSON Format if caller does not choose
# a format
in_fmt = "json"
# fhir_server_configuration = {"SERVER":"http://fhir-test.bbonfhir.com:8081",
# "PATH":"",
# "RELEASE":"/baseDstu2"}
# FHIR_SERVER_CONF = fhir_server_configuration
# FHIR_SERVER = FHIR_SERVER_CONF['SERVER'] + FHIR_SERVER_CONF['PATH']
# Since this is BlueButton and we are dealing with Patient Records
# We need to limit the id search to the specific beneficiary.
# A BlueButton user should not be able to request a patient profile
# that is not their own.
# We do this via the CrossWalk. The xwalk.fhir_url_id is the patient
# id as used in the url. eg. /Patient/23/
# FHIR also allows an enquiry with ?_id=23. We need to detect that
# and remove it from the parameters that are passed.
# All other query parameters should be passed through to the
# FHIR server.
# Add URL Parameters to skip_parm to ignore or perform custom
# processing with them. Use lower case values for matching.
# DO NOT USE Uppercase
skip_parm = ['_id', '_format']
mask = True
pass_to = FhirServerUrl()
pass_to += "/Patient"
pass_to += "/"
pass_to = pass_to + key + "/"
# We need to detect if a format was requested in the URL Parameters
# ie. _format=json|xml
# modify get_format to default to return nothing. ie. make no change
# internal data handling will be JSON
# _format will drive external display
# if no _format setting we will display in html (Current mode)
# if valid _format string we will pass content through to display in
# raw format
get_fmt = get_format(request.GET)
if settings.DEBUG:
print("pass_to:", pass_to)
pass_to = pass_to + build_params(request.GET, skip_parm)
if settings.DEBUG:
print("pass_to added to:", pass_to)
mask_to = settings.DOMAIN
# Set Context
context = {'display':"Patient",
'name': "Patient",
'mask': mask,
'key': key,
'get_fmt': get_fmt,
'in_fmt': in_fmt,
# 'output' : "test output ",
# 'args' : args,
# 'kwargs' : kwargs,
# 'get' : request.GET,
'pass_to': pass_to,
'template': 'v1api/patient.html',
}
if settings.DEBUG:
print("Calling Requests with:", pass_to)
try:
r = requests.get(pass_to)
context = process_page(request,r,context)
return publish_page(request, context)
# # Setup the page
#
# if settings.DEBUG:
# print("Context-result:", context['result'])
# # print("Context-converted:", json.dumps(context['result'], sort_keys=False))
# # print("Context:",context)
#
# if get_fmt == 'xml' or get_fmt == 'json':
# if settings.DEBUG:
# print("Mode = ", get_fmt)
# print("Context['result']: ", context['result'])
# if get_fmt == "xml":
# return HttpResponse(context['result'],
# content_type='application/' + get_fmt)
# if get_fmt == "json":
# #return HttpResponse(context['result'], mimetype="application/json")
# return JsonResponse(context['import_text'], safe=False)
#
# else:
#
# if context['text'] == "No user readable content to display" or context['text']=="":
#
# result = json.loads(context['result'], object_pairs_hook=OrderedDict)
# print("Result::", result)
# context['text'] += "<br/> extracting information from returned record:<br/>"
# context['text'] += "<table>\n"
# if 'name' in result:
# patient_name = result['name'][0]['given'][0]
# patient_name += " "
# patient_name += result['name'][0]['family'][0]
# context['text'] += tr_build_item("Patient Name ",
# patient_name)
# if 'address' in result:
# context['text'] += tr_build_item("Patient Address",
# result['address'][0]['line'][0])
# if 'birthDate' in result:
# context['text'] += tr_build_item("Birth Date", result['birthDate'])
#
# if 'identifier' in result:
# context['text'] += tr_build_item("Patient ID",
# result['identifier'][0]['value'])
# context['text'] += "</table>"
#
# return render_to_response('v1api/patient.html',
# RequestContext(request,
# context, ))
except requests.ConnectionError:
pass
return cms_not_connected(request, 'api:v1:home')
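# Illustration of the proxying performed by get_patient() (hostname and id are
# hypothetical; the real prefix comes from FhirServerUrl()): with a crosswalked
# fhir_url_id of "1234567", the upstream request takes the form
#
#     FhirServerUrl() + "/Patient/1234567/" + build_params(request.GET, skip_parm)
#     e.g. https://fhir.example.com/baseDstu2/Patient/1234567/?_count=5
#
# while _id and _format are intercepted locally; _format only controls how the
# response is rendered back to the caller (json, xml or html).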
@login_required
def get_eob(request, eob_id=None, Access_Mode=None, *args, **kwargs):
"""
Display one or more EOBs but Always limit scope to Patient_Id
:param request:
:param eob_id: Request a specific EOB
:param args:
:param kwargs:
:return:
"""
if settings.DEBUG:
print("Request User Beneficiary(Patient):", request.user,
"\nFor EOB Enquiry ")
print("Request.GET :", request.GET)
print("Access_Mode :", Access_Mode)
print("KWargs :", kwargs)
print("Args :", args)
if Access_Mode == "OPEN":
# Lookup using eob_id without patient filter
key = ""
else:
try:
xwalk = Crosswalk.objects.get(user=request.user)
except Crosswalk.DoesNotExist:
messages.error(request, "Unable to find Patient ID")
return HttpResponseRedirect(reverse('api:v1:home'))
if xwalk.fhir_url_id == "":
err_msg = ['Sorry, We were unable to find',
'your record', ]
exit_message = concat_string("",
msg=err_msg,
delimiter=" ",
last=".")
messages.error(request, exit_message)
return HttpResponseRedirect(reverse('api:v1:home'))
key = xwalk.fhir_url_id.strip()
if settings.DEBUG:
print("FHIR URL ID :", key)
# We should have the xwalk.FHIR_url_id
# So we will construct the EOB Identifier to include
# This is a hack to limit EOBs returned to this user only.
# id_source['system'] = "https://mymedicare.gov/claims/beneficiary"
# id_source['use'] = "official"
# id_source['value'] = "Patient/"+str(patient_id)
# id_list.append(unique_id(id_source))
# this search works:
# http://fhir.bbonfhir.com:8080/fhir-p/
# search?serverId=bbonfhir_dev
# &resource=ExplanationOfBenefit
# ¶m.0.0=https%3A%2F%2Fmymedicare.gov%2Fclaims%2Fbeneficiary
# ¶m.0.1=Patient%2F4995401
# ¶m.0.name=identifier
# ¶m.0.type=token
# &sort_by=
# &sort_direction=
# &resource-search-limit=
# http://ec2-52-4-198-86.compute-1.amazonaws.com:8081/baseDstu2/
# ExplanationOfBenefit/?patient=Patient/131052&_format=json
# We will deal internally in JSON Format if caller does not choose
# a format
in_fmt = "json"
get_fmt = get_format(request.GET)
skip_parm = ['_id', '_format', 'patient']
mask = True
pass_to = FhirServerUrl()
pass_to += "/ExplanationOfBenefit"
pass_to += "/"
if eob_id == None:
pass
else:
pass_to += eob_id
# We can allow an EOB but we MUST add a search Parameter
# to limit the items found to those relevant to the Patient Id
#if eob_id:
# pass_to = eob_id + "/"
# Now apply the search restriction to limit to patient _id
#pass_to = pass_to + key + "/"
if not key == "":
pass_to += "?patient="
pass_to += "Patient/"
pass_to += key
pass_to = pass_to + "&" + build_params(request.GET, skip_parm)[1:]
if settings.DEBUG:
print("Pass_to from build_params:", pass_to)
if settings.DEBUG:
print("Calling requests with pass_to:", pass_to)
# Set Context
context = {'display': 'EOB',
'name': 'ExplanationOfBenefit',
'mask': mask,
'key': key,
'eob_id': eob_id,
'get_fmt': get_fmt,
'in_fmt': in_fmt,
'pass_to': pass_to,
'template': 'v1api/eob.html',
}
try:
r = requests.get(pass_to)
context = process_page(request,r,context)
return publish_page(request, context)
except requests.ConnectionError:
pass
return cms_not_connected(request, 'api:v1:home')
#@login_required
def get_eob_view(request, eob_id, *args, **kwargs):
"""
Display one or more EOBs but Always limit scope to Patient_Id
:param request:
:param eob_id: Request a specific EOB
:param args:
:param kwargs:
:return:
"""
if settings.DEBUG:
print("Request User Beneficiary(Patient):", request.user,
"\nFor Single EOB")
try:
xwalk = Crosswalk.objects.get(user=request.user)
except Crosswalk.DoesNotExist:
messages.error(request, "Unable to find Patient ID")
return HttpResponseRedirect(reverse('api:v1:home'))
if xwalk.fhir_url_id == "":
err_msg = ['Sorry, We were unable to find',
'your record', ]
exit_message = concat_string("",
msg=err_msg,
delimiter=" ",
last=".")
messages.error(request, exit_message)
return HttpResponseRedirect(reverse('api:v1:home'))
if settings.DEBUG:
print("Request.GET :", request.GET)
print("KWargs :", kwargs)
print("Crosswalk :", xwalk)
print("GUID :", xwalk.guid)
print("FHIR :", xwalk.fhir)
print("FHIR URL ID :", xwalk.fhir_url_id)
# We should have the xwalk.FHIR_url_id
# So we will construct the EOB Identifier to include
# We will deal internally in JSON Format if caller does not choose
# a format
in_fmt = "json"
get_fmt = get_format(request.GET)
# DONE: Define Transaction Dictionary to enable generic presentation of API Call
skip_parm = ['_id', '_format']
key = xwalk.fhir_url_id.strip()
mask = True
pass_to = FhirServerUrl()
pass_to += "/ExplanationOfBenefit/"
# We can allow an EOB but we MUST add a search Parameter
# to limit the items found to those relevant to the Patient Id
if eob_id:
pass_to = pass_to + eob_id + "/"
# Now apply the search restriction to limit to patient _id
#pass_to = pass_to + key + "/"
pass_to = pass_to + "?patient="
pass_to = pass_to + "Patient/"
pass_to = pass_to + xwalk.fhir_url_id.strip()
pass_to = pass_to + "&" + build_params(request.GET, skip_parm)
if settings.DEBUG:
print("Pass_to from build_params:", pass_to)
if settings.DEBUG:
print("Calling requests with pass_to:", pass_to)
# Set Context
context = {'name': "ExplanationOfBenefit",
'display': 'EOB',
'mask': mask,
'key': key,
'get_fmt': get_fmt,
'in_fmt': in_fmt,
# 'output' : "test output ",
# 'args' : args,
# 'kwargs' : kwargs,
# 'get' : request.GET,
'pass_to': pass_to,
'template': 'v1api/eob.html',
}
try:
r = requests.get(pass_to)
context = process_page(request, r, context)
return publish_page(request, context)
except requests.ConnectionError:
pass
return cms_not_connected(request,'api:v1:home')
def process_page(request, r, context):
"""
Process the request
:param request:
:param r:
:param context:
:return: context
"""
if context["get_fmt"] == "xml":
pre_text = re_write_url(r.text)
xml_text = minidom.parseString(pre_text)
if settings.DEBUG:
print("XML_TEXT:", xml_text.toxml())
root = ET.fromstring(r.text)
# root_out = etree_to_dict(r.text)
json_string = ""
# json_out = xml_str_to_json_str(r.text, json_string)
if settings.DEBUG:
print("Root ET XML:", root)
# print("XML:", root_out)
# print("JSON_OUT:", json_out,":", json_string)
drill_down = ['Bundle',
'entry',
'Patient', ]
level = 0
tag0 = xml_text.getElementsByTagName("text")
# tag1 = tag0.getElementsByTagName("entry")
if settings.DEBUG:
print("Patient?:", tag0)
print("DrillDown:", drill_down[level])
print("root find:", root.find(drill_down[level]))
pretty_xml = xml_text.toprettyxml()
#if settings.DEBUG:
# print("TEXT:", text)
# # print("Pretty XML:", pretty_xml)
context['result'] = pretty_xml # convert
context['text'] = pretty_xml
else:
pre_text = re_write_url(r.text)
convert = json.loads(pre_text, object_pairs_hook=OrderedDict)
content = OrderedDict(convert)
text = ""
if settings.DEBUG:
# print("Content:", content)
print("resourceType:", content['resourceType'])
if 'text' in content:
if 'div' in content['text']:
pass
# print("text:", content['text']['div'])
# context['result'] = r.json() # convert
import_text = json.loads(pre_text, object_pairs_hook=OrderedDict)
context['import_text'] = import_text
context['result'] = json.dumps(import_text, indent=4, sort_keys=False)
if 'text' in content:
if 'div' in content['text']:
context['text'] = content['text']['div']
else:
context['text'] = ""
else:
context['text'] = "No user readable content to display"
if 'error' in content:
            context['error'] = content['error']
return context
def publish_page(request, context):
"""
Publish the page
:return:
"""
# Setup the page
get_fmt = context['get_fmt']
in_fmt = context['in_fmt']
if get_fmt == 'xml' or get_fmt == 'json':
# if settings.DEBUG:
# print("Mode = ", get_fmt)
# print("Context['result']: ", context['result'])
if get_fmt == "xml":
return HttpResponse(context['result'],
content_type='application/' + get_fmt)
if get_fmt == "json":
# return HttpResponse(context['result'],content_type="application/json")
return JsonResponse(context['import_text'], safe=False)
else:
if context['text'] == "No user readable content to display" or context['text']=="":
result = json.loads(context['result'], object_pairs_hook=OrderedDict)
context['text'] += "<br/> extracting information from returned record:<br/>"
context['text'] += "<table>\n"
if 'name' in result:
patient_name = result['name'][0]['given'][0]
patient_name += " "
patient_name += result['name'][0]['family'][0]
context['text'] += tr_build_item("Patient Name ",
patient_name)
if 'address' in result:
context['text'] += tr_build_item("Patient Address",
result['address'][0]['line'][0])
if 'birthDate' in result:
context['text'] += tr_build_item("Birth Date", result['birthDate'])
if 'identifier' in result:
context['text'] += tr_build_item("Patient ID",
result['identifier'][0]['value'])
context['text'] += "</table>"
if settings.DEBUG:
print("Template:", context['template'])
return render_to_response(context['template'],
RequestContext(request,
context ))
def re_write_url(src_text, rw_from=None, rw_to=None):
"""
receive text and rewrite rw_From with rw_to
"""
# We need to replace FHIR Server with External Server reference
if rw_from == None:
rewrite_from = settings.FHIR_SERVER_CONF['REWRITE_FROM']
else:
rewrite_from = rw_from
if rw_to == None:
rewrite_to = settings.FHIR_SERVER_CONF['REWRITE_TO']
else:
rewrite_to = rw_to
return src_text.replace(rewrite_from, rewrite_to)
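# Example of the rewrite performed above (values are illustrative; the real
# defaults come from settings.FHIR_SERVER_CONF['REWRITE_FROM'/'REWRITE_TO']):
#
#     re_write_url('<a href="http://internal-fhir:8081/baseDstu2/Patient/1/">',
#                  rw_from='http://internal-fhir:8081/baseDstu2',
#                  rw_to='https://api.example.com/api/v1')
#     # -> '<a href="https://api.example.com/api/v1/Patient/1/">'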
def li_build_item(field_name, field_value):
li_build_item = "<li>%s: %s</li>" % (field_name, field_value)
return li_build_item
def tr_build_item(field_name, field_value):
ti_build_item = "<tr>"
ti_build_item += "<td>%s</td><td>%s</td>" % (field_name, field_value)
ti_build_item += "</tr>"
return ti_build_item
def cms_not_connected(request, reverse_to_name):
"""
did we get a connection error because we are in the CMS network?
"""
if settings.DEBUG:
print("Whoops - Problem connecting to FHIR Server")
messages.error(request,
"FHIR Server is unreachable. "
"Are you on the CMS Network?")
return HttpResponseRedirect(reverse(reverse_to_name))
|
StarcoderdataPython
|
6510783
|
import numpy as np
import pytest
import aesara
import aesara.tensor as tt
from aesara.tensor import fft
from tests import unittest_tools as utt
N = 16
class TestFFT:
def test_rfft_float(self):
# Test that numpy's default float64 output is cast to aesara input type
eps = 1e-1
def f_rfft(inp):
return fft.rfft(inp)
inputs_val = np.random.random((1, N)).astype(aesara.config.floatX)
utt.verify_grad(f_rfft, [inputs_val], eps=eps)
def f_irfft(inp):
return fft.irfft(inp)
inputs_val = np.random.random((1, N // 2 + 1, 2)).astype(aesara.config.floatX)
utt.verify_grad(f_irfft, [inputs_val], eps=eps)
def test_1Drfft(self):
inputs_val = np.random.random((1, N)).astype(aesara.config.floatX)
x = tt.matrix("x")
rfft = fft.rfft(x)
f_rfft = aesara.function([x], rfft)
res_rfft = f_rfft(inputs_val)
res_rfft_comp = np.asarray(res_rfft[:, :, 0]) + 1j * np.asarray(
res_rfft[:, :, 1]
)
rfft_ref = np.fft.rfft(inputs_val, axis=1)
utt.assert_allclose(rfft_ref, res_rfft_comp)
m = rfft.type()
print(m.ndim)
irfft = fft.irfft(m)
f_irfft = aesara.function([m], irfft)
res_irfft = f_irfft(res_rfft)
utt.assert_allclose(inputs_val, np.asarray(res_irfft))
# The numerical gradient of the FFT is sensitive, must set large
# enough epsilon to get good accuracy.
eps = 1e-1
def f_rfft(inp):
return fft.rfft(inp)
inputs_val = np.random.random((1, N)).astype(aesara.config.floatX)
utt.verify_grad(f_rfft, [inputs_val], eps=eps)
def f_irfft(inp):
return fft.irfft(inp)
inputs_val = np.random.random((1, N // 2 + 1, 2)).astype(aesara.config.floatX)
utt.verify_grad(f_irfft, [inputs_val], eps=eps)
def test_rfft(self):
inputs_val = np.random.random((1, N, N)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
rfft = fft.rfft(inputs)
f_rfft = aesara.function([], rfft)
res_rfft = f_rfft()
res_rfft_comp = np.asarray(res_rfft[:, :, :, 0]) + 1j * np.asarray(
res_rfft[:, :, :, 1]
)
rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2))
utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)
def test_irfft(self):
inputs_val = np.random.random((1, N, N)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
rfft = fft.rfft(inputs)
f_rfft = aesara.function([], rfft)
res_fft = f_rfft()
m = rfft.type()
irfft = fft.irfft(m)
f_irfft = aesara.function([m], irfft)
res_irfft = f_irfft(res_fft)
utt.assert_allclose(inputs_val, np.asarray(res_irfft))
inputs_val = np.random.random((1, N, N, 2)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
irfft = fft.irfft(inputs)
f_irfft = aesara.function([], irfft)
res_irfft = f_irfft()
inputs_ref = inputs_val[..., 0] + inputs_val[..., 1] * 1j
irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2))
utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4)
def test_norm_rfft(self):
inputs_val = np.random.random((1, N, N)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
# Unitary normalization
rfft = fft.rfft(inputs, norm="ortho")
f_rfft = aesara.function([], rfft)
res_rfft = f_rfft()
res_rfft_comp = np.asarray(res_rfft[:, :, :, 0]) + 1j * np.asarray(
res_rfft[:, :, :, 1]
)
rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2))
utt.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4)
# No normalization
rfft = fft.rfft(inputs, norm="no_norm")
f_rfft = aesara.function([], rfft)
res_rfft = f_rfft()
res_rfft_comp = np.asarray(res_rfft[:, :, :, 0]) + 1j * np.asarray(
res_rfft[:, :, :, 1]
)
utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4)
# Inverse FFT inputs
inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype(
aesara.config.floatX
)
inputs = aesara.shared(inputs_val)
inputs_ref = inputs_val[..., 0] + 1j * inputs_val[..., 1]
# Unitary normalization inverse FFT
irfft = fft.irfft(inputs, norm="ortho")
f_irfft = aesara.function([], irfft)
res_irfft = f_irfft()
irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2))
utt.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4)
# No normalization inverse FFT
irfft = fft.irfft(inputs, norm="no_norm")
f_irfft = aesara.function([], irfft)
res_irfft = f_irfft()
utt.assert_allclose(irfft_ref * N ** 2, res_irfft, atol=1e-4, rtol=1e-4)
def test_params(self):
inputs_val = np.random.random((1, N)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
with pytest.raises(ValueError):
fft.rfft(inputs, norm=123)
inputs_val = np.random.random((1, N // 2 + 1, 2)).astype(aesara.config.floatX)
inputs = aesara.shared(inputs_val)
with pytest.raises(ValueError):
fft.irfft(inputs, norm=123)
with pytest.raises(ValueError):
fft.irfft(inputs, is_odd=123)
def test_grad_rfft(self):
# The numerical gradient of the FFT is sensitive, must set large
# enough epsilon to get good accuracy.
eps = 1e-1
def f_rfft(inp):
return fft.rfft(inp)
inputs_val = np.random.random((1, N, N)).astype(aesara.config.floatX)
utt.verify_grad(f_rfft, [inputs_val], eps=eps)
def f_irfft(inp):
return fft.irfft(inp)
inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype(
aesara.config.floatX
)
utt.verify_grad(f_irfft, [inputs_val], eps=eps)
def f_rfft(inp):
return fft.rfft(inp, norm="ortho")
inputs_val = np.random.random((1, N, N)).astype(aesara.config.floatX)
utt.verify_grad(f_rfft, [inputs_val], eps=eps)
def f_irfft(inp):
return fft.irfft(inp, norm="no_norm")
inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype(
aesara.config.floatX
)
utt.verify_grad(f_irfft, [inputs_val], eps=eps)
|
StarcoderdataPython
|
9738940
|
class A196:
pass
|
StarcoderdataPython
|
158495
|
<filename>lib/tagnews/crimetype/tag.py<gh_stars>10-100
import os
import pickle
import glob
import time
import pandas as pd
# not used explicitly, but this needs to be imported like this
# for unpickling to work.
from ..utils.model_helpers import LemmaTokenizer # noqa
"""
Contains the CrimeTags class that allows tagging of articles.
"""
MODEL_LOCATION = os.path.join(os.path.split(__file__)[0],
'models',
'binary_stemmed_logistic')
TAGS = ['OEMC', 'CPD', 'SAO', 'CCCC', 'CCJ', 'CCSP',
'CPUB', 'IDOC', 'DOMV', 'SEXA', 'POLB', 'POLM',
'GUNV', 'GLBTQ', 'JUVE', 'REEN', 'VIOL', 'BEAT',
'PROB', 'PARL', 'CPLY', 'DRUG', 'CPS', 'GANG', 'ILSP',
'HOMI', 'IPRA', 'CPBD', 'IMMG', 'ENVI', 'UNSPC',
'ILSC', 'ARSN', 'BURG', 'DUI', 'FRUD', 'ROBB', 'TASR']
def load_model(location=MODEL_LOCATION):
"""
Load a model from the given folder `location`.
There should be at least one file named model-TIME.pkl and
a file named vectorizer-TIME.pkl inside the folder.
The files with the most recent timestamp are loaded.
"""
models = glob.glob(os.path.join(location, 'model*.pkl'))
if not models:
raise RuntimeError(('No models to load. Run'
' "python -m tagnews.crimetype.models.'
'binary_stemmed_logistic.save_model"'))
model = models.pop()
while models:
model_time = time.strptime(model[-19:-4], '%Y%m%d-%H%M%S')
new_model_time = time.strptime(models[0][-19:-4], '%Y%m%d-%H%M%S')
if model_time < new_model_time:
model = models[0]
models = models[1:]
with open(model, 'rb') as f:
clf = pickle.load(f)
with open(os.path.join(location, 'vectorizer-' + model[-19:-4] + '.pkl'),
'rb') as f:
vectorizer = pickle.load(f)
return clf, vectorizer
class CrimeTags():
"""
CrimeTags let you tag articles. Neat!
"""
def __init__(self,
model_directory=MODEL_LOCATION,
clf=None,
vectorizer=None):
"""
Load a model from the given `model_directory`.
See `load_model` for more information.
Alternatively, the classifier and vectorizer can be
provided. If one is provided, then both must be provided.
"""
if clf is None and vectorizer is None:
self.clf, self.vectorizer = load_model(model_directory)
elif clf is None or vectorizer is None:
raise ValueError(('clf and vectorizer must both be None,'
' or both be not None'))
else:
self.clf, self.vectorizer = clf, vectorizer
def tagtext_proba(self, text):
"""
Compute the probability each tag applies to the given text.
inputs:
text: A python string.
returns:
pred_proba: A pandas series indexed by the tag name.
"""
x = self.vectorizer.transform([text])
y_hat = self.clf.predict_proba(x)
preds = pd.DataFrame(y_hat)
preds.columns = TAGS
preds = preds.T.iloc[:, 0].sort_values(ascending=False)
return preds
def tagtext(self, text, prob_thresh=0.5):
"""
Tag a string with labels.
inputs:
text: A python string.
prob_thresh: The threshold on probability at which point
the tag will be applied.
returns:
preds: A list of tags that have > prob_thresh probability
according to the model.
"""
preds = self.tagtext_proba(text)
return preds[preds > prob_thresh].index.values.tolist()
def relevant_proba(self, text):
"""
Outputs the probability that the given text is relevant.
This probability is computed naively as the maximum of
the probabilities each tag applies to the text.
A more nuanced method would compute a joint probability.
inputs:
text: A python string.
returns:
relevant_proba: Probability the text is relevant.
"""
return max(self.tagtext_proba(text))
def relevant(self, text, prob_thresh=0.05):
"""
Determines whether given text is relevant or not. Relevance
is defined as whether any tag has more than prob_thresh
chance of applying to the text according to the model.
inputs:
text: A python string.
prob_thresh: The threshold on probability that
determines relevance. If no tags have >=
prob_thresh of applying to the text, then
the text is not relevant.
returns:
relevant: Boolean. Is the text "relevant"?
"""
return len(self.tagtext(text, prob_thresh)) > 0
def get_contributions(self, text):
"""
Rank the words in the text by their contribution to each
category. This function assumes that clf has an attribute
`coef_` and that vectorizer has an attribute
`inverse_transform`.
inputs:
text: A python string.
returns:
contributions: Pandas panel keyed off [category, word].
Example:
>>> s = 'This is an article about drugs and gangs.'
>>> s += ' Written by the amazing <NAME>.'
>>> p = tagger.get_contributions(s)
>>> p['DRUG'].sort_values('weight', ascending=False)
weight
drug 5.549870
copyright 0.366905
gang 0.194773
this 0.124590
an -0.004484
article -0.052026
is -0.085534
about -0.154800
kevin -0.219028
rose -0.238296
and -0.316201
. -0.853208
"""
p = {}
vec = self.vectorizer.transform([text])
vec_inv = self.vectorizer.inverse_transform(vec)
for i, tag in enumerate(TAGS):
p[tag] = pd.DataFrame(
index=vec_inv,
data={'weight': self.clf.coef_[i, vec.nonzero()[1]]}
)
return pd.Panel(p)
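# Minimal usage sketch (assumes the pickled model/vectorizer files shipped in
# MODEL_LOCATION are present; the article text is made up):
#
#     from tagnews.crimetype.tag import CrimeTags
#     tagger = CrimeTags()
#     text = 'Two men were charged after a shooting on the South Side.'
#     print(tagger.tagtext(text, prob_thresh=0.5))  # tag codes above the threshold
#     print(tagger.relevant_proba(text))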
|
StarcoderdataPython
|
5163051
|
from ..remote import RemoteModel
class DevicePhysicalNetworkExplorerInventorySummaryGridRemote(RemoteModel):
"""
| ``DevicePhysicalID:`` none
| ``attribute type:`` string
| ``DeviceID:`` none
| ``attribute type:`` string
| ``DeviceIPDotted:`` none
| ``attribute type:`` string
| ``DeviceIPNumeric:`` none
| ``attribute type:`` string
| ``VirtualNetworkID:`` none
| ``attribute type:`` string
| ``Network:`` none
| ``attribute type:`` string
| ``DeviceName:`` none
| ``attribute type:`` string
| ``DeviceModel:`` none
| ``attribute type:`` string
| ``DeviceVendor:`` none
| ``attribute type:`` string
| ``DeviceVersion:`` none
| ``attribute type:`` string
| ``PhysicalName:`` none
| ``attribute type:`` string
| ``PhysicalDescr:`` none
| ``attribute type:`` string
| ``PhysicalClass:`` none
| ``attribute type:`` string
| ``PhysicalSerialNum:`` none
| ``attribute type:`` string
| ``PhysicalModelName:`` none
| ``attribute type:`` string
| ``PhysicalHardwareRev:`` none
| ``attribute type:`` string
| ``PhysicalFirmwareRev:`` none
| ``attribute type:`` string
| ``PhysicalSoftwareRev:`` none
| ``attribute type:`` string
| ``PhysicalAlias:`` none
| ``attribute type:`` string
| ``PhysicalAssetID:`` none
| ``attribute type:`` string
"""
properties = ("DevicePhysicalID",
"DeviceID",
"DeviceIPDotted",
"DeviceIPNumeric",
"VirtualNetworkID",
"Network",
"DeviceName",
"DeviceModel",
"DeviceVendor",
"DeviceVersion",
"PhysicalName",
"PhysicalDescr",
"PhysicalClass",
"PhysicalSerialNum",
"PhysicalModelName",
"PhysicalHardwareRev",
"PhysicalFirmwareRev",
"PhysicalSoftwareRev",
"PhysicalAlias",
"PhysicalAssetID",
)
|
StarcoderdataPython
|
9718850
|
##-----------------------------------------------------------
## Copyright 2020 Science and Technologies Facilities Council
## Licensed under the MIT License
## Author <NAME>, STFC Hartree Centre
import h5py
import numpy as np
import matplotlib.pyplot as plt
import argparse
#f = h5py.File('still_water.hdf5', 'r')
#f = h5py.File('initial_output.hdf5', 'r')
f = h5py.File('outputs/file9.hdf5', 'r')
#plt.scatter(f['pos_x'], f['pos_y'], c=f['is_boundary'])
print(max(f['pos_x']))
print(max(f['pos_y']))
plt.scatter(f['pos_x'], f['pos_y'], c=f['density'])
plt.show()
|
StarcoderdataPython
|
1818072
|
import atexit
def _encode_string(s):
encoded = s.encode('utf-8')
return encoded
def _decode_string(b):
return b.decode('utf-8')
_encode_string.__doc__ = """Encode a string for use by LLVM."""
_decode_string.__doc__ = """Decode a LLVM character (byte)string."""
_shutting_down = [False]
def _at_shutdown():
_shutting_down[0] = True
atexit.register(_at_shutdown)
def _is_shutting_down(_shutting_down=_shutting_down):
"""
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
return _shutting_down[0]
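# Illustration of the early-binding advice in the docstring above (the
# Resource class is hypothetical): binding _is_shutting_down as a default
# argument lets __del__ call it even after module globals have been cleared
# during interpreter shutdown.
#
#     class Resource:
#         def __del__(self, _is_shutting_down=_is_shutting_down):
#             if not _is_shutting_down():
#                 self.release()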
|
StarcoderdataPython
|
5120579
|
#!/usr/bin/env python
import argparse
import binascii
import errno
import fuse
import getpass
import os
import paramiko
import socket
import stat
import sys
import time
import uuid
fuse.fuse_python_api = (0, 2)
class BlitzClient(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = None
self.chan = None
self.fd = None
self.transport = None
self.keys = None
self.key = None
def __del__(self):
self.disconnect()
def connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
def load_key(self, keyfile):
if self.key is not None:
return
try:
self.key = paramiko.RSAKey.from_private_key_file(keyfile)
except paramiko.PasswordRequiredException:
password = getpass.getpass('RSA key password: ')
self.key = paramiko.RSAKey.from_private_key_file(keyfile, password)
def get_transport(self):
self.transport = paramiko.Transport(self.sock)
self.transport.start_client()
def load_keys(self):
# if self.keys is None:
# self.keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
key = self.transport.get_remote_server_key()
print ("KEY: %s" % binascii.hexlify(key.get_fingerprint()))
def auth_pubkey(self, username):
self.transport.auth_publickey(username, self.key)
def get_channel(self):
self.chan = self.transport.open_session()
self.chan.get_pty()
self.chan.invoke_shell()
self.fd = self.chan.makefile('rU')
return self.chan
def close(self):
if self.fd is not None:
self.fd.close()
self.fd = None
if self.chan is not None:
self.chan.close()
self.chan = None
def disconnect(self):
if self.fd is not None:
self.fd.close()
self.fd = None
if self.chan is not None:
self.chan.close()
self.chan = None
if self.transport is not None:
self.transport.close()
self.transport = None
if self.sock is not None:
self.sock.close()
self.sock = None
def wait_for(self, result='\n', printres=False):
data = self.fd.read(1)
if not data:
return ''
buf = data
res = ''
rsize = len(result)
while buf != result:
if printres:
sys.stdout.write(data)
data = self.fd.read(1)
if not data:
return res
res += data
buf += data
if len(buf) > rsize:
buf = buf[-rsize:]
return res
def list(self, folder):
self.chan.send('list %s\r\n' % (folder))
self.wait_for('OK')
self.wait_for('\n')
res = self.wait_for('>')
if res and res[-1] == '>':
res = res[:-1]
if res.startswith('ERROR'):
raise ValueError(res.strip())
return [x.strip() for x in res.split('\n') if x.strip()]
def get(self, filename):
self.chan.send('get %s\r\n' % (filename.strip()))
self.wait_for('OK')
self.wait_for('\n')
name = self.fd.readline()
if name.startswith('ERROR') or not name.startswith('File:'):
raise ValueError(name.strip())
size = self.fd.readline()
if not size.startswith('Size:'):
raise ValueError(size.strip())
name = name[5:].strip()
size = size[5:].strip()
if not size.isdigit():
raise ValueError(size)
size = int(size)
data = self.fd.read(size)
self.wait_for('>')
return (name, size, data)
def stat(self, filename):
self.chan.send('stat %s\r\n' % (filename))
self.wait_for('OK')
self.wait_for('\n')
res = self.fd.readline()
self.wait_for('>')
res = res.strip()
if res.startswith('ERROR'):
raise ValueError('ERROR: %s' % res)
entries = res.split(' ')
if len(entries) < 2:
raise ValueError('ERROR: Num entries %s' % (res))
ftype = entries[0]
size = entries[1]
if not size.isdigit():
raise ValueError('ERROR: size (%s, %s)' % (size, res))
size = int(size)
name = ' '.join(entries[2:])
return (ftype, size, name)
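# Minimal BlitzClient usage sketch (mirrors what BlitzFuse.__init__ below does;
# assumes a blitz server listening on localhost:4444 and an RSA key at
# ~/.ssh/id_rsa):
#
#     cli = BlitzClient('localhost', 4444)
#     cli.connect()
#     cli.get_transport()
#     cli.load_keys()
#     cli.load_key(os.path.expanduser('~/.ssh/id_rsa'))
#     cli.auth_pubkey('dummy')
#     cli.get_channel()
#     cli.wait_for('>')
#     print(cli.list('/'))
#     cli.disconnect()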
class BlitzFuse(fuse.Operations):
def __init__(self, config):
self.host = config['server']
self.port = int(config['port'])
self.cache = config['cache']
self.logfile = config['logfile']
self.fd = 0
self.cli = BlitzClient(self.host, self.port)
self.cli.connect()
self.cli.get_transport()
self.cli.load_keys()
self.cli.load_key(os.path.expanduser('~/.ssh/id_rsa'))
self.cli.auth_pubkey('dummy')
self.chan = None
self.channel()
self.files = {}
self.filemap = {}
self.dirmap = {}
self.statmap = {}
def channel(self):
self.chan = self.cli.get_channel()
self.wait_prompt()
def wait_prompt(self):
self.cli.wait_for('>')
def getattr(self, path, fh=None):
if self.cache and path in self.statmap:
return self.statmap[path]
res = {
'st_atime': int(time.time()),
'st_nlink': 1,
'st_size': 0
}
res['st_mtime'] = res['st_atime']
res['st_ctime'] = res['st_atime']
if path == '/' or path == '.' or path == '..':
ftype = 'DIR'
fsize = 36
else:
try:
(ftype, fsize, fname) = self.cli.stat(path)
except Exception as e:
self.log(e)
raise fuse.FuseOSError(errno.ENOENT)
if ftype == 'DIR':
            res['st_mode'] = stat.S_IFDIR | 0o755
res['st_size'] = fsize
elif ftype == 'FILE':
            res['st_mode'] = stat.S_IFREG | 0o644
res['st_size'] = fsize
else:
raise fuse.FuseOSError(errno.ENOENT)
if self.cache:
self.statmap[path] = res
return res
def readdir(self, path, offset):
try:
ok = False
if self.cache:
if path in self.dirmap:
res = self.dirmap[path]
ok = True
if not ok:
res = ['.', '..']
try:
res += self.cli.list(path)
if self.cache:
self.dirmap[path] = res
except Exception as e:
self.log(e)
for ent in res:
yield os.path.basename(ent.decode('utf-8'))
except:
return
def open(self, path, flags):
if self.cache:
res = self.filemap.get(path, -1)
if res > 0:
return res
(fname, size, data) = self.cli.get(path)
self.fd += 1
self.files[self.fd] = (fname, size, data)
if self.cache:
self.filemap[path] = self.fd
return self.fd
def read(self, path, length, offset, fh):
(fname, size, data) = self.files[fh]
return data[offset:offset + length]
def release(self, path, fh):
if not self.cache:
del self.files[fh]
def log(self, msg):
if self.logfile:
with open(self.logfile, 'a') as fd:
fd.write('%s\n' % msg)
else:
print ('%s' % (msg))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Blitz fuse')
parser.add_argument('mountpoint', help='Mount point')
parser.add_argument('-s', '--server', default='localhost', help='Server to connect')
parser.add_argument('-p', '--port', default=4444, help='Port to connect')
parser.add_argument('-l', '--logfile', help='Log to file')
parser.add_argument('-c', '--cache', action='store_true', help='Cache results for faster access, but server data changes are not visible')
res = parser.parse_args()
args = vars(res)
fuse.FUSE(BlitzFuse(args), args['mountpoint'], foreground=True)
|
StarcoderdataPython
|
12854030
|
<reponame>maxgerhardt/gd32-bootloader-dfu-dapboot
Import("env")
# original Makefile builds into dapboot.bin/elf, let's do the same
env.Replace(PROGNAME="dapboot")
|
StarcoderdataPython
|
12849995
|
<reponame>pedrohenriquegomes/openwsn-sw<gh_stars>10-100
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
log = logging.getLogger('typeRssi')
log.setLevel(logging.ERROR)
log.addHandler(logging.NullHandler())
import openType
class typeRssi(openType.openType):
def __init__(self):
# log
log.info("creating object")
# initialize parent class
openType.openType.__init__(self)
def __str__(self):
return '{0} dBm'.format(self.rssi)
#======================== public ==========================================
def update(self,rssi):
self.rssi = rssi
#======================== private =========================================
|
StarcoderdataPython
|
11294368
|
<gh_stars>0
# """
# This script can be used to evaluate a trained model on 3D pose/shape and masks/part segmentation. You first need to download the datasets and preprocess them.
# Example usage:
# ```
# python3 eval.py --checkpoint=data/model_checkpoint.pt --dataset=h36m-p1 --log_freq=20
# ```
# Running the above command will compute the MPJPE and Reconstruction Error on the Human3.6M dataset (Protocol I). The ```--dataset``` option can take different values based on the type of evaluation you want to perform:
# 1. Human3.6M Protocol 1 ```--dataset=h36m-p1```
# 2. Human3.6M Protocol 2 ```--dataset=h36m-p2```
# 3. 3DPW ```--dataset=3dpw```
# 4. LSP ```--dataset=lsp```
# 5. MPI-INF-3DHP ```--dataset=mpi-inf-3dhp```
# """
# import torch
# from torch.utils.data import DataLoader
# import numpy as np
# import cv2
# import os
# import argparse
# import json
# from collections import namedtuple
# from tqdm import tqdm
# import torchgeometry as tgm
# import sys
# import config
# import constants
# from models import hmr, SMPL
# from datasets import BaseDataset
# from utils.imutils import uncrop
# from utils.pose_utils import reconstruction_error
# from utils.part_utils import PartRenderer
# from utils.geometry import batch_rodrigues, perspective_projection, estimate_translation
# from utils.imutils import crop, flip_img, flip_pose, flip_kp, transform, rot_aa
# # Define command-line arguments
# parser = argparse.ArgumentParser()
# parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
# parser.add_argument('--dataset', default='slp', choices=['h36m-p1', 'h36m-p2', 'lsp', '3dpw', 'mpi-inf-3dhp','coco','slp'], help='Choose evaluation dataset')
# parser.add_argument('--log_freq', default=50, type=int, help='Frequency of printing intermediate results')
# parser.add_argument('--batch_size', default=32, help='Batch size for testing')
# parser.add_argument('--shuffle', default=False, action='store_true', help='Shuffle data')
# parser.add_argument('--num_workers', default=8, type=int, help='Number of processes for data loading')
# parser.add_argument('--result_file', default=None, help='If set, save detections to a .npz file')
# def untranskey(kp, center, scale, inverse, r=0, f=0):
# scaleRGB = 256/1024
# """'Undo' the image cropping/resizing.
# This function is used when evaluating mask/part segmentation.
# """
# nparts = kp.shape[0]
# #print(nparts)
# for i in range(nparts):
# kp[i,0:2] = transform(kp[i,0:2]+1, center, scale,
# [constants.IMG_RES, constants.IMG_RES],invert=inverse,rot=r)
# kp = kp/scaleRGB
# kp = kp.astype('float32')
# return kp
# def run_evaluation(model, dataset_name, dataset, result_file,
# batch_size=1, img_res=224,
# num_workers=32, shuffle=False, log_freq=50):
# """Run evaluation on the datasets and metrics we report in the paper. """
# device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# # Transfer model to the GPU
# model.to(device)
# # Load SMPL model
# smpl_neutral = SMPL(config.SMPL_MODEL_DIR,
# create_transl=False).to(device)
# smpl_male = SMPL(config.SMPL_MODEL_DIR,
# gender='male',
# create_transl=False).to(device)
# smpl_female = SMPL(config.SMPL_MODEL_DIR,
# gender='female',
# create_transl=False).to(device)
# renderer = PartRenderer()
# # Regressor for H36m joints
# #J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
# save_results = result_file is not None
# # Disable shuffling if you want to save the results
# if save_results:
# shuffle=False
# # Create dataloader for the dataset
# data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=True)
# # Pose metrics
# # MPJPE and Reconstruction error for the non-parametric and parametric shapes
# mpjpe = np.zeros(len(dataset))
# print('dataset length:',len(dataset))
# recon_err = np.zeros(len(dataset))
# mpjpe_smpl = np.zeros(len(dataset))
# recon_err_smpl = np.zeros(len(dataset))
# # Shape metrics
# # Mean per-vertex error
# shape_err = np.zeros(len(dataset))
# shape_err_smpl = np.zeros(len(dataset))
# # Mask and part metrics
# # Accuracy
# accuracy = 0.
# parts_accuracy = 0.
# # True positive, false positive and false negative
# tp = np.zeros((2,1))
# fp = np.zeros((2,1))
# fn = np.zeros((2,1))
# parts_tp = np.zeros((7,1))
# parts_fp = np.zeros((7,1))
# parts_fn = np.zeros((7,1))
# # Pixel count accumulators
# pixel_count = 0
# parts_pixel_count = 0
# # Store SMPL parameters
# smpl_pose = np.zeros((len(dataset), 72))
# smpl_betas = np.zeros((len(dataset), 10))
# smpl_camera = np.zeros((len(dataset), 3))
# pred_joints = np.zeros((len(dataset), 17, 3))
# eval_pose = False
# eval_masks = False
# eval_parts = False
# # Choose appropriate evaluation for each dataset
# if dataset_name == 'coco' or dataset_name == 'slp':
# eval_pose = True
# # Iterate over the entire dataset
# for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
# # Get ground truth annotations from the batch
# gt_pose = batch['pose'].to(device)
# gt_betas = batch['betas'].to(device)
# gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices
# images = batch['img'].to(device)
# #images_depth = batch['img_depth'].to(device)
# # images_ir = batch['img_ir'].to(device)
# # images_pm = batch['img_pm'].to(device)
# gender = batch['gender'].to(device)
# center = batch['center'].to(device)
# scale = batch['scale'].to(device)
# curr_batch_size = images.shape[0]
# with torch.no_grad():
# pred_rotmat, pred_betas, pred_camera = model([images])
# pred_output = smpl_neutral(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
# pred_vertices = pred_output.vertices
# if save_results:
# rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
# rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1)
# pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72)
# smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
# smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :] = pred_betas.cpu().numpy()
# smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :] = pred_camera.cpu().numpy()
# # 3D pose evaluation
# if eval_pose:
# pred_joints = pred_output.joints
# pred_cam_t = torch.stack([pred_camera[:,1],
# pred_camera[:,2],
# 2*5000./(224 * pred_camera[:,0] +1e-9)],dim=-1)
# camera_center = torch.zeros(batch_size, 2, device=torch.device('cuda'))
# gt_keypoints_2d = batch['keypoints'][:,:,:2].cuda()
# pred_keypoints_2d = perspective_projection(pred_joints,
# rotation=torch.eye(3, device=torch.device('cuda')).unsqueeze(0).expand(batch_size, -1, -1),
# translation=pred_cam_t,
# focal_length=5000.,
# camera_center=camera_center)
# #pred_keypoints_2d = pred_keypoints_2d / (224 / 2.)
# center = center.cpu().numpy()
# scale = scale.cpu().numpy()
# gt_keypoints_2d = 112.*gt_keypoints_2d
# gt_keypoints_2d = gt_keypoints_2d.cpu().numpy()
# gt_keypoints = gt_keypoints_2d[:,25:39,:]
# gt_keypoints_2d = gt_keypoints+112
# temp = np.zeros((gt_keypoints_2d.shape[0],14,2))
# for i in range(gt_keypoints_2d.shape[0]):
# temp[i,:,:] = untranskey(gt_keypoints_2d[i,:,:], center[i], scale[i], inverse=1, r=0, f=0)
# gt_keypoints_2d = torch.tensor(temp)
# pred_keypoints_2d = pred_keypoints_2d.cpu().numpy()
# pred_keypoints_2d = pred_keypoints_2d[:,25:39,:]
# pred_keypoints_2d+=112
# for i in range(pred_keypoints_2d.shape[0]):
# temp[i,:,:] = untranskey(pred_keypoints_2d[i,:,:], center[i], scale[i], inverse=1, r=0, f=0)
# pred_keypoints_2d = torch.tensor(temp)
# #Absolute error (MPJPE)
# error = torch.sqrt(((pred_keypoints_2d - gt_keypoints_2d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
# error = torch.sqrt(((pred_keypoints_2d - gt_keypoints_2d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
# mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error
# # Reconstuction_error
# r_error = reconstruction_error(pred_keypoints_2d.cpu().numpy(), gt_keypoints_2d.cpu().numpy(), reduction=None)
# recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error
# # Print intermediate results during evaluation
# #print(step,log_freq)
# if step % log_freq == log_freq - 1:
# if eval_pose:
# print('MPJPE: ' + str(mpjpe[:step * batch_size].mean()))
# print('Reconstruction Error: ' + str(recon_err[:step * batch_size].mean()))
# print()
# # Save reconstructions to a file for further processing
# if save_results:
# np.savez(result_file, pred_joints=pred_joints, pose=smpl_pose, betas=smpl_betas, camera=smpl_camera)
# # Print final results during evaluation
# print('*** Final Results ***')
# print()
# if eval_pose:
# print('MPJPE: ' + str(mpjpe.mean()))
# print('Reconstruction Error: ' + str(recon_err.mean()))
# print()
# if __name__ == '__main__':
# args = parser.parse_args()
# model = hmr(config.SMPL_MEAN_PARAMS)
# checkpoint = torch.load(args.checkpoint)
# model.load_state_dict(checkpoint['model'], strict=False)
# model.eval()
# # Setup evaluation dataset
# dataset = BaseDataset(None, args.dataset, is_train=False)
# # Run evaluation
# run_evaluation(model, args.dataset, dataset, args.result_file,
# batch_size=args.batch_size,
# shuffle=args.shuffle,
# log_freq=args.log_freq)
import torch
from torch.utils.data import DataLoader
import numpy as np
import cv2
import os
import argparse
import json
from collections import namedtuple
from tqdm import tqdm
import torchgeometry as tgm
import config
import constants
from models import hmr, SMPL
from datasets import BaseDataset
from utils.imutils import uncrop
from utils.pose_utils import reconstruction_error
# from utils.part_utils import PartRenderer
from utils.imutils import crop, flip_img, flip_pose, flip_kp, transform, rot_aa
from utils.geometry import batch_rodrigues, perspective_projection, estimate_translation
# Define command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
parser.add_argument('--dataset', default='h36m-p1', choices=['slp','h36m-p1', 'h36m-p2', 'lsp', '3dpw', 'mpi-inf-3dhp'], help='Choose evaluation dataset')
parser.add_argument('--log_freq', default=50, type=int, help='Frequency of printing intermediate results')
parser.add_argument('--batch_size', default=1, type=int, help='Batch size for testing')
parser.add_argument('--shuffle', default=False, action='store_true', help='Shuffle data')
parser.add_argument('--num_workers', default=8, type=int, help='Number of processes for data loading')
parser.add_argument('--result_file', default=None, help='If set, save detections to a .npz file')
def untranskey(kp, center, scale, inverse, r=0, f=0):
    """'Undo' the image cropping/resizing.
    This function is used when evaluating mask/part segmentation.
    """
    scaleRGB = 256 / 1024
nparts = kp.shape[0]
# print(nparts)
for i in range(nparts):
kp[i, 0:2] = transform(kp[i, 0:2] + 1, center, scale,
[constants.IMG_RES, constants.IMG_RES], invert=inverse, rot=r)
kp = kp / scaleRGB
kp = kp.astype('float32')
return kp
def run_evaluation(model, dataset_name, dataset, result_file,
batch_size=1, img_res=224,
num_workers=1, shuffle=False, log_freq=50):
"""Run evaluation on the datasets and metrics we report in the paper. """
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Transfer model to the GPU
model.to(device)
# Load SMPL model
smpl_neutral = SMPL(config.SMPL_MODEL_DIR,
create_transl=False).to(device)
smpl_male = SMPL(config.SMPL_MODEL_DIR,
gender='male',
create_transl=False).to(device)
smpl_female = SMPL(config.SMPL_MODEL_DIR,
gender='female',
create_transl=False).to(device)
# renderer = PartRenderer()
# Regressor for H36m joints
# J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
save_results = result_file is not None
# Disable shuffling if you want to save the results
if save_results:
shuffle=False
# Create dataloader for the dataset
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
# Pose metrics
# MPJPE and Reconstruction error for the non-parametric and parametric shapes
mpjpe = np.zeros(len(dataset))
recon_err = np.zeros(len(dataset))
mpjpe_smpl = np.zeros(len(dataset))
recon_err_smpl = np.zeros(len(dataset))
# Shape metrics
# Mean per-vertex error
shape_err = np.zeros(len(dataset))
shape_err_smpl = np.zeros(len(dataset))
# Mask and part metrics
# Accuracy
accuracy = 0.
parts_accuracy = 0.
# True positive, false positive and false negative
tp = np.zeros((2,1))
fp = np.zeros((2,1))
fn = np.zeros((2,1))
parts_tp = np.zeros((7,1))
parts_fp = np.zeros((7,1))
parts_fn = np.zeros((7,1))
# Pixel count accumulators
pixel_count = 0
parts_pixel_count = 0
# Store SMPL parameters
smpl_pose = np.zeros((len(dataset), 72))
smpl_betas = np.zeros((len(dataset), 10))
smpl_camera = np.zeros((len(dataset), 3))
pred_joints = np.zeros((len(dataset), 17, 3))
eval_pose = False
eval_masks = False
eval_parts = False
# Choose appropriate evaluation for each dataset
if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2' or dataset_name == '3dpw' or dataset_name == 'mpi-inf-3dhp':
eval_pose = True
elif dataset_name == 'lsp':
eval_masks = True
eval_parts = True
annot_path = config.DATASET_FOLDERS['upi-s1h']
joint_mapper_h36m = constants.H36M_TO_J17 if dataset_name == 'mpi-inf-3dhp' else constants.H36M_TO_J14
joint_mapper_gt = constants.J24_TO_J17 if dataset_name == 'mpi-inf-3dhp' else constants.J24_TO_J14
# Iterate over the entire dataset
for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
# Get ground truth annotations from the batch
gt_pose = batch['pose'].to(device)
gt_betas = batch['betas'].to(device)
gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices
images = batch['img'].to(device)
#depths = batch['depth'].to(device)
gender = batch['gender'].to(device)
center = batch['center'].to(device)
scale = batch['scale'].to(device)
curr_batch_size = images.shape[0]
with torch.no_grad():
#pred_rotmat_1, pred_betas_1, pred_camera_1,\
#pred_rotmat_2, pred_betas_2, pred_camera_2,\
pred_rotmat, pred_betas, pred_camera = model([images])
pred_output = smpl_neutral(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
pred_vertices = pred_output.vertices
if save_results:
rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1)
pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72)
smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :] = pred_betas.cpu().numpy()
smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :] = pred_camera.cpu().numpy()
# 2D Absolute error (MPJPE)
pred_joints = pred_output.joints
pred_cam_t = torch.stack([pred_camera[:, 1],
pred_camera[:, 2],
2 * 5000. / (224 * pred_camera[:, 0] + 1e-9)], dim=-1)
        camera_center = torch.zeros(curr_batch_size, 2, device=device)
        # print('camera_center',camera_center)
        gt_keypoints_2d = batch['keypoints'][:, :, :2].to(device)
        pred_keypoints_2d = perspective_projection(pred_joints,
                                                   rotation=torch.eye(3, device=device).unsqueeze(
                                                       0).expand(curr_batch_size, -1, -1),
                                                   translation=pred_cam_t,
                                                   focal_length=5000.,
                                                   camera_center=camera_center)
# pred_keypoints_2d = pred_keypoints_2d / (224 / 2.)
center = center.cpu().numpy()
scale = scale.cpu().numpy()
gt_keypoints_2d = 112. * gt_keypoints_2d
gt_keypoints_2d = gt_keypoints_2d.cpu().numpy()
gt_keypoints = gt_keypoints_2d[:, 25:39, :]
gt_keypoints_2d = gt_keypoints + 112
temp = np.zeros((gt_keypoints_2d.shape[0], 14, 2))
for i in range(gt_keypoints_2d.shape[0]):
temp[i, :, :] = untranskey(gt_keypoints_2d[i, :, :], center[i], scale[i], inverse=1, r=0, f=0)
gt_keypoints_2d = torch.tensor(temp)
pred_keypoints_2d = pred_keypoints_2d.cpu().numpy()
pred_keypoints_2d = pred_keypoints_2d[:, 25:39, :]
pred_keypoints_2d += 112
for i in range(pred_keypoints_2d.shape[0]):
temp[i, :, :] = untranskey(pred_keypoints_2d[i, :, :], center[i], scale[i], inverse=1, r=0, f=0)
pred_keypoints_2d = torch.tensor(temp)
# Absolute error (MPJPE)
        error = torch.sqrt(((pred_keypoints_2d - gt_keypoints_2d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error
        # Reconstruction error
r_error = reconstruction_error(pred_keypoints_2d.cpu().numpy(), gt_keypoints_2d.cpu().numpy(), reduction=None)
recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error
print('*** Final Results ***')
print()
print('MPJPE: ' + str(mpjpe.mean()))
print('Reconstruction Error: ' + str(recon_err.mean()))
print()
if __name__ == '__main__':
args = parser.parse_args()
model = hmr(config.SMPL_MEAN_PARAMS)
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['model'], strict=False)
model.eval()
# Setup evaluation dataset
dataset = BaseDataset(None, args.dataset, is_train=False)
# Run evaluation
    run_evaluation(model, args.dataset, dataset, args.result_file,
                   batch_size=args.batch_size,
                   shuffle=args.shuffle,
                   log_freq=args.log_freq,
                   num_workers=args.num_workers)
|
StarcoderdataPython
|
3323098
|
#!python3
# -*- coding: utf-8 -*-
# Copyright (C) 2019 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
from scipy import constants
from mendeleev import element
from pymatgen import Element
from thermo import nested_formula_parser, mass_fractions
from prettytable import PrettyTable
__author__ = '<NAME>'
__version__ = "0.0.1"
__email__ = '<EMAIL>'
__status__ = 'Development'
class HEA:
"""
Attributes
----------
    formula : str
        The formula of the alloy, in at%
    _alloy : dict
        The parsed formula as an {element: amount} dictionary
    mixing_entropy : float
        Configurational (mixing) entropy of the alloy [J/(K mol)]
    mixing_enthalpy : float
        Enthalpy of mixing of the alloy [kJ/mol]
    melting_temperature : float
        Rule-of-mixtures estimate of the melting temperature [K]
    delta : float
        Atomic size difference parameter [%]
    omega : float
        Omega parameter, Tm * Smix / |Hmix|
    VEC : float
        Valence electron concentration
    density : float
        Estimated density of the alloy [g/cm3]
    crystalStructure : str
        Predicted crystal structure ('FCC', 'BCC' or 'BCC+FCC')
    isSolidSolution : str
        'Yes' if the empirical omega, delta and mixing-enthalpy criteria for
        solid-solution formation are met, otherwise 'No'
"""
_data = pd.read_csv('data/mixing.csv')
_tab = PrettyTable(
field_names=["Alloy", "Density", "Delta", "Hmix", "Smix", "VEC", "Tm", "Phases", "isSolidSolution"])
_tab.align["Alloy"] = 'r'
_tab.align["Density"] = 'r'
_tab.align["Delta"] = 'r'
_tab.align["Hmix"] = 'r'
_tab.align["Smix"] = 'r'
_tab.align["VEC"] = 'r'
_tab.align["Tm"] = 'r'
_tab.align["Phases"] = 'r'
_tab.align["isSolidSolution"] = 'r'
def __init__(self, formula):
"""
Initilize HEA class with the alloy formula and related attributes.
Parameters
----------
alloy : str
The formula of the alloy, in at%
"""
self.formula = formula
self._alloy = nested_formula_parser(formula)
self.mixing_entropy = self.configurationalEntropy()
self.mixing_enthalpy = self.enthalpyOfMixing()
self.melting_temperature = self.meltingTemperature()
self.delta = self.atomicSizeDifference()
self.omega = self.omegaCalculation()
self.VEC = self.valenceElectronConcentration()
self.density = self.density()
self.crystalStructure = self.crystalStructure()
self.isSolidSolution = self.isSolidSolution()
def configurationalEntropy(self):
"""Return the mixing entropy of the alloy."""
return -1 * constants.R * sum(num / sum(self._alloy.values()) * np.log(num / sum(self._alloy.values())) for num in self._alloy.values())
def enthalpyOfMixing(self):
"""
Return the mixing enthalpy of the alloy.
References
----------
<NAME> and <NAME>: Cohesion in Metals, (Elsevier Science Publishers B.V., Netherlands, 1988)
"""
list_of_pairs = [(a, b)
for a in self._alloy for b in self._alloy if a != b and (a, b) != (b, a)]
enthalpy = [self._data.at[a] for a in list_of_pairs]
return sum(2 * y / (len(self._alloy) ** 2) for y in enthalpy)
def atomicSizeDifference(self):
"""Return the atomic size difference of the alloy, i.e. delta."""
delta = sum((num / sum(self._alloy.values())) * (1 - (element(elm).atomic_radius /
self.averageAtomicRadius()))**2 for elm, num in self._alloy.items())
return np.sqrt(delta) * 100
def averageAtomicRadius(self):
"""Return the average atomic radius of the alloy."""
return sum(num / sum(self._alloy.values()) * (element(elm).atomic_radius) for elm, num in self._alloy.items())
def valenceElectronConcentration(self):
"""Return the valence electron concentration of the alloy."""
return sum(num / sum(self._alloy.values()) * element(elm).nvalence() for elm, num in self._alloy.items())
def omegaCalculation(self):
"""Return the omega value of the alloy."""
return (self.meltingTemperature() * self.configurationalEntropy()) / (abs(self.enthalpyOfMixing()) * 1000)
def meltingTemperature(self):
"""Return the approximate melting temperature of the alloy."""
t_melting = sum(num / sum(self._alloy.values()) * (element(elm).melting_point)
for elm, num in self._alloy.items())
return t_melting
    def isSolidSolution(self):
        """Return 'Yes' if the alloy is predicted to form a solid solution, else 'No'."""
        if (self.omegaCalculation() >= 1.1
                and self.atomicSizeDifference() < 6.6
                and -15 < self.enthalpyOfMixing() < 5):
            return 'Yes'
        return 'No'
def density(self):
"""Return the approximate density of the alloy."""
return 100 / sum(element(elm).density / (ws * 100) for elm, ws in mass_fractions(self._alloy).items())
def crystalStructure(self):
"""Return the predicted crystal structure of the alloy."""
# TODO: Amorphous phases and Intermetallics --> Prog. Nat. Sci: Mat. Int. 21(2011) 433-446
if self.VEC > 8:
return 'FCC'
if self.VEC < 6.87:
return 'BCC'
else:
return 'BCC+FCC'
def printResults(self):
print(self.formula)
print('\n\t Density\t\t= {:7.2f} [g/cm3]'.format(self.density))
print('\t Delta\t\t\t= {:7.2f} [%]'.format(self.delta))
print('\t H_mixing\t\t= {:7.2f} [kJ/mol]'.format(self.mixing_enthalpy))
print('\t VEC\t\t\t= {:7.2f}'.format(self.VEC))
print('\t S_mixing\t\t= {:7.2f} [J/K mol]'.format(self.mixing_entropy))
print('\t T_melting\t\t= {:7.3f} [K]'.format(self.melting_temperature))
print('\t Omega\t\t\t= {:7.3f}'.format(self.omega))
print('\t Crystal Structure\t= {}\n'.format(self.crystalStructure))
print('\t Is Solid Solution\t= {}\n'.format(self.isSolidSolution))
def prettyPrint(self):
self._tab.add_row([self.formula, round(self.density, 2), round(self.delta, 2), round(self.mixing_enthalpy, 2),
round(self.mixing_entropy, 2), round(self.VEC, 2), round(self.melting_temperature, 2), self.crystalStructure, self.isSolidSolution])
def table(self):
print(self._tab)
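if __name__ == '__main__':
    # Minimal usage sketch (illustrative only). It assumes the mixing-enthalpy
    # table 'data/mixing.csv' read above and the mendeleev/thermo dependencies
    # are installed; 'CoCrFeNiMn' is just an example formula.
    cantor = HEA('CoCrFeNiMn')
    cantor.printResults()
    cantor.prettyPrint()
    cantor.table()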
|
StarcoderdataPython
|
6569969
|
import pytz
from office365.runtime.client_value import ClientValue
class DateTimeTimeZone(ClientValue):
"""Describes the date, time, and time zone of a point in time."""
def __init__(self, datetime=None, timezone=None):
"""
:param str timezone: Represents a time zone, for example, "Pacific Standard Time".
:param str datetime: A single point of time in a combined date and time representation ({date}T{time};
for example, 2017-08-29T04:00:00.0000000).
"""
super(DateTimeTimeZone, self).__init__()
self.dateTime = datetime
self.timeZone = timezone
@staticmethod
def parse(dt):
"""
:type dt: datetime.datetime
"""
local_dt = dt.replace(tzinfo=pytz.utc)
return DateTimeTimeZone(datetime=local_dt.isoformat(), timezone=local_dt.strftime('%Z'))
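if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; importing this module requires
    # the office365 package it belongs to). parse() treats the naive datetime
    # as UTC.
    import datetime
    dtz = DateTimeTimeZone.parse(datetime.datetime(2017, 8, 29, 4, 0, 0))
    print(dtz.dateTime, dtz.timeZone)  # 2017-08-29T04:00:00+00:00 UTC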
|
StarcoderdataPython
|
11357676
|
<reponame>Forward83/staff_info
import copy
import getpass
import subprocess
from db.connectors import connection_factory
from settings import DB_CREDENTIALS, DB_TYPE
admin_credential = copy.deepcopy(DB_CREDENTIALS)
del admin_credential['db']
cnx, connector = None, None
# Create connection to mysql with admin credentials
while not cnx:
username = input('Input DB administrator username: ')
password = getpass.getpass()
admin_credential['user'] = username
admin_credential['passwd'] = password
connector = connection_factory(DB_TYPE, **admin_credential)
cnx = connector.connection
# SQL block for DB, user, grant privileges creation
sql_create_db = "CREATE DATABASE IF NOT EXISTS {}; ".format(DB_CREDENTIALS['db'])
sql_create_user = "CREATE USER IF NOT EXISTS {}@{} IDENTIFIED BY '{}'; ".format(DB_CREDENTIALS['user'],
DB_CREDENTIALS['host'],
DB_CREDENTIALS['passwd'])
sql_grant_perm = "GRANT ALL PRIVILEGES ON {}.* TO {}@{};".format(DB_CREDENTIALS['db'], DB_CREDENTIALS['user'],
DB_CREDENTIALS['host'])
for sql in (sql_create_db, sql_create_user, sql_grant_perm):
connector.execute_sql(sql, change=False)
connector.close()
# Loading DB skeleton
args = [DB_TYPE, '-u{}'.format( admin_credential['user']), '-p{}'.format(admin_credential['passwd']),
DB_CREDENTIALS['db'], ]
with open('db/attendance.sql') as input_file:
proc = subprocess.Popen(args, stdin=input_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.communicate()
if proc.returncode == 0:
print('DB {} was created successfully'.format(DB_CREDENTIALS['db']))
|
StarcoderdataPython
|
11259849
|
import _plotly_utils.basevalidators
class GeoValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="geo", parent_name="choropleth", **kwargs):
super(GeoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", "geo"),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
StarcoderdataPython
|
6640222
|
from collections import OrderedDict
import numpy as np
def area_of_polygon(x, y):
"""Calculates the signed area of an arbitrary polygon given its vertices
http://stackoverflow.com/a/4682656/190597 (<NAME>)
http://softsurfer.com/Archive/algorithm_0101/algorithm_0101.htm#2D%20Polygons
"""
area = 0.0
for i in range(-1, len(x) - 1):
area += x[i] * (y[i + 1] - y[i - 1])
return area / 2.0
def centroid_of_polygon(points):
"""
http://stackoverflow.com/a/14115494/190597 (mgamba)
"""
import itertools as IT
area = area_of_polygon(*zip(*points))
result_x = 0
result_y = 0
N = len(points)
points = IT.cycle(points)
x1, y1 = next(points)
for i in range(N):
x0, y0 = x1, y1
x1, y1 = next(points)
cross = (x0 * y1) - (x1 * y0)
result_x += (x0 + x1) * cross
result_y += (y0 + y1) * cross
result_x /= (area * 6.0)
result_y /= (area * 6.0)
return (result_x, result_y)
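# Worked example (illustrative): for the unit square [(0, 0), (1, 0), (1, 1), (0, 1)],
# area_of_polygon([0, 1, 1, 0], [0, 0, 1, 1]) returns 1.0 and
# centroid_of_polygon([(0, 0), (1, 0), (1, 1), (0, 1)]) returns (0.5, 0.5).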
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
return
def isBetween(a, b, c, epsilon=0.001):
crossproduct = (c.y - a.y) * (b.x - a.x) - (c.x - a.x) * (b.y - a.y)
if abs(crossproduct) > epsilon : return False # (or != 0 if using integers)
dotproduct = (c.x - a.x) * (b.x - a.x) + (c.y - a.y ) *(b.y - a.y)
if dotproduct < 0 : return False
squaredlengthba = (b.x - a.x ) *(b.x - a.x) + (b.y - a.y ) *(b.y - a.y)
if dotproduct > squaredlengthba: return False
return True
def shared_face(ivlist1, ivlist2):
for i in range(len(ivlist1) - 1):
iv1 = ivlist1[i]
iv2 = ivlist1[i + 1]
for i2 in range(len(ivlist2) - 1):
            if ivlist2[i2: i2 + 2] == [iv2, iv1]:
return True
return False
def segment_face(ivert, ivlist1, ivlist2, vertices):
ic1pos = ivlist1.index(ivert)
if ic1pos == 0: # if ivert is first, then must also be last
ic1pos = len(ivlist1) - 1
ic1v1 = ivlist1[ic1pos - 1]
ic1v2 = ivlist1[ic1pos]
x, y = vertices[ic1v1]
a = Point(x, y)
x, y = vertices[ic1v2]
b = Point(x, y)
ic2pos = ivlist2.index(ivert)
ic2v2 = ivlist2[ic2pos + 1]
x, y = vertices[ic2v2]
c = Point(x, y)
if ic1v1 == ic2v2 or ic1v2 == ic2v2:
return
# print('Checking segment {} {} with point {}'.format(ic1v1, ic1v2, ic2v2))
if isBetween(a, b, c):
# print('between: ', a, b, c)
ivlist1.insert(ic1pos, ic2v2)
return
def to_cvfd(vertdict, nodestart=None, nodestop=None,
skip_hanging_node_check=False, verbose=False):
"""
Convert a vertex dictionary
Parameters
----------
vertdict
vertdict is a dictionary {icell: [(x1, y1), (x2, y2), (x3, y3), ...]}
nodestart : int
starting node number. (default is zero)
nodestop : int
ending node number up to but not including. (default is len(vertdict))
skip_hanging_node_check : bool
skip the hanging node check. this may only be necessary for quad-based
grid refinement. (default is False)
verbose : bool
print messages to the screen. (default is False)
Returns
-------
verts : ndarray
array of x, y vertices
iverts : list
list containing a list for each cell
"""
if nodestart is None:
nodestart = 0
if nodestop is None:
nodestop = len(vertdict)
ncells = nodestop - nodestart
# First create vertexdict {(x1, y1): ivert1, (x2, y2): ivert2, ...} and
# vertexlist [[ivert1, ivert2, ...], [ivert9, ivert10, ...], ...]
# In the process, filter out any duplicate vertices
vertexdict = OrderedDict()
vertexlist = []
    xcyc = np.empty((ncells, 2), dtype=float)
iv = 0
nvertstart = 0
if verbose:
print('Converting vertdict to cvfd representation.')
print('Number of cells in vertdict is: {}'.format(len(vertdict)))
print('Cell {} up to {} (but not including) will be processed.'
.format(nodestart, nodestop))
for icell in range(nodestart, nodestop):
points = vertdict[icell]
nvertstart += len(points)
xc, yc = centroid_of_polygon(points)
xcyc[icell, 0] = xc
xcyc[icell, 1] = yc
ivertlist = []
for p in points:
pt = tuple(p)
if pt in vertexdict:
ivert = vertexdict[pt]
else:
vertexdict[pt] = iv
ivert = iv
iv += 1
ivertlist.append(ivert)
if ivertlist[0] != ivertlist[-1]:
raise Exception('Cell {} not closed'.format(icell))
vertexlist.append(ivertlist)
# next create vertex_cell_dict = {}; for each vertex, store list of cells
# that use it
nvert = len(vertexdict)
if verbose:
print('Started with {} vertices.'.format(nvertstart))
print('Ended up with {} vertices.'.format(nvert))
print('Reduced total number of vertices by {}'.format(nvertstart -
nvert))
print('Creating dict of vertices with their associated cells')
vertex_cell_dict = OrderedDict()
for icell in range(nodestart, nodestop):
ivertlist = vertexlist[icell]
for ivert in ivertlist:
if ivert in vertex_cell_dict:
vertex_cell_dict[ivert].append(icell)
else:
vertex_cell_dict[ivert] = [icell]
# Now, go through each vertex and look at the cells that use the vertex.
# For quadtree-like grids, there may be a need to add a new hanging node
# vertex to the larger cell.
if verbose:
print('Done creating dict of vertices with their associated cells')
print('Checking for hanging nodes.')
vertexdict_keys = list(vertexdict.keys())
for ivert, cell_list in vertex_cell_dict.items():
for icell1 in cell_list:
for icell2 in cell_list:
# skip if same cell
if icell1 == icell2:
continue
# skip if share face already
ivertlist1 = vertexlist[icell1]
ivertlist2 = vertexlist[icell2]
if shared_face(ivertlist1, ivertlist2):
continue
# don't share a face, so need to segment if necessary
segment_face(ivert, ivertlist1, ivertlist2, vertexdict_keys)
if verbose:
print('Done checking for hanging nodes.')
verts = np.array(vertexdict_keys)
iverts = vertexlist
return verts, iverts
def shapefile_to_cvfd(shp, **kwargs):
import shapefile
print('Translating shapefile ({}) into cvfd format'.format(shp))
sf = shapefile.Reader(shp)
shapes = sf.shapes()
vertdict = {}
for icell, shape in enumerate(shapes):
points = shape.points
vertdict[icell] = points
verts, iverts = to_cvfd(vertdict, **kwargs)
return verts, iverts
def shapefile_to_xcyc(shp):
"""
Get cell centroid coordinates
Parameters
----------
shp : string
Name of shape file
Returns
-------
xcyc : ndarray
x, y coordinates of all polygons in shp
"""
import shapefile
print('Translating shapefile ({}) into cell centroids'.format(shp))
sf = shapefile.Reader(shp)
shapes = sf.shapes()
ncells = len(shapes)
    xcyc = np.empty((ncells, 2), dtype=float)
for icell, shape in enumerate(shapes):
points = shape.points
xc, yc = centroid_of_polygon(points)
xcyc[icell, 0] = xc
xcyc[icell, 1] = yc
return xcyc
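if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): two unit squares that share an
    # edge. Each polygon repeats its first vertex so the cell is closed, as
    # required by to_cvfd().
    demo_vertdict = {0: [(0., 0.), (1., 0.), (1., 1.), (0., 1.), (0., 0.)],
                     1: [(1., 0.), (2., 0.), (2., 1.), (1., 1.), (1., 0.)]}
    demo_verts, demo_iverts = to_cvfd(demo_vertdict, verbose=True)
    print(demo_verts)
    print(demo_iverts)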
|
StarcoderdataPython
|
3260034
|
"""获取python执行版本,用于兼容"""
import sys
PY2 = False
PY3 = False
def python_version():
version = sys.version[0]
    # sys.version returns the version info string, e.g. "3.7.0 ..."
if version == '2':
global PY2
PY2 = True
else:
global PY3
PY3 = True
return
# Executed on import so the version flags are populated immediately
python_version()
|
StarcoderdataPython
|
9654466
|
# imports
import json
import time
import pickle
import scipy.misc
import skimage.io
import cv2
import caffe
import numpy as np
import os.path as osp
from random import shuffle
from PIL import Image
import random
class ImageSegDataLayer(caffe.Layer):
"""
    This is a simple synchronous datalayer for training a Detection model on
PASCAL.
"""
def setup(self, bottom, top):
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
SimpleTransformer.check_params(params)
# store input as class variables
self.batch_size = params['batch_size']
self.input_shape = params['crop_size']
# Create a batch loader to load the images.
self.batch_loader = BatchLoader(params)
# === reshape tops ===
# since we use a fixed input image size, we can shape the data layer
# once. Else, we'd have to do it in the reshape call.
top[0].reshape(
self.batch_size, 3, self.input_shape[0], self.input_shape[1])
# Note the 20 channels (because PASCAL has 20 classes.)
top[1].reshape(
self.batch_size, 1, self.input_shape[0], self.input_shape[1])
print_info("ImageSegDataLayer", params)
def forward(self, bottom, top):
"""
Load data.
"""
for itt in range(self.batch_size):
# Use the batch loader to load the next image.
im, label = self.batch_loader.load_next_image()
# Add directly to the caffe data layer
top[0].data[itt, ...] = im
top[1].data[itt, ...] = label
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
These layers does not back propagate
"""
pass
class BatchLoader(object):
"""
This class abstracts away the loading of images.
Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
performed.
"""
def __init__(self, params):
self.batch_size = params['batch_size']
self.root_folder = params['root_folder']
self.source = params['source']
# get list of image indexes.
self.indexlist = [line.strip().split() for line in open(self.source)]
self._cur = 0 # current image
# this class does some simple data-manipulations
self.transformer = SimpleTransformer(params)
print "BatchLoader initialized with {} images".format(
len(self.indexlist))
# def exract_image_list(self, source):
# image_list = []
# with open(source, 'r') as f:
# for line in f:
# image_list.append(line.strip().split())
#
# return image_list
def load_next_image(self):
"""
Load the next image in a batch.
"""
# Did we finish an epoch?
if self._cur == len(self.indexlist):
self._cur = 0
shuffle(self.indexlist)
# Load an image
index = self.indexlist[self._cur] # Get the image index
image_file_path, label_file_path = index
# image = Image.open(osp.join(self.root_folder, image_file_path))
# label = Image.open(osp.join(self.root_folder, label_file_path))
image = cv2.imread(osp.join(self.root_folder, image_file_path), cv2.IMREAD_COLOR)
label = cv2.imread(osp.join(self.root_folder, label_file_path), cv2.IMREAD_GRAYSCALE)
self._cur += 1
return self.transformer.preprocess(image, label)
class SimpleTransformer:
"""
SimpleTransformer is a simple class for preprocessing and deprocessing
images for caffe.
"""
def __init__(self, params):
SimpleTransformer.check_params(params)
self.mean = params['mean']
self.is_mirror = params['mirror']
self.crop_h, self.crop_w = params['crop_size']
self.scale = params['scale']
self.phase = params['phase']
self.ignore_label = params['ignore_label']
def set_mean(self, mean):
"""
Set the mean to subtract for centering the data.
"""
self.mean = mean
def set_scale(self, scale):
"""
Set the data scaling.
"""
self.scale = scale
def generate_scale_label(self, image, label):
f_scale = 0.5 + random.randint(0, 8) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
return image, label
def pre_test_image(self, image):
image = np.asarray(image, np.float32)
image -= self.mean
image *= self.scale
img_h, img_w, _ = image.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0,0.0,0.0))
else:
img_pad = image
img_h, img_w, _ = img_pad.shape
h_off = (img_h - self.crop_h) / 2
w_off = (img_w - self.crop_w) / 2
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
#image = image[:, :, ::-1] # change to BGR
image = image.transpose((2, 0, 1))
return image
def preprocess(self, image, label):
"""
        preprocess() emulates the pre-processing occurring in the vgg16 caffe
prototxt.
"""
# image = cv2.convertTo(image, cv2.CV_64F)
image, label = self.generate_scale_label(image, label)
image = np.asarray(image, np.float32)
image -= self.mean
image *= self.scale
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
if self.phase == 'Train':
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
else:
h_off = (img_h - self.crop_h) / 2
w_off = (img_w - self.crop_w) / 2
# roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
#image = image[:, :, ::-1] # change to BGR
image = image.transpose((2, 0, 1))
if self.is_mirror:
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image, label
@classmethod
def check_params(cls, params):
if 'crop_size' not in params:
params['crop_size'] = (505, 505)
if 'mean' not in params:
params['mean'] = [128, 128, 128]
if 'scale' not in params:
params['scale'] = 1.0
if 'mirror' not in params:
params['mirror'] = False
if 'phase' not in params:
params['phase'] = 'Train'
if 'ignore_label' not in params:
params['ignore_label'] = 255
def print_info(name, params):
"""
    Output some info regarding the class
"""
print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
name,
params['source'],
params['batch_size'],
params['crop_size'])
if __name__ == '__main__':
params = {'batch_size': 2,
'mean': (104.008, 116.669, 122.675),
'root_folder': 'D:/v-zihuan/segmentation_with_scale/experiment/voc_part/data/',
'source': 'D:/v-zihuan/segmentation_with_scale/experiment/voc_part/list/train_3s.txt',
'mirror': True,
'crop_size': (505, 505)}
t = SimpleTransformer(params)
image = Image.open(r'D:/v-zihuan/segmentation_with_scale/experiment/voc_part/data/images/2008_000003.jpg')
label = Image.open(r'D:/v-zihuan/segmentation_with_scale/experiment/voc_part/data/part_mask_scale_3/2008_000003.png')
t.preprocess(image, label)
|
StarcoderdataPython
|
12810988
|
from .basic_transform import BasicTransform
from typing import List
import numpy as np
class Translation (BasicTransform):
"""
    Translates the image along the x- and y-axes and fills the remaining part of the image with the chosen color.
Parameters
----------
shift_along_x : int
Distance of the shift along x-axis.
shift_along_y : int
Distance of the shift along y-axis.
color_value : int
The chosen color for the remaining part of the image.
"""
def __init__(self, shift_along_x, shift_along_y, color_value):
self.shift_along_y = shift_along_y
self.shift_along_x = shift_along_x
self.color_value = color_value
def transform(self, images: List[np.ndarray]):
self.check_images(images)
res = []
for im in images:
height, width = im.shape[:2]
if self.shift_along_y > height or self.shift_along_x > width:
raise ValueError("shift_along_y and shift_along_x should be less then height and width of the image "
"respectively")
output_im = np.zeros(im.shape, dtype='u1')
output_im[:] = self.color_value
output_im[self.shift_along_y:, self.shift_along_x:] = im[:height - self.shift_along_y, :width-self.shift_along_x]
res.append(output_im)
return res
class TranslationX(BasicTransform):
"""
Translates the image along the x-axis then adds a padding
corresponding to the color value chosen.
Parameters
----------
shift_along_x : int
Distance of the shift along x-axis.
color_value : int
The chosen color for the padding
"""
def __init__(self, shift_along_x, color_value):
self.shift_along_x = shift_along_x
self.color_value = color_value
def transform(self, images: List[np.ndarray]):
self.check_images(images)
res = []
for im in images:
height, width = im.shape[:2]
if self.shift_along_x > width:
raise ValueError("shift_along_x should be less then width of the image")
output_im = np.zeros(im.shape, dtype='u1')
output_im[:] = self.color_value
output_im[:, self.shift_along_x:] = im[:, :width - self.shift_along_x]
res.append(output_im)
return res
class TranslationY(BasicTransform):
"""
Translates the image along the y-axis and colors the remaining part of the image with the chosen color.
Parameters
----------
shift_along_y : int
Distance of the shift along y-axis.
color_value : int
The chosen color for the remaining part of the image.
"""
def __init__(self, shift_along_y, color_value):
self.shift_along_y = shift_along_y
self.color_value = color_value
def transform(self, images: List[np.ndarray]):
self.check_images(images)
res = []
for im in images:
height, width = im.shape[:2]
if self.shift_along_y > height:
raise ValueError("shift_along_y should be less then height of the image")
output_im = np.zeros(im.shape, dtype='u1')
output_im[:] = self.color_value
output_im[self.shift_along_y:, :] = im[:height - self.shift_along_y, :]
res.append(output_im)
return res
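# Minimal usage sketch (illustrative only, shown as comments because this module
# imports BasicTransform relatively and is not meant to be run directly):
#
#   import numpy as np
#   img = np.zeros((32, 32, 3), dtype='u1')
#   shifted, = Translation(shift_along_x=5, shift_along_y=10, color_value=255).transform([img])
#   # the top 10 rows and left 5 columns of 'shifted' are filled with 255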
|
StarcoderdataPython
|
3425668
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mobilenet import mobilenet_v2, model_urls as mobilenet_urls
from .vgg import (
vgg11,
vgg11_bn,
vgg13,
vgg13_bn,
vgg16,
vgg16_bn,
vgg19,
vgg19_bn,
model_urls as vgg_urls
)
from .mnasnet import (
mnasnet0_5,
mnasnet0_75,
mnasnet1_0,
mnasnet1_3,
model_urls as mnasnet_urls
)
from .densenet import (
densenet121,
densenet161,
densenet169,
densenet201,
model_urls as densenet_urls
)
from .resnet import (
resnet18,
resnet34,
resnet50,
resnet101,
resnet152,
resnext50_32x4d,
resnext101_32x8d,
wide_resnet50_2,
wide_resnet101_2,
model_urls as resnet_urls
)
architectures = {'mobilenet_v2': mobilenet_v2,
'vgg11': vgg11,
'vgg11_bn': vgg11_bn,
'vgg13': vgg13,
'vgg13_bn': vgg13_bn,
'vgg16': vgg16,
'vgg16_bn': vgg16_bn,
'vgg19': vgg19,
'vgg19_bn': vgg19_bn,
'mnasnet0_5': mnasnet0_5,
'mnasnet0_75': mnasnet0_75,
'mnasnet1_0': mnasnet1_0,
'mnasnet1_3': mnasnet1_3,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x8d': resnext101_32x8d,
'wide_resnet50_2': wide_resnet50_2,
'wide_resnet101_2': wide_resnet101_2}
def get_builtin_architectures():
return list(architectures.keys())
def get_pretrained_architectures():
return list(vgg_urls.keys()) +\
list(mobilenet_urls.keys()) +\
list(mnasnet_urls.keys()) +\
list(densenet_urls.keys()) +\
list(resnet_urls.keys())
|
StarcoderdataPython
|
345160
|
<filename>txros/src/txros/test/test_nodehandle.py
from __future__ import division
from twisted.internet import defer
from twisted.trial import unittest
from txros import util
from txros.test import util as test_util
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_creation(self):
yield test_util.call_with_nodehandle(lambda nh: defer.succeed(None))
@defer.inlineCallbacks
def test_params(self):
@defer.inlineCallbacks
def f(nh):
k = '/my_param'
v = ['hi', 2]
assert not (yield nh.has_param(k))
yield nh.set_param(k, v)
assert (yield nh.has_param(k))
assert (yield nh.get_param(k)) == v
yield nh.delete_param(k)
assert not (yield nh.has_param(k))
yield test_util.call_with_nodehandle(f)
@defer.inlineCallbacks
def test_advertise(self):
@defer.inlineCallbacks
def f(nh):
from std_msgs.msg import Int32
pub = nh.advertise('/my_topic', Int32, latching=True)
pub.publish(Int32(42))
sub = nh.subscribe('/my_topic', Int32)
yield sub.get_next_message()
assert sub.get_last_message().data == 42
yield test_util.call_with_nodehandle(f)
@defer.inlineCallbacks
def test_service(self):
@defer.inlineCallbacks
def f(nh):
from roscpp_tutorials.srv import TwoInts, TwoIntsRequest, TwoIntsResponse
@util.cancellableInlineCallbacks
def callback(req):
yield util.wall_sleep(.5)
defer.returnValue(TwoIntsResponse(sum=req.a + req.b))
nh.advertise_service('/my_service', TwoInts, callback)
s = nh.get_service_client('/my_service', TwoInts)
yield s.wait_for_service()
assert (yield s(TwoIntsRequest(a=10, b=30))).sum == 40
assert (yield s(TwoIntsRequest(a=-10, b=30))).sum == 20
yield test_util.call_with_nodehandle(f)
@defer.inlineCallbacks
def test_simulated_time(self):
@defer.inlineCallbacks
def f(nh):
import time
t1 = time.time()
yield nh.sleep(10)
t2 = time.time()
assert t2 - t1 < 5
yield test_util.call_with_nodehandle_sim_time(f)
|
StarcoderdataPython
|
95975
|
<reponame>rafi16jan/rapyd-framework<gh_stars>10-100
from javascript import Object, asynchronous
from .. import get_db, tools, data
@asynchronous
def init(promise, app):
get_db()
promise.wait()
data.run().wait()
Object.get('Module', 'mount_component').call(app.toRef())
return
def init_compile():
#tools.register_models()
return
|
StarcoderdataPython
|
3282337
|
from .utils import check_interact, check_close, check_record
from core import Env
from core.env import env_dict
def test_nes(MockAgent):
for name in [key for key, val in env_dict.items() if "procgen" in str(val)]:
env = Env(name)
agent = MockAgent(env.state_size, env.action_size, env.action_type)
run_step = 10
# test interact
check_interact(env, agent, run_step)
# test close
check_close(env)
# test record
check_record(env)
|
StarcoderdataPython
|
6569510
|
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in tinymce_editor/__init__.py
from tinymce_editor import __version__ as version
setup(
name='tinymce_editor',
version=version,
description='tinyMCE text editor',
author='Shridhar',
author_email='<EMAIL>',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
StarcoderdataPython
|
18220
|
<gh_stars>0
import requests
from lxml import html
from bs4 import BeautifulSoup
import json
import codecs
import re
#In this variable I will store the information as a dictionary with this structure:
# {number : "Name"}
ms_dict = {}
links_dict = {"links" : []}
for index in range(1,27000):
print(index)
page = requests.get('http://www.handschriftencensus.de/'+ str(index))
c = page.content
soup = BeautifulSoup(c, "lxml")
ms_label = soup.find_all("th", class_="ort")
if len(ms_label) > 0:
ms_label = ms_label[0].text.rstrip()
ms_dict[ "h" + str(index)] = ms_label
inhalt = soup.find_all("a", class_="aw")
for el in inhalt:
work_id = re.findall('/\d+$', el['href'])[0][1:]
links_dict['links'].append( { "source": "h" + str(index), "target": "w" + work_id } )
# In td id="inhalt" get the href, and only the number. Create the links at the same time
# work = work[0].text
# work = work.replace("'","")
# final_dict[index +1] = {"title":work}
#
# signaturen = soup.find_all("ol", class_="signaturen")
# if len(signaturen) > 0:
# final_dict[index+1]["manuscripts"] = []
# signaturen = signaturen[0]
# for elem in signaturen:
# if len(elem) > 1:
# manuscript = elem.find_all("a")[0]
#
# final_dict[index+1]["manuscripts"].append(manuscript.text)
index = index + 1
#Save data as json
with codecs.open('manuscripts_ids.json', 'w', 'utf-8') as outfile:
json.dump(ms_dict,outfile, indent=2)
with codecs.open('links.json', 'w', 'utf-8') as outfile:
json.dump(links_dict,outfile, indent=2)
#To save the data as a csv
# table = pd.DataFrame.from_dict(final_dict, orient='index')
# table.to_csv("Handschriftencensus_full.csv", encoding="utf-8")
|
StarcoderdataPython
|
6423059
|
import matplotlib.pyplot as plt
sports = ['Biathlon', 'Bobsleigh', 'Curling', 'Ice Hockey', 'Luge', 'Skating', 'Skiing']
medals = [27, 26, 30, 214, 8, 121, 153]
width = 0.35
x_labels = [0, 50, 100, 150, 200]
y_labels = ['Biathlon', 'Bobsleigh', 'Curling', 'Ice Hockey', 'Luge', 'Skating', 'Skiing']
plt.xlabel("Sports")
plt.ylabel("Medals")
plt.bar(sports, medals)
plt.show()
|
StarcoderdataPython
|
317744
|
import re
class VE(object):
""" Create a VE object """
def __init__(self, lines):
self.id = 0
self.name = ""
self.ospf = dict()
self.ipaddress = []
self.helper = []
self.pim = 0
self.mtu = 0
self.p2p = 0
self.p2p6 = 0
self.ip6address = []
self.mtu6 = 0
self.ospf3 = dict()
for l in lines:
r1 = re.match("^interface ve (\d+)", l)
r2 = re.match("\s+port-name (.*)", l)
r3 = re.match("\s+ip ospf (area|cost|dead-interval|hello-interval) (\S+)", l)
r4 = re.match("\s+ipv6 ospf (area|cost|dead-interval|hello-interval) (\S+)", l)
r5 = re.match("\s+ip address (\S+)", l)
r6 = re.match("\s+ipv6 address (\S+)", l)
r7 = re.match("\s+ip helper-address (\S+)", l)
r8 = re.match("\s+ip pim-sparse", l)
r9 = re.match("\s+ip ospf network point-to-point", l)
r10 = re.match("\s+ip mtu (\d+)", l)
r11 = re.match("\s+ipv6 enable", l)
r12 = re.match("\s+ipv6 mtu (\d+)", l)
r13 = re.match("\s+ipv6 ospf network point-to-point", l)
r14 = re.match("\s+ipv6 nd suppress-ra", l)
r15 = re.match("\s+bandwidth (\d+)", l)
r16 = re.match("\s+vrf forwarding (\S+)", l)
if r1:
self.id = r1.group(1)
elif r2:
self.name = r2.group(1)
elif r3:
self.ospf[r3.group(1)] = r3.group(2)
elif r4:
self.ospf[r4.group(1)] = r4.group(2)
elif r5:
self.ipaddress.append(r5.group(1))
elif r6:
self.ip6address.append(r6.group(1))
elif r7:
self.helper.append(r7.group(1))
elif r8:
self.pim = 1
elif r9:
self.p2p = 1
elif r10:
self.mtu = r10.group(1)
elif r11:
pass
elif r12:
self.mtu = r12.group(1)
elif r13:
self.p2p6 = 1
elif r14:
                # FIXME: is there anything to do?
pass
elif r15:
# Nothing to do with bandwidth
pass
elif r16:
# FIXME: No support for VRF for the time being
pass
else:
print("* Warning line skipped in VE: %s" % l.strip("\n"))
def __repr__(self):
return "ve %s (%s) ipaddress: %s" % (self.id, self.name, self.ipaddress)
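if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): parse a small 've' interface
    # block as it would appear in a configuration dump.
    sample_lines = [
        "interface ve 10\n",
        " port-name uplink-to-core\n",
        " ip address 192.0.2.1/24\n",
        " ip ospf area 0\n",
        " ip mtu 9000\n",
    ]
    print(VE(sample_lines))  # ve 10 (uplink-to-core) ipaddress: ['192.0.2.1/24']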
|
StarcoderdataPython
|
12837583
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-04 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('intake', '0030_add_visitor_model'),
]
operations = [
migrations.AlterModelOptions(
name='applicant',
options={'permissions': (('view_app_stats', 'Can see detailed aggregate information about apps'),)},
),
]
|
StarcoderdataPython
|
5100345
|
<reponame>vkpro-forks/python-zhmcclient
# Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Function tests for HMC credentials file.
"""
from __future__ import absolute_import, print_function
import requests.packages.urllib3
import pytest
import zhmcclient
from tests.common.utils import HmcCredentials, info
requests.packages.urllib3.disable_warnings()
class TestHMCCredentialsFile(object):
"""
Test your HMC credentials file, if you have one at the default location.
"""
def setup_method(self):
self.hmc_creds = HmcCredentials()
def test_1_format(self, capsys):
"""Test the format of the HMC credentials file."""
cpc_items = self.hmc_creds.get_cpc_items()
if cpc_items is None:
pytest.skip("HMC credentials file not found: %r" %
self.hmc_creds.filepath)
return
assert len(cpc_items) > 0
@pytest.mark.skip("Disabled contacting all HMCs in credentials file")
def test_2_hmcs(self, capsys):
"""
Check out the HMCs specified in the HMC credentials file.
Skip HMCs that cannot be contacted.
"""
cpc_items = self.hmc_creds.get_cpc_items()
if cpc_items is None:
pytest.skip("HMC credentials file not found: %r" %
self.hmc_creds.filepath)
return
rt_config = zhmcclient.RetryTimeoutConfig(
connect_timeout=10,
connect_retries=1,
)
# Check HMCs and their CPCs
for cpc_name in cpc_items:
cpc_item = cpc_items[cpc_name]
hmc_host = cpc_item['hmc_host']
info(capsys, "Checking HMC %r for CPC %r", (hmc_host, cpc_name))
session = zhmcclient.Session(
hmc_host, cpc_item['hmc_userid'], cpc_item['hmc_password'],
retry_timeout_config=rt_config)
client = zhmcclient.Client(session)
try:
session.logon()
except zhmcclient.ConnectionError as exc:
info(capsys, "Skipping HMC %r for CPC %r: %s",
(hmc_host, cpc_name, exc))
continue
cpcs = client.cpcs.list()
cpc_names = [cpc.name for cpc in cpcs]
if cpc_name not in cpc_names:
raise AssertionError(
"CPC {!r} not found in HMC {!r}.\n"
"Existing CPCs: {!r}".
format(cpc_name, hmc_host, cpc_names))
session.logoff()
|
StarcoderdataPython
|
147894
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-09 21:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('filer', '0010_auto_20180414_2058'),
('cms', '0020_old_tree_cleanup'),
('core', '0024_staffmember_categories'),
]
operations = [
migrations.RenameModel('InstructorListPluginModel','StaffMemberListPluginModel'),
migrations.RenameField(
model_name='instructor',
old_name='staffmember_ptr',
new_name='staffMember',
),
migrations.AlterField(
model_name='instructor',
name='staffMember',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='core.StaffMember', verbose_name='Staff member'),
preserve_default=False,
),
migrations.RemoveField(
model_name='staffmember',
name='polymorphic_ctype',
),
migrations.AlterField(
model_name='instructor',
name='availableForPrivates',
field=models.BooleanField(default=True, help_text='Check this box if you would like to be listed as available for private lessons from students.', verbose_name='Available for private lessons'),
),
migrations.AlterModelOptions(
name='instructor',
options={'permissions': (('update_instructor_bio', "Can update instructors' bio information"), ('view_own_instructor_stats', "Can view one's own statistics (if an instructor)"), ('view_other_instructor_stats', "Can view other instructors' statistics"), ('view_own_instructor_finances', "Can view one's own financial/payment data (if a staff member)"), ('view_other_instructor_finances', "Can view other staff members' financial/payment data")), 'verbose_name': 'Instructor', 'verbose_name_plural': 'Instructors'},
),
migrations.AlterField(
model_name='staffmemberlistpluginmodel',
name='activeUpcomingOnly',
field=models.BooleanField(default=False, verbose_name='Include only staff members with upcoming classes/events'),
),
migrations.AlterField(
model_name='staffmemberlistpluginmodel',
name='bioRequired',
field=models.BooleanField(default=False, verbose_name='Exclude staff members with no bio'),
),
migrations.AlterField(
model_name='staffmemberlistpluginmodel',
name='cmsplugin_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='core_staffmemberlistpluginmodel', serialize=False, to='cms.CMSPlugin'),
),
migrations.AlterField(
model_name='staffmemberlistpluginmodel',
name='photoRequired',
field=models.BooleanField(default=False, verbose_name='Exclude staff members with no photo'),
),
migrations.AlterField(
model_name='staffmemberlistpluginmodel',
name='statusChoices',
field=multiselectfield.db.fields.MultiSelectField(choices=[('R', 'Regular Instructor'), ('A', 'Assistant Instructor'), ('T', 'Instructor-in-training'), ('G', 'Guest Instructor'), ('Z', 'Former Guest Instructor'), ('X', 'Former/Retired Instructor'), ('H', 'Publicly Hidden')], default=['R', 'A', 'G'], max_length=13, verbose_name='Limit to Instructors with Status'),
),
migrations.AlterField(
model_name='instructor',
name='status',
field=models.CharField(choices=[('R', 'Regular Instructor'), ('A', 'Assistant Instructor'), ('T', 'Instructor-in-training'), ('G', 'Guest Instructor'), ('Z', 'Former Guest Instructor'), ('X', 'Former/Retired Instructor'), ('H', 'Publicly Hidden')], default='H', help_text='Instructor status affects the visibility of the instructor on the site, but is separate from the "categories" of event staffing on which compensation is based.', max_length=1, verbose_name='Instructor status'),
),
migrations.AlterField(
model_name='staffmember',
name='categories',
field=models.ManyToManyField(blank=True, help_text='When choosing staff members, the individuals available to staff will be limited based on the categories chosen here. If the individual is an instructor, also be sure to set the instructor information below.', to='core.EventStaffCategory', verbose_name='Included in staff categories'),
),
migrations.CreateModel(
name='SeriesStaffMember',
fields=[
],
options={
'verbose_name': 'Series staff member',
'verbose_name_plural': 'Series staff members',
'proxy': True,
'indexes': [],
},
bases=('core.eventstaffmember',),
),
migrations.AlterModelOptions(
name='staffmember',
options={'ordering': ('lastName', 'firstName'), 'permissions': (('view_staff_directory', 'Can access the staff directory view'), ('view_school_stats', "Can view statistics about the school's performance."), ('can_autocomplete_staffmembers', 'Able to use customer and staff member autocomplete features (in admin forms)')), 'verbose_name': 'Staff member', 'verbose_name_plural': 'Staff members'},
),
]
|
StarcoderdataPython
|
9646827
|
import traceback
import gevent
import structlog
from gevent.event import AsyncResult
log = structlog.get_logger(__name__)
def raise_on_failure(raiden_apps, test_function, **kwargs):
"""Wait on the result for the test function and any of the apps.
This utility should be used for happy path testing with more than one app.
This will raise if any of the apps is killed.
"""
result = AsyncResult()
    # Do not use `link` or `link_value`, an app can be stopped to test restarts.
for app in raiden_apps:
assert app.raiden, "The RaidenService must be started"
app.raiden.link_exception(result)
test_greenlet = gevent.spawn(test_function, **kwargs)
test_greenlet.link(result)
# Returns if either happens:
# - The test finished (successfully or not)
# - One of the apps crashed during the test
try:
result.get()
except: # noqa
# Print the stack trace of the running test to know in which line the
# test is waiting.
#
# This may print a duplicated stack trace, when the test fails.
log.exception(
"Test failed",
test_traceback="".join(traceback.format_stack(test_greenlet.gr_frame)),
all_tracebacks="\n".join(gevent.util.format_run_info()),
)
raise
|
StarcoderdataPython
|
6602556
|
<gh_stars>100-1000
from rx import config
from rx.core import Observer, ObservableBase, Disposable
from rx.internal import DisposedException
from .anonymoussubject import AnonymousSubject
from .innersubscription import InnerSubscription
class Subject(ObservableBase, Observer):
"""Represents an object that is both an observable sequence as well as an
observer. Each notification is broadcasted to all subscribed observers.
"""
def __init__(self):
super(Subject, self).__init__()
self.is_disposed = False
self.is_stopped = False
self.observers = []
self.exception = None
self.lock = config["concurrency"].RLock()
def check_disposed(self):
if self.is_disposed:
raise DisposedException()
def _subscribe_core(self, observer):
with self.lock:
self.check_disposed()
if not self.is_stopped:
self.observers.append(observer)
return InnerSubscription(self, observer)
if self.exception:
observer.on_error(self.exception)
return Disposable.empty()
observer.on_completed()
return Disposable.empty()
def on_completed(self):
"""Notifies all subscribed observers of the end of the sequence."""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
self.observers = []
self.is_stopped = True
if os:
for observer in os:
observer.on_completed()
def on_error(self, exception):
"""Notifies all subscribed observers with the exception.
Keyword arguments:
        exception -- The exception to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
self.observers = []
self.is_stopped = True
self.exception = exception
if os:
for observer in os:
observer.on_error(exception)
def on_next(self, value):
"""Notifies all subscribed observers with the value.
Keyword arguments:
value -- The value to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
if os:
for observer in os:
observer.on_next(value)
def dispose(self):
"""Unsubscribe all observers and release resources."""
with self.lock:
self.is_disposed = True
self.observers = None
@classmethod
def create(cls, observer, observable):
return AnonymousSubject(observer, observable)
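# Illustrative sketch (not part of the original module): basic Subject usage.
# It assumes the callback-style subscribe(on_next=..., on_completed=...) API
# exposed by ObservableBase in this version of RxPY.
def _subject_demo():
    received = []
    subject = Subject()
    subject.subscribe(on_next=received.append,
                      on_completed=lambda: received.append("done"))
    subject.on_next(42)      # broadcast a value to all subscribed observers
    subject.on_completed()   # end the sequence
    subject.dispose()        # release observers and resources
    return received          # [42, "done"]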
|
StarcoderdataPython
|
6456501
|
<gh_stars>1-10
import destructify
import enum
from binascii import crc32
class ChunkType(destructify.PseudoMemberEnumMixin, enum.Enum):
IHDR = "IHDR"
IEND = "IEND"
TEXT = "tEXt"
PHYS = "pHYs"
PLTE = "PLTE"
class ColorType(enum.IntEnum):
GrayScale = 0
RGB = 2
Palette = 3
GrayScaleAlpha = 4
RGBA = 6
class InterlaceMethod(enum.IntEnum):
NoInterlace = 0
Adam7 = 1
class PngChunk_IHDR(destructify.Structure):
width = destructify.IntegerField(4, "big")
height = destructify.IntegerField(4, "big")
bit_depth = destructify.IntegerField(1)
color_type = destructify.EnumField(destructify.IntegerField(1), ColorType)
compression_method = destructify.IntegerField(1)
filter_method = destructify.IntegerField(1)
interlace_method = destructify.EnumField(destructify.IntegerField(1), InterlaceMethod)
class PngChunk_tEXt(destructify.Structure):
keyword = destructify.StringField(terminator=b"\x00", encoding="latin1")
text = destructify.StringField(length=lambda f: f._.length - (len(f.keyword) + 1), encoding="latin1")
class PaletteEntry(destructify.Structure):
red = destructify.IntegerField(length=1)
green = destructify.IntegerField(length=1)
blue = destructify.IntegerField(length=1)
class PngChunk_PLTE(destructify.Structure):
palettes = destructify.ArrayField(destructify.StructureField(PaletteEntry), length=-1)
class PhysUnit(enum.IntEnum):
Unknown = 0
Meter = 1
class PngChunk_pHYs(destructify.Structure):
pixels_per_unit_x = destructify.IntegerField(4, "big")
pixels_per_unit_y = destructify.IntegerField(4, "big")
unit = destructify.EnumField(destructify.IntegerField(1), PhysUnit)
def calculate_crc(f):
crc = 0
crc = crc32(f._context.fields['chunk_type'].raw, crc)
crc = crc32(f._context.fields['chunk_data'].raw, crc)
return crc
class PngChunk(destructify.Structure):
length = destructify.IntegerField(4, "big")
chunk_type = destructify.EnumField(destructify.StringField(length=4, encoding="ascii"), enum=ChunkType)
chunk_data = destructify.SwitchField(
cases={
ChunkType.IHDR: destructify.StructureField(PngChunk_IHDR, length='length'),
ChunkType.IEND: destructify.ConstantField(b""),
ChunkType.TEXT: destructify.StructureField(PngChunk_tEXt, length='length'),
ChunkType.PHYS: destructify.StructureField(PngChunk_pHYs, length='length'),
ChunkType.PLTE: destructify.StructureField(PngChunk_PLTE, length='length'),
},
switch="chunk_type",
other=destructify.FixedLengthField("length")
)
crc = destructify.IntegerField(4, "big", override=lambda f, v: calculate_crc(f))
class Meta:
capture_raw = True
checks = [
lambda f: f._context.fields['chunk_data'].length == f.length,
lambda f: calculate_crc(f) == f.crc,
]
class PngFile(destructify.Structure):
magic = destructify.ConstantField(b"\x89PNG\r\n\x1a\n")
chunks = destructify.ArrayField(destructify.StructureField(PngChunk), length=-1,
until=lambda c, v: v.chunk_type == "IEND")
class Meta:
checks = [
lambda f: f.chunks[0].chunk_type == ChunkType.IHDR,
lambda f: f.chunks[-1].chunk_type == ChunkType.IEND,
]
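# Illustrative sketch (not part of the original script): inspecting PNG bytes
# without the GUI. It assumes destructify's Structure.from_bytes() API; the
# caller supplies real PNG data read from a file.
def summarize_png(data):
    """Return (chunk_type, length) pairs for every chunk in the PNG bytes."""
    png = PngFile.from_bytes(data)
    return [(chunk.chunk_type, chunk.length) for chunk in png.chunks]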
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input")
args = parser.parse_args()
with open(args.input, "rb") as f:
destructify.gui.show(PngFile, f)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
12860709
|
# Import the required 'libraries' for pin definitions and PWM
from machine import Pin, PWM
# Also import a subset for sleep and millisecond sleep. If you just import
# the utime you will have to prefix each call with "utime."
from utime import sleep, sleep_ms
# Define what the buzzer object is - a PWM output on pin 15
buzzer = PWM(Pin(15))
# A list of frequencies
tones = (200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1100, 1200, 1400, 1500)
# Define the function to play a single tone then stop
def buzz(freq):
    # Set the frequency
buzzer.freq(freq)
# Set the duty cycle (affects volume)
buzzer.duty_u16(15000);
# Let the sound continue for X milliseconds
sleep_ms(30);
# Now switch the sound off
buzzer.duty_u16(0);
# And delay a small amount (gap between tones)
sleep_ms(20);
# Define a similar function with no delay between tones
def buzz2(freq):
buzzer.freq(freq)
buzzer.duty_u16(15000);
# Now sound the tones, one after the other
for tone in range(len(tones)):
buzz(tones[tone])
# Small gap in SECONDS after the ascending tones
sleep(1)
# Don't do this, it puts the device into Deep Sleep but it reboots on wakeup just
# like the ESP8266
#machine.deepsleep(1)
# Now sound the tones IN REVERSE ORDER ie descending
for tone in range(len(tones) -1, -1, -1):
buzz(tones[tone])
# Another delay
sleep(1)
# Now sound ALL the frequencies from X to Y
for tone in range(500, 2500):
buzz2(tone)
sleep_ms(5)
buzzer.duty_u16(0);
# And repeat in reverse order
for tone in range(2500, 500, -1):
buzz2(tone)
sleep_ms(4)
buzzer.duty_u16(0);
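# Illustrative sketch (not in the original script): the same idea wrapped in a
# reusable helper so any list of frequencies can be played with one call. It
# only uses the PWM methods already shown above (freq / duty_u16) and sleep_ms.
def play_sequence(freqs, tone_ms=30, gap_ms=20, volume=15000):
    for f in freqs:
        buzzer.freq(f)           # select the tone
        buzzer.duty_u16(volume)  # switch the sound on
        sleep_ms(tone_ms)        # hold the tone
        buzzer.duty_u16(0)       # switch the sound off
        sleep_ms(gap_ms)         # short gap before the next tone
# Example: play the ascending scale again using the helper.
# play_sequence(tones)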
|
StarcoderdataPython
|
3439683
|
_base_ = [
'../../_base_/datasets/cifar100/metrics_umap.py',
'vit_small_dec6_dim512_8xb64_accu8_cos_ep1000.py',
]
|
StarcoderdataPython
|
11291655
|
<reponame>SilasPDJ/autoesk<filename>default/settings/set_paths.py<gh_stars>0
from .now import Now
class SetPaths(Now):
# the class Now IS NOT large
def __get_atual_competencia_file(self):
import os
f = '\\get_atual_competencia.txt'
dir_only = os.path.dirname(__file__)
project_dir = '\\'.join(dir_only.split('\\')[:-2])
        # Count from the end of the path, in case one day this ends up in a root directory
tot = project_dir + f
return tot
def files_get_anexos(self, client, file_type='pdf', upload=False):
"""
        :param client: name of the folder where the files are organized by date dd-mm-yyyy
        :param file_type: extension of the files to attach
        :param upload: False -> e-mail the files! True -> upload them!
:return: pdf_files or whatever
# _files_path
"""
import os
from email.mime.application import MIMEApplication
# compt, excel_file_name = self.compt_and_filename()
compt_and_file = self.compt_and_filename()
path = self._files_path_v3(client, wexplorer_tup=compt_and_file)
# print(path, '\nPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATH', year)
volta = os.getcwd()
os.chdir(path)
list_returned = os.listdir()
pdf_files = list()
for fname in list_returned:
if fname.lower().endswith(f'.{file_type}'):
if not upload:
file_opened = MIMEApplication(open(fname, 'rb').read())
file_opened.add_header('Content-Disposition', 'attachment', filename=fname)
pdf_files.append(file_opened)
else:
pdf_files.append(f'{os.getcwd()}\\{fname}')
os.chdir(volta)
print(os.getcwd())
return pdf_files
def files_get_anexos_v2(self, client, file_type='pdf', wexplorer_tup=None, upload=False):
"""
        :param client: name of the folder where the files are organized by date dd-mm-yyyy
:param file_type: file annexed type
:param wexplorer_tup: ... ctrl+F me
:param upload: False -> email it! True: upload it!
:return: pdf_files or whatever
# _files_path
"""
import os
from email.mime.application import MIMEApplication
# compt, excel_file_name = self.compt_and_filename()
if wexplorer_tup is None:
compt_and_file_anexos = self.compt_and_filename()
else:
compt_and_file_anexos = wexplorer_tup
path = self._files_path_v3(client, wexplorer_tup=compt_and_file_anexos)
# print(path, '\nPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATH', year)
volta = os.getcwd()
os.chdir(path)
list_returned = os.listdir()
pdf_files = list()
for fname in list_returned:
if fname.lower().endswith(f'.{file_type}'):
if upload:
file_opened = MIMEApplication(open(fname, 'rb').read())
file_opened.add_header('Content-Disposition', 'attachment', filename=fname)
pdf_files.append(file_opened)
else:
pdf_files.append(f'{os.getcwd()}\\{fname}')
os.chdir(volta)
print(os.getcwd())
return pdf_files
def compt_and_filename(self):
"""
        :return: already set compt and file_names; COMPT and file_names defined earlier, read from a file
##########################################################################
"""
from time import sleep
# compt, excel_file_name = 'biri', 'biri'
try:
sleep(1)
with open(self.__get_atual_competencia_file(), 'r') as f:
compt, excel_file_name = f.read().splitlines()
except FileNotFoundError:
# raise FileNotFoundError('\033[1;31mfile not existence\033[m')
return self.set_get_compt_file(m_cont=-1)
else:
return compt, excel_file_name
def file_wtp_only1(self):
import os
filepath = os.path.realpath(__file__)
os.path.join('\\'.join(filepath.split('\\')[:-1]))
file_with_name = 'with_titlePATH.txt'
try:
f = open(f'{file_with_name}', 'r')
a = f.read()
a = a.split('/')
a = '/'.join(a)
returned = a
f.close()
except FileNotFoundError:
            print('WITH TITLE PATH NOT EXISTENT')
returned = self.select_sheets_path_if_not_exists()
return returned
def select_sheets_path_if_not_exists(self):
from tkinter import Tk, filedialog, messagebox
        root = Tk()
        root.withdraw()
file_with_name = 'with_titlePATH.txt'
# sh_management = SheetPathManager(file_with_name)
way = None
while way is None:
way = filedialog.askdirectory(title='SELECIONE ONDE ESTÃO SUAS PLANILHAS')
if len(way) <= 0:
way = None
resp = messagebox.askokcancel('ATENÇÃO!', message='Favor, selecione uma pasta ou clique em CANCELAR.')
if not resp:
break
else:
wf = open(file_with_name, 'w')
wf.write(way)
root.quit()
return way
def set_get_compt_file(self=None, m_cont=0, y_cont=0, past_only=True, file_type='xlsx', open_excel=False):
"""
        :param int m_cont: how many months back? (0 = current)
        :param int y_cont: how many years back? (0 = current)
        :param bool past_only: True -> past only (multiplies by -1), False -> no multiplication
:param any file_type: None -> (((DOES NOTHING))) update self.__get_atual_competencia_file
:param open_excel: if True => OPENS EXCEL FILE
:return: competencia & excel_path
        # responsive: also returns the path and the compt for the PATH variable of self._files_path_v2
"""
compt = self.get_compt_only(m_cont, y_cont, past_only)
path = self.file_wtp_only1()
        # it's the same as the one above, but I'm still experimenting
if file_type:
excel_file_path_updated = r'{}/{}.{}'.format(path, compt, file_type)
with open(self.__get_atual_competencia_file(), 'w') as f:
for line in [compt, excel_file_path_updated]:
# print(compt)
f.write(line + '\n')
if open_excel:
from pgdas_fiscal_oesk.main_excel_manager.main_excel_manager import SheetPathManager
spm = SheetPathManager()
spm.new_xlsxcompt_from_padrao_if_not_exists((compt, excel_file_path_updated))
spm.save_after_changes((compt, excel_file_path_updated))
return compt, excel_file_path_updated
return compt
def get_compt_only(self, m_cont=-1, y_cont=0, past_only=True, sep='-'):
from datetime import datetime as dt
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
month = dt.now().month
year = dt.now().year
now_date = date(year, month, 1)
if past_only:
m_cont = m_cont * (-1) if m_cont > 0 else m_cont
y_cont = y_cont * (-1) if y_cont > 0 else y_cont
# force to be negative
now_date = now_date + relativedelta(months=m_cont)
now_date = now_date + relativedelta(years=y_cont)
month, year = now_date.month, now_date.year
compt = f'{month:02d}{sep}{year}'
return compt
# @staticmethod
def get_last_business_day_of_month(self, month=None, year=None):
from calendar import monthrange
from datetime import datetime
if month is None:
month = datetime.now().month
if year is None:
year = datetime.now().year
init = monthrange(year, month)
ultimo_day = init[1]
business_date = datetime(year, month, ultimo_day)
weekday = business_date.weekday()
while weekday > 4:
now_day = business_date.day
business_date = business_date.replace(day=now_day - 1)
weekday = business_date.weekday()
returned = business_date.day
        returned -= 1 if month == 12 else 0  # knock one day off only in December
return returned
def first_and_last_day_compt(self, sep='/'):
"""
        IT ALREADY PICKS THE CLOSEST PREVIOUS ONE
        # will_be is needed because before it the date is written backwards
        # e.g.: 20200430
        # year 2020, month 04, day 30... (example)
        :return: first and last day of the month (dd/mm/yyyy strings)
"""
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
compt, file_name = self.compt_and_filename()
        mes, ano = compt.split('-') if '-' in compt else compt.split('/')
mes, ano = int(mes), int(ano)
# - timedelta(days=1)
# + relativedelta(months=1)
last_now = date(ano, mes, 1) + relativedelta(months=1)
last_now -= timedelta(days=1)
first_now = date(ano, mes, 1)
z, a = last_now, first_now
br1st = f'{a.day:02d}{sep}{a.month:02d}{sep}{a.year}'
brlast = f'{z.day:02d}{sep}{z.month:02d}{sep}{z.year}'
print(br1st, brlast)
return br1st, brlast
def _files_path_v3(self, pasta_client, wexplorer_tup=None):
"""
:param pasta_client: client_name
:param wexplorer_tup: the tuple containing the self.compt_and_file_name()
:return: salva_path (save_path)
"""
import os
if wexplorer_tup is None:
compt, excel_file_name = self.compt_and_filename()
else:
compt, excel_file_name = wexplorer_tup
ano = [compt.split(v)[-1] for v in compt if not v.isdigit()]
ano = ano[0]
possible_folders = ['_Dívidas']
        # needs improvement, maybe make it more responsive; still, it already handles the debts folder and adapts to years
PATH = '/'.join(excel_file_name.split('/')[:-2])
pasta_client = pasta_client.strip()
volta = os.getcwd()
for acp in [PATH, ano, compt, pasta_client]:
try:
os.chdir(acp)
except FileNotFoundError:
os.mkdir(acp)
os.chdir(acp)
salva_path = os.getcwd()
# print(salva_path)
os.chdir(volta)
return salva_path
def _files_path_defis(self, pasta_client, tup_path, ano=None):
"""
:param pasta_client: client name
:param tup_path: like wexplorer_tup
:return:
# THE FUTURE OF THE SOFTWARE
"""
# insyear: inside this year...
import os
from pathlib import Path
if ano is None:
ano = str(self.y())
insyear, excel_file_name = tup_path
defis_path = Path(os.path.dirname(excel_file_name))
defis_path = defis_path.parent
defis_path_final = [defis_path, ano, insyear, pasta_client]
volta = os.getcwd()
for acp in defis_path_final:
try:
os.chdir(acp)
except FileNotFoundError:
os.mkdir(acp)
os.chdir(acp)
salva_path = os.getcwd()
os.chdir(volta)
return salva_path
def mkdir_hoje(self, und_disco, relative_path=None):
"""
:param und_disco: A, B, C, D, E, F, G, H, I, J, K ... ETC
:param relative_path: path/b_path/c_path
        :return: today's folder created under the relative path, without clobbering existing directories
"""
date_folder = f'{self.hj()}-{self.m():02d}-{self.y()}'
if len(und_disco) > 1:
print('Digite somente a letra da unidade de disco')
raise AttributeError
if relative_path is not None:
if '/' == relative_path[0] or '/' == relative_path[-1]:
print('Não use "/" nem "\\" em path[0] or path[-1]')
raise AttributeError
res = self.__new_path_set(f'{und_disco}:/{relative_path}/{date_folder}')
else:
res = self.__new_path_set(f'{und_disco}:/{date_folder}')
return res
def move_file(self, where_from, destiny):
"""
:param where_from: where the files come from
:param destiny: where they're going to
:return: File moved from a place[where_from] to another[destiny]
"""
from shutil import move
move(where_from, destiny)
def __new_path_set(self, path=''):
"""
        :param path: default: current path (downloads)
        :return: if the path does not exist yet, it is created
        # so far only called by mkdir_hoje
"""
import os
volta = os.getcwd()
if '/' in path:
spliter = '/'
elif '\\' in path:
spliter = '\\'
else:
spliter = ''
print(path)
raise AttributeError
try:
und_disco = path.split('/')[0]
except (IndexError, AttributeError) as e:
raise e
else:
os.chdir(und_disco)
pnow = os.getcwd()[:-1]
for folder in path.split(spliter)[1:]:
pnow = f'{pnow}/{folder}'
if not os.path.exists(pnow):
os.mkdir(folder)
os.chdir(folder)
# print('NOTHING went wrong')
os.chdir(volta)
return path
def certif_feito(self, save_path, add=''):
"""
        proof that it is done
        :param save_path: folder name coming from _files_path_v2
        :param add: an extra piece added to the file name
        :return: path + file name of the png image
"""
client_name = save_path[save_path.index('-')-2: save_path.index('-')+2]
type_arquivo = 'png'
try:
save = r'{}\\{}-SimplesNacionalDeclarado.{}'.format(save_path, add, type_arquivo)
print(save, '---------> SAVE')
return save
except FileNotFoundError:
print('NÃO CONSEGUI RETORNAR SAVE')
def unzipe_file(self, full_path, rm_zip=True):
"""
        :param full_path: path to the folder with the zip files
        :param rm_zip: True -> remove the zip, False -> keep it
        :return: files extracted and the zip deleted.
        It does this for every zip in the folder
"""
from time import sleep
from os import chdir, remove, listdir
from zipfile import ZipFile, BadZipFile
chdir(full_path)
ls = listdir()
for file in ls:
print('Descompactando, ZIPs e excluíndo-os')
if file.endswith('.zip'):
try:
zf = ZipFile(file, mode='r')
zf.extractall()
zf.close()
except BadZipFile:
print('Não deszipei')
finally:
if rm_zip:
sleep(5)
remove(file)
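# Illustrative sketch (not part of the original module): the date helpers above
# do not touch the spreadsheet files, so they can be exercised on their own.
# It assumes the inherited Now class can be instantiated without arguments.
def _example_date_helpers():
    paths = SetPaths()
    compt = paths.get_compt_only(m_cont=1)  # e.g. '04-2024' when run in May 2024
    last_business_day = paths.get_last_business_day_of_month(month=5, year=2024)
    return compt, last_business_day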
|
StarcoderdataPython
|
12810781
|
from django.db import connection
def auto_dolt_commit_migration(sender, **kwargs):
msg = "completed database migration"
author = "nautobot <<EMAIL>>"
with connection.cursor() as cursor:
cursor.execute("SELECT dolt_add('-A') FROM dual;")
cursor.execute(
f"""
SELECT dolt_commit(
'--all',
'--allow-empty',
'--message', '{msg}',
'--author', '{author}')
FROM dual;"""
)
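# Illustrative sketch (not part of the original file): the receiver above has a
# Django signal-handler signature, so one plausible wiring is to connect it to
# post_migrate from an AppConfig.ready(). Where it is actually registered is not
# shown here.
def connect_dolt_autocommit(app_config):
    from django.db.models.signals import post_migrate
    post_migrate.connect(auto_dolt_commit_migration, sender=app_config)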
|
StarcoderdataPython
|
5014619
|
<gh_stars>1-10
from spytest import st, tgapi
import utilities.common as utils
import apis.system.port as portapi
def log_info(fmt, *args):
st.log(fmt % args)
def warn(fmt, *args):
st.warn(fmt % args)
def trace(dut, local, partner, remote, status):
#print(dut, local, partner, remote, status)
pass
def wait():
st.wait(5)
def check_status(s1, s2, s3, s4):
#print(s1, s2, s3, s4)
if not s1 or not s3:
return False
if s1.lower() != s2.lower():
return False
if s3.lower() != s4.lower():
return False
return True
def get_link_status(tg, ph):
return tg.tg_interface_control(mode="check_link", desired_status='up',
port_handle=ph)
def verify_topology(check_type, threads=True):
if check_type in ["status", "status2", "status3", "status4"]:
return links_status(threads, check_type)
retval = True
results = []
header = ['DUT', 'Local', "Partner", "Remote", "Status"]
check_oneway = True
exclude = []
for dut in st.get_dut_names():
alias = st.get_device_alias(dut)
for local, partner, remote in st.get_dut_links(dut):
palias = st.get_device_alias(partner)
# check if the port is verified from other direction
skip = False
for ex in exclude:
#print("CMP", dut, local, ex[0], ex[1])
if dut == ex[0] and local == ex[1]:
skip = True
break
if skip:
log_info("{}/{} is already verified".format(alias, local))
continue
result = [alias, local, palias, remote, "Fail"]
# shutdown local link and get remote link stats in partner
portapi.shutdown(dut, [local])
wait()
status1 = portapi.get_interface_status(partner, remote)
trace(alias, local, palias, remote, status1)
# noshutdown local link and get remote link stats in partner
portapi.noshutdown(dut, [local])
wait()
status2 = portapi.get_interface_status(partner, remote)
trace(alias, local, palias, remote, status2)
# log the result on fail
if not check_status(status1, "down", status2, "up"):
warn("1. port %s/%s is not connected to %s/%s\n",
alias, local, palias, remote)
results.append(result)
exclude.append([partner, remote])
retval = False
continue
if not check_oneway:
# shutdown remote link and get local link status
portapi.shutdown(partner, [remote])
wait()
status3 = portapi.get_interface_status(dut, local)
trace(alias, local, palias, remote, status3)
# noshutdown remote link and get local link status
portapi.noshutdown(partner, [remote])
wait()
status4 = portapi.get_interface_status(dut, local)
trace(alias, local, palias, remote, status4)
# log the result on fail
if not check_status(status3, "down", status4, "up"):
warn("2. port %s/%s is not connected to %s/%s\n",
alias, local, palias, remote)
results.append(result)
retval = False
continue
# log the result on pass
result[4] = "OK"
results.append(result)
exclude.append([partner, remote])
for local, partner, remote in st.get_tg_links(dut):
palias = st.get_device_alias(partner)
(tg, ph) = tgapi.get_handle_byname(None, tg=partner, port=remote)
result = [alias, local, palias, remote, "Fail"]
tgen_link_status_supported = False
if tgen_link_status_supported:
# shutdown local link and get remote link stats in partner
portapi.shutdown(dut, [local])
wait()
status1 = get_link_status(tg, ph)
trace(alias, local, palias, remote, status1)
# no shutdown local link and get remote link stats in partner
portapi.noshutdown(dut, [local])
wait()
status2 = get_link_status(tg, ph)
trace(alias, local, palias, remote, status2)
# log the result on fail
if tgen_link_status_supported and (status1 or not status2):
warn("3. port %s/%s is not connected to %s/%s\n",
alias, local, palias, remote)
results.append(result)
retval = False
continue
# shutdown remote link and get local link status
tg.tg_interface_control(mode="break_link", port_handle=ph)
wait()
status3 = portapi.get_interface_status(dut, local)
trace(alias, local, palias, remote, status3)
# noshutdown remote link and get local link status
tg.tg_interface_control(mode="restore_link", port_handle=ph)
wait()
status4 = portapi.get_interface_status(dut, local)
trace(alias, local, palias, remote, status4)
# log the result on fail
if not check_status(status3, "down", status4, "up"):
warn("4. port %s/%s is not connected to %s/%s\n",
alias, local, palias, remote)
results.append(result)
retval = False
continue
# log the result on pass
result[4] = "OK"
results.append(result)
return [retval, header, results]
def fill_alias():
alias = dict()
for dut in st.get_dut_names():
alias[dut] = st.get_device_alias(dut)
for tg in st.get_tg_names():
alias[tg] = st.get_device_alias(tg)
return alias
def links_status(threads, check_type):
header = ['DUT', 'Local', "LStatus (A/O)", "Partner", "Remote", "RStatus (A/O)"]
funcs = [
[tg_links_status, check_type],
[duts_links_status, threads]
]
[[v1, v2], [e1, e2]] = utils.exec_all(threads, funcs, True)
if v1 is None or v2 is None or e1 is not None or e2 is not None:
print(v1, v2, e1, e2)
return [True, header, []]
v1_default = "?2?" if v1 else "NA"
(results, exclude, alias) = ([], [], fill_alias())
for dut in st.get_dut_names():
for local, partner, remote in st.get_tg_links(dut):
res = []
res.append(alias.get(dut, "?"))
res.append(local)
res.append(v2.get("{}--{}".format(dut, local), "?1?"))
res.append(alias.get(partner, "?"))
res.append(remote)
res.append(v1.get("{}--{}".format(partner, remote), v1_default))
results.append(res)
for local, partner, remote in st.get_dut_links(dut):
name = "{}--{}".format(dut, local)
if name in exclude:
continue
res = []
res.append(alias.get(dut, "?"))
res.append(local)
res.append(v2.get("{}--{}".format(dut, local), "?3?"))
res.append(alias.get(partner, "?"))
res.append(remote)
res.append(v2.get("{}--{}".format(partner, remote), "?4?"))
exclude.append("{}--{}".format(partner, remote))
results.append(res)
return [True, header, results]
def tg_links_status_1():
results = dict()
for dut in st.get_dut_names():
for local, partner, remote in st.get_tg_links(dut):
(tg, ph) = tgapi.get_handle_byname(None, tg=partner, port=remote)
name = "{}--{}".format(partner, remote)
results[name] = get_link_status(tg, ph)
return results
def tg_links_status_0():
# build port list per tgen
tg_port_dict = {}
for dut in st.get_dut_names():
for local, partner, remote in st.get_tg_links(dut):
tg_port_dict.setdefault(partner, []).append(remote)
results = dict()
for partner, port_list in tg_port_dict.items():
# get tgen handle using first port
(tg, ph) = tgapi.get_handle_byname(None, tg=partner, port=port_list[0])
# get all ports status
rv = tg.get_port_status(port_list)
# fill the results
for port in port_list:
name = "{}--{}".format(partner, port)
results[name] = rv[port]
return results
def tg_links_status(check_type):
if check_type in ["status3"]:
return dict()
try:
return tg_links_status_0()
except:
return tg_links_status_1()
def duts_links_status(threads):
results = dict()
[rvs, exs] = utils.exec_foreach(threads, st.get_dut_names(), dut_links_status)
for rv in rvs:
if rv:
results.update(rv)
return results
def dut_links_status(dut):
local_list = []
for local, partner, remote in st.get_dut_links(dut):
local_list.append(local)
for local, partner, remote in st.get_tg_links(dut):
local_list.append(local)
output = portapi.get_status(dut, ",".join(local_list))
results = dict()
for local, partner, remote in st.get_dut_links(dut):
match = {"interface": local}
entries = utils.filter_and_select(output, ["admin","oper"], match)
name = "{}--{}".format(dut, local)
if entries:
results[name] = "{}/{}".format(entries[0]["admin"], entries[0]["oper"])
else:
results[name] = "----"
for local, partner, remote in st.get_tg_links(dut):
match = {"interface": local}
entries = utils.filter_and_select(output, ["admin","oper"], match)
name = "{}--{}".format(dut, local)
if entries:
results[name] = "{}/{}".format(entries[0]["admin"], entries[0]["oper"])
else:
results[name] = "----"
return results
|
StarcoderdataPython
|
8170913
|
<gh_stars>0
#######################################################
# Main APP definition.
#
# Dash Bootstrap Components used for main theme and better
# organization.
#######################################################
import dash
import dash_bootstrap_components as dbc
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
# We need this for function callbacks not present in the app.layout
app.config.suppress_callback_exceptions = True
|
StarcoderdataPython
|
4963573
|
from django.conf import settings
from attributes import defaults
ATTRIBUTES_CATEGORY_MODEL = getattr(
settings, 'ATTRIBUTES_CATEGORY_MODEL', defaults.ATTRIBUTES_CATEGORY_MODEL)
ATTRIBUTES_ENTRY_MODEL = getattr(
settings, 'ATTRIBUTES_ENTRY_MODEL', defaults.ATTRIBUTES_ENTRY_MODEL)
|
StarcoderdataPython
|
3220596
|
<gh_stars>1-10
#! /usr/bin/env python
## Hey, Python: encoding=utf-8
#
# Copyright (c) 2007-2010 <NAME> (<EMAIL>)
# Licensed under the terms of the MIT license.
from __future__ import with_statement
import justrok
from justrok import util
from justrok.enumerations import EngineState, ScrobbleServer
import appdirs
import errno
import hashlib
import httplib2
import os
from PyQt5.QtCore import QObject
import re
import socket
import string
import threading
import time
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import psutil
except ImportError:
_has_psutil = False
else:
_has_psutil = True
# TODO: Quitting Minirok while playing will not submit the current track until
# the next time Minirok starts (via the spool).
# TODO: Use KWallet for the password?
PROTOCOL_VERSION = '1.2.1'
CLIENT_IDENTIFIER = 'mrk'
TRACK_MIN_LENGTH = 30
TRACK_SUBMIT_SECONDS = 240
TRACK_SUBMIT_PERCENT = 0.5
SOURCE_USER = 'P'
SOURCE_BROADCAST = 'R'
SOURCE_PERSONALIZED = 'E'
SOURCE_LASTFM = 'L'
SOURCE_UNKNOWN = 'U'
MAX_FAILURES = 3
MAX_SLEEP_MINUTES = 120
MAX_TRACKS_AT_ONCE = 50
APPDATA_SCROBBLE = 'scrobble'
APPDATA_SCROBBLE_LOCK = 'scrobble.lock'
##
class Submission(object):
class RequiredDataMissing(Exception):
pass
class TrackTooShort(Exception):
pass
def __init__(self, tag_dict):
"""Create a Submission object from a dict of tags.
Args:
tag_dict: a dictionary as returned by Playlist.get_current_tags(),
i.e., containing 'Title', 'Artist', etc.
"""
if not all(tag_dict[x] for x in ['title', 'artist', 'length']):
raise self.RequiredDataMissing()
elif tag_dict['length'] < TRACK_MIN_LENGTH:
raise self.TrackTooShort()
self.path = None
self.length = tag_dict['length']
self.start_time = int(time.time())
self.param = {
'm': '',
'r': '',
'o': SOURCE_USER,
'l': str(self.length),
'i': str(self.start_time),
'n': tag_dict['track'],
't': tag_dict['title'],
'b': tag_dict['album'],
'a': tag_dict['artist'],
}
def get_params(self, i=0):
return dict(('%s[%d]' % (k, i), v) for k, v in self.param.items())
def get_now_playing_params(self):
return dict((k, self.param[k]) for k in list('atblnm'))
def serialize(self):
return json.dumps(self.param, indent=4)
@classmethod
def load_from_file(cls, path):
with open(path) as f:
try:
param = json.load(f)
except ValueError:
return None
else:
# TODO: could it be possible to get json to give us str objects?
param = dict((k, param[k]) for k in param)
if set(param.keys()) == set('mrolintba'):
obj = cls.__new__(cls)
obj.path = path
obj.param = param
obj.length = int(param['l'])
obj.start_time = int(param['i'])
return obj
else:
return None
##
class HandshakeFatalError(Exception):
pass
##
class Request(object):
def __init__(self, url, params):
self.body = []
self.error = None
self.failed = False
try:
conn = httplib2.Http()
response, content = conn.request(url, "POST", urllib.parse.urlencode(params), {'Content-Type': 'application/x-www-form-urlencoded'})
except socket.error as e:
self.failed = True
self.error = e.args[1] # No e.message available.
else:
if response.status != 200:
self.failed = True
self.error = "Reason ..."
else:
content = content.decode()
self.body = content.rstrip('\n').split('\n')
if not self.body:
self.failed = True
self.error = 'no response received from server'
elif self.body[0].split()[0] != 'OK':
self.failed = True
self.error = re.sub(r'^FAILED\s+', '', self.body[0])
class HandshakeRequest(Request):
def __init__(self, url, params):
super(HandshakeRequest, self).__init__(url, params)
if self.failed:
if re.search(r'^(BANNED|BADTIME)', self.error):
raise HandshakeFatalError(self.error)
elif len(self.body) != 4:
self.failed = True
self.error = 'unexpected response from scrobbler server:\n%r' % (
self.body,)
##
class ProcInfo(object):
def __init__(self, pid=None):
if pid is None:
pid = os.getpid()
d = self.data = {}
if not _has_psutil:
d['pid'] = pid
d['version'] = '1.0'
else:
d['pid'] = pid
d['version'] = '1.1'
try:
d['cmdline'] = psutil.Process(pid).cmdline()
            except psutil.Error:
d['version'] = '1.0'
def serialize(self):
return json.dumps(self.data, indent=4)
def isRunning(self):
if self.data['version'] == '1.0':
try:
os.kill(self.data['pid'], 0)
except OSError as e:
return (False if e.errno == errno.ESRCH
else True) # ESRCH: no such PID.
else:
return True
elif self.data['version'] == '1.1':
try:
proc = psutil.Process(self.data['pid'])
except psutil.NoSuchProcess:
return False
else:
                return proc.cmdline() == self.data['cmdline']
@classmethod
def load_from_fileobj(cls, fileobj):
try:
param = json.load(fileobj)
except ValueError:
return None
else:
version = param.get('version', None)
if version == '1.0':
keys = ['version', 'pid']
elif version == '1.1':
if _has_psutil:
keys = ['version', 'pid', 'cmdline']
else: # Downgrade format.
param['version'] = '1.0'
keys = ['version', 'pid']
else:
return None
obj = cls.__new__(cls)
try:
obj.data = dict((k, param[k]) for k in keys)
except KeyError:
return None
else:
return obj
##
class Scrobbler(QObject, threading.Thread):
def __init__(self):
QObject.__init__(self)
threading.Thread.__init__(self)
self.setDaemon(True)
self.user = None
self.password_hash = None
self.handshake_url = None
self.session_key = None
self.scrobble_url = None
self.now_playing_url = None
self.failure_count = 0
self.pause_duration = 1 # minutes
self.scrobble_queue = []
self.current_track = None
self.mutex = threading.Lock()
self.event = threading.Event()
self.timer = util.QTimerWithPause()
self.configured = threading.Condition()
self.timer.setSingleShot(True)
justrok.Globals.callback_registry.register_apply_settings(self.__apply_settings)
self.__apply_settings()
appdata = Scrobbler.__get_configuration_directory_path()
do_queue = False
self.spool = os.path.join(appdata, APPDATA_SCROBBLE)
# Spool directory handling: create it if it doesn't exist...
if not os.path.isdir(self.spool):
try:
os.mkdir(self.spool)
except OSError as e:
justrok.logger.error('could not create scrobbling spool: %s', e)
self.spool = None
# ... else ensure it is readable and writable.
elif not os.access(self.spool, os.R_OK | os.W_OK):
justrok.logger.error('scrobbling spool is not readable/writable')
self.spool = None
# If not, we try to assess whether this Minirok instance should try to
# submit the existing entries, if any. Supposedly, the Last.fm server
# has some support for detecting duplicate submissions, but we're
        # advised not to rely on it (<<EMAIL>>), so we use a
# lock file to signal that some Minirok process is taking care of the
# submissions from the spool directory. (This scheme, I realize,
# doesn't get all corner cases right, but will have to suffice for now.
# For example, if Minirok A starts, then Minirok B starts, and finally
# Minirok A quits and Minirok C starts, Minirok B and C will end up
# both trying to submit B's entries that haven't been able to be
# submitted yet. There's also the race-condition-du-jour, of course.)
else:
scrobble_lock = os.path.join(appdata, APPDATA_SCROBBLE_LOCK)
try:
lockfile = open(scrobble_lock)
except IOError as e:
if e.errno == errno.ENOENT:
do_queue = True
else:
raise
else:
proc = ProcInfo.load_from_fileobj(lockfile)
if proc and proc.isRunning():
justrok.logger.info(
'Minirok already running (pid=%d), '
'not scrobbling existing items', proc.data['pid'])
else:
do_queue = True
if do_queue:
self.lock_file = scrobble_lock
with open(self.lock_file, 'w') as lock:
lock.write(ProcInfo().serialize())
files = [os.path.join(self.spool, x)
for x in os.listdir(self.spool)]
tracks = sorted(
[t for t in map(Submission.load_from_file, files)
if t is not None ], key=lambda t: t.start_time)
if tracks:
self.scrobble_queue.extend(tracks)
else:
self.lock_file = None
justrok.Globals.callback_registry.register_at_exit(self.cleanup)
@staticmethod
def __get_configuration_directory_path():
return appdirs.user_config_dir(justrok.__appname__)
def cleanup(self):
if self.lock_file is not None:
try:
os.unlink(self.lock_file)
except:
pass
def _on_playlist_playing_new_track_started(self):
self.timer.stop()
self.current_track = None
tags = justrok.Globals.playlist.get_current_tags()
try:
self.current_track = Submission(tags)
except Submission.RequiredDataMissing as e:
justrok.logger.info('track missing required tags, not scrobbling')
except Submission.TrackTooShort as e:
justrok.logger.info('track shorter than %d seconds, '
'not scrobbling', TRACK_MIN_LENGTH)
else:
runtime = min(TRACK_SUBMIT_SECONDS,
self.current_track.length * TRACK_SUBMIT_PERCENT)
self.timer.start(runtime * 1000)
with self.mutex:
self.event.set()
def _on_engine_state_changed(self, new_state):
if new_state == EngineState.PAUSED:
self.timer.pause()
elif new_state == EngineState.PLAYING:
self.timer.resume()
elif new_state == EngineState.STOPPED:
self.timer.stop()
self.current_track = None
with self.mutex:
self.event.set()
def _on_timer_timeout(self):
        if not self.is_alive():
# Abort this function if the scrobbling thread is not running; this
# happens if we received a BANNED or BADTIME from the server. In
# such cases, it's probably not a bad idea not to write anything to
# disk. (Well, supposedly there's precious data we could save in
# the case of BANNED, and submit it again with a fixed version, hm.)
return
with self.mutex:
self.scrobble_queue.append(self.current_track)
if self.spool is not None:
path = self.write_track_to_spool(self.current_track)
if path is None:
justrok.logger.warn(
'could not create file in scrobbling spool')
else:
self.current_track.path = path
self.current_track = None
justrok.logger.debug('track queued for scrobbling') # XXX.
def write_track_to_spool(self, track):
path = os.path.join(self.spool, str(track.start_time))
for x in [''] + list(string.ascii_lowercase):
try:
f = util.creat_excl(path + x)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
f.write(track.serialize())
f.flush() # Otherwise write() syscall happens after fsync().
os.fsync(f.fileno())
f.close()
return path + x
##
def run(self):
if self.user is None:
# We're not configured to run, so we hang on here.
with self.configured:
self.configured.wait()
if self.scrobble_queue: # Any tracks loaded from spool?
with self.mutex:
self.event.set()
while True:
if self.session_key is None:
try:
self.do_handshake()
except HandshakeFatalError as e:
justrok.logger.error('aborting scrobbler: %s', e)
return
self.event.wait()
with self.mutex:
self.event.clear()
current_track = self.current_track
##
while self.scrobble_queue:
params = { 's': self.session_key }
with self.mutex:
tracks = self.scrobble_queue[0:MAX_TRACKS_AT_ONCE]
for i, track in enumerate(tracks):
params.update(track.get_params(i))
req = Request(self.scrobble_url, params)
if req.failed:
if req.error.startswith('BADSESSION'):
self.session_key = None # Trigger re-handshake.
else:
justrok.logger.info('scrobbling %d track(s) failed: %s',
len(tracks), req.error)
self.failure_count += 1
if self.failure_count >= MAX_FAILURES:
self.session_key = None
break
else:
justrok.logger.debug('scrobbled %d track(s) successfully',
len(tracks)) # XXX.
for t in tracks:
if t.path is not None:
try:
os.unlink(t.path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
with self.mutex:
self.scrobble_queue[0:len(tracks)] = []
##
if current_track is not None and self.session_key is not None:
params = { 's': self.session_key }
params.update(current_track.get_now_playing_params())
req = Request(self.now_playing_url, params)
if req.failed:
justrok.logger.info(
'could not send "now playing" information: %s',
req.error)
if req.error.startswith('BADSESSION'):
self.session_key = None # Trigger re-handshake.
else:
self.failure_count += 1
if self.failure_count >= MAX_FAILURES:
self.session_key = None
else:
justrok.logger.debug(
'sent "now playing" information successfully') # XXX.
##
if self.session_key is None:
# Ensure we retry pending actions as soon as we've successfully
# handshaked again.
with self.mutex:
self.event.set()
def do_handshake(self):
while True:
now = str(int(time.time()))
params = {
'hs': 'true',
'p': PROTOCOL_VERSION,
'c': CLIENT_IDENTIFIER,
'v': justrok.__version__,
'u': self.user,
't': now,
'a': hashlib.md5((self.password_hash + now).encode()).hexdigest()
}
req = HandshakeRequest(self.handshake_url, params)
if req.failed:
if re.search(r'^BADAUTH', req.error):
justrok.logger.warn(
'scrobbler handshake failed: bad password')
with self.configured:
self.configured.wait()
else:
justrok.logger.info(
'scrobbler handshake failed (%s), retrying in '
'%d minute(s)', req.error, self.pause_duration)
time.sleep(self.pause_duration * 60)
if self.pause_duration < MAX_SLEEP_MINUTES:
self.pause_duration = min(MAX_SLEEP_MINUTES,
self.pause_duration * 2)
else:
self.failure_count = 0
self.pause_duration = 1
self.session_key = req.body[1]
self.scrobble_url = req.body[3]
self.now_playing_url = req.body[2]
justrok.logger.debug('scrobbling handshake successful') # XXX.
break
def __apply_settings(self):
# TODO: what if there's a queue and we get disabled?
if justrok.Globals.settings.scrobbling_enabled:
justrok.Globals.engine.state_changed.connect(self._on_engine_state_changed)
justrok.Globals.playlist.playing_new_track_started.connect(self._on_playlist_playing_new_track_started)
self.timer.timeout.connect(self._on_timer_timeout)
self.user = justrok.Globals.settings.scrobbling_username
            # TODO: The password is stored in plain text in the configuration file.
self.password_hash = hashlib.md5(justrok.Globals.settings.scrobbling_password.encode()).hexdigest()
if justrok.Globals.settings.scrobbling_server is ScrobbleServer.CUSTOM:
self.handshake_url = justrok.Globals.settings.scrobbling_custom_server_url
else:
self.handshake_url = justrok.Globals.settings.scrobbling_server.url
self.session_key = None
with self.configured:
self.configured.notify()
else:
try:
justrok.Globals.engine.state_changed.disconnect(self._on_engine_state_changed)
justrok.Globals.playlist.playing_new_track_started.disconnect(self._on_playlist_playing_new_track_started)
self.timer.timeout.disconnect(self._on_timer_timeout)
except TypeError:
# We have not been connected previously. It's OK!
pass
|
StarcoderdataPython
|
8013537
|
<filename>tests/test_smoke.py
import pytest
@pytest.mark.smoke
def test_import():
import dovpanda
|
StarcoderdataPython
|
1690270
|
<filename>output/models/ms_data/complex_type/ct_z006_xsd/__init__.py<gh_stars>1-10
from output.models.ms_data.complex_type.ct_z006_xsd.ct_z006 import Root
__all__ = [
"Root",
]
|
StarcoderdataPython
|