id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1)
---|---|---
320836
|
<gh_stars>1-10
"""
NoxRating Main Logic
"""
if __name__ == "__main__":
from noxrating import noxrating
noxrating.main()
|
StarcoderdataPython
|
8047306
|
import os
from unittest import SkipTest
from unittest.mock import Mock, call, patch
from django.conf import settings
from django.test import TestCase
try:
import qgis # noqa
except ImportError:
raise SkipTest("Skipping all QGIS tests because it's not installed.")
from eventkit_cloud.utils.qgis_utils import convert_qgis_gpkg_to_kml
class TestQGIS(TestCase):
@patch("eventkit_cloud.utils.qgis_utils.gdal")
@patch("qgis.core.QgsProject")
@patch("eventkit_cloud.utils.qgis_utils.osgeo")
def test_convert_qgis_gpkg_to_kml(self, mock_osgeo, mock_qgs_project, mock_gdal):
# Setup the mocks and expected values.
qgs_file = "test.qgs"
output_kml_path = "test.kml"
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT)
mock_layer_land = Mock()
mock_layer_land.filename = os.path.join(stage_dir, "kml", "land_polygons.kml")
mock_layer_land.source.return_value = "layername=land_polygons"
mock_layer_boundary = Mock()
mock_layer_boundary.filename = os.path.join(stage_dir, "kml", "boundary.kml")
mock_layer_boundary.source.return_value = "layername=boundary"
mock_layer_roads = Mock()
mock_layer_roads.filename = os.path.join(stage_dir, "kml", "roads_lines.kml")
mock_layer_roads.source.return_value = "layername=roads_lines"
# Mimic what mapLayers returns which is a list of tuples containing a name and a layer instance.
mock_qgs_project.instance.return_value.mapLayers.return_value.items.return_value = [
("land_polygons", mock_layer_land),
("boundary", mock_layer_boundary),
("roads_lines", mock_layer_roads),
]
# Have to use a context manager because this is imported within the function's scope.
with patch("qgis.core.QgsVectorFileWriter") as mock_qgs_writer:
kml = convert_qgis_gpkg_to_kml(qgs_file, output_kml_path, stage_dir=stage_dir)
# Ensure the QGIS project is created.
mock_qgs_project.instance.assert_called_once()
# Ensure the layers are all written in the order from the mapLayers list.
write_vector_format_calls = [
call(
layer=mock_layer_land,
fileName=mock_layer_land.filename,
fileEncoding="utf-8",
driverName="libkml",
symbologyExport=mock_qgs_writer.SymbolLayerSymbology,
symbologyScale=20000,
),
call(
layer=mock_layer_boundary,
fileName=mock_layer_boundary.filename,
fileEncoding="utf-8",
driverName="libkml",
symbologyExport=mock_qgs_writer.SymbolLayerSymbology,
symbologyScale=20000,
),
call(
layer=mock_layer_roads,
fileName=mock_layer_roads.filename,
fileEncoding="utf-8",
driverName="libkml",
symbologyExport=mock_qgs_writer.SymbolLayerSymbology,
symbologyScale=20000,
),
]
mock_qgs_writer.writeAsVectorFormat.assert_has_calls(write_vector_format_calls)
# Ensure the datasource is created.
mock_osgeo.ogr.GetDriverByName.assert_called_once_with("libkml")
mock_osgeo.ogr.GetDriverByName.return_value.CreateDataSource.assert_called_once_with(output_kml_path)
# Ensure that the layers are all merged in the correct order.
vector_translate_calls = [
call(output_kml_path, mock_layer_roads.filename, accessMode="append"),
call(output_kml_path, mock_layer_land.filename, accessMode="append"),
call(output_kml_path, mock_layer_boundary.filename, accessMode="append"),
]
mock_gdal.VectorTranslate.assert_has_calls(vector_translate_calls)
self.assertEqual(kml, output_kml_path)
|
StarcoderdataPython
|
1657413
|
<gh_stars>0
import secrets, sys, os, getopt, struct
import Crypto
from Crypto.Cipher import AES
from getpass import getpass
from hashlib import sha256
from lib import to_bytes, to_str, hash_password, wipe, prompt_password, get_key
Version = 1
KEY_SIZE = 32
Verbose = False
def encrypt(key, inp_fn, out_fn, remove_input, send_to_stdout):
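# Choose the output stream: stdout when requested, otherwise "<input>.aes" unless an explicit output file was given.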
output = None
close_out = False
if out_fn is None:
if send_to_stdout:
output = sys.stdout.buffer
else:
out_fn = out_fn or inp_fn + ".aes"
if output is None:
close_out = True
output = open(out_fn, "wb")
if Verbose:
print(f"encrypting {inp_fn} -> {out_fn} ...")
iv = secrets.token_bytes(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, iv)
if inp_fn == "-":
inp = sys.stdin.buffer
else:
inp = open(inp_fn, "rb")
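# Output layout: 2-byte version and 4-byte flags packed big-endian (">HL"), then the raw IV, then the CFB-encrypted stream.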
flags = 0
header = struct.pack(">HL", Version, flags)
output.write(header)
output.write(iv)
while True:
buf = inp.read(8*1024)
if not buf:
break
output.write(cipher.encrypt(buf))
if close_out:
output.close()
inp.close()
if remove_input:
wipe(inp_fn)
def decrypt(key, inp_fn, out_fn, remove_input, send_to_stdout):
output = None
close_out = False
if out_fn is None:
if send_to_stdout:
output = sys.stdout.buffer
else:
if not inp_fn.endswith(".aes"):
print("Can not reconstruct original file name. Specify the output file explicitly")
sys.exit(2)
out_fn = inp_fn[:-4]
if output is None:
output = open(out_fn, "wb")
close_out = True
if Verbose:
print(f"decrypting {inp_fn} -> %s ..." % ())
inp = open(inp_fn, "rb")
header_length = 2 + 4 # 2-byte version + 4-byte flags; the IV (AES.block_size bytes) is read separately below
header = inp.read(header_length)
iv = inp.read(AES.block_size)
if len(header) != header_length or len(iv) != AES.block_size:
raise ValueError(f"Invalid format for encrypted file {inp_fn}")
version, flags = struct.unpack(">HL", header) # ignored
cipher = AES.new(key, AES.MODE_CFB, iv)
while True:
buf = inp.read(8*1024)
if not buf:
break
decrypted = cipher.decrypt(buf)
output.write(decrypted)
if close_out:
output.close()
inp.close()
if remove_input:
wipe(inp_fn)
def decrypt_many(key, inputs, output_dir, overwrite_out, remove_input):
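# First pass validates every input and computes its output path; decryption only runs if no errors were found.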
errors = 0
outputs = []
for inp in inputs:
if not os.path.exists(inp):
print("Not found:", inp, file=sys.stderr)
continue
if not os.path.isfile(inp):
print("Not a file:", inp, file=sys.stderr)
continue
inp_dir, _, inp_fn = inp.rpartition("/")
if not inp.endswith(".aes"):
print("Can not reconstruct original file name for encrypted file", inp)
errors += 1
out_fn = inp_fn[:-4] # cut ".aes"
out_dir = output_dir or inp_dir
out = out_dir + "/" + out_fn if out_dir else out_fn
if os.path.isfile(out) and not overwrite_out:
print(f"Plaintext for encrypted file {inp} exists. Use -f to ovverwrite")
errors += 1
outputs.append(out)
if not errors:
for inp, out in zip(inputs, outputs):
decrypt(key, inp, out, remove_input, False)
return True
else:
print("Aborted due to errors")
return False
def encrypt_many(key, inputs, output_dir, overwrite_out, remove_input):
errors = 0
outputs = []
for inp in inputs:
if not os.path.exists(inp):
print("Not found:", inp, file=sys.stderr)
continue
if not os.path.isfile(inp):
print("Not a file:", inp, file=sys.stderr)
continue
inp_dir, _, inp_fn = inp.rpartition("/")
out_fn = inp_fn + ".aes"
out_dir = output_dir or inp_dir
out = out_dir + "/" + out_fn if out_dir else out_fn
if os.path.isfile(out) and not overwrite_out:
print(f"Encrypted file {out} exists. Use -f to ovverwrite")
errors += 1
outputs.append(out)
if not errors:
for inp, out in zip(inputs, outputs):
encrypt(key, inp, out, remove_input, False)
return True
else:
print("Aborted due to errors")
return False
Usage = """
python aes.py (encrypt|decrypt) [options] <input_file> [<output_file>]
python aes.py (encrypt|decrypt) [options] <input_file> ... <output dir>
-w <password> # password will be hashed into a key
-w @<file with one line password>
-k <hex key>
-k @<file with binary or hex key>
-g <output file for key> # generate random key and write to file
-G <output file for key> # generate random key, write to file, and overwrite an existing key file if present
-f # overwrite output file
-r # securely wipe and remove input file(s)
-c # send output to stdout (single file only)
-v # verbose output
python aes.py wipe <file> ... # securely wipe and remove files
"""
if not sys.argv[1:]:
print(Usage)
sys.exit(2)
cmd, args = sys.argv[1], sys.argv[2:]
if not args:
print(Usage)
sys.exit(2)
opts, args = getopt.getopt(args, "w:k:g:G:fcrv")
opts = dict(opts)
overwrite = "-f" in opts
remove_input = "-r" in opts
send_to_stdout = "-c" in opts
Verbose = "-v" in opts
if cmd == "wipe":
for path in args:
if Verbose:
print("wiping", path, "...")
wipe(path)
sys.exit(0)
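# Single-file mode: one input with an optional explicit output file; otherwise process multiple inputs, using the trailing argument as the output directory when it is one.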
if len(args) == 1 or len(args) == 2 and not os.path.isdir(args[-1]):
inp = args[0]
if len(args) == 1:
out = None
else:
out = args[1]
if out and os.path.isfile(out) and not overwrite:
print(f"Output {out} exists. Use -f")
sys.exit(1)
if cmd == "encrypt":
key = get_key(opts, verify_password=True)
encrypt(key, inp, out, remove_input, send_to_stdout)
elif cmd == "decrypt":
key = get_key(opts, verify_password=False)
decrypt(key, inp, out, remove_input, send_to_stdout)
else:
if os.path.isfile(args[-1]):
out_dir = None
inputs = args
elif os.path.isdir(args[-1]):
out_dir = args[-1]
inputs = args[:-1]
else:
print("Output directory does not exist")
sys.exit(1)
key = get_key(opts)
if cmd == "encrypt":
encrypt_many(key, inputs, out_dir, overwrite, remove_input)
elif cmd == "decrypt":
decrypt_many(key, inputs, out_dir, overwrite, remove_input)
else:
print(Usage)
sys.exit(2)
|
StarcoderdataPython
|
218686
|
#!/usr/bin/python
# Uses google spreadsheets to store data.
# Google writing leverages gspread library. See: https://github.com/burnash/gspread
# But the oauth stuff is not quite right....
# Requires oauth2 authorisation.
# That means we need to create a security key file using google ui.
# This is pointed to below as authfile. Needs to be in working dir.
# Spreadsheet needs to share access to the strange email address in this authfile
# - the service account email address.
import subprocess, StringIO, gspread, time
from oauth2client.service_account import ServiceAccountCredentials
authfile = 'My Project-3aa2e2c55ac0.json'
FILENAME = "test"
class Spreadsheet():
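# Thin wrapper that appends rows to a Google Sheet via gspread, re-authorizing before each write so the credentials do not time out.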
def __init__(self):
print 'Started spreadsheet class.'
self.row = 0
# self.open()
def open(self, filename, mode):
# mode is just for filewrite compatibility and is discarded so far.
self.filename = filename
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(authfile, scope)
gc = gspread.authorize(credentials)
wks = gc.open(filename)
self.sheet = wks.get_worksheet(0)
def header(self):
val = []
val.append('Time')
val.append('Ping(ms)')
val.append('Download(Mb/s)')
val.append('Upload(Mb/s)')
return(val)
def write(self, val):
self.open(self.filename, "w") # otherwise the authorisation times out
self.row += 1
col = 0
for nextval in val:
col += 1
self.sheet.update_cell(self.row, col, nextval)
return(0)
if __name__ == '__main__':
print 'Google Spreadsheet test.'
s = Spreadsheet()
s.open(FILENAME, "w")
val = []
val.append(time.strftime('%d %b')) # date in top row
s.write(val)
val = []
val = s.header()
s.write(val)
print 'Written to spreadsheet - test.'
|
StarcoderdataPython
|
388580
|
<filename>mobiletrans/settings/__init__.py<gh_stars>1-10
from mobiletrans.settings.main import *
|
StarcoderdataPython
|
3209253
|
<gh_stars>0
# File: ciscoumbrella_consts.py
#
# Copyright (c) 2021-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
CISCOUMB_JSON_DOMAIN = "domain"
CISCOUMB_JSON_CUSTKEY = "customer_key"
CISCOUMB_JSON_PAGE_INDEX = "page_index"
CISCOUMB_JSON_DOMAIN_LIMIT = "limit"
CISCOUMB_JSON_TOTAL_DOMAINS = "total_domains"
CISCOUMB_JSON_DISABLE_SAFEGUARDS = "disable_safeguards"
CISCOUMB_LIST_UPDATED_WITH_GUID = "REST API returned success with id: {id}"
CISCOUMB_ERR_CONNECTIVITY_TEST = "Test Connectivity Failed"
CISCOUMB_SUCC_CONNECTIVITY_TEST = "Test Connectivity Passed"
CISCOUMB_ERR_SERVER_CONNECTION = "Connection failed"
CISCOUMB_ERR_FROM_SERVER = "API failed, Status code: {status}, Message: {message}"
CISCOUMB_MSG_GET_DOMAIN_LIST_TEST = "Querying a single domain entry to check credentials"
CISCOUMB_USING_BASE_URL = "Using url: {base_url}"
CISCOUMB_REST_API_URL = "https://s-platform.api.opendns.com"
CISCOUMP_REST_API_VER = '1.0'
CISCOUMB_DEFAULT_PAGE_INDEX = 1
CISCOUMB_DEFAULT_DOMAIN_LIMIT = 200
|
StarcoderdataPython
|
5063529
|
import sys
import unittest
from flask import Flask
from test_base import *
if sys.version_info > (2, 6):
from test_decorator_registration import decorator_registration
setattr(TestCaseContextIndependent, 'test_decorator_registration',
decorator_registration)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestCaseContextIndependent))
suite.addTest(unittest.makeSuite(TestCaseInitAppWithRequestContext))
suite.addTest(unittest.makeSuite(TestCaseWithRequestContext))
suite.addTest(unittest.makeSuite(TestCaseWithRequestContextAuth))
suite.addTest(unittest.makeSuite(TestCaseMultipleAppsWithRequestContext))
if hasattr(Flask, "app_context"):
suite.addTest(unittest.makeSuite(TestCaseInitAppWithAppContext))
suite.addTest(unittest.makeSuite(TestCaseWithAppContext))
suite.addTest(unittest.makeSuite(TestCaseWithAppContextAuth))
suite.addTest(unittest.makeSuite(TestCaseMultipleAppsWithAppContext))
return suite
|
StarcoderdataPython
|
5135232
|
"""
"""
from .width import wcwidth, wcswidth # noqa
__all__ = ('wcwidth', 'wcswidth',)
|
StarcoderdataPython
|
12853503
|
<filename>backtoshops/notifs/views.py
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © <NAME>
# contact: <EMAIL>
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import settings
import json
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from django.views.generic import View, ListView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.base import TemplateResponseMixin
from sorl.thumbnail import get_thumbnail
from fouillis.views import AdminLoginRequiredMixin
from notifs.forms import NotifForm
from notifs.models import Notif, NotifTemplateImage
class NotifListView(AdminLoginRequiredMixin, ListView):
template_name = "notif_list.html"
model = Notif
form_class = NotifForm
paginate_by = settings.DEFAULT_PAGE_SIZE
def get_queryset(self):
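# Limit notifications to the brand the current admin works for, narrowed by the optional search term.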
queryset = self.model.objects.filter(
mother_brand=self.request.user.get_profile().work_for)
if getattr(self, 'search', None):
queryset = queryset.filter(name__icontains=self.search)
return queryset
def post(self, request, *args, **kwargs):
self.search = self.request.POST.get('search')
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(NotifListView, self).get_context_data(**kwargs)
context.update({
'search': getattr(self, 'search', None) or '',
})
return context
class NewNotifView(AdminLoginRequiredMixin, CreateView):
model = Notif
form_class = NotifForm
template_name = "notif.html"
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
notif = form.save(commit=True)
pp_pks = [int(pp['pk']) for pp in form.images.cleaned_data
if not pp['DELETE']]
notif.images = NotifTemplateImage.objects.filter(pk__in=pp_pks)
notif.save()
return self.form_valid(form)
return self.form_invalid(form)
def get_form_kwargs(self):
kwargs = super(NewNotifView, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def get_success_url(self):
return reverse('notif_list')
class EditNotifView(AdminLoginRequiredMixin, UpdateView):
model = Notif
form_class = NotifForm
template_name = "notif.html"
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(EditNotifView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
notif = form.save(commit=True)
pp_pks = [int(pp['pk']) for pp in form.images.cleaned_data
if not pp['DELETE']]
notif.images = NotifTemplateImage.objects.filter(pk__in=pp_pks)
notif.save()
return self.form_valid(form)
return self.form_invalid(form)
def get_success_url(self):
pk = self.kwargs.get('pk', None)
return reverse('edit_notif', args=[pk])
def get_form_kwargs(self):
kwargs = super(EditNotifView, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def get_context_data(self, **kwargs):
context = super(EditNotifView, self).get_context_data(**kwargs)
context['pk'] = self.kwargs.get('pk', None)
return context
class PreviewTemplateContentView(AdminLoginRequiredMixin, CreateView):
model = Notif
form_class = NotifForm
template_name = "template_editor.html"
def get_form_kwargs(self):
kwargs = super(CreateView, self).get_form_kwargs()
initial = {}
images = []
for _img in NotifTemplateImage.objects.all():
images.append({
'pk': _img.pk,
'url': _img.image.url,
'thumb_url': get_thumbnail(_img.image, '40x43').url,
})
initial.update({'images': images})
kwargs.update({'initial': initial})
return kwargs
class DeleteNotifView(AdminLoginRequiredMixin, DeleteView):
model = Notif
form_class = NotifForm
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponse(content=json.dumps({'pk': self.kwargs.get('pk', None)}),
mimetype="application/json")
class UploadImageView(TemplateResponseMixin, View):
template_name = ""
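# Accepts a single uploaded image (files[]), rejects it if it exceeds SALE_IMG_UPLOAD_MAX_SIZE, and otherwise stores it as a NotifTemplateImage and returns its pk, URL, and thumbnail URL as JSON.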
def post(self, request, *args, **kwargs):
if request.FILES:
new_img = request.FILES[u'files[]']
if new_img.size > settings.SALE_IMG_UPLOAD_MAX_SIZE:
content = {'status': 'max_limit_error'}
return HttpResponse(json.dumps(content), mimetype='application/json')
new_media = NotifTemplateImage(image=request.FILES[u'files[]'])
new_media.save()
thumb = get_thumbnail(new_media.image, '40x43')
to_ret = {
'status': 'ok',
'pk': new_media.pk,
'url': new_media.image.url,
'thumb_url': thumb.url,
}
return HttpResponse(json.dumps(to_ret), mimetype="application/json")
return HttpResponseBadRequest(_("Please upload a picture."))
|
StarcoderdataPython
|
125648
|
<reponame>graykode/nlpblock
from nlpblock.layer import RNN
from nlpblock.layer.GRU import GRU
from nlpblock.layer.LSTM import LSTM
from nlpblock.layer.Attention import Attention
from nlpblock.layer.AttentionOne import AttentionOne
from nlpblock.layer.AttentionTwo import AttentionTwo
from nlpblock.layer.SelfAttnEncoder import SelfAttnEncoder
from nlpblock.layer.SelfAttnEncoderLayer import SelfAttnEncoderLayer
from nlpblock.layer.PosEncoding import PosEncoding
__all__ = [
'RNN', 'LSTM', 'GRU',
'Attention',
'AttentionOne',
'AttentionTwo',
'SelfAttnEncoder',
'SelfAttnEncoderLayer',
'PosEncoding',
]
|
StarcoderdataPython
|
1892927
|
<gh_stars>0
# coding: utf-8
# Temp1, Temp2, Temp3, and Temp4 are temperatures measured in different parts of the plant
# Target represents the quality state of the sample (temp1, temp2, temp3, and temp4)
# In[20]:
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
#p1_data_test_df = pd.read_csv('p1_data_test.csv',header=0)
df = pd.read_csv('../p1_data_train.csv',header=0)
print len(df)
pct = int(len(df)*0.5)
print pct
new_df = df[df.index > pct]
new_df
# In[2]:
def get_outliers_index(df, columns, gama = 1.5):
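# Return the row indices falling outside [Q1 - gama*IQR, Q3 + gama*IQR] for any of the given columns; Q1 and Q3 are approximated as the medians of the values below and above the overall median.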
index_to_drop = []
for column in columns:
q2 = df[column].median()
q3 = df[df[column] > q2][column].median()
q1 = df[df[column] < q2][column].median()
IQR = q3 - q1
index_to_drop += list(df[(df[column] > q3 + gama*IQR) | (df[column] < q1 - gama*IQR)][column].index.values)
return list(np.unique(index_to_drop))
# In[4]:
df.head()
index_to_drop = get_outliers_index(df,['Temp1','Temp2','Temp3','Temp4'])
print df.shape
print len(index_to_drop)
print index_to_drop
df = df.drop(df.index[index_to_drop])
print df.shape
# In[ ]:
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'year': [2012, 2012, 2013, 2014, 2014],
'reports': [4, 24, 31, 2, 3]}
df = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])
df
df.drop(df.index[[0,1,2]])
# In[192]:
|
StarcoderdataPython
|
3201561
|
<gh_stars>1000+
"""Constants for the Garages Amsterdam integration."""
DOMAIN = "garages_amsterdam"
ATTRIBUTION = "Data provided by municipality of Amsterdam"
|
StarcoderdataPython
|
5062982
|
'''
Contains Duplicate
Given an integer array, determine whether any element appears more than once.
Return true if any value appears at least twice in the array; return false if every element is distinct.
'''
from typing import List
'''
Approach: hash set
Return True as soon as an element is seen for the second time.
'''
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
allset = set()
for n in nums:
if n in allset:
return True
allset.add(n)
return False
s = Solution()
print(s.containsDuplicate([1, 2, 3, 1]))
print(s.containsDuplicate([1, 2, 3, 4]))
print(s.containsDuplicate([1, 1, 1, 3, 3, 4, 3, 2, 4, 2]))
|
StarcoderdataPython
|
4961127
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
import mock
import unittest
from unittest.mock import patch
from oslo_config import cfg
import conductor.data.plugins.inventory_provider.aai as aai
from conductor.data.plugins.inventory_provider.aai import AAI
from conductor.data.plugins.inventory_provider.sdc import SDC
from conductor.data.plugins.inventory_provider.hpa_utils import match_hpa
from conductor.data.plugins.triage_translator.triage_translator import TraigeTranslator
class TestAAI(unittest.TestCase):
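# Unit tests for the AAI inventory-provider plugin; outgoing AAI requests and helper lookups are replaced with mock.patch objects and canned JSON fixtures.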
def setUp(self):
cfg.CONF.set_override('password', '<PASSWORD>', 'aai')
CONF = cfg.CONF
CONF.register_opts(aai.AAI_OPTS, group='aai')
self.conf = CONF
self.aai_ep = AAI()
def tearDown(self):
mock.patch.stopall()
def test_get_version_from_string(self):
self.assertEqual("2.5", self.aai_ep._get_version_from_string("AAI2.5"))
self.assertEqual("3.0", self.aai_ep._get_version_from_string("AAI3.0"))
def test_aai_versioned_path(self):
self.assertEqual('/{}/cloud-infrastructure/cloud-regions/?depth=0'.format(self.conf.aai.server_url_version),
self.aai_ep._aai_versioned_path("/cloud-infrastructure/cloud-regions/?depth=0"))
self.assertEqual('/{}/query?format=id'.format(self.conf.aai.server_url_version),
self.aai_ep._aai_versioned_path("/query?format=id"))
def test_resolve_clli_location(self):
req_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_clli_location.json'
req_json = json.loads(open(req_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = req_json
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
self.assertEqual({'country': u'USA', 'latitude': u'40.39596', 'longitude': u'-74.135342'} ,
self.aai_ep.resolve_clli_location("clli_code"))
def test_get_inventory_group_pair(self):
req_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_inventory_group_pair.json'
req_json = json.loads(open(req_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = req_json
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
self.assertEqual([[u'instance-1', u'instance-2']] ,
self.aai_ep.get_inventory_group_pairs("service_description"))
def test_resolve_host_location(self):
req_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_host_name.json'
req_json = json.loads(open(req_json_file).read())
req_response = mock.MagicMock()
req_response.status_code = 200
req_response.ok = True
req_response.json.return_value = req_json
complex_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_get_complex.json'
complex_json = json.loads(open(complex_json_file).read())
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=req_response)
self.mock_get_request.start()
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_json)
self.mock_get_complex.start()
self.assertEqual({'country': u'USA', 'latitude': u'28.543251', 'longitude': u'-81.377112'} ,
self.aai_ep.resolve_host_location("host_name"))
def test_resolve_demands_inventory_type_cloud(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
generic_vnf_list_file = './conductor/tests/unit/data/plugins/inventory_provider/generic_vnf_list.json'
generic_vnf_list = json.loads(open(generic_vnf_list_file).read())
regions_response_file = './conductor/tests/unit/data/plugins/inventory_provider/regions.json'
regions_response = json.loads(open(regions_response_file).read())
demand_service_response_file = './conductor/tests/unit/data/plugins/inventory_provider/resolve_demand_service_response.json'
demand_service_response = json.loads(open(demand_service_response_file).read())
complex_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_get_complex.json'
complex_json = json.loads(open(complex_json_file).read())
req_response = mock.MagicMock()
req_response.status_code = 200
req_response.ok = True
req_response.json.return_value = demand_service_response
self.mock_first_level_service_call = mock.patch.object(AAI, 'first_level_service_call', return_value=generic_vnf_list)
self.mock_first_level_service_call.start()
self.mock_get_regions = mock.patch.object(AAI, '_get_regions', return_value=regions_response)
self.mock_get_regions.start()
regions_list = list()
regions_list.append(regions_response.get('region-name'))
self.mock_resolve_cloud_regions_by_cloud_region_id = mock.patch.object(AAI,
'resolve_cloud_regions_by_cloud_region_id',
return_value=regions_list)
self.mock_resolve_cloud_regions_by_cloud_region_id.start()
self.mock_resolve_v_server_for_candidate = mock.patch.object(AAI, 'resolve_v_server_for_candidate',
return_value=demand_service_response)
self.mock_resolve_v_server_for_candidate.start()
complex_link = {"link": "/aai/v10/complex-id", "d_value": 'test-id'}
self.mock_resolve_complex_info_link_for_v_server = mock.patch.object(AAI,
'resolve_complex_info_link_for_v_server',
return_value=complex_link)
self.mock_resolve_complex_info_link_for_v_server.start()
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_json)
self.mock_get_complex.start()
flavor_info = regions_response["region-name"]["flavors"]
self.maxDiff = None
self.assertCountEqual({u'demand_name': [
{'candidate_id': u'service-instance-id', 'city': None,
'cloud_owner': u'cloud-owner',
'uniqueness': 'true',
'vim-id': u'cloud-owner_cloud-region-id',
'vlan_key': None, 'cloud_region_version': '', 'complex_name': None, 'cost': 1.0,
'country': u'USA', 'existing_placement': 'false',
'host_id': u'vnf-name', 'inventory_provider': 'aai',
'inventory_type': 'service', 'latitude': u'28.543251',
'location_id': u'cloud-region-id', 'location_type': 'att_aic',
'longitude': u'-81.377112', 'physical_location_id': u'test-id',
'port_key': None,
'region': u'SE', 'service_resource_id': '',
'sriov_automation': 'false', 'state': None},
{'candidate_id': u'region-name', 'city': u'Middletown',
'cloud_owner': u'cloud-owner',
'uniqueness': 'true',
'vim-id': u'cloud-owner_region-name',
'cloud_region_version': u'1.0', 'complex_name': u'complex-name',
'cost': 2.0, 'country': u'USA', 'existing_placement': 'false',
'inventory_provider': 'aai', 'inventory_type': 'cloud',
'latitude': u'50.34', 'location_id': u'region-name',
'location_type': 'att_aic', 'longitude': u'30.12',
'physical_location_id': u'complex-id',
'region': u'USA', 'service_resource_id': u'service-resource-id-123',
'sriov_automation': 'false', 'state': u'NJ',
'flavors': flavor_info}]},
self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_resolve_demands_inventory_type_service(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/service_demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
generic_vnf_list_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_service_generic_vnf_list.json'
generic_vnf_list = json.loads(open(generic_vnf_list_file).read())
v_server_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_vserver.json'
v_server = json.loads(open(v_server_file).read())
demand_service_response_file = './conductor/tests/unit/data/plugins/inventory_provider/resolve_demand_service_response.json'
demand_service_response = json.loads(open(demand_service_response_file).read())
complex_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_complex.json'
complex_response = json.loads(open(complex_file).read())
region_response_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_region.json'
region_response = json.loads(open(region_response_file).read())
results_file = './conductor/tests/unit/data/plugins/inventory_provider/service_candidates.json'
results_json = json.loads(open(results_file).read())
req_response = mock.MagicMock()
req_response.status_code = 200
req_response.ok = True
req_response.json.return_value = demand_service_response
def mock_first_level_service_call_response(path, name, service_type):
if "equipment-role" in path:
return list()
else:
return generic_vnf_list
self.mock_first_level_service_call = mock.patch.object(AAI, 'first_level_service_call',
side_effect=mock_first_level_service_call_response)
self.mock_first_level_service_call.start()
regions = list()
regions.append(region_response)
self.mock_resolve_cloud_regions_by_cloud_region_id = mock.patch.object(AAI,
'resolve_cloud_regions_by_cloud_region_id',
return_value=regions)
self.mock_resolve_cloud_regions_by_cloud_region_id.start()
self.mock_resolve_v_server_for_candidate = mock.patch.object(AAI, 'resolve_v_server_for_candidate',
return_value=v_server)
self.mock_resolve_v_server_for_candidate.start()
complex_link = {"link": "/aai/v14/cloud-infrastructure/complexes/complex/clli1", "d_value": 'clli1'}
self.mock_resolve_complex_info_link_for_v_server = mock.patch.object(AAI,
'resolve_complex_info_link_for_v_server',
return_value=complex_link)
self.mock_resolve_complex_info_link_for_v_server.start()
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_response)
self.mock_get_complex.start()
self.maxDiff = None
self.assertEqual(results_json, self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_resolve_demands_inventory_type_vfmodule(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
generic_vnf_list_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_service_generic_vnf_list.json'
generic_vnf_list = json.loads(open(generic_vnf_list_file).read())
vfmodules_list_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_list.json'
vfmodules_list = json.loads(open(vfmodules_list_file).read())
v_server_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_vserver.json'
v_server = json.loads(open(v_server_file).read())
demand_service_response_file = './conductor/tests/unit/data/plugins/inventory_provider/resolve_demand_service_response.json'
demand_service_response = json.loads(open(demand_service_response_file).read())
complex_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_complex.json'
complex_response = json.loads(open(complex_file).read())
region_response_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_region.json'
region_response = json.loads(open(region_response_file).read())
results_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_candidates.json'
results_json = json.loads(open(results_file).read())
req_response = mock.MagicMock()
req_response.status_code = 200
req_response.ok = True
req_response.json.return_value = demand_service_response
def mock_first_level_service_call_response(path, name, service_type):
if "equipment-role" in path:
return list()
else:
return generic_vnf_list
self.mock_first_level_service_call = mock.patch.object(AAI, 'first_level_service_call',
side_effect=mock_first_level_service_call_response)
self.mock_first_level_service_call.start()
self.mock_resolve_vf_modules_for_generic_vnf = mock.patch.object(AAI, 'resolve_vf_modules_for_generic_vnf',
return_value=vfmodules_list)
self.mock_resolve_vf_modules_for_generic_vnf.start()
regions = list()
regions.append(region_response)
self.mock_resolve_cloud_regions_by_cloud_region_id = mock.patch.object(AAI,
'resolve_cloud_regions_by_cloud_region_id',
return_value=regions)
self.mock_resolve_cloud_regions_by_cloud_region_id.start()
self.mock_resolve_v_server_for_candidate = mock.patch.object(AAI, 'resolve_v_server_for_candidate',
return_value=v_server)
self.mock_resolve_v_server_for_candidate.start()
complex_link = {"link": "/aai/v14/cloud-infrastructure/complexes/complex/clli1", "d_value": 'clli1'}
self.mock_resolve_complex_info_link_for_v_server = mock.patch.object(AAI, 'resolve_complex_info_link_for_v_server',
return_value=complex_link)
self.mock_resolve_complex_info_link_for_v_server.start()
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_response)
self.mock_get_complex.start()
self.maxDiff = None
self.assertEqual(results_json, self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_get_complex(self):
complex_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_get_complex.json'
complex_json = json.loads(open(complex_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = complex_json
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
self.assertEqual({u'city': u'Middletown', u'latitude': u'28.543251', u'longitude': u'-81.377112', u'country': u'USA', u'region': u'SE'},
self.aai_ep._get_complex("/v10/complex/complex_id", "complex_id"))
def test_check_network_roles(self):
network_role_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_network_role.json'
network_role_json = json.loads(open(network_role_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = network_role_json
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
self.assertEqual(set(['test-cloud-value']) ,
self.aai_ep.check_network_roles("network_role_id"))
def test_check_candidate_role(self):
candidate_role_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_candidate_role.json'
candidate_role_json = json.loads(open(candidate_role_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = candidate_role_json
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
self.assertEqual("test-role",
self.aai_ep.check_candidate_role("candidate_host_id"))
def test_match_inventory_attributes(self):
template_attributes = dict()
template_attributes['attr-1'] = ['attr-1-value1', 'attr-1-value2']
inventory_attributes = dict()
inventory_attributes['attr-1'] = 'attr-1-value1'
self.assertEqual(True,
self.aai_ep.match_inventory_attributes(template_attributes, inventory_attributes, "candidate-id"))
template_attributes['attr-1'] = {
'not': ['attr-1-value2']
}
self.assertEqual(True,
self.aai_ep.match_inventory_attributes(template_attributes, inventory_attributes,
"candidate-id"))
template_attributes['attr-1'] = {
'not': ['attr-1-value1']
}
self.assertEqual(False,
self.aai_ep.match_inventory_attributes(template_attributes, inventory_attributes,
"candidate-id"))
def test_refresh_cache(self):
regions_response_file = './conductor/tests/unit/data/plugins/inventory_provider/cache_regions.json'
regions_response = json.loads(open(regions_response_file).read())
complex_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_cached_complex.json'
complex_json = json.loads(open(complex_json_file).read())
flavors_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_get_flavors.json'
flavors_json = json.loads(open(flavors_json_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = regions_response
self.mock_get_regions = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_regions.start()
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_json)
self.mock_get_complex.start()
self.mock_get_flavors = mock.patch.object(AAI, '_get_flavors',
return_value=flavors_json)
self.mock_get_flavors.start()
self.assertEqual(None,
self.aai_ep._refresh_cache())
def test_get_aai_rel_link(self):
relatonship_response_file = './conductor/tests/unit/data/plugins/inventory_provider/relationship_list.json'
relatonship_response = json.loads(open(relatonship_response_file).read())
related_to = "service-instance"
self.assertEqual("relationship-link",
self.aai_ep._get_aai_rel_link(relatonship_response, related_to))
def test_get_flavor(self):
flavors_json_file = './conductor/tests/unit/data/plugins/inventory_provider/_request_get_flavors.json'
flavors_json = json.loads(open(flavors_json_file).read())
response = mock.MagicMock()
response.json.return_value = None
self.mock_get_request = mock.patch.object(AAI, '_request',
return_value=response)
self.mock_get_request.start()
flavors_info = self.aai_ep._get_flavors("mock-cloud-owner",
"mock-cloud-region-id")
self.assertEqual(None, flavors_info)
response.status_code = 200
response.ok = True
response.json.return_value = flavors_json
flavors_info = self.aai_ep._get_flavors("mock-cloud-owner",
"mock-cloud-region-id")
self.assertEqual(2, len(flavors_info['flavor']))
def test_resolve_complex_info_link_for_v_server(self):
TraigeTranslator.collectDroppedCandiate = mock.MagicMock(return_value=None)
triage_translator_data = None
demand_name = 'vPGN'
service_type = 'vFW'
cloud_owner = 'CloudOwner'
cloud_region_id = 'RegionOne'
v_server_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_vserver.json'
v_server = json.loads(open(v_server_file).read())
region_response_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_region.json'
region_response = json.loads(open(region_response_file).read())
candidate_id = 'some_id'
location_id = 'some_location_id'
inventory_type = 'service'
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = region_response
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
link_rl_data = self.aai_ep.resolve_complex_info_link_for_v_server(candidate_id, v_server, None,
cloud_region_id, service_type,
demand_name, triage_translator_data)
self.assertEqual(None, link_rl_data)
complex_link = {"link": "/aai/v14/cloud-infrastructure/complexes/complex/clli1", "d_value": 'clli1'}
link_rl_data = self.aai_ep.resolve_complex_info_link_for_v_server(candidate_id, v_server, cloud_owner,
cloud_region_id, service_type,
demand_name, triage_translator_data)
self.assertEqual(complex_link, link_rl_data)
def test_build_complex_info_for_candidate(self):
TraigeTranslator.collectDroppedCandiate = mock.MagicMock(return_value=None)
triage_translator_data = None
demand_name = 'vPGN'
service_type = 'vFW'
complex_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_complex.json'
complex_response = json.loads(open(complex_file).read())
candidate = dict()
candidate['candidate_id'] = 'some_id'
candidate['location_id'] = 'some_location_id'
candidate['inventory_type'] = 'service'
initial_candidate = copy.deepcopy(candidate)
complex_list_empty = dict()
complex_list = list()
complex_list.append({"link": "/aai/v14/cloud-infrastructure/complexes/complex/clli1", "d_value": 'clli1'})
complex_list.append({"link": "/aai/v14/cloud-infrastructure/complexes/complex/clli2", "d_value": 'clli2'})
self.mock_get_complex = mock.patch.object(AAI, '_get_complex', return_value=complex_response)
self.mock_get_complex.start()
self.aai_ep.build_complex_info_for_candidate(candidate['candidate_id'], candidate['location_id'], None, complex_list_empty, candidate['inventory_type'], demand_name,
triage_translator_data)
self.assertEqual(initial_candidate, candidate)
self.assertEqual(1, TraigeTranslator.collectDroppedCandiate.call_count)
self.aai_ep.build_complex_info_for_candidate(candidate['candidate_id'], candidate['location_id'], None, complex_list, candidate['inventory_type'], demand_name,
triage_translator_data)
self.assertEqual(initial_candidate, candidate)
self.assertEqual(2, TraigeTranslator.collectDroppedCandiate.call_count)
complex_list.pop()
self.aai_ep.build_complex_info_for_candidate(candidate['candidate_id'], candidate['location_id'], None, complex_list, candidate['inventory_type'], demand_name,
triage_translator_data)
self.assertEqual(self.aai_ep.build_complex_info_for_candidate(candidate['candidate_id'], candidate['location_id'], None, complex_list, candidate['inventory_type'], demand_name,
triage_translator_data), {'city': u'example-city-val-27150', 'country': u'example-country-val-94173',
'region': u'example-region-val-13893', 'longitude': u'32.89948', 'state': u'example-state-val-59487',
'physical_location_id': 'clli1', 'latitude': u'example-latitude-val-89101',
'complex_name': u'clli1'})
self.assertEqual(2, TraigeTranslator.collectDroppedCandiate.call_count)
def test_resolve_vnf_parameters(self):
TraigeTranslator.collectDroppedCandiate = mock.MagicMock(return_value=None)
triage_translator_data = None
demand_name = 'vPGN'
service_type = 'vFW'
candidate = dict()
candidate_id = 'some_id'
location_id = 'some_location_id'
candidate['inventory_type'] = 'service'
generic_vnf_list_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_service_generic_vnf_list.json'
good_vnf = json.loads(open(generic_vnf_list_file).read())[0]
bad_generic_vnf_list_file = './conductor/tests/unit/data/plugins/inventory_provider/bad_generic_vnf_list.json'
bad_vnf = json.loads(open(bad_generic_vnf_list_file).read())[0]
region_response_file = './conductor/tests/unit/data/plugins/inventory_provider/vfmodule_region.json'
region_response = json.loads(open(region_response_file).read())
regions = list()
regions.append(region_response)
self.mock_get_regions = mock.patch.object(AAI, 'resolve_cloud_regions_by_cloud_region_id',
return_value=regions)
self.mock_get_regions.start()
good_cloud_info = self.aai_ep.resolve_cloud_for_vnf(candidate_id, location_id, good_vnf, service_type,
demand_name, triage_translator_data)
bad_cloud_info = self.aai_ep.resolve_cloud_for_vnf(candidate_id, location_id, bad_vnf, service_type,
demand_name, triage_translator_data)
self.assertEqual("CloudOwner", good_cloud_info['cloud_owner'])
self.assertEqual("RegionOne", good_cloud_info['location_id'])
self.assertEqual("1", good_cloud_info['cloud_region_version'])
self.assertIsNone(bad_cloud_info)
v_server_links = list()
v_server_links.append("/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/\
tenant/3c6c471ada7747fe8ff7f28e100b61e8/vservers/vserver/00bddefc-126e-4e4f-a18d-99b94d8d9a30")
v_server_links.append("/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner2/RegionOne2/tenants/\
tenant/3c6c471ada7747fe8ff7f28e100b61e8/vservers/vserver/00bddefc-126e-4e4f-a18d-99b94d8d9a31")
self.assertEqual(v_server_links, self.aai_ep.resolve_v_server_links_for_vnf(bad_vnf))
customer_id = 'Demonstration'
self.assertEqual(customer_id,
self.aai_ep.resolve_global_customer_id_for_vnf(candidate_id, location_id, good_vnf, customer_id, service_type,
demand_name,
triage_translator_data).get('d_value'))
self.assertEqual("3e8d118c-10ca-4b4b-b3db-089b5e9e6a1c",
self.aai_ep.resolve_service_instance_id_for_vnf(candidate_id, location_id, good_vnf, customer_id, service_type,
demand_name,
triage_translator_data).get('d_value'))
self.assertIsNone(self.aai_ep.resolve_service_instance_id_for_vnf(candidate_id, location_id, bad_vnf, customer_id, service_type,
demand_name, triage_translator_data))
def test_add_passthrough_parameters(self):
triage_translator_data = None
candidate = dict()
candidate['candidate_id'] = 'some_id'
candidate['location_id'] = 'some_location_id'
candidate['inventory_type'] = 'service'
parameters = dict()
parameters['param_one'] = "value"
parameters['param_two'] = "value"
candidate_info = copy.deepcopy(candidate)
candidate_info['passthrough_attributes'] = dict()
candidate_info['passthrough_attributes']['param_one'] = "value"
candidate_info['passthrough_attributes']['param_two'] = "value"
self.aai_ep.add_passthrough_attributes(candidate, parameters, 'demand')
self.assertDictEqual(candidate, candidate_info)
def test_match_candidate_by_list(self):
TraigeTranslator.collectDroppedCandiate = mock.MagicMock(return_value=None)
triage_translator_data = None
candidate = dict()
candidate['candidate_id'] = 'some_id'
candidate['location_id'] = 'some_location_id'
candidate['inventory_type'] = 'service'
candidate_list_empty = list()
candidate_list = list()
candidate_info = copy.deepcopy(candidate)
candidate_info['candidate_id'] = list()
candidate_info['candidate_id'].append(candidate['candidate_id'])
candidate_list.append(candidate_info)
self.assertFalse(self.aai_ep.match_candidate_by_list(candidate, candidate_list_empty, True, 'demand',
triage_translator_data)),
self.assertEqual(0, TraigeTranslator.collectDroppedCandiate.call_count)
self.assertTrue(self.aai_ep.match_candidate_by_list(candidate, candidate_list, True, 'demand',
triage_translator_data))
self.assertEqual(1, TraigeTranslator.collectDroppedCandiate.call_count)
self.assertTrue(self.aai_ep.match_candidate_by_list(candidate, candidate_list, False, 'demand',
triage_translator_data))
self.assertEqual(1, TraigeTranslator.collectDroppedCandiate.call_count)
self.assertFalse(self.aai_ep.match_candidate_by_list(candidate, candidate_list_empty, False, 'demand',
triage_translator_data))
self.assertEqual(2, TraigeTranslator.collectDroppedCandiate.call_count)
def test_match_hpa(self):
flavor_json_file = \
'./conductor/tests/unit/data/plugins/inventory_provider/hpa_flavors.json'
flavor_json = json.loads(open(flavor_json_file).read())
feature_json_file = \
'./conductor/tests/unit/data/plugins/inventory_provider/hpa_req_features.json'
feature_json = json.loads(open(feature_json_file).read())
candidate_json_file = './conductor/tests/unit/data/candidate_list.json'
candidate_json = json.loads(open(candidate_json_file).read())
candidate_json['candidate_list'][1]['flavors'] = flavor_json
flavor_map = {
"directives": [],
"flavor_map": {"flavor-id": "f5aa2b2e-3206-41b6-80d5-cf041b098c43",
"flavor-name": "flavor-cpu-pinning-ovsdpdk-instruction-set",
"score": 0}}
self.assertEqual(flavor_map,
match_hpa(candidate_json['candidate_list'][1],
feature_json[0]))
flavor_map = {"flavor_map": {"flavor-id": "f5aa2b2e-3206-41b6-80d5-cf041b098c43",
"flavor-name": "flavor-cpu-ovsdpdk-instruction-set",
"score": 10},
"directives": []}
self.assertEqual(flavor_map,
match_hpa(candidate_json['candidate_list'][1],
feature_json[1]))
flavor_map = {"flavor_map": {"flavor-id": "f5aa2b2e-3206-41b6-80d5-cf6t2b098c43",
"flavor-name": "flavor-ovsdpdk-cpu-pinning-sriov-NIC-Network-set",
"score": 13},
"directives": [{
"type": "sriovNICNetwork_directives",
"attributes": [
{
"attribute_name": "A",
"attribute_value": "a"
}
]
}]}
self.assertEqual(flavor_map,
match_hpa(candidate_json['candidate_list'][1],
feature_json[2]))
self.assertEqual(None, match_hpa(candidate_json['candidate_list'][1],
feature_json[3]))
flavor_map = {"flavor_map": {"flavor-id": "f5aa2b2e-3206-41b6-19d5-cf6t2b098c43",
"flavor-name": "flavor-ovsdpdk-cpu-pinning-double-sriov-NIC-Network-set",
"score": 6},
"directives": [
{
"type": "sriovNICNetwork_directives",
"attributes": [
{
"attribute_name": "A",
"attribute_value": "a"
}
]
},
{
"type": "sriovNICNetwork_directives",
"attributes": [
{
"attribute_name": "B",
"attribute_value": "b"
}
]
}]
}
self.assertEqual(flavor_map, match_hpa(candidate_json['candidate_list'][1],
feature_json[4]))
self.assertEqual(None, match_hpa(candidate_json['candidate_list'][1],
feature_json[5]))
def test_filter_nssi_candidates(self):
nssi_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_response.json'
nssi_response = json.loads(open(nssi_response_file).read())
slice_profile_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_slice_profile.json'
slice_profile = json.loads(open(slice_profile_file).read())
nssi_candidates_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_candidate.json'
nssi_candidates = json.loads(open(nssi_candidates_file).read())
self.mock_get_profiles = mock.patch.object(AAI, 'get_profile_instances', return_value=[slice_profile])
self.mock_get_profiles.start()
service_role = 'nssi'
second_level_filter = dict()
second_level_filter['service-role'] = service_role
default_attributes = dict()
default_attributes['creation_cost'] = 1
self.assertEqual(nssi_candidates, self.aai_ep.filter_nxi_candidates(nssi_response, second_level_filter,
default_attributes, "true", service_role))
nssi_response['service-instance'][0]['service-role'] = 'service'
self.assertEqual([], self.aai_ep.filter_nxi_candidates(nssi_response, second_level_filter, default_attributes,
"true", service_role))
self.assertEqual([], self.aai_ep.filter_nxi_candidates(None, second_level_filter, default_attributes,
"true", service_role))
self.assertEqual([], self.aai_ep.filter_nxi_candidates(None, None, default_attributes, "true", service_role))
self.assertEqual(nssi_candidates, self.aai_ep.filter_nxi_candidates(nssi_response, None, default_attributes,
"true", service_role))
del nssi_candidates[0]['creation_cost']
self.assertEqual(nssi_candidates, self.aai_ep.filter_nxi_candidates(nssi_response, None, None, "true",
service_role))
def test_resolve_demands_inventory_type_nssi(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
nssi_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_response.json'
nssi_response = json.loads(open(nssi_response_file).read())
slice_profile_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_slice_profile.json'
slice_profile = json.loads(open(slice_profile_file).read())
nssi_candidates_file = './conductor/tests/unit/data/plugins/inventory_provider/nssi_candidate.json'
nssi_candidates = json.loads(open(nssi_candidates_file).read())
result = dict()
result['embb_cn'] = nssi_candidates
self.mock_get_nxi_candidates = mock.patch.object(AAI, 'get_nxi_candidates',
return_value=nssi_response)
self.mock_get_nxi_candidates.start()
self.mock_get_profiles = mock.patch.object(AAI, 'get_profile_instances', return_value=[slice_profile])
self.mock_get_profiles.start()
self.assertEqual(result, self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_filter_nsi_candidates(self):
nsi_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_response.json'
nsi_response = json.loads(open(nsi_response_file).read())
nsi_candidates_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_candidate.json'
nsi_candidates = json.loads(open(nsi_candidates_file).read())
service_profile_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_service_profile.json'
service_profile = json.loads(open(service_profile_file).read())
self.mock_get_profiles = mock.patch.object(AAI, 'get_profile_instances', return_value=[service_profile])
self.mock_get_profiles.start()
service_role = 'nsi'
second_level_filter = dict()
second_level_filter['service-role'] = service_role
default_attributes = dict()
default_attributes['creation_cost'] = 1
self.assertEqual(nsi_candidates, self.aai_ep.filter_nxi_candidates(nsi_response, second_level_filter,
default_attributes, "true", service_role))
nsi_response['service-instance'][0]['service-role'] = 'service'
self.assertEqual([], self.aai_ep.filter_nxi_candidates(nsi_response, second_level_filter, default_attributes,
"true", service_role))
def test_resolve_demands_inventory_type_nsi(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
nsi_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_response.json'
nsi_response = json.loads(open(nsi_response_file).read())
nsi_candidates_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_candidate.json'
nsi_candidates = json.loads(open(nsi_candidates_file).read())
result = dict()
result['embb_nst'] = nsi_candidates
service_profile_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_service_profile.json'
service_profile = json.loads(open(service_profile_file).read())
self.mock_get_profiles = mock.patch.object(AAI, 'get_profile_instances', return_value=[service_profile])
self.mock_get_profiles.start()
self.mock_get_nxi_candidates = mock.patch.object(AAI, 'get_nxi_candidates',
return_value=nsi_response)
self.mock_get_nxi_candidates.start()
self.maxDiff = None
self.assertEqual(result, self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_get_nst_candidates(self):
nst_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nst_response.json'
nst_response = json.loads(open(nst_response_file).read())
        second_level_filter = None
default_attributes = dict()
default_attributes['creation_cost'] = 1
self.assertEqual("5d345ca8-1f8e-4f1e-aac7-6c8b33cc33e7", self.aai_ep.get_nst_candidates(nst_response, second_level_filter,
default_attributes, "true", "nst").__getitem__(0).__getattribute__('candidate_id'))
def test_resolve_demands_inventory_type_nst(self):
self.aai_ep.conf.HPA_enabled = True
TraigeTranslator.getPlanIdNAme = mock.MagicMock(return_value=None)
TraigeTranslator.addDemandsTriageTranslator = mock.MagicMock(return_value=None)
plan_info = {
'plan_name': 'name',
'plan_id': 'id'
}
triage_translator_data = None
demands_list_file = './conductor/tests/unit/data/plugins/inventory_provider/nst_demand_list.json'
demands_list = json.loads(open(demands_list_file).read())
nst_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nst_response.json'
nst_response = json.loads(open(nst_response_file).read())
final_nst_candidates_file = './conductor/tests/unit/data/plugins/inventory_provider/final_nst_candidate.json'
final_nst_candidates = json.loads(open(final_nst_candidates_file).read())
result = dict()
result['embb_nst'] = final_nst_candidates
self.mock_get_nst_candidates = mock.patch.object(AAI, 'get_nst_response',
return_value=nst_response)
self.mock_get_final_nst_candidates = mock.patch.object(SDC, 'update_candidates',
return_value=final_nst_candidates)
self.mock_get_nst_candidates.start()
self.mock_get_final_nst_candidates.start()
self.maxDiff = None
self.assertEqual(result, self.aai_ep.resolve_demands(demands_list, plan_info=plan_info,
triage_translator_data=triage_translator_data))
def test_get_aai_data(self):
nst_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nst_response.json'
nst_response = json.loads(open(nst_response_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = nst_response
self.mock_get_request = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_request.start()
        filtering_attr = {"model-role": "NST"}
        self.assertEqual(nst_response, self.aai_ep.get_nst_response(filtering_attr))
def test_get_profile_instances(self):
nsi_response_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_response.json'
nsi_response = json.loads(open(nsi_response_file).read())
service_profile_file = './conductor/tests/unit/data/plugins/inventory_provider/nsi_service_profile.json'
service_profile = json.loads(open(service_profile_file).read())
response = mock.MagicMock()
response.status_code = 200
response.ok = True
response.json.return_value = service_profile
self.mock_get_profiles = mock.patch.object(AAI, '_request', return_value=response)
self.mock_get_profiles.start()
        self.assertEqual([service_profile], self.aai_ep.get_profile_instances(nsi_response["service-instance"][0]))
|
StarcoderdataPython
|
6636632
|
<filename>space/const.py
from util import *
# gravitational constant
G = 6.673e-11
# gravitational acceleration at earth surface
g = 9.81
# stefan-boltzmann constant
sigma = 5.67e-8
GM=namespace('Gravitational constant times body mass [???]')
GM.sun = 1.327e20
GM.earth = 3.986e14
GM.moon = 4.903e12
GM.mercury = 2.094e13
GM.venus = 3.249e14
GM.mars = 4.269e13
GM.jupiter = 1.267e17
mu=GM
S=namespace('Solar irradiation constant [W/m2]')
S.earth = 1.361e3
M=namespace('Mass [kg]')
M.moon = 7.348e22
M.earth = 5.973e24
M.mars = 6.417e23
AU = 1.496e11
DAU=namespace('Distance to central body [AU]')
DAU.mercury = 0.387
DAU.venus = 0.723
DAU.earth = 1.0
DAU.mars = 1.524
DAU.jupiter = 5.204
D=namespace('Distance to central body [m]')
D.moon = 3.844e8
D.mercury = DAU.mercury * AU
D.venus = DAU.venus * AU
D.earth = AU
D.mars = DAU.mars * AU
D.jupiter = DAU.jupiter * AU
R=namespace('Radius (equatorial) [m]')
R.sun = 6.955e8
R.earth = 6.378e6
R.moon = 1.738e6
R.mars = 3.396e6
SRP=namespace('Sidereal Rotation Period [s]')
SRP.earth = to_seconds(23,56,4)
SRP.mars = to_seconds(24,37,22)
|
StarcoderdataPython
|
8167559
|
<reponame>henryleberre/MFC-develop<filename>samples/1D_exp_bubscreen/case.py
#!/usr/bin/env python3
import math
import json
x0 = 17.E-05
p0 = 101325.
rho0 = 1.E+03
c0 = math.sqrt( p0/rho0 )
patm = 1.
#water props
## AKA little \gamma (see coralic 2014 eq'n (13))
n_tait = 7.1
## AKA little \pi(see coralic 2014 eq'n (13))
B_tait = 306.E+06 / p0
mul0 = 1.002E-03 #viscosity
# mul0 = 1.E-12
ss = 0.07275 #surface tension
# ss = 1.E-12 ## this would turn-off surface tension
pv = 2.3388E+03 #vapor pressure
# water
# These _v and _n parameters ONLY correspond to the bubble model of Preston (2010 maybe 2008)
# (this model would replace the usual Rayleigh-Plesset or Keller-Miksis model (it's more complicated))
#gamma_v = 1.33
#M_v = 18.02
#mu_v = 0.8816E-05
#k_v = 0.019426
##air props
#gamma_n = 1.4
#M_n = 28.97
#mu_n = 1.8E-05
#k_n = 0.02556
#air props
gamma_gas = 1.4
#reference bubble size
R0ref = 17.E-05
pa = 0.1 * 1.E+06 / 101325.
#Characteristic velocity
uu = math.sqrt( p0/rho0 )
#Cavitation number
# Ca = (p0 - pv)/(rho0*(uu**2.))
Ca = 1
#Weber number
We = rho0*(uu**2.)*R0ref/ss
#Inv. bubble Reynolds number
Re_inv = mul0/(rho0*uu*R0ref)
#IC setup
vf0 = 0.0023
# vf0 = 1.E-6
n0 = vf0/(math.pi*4.E+00/3.E+00)
cphysical = 1475.
t0 = x0/c0
nbubbles = 1
myr0 = R0ref
# CFL number should be < 1 for numerical stability
# CFL = speed of sound * dt/dx
cfl = 0.2
Nx = 500
Ldomain = 0.3
L = Ldomain/x0
dx = L/float(Nx)
dt = cfl*dx/(cphysical/c0)
Lpulse = 0.3*Ldomain
Tpulse = Lpulse/cphysical
Tfinal = 0.3*0.25*10.*Tpulse*c0/x0
Nt = int(Tfinal/dt)
Nfiles = 100.
Nout = int(math.ceil(Nt/Nfiles))
Nt = int(Nout*Nfiles)
# Configuring case dictionary
print(json.dumps({
# Logistics ================================================
'case_dir' : '\'.\'',
'run_time_info' : 'T',
# ==========================================================
# Computational Domain Parameters ==========================
'x_domain%beg' : -0.15/x0,
'x_domain%end' : 0.15/x0,
'stretch_x' : 'F',
'cyl_coord' : 'F',
'm' : Nx,
'n' : 0,
'p' : 0,
'dt' : dt,
't_step_start' : 0,
't_step_stop' : Nt,
't_step_save' : Nout,
# ==========================================================
# Simulation Algorithm Parameters ==========================
'num_patches' : 2,
'model_eqns' : 2,
'alt_soundspeed' : 'F',
'num_fluids' : 1,
'adv_alphan' : 'T',
'mpp_lim' : 'F',
'mixture_err' : 'F',
'time_stepper' : 3,
'weno_vars' : 2,
'weno_order' : 5,
'weno_eps' : 1.E-16,
'mapped_weno' : 'T',
'null_weights' : 'F',
'mp_weno' : 'T',
'riemann_solver' : 2,
'wave_speeds' : 1,
'avg_state' : 2,
'bc_x%beg' : -8,
'bc_x%end' : -8,
# ==========================================================
# Formatted Database Files Structure Parameters ============
'format' : 1,
'precision' : 2,
'prim_vars_wrt' :'T',
'parallel_io' :'T',
'fd_order' : 1,
#'schlieren_wrt' :'T',
'probe_wrt' :'T',
'num_probes' : 1,
'probe(1)%x' : 0.,
# ==========================================================
# Patch 1 _ Background =====================================
# this problem is 1D... so based on the dimension of the problem
# you have different 'geometries' available to you
# e.g. in 3D you might have spherical geometries
# and rectangular ones
# in 1D (like here)... there is only one option {#1}... which is a
# line
'patch_icpp(1)%geometry' : 1,
'patch_icpp(1)%x_centroid' : 0.,
'patch_icpp(1)%length_x' : 0.3/x0,
'patch_icpp(1)%vel(1)' : 0.0,
'patch_icpp(1)%pres' : patm,
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
'patch_icpp(1)%alpha_rho(1)' : (1.-1.E-12)*(1.E+03/rho0),
# \alpha_1 here is always (for num_fluids = 1 and bubbles=True)
# \alpha is always the void fraction of bubbles (usually << 1)
'patch_icpp(1)%alpha(1)' : 1.E-12,
# dimensionless initial bubble radius
'patch_icpp(1)%r0' : 1.,
# dimensionless initial velocity
'patch_icpp(1)%v0' : 0.0E+00,
# ==========================================================
# Patch 2 Screen ===========================================
'patch_icpp(2)%geometry' : 1,
#overwrite the part in the middle that was the
#background (no bubble) area
'patch_icpp(2)%alter_patch(1)' : 'T',
'patch_icpp(2)%x_centroid' : 0.,
'patch_icpp(2)%length_x' : 0.08/x0,
'patch_icpp(2)%vel(1)' : 0.0,
'patch_icpp(2)%pres' : patm,
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
# in the screen case, you have \alpha_1 = 1 - \alpha_bubbles = 1 - vf0
'patch_icpp(2)%alpha_rho(1)' : (1.-vf0)*1.E+03/rho0,
# void fraction of bubbles
'patch_icpp(2)%alpha(1)' : vf0,
'patch_icpp(2)%r0' : 1.,
'patch_icpp(2)%v0' : 0.0E+00,
# ==========================================================
# Fluids Physical Parameters ===============================
# Surrounding liquid
'fluid_pp(1)%gamma' : 1.E+00/(n_tait-1.E+00),
'fluid_pp(1)%pi_inf' : n_tait*B_tait/(n_tait-1.),
# 'fluid_pp(1)%mul0' : mul0,
# 'fluid_pp(1)%ss' : ss,
# 'fluid_pp(1)%pv' : pv,
# 'fluid_pp(1)%gamma_v' : gamma_v,
# 'fluid_pp(1)%M_v' : M_v,
# 'fluid_pp(1)%mu_v' : mu_v,
# 'fluid_pp(1)%k_v' : k_v,
# Last fluid_pp is always reserved for bubble gas state ===
# if applicable ==========================================
'fluid_pp(2)%gamma' : 1./(gamma_gas-1.),
'fluid_pp(2)%pi_inf' : 0.0E+00,
# 'fluid_pp(2)%gamma_v' : gamma_n,
# 'fluid_pp(2)%M_v' : M_n,
# 'fluid_pp(2)%mu_v' : mu_n,
# 'fluid_pp(2)%k_v' : k_n,
# ==========================================================
# Non-polytropic gas compression model AND/OR Tait EOS =====
'pref' : p0,
'rhoref' : rho0,
# ==========================================================
# Bubbles ==================================================
'bubbles' : 'T',
# in user guide... 1 = gilbert 2 = keller-miksis
# but gilbert won't work for the equations that you are using... (i think)
'bubble_model' : 2,
    # polytropic: this is where the difference between Rayleigh--Plesset and
# Preston's model shows up. polytropic = False means complicated Preston model
# = True means simpler Rayleigh--Plesset model
# if polytropic == False then you will end up calling s_initialize_nonpoly in
# m_global_parameters.f90 in both the pre_process and simulation_code
'polytropic' : 'T',
'polydisperse' : 'F',
#'poly_sigma' : 0.3,
# only matters if polytropic = False (complicated model)
'thermal' : 3,
# only matters if polytropic = False (complicated model)
'R0ref' : myr0,
'nb' : 1,
# cavitation number (has something to do with the ratio of gas to vapour in the bubble)
# this is usually near 1
# can set = 1 for testing purposes
'Ca' : Ca,
# weber number (corresponds to surface tension)
'Web' : We,
    # inverse reynolds number (corresponds to viscosity)
'Re_inv' : Re_inv,
# ==========================================================
# Acoustic source ==========================================
'Monopole' : 'T',
'num_mono' : 1,
'Mono(1)%loc(1)' : -0.05/x0,
'Mono(1)%npulse' : 1,
'Mono(1)%dir' : 1.,
'Mono(1)%pulse' : 1,
'Mono(1)%mag' : 0.001,
'Mono(1)%length' : (1./(30000.))*cphysical/x0,
# ==========================================================
}))
# ==============================================================================
|
StarcoderdataPython
|
3565093
|
# Working with classes and objects
# Objects and classes are important when it comes to working in the Python
# language. Objects help define the different parts of the code and keep them
# all organized and easy to understand, while classes work as the containers
# for objects, grouping objects that are similar to one another so that the
# code works better.
# A CLASS IS THE CONTAINER OF OBJECTS
# AN OBJECT IS THE PART THAT CONTAINS THE FUNCTIONAL CODE
# How do I create a new class?
# The first thing to look at is how to create a class inside Python, and
# luckily this is a fairly simple process to work with. When writing the
# statement for a class, you should also take the time to create a new
# definition. Place the class name right after the class keyword, and then
# its superclass goes inside the parentheses.
class Vehicle(object):
    # constructor
    def __init__(self, steering, wheels, clutch, breaks, gears):
        self._steering = steering
        self._wheels = wheels
        self._clutch = clutch
        self._breaks = breaks
        self._gears = gears
    # destructor
    def __del__(self):
        print("This is the destructor....")
    # member functions or methods
    def Display_Vehicle(self):
        print('Steering:', self._steering)
        print('Wheels:', self._wheels)
        print('Clutch:', self._clutch)
        print('Breaks:', self._breaks)
        print('Gears:', self._gears)
# instantiate a vehicle object
myGenericVehicle = Vehicle('Power Steering', 4, 'Super Clutch', 'Disk Breaks', 5)
myGenericVehicle.Display_Vehicle()
# Class definition
# You will need the class definition and the object instantiation as part of
# the class syntax. These help tell the interpreter what is going on and give
# it the commands that are needed. To invoke the class definition within your
# code, you only have to add the function call object.method() or the
# object.attribute access to help do this.
object.method()
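# For instance (an illustrative sketch added here, simply reusing the Vehicle
# instance created above): a method is invoked with parentheses, while an
# attribute is read directly by name.
print(myGenericVehicle._wheels)     # attribute access -> 4
myGenericVehicle.Display_Vehicle()  # method call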
# Special attributes to add in the code
# There are some special attributes that are recognized right inside Python
# code. It is a good idea to learn what these are about because they make it
# easier to work with any code you want. It is also reassuring to know that
# the interpreter will see these attributes and know how to use them within
# the code. Some of the attributes that matter when working in Python are:
# __bases__: a tuple containing the superclasses of the class.
# __module__: holds the name of the module in which the class is defined.
# __name__: holds the name of the class.
# __doc__: holds the documentation string (docstring) of the class.
# __dict__: the dictionary variable that holds the class namespace.
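# A minimal sketch (added for illustration, not part of the original lesson)
# showing how these special attributes can be read off a class; the Example
# class below is a made-up placeholder:
class Example(object):
    """A tiny class used only to inspect the special attributes."""
    pass
print(Example.__name__)    # 'Example'
print(Example.__doc__)     # the docstring written above
print(Example.__bases__)   # (<class 'object'>,)
print(Example.__module__)  # '__main__' when run as a script
print(Example.__dict__)    # the mapping that holds the class namespace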
# Accessing the members of your class
# There are a few different options you are able to use when it comes to
# accessing the members that are inside the classes you are working with.
# And while all of them will work, going with accessor methods is seen as the
# best choice because it lets you encapsulate, or hide, the information behind
# a clear interface, which keeps things simple and makes sure you are able to
# read the code easily later on. A good example of how this works includes:
class Cat(object):
    itsAge = None
    itsWeight = None
    itsName = None
    # set accessor functions used to assign values to the fields or member vars
    def setItsAge(self, itsAge):
        self.itsAge = itsAge
    def setItsWeight(self, itsWeight):
        self.itsWeight = itsWeight
    def setItsName(self, itsName):
        self.itsName = itsName
    # get accessor functions used to return the values from a field
    def getItsAge(self):
        return self.itsAge
    def getItsWeight(self):
        return self.itsWeight
    def getItsName(self):
        return self.itsName
objFrisky = Cat()
objFrisky.setItsAge(5)
objFrisky.setItsWeight(10)
objFrisky.setItsName("Frisky")
print("Cat's Name is:", objFrisky.getItsName())
print("Its age is:", objFrisky.getItsAge())
print("Its weight is:", objFrisky.getItsWeight())
|
StarcoderdataPython
|
8161232
|
/home/runner/.cache/pip/pool/f3/c8/d9/1f377645cbb76e873071fee60b3d23efac83c822e9b7867e11df7ae58f
|
StarcoderdataPython
|
110260
|
<reponame>blackw1ng/FritzBox-monitor
#!/opt/bin/python3
from fritzconnection import FritzConnection
import sys
FRITZBOX_USER = "monitoring"
FRITZBOX_PASSWORD = "<PASSWORD>"
try:
fc = FritzConnection(address='192.168.178.1', user=FRITZBOX_USER, password=<PASSWORD>, timeout=2.0)
except BaseException:
print("Cannot connect to fritzbox.")
sys.exit(1)
def readout(module, action, variable=None, show=False, numeric=True):
'''
Generic readout function, that wraps values in a json-compliant way.
:module: TR-064 sub-modules, such as 'WANIPConn1'
:action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-04 (cf. https://avm.de/service/schnittstellen/)
:variable: (optional) a specific variable out of this set to extract
:show: print variable name
:numeric: cast value to numeric
'''
try:
answer_dict = fc.call_action(module, action)
except BaseException:
print(f"Could not query {module} with action {action}")
raise
# cast the 64 bit traffic counters into int
if action == "GetAddonInfos":
answer_dict['NewX_AVM_DE_TotalBytesSent64'] = int(answer_dict['NewX_AVM_DE_TotalBytesSent64'])
answer_dict['NewX_AVM_DE_TotalBytesReceived64'] = int(answer_dict['NewX_AVM_DE_TotalBytesReceived64'])
if variable:
# single variable extraction mode
answer_dict = str(answer_dict[variable])
# FIXME: try type-conversion to int, then fallback to string.
if not numeric:
answer_dict = '"' + answer_dict + '"'
if show:
answer_dict = '"' + variable + '": ' + answer_dict
else:
# remove unwanted keys in a safe way
entitiesToRemove = ('NewAllowedCharsSSID', 'NewDNSServer1', 'NewDNSServer2', 'NewVoipDNSServer1',
'NewVoipDNSServer2', 'NewATURVendor', 'NewATURCountry', 'NewDeviceLog')
entitiesToRemove = [answer_dict.pop(k, None) for k in entitiesToRemove]
# cast to string, omit the {} without a regex :)
answer_dict = str(answer_dict)[1:-1]
# handle stupid naming of counters in LAN, so we can use grouping in grafana...
answer_dict = answer_dict.replace("NewBytes", "NewTotalBytes")
answer_dict = answer_dict.replace("NewPackets", "NewTotalPackets")
# ugly string-cast to a dictionary that has json compliant "
flattened_string = answer_dict.replace("'", '"').replace("True", "true").replace("False", "false")
return flattened_string
def assemble(*args):
# ugly hack json array constructor.
json_dict = '\t "v": {' + ', '.join(list(args)) + "}"
print(json_dict)
def add_device_tag(starting=False):
if starting:
comma = ""
else:
comma = ","
print('\t' + comma + '{"box": "' + deviceinfo + '",')
def add_interface_tag(interface):
print('\t"interface": "' + interface + '",')
def end_device():
print('\t}')
#############
# tag every measurement by fritzbox serial number
deviceinfo = readout('DeviceInfo1', 'GetInfo', 'NewSerialNumber')
# list of measurements - so telegraf puts them in separate lines
print('[')
# box generic info
add_device_tag(starting=True)
uptime = readout('DeviceInfo1', 'GetInfo', 'NewUpTime', show=True)
version = readout('DeviceInfo1', 'GetInfo', 'NewDescription', show=True, numeric=False)
dhcp_leases = readout('Hosts1', 'GetHostNumberOfEntries', show=True)
assemble(uptime, version, dhcp_leases)
end_device()
# tag list by box & interface
add_device_tag()
add_interface_tag("wan")
status = readout('WANIPConn1', 'GetStatusInfo')
link = readout('WANCommonIFC1', 'GetCommonLinkProperties')
my_ip = readout('WANIPConn', 'GetExternalIPAddress')
my_ipv6 = readout('WANIPConn', 'X_AVM_DE_GetExternalIPv6Address', 'NewExternalIPv6Address', show=True, numeric=False)
# my_ipv6_prefix = readout('WANIPConn','X_AVM_DE_GetIPv6Prefix','NewIPv6Prefix', show=False)+"/"+
# readout('WANIPConn','X_AVM_DE_GetIPv6Prefix','NewPrefixLength', show=False)
info = readout('WANDSLInterfaceConfig1', 'GetInfo')
traffic = readout('WANCommonIFC1', 'GetAddonInfos')
assemble(status, link, my_ip, my_ipv6, info, traffic)
end_device()
# check dect
add_device_tag()
add_interface_tag("dect")
registered = readout('X_AVM-DE_Dect1', 'GetNumberOfDectEntries')
assemble(registered)
end_device()
# check voip
add_device_tag()
add_interface_tag("voip")
registered = readout('X_VoIP1', 'X_AVM-DE_GetNumberOfNumbers')
assemble(registered)
end_device()
# tag list for the other networks
for i, interface in enumerate(['lan', 'wlan24', 'wlan5', 'wlanGuest']):
add_device_tag()
add_interface_tag(interface)
if i == 0:
assemble(readout('LANEthernetInterfaceConfig1', 'GetStatistics'))
else:
stats = readout('WLANConfiguration'+str(i), 'GetStatistics')
associations = readout('WLANConfiguration'+str(i), 'GetTotalAssociations')
info = readout('WLANConfiguration'+str(i), 'GetInfo', "NewChannel", show=True)
assemble(stats, associations, info)
end_device()
print("]")
|
StarcoderdataPython
|
3459481
|
"""kytos - The kytos command line.
You are at the "bug-report" command.
Usage:
kytos bug-report
kytos bug-report -h | --help
Options:
-h, --help Show this screen.
"""
import sys
from docopt import docopt
from kytos.cli.commands.bug_report.api import BugReportAPI
from kytos.utils.exceptions import KytosException
def parse(argv):
"""Parse cli args."""
args = docopt(__doc__, argv=argv)
try:
BugReportAPI.bug_report(args)
except KytosException as exception:
print("Error parsing args: {}".format(exception))
sys.exit(-1)
|
StarcoderdataPython
|
5048463
|
<filename>docs_src/options/autocompletion/tutorial006.py
from typing import List
import typer
def main(name: List[str] = typer.Option(["World"], help="The name to say hi to.")):
for each_name in name:
typer.echo(f"Hello {each_name}")
if __name__ == "__main__":
typer.run(main)
|
StarcoderdataPython
|
9625220
|
<reponame>ua-data7/placeholder
from datetime import datetime
from astm.mapping import (
Record, ConstantField, DateTimeField, IntegerField, NotUsedField,
TextField, RepeatedComponentField, Component
)
QuidelHeaderRecord = Record.build(
ConstantField(name='type', default='H'), # 1
RepeatedComponentField(Component.build(
ConstantField(name='_', default=''),
TextField(name='__')
), name='delimeter', default=[[], ['', '&']]), # 2
NotUsedField(name='unused01'), # 3
NotUsedField(name='unused02'), # 4
TextField(name='instrument'), # 5
NotUsedField(name='unused03'), # 6
NotUsedField(name='unused03'), # 7
NotUsedField(name='unused03'), # 8
NotUsedField(name='unused03'), # 9
NotUsedField(name='unused03'), # 10
NotUsedField(name='unused03'), # 11
ConstantField(name='processing_id', default='P'), # 12
TextField(name='firmware'), # 13
DateTimeField(name='timestamp', default=datetime.now, required=True), # 14
)
QuidelPatientRecord = Record.build(
ConstantField(name='type', default='P'), # 1
IntegerField(name='seq', default=1, required=True), # 2
TextField(name='patient_id'), # 3
NotUsedField(name='unused00'), # 4
NotUsedField(name='unused01'),
NotUsedField(name='unused02'),
NotUsedField(name='unused03'),
NotUsedField(name='unused04'),
NotUsedField(name='unused05'),
NotUsedField(name='unused06'), # 10
NotUsedField(name='unused07'),
NotUsedField(name='unused08'),
NotUsedField(name='unused09'),
NotUsedField(name='unused10'),
NotUsedField(name='unused11'),
NotUsedField(name='unused12'),
NotUsedField(name='unused13'),
NotUsedField(name='unused14'),
NotUsedField(name='unused15'),
NotUsedField(name='unused16'), # 20
NotUsedField(name='unused17'),
NotUsedField(name='unused18'),
NotUsedField(name='unused19'),
NotUsedField(name='unused20'),
NotUsedField(name='unused21'), # 25
TextField(name='location'),
)
QuidelOrderRecord = Record.build(
ConstantField(name='type', default='O'), # 1
IntegerField(name='seq', default=1, required=True), # 2
TextField(name='order_id'), # 3
NotUsedField(name='unused00'), # 4
TextField(name='test_type'), # 5
NotUsedField(name='unused01'),
NotUsedField(name='unused02'),
NotUsedField(name='unused03'),
NotUsedField(name='unused04'),
NotUsedField(name='unused05'), # 10
TextField(name='operator_id'), # 11
NotUsedField(name='unused06'), # 12
NotUsedField(name='unused07'), # 13
NotUsedField(name='unused08'), # 14
NotUsedField(name='unused09'), # 15
TextField(name='sample_type'), # 16
)
QuidelCommentRecord = Record.build(
ConstantField(name='type', default='C'),
IntegerField(name='seq', default=1, required=True),
NotUsedField(name='unused00'),
TextField(name='sample_comment'),
)
QuidelResultRecord = Record.build(
ConstantField(name='type', default='R'), # 1
IntegerField(name='seq', default=1, required=True), # 2
TextField(name='analyte_name'), # 3
TextField(name='test_value'), # 4
TextField(name='test_units'), # 5
TextField(name='test_range'), # 6
TextField(name='test_flag'), # 7
NotUsedField(name='unused00'), # 8
TextField(name='test_type'), # 9
NotUsedField(name='unused01'), # 10
NotUsedField(name='unused02'), # 11
NotUsedField(name='unused03'), # 12
DateTimeField(name='completion'), #13
)
|
StarcoderdataPython
|
6682230
|
<filename>setup.py
from setuptools import setup, find_packages
from os.path import dirname, realpath, join
CURRENT_DIR = dirname(realpath(__file__))
with open(join(CURRENT_DIR, "README.md")) as long_description_file:
long_description = long_description_file.read()
setup(
name="Flask-RRBAC",
version="0.3.0",
url="https://github.com/dhruv-aggarwal/flask-rrbac",
author="<NAME>",
author_email="<EMAIL>",
description="Role Route Based Access Control support for Flask",
long_description=long_description,
license='MIT',
zip_safe=False,
packages=find_packages(exclude=["docs", "tests*", "source", "build"]),
include_package_data=True,
platforms="any",
install_requires=["Flask>=0.10"],
keywords='flask access control acl rbac',
python_requires='~=2.6',
classifiers=[
"Framework :: Flask",
"Environment :: Web Environment",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
'Topic :: Software Development :: Build Tools',
],
project_urls={
'Documentation':
'https://github.com/dhruv-aggarwal/flask-rrbac/blob/master/README.md',
'Source': 'https://github.com/dhruv-aggarwal/flask-rrbac',
'Tracker': 'https://github.com/dhruv-aggarwal/flask-rrbac/issues',
},
)
|
StarcoderdataPython
|
222653
|
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, Row
conf = SparkConf().setAppName("clo2016")
sc = SparkContext(conf=conf)
sqlc = SQLContext(sc)
df = sqlc.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load("/home/oxclo/datafiles/practices/*.csv")
print df.rdd.count()
simpler = df.rdd.map(lambda x: (x.postcode.split()[0], 1))
nums = simpler.countByKey()
print "OX1", nums['OX1']
print "SW11", nums['SW11']
|
StarcoderdataPython
|
1796781
|
# -*- coding: utf8 -*-
import sys
from traceback import format_exception
from tuttle.error import TuttleError
from tuttle.report.dot_repport import create_dot_report
from tuttle.report.html_repport import create_html_report
from pickle import dump, load
from tuttle.workflow_runner import WorkflowRunner, TuttleEnv
from tuttle_directories import TuttleDirectories
from tuttle.log_follower import LogsFollower
from tuttle.version import version
class ProcessDependencyIterator:
""" Provides an iterator on processes according to dependency order"""
def __init__(self, workflow):
self._resources_to_build = {r for r in workflow.iter_resources() if r.creator_process}
self._processes_to_run = {p for p in workflow.iter_processes()}
def all_inputs_built(self, process):
""" Returns True if all inputs of this process where build, ie if the process can be executed """
for input_res in process.iter_inputs():
if input_res in self._resources_to_build:
return False
return True
def pick_a_process(self):
""" Pick an executable process, if there is one
"""
for process in self._processes_to_run:
if self.all_inputs_built(process):
return process
# No more process to pick
return None
def iter_processes(self):
# The idea is to remove the resource from the list as we simulate execution of _processes
p = self.pick_a_process()
while p:
for r in p.iter_outputs():
self._resources_to_build.remove(r)
self._processes_to_run.remove(p)
yield p
p = self.pick_a_process()
def remaining(self):
return self._processes_to_run
class Workflow:
""" A workflow is a dependency tree of processes
"""
def __init__(self, resources):
self._processes = []
self._preprocesses = []
self._resources = resources
self._signatures = {}
self.tuttle_version = version
def add_process(self, process):
""" Adds a process
:param process:
:return:
"""
self._processes.append(process)
def add_preprocess(self, preprocess):
""" Adds a preprocess
:param preprocess:
:return:
"""
self._preprocesses.append(preprocess)
def iter_processes(self):
for process in self._processes:
yield process
def iter_preprocesses(self):
for preprocess in self._preprocesses:
yield preprocess
def nb_resources(self):
return len(self._resources)
def iter_resources(self):
return self._resources.itervalues()
def has_preprocesses(self):
""" Has preprocesses ?
:return: True if the workflow has preprocesses
"""
return len(self._preprocesses) > 0
def primary_inputs_not_available(self):
""" Check that all primary resources (external resources) that are necessary to run the workflow are available
:return: a list of missing resources
:rtype: list
"""
missing = []
for resource in self._resources.itervalues():
if resource.is_primary():
if not self.resource_available(resource.url):
missing.append(resource)
return missing
def circular_references(self):
""" Return a list of processes that won't be able to run according to to dependency graph, because
of circular references, ie when A is produced by B... And B produced by A.
:return: a list of process that won't be able to run. No special indication about circular groups
:rtype: list
"""
process_iterator = ProcessDependencyIterator(self)
for _ in process_iterator.iter_processes():
pass
return process_iterator.remaining()
def static_check_processes(self):
""" Runs a pre-check for every process, in order to catch early obvious errors, even before invalidation
:return: None
"""
for process in self.iter_processes():
process.static_check()
def check_resources_consistency(self):
resource_classes = { res.__class__ for res in self.iter_resources()}
for resource_class in resource_classes:
resource_class.check_consistency(self)
def update_signatures(self, signatures):
""" updates the workflow's signatures after the process has run
:param signatures: a dictionary of signatures indexed by urls
"""
self._signatures.update(signatures)
def run_pre_processes(self):
""" Runs all the preprocesses
:return:
:raises ExecutionError if an error occurs
"""
TuttleDirectories.create_tuttle_dirs()
TuttleDirectories.empty_extension_dir()
if not self.has_preprocesses():
return
lt = LogsFollower()
WorkflowRunner.print_preprocesses_header()
for process in self.iter_preprocesses():
TuttleDirectories.prepare_and_assign_paths(process)
lt.follow_process(process.log_stdout, process.log_stderr, None)
with lt.trace_in_background(), TuttleEnv():
for preprocess in self.iter_preprocesses():
WorkflowRunner.print_preprocess_header(preprocess, lt._logger)
success = True
error_msg = None
try:
preprocess.set_start()
preprocess.processor.run(preprocess, preprocess._reserved_path,
preprocess.log_stdout, preprocess.log_stderr)
except TuttleError as e:
success = False
error_msg = str(e)
raise
except Exception:
exc_info = sys.exc_info()
stacktrace = "".join(format_exception(*exc_info))
error_msg = "An unexpected error have happened in tuttle preprocessor {} : \n" \
"{}\n" \
"Preprocess {} will not complete.".format(preprocess._processor.name, stacktrace, preprocess.id)
finally:
preprocess.set_end(success, error_msg)
self.create_reports()
WorkflowRunner.print_preprocesses_footer()
def create_reports(self):
""" Write to disk files describing the workflow, with color for states
:return: None
"""
create_html_report(self, TuttleDirectories.tuttle_dir("report.html"))
create_dot_report(self, TuttleDirectories.tuttle_dir("report.dot"))
def dump(self):
""" Pickles the workflow and writes it to last_workflow.pickle
:return: None
"""
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "w") as f:
dump(self, f)
def export(self):
""" Export the workflow for external use : a dump for running tuttle later and a report for human users
:return: None
"""
self.dump()
self.create_reports()
@staticmethod
def load():
try:
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "r") as f:
return load(f)
except:
return None
def get_extensions(self):
return TuttleDirectories.list_extensions()
def find_process_that_creates(self, url):
"""
:param url: Returns the process that creates this url. this url is supposed to be created by this workflow,
so check creates_url() before calling this method
:return:
"""
if url in self._resources:
return self._resources[url].creator_process
def find_resource(self, url):
if url in self._resources:
return self._resources[url]
else:
return None
def compute_dependencies(self):
""" Feeds the dependant_processes field in every resource
:return: Nothing
"""
for resource in self._resources.itervalues():
resource.dependant_processes = []
for process in self.iter_processes():
for resource in process.iter_inputs():
resource.dependant_processes.append(process)
def iter_available_signatures(self):
return self._signatures.iteritems()
def retrieve_signatures(self, previous):
""" Retrieve the signatures from the former workflow. Useful to detect what has changed.
        Returns True if some resources were in previous and no longer exist in self
"""
for url, signature in previous.iter_available_signatures():
if (url in self._signatures) and (self._signatures[url] == "DISCOVERED"):
self._signatures[url] = signature
def pick_a_failing_process(self):
for process in self.iter_processes():
if process.end is not None and process.success is False:
return process
return None
def reset_failures(self):
workflow_changed = False
for process in self._processes:
if process.success is False:
process.reset_execution_info()
workflow_changed = True
return workflow_changed
def all_inputs_available(self, process):
"""
        :return: True if all input resources for this process are available, False otherwise
"""
for in_res in process.iter_inputs():
if not self.resource_available(in_res.url):
return False
return True
def runnable_processes(self):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None and self.all_inputs_available(process):
res.add(process)
return res
def discover_runnable_processes(self, complete_process):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None:
if process.depends_on_process(complete_process):
if self.all_inputs_available(process):
res.add(process)
return res
def discover_resources(self):
for resource in self._resources.itervalues():
if resource.exists():
if resource.is_primary():
self._signatures[resource.url] = resource.signature()
else:
self._signatures[resource.url] = "DISCOVERED"
def signature(self, url):
        # TODO simpler with __get__ ?
if url in self._signatures:
return self._signatures[url]
else:
return None
def resource_available(self, url):
return url in self._signatures
def clear_signatures(self, urls):
for url in urls:
if url in self._signatures:
del self._signatures[url]
def fill_missing_availability(self):
for url, signature in self.iter_available_signatures():
if signature is True:
print("Filling availability for {}".format(url))
resource = self.find_resource(url)
new_signature = resource.signature()
self._signatures[url] = new_signature
def similar_process(self, process_from_other_workflow):
output_resource = process_from_other_workflow.pick_an_output()
if output_resource:
return self.find_process_that_creates(output_resource.url)
else:
other_wf_urls = process_from_other_workflow.input_urls()
for process in self.iter_processes():
if not process.has_outputs() and process.input_urls() == other_wf_urls:
return process
return None
def iter_processes_on_dependency_order(self):
""" returns an iterator on processes according to dependency order"""
process_iterator = ProcessDependencyIterator(self)
return process_iterator.iter_processes()
def contains_resource(self, resource):
return resource.url in self._resources
|
StarcoderdataPython
|
3420486
|
<reponame>jscherer26/Icarra<gh_stars>1-10
# Copyright (c) 2006-2010, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL JESSE LIESCH BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Import transactions. Can fail due to threading.
imported = False
while not imported:
try:
import datetime
datetime.datetime.strptime("2000", "%Y")
imported = True
except Exception, e:
pass
import locale
def dateDict(date):
return {"y": date.year, "m": date.month, "d": date.day}
class Transaction:
# Define transaction type
deposit = 0
withdrawal = 1
expense = 2
buy = 3
sell = 4
split = 5
dividend = 6
adjustment = 7
stockDividend = 8
dividendReinvest = 9
spinoff = 10
transferIn = 11
transferOut = 12
short = 13
cover = 14
tickerChange = 15
# Options
exercise = 16
assign = 17
buyToOpen = 18
sellToClose = 19
sellToOpen = 20
buyToClose = 21
expire = 22
numTransactionTypes = 23
# Subtypes for dividend
ordinary = 1
qualified = 2
capitalGainShortTerm = 3
capitalGainLongTerm = 4
returnOfCapital = 5
taxExempt = 6
# Subtypes for option
# Applies to buy, sell, short, buyToClose
optionPut = 1
optionCall = 2
def __init__(self, uniqueId, ticker, date, transactionType, amount = False, shares = False, pricePerShare = False, fee = False, edited = False, deleted = False, ticker2 = False, subType = False, optionStrike = False, optionExpire = False, auto = False):
# Icarra desktop app ids are in the form __N__ where N is unique (eg, __3__, __7__)
# Icarra ids that have been synched with the server are in the form __SN__ where N is a unique number (eg, __S3__, __S7__)
# Other ids may be supplied by the brokerage and are assumed to be unique
if type(uniqueId) != bool:
self.uniqueId = str(uniqueId)
else:
self.uniqueId = uniqueId
self.setTicker(ticker)
self.setTicker2(ticker2)
self.setAuto(auto)
if type(date) in [unicode, str]:
self.date = Transaction.parseDate(date)
elif type(date) == datetime.datetime:
self.date = date
else:
raise Exception("Transaction date must be datetime type, is " + str(type(date)))
self.type = transactionType
self.subType = subType
self.setTotal(amount)
if shares and shares != "False":
self.shares = float(shares)
else:
self.shares = False
if pricePerShare and pricePerShare != "False":
self.pricePerShare = float(pricePerShare)
else:
self.pricePerShare = False
if fee and fee != "False":
self.fee = float(fee)
else:
self.fee = False
if optionStrike and optionStrike != "False":
self.optionStrike = float(optionStrike)
else:
self.optionStrike = False
if optionExpire and optionExpire != "False":
if type(optionExpire) in [unicode, str]:
self.optionExpire = Transaction.parseDate(optionExpire)
elif type(optionExpire) == datetime.datetime:
self.optionExpire = optionExpire
else:
raise Exception("Transaction optionExpire must be datetime type, is " + str(type(optionExpire)))
else:
self.optionExpire = False
if edited == "True" or (type(edited) == bool and edited):
self.edited = True
else:
self.edited = False
if deleted == "True" or (type(deleted) == bool and deleted):
self.deleted = True
else:
self.deleted = False
def __eq__(self, t2):
if self and not t2:
return False
def compField(a, b):
return a == b or (not a and not b) or (not a and b == "False") or (a == "False" and not b)
return compField(self.ticker, t2.ticker) and compField(self.date, t2.date) and compField(self.type, t2.type) and compField(self.total, t2.total) and compField(self.shares, t2.shares) and compField(self.pricePerShare, t2.pricePerShare) and compField(self.fee, t2.fee) and compField(self.ticker2, t2.ticker2) and compField(self.subType, t2.subType)
def __ne__(self, t2):
return not self.__eq__(t2)
def __str__(self):
str = ""
if self.uniqueId:
str += "id=" + self.uniqueId + " "
str += self.formatDate() + " " + self.formatTicker() + " " + self.formatType()
if self.ticker2:
str += " ticker2=" + self.formatTicker2()
if self.shares:
str += " shares=" + self.formatShares()
if self.total:
str += " total=" + self.formatTotal()
if self.fee:
str += " fee=" + self.formatFee()
if self.edited:
str += " (edited)"
if self.deleted:
str += " (deleted)"
if self.auto:
str += " (auto)"
return str
def __cmp__(self, other):
# Check for false
if other == False:
return 1
# First sort by date
if self.date < other.date:
return 1
if self.date > other.date:
return -1
# Next sort by
# Deposit
# Buy
# Sell
# Withdrawal
myRank = Transaction.getTransactionOrdering(self.type)
otherRank = Transaction.getTransactionOrdering(other.type)
if myRank < otherRank:
return 1
elif myRank > otherRank:
return -1
return 0
def __hash__(self):
# Basic hash function by datetime (integer) and transaction type
return hash((self.date, self.type))
def setDate(self, date):
self.date = date
def setTicker(self, ticker):
self.ticker = ticker.upper()
def setTicker2(self, ticker2):
if ticker2 == "False":
self.ticker2 = False
elif type(ticker2) == bool:
self.ticker2 = bool(ticker2)
elif isinstance(ticker2, str) or isinstance(ticker2, unicode):
self.ticker2 = ticker2.upper()
else:
self.ticker2 = False
def setAuto(self, auto):
if auto == "False":
self.auto = False
elif auto == "True":
self.auto = True
elif type(auto) == bool:
self.auto = bool(auto)
else:
self.auto = False
def setType(self, type):
if isinstance(type, int):
self.type = type
else:
self.type = self.getType(type)
def setSubType(self, subType):
self.subType = int(subType)
def setShares(self, shares):
self.shares = float(shares)
def setPricePerShare(self, pps):
self.pricePerShare = pps
def setFee(self, fee):
self.fee = fee
def setTotal(self, total):
if total and total != "False":
self.total = float(total)
else:
self.total = False
def setEdited(self):
self.edited = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
def setDeleted(self, deleted = True):
self.edited = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
self.deleted = deleted
@staticmethod
def parseDate(date):
'Return a datetime object for a date in the form "%Y-%m-%d %H:%M:%S"'
try:
return datetime.datetime(int(date[0:4]), int(date[5:7]), int(date[8:10]),
int(date[11:13]), int(date[14:16]), int(date[17:19]))
except Exception, e:
return datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
@staticmethod
def ofxDateToSql(date):
if len(date) >= 14:
return date[:4] + "-" + date[4:6] + "-" + date[6:8] + " " + date[8:10] + ":" + date[10:12] + ":" + date[12:14]
elif len(date) == 8:
return date[:4] + "-" + date[4:6] + "-" + date[6:8] + " 00:00:00"
else:
raise Exception("unknown date size")
@staticmethod
def ameritradeDateToSql(date):
# MM-DD-YYYY (or with slashes)
return date[6:10] + "-" + date[:2] + "-" + date[3:5] + " 00:00:00"
@staticmethod
def optionsHouseDateToSql(date):
# YYYY-MM-DD (or with slashes)
return date[0:4] + "-" + date[5:7] + "-" + date[8:10] + " 00:00:00"
@staticmethod
def forEdit():
list = [Transaction.deposit, Transaction.withdrawal, Transaction.buy, Transaction.sell, Transaction.short, Transaction.cover, Transaction.split, Transaction.dividend, Transaction.dividendReinvest, Transaction.expense, Transaction.adjustment, Transaction.stockDividend, Transaction.spinoff, Transaction.tickerChange, Transaction.transferIn, Transaction.transferOut, Transaction.buyToOpen, Transaction.sellToClose, Transaction.sellToOpen, Transaction.buyToClose, Transaction.exercise, Transaction.assign, Transaction.expire]
assert(len(list) == Transaction.numTransactionTypes)
return list
@staticmethod
def forEditBank():
list = [Transaction.deposit, Transaction.withdrawal, Transaction.buy, Transaction.sell, Transaction.dividend, Transaction.dividendReinvest, Transaction.expense, Transaction.adjustment]
return list
@staticmethod
def fieldsForTransaction(type, subType = False):
if type == Transaction.deposit or type == Transaction.withdrawal:
return ["date", "fee", "total"]
elif type == Transaction.expense:
return ["date", "ticker", "fee", "-total"]
elif type in [Transaction.buy, Transaction.sell, Transaction.short]:
return ["date", "fee", "ticker", "shares", "pricePerShare"]
elif type == Transaction.cover:
return ["date", "fee", "ticker", "shares", "pricePerShare", "total"]
elif type in [Transaction.buyToOpen, Transaction.sellToClose, Transaction.sellToOpen, Transaction.buyToClose]:
return ["date", "fee", "ticker", "shares", "pricePerShare", "total", "strike", "expire"]
elif type in [Transaction.dividendReinvest, Transaction.transferIn, Transaction.transferOut]:
return ["date", "fee", "ticker", "shares", "pricePerShare"]
elif type == Transaction.split or type == Transaction.dividend:
return ["date", "fee", "ticker", "total"]
elif type == Transaction.adjustment:
return ["date", "ticker", "total"]
elif type == Transaction.stockDividend:
return ["date", "fee", "ticker", "shares", "-total"]
elif type == Transaction.spinoff:
return ["date", "fee", "ticker", "ticker2", "shares", "pricePerShare"]
elif type == Transaction.tickerChange:
return ["date", "fee", "ticker", "ticker2", "shares", "-total"]
elif type == Transaction.exercise or type == Transaction.assign:
return ["date", "fee", "ticker", "shares", "option", "-total"]
elif type == Transaction.expire:
return ["date", "fee", "ticker", "shares", "option"]
else:
return []
def formatTicker(self):
if self.ticker == "__CASH__":
return "Cash Balance"
elif self.ticker2:
return self.ticker + " -> " + self.ticker2
elif self.isOption():
ret = self.ticker
ret += " " + self.optionExpire.strftime("%b-%y")
ret += " " + self.formatDollar(self.optionStrike)
if self.subType == Transaction.optionPut:
ret += " put"
elif self.subType == Transaction.optionCall:
ret += " call"
else:
ret += "???"
return ret
else:
return self.ticker
def formatTicker1(self):
if self.ticker == "__CASH__":
return "Cash Balance"
else:
return self.ticker
def formatTicker2(self):
if self.ticker2 == "__CASH__":
return "Cash Balance"
else:
return self.ticker2
def formatDate(self):
return str(self.date.month) + "/" + str(self.date.day) + "/" + str(self.date.year)
@staticmethod
def formatDays(days):
if days > 365:
val = days / 365.0
desc = 'year'
elif days > 30:
val = days / 30.0
desc = 'month'
else:
val = days
desc = 'day'
if val == 1:
return '%.1f %s' % (val, desc)
else:
return '%.1f %ss' % (val, desc)
def dateDict(self):
return dateDict(self.date)
def getDate(self):
return self.date
# Return how much this transaction modifies the cash position
# Returns 0 if it does not change cash
def getCashMod(self):
if self.type in [Transaction.deposit, Transaction.dividend]:
return abs(self.total)
elif self.type in [Transaction.sell, Transaction.sellToClose, Transaction.sellToOpen, Transaction.short, Transaction.buyToClose, Transaction.cover]:
return self.getTotal()
elif self.type in [Transaction.withdrawal, Transaction.buy, Transaction.buyToOpen]:
return -abs(self.total)
elif self.type == Transaction.expense:
if self.total:
return -abs(self.total)
else:
return -abs(self.fee)
elif self.type == Transaction.adjustment and self.ticker == "__CASH__":
return self.total
return 0
def getIrrFee(self, ticker):
'Returns the fee IRR for this transaction'
if self.ticker == "__CASH__":
if self.type == Transaction.dividend:
# Cash dividends increase value on their own, do not include here
val = 0
else:
val = self.getCashMod()
val += self.getFee()
return val
else:
# IRR for stocks
if self.type == Transaction.dividend:
# Include dividends
return -self.getTotal()
elif self.type == Transaction.spinoff:
# Spinoff is withdrawal if we are the original ticker
# Deposit if we are the spinoff ticker
if ticker == self.ticker:
return -self.getTotal()
else:
return self.getTotal()
elif self.type == Transaction.dividendReinvest:
# Do not include dividend reinvest since new shares are included in value
return self.getFee()
elif self.type == Transaction.transferIn:
return self.getTotal()
elif self.type == Transaction.transferOut:
return -self.getTotal()
elif self.type == Transaction.short:
if not self.pricePerShare or not self.shares:
return 0
return self.pricePerShare * self.shares + self.getFee()
else:
# Base IRR on cash mod
return -self.getCashMod()
def getIrrDiv(self, ticker):
'Returns the dividend IRR for this transaction (getIrrFee ignoring fees)'
if self.ticker == "__CASH__":
return self.getIrrFee(ticker) - self.getFee()
else:
if self.type == Transaction.expense:
return 0
val = self.getIrrFee(ticker) - self.getFee()
return val
@staticmethod
def getTypeString(type):
if type == Transaction.deposit:
return "Deposit"
elif type == Transaction.withdrawal:
return "Withdrawal"
elif type == Transaction.expense:
return "Expense"
elif type == Transaction.buy:
return "Buy"
elif type == Transaction.sell:
return "Sell"
elif type == Transaction.short:
return "Short"
elif type == Transaction.cover:
return "Cover"
elif type == Transaction.split:
return "Split"
elif type == Transaction.dividend:
return "Dividend"
elif type == Transaction.adjustment:
return "Adjustment"
elif type == Transaction.stockDividend:
return "Stock Dividend"
elif type == Transaction.dividendReinvest:
return "Dividend Reinvest"
elif type == Transaction.spinoff:
return "Spinoff"
elif type == Transaction.tickerChange:
return "Ticker Change"
elif type == Transaction.transferIn:
return "Transfer In"
elif type == Transaction.transferOut:
return "Transfer Out"
elif type == Transaction.buyToOpen:
return "Options: Buy to Open"
elif type == Transaction.sellToClose:
return "Options: Sell to Close"
elif type == Transaction.sellToOpen:
return "Options: Sell to Open"
elif type == Transaction.buyToClose:
return "Options: Buy to Close"
elif type == Transaction.exercise:
return "Options: Exercised"
elif type == Transaction.assign:
return "Options: Assigned"
elif type == Transaction.expire:
return "Options: Expired"
else:
return "???"
@staticmethod
def getType(string):
if string == "Deposit":
return Transaction.deposit
elif string == "Withdrawal":
return Transaction.withdrawal
elif string == "Expense":
return Transaction.expense
elif string == "Buy":
return Transaction.buy
elif string == "Sell":
return Transaction.sell
elif string == "Short":
return Transaction.short
elif string == "Cover":
return Transaction.cover
elif string == "Split":
return Transaction.split
elif string == "Dividend":
return Transaction.dividend
elif string == "Adjustment":
return Transaction.adjustment
elif string == "Stock Dividend":
return Transaction.stockDividend
elif string == "Dividend Reinvest":
return Transaction.dividendReinvest
elif string == "Spinoff":
return Transaction.spinoff
elif string == "Ticker Change":
return Transaction.tickerChange
elif string == "Transfer In":
return Transaction.transferIn
elif string == "Transfer Out":
return Transaction.transferOut
elif string == "Options: Buy to Open":
return Transaction.buyToOpen
elif string == "Options: Sell to Close":
return Transaction.sellToClose
elif string == "Options: Sell to Open":
return Transaction.sellToOpen
elif string == "Options: Buy to Close":
return Transaction.buyToClose
elif string == "Options: Exercised":
return Transaction.exercise
elif string == "Options: Assigned":
return Transaction.assign
elif string == "Options: Expired":
return Transaction.expire
else:
return False
@staticmethod
def getTransactionOrdering(type):
if type in [Transaction.deposit, Transaction.transferIn]:
return 0
elif type in [Transaction.buy, Transaction.short, Transaction.dividendReinvest, Transaction.buyToOpen, Transaction.sellToOpen]:
return 1
elif type in [Transaction.split, Transaction.dividend, Transaction.spinoff, Transaction.tickerChange]:
return 2
elif type in [Transaction.sell, Transaction.cover, Transaction.buyToClose, Transaction.sellToClose]:
return 99
elif type in [Transaction.withdrawal, Transaction.transferOut]:
return 100
else:
return 50
def hasShares(self):
return "shares" in self.fieldsForTransaction(self.type)
def hasPricePerShare(self):
return self.type in [Transaction.buy, Transaction.sell, Transaction.dividendReinvest]
def formatType(self):
# Check for options
if self.isOption():
return self.getTypeString(self.type).replace("Options: ", "")
elif self.type == Transaction.dividend:
if self.subType == Transaction.returnOfCapital:
return "Return of Capital"
elif self.subType == Transaction.capitalGainShortTerm:
return "Short Term Capital Gain"
elif self.subType == Transaction.capitalGainLongTerm:
return "Long Term Capital Gain"
return self.getTypeString(self.type)
else:
return self.getTypeString(self.type)
def formatShares(self):
return self.formatFloat(abs(self.shares))
@staticmethod
def formatFloat(value, commas = False):
'''
Format a floating point value without any trailing 0s in the decimal portion
'''
if value == 0.0:
return "0"
if value == False:
return ""
decimals = 0
multiply = 1
while decimals < 6:
diff = round(value * multiply) - value * multiply
if abs(diff) < 1.0e-6:
break
decimals += 1
multiply *= 10
format = "%%.%df" % decimals
if commas:
return locale.format(format, value, True)
else:
return format % value
@staticmethod
def formatDollar(value):
'''
Format a floating point value as a dollar value
'''
return locale.currency(value, grouping = True)
def formatPricePerShare(self):
if not self.pricePerShare or self.pricePerShare == "False":
return ""
return self.formatDollar(self.pricePerShare)
def getShares(self):
if self.shares:
return abs(self.shares)
else:
return 0
def getFee(self):
if self.type == Transaction.expense:
if self.total:
return abs(self.total)
else:
return abs(self.fee)
elif self.fee:
return abs(self.fee)
else:
return 0
def formatFee(self):
if not self.fee or self.fee == "False":
return ""
return "$" + str(self.fee)
def getTotal(self):
if not self.total:
return 0
# Compute for buys/sells
if self.type in [Transaction.buy, Transaction.transferIn]:
if self.pricePerShare and self.shares:
return -abs(self.pricePerShare * self.shares) - self.getFee()
return -abs(self.total)
elif self.type in [Transaction.sell, Transaction.transferOut]:
if self.pricePerShare and self.shares:
return abs(self.pricePerShare * self.shares) - self.getFee()
return abs(self.total)
elif self.type in [Transaction.deposit, Transaction.dividend]:
return abs(self.total)
elif self.type in [Transaction.withdrawal]:
return -abs(self.total)
return self.total
def getTotalIgnoreFee(self):
return self.getTotal() + self.getFee()
def formatTotal(self):
if not self.total:
return ""
elif self.type in [Transaction.split]:
return self.splitValueToString(self.total)
elif self.type in [Transaction.sell, Transaction.deposit, Transaction.dividend]:
# Always positive
return self.formatDollar(abs(self.total))
elif self.type in [Transaction.buy, Transaction.withdrawal, Transaction.expense]:
# Always negative
return self.formatDollar(-abs(self.total))
else:
return self.formatDollar(self.total)
def formatStrike(self):
if not self.optionStrike or self.optionStrike == "False":
return ""
return self.formatDollar(self.optionStrike)
def formatExpire(self):
if not self.optionExpire or self.optionExpire == "False":
return ""
return "%d/%d/%d" % (self.optionExpire.month, self.optionExpire.day, self.optionExpire.year)
@staticmethod
def splitValueToString(value):
# Determine split value
splitVal = "?-?"
if value == 1:
splitVal = "1-1"
if value > 1:
reversed = False
else:
value = 1.0 / value
reversed = True
# guess denom from 1-10
min = 1.0e6
minDenom = -1
for denom in range(1, 11):
num = value * denom
diff = abs(num - round(num))
if diff < min * 0.0001:
minDenom = denom
min = diff
if reversed:
splitVal = "%d-%d" % (minDenom, round(value * minDenom))
else:
splitVal = "%d-%d" % (round(value * minDenom), minDenom)
return splitVal
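    # Illustrative examples (added for clarity, not in the original source):
    #   splitValueToString(2.0) -> "2-1"   (2-for-1 split)
    #   splitValueToString(0.5) -> "1-2"   (1-for-2 reverse split)
    #   splitValueToString(1.5) -> "3-2"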
def isOption(self):
return self.type in [Transaction.buyToOpen, Transaction.sellToClose, Transaction.sellToOpen, Transaction.buyToClose, Transaction.assign, Transaction.exercise, Transaction.expire]
def isBankSpending(self):
return self.type == Transaction.withdrawal and self.ticker != "__CASH__"
def getSaveData(self):
return {
"uniqueId": self.uniqueId,
"ticker": self.ticker,
"ticker2": self.ticker2,
"type": self.type,
"subType": self.subType,
"date": self.date,
"shares": self.shares,
"pricePerShare": self.pricePerShare,
"fee": self.fee,
"total": self.total,
"optionStrike": self.optionStrike,
"optionExpire": self.optionExpire,
"edited": self.edited,
"deleted": self.deleted,
"auto": self.auto
}
def save(self, db):
data = self.getSaveData()
# If uniqueId is supplied make it the criteria for update
# Otherwise use the entire transaction
if self.uniqueId:
on = {"uniqueId": self.uniqueId}
else:
on = data
return db.insertOrUpdate("transactions", data, on)
# Returns False if no error, string if error
def checkError(self):
fields = self.fieldsForTransaction(self.type)
if self.type in [Transaction.deposit, Transaction.withdrawal]:
self.ticker = "__CASH__"
error = ""
if "ticker" in fields:
if not self.ticker:
error += "Ticker is required."
if "shares" in fields:
if self.shares == 0:
error += "Shares value is required."
if "fee" in fields:
if not self.fee:
self.fee = 0
if "total" in fields:
if self.total == 0:
error += "Total value is required."
if error:
return error
else:
return False
|
StarcoderdataPython
|
3212333
|
<gh_stars>0
import json
import os.path
def load_json( filename, **options ):
debug = False
if 'debug' in options: debug = True
if debug:
print("UTIL: DEBUG: Config file: "+filename+"\n" )
data = ""
fd = None
if not os.path.exists( filename ) : raise RuntimeError( "File "+filename+" could not be found" )
try:
fd = open( filename, "r" )
for line in fd:
            data += " " + line.rstrip("\n\r")
    except Exception as error: raise error
    finally:
        if fd is not None: fd.close()
# if debug: print("DEBUG: Data read from file "+filename+"\n"+data+"\n" )
try:
if debug: print("DEBUG: Data read from file "+filename+"\n"+data+"\n" )
return json.loads( data )
except ValueError as error:
if debug: print("ERROR: Bad file content in "+filename+"\n")
raise error
except Exception as error:
if debug: print("ERROR: Could not load json from "+filename+"\n")
raise error
return None
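# Example usage (illustrative; "settings.json" is a hypothetical file name):
#   config = load_json("settings.json", debug=True)
#   print(config["host"])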
|
StarcoderdataPython
|
3451697
|
<filename>orttraining/orttraining/python/training/ortmodule/__init__.py<gh_stars>10-100
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from packaging import version
# All global constant goes here, before ORTModule is imported
ONNX_OPSET_VERSION = 12
MINIMUM_TORCH_VERSION_STR = '1.8.1'
from .ortmodule import ORTModule
# Verify proper PyTorch is installed before proceeding to ONNX Runtime initialization
try:
import torch
torch_version = version.parse(torch.__version__.split('+')[0])
minimum_torch_version = version.parse(MINIMUM_TORCH_VERSION_STR)
if torch_version < minimum_torch_version:
raise RuntimeError(
f'ONNXRuntime ORTModule frontend requires PyTorch version greater or equal to {MINIMUM_TORCH_VERSION_STR}, '
f'but version {torch.__version__} was found instead.')
except RuntimeError:
    raise
except Exception:
    raise RuntimeError(f'PyTorch {MINIMUM_TORCH_VERSION_STR} must be installed in order to run ONNXRuntime ORTModule frontend!')
|
StarcoderdataPython
|
283320
|
from flask import Flask, flash, redirect, render_template, request, session, abort
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
import os
import requests
import json
import jwt
app = Flask(__name__)
app.config['GOOGLEMAPS_KEY'] = "<KEY>"
app.secret_key = os.urandom(12)
GoogleMaps(app)
api_url = "https://studentjobengine.herokuapp.com"
jwt_token = ''
def get_all_jobs(latitude, longitude):
print(latitude)
print(longitude)
url = api_url+'/jobs/all'
print(url)
headers = {'Authorization': 'Bearer '+jwt_token}
response = requests.get(url, headers=headers)
jobs_list = json.loads(response.text)
return jobs_list
def build_marker(jobs_list):
markers = []
for job in jobs_list:
job_shw = {
'icon': 'http://maps.google.com/mapfiles/ms/icons/green-dot.png',
'lat': float(job['Latitude']),
'lng': float(job['Longtitude']),
'infobox': job['Short_desc']
}
markers.append(job_shw)
return markers
@app.route('/')
def home(latitude='', longitude=''):
if not session.get('logged_in'):
return render_template('login.html')
else:
jobs_list = get_all_jobs(latitude, longitude)
markers = build_marker(jobs_list)
sndmap = Map(
identifier="sndmap",
style="height:600px;width:100%;margin:0;",
zoom=15,
lat=47.087734,
lng=17.922842,
markers=[
]
)
sndmap.markers = markers
return render_template('home.html', sndmap=sndmap)
@app.route('/', methods=['POST'])
def do_admin_login():
user_details = {}
print(request.form)
user_name = request.form['username']
user_password = request.form['password']
current_latitude = request.form['latitude']
current_longitude = request.form['longitude']
user_details['UserName'] = user_name
    user_details['Password'] = user_password
url = api_url+'/users/login'
body = user_details
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(body), headers=headers)
response_dict = json.loads(response.text)
global jwt_token
jwt_token = response_dict['JWT']
try:
claim = jwt.decode(response_dict['JWT'], algorithms=['HS256'], verify=False)
if claim['user'] == user_name:
session['logged_in'] = True
else:
flash('wrong password!')
except jwt.DecodeError:
flash('wrong credentials!')
return home(current_latitude, current_longitude)
@app.route("/showregister")
def showregister():
return render_template('register.html')
@app.route("/register", methods=['POST'])
def register():
user_details = {}
print(request.form)
user_name = request.form['userName']
user_password = request.form['inputPassword']
user_firstname = request.form['firstName']
user_lastname = request.form['lastName']
user_email = request.form['inputEmail']
user_type = "student"
user_status = "1"
user_details['UserName'] = user_name
    user_details['Password'] = user_password
user_details['FirstName'] = user_firstname
user_details['LastName'] = user_lastname
user_details['UserType'] = user_type
user_details['UserStatus'] = user_status
user_details['Email'] = user_email
url = api_url+'/users/add'
body = user_details
print(body)
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(body), headers=headers)
response_dict = json.loads(response.text)
if response_dict['Action'] == 'register':
if response_dict['Result'] == 'true':
flash('successfully registered')
else:
flash('something went wrong')
else:
flash('wrong msg type')
return home()
@app.route("/logout")
def logout():
session['logged_in'] = False
return home()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=1588)
|
StarcoderdataPython
|
9730172
|
from itertools import product
def accumulate_products(max_turns, numbers=None):
numbers = [str(digit) for digit in numbers]
accumulators = []
for repetition in range(1, max_turns + 1):
accumulators.extend(product(numbers, repeat=repetition))
accumulated_combinations = [int(''.join(item)) for item in accumulators]
return accumulated_combinations
def not_primes(start, end, formers=(2, 3, 5, 7), limit=20000):
turns = len(str(limit)) + 1
possibilities = accumulate_products(turns, formers)
possibilities = filter(lambda x: start<=x<end, possibilities)
founds=[]
for possibility in possibilities:
        # 2 is the only even prime; trial-divide the rest up to sqrt(n).
        if possibility == 2:
            test = True
        elif possibility % 2 != 0:
            test = all(possibility % i for i in range(3, int(possibility ** 0.5) + 1, 2))
        else:
            test = False
if not test:
founds.append(possibility)
return founds
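# Example (illustrative, not part of the original source): composites between
# 10 and 100 whose digits are all drawn from the default formers (2, 3, 5, 7).
#   not_primes(10, 100)  ->  [22, 25, 27, 32, 33, ...]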
|
StarcoderdataPython
|
3297512
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from identify import extensions
@pytest.mark.parametrize('extension', extensions.EXTENSIONS)
def test_extensions_have_binary_or_text(extension):
tags = extensions.EXTENSIONS[extension]
assert len({'text', 'binary'} & tags) == 1, tags
@pytest.mark.parametrize('name', extensions.NAMES)
def test_names_have_binary_or_text(name):
tags = extensions.NAMES[name]
assert len({'text', 'binary'} & tags) == 1, tags
@pytest.mark.parametrize('extension', extensions.EXTENSIONS_NEED_BINARY_CHECK)
def test_need_binary_check_do_not_specify_text_binary(extension):
tags = extensions.EXTENSIONS_NEED_BINARY_CHECK[extension]
assert len({'text', 'binary'} & tags) == 0, tags
def test_mutually_exclusive_check_types():
assert not (
set(extensions.EXTENSIONS) &
set(extensions.EXTENSIONS_NEED_BINARY_CHECK)
)
|
StarcoderdataPython
|
5072918
|
<filename>append.py
none = 'aa'
s = [1,2,3,none]
p = 5,6
print(s.append(p))  # list.append mutates s in place and returns None
print(s)
print(p)
|
StarcoderdataPython
|
6701396
|
<gh_stars>0
import gym
from IPython import display
import matplotlib
import matplotlib.pyplot as plt
import gym_minigrid
from gym import wrappers
from time import time
env = gym.make('MiniGrid-GridCity-4S30Static-v0')
env.render()
env = wrappers.Monitor(env, "./gym-results", force=True)
img = env.reset()
for _ in range(1000):
action = 1
observation, reward, done, info = env.step(action)
if done: break
env.close()
|
StarcoderdataPython
|
11299829
|
<gh_stars>1-10
import sys
from eosapi import Client
from haiku_node.config.config import UnificationConfig
from haiku_node.blockchain_helpers.eos import eosio_account
from haiku_node.validation.validation import UnificationAppScValidation
def run_test(requesting_app):
conf = UnificationConfig()
eos_client = Client(
nodes=[f"http://{conf['eos_rpc_ip']}:{conf['eos_rpc_port']}"])
print("THIS app is", conf['uapp_contract'])
v = UnificationAppScValidation(
eos_client, requesting_app)
app_valid = v.valid_app()
print(f"Requesting App {requesting_app} Valid according to MOTHER: "
f"{app_valid}")
code_valid = v.valid_code()
print(requesting_app, "contract code hash valid:", code_valid)
both_valid = v.valid()
print(requesting_app, "is considered valid:", both_valid)
if both_valid:
print("App valid according to MOTHER. App code hash valid.")
else:
if app_valid is False:
print("App not valid according to MOTHER")
if code_valid is False:
print("Code hash did not match hash held by MOTHER")
if __name__ == '__main__':
# get requesting app
if len(sys.argv) > 1:
requesting_app = sys.argv[1]
run_test(requesting_app)
u = "user.1"
n = eosio_account.string_to_name(u)
account_name = eosio_account.name_to_string(n)
print("str", account_name)
else:
print("run with requesting app account as arg1, e.g.:")
print("python test_validation.py app2")
|
StarcoderdataPython
|
11363785
|
from typing import List, Optional
from libs.enums import Intervention
from libs.datasets.dataset_utils import AggregationLevel
from libs import us_state_abbrev
from libs import base_model
import pydantic
import datetime
"""
CovidActNow API
Documentation at https://github.com/covid-projections/covid-data-model/tree/master/api
"""
class ResourceUsageProjection(base_model.APIBaseModel):
peakShortfall: int = pydantic.Field(
..., description="Shortfall of resource needed at the peak utilization"
)
peakDate: Optional[datetime.date] = pydantic.Field(
..., description="Date of peak resource utilization"
)
shortageStartDate: Optional[datetime.date] = pydantic.Field(
..., description="Date when resource shortage begins"
)
class Projections(base_model.APIBaseModel):
totalHospitalBeds: ResourceUsageProjection = pydantic.Field(
..., description="Projection about total hospital bed utilization"
)
ICUBeds: Optional[ResourceUsageProjection] = pydantic.Field(
..., description="Projection about ICU hospital bed utilization"
)
Rt: float = pydantic.Field(..., description="Historical or Inferred Rt")
RtCI90: float = pydantic.Field(..., description="Rt standard deviation")
class ResourceUtilization(base_model.APIBaseModel):
capacity: Optional[int] = pydantic.Field(
...,
description=(
"*deprecated*: Capacity for resource. In the case of ICUs, "
"this refers to total capacity. For hospitalization this refers to free capacity for "
"COVID patients. This value is calculated by (1 - typicalUsageRate) * totalCapacity * 2.07"
),
)
totalCapacity: Optional[int] = pydantic.Field(..., description="Total capacity for resource.")
currentUsageCovid: Optional[int] = pydantic.Field(
..., description="Currently used capacity for resource by COVID "
)
currentUsageTotal: Optional[int] = pydantic.Field(
..., description="Currently used capacity for resource by all patients (COVID + Non-COVID)",
)
typicalUsageRate: Optional[float] = pydantic.Field(
..., description="Typical used capacity rate for resource. This excludes any COVID usage.",
)
class Actuals(base_model.APIBaseModel):
population: Optional[int] = pydantic.Field(
...,
description="Total population in geographic area [*deprecated*: refer to summary for this]",
gt=0,
)
intervention: str = pydantic.Field(..., description="Name of high-level intervention in-place")
cumulativeConfirmedCases: Optional[int] = pydantic.Field(
..., description="Number of confirmed cases so far"
)
cumulativePositiveTests: Optional[int] = pydantic.Field(
..., description="Number of positive test results to date"
)
cumulativeNegativeTests: Optional[int] = pydantic.Field(
..., description="Number of negative test results to date"
)
cumulativeDeaths: Optional[int] = pydantic.Field(..., description="Number of deaths so far")
hospitalBeds: Optional[ResourceUtilization] = pydantic.Field(...)
ICUBeds: Optional[ResourceUtilization] = pydantic.Field(...)
# contactTracers count is available for states, not counties.
contactTracers: Optional[int] = pydantic.Field(default=None, description="# of Contact Tracers")
class CovidActNowAreaSummary(base_model.APIBaseModel):
countryName: str = "US"
fips: str = pydantic.Field(
...,
description="Fips Code. For state level data, 2 characters, for county level data, 5 characters.",
)
lat: Optional[float] = pydantic.Field(
..., description="Latitude of point within the state or county"
)
long: Optional[float] = pydantic.Field(
..., description="Longitude of point within the state or county"
)
stateName: str = pydantic.Field(..., description="The state name")
countyName: Optional[str] = pydantic.Field(default=None, description="The county name")
lastUpdatedDate: datetime.date = pydantic.Field(..., description="Date of latest data")
projections: Optional[Projections] = pydantic.Field(...)
actuals: Optional[Actuals] = pydantic.Field(...)
population: int = pydantic.Field(..., description="Total Population in geographic area.", gt=0)
@property
def intervention(self) -> Optional[Intervention]:
if not self.actuals:
return None
return Intervention[self.actuals.intervention]
@property
def aggregate_level(self) -> AggregationLevel:
if len(self.fips) == 2:
return AggregationLevel.STATE
if len(self.fips) == 5:
return AggregationLevel.COUNTY
@property
def state(self) -> str:
"""State abbreviation."""
return us_state_abbrev.US_STATE_ABBREV[self.stateName]
def output_key(self, intervention: Intervention):
if self.aggregate_level is AggregationLevel.STATE:
return f"{self.state}.{intervention.name}"
if self.aggregate_level is AggregationLevel.COUNTY:
return f"{self.fips}.{intervention.name}"
class CANActualsTimeseriesRow(Actuals):
    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class CANPredictionTimeseriesRow(base_model.APIBaseModel):
    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
hospitalBedsRequired: int = pydantic.Field(
...,
description="Number of hospital beds projected to be in-use or that were actually in use (if in the past)",
)
hospitalBedCapacity: int = pydantic.Field(
...,
description="Number of hospital beds projected to be in-use or actually in use (if in the past)",
)
ICUBedsInUse: int = pydantic.Field(
...,
description="Number of ICU beds projected to be in-use or that were actually in use (if in the past)",
)
ICUBedCapacity: int = pydantic.Field(
...,
description="Number of ICU beds projected to be in-use or actually in use (if in the past)",
)
ventilatorsInUse: int = pydantic.Field(
..., description="Number of ventilators projected to be in-use.",
)
ventilatorCapacity: int = pydantic.Field(..., description="Total ventilator capacity.")
RtIndicator: float = pydantic.Field(..., description="Historical or Inferred Rt")
RtIndicatorCI90: float = pydantic.Field(..., description="Rt standard deviation")
cumulativeDeaths: int = pydantic.Field(..., description="Number of cumulative deaths")
cumulativeInfected: Optional[int] = pydantic.Field(
..., description="Number of cumulative infections"
)
currentInfected: Optional[int] = pydantic.Field(..., description="Number of current infections")
currentSusceptible: Optional[int] = pydantic.Field(
..., description="Number of people currently susceptible "
)
currentExposed: Optional[int] = pydantic.Field(
..., description="Number of people currently exposed"
)
class PredictionTimeseriesRowWithHeader(CANPredictionTimeseriesRow):
countryName: str = "US"
stateName: str = pydantic.Field(..., description="The state name")
countyName: Optional[str] = pydantic.Field(..., description="The county name")
intervention: str = pydantic.Field(..., description="Name of high-level intervention in-place")
fips: str = pydantic.Field(..., description="Fips for State + County. Five character code")
lat: Optional[float] = pydantic.Field(
..., description="Latitude of point within the state or county"
)
long: Optional[float] = pydantic.Field(
..., description="Longitude of point within the state or county"
)
lastUpdatedDate: datetime.date = pydantic.Field(..., description="Date of latest data")
@property
def aggregate_level(self) -> AggregationLevel:
if len(self.fips) == 2:
return AggregationLevel.STATE
if len(self.fips) == 5:
return AggregationLevel.COUNTY
class CovidActNowAreaTimeseries(CovidActNowAreaSummary):
timeseries: Optional[List[CANPredictionTimeseriesRow]] = pydantic.Field(...)
actualsTimeseries: List[CANActualsTimeseriesRow] = pydantic.Field(...)
@property
def area_summary(self) -> CovidActNowAreaSummary:
data = {}
# Iterating through self does not force any conversion
# https://pydantic-docs.helpmanual.io/usage/exporting_models/#dictmodel-and-iteration
for field, value in self:
if field not in CovidActNowAreaSummary.__fields__:
continue
data[field] = value
return CovidActNowAreaSummary(**data)
# pylint: disable=no-self-argument
@pydantic.validator("timeseries")
def check_timeseries_have_cumulative_test_data(cls, rows, values):
# TODO: Fix validation
return rows
# Nebraska is missing testing data.
state_full_name = values["stateName"]
if state_full_name == "Nebraska":
return rows
total_negative_tests = sum(row.cumulativeNegativeTests or 0 for row in rows)
total_positive_tests = sum(row.cumulativePositiveTests or 0 for row in rows)
if not total_positive_tests or not total_negative_tests:
raise ValueError(f"Missing cumulative test data for {state_full_name}.")
return rows
@pydantic.validator("timeseries")
def check_timeseries_one_row_per_date(cls, rows, values):
dates_in_row = len(set(row.date for row in rows))
if len(rows) != dates_in_row:
raise ValueError(
"Number of rows does not match number of dates: " f"{len(rows)} vs. {dates_in_row}"
)
return rows
def output_key(self, intervention: Intervention) -> str:
return super().output_key(intervention) + ".timeseries"
class CovidActNowBulkSummary(base_model.APIBaseModel):
__root__: List[CovidActNowAreaSummary] = pydantic.Field(...)
def output_key(self, intervention):
aggregate_level = self.__root__[0].aggregate_level
if aggregate_level is AggregationLevel.COUNTY:
return f"counties.{intervention.name}"
if aggregate_level is AggregationLevel.STATE:
return f"states.{intervention.name}"
class CovidActNowBulkTimeseries(base_model.APIBaseModel):
__root__: List[CovidActNowAreaTimeseries] = pydantic.Field(...)
def output_key(self, intervention):
aggregate_level = self.__root__[0].aggregate_level
if aggregate_level is AggregationLevel.COUNTY:
return f"counties.{intervention.name}.timeseries"
if aggregate_level is AggregationLevel.STATE:
return f"states.{intervention.name}.timeseries"
class CovidActNowBulkFlattenedTimeseries(base_model.APIBaseModel):
__root__: List[PredictionTimeseriesRowWithHeader] = pydantic.Field(...)
def output_key(self, intervention):
aggregate_level = self.__root__[0].aggregate_level
if aggregate_level is AggregationLevel.COUNTY:
return f"counties.{intervention.name}.timeseries"
if aggregate_level is AggregationLevel.STATE:
return f"states.{intervention.name}.timeseries"
|
StarcoderdataPython
|
5181646
|
# ---------------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE file for license information.
# ---------------------------------------------------------------------------
from __future__ import annotations
import asyncio
import os
import token
import tokenize
from collections.abc import Iterable
from pathlib import Path
from typing import TextIO
from .const import FileStatus
def check_files_exist(file_list: Iterable[str]) -> list[str]:
"""Check if all files exist. Return False if not."""
file_errors: list[str] = []
cwd = Path(os.getcwd())
for file_ in file_list:
if cwd.joinpath(file_).is_file() is False:
file_errors.append(file_)
return sorted(file_errors)
async def async_restore_files(file_list: Iterable[str]) -> None:
if not file_list:
return
process = await asyncio.create_subprocess_shell(
f"git restore -- {' '.join(file_list)}",
)
await process.communicate()
async def async_check_uncommitted_changes(file_list: Iterable[str]) -> bool:
"""Check for uncommitted changes.
Returns:
False: if changes still need to be committed
"""
process = await asyncio.create_subprocess_shell(
"git diff-index --name-only HEAD --",
stdout=asyncio.subprocess.PIPE,
)
stdout, _ = await process.communicate()
files_uncommitted: set[str] = {file_ for item in stdout.decode().split('\n')
if (file_ := item.strip())}
return not any(True for file_ in file_list if file_ in files_uncommitted)
def check_comment_between_imports(fp: TextIO) -> FileStatus:
"""Return True if comment is found between imports.
Sign that the file can't be updated automatically.
"""
flag_in_import_block: bool = False
flag_multiple_imports: bool = False
flag_typing_import: bool = False
token_name_count: int = 0
line_first_import: int | None = None
line_last_import: int = 0
line_comments: list[tuple[int, int]] = []
return_value: FileStatus = FileStatus.CLEAR
tokens = tokenize.generate_tokens(fp.readline)
while True:
try:
t = next(tokens)
if flag_in_import_block is True:
if t.type == token.NAME and token_name_count == 0:
token_name_count += 1
if t.string == 'typing':
flag_typing_import = True
elif t.type == token.NEWLINE:
flag_in_import_block = False
flag_multiple_imports = False
flag_typing_import = False
token_name_count = 0
elif t.type == token.OP and t.string != '.':
flag_multiple_imports = True
elif t.type == token.COMMENT:
if flag_typing_import is True:
return_value = return_value | FileStatus.COMMENT | FileStatus.COMMENT_TYPING
elif flag_multiple_imports is True:
# Comment in same line as import statement
return_value = return_value | FileStatus.COMMENT
continue
if t.type == token.NAME:
if t.string in ('import', 'from'):
flag_in_import_block = True
if line_first_import is None:
line_first_import = t.start[0]
line_last_import = t.start[0]
else:
# Any other code block,
# not in main import block anymore
break
elif t.type in (token.COMMENT, token.STRING):
line_comments.append((t.type, t.start[0]))
except StopIteration:
break
if return_value != FileStatus.CLEAR:
# If inline comment was detected, stop here
return return_value
for _, line_number in line_comments:
if line_first_import is None:
# No import block detected
return FileStatus.CLEAR
if (line_first_import < line_number < line_last_import):
# Report all comments in the main import block
return FileStatus.COMMENT
return FileStatus.CLEAR
def extract_imports(fp: TextIO) -> set[str]:
"""Create set of all imports in main import block."""
flag_in_import_block: bool = False
flag_relative_import: bool | None = None
flag_imports: bool = False
flag_last_token_name: bool = False
current_package: str = ''
imports: set[str] = set()
tokens = tokenize.generate_tokens(fp.readline)
while True:
try:
t = next(tokens)
if flag_in_import_block is True:
if t.type == token.NEWLINE:
if flag_relative_import is False:
imports.add(current_package)
flag_in_import_block = False
flag_relative_import = None
flag_imports = False
flag_last_token_name = False
current_package = ''
elif t.type == token.NAME and t.string == 'import':
flag_imports = True
elif t.type == token.NAME and flag_last_token_name is False:
if (
flag_relative_import is False
or flag_relative_import is True
and flag_imports is False
):
current_package += t.string
elif flag_relative_import is True and flag_imports is True:
imports.add(f"{current_package}.{t.string}")
elif t.type == token.OP and t.string == '.':
current_package += '.'
elif t.type == token.OP and t.string == ',' and flag_relative_import is False:
imports.add(current_package)
current_package = ''
flag_last_token_name = (t.type == token.NAME and t.string != 'import')
continue
if t.type == token.NAME:
if t.string == 'import':
flag_in_import_block = True
flag_relative_import = False
elif t.string == 'from':
flag_in_import_block = True
flag_relative_import = True
else:
break
except StopIteration:
break
return imports
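# Illustrative example (not part of the original module): feeding a small
# source snippet through extract_imports via io.StringIO.
#   >>> import io
#   >>> extract_imports(io.StringIO("import os\nfrom typing import List\n"))
#   {'os', 'typing.List'}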
|
StarcoderdataPython
|
35214
|
<reponame>KanataIZUMIKAWA/TXTer
# Generated by Django 3.1.4 on 2021-01-05 03:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(default='noname', max_length=64)),
('note', models.TextField(default='')),
('read', models.BooleanField(default=False)),
],
),
]
|
StarcoderdataPython
|
1601245
|
import numpy as np
from .Observable import Subject
class ObservableArray(np.ndarray, Subject):
def __init__(self, *args, **kwargs):
Subject.__init__(self)
np.ndarray.__init__(self)
def _notify(self, to_return):
"""
if hasattr(to_return, "_observers") and hasattr(self, "_observers"):
Subject._notify()
else:
print("Error! No observers found")
"""
if to_return is not None:
if (hasattr(self, "_observers")):
to_return._observers = self._observers
Subject._notify(self)
else:
Subject._notify(self)
def __getitem__(self, index):
to_return = super(ObservableArray, self).__getitem__(index)
if hasattr(self, "_observers") and type(to_return) is not ObservableArray:
if to_return.shape != ():
tmp = ObservableArray(to_return.shape)
else:
tmp = ObservableArray((1,))
tmp[:] = to_return
tmp._observers = self._observers
return tmp
elif hasattr(self, "_observers") and not hasattr(to_return, "_observers"):
to_return._observers = self._observers
return to_return
else:
return to_return
def __repr__(self):
to_return = repr(np.asarray(self))
return to_return
def __iadd__(self, *args, **kwargs):
to_return = super(self.__class__, self).__iadd__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __isub__(self, *args, **kwargs):
to_return = super(self.__class__, self).__isub__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __imul__(self, *args, **kwargs):
to_return = super(self.__class__, self).__imul__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __idiv__(self, *args, **kwargs):
to_return = super(self.__class__, self).__idiv__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __itruediv__(self, *args, **kwargs):
to_return = super(self.__class__, self).__itruediv__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __rmatmul__(self, *args, **kwargs):
to_return = super(self.__class__, self).__rmatmul__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __matmul__(self, *args, **kwargs):
to_return = super(self.__class__, self).__matmul__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __imatmul__(self, *args, **kwargs):
to_return = super(self.__class__, self).__imatmul__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __ipow__(self, *args, **kwargs):
to_return = super(self.__class__, self).__ipow__(*args, **kwargs)
self._notify(to_return)
return to_return
def __imod__(self, *args, **kwargs):
to_return = super(self.__class__, self).__imod__(*args, **kwargs)
self._notify(to_return)
return to_return
def __ifloordiv__(self, *args, **kwargs):
to_return = super(self.__class__, self).__ifloordiv__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __ilshift__(self, *args, **kwargs):
to_return = super(self.__class__, self).__ilshift__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __irshift__(self, *args, **kwargs):
to_return = super(self.__class__, self).__irshift__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __iand__(self, *args, **kwargs):
to_return = super(self.__class__, self).__iand__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __ixor__(self, *args, **kwargs):
to_return = super(self.__class__, self).__ixor__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __ior__(self, *args, **kwargs):
to_return = super(self.__class__, self).__ior__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __setitem__(self, *args, **kwargs):
to_return = super(self.__class__, self).__setitem__(*args,
**kwargs)
self._notify(to_return)
return to_return
def __setslice__(self, *args, **kwargs):
to_return = super(self.__class__, self).__setslice__(*args,
**kwargs)
self._notify(to_return)
return to_return
|
StarcoderdataPython
|
1984599
|
# if condition:
# pass
# else:
# pass
# if condition:
# pass
# elif expression:
# pass
# else:
# pass
# if condition:
# pass
# pass if condition else pass
xa = "XA"
# if xa == "XA":
# print("Yo!")
# else:
# print("No!")
# print("Yo!") if xa == "XA" else print("No!")
# print("Yo!") if xa == "XA" else print("No!")
# if condition:
# pass
# else:
# if expression:
# pass
# else:
# pass
# pass if condition else pass if expression else pass
xa = "XA"
# if xa == "XA":
# print("Yo!")
# elif xa == "SK":
# print("So!")
# elif xa == "NM":
# print("No!")
# else:
# print("Po!")
# print("Yo!") if xa == "XA" else print("So!") if xa == "SK" else print("No!") if xa == "NM" else print("Po!")
# print("Yo!") if xa == "XA" else print("So!") if xa == "SK" else print("No!") if xa == "NM" else print("Po!")
# if condition:
# pass
# if xa == "XXXXXX":
# print("Yo!")
# if xa == "XA":print("Yo!")
# for idx in range(10):print(idx)
# while True:print()
# xa = 0
# while xa < 10:
# print(xa)
# xa += 1
# print("XA")
xa = 0
# while xa < 10:print(xa);xa+=1;print("XA")
|
StarcoderdataPython
|
8181262
|
<reponame>aticie/SATRN<gh_stars>0
"""
Copyright (c) 2020-present NAVER Corp.
MIT license
Usage:
python train.py --config_file=<config_file_path>
"""
import logging
import os
import random
import time
from logging import handlers, StreamHandler
import fire
import numpy as np
import tensorflow as tf
from psutil import virtual_memory
from constant import DELIMITER
from dataset import DatasetLoader
from flags import Flags
from utils import \
load_charset, get_optimizer, \
get_network, get_session_config, get_string, \
adjust_string, count_available_gpus, \
single_tower, validate, get_scaffold, get_init_trained
tf.logging.set_verbosity(tf.logging.ERROR)
def _average_gradients(tower_grads):
""" Average gradients from multiple towers.
"""
average_grads = []
for grads_and_vars in zip(*tower_grads):
grads = tf.stack([g for g, _ in grads_and_vars])
grad = tf.reduce_mean(grads, 0)
v = grads_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
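# Note (added for clarity): tower_grads is a list with one entry per tower,
# each entry a list of (gradient, variable) pairs; zip(*tower_grads) above
# regroups those pairs per variable before stacking and averaging.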
def _clip_gradients(grads, grad_clip_norm):
""" Clip gradients with global norm.
"""
g, v = zip(*grads)
g, global_norm = tf.clip_by_global_norm(g, grad_clip_norm)
clipped_grads = list(zip(g, v))
return clipped_grads, global_norm
def create_model_dir(model_dir):
""" Create model directory
"""
os.makedirs(os.path.join(model_dir, 'best_models'), exist_ok=True)
os.makedirs(os.path.join(model_dir, 'valid'), exist_ok=True)
return model_dir
def get_logger(model_dir, name):
""" Get stdout, file logger
"""
logger = logging.getLogger(name)
logger.handlers = []
# Stdout
streamHandler = StreamHandler()
logger.addHandler(streamHandler)
# File
log_dir = os.path.join(model_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(log_dir, '{}.log'.format(name))
fileHandler = handlers.RotatingFileHandler(log_path,
maxBytes=1024 * 1024,
backupCount=10)
logger.addHandler(fileHandler)
logger.setLevel(logging.INFO)
return logger
def set_seed(seed):
""" Set random seed
"""
if seed is not None:
random.seed(seed)
np.random.seed(seed)
tf.random.set_random_seed(seed)
print('[+] Random seed is set to {}'.format(seed))
return
def log_formatted(logger, *logs):
""" Print multiple lines log.
"""
logger.info('=' * 70)
logger.info(' ' * 70)
for log in logs:
logger.info(log)
logger.info(' ' * 70)
logger.info('=' * 70)
return
def main(config_file):
""" Train text recognition network
"""
# Parse configs
FLAGS = Flags(config_file).get()
# Set directory, seed, logger
model_dir = create_model_dir(FLAGS.model_dir)
logger = get_logger(model_dir, 'train')
best_model_dir = os.path.join(model_dir, 'best_models')
set_seed(FLAGS.seed)
# Print configs
flag_strs = [
'{}:\t{}'.format(name, value)
for name, value in FLAGS._asdict().items()
]
log_formatted(logger, '[+] Model configurations', *flag_strs)
# Print system environments
num_gpus = count_available_gpus()
num_cpus = os.cpu_count()
mem_size = virtual_memory().available // (1024 ** 3)
log_formatted(logger, '[+] System environments',
'The number of gpus : {}'.format(num_gpus),
'The number of cpus : {}'.format(num_cpus),
'Memory Size : {}G'.format(mem_size))
# Get optimizer and network
global_step = tf.train.get_or_create_global_step()
optimizer, learning_rate = get_optimizer(FLAGS.train.optimizer, global_step)
out_charset = load_charset(FLAGS.charset)
net = get_network(FLAGS, out_charset)
is_ctc = (net.loss_fn == 'ctc_loss')
# Multi tower for multi-gpu training
tower_grads = []
tower_extra_update_ops = []
tower_preds = []
tower_gts = []
tower_losses = []
batch_size = FLAGS.train.batch_size
tower_batch_size = batch_size // num_gpus
val_tower_outputs = []
eval_tower_outputs = []
for gpu_indx in range(num_gpus):
# Train tower
print('[+] Build Train tower GPU:%d' % gpu_indx)
input_device = '/gpu:%d' % gpu_indx
tower_batch_size = tower_batch_size \
if gpu_indx < num_gpus - 1 \
else batch_size - tower_batch_size * (num_gpus - 1)
train_loader = DatasetLoader(
dataset_paths=FLAGS.train.dataset_paths,
dataset_portions=FLAGS.train.dataset_portions,
batch_size=tower_batch_size,
label_maxlen=FLAGS.label_maxlen,
out_charset=out_charset,
preprocess_image=net.preprocess_image,
is_train=True,
is_ctc=is_ctc,
shuffle_and_repeat=True,
concat_batch=True,
input_device=input_device,
num_cpus=num_cpus,
num_gpus=num_gpus,
worker_index=gpu_indx,
use_rgb=FLAGS.use_rgb,
seed=FLAGS.seed,
name='train')
tower_output = single_tower(net,
gpu_indx,
train_loader,
out_charset,
optimizer,
name='train',
is_train=True)
tower_grads.append([x for x in tower_output.grads if x[0] is not None])
tower_extra_update_ops.append(tower_output.extra_update_ops)
tower_preds.append(tower_output.prediction)
tower_gts.append(tower_output.text)
tower_losses.append(tower_output.loss)
# Print network structure
if gpu_indx == 0:
param_stats = tf.profiler.profile(tf.get_default_graph())
logger.info('total_params: %d\n' % param_stats.total_parameters)
# Valid tower
print('[+] Build Valid tower GPU:%d' % gpu_indx)
valid_loader = DatasetLoader(dataset_paths=FLAGS.valid.dataset_paths,
dataset_portions=None,
batch_size=FLAGS.valid.batch_size //
num_gpus,
label_maxlen=FLAGS.label_maxlen,
out_charset=out_charset,
preprocess_image=net.preprocess_image,
is_train=False,
is_ctc=is_ctc,
shuffle_and_repeat=False,
concat_batch=False,
input_device=input_device,
num_cpus=num_cpus,
num_gpus=num_gpus,
worker_index=gpu_indx,
use_rgb=FLAGS.use_rgb,
seed=FLAGS.seed,
name='valid')
val_tower_output = single_tower(net,
gpu_indx,
valid_loader,
out_charset,
optimizer=None,
name='valid',
is_train=False)
val_tower_outputs.append(
(val_tower_output.loss, val_tower_output.prediction,
val_tower_output.text, val_tower_output.filename,
val_tower_output.dataset))
# Aggregate gradients
losses = tf.reduce_mean(tower_losses)
grads = _average_gradients(tower_grads)
with tf.control_dependencies(tower_extra_update_ops[-1]):
if FLAGS.train.optimizer.grad_clip_norm is not None:
grads, global_norm = _clip_gradients(
grads, FLAGS.train.optimizer.grad_clip_norm)
tf.summary.scalar('global_norm', global_norm)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
# Define config, scaffold
saver = tf.train.Saver()
sess_config = get_session_config()
scaffold = get_scaffold(saver, FLAGS.train.tune_from, 'train')
restore_model = get_init_trained()
# Define validation saver, summary writer
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
val_summary_op = tf.summary.merge(
[s for s in summaries if 'valid' in s.name])
val_summary_writer = {
dataset_name:
tf.summary.FileWriter(os.path.join(model_dir, 'valid', dataset_name))
for dataset_name in valid_loader.dataset_names
}
val_summary_writer['total_valid'] = tf.summary.FileWriter(
os.path.join(model_dir, 'valid', 'total_valid'))
val_saver = tf.train.Saver(max_to_keep=len(valid_loader.dataset_names) + 1)
best_val_err_rates = {}
best_steps = {}
# Training
print('[+] Make Session...')
with tf.train.MonitoredTrainingSession(
checkpoint_dir=model_dir,
scaffold=scaffold,
config=sess_config,
save_checkpoint_steps=FLAGS.train.save_steps,
save_checkpoint_secs=None,
save_summaries_steps=FLAGS.train.summary_steps,
save_summaries_secs=None,
) as sess:
log_formatted(logger, 'Training started!')
_step = 0
train_t = 0
start_t = time.time()
while _step < FLAGS.train.max_num_steps \
and not sess.should_stop():
# Train step
step_t = time.time()
[step_loss, _, _step, preds, gts, lr] = sess.run([
losses, train_op, global_step, tower_preds[0], tower_gts[0],
learning_rate
])
train_t += time.time() - step_t
# Summary
if _step % FLAGS.valid.steps == 0:
# Train summary
train_err = 0.
for i, (p, g) in enumerate(zip(preds, gts)):
s = get_string(p, out_charset, is_ctc=is_ctc)
g = g.decode('utf8').replace(DELIMITER, '')
s = adjust_string(s, FLAGS.train.lowercase,
FLAGS.train.alphanumeric)
g = adjust_string(g, FLAGS.train.lowercase,
FLAGS.train.alphanumeric)
e = int(s != g)
train_err += e
if FLAGS.train.verbose and i < 5:
print('TRAIN :\t{}\t{}\t{}'.format(s, g, not bool(e)))
train_err_rate = \
train_err / len(gts)
# Valid summary
val_cnts, val_errs, val_err_rates, _ = \
validate(sess,
_step,
val_tower_outputs,
out_charset,
is_ctc,
val_summary_op,
val_summary_writer,
val_saver,
best_val_err_rates,
best_steps,
best_model_dir,
FLAGS.valid.lowercase,
FLAGS.valid.alphanumeric)
# Logging
log_strings = ['', '-' * 28 + ' VALID_DETAIL ' + '-' * 28, '']
for dataset in sorted(val_err_rates.keys()):
if dataset == 'total_valid':
continue
cnt = val_cnts[dataset]
err = val_errs[dataset]
err_rate = val_err_rates[dataset]
best_step = best_steps[dataset]
s = '%s : %.2f%%(%d/%d)\tBEST_STEP : %d' % \
(dataset, (1. - err_rate) * 100, cnt - err, cnt, best_step)
log_strings.append(s)
elapsed_t = float(time.time() - start_t) / 60
remain_t = (elapsed_t / (_step + 1)) * \
(FLAGS.train.max_num_steps - _step - 1)
log_formatted(
logger, 'STEP : %d\tTRAIN_LOSS : %f' % (_step, step_loss),
'ELAPSED : %.2f min\tREMAIN : %.2f min\t'
'STEP_TIME: %.1f sec' %
(elapsed_t, remain_t, float(train_t) / (_step + 1)),
'TRAIN_SEQ_ERR : %f\tVALID_SEQ_ERR : %f' %
(train_err_rate, val_err_rates['total_valid']),
'BEST_STEP : %d\tBEST_VALID_SEQ_ERR : %f' %
(best_steps['total_valid'],
best_val_err_rates['total_valid']), *log_strings)
log_formatted(logger, 'Training is completed!')
if __name__ == '__main__':
fire.Fire(main)
|
StarcoderdataPython
|
3384980
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# A utility function to insert a new node with given data in BST and find its successor
def insert(node, data):
global succ
# If the tree is empty, return a new node
temp = node
if (node == None):
return Node(data)
# If key is smaller than root's key, go to left subtree and set successor as current node
if (data < node.data):
succ = node
temp.left = insert(node.left, data)
# Go to right subtree
elif (data > node.data):
temp.right = insert(node.right, data)
return temp
# Function to replace every element with the least greater element on its right
def replace(arr, n):
global succ
root = None
# Start from right to left // or in reverse order
for i in range(n - 1, -1, -1):
succ = None
# Insert current element into BST and find its inorder successor
root = insert(root, arr[i])
# Replace element by its inorder successor in BST
if (succ):
arr[i] = succ.data
else:
arr[i] = -1
return arr
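# Illustrative trace (not from the original source): for [8, 58, 71] the array
# is scanned right to left, giving [58, 71, -1] -- each element is replaced by
# the least greater element to its right, or -1 when none exists.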
# Driver code
if __name__ == '__main__':
arr = [ 8, 58, 71, 18, 31, 32, 63, 92, 43, 3, 91, 93, 25, 80, 28 ]
n = len(arr)
succ = None
print("Before function call-->",*arr)
arr = replace(arr, n)
print()
print("After function call-->",*arr)
|
StarcoderdataPython
|
4957763
|
for _ in range(0,10):
print("Hello World!!!!!!!!!.")
|
StarcoderdataPython
|
314611
|
import importlib
import os
import matplotlib.pyplot as plt
import torch
import numpy as np
import dataloaders.mnist
from ifaces import DownloadableDataset
from modules.iwae import IWAE
from modules.vae import VAE
from utils_clone.pytorch import reshape_and_tile_images
def load_checkpoint(checkpoint_fname):
state_dict = torch.load(checkpoint_fname, map_location='cpu')
checkpoint_fname = os.path.basename(checkpoint_fname)
splitted_checkpint_fname = checkpoint_fname.split("_")
dataset_name = splitted_checkpint_fname[0]
model_type = splitted_checkpint_fname[1]
two_dim_latent_space = model_type[-2:] == '2d'
if two_dim_latent_space: model_type = model_type[:-2]
k = int(splitted_checkpint_fname[2][1:])
num_layers = int(splitted_checkpint_fname[3][1:])
train_bs = int(splitted_checkpint_fname[4][2:])
assert splitted_checkpint_fname[5].split(".")[0] == "final", "Checkpoint is not a final checkpoint"
try:
dataloader_class_name = ''.join(x.title() for x in dataset_name.split('_')) + 'Dataloader'
_module = importlib.import_module('dataloaders.' + dataset_name.split('_')[-1])
_dataloader_class = getattr(_module, dataloader_class_name)
_train_dataloader = _dataloader_class(train_not_test=True, batch_size=train_bs, pin_memory=True,
shuffle=True)
# Smaller batch size for final performance testing as k could be large
final_validation_batch_size = 1
_test_dataloader = _dataloader_class(train_not_test=False, batch_size=final_validation_batch_size,
pin_memory=True, shuffle=True)
except AttributeError:
raise Exception(f'Unknown dataset name {dataset_name}')
if num_layers == 1:
_latent_units = [2 if two_dim_latent_space else 50]
_hidden_units_q = [[200, 200]]
_hidden_units_p = [[200, 200]]
elif num_layers == 2:
_latent_units = [100, 2 if two_dim_latent_space else 50]
_hidden_units_q = [[200, 200], [100, 100]]
_hidden_units_p = [[100, 100], [200, 200]]
else:
raise Exception("Invalid number of layers")
if model_type == 'vae':
_model = VAE(k=k, latent_units=[28 * 28] + _latent_units, hidden_units_q=_hidden_units_q,
hidden_units_p=_hidden_units_p, output_bias=_train_dataloader.dataset.get_train_bias())
else: # model_type == 'iwae':
_model = IWAE(k=k, latent_units=[28 * 28] + _latent_units, hidden_units_q=_hidden_units_q,
hidden_units_p=_hidden_units_p, output_bias=_train_dataloader.dataset.get_train_bias())
_model.load_state_dict(state_dict['model'])
return _model
def load_and_plot_samples(checkpoint_path, num_samples):
DownloadableDataset.set_data_directory('../data')
model: VAE = load_checkpoint(checkpoint_path)
res = model.get_samples(num_samples)
plt.imshow(res)
plt.show()
def calc_log_p(model, x_sample, minx, maxx, miny, maxy, num_points):
x = np.linspace(minx, maxx, num_points)
y = np.linspace(miny, maxy, num_points)
xg, yg = np.meshgrid(x, y)
xg1d = xg.reshape((-1, 1))
yg1d = yg.reshape((-1, 1))
log_p = np.empty((num_points, num_points))
for i in range(num_points):
h_samples_np = np.vstack((xg[i], yg[i])).T
with torch.no_grad():
h_samples = torch.from_numpy(h_samples_np).type(torch.get_default_dtype())
log_p_partial = model.calc_log_p(x_sample, h_samples).detach().numpy()
log_p[i, :] = log_p_partial
return x, y, log_p
def true_posterior_find_box(model, x_sample, minx, maxx, miny, maxy, num_points, nll_range):
x, y, log_p = calc_log_p(model, x_sample, minx, maxx, miny, maxy, num_points)
max_nll = np.max(log_p)
max_index = np.unravel_index(np.argmax(log_p), log_p.shape)
# Disable code for finding the size of the interesting area in the latent space
# min_nll = max_nll - nll_range
# indices = np.where(log_p > min_nll)
# minxi = np.min(indices[1])
# maxxi = np.max(indices[1])
# minyi = np.min(indices[0])
# maxyi = np.max(indices[0])
# minx = x[minxi]
# maxx = x[maxxi]
# miny = y[minyi]
# maxy = y[maxyi]
# width = maxx - minx
# height = maxy - miny
# centerx = minx + 0.5 * width
# centery = miny + 0.5 * height
# size = 1.5 * max(width, height)
# size = min(2.0, size)
# Instead, always plot a square box of the same size in latent space
size = 1.0
centerx = x[max_index[1]]
centery = y[max_index[0]]
minx = centerx - 0.5 * size
maxx = centerx + 0.5 * size
miny = centery - 0.5 * size
maxy = centery + 0.5 * size
print(centerx, centery, size, minx, maxx, miny, maxy)
return minx, maxx, miny, maxy
def generate_true_posterior_image(model, x_sample):
num_points = 300
search_minx = -5.0
search_maxx = 5.0
search_miny = -5.0
search_maxy = 5.0
nll_range = 10
minx, maxx, miny, maxy = true_posterior_find_box(model, x_sample, search_minx, search_maxx, search_miny,
search_maxy, num_points, nll_range)
x, y, log_p = calc_log_p(model, x_sample, minx, maxx, miny, maxy, num_points)
max_nll = np.max(log_p)
adjusted_log_p = log_p - max_nll
# The structure of the posterior is easier to see with a smaller base
# image = np.exp(adjusted_log_p)
image = np.power(1.02, adjusted_log_p)
return image, minx, maxx, miny, maxy
def generate_and_plot_posterior(model, x_sample):
image, minx, maxx, miny, maxy = generate_true_posterior_image(model, x_sample)
plt.imshow(-image, extent=[minx, maxx, miny, maxy], origin='lower', cmap="gray")
def generate_and_plot_reconstructed_samples(model, x_sample, return_mean: bool = False):
img = np.zeros((0, 28))
for j in range(5):
pred = model(x_sample, return_mean=return_mean)
pred_img = pred.detach().squeeze().numpy().reshape((28, 28))
img = np.vstack((img, pred_img))
plt.xticks([])
plt.yticks([])
plt.imshow(-img, cmap="gray")
def plot_true_posteriors_and_reconstructed_samples(checkpoint1_path, checkpoint2_path, figure_path,
return_mean: bool = False):
test_dataloader = dataloaders.mnist.MnistDataloader(
train_not_test=False,
batch_size=400,
pin_memory=True,
shuffle=False)
model1: VAE = load_checkpoint(checkpoint1_path)
model2: IWAE = load_checkpoint(checkpoint2_path)
x_samples_indices = [3, 2, 1, 32, 4, 8]
x_samples_ = [test_dataloader.dataset[i] for i in x_samples_indices]
x_samples = [torch.bernoulli(x_sample_) for x_sample_ in x_samples_]
num_samples = len(x_samples)
plt.subplots(num_samples, 5, figsize=(10, 15))
plt.tight_layout()
for i, x_sample in enumerate(x_samples):
# Plot x sample
plt.subplot(num_samples, 5, i * 5 + 1)
if return_mean:
img = x_samples_[i]
else:
img = x_sample
img = img.detach().squeeze().numpy().reshape((28, 28))
plt.xticks([])
plt.yticks([])
plt.imshow(-img, cmap="gray")
# Model 1
plt.subplot(num_samples, 5, i * 5 + 2)
generate_and_plot_posterior(model1, x_sample)
plt.subplot(num_samples, 5, i * 5 + 3)
generate_and_plot_reconstructed_samples(model1, x_sample, return_mean=return_mean)
# Model 2
plt.subplot(num_samples, 5, i * 5 + 4)
generate_and_plot_posterior(model2, x_sample)
plt.subplot(num_samples, 5, i * 5 + 5)
generate_and_plot_reconstructed_samples(model2, x_sample, return_mean=return_mean)
plt.savefig(figure_path)
def plot_lerp_samples(checkpoint1_path, checkpoint2_path, figure_path, n_img: int = 20):
test_dataloader = dataloaders.mnist.MnistDataloader(
train_not_test=False,
batch_size=400,
pin_memory=True,
shuffle=False)
model1: VAE = load_checkpoint(checkpoint1_path)
model2: IWAE = load_checkpoint(checkpoint2_path)
# Find boundaries
# for i, batch in enumerate(test_dataloader):
# h = model1.encoder(batch)
# print(h.min(dim=0).values, h.max(dim=0).values)
# if i > 5:
# break
with torch.no_grad():
# For VAE
h1_bounds = -3.5, 3.5
h2_bounds = -3.5, 3.0
h1, h2 = np.meshgrid(np.linspace(h1_bounds[0], h1_bounds[1], n_img),
np.linspace(h2_bounds[0], h2_bounds[1], n_img))
plt.subplots(n_img, n_img, figsize=(10, 10))
# plt.tight_layout()
for i in range(n_img):
for j in range(n_img):
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.subplot(n_img, n_img, i * n_img + j + 1)
h = torch.tensor([[h1[i, j], h2[i, j]]]).type(torch.get_default_dtype())
x = model1.decoder_layers[-1].calc_mean(h).squeeze().cpu().numpy()
plt.imshow(-x.reshape(28, 28), cmap="gray")
plt.savefig(figure_path.replace(os.path.basename(figure_path), f'vae_{os.path.basename(figure_path)}'))
plt.show()
# For IWAE
h1_bounds = -8.0, 5.0
h2_bounds = -3.5, 6.5
h1, h2 = np.meshgrid(np.linspace(h1_bounds[0], h1_bounds[1], n_img),
np.linspace(h2_bounds[0], h2_bounds[1], n_img))
plt.subplots(n_img, n_img, figsize=(10, 10))
# plt.tight_layout()
for i in range(n_img):
for j in range(n_img):
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.subplot(n_img, n_img, i * n_img + j + 1)
h = torch.tensor([[h1[i, j], h2[i, j]]]).type(torch.get_default_dtype())
x = model2.decoder_layers[-1].calc_mean(h).squeeze().cpu().numpy()
plt.imshow(-x.reshape(28, 28), cmap="gray")
plt.savefig(figure_path.replace(os.path.basename(figure_path), f'iwae_{os.path.basename(figure_path)}'))
plt.show()
def plot_100_samples(*args, figure_path):
test_dataloader = dataloaders.mnist.MnistDataloader(
train_not_test=False,
batch_size=400,
pin_memory=True,
shuffle=False)
# Generate 100 random samples from the dataset
x = None
for i in np.random.choice(list(range(len(test_dataloader.dataset))), 100, replace=False):
if x is None:
x = test_dataloader.dataset[i].numpy()
else:
x = np.vstack((x, test_dataloader.dataset[i].numpy()))
n_ckpts = len(args)
assert n_ckpts > 0
plt.subplots(1, n_ckpts + 1, figsize=(n_ckpts * 5, 5))
plt.subplot(1, n_ckpts + 1, 1)
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.imshow(reshape_and_tile_images(x), cmap='Greys')
with torch.no_grad():
for i, checkpoint_path in enumerate(args):
model = load_checkpoint(checkpoint_path)
plt.subplot(1, n_ckpts + 1, i + 2)
plt.axis('off')
plt.xticks([])
plt.yticks([])
x_hat = model(torch.bernoulli(torch.from_numpy(x).type(torch.get_default_dtype())),
return_mean=True).cpu().numpy()
plt.imshow(reshape_and_tile_images(x_hat), cmap='Greys')
plt.tight_layout()
plt.savefig(figure_path)
plt.show()
if __name__ == '__main__':
DownloadableDataset.set_data_directory('../../data')
# load_and_plot_samples("../../checkpoints/mnist_iwae_k01_L2_bs100_final.pkl", 100)
# plot_true_posteriors_and_reconstructed_samples(
# "../../checkpoints/mnist_vae2d_k01_L1_bs400_final.pkl",
# "../../checkpoints/mnist_iwae2d_k50_L1_bs400_final.pkl",
# "../../figures/posteriors.pdf",
# return_mean=True)
# plot_lerp_samples(
# "../../checkpoints/mnist_vae2d_k01_L1_bs400_final.pkl",
# "../../checkpoints/mnist_iwae2d_k50_L1_bs400_final.pkl",
# "../../figures/lerp.pdf",
# n_img=20)
# plot_100_samples(
# "../../checkpoints/mnist_vae2d_k01_L1_bs400_final.pkl",
# "../../checkpoints/mnist_vae_k01_L1_bs400_final.pkl",
# "../../checkpoints/mnist_vae_k50_L1_bs400_final.pkl",
# figure_path="../../figures/vae_samples.pdf"
# )
plot_100_samples(
"../../checkpoints/mnist_iwae2d_k50_L1_bs400_final.pkl",
"../../checkpoints/mnist_iwae_k01_L1_bs400_final.pkl",
"../../checkpoints/mnist_iwae_k50_L1_bs400_final.pkl",
figure_path="../../figures/iwae_samples.pdf"
)
|
StarcoderdataPython
|
9613261
|
<reponame>DimitriPapadopoulos/nmrglue
#! /usr/bin/env python
import nmrglue as ng
# read in the Agilent data
dic, data = ng.varian.read("agilent_2d")
# Set the spectral parameters
udic = ng.varian.guess_udic(dic, data)
# Direct dimension # Indirect dimension
udic[1]['size'] = 1500 ; udic[0]['size'] = 332
udic[1]['complex'] = True ; udic[0]['complex'] = True
udic[1]['encoding'] = 'direct' ; udic[0]['encoding'] = 'states'
udic[1]['sw'] = 50000.0 ; udic[0]['sw'] = 5555.556
udic[1]['obs'] = 125.691 ; udic[0]['obs'] = 50.648
udic[1]['car'] = 55.0 * 125.691; udic[0]['car'] = 120.0 * 50.648
udic[1]['label'] = '13C' ; udic[0]['label'] = '15N'
# create the converter object and initialize with Agilent data
C = ng.convert.converter()
C.from_varian(dic, data, udic)
# create NMRPipe data and then write it out
ng.pipe.write("2d_pipe.fid", *C.to_pipe(), overwrite=True)
|
StarcoderdataPython
|
3232102
|
<reponame>xdfcfc0xa/THMC-Challenge-Server
import os
from datetime import datetime
production = os.getenv("PRODUCTION", None) is not None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ctf_name = "TMHC"
eligibility = "In order to be able to join this server you will need a team key for one of the teams allowed to compete " \
"here. This site is still being developed. If you would like to contribute you probably know who to contact."
tagline = "TMHC Challenge Server"
# IRC Channel
ctf_chat_channel = "#TMHC"
ctf_home_url = "https://themanyhats.club/"
# Serve javascript libraries from CDN
cdn = True
# Allow users to submit via an api?
apisubmit = True
# Allow registration?
registration = True
# If running behind proxy (nginx), which header contains the real IP
proxied_ip_header = "X-Forwarded-For"
# How many teams to show on the scoreboard graph
teams_on_graph = 10
# Which email to send out notifications from
mail_from = "<EMAIL>"
# Whether to render the scoreboard on request or cache
immediate_scoreboard = False
# Banned email domains
disallowed_domain = "icec.tf"
# Where the static stuff is stored
static_prefix = "/static/problem-static/"
static_dir = "{}/static/problem_static/".format(os.path.dirname(os.path.abspath(__file__)))
custom_stylesheet = "css/main.css"
# Shell accounts?
enable_shell = True
shell_port = 22
shell_host = "shell.icec.tf"
shell_user_prefixes = ["ctf-"]
shell_password_length = 8
shell_free_acounts = 10
shell_max_accounts = 99999
shell_user_creation = "sudo useradd -m {username} -p {password} -g ctf -b /home_users"
# when the competition begins
competition_begin = datetime(1970, 1, 1, 0, 0)
competition_end = datetime(2018, 1, 1, 0, 0)
if production:
competition_begin = datetime(2016, 8, 12, hour=16, minute=0, second=0)
competition_end = datetime(2016, 8, 26, hour=16, minute=0, second=0)
def competition_is_running():
return competition_begin < datetime.now() < competition_end
def competition_has_started():
return competition_begin < datetime.now()
# Don't touch these. Instead, copy secrets.example to secrets and edit that.
import yaml
from collections import namedtuple
with open("secrets") as f:
    _secret = yaml.safe_load(f)
secret = namedtuple('SecretsDict', _secret.keys())(**_secret)
_redis = {
'host': secret.redis_host_ip,
'port': 6379,
'db': 0
}
if production:
with open("database") as f:
        _database = yaml.safe_load(f)
database = namedtuple('DatabaseDict', _database.keys())(**_database)
_redis['db'] = 1
redis = namedtuple('RedisDict', _redis.keys())(**_redis)
|
StarcoderdataPython
|
8171529
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/vision.models.pooling.ipynb (unless otherwise specified).
from __future__ import annotations
__all__ = ['BlurPool', 'MaxBlurPool']
# Cell
#nbdev_comment from __future__ import annotations
from kornia.filters import BlurPool2D, MaxBlurPool2D
from ...imports import *
# Cell
def BlurPool(
stride:int=2, # The stride size for pooling
ks:int=3, # The kernel size for pooling
padding:int=0, # Unused, for fastai compatibility
ndim:int=2, # Unused, for fastai compatibility
ceil_mode:bool=False # Unused, for fastai compatibility
) -> BlurPool2D:
"Compute blur (anti-aliasing) and downsample a given feature map."
assert ndim==2, "BlurPool is only implemented for 2D"
return BlurPool2D(kernel_size=ks, stride=stride)
# Cell
def MaxBlurPool(
stride:int=2, # The stride size for blur pooling
ks:int=3, # The kernel size for blur pooling
padding:int=0, # Unused, for fastai compatibility
ndim:int=2, # Unused, for fastai compatibility
    ceil_mode:bool=True, # If True, output size matches conv2d with same kernel size
max_ks:int=2 # The kernel size for max pooling
) -> MaxBlurPool2D:
"Compute pools and blurs and downsample a given feature map. Equivalent to `nn.Sequential(nn.MaxPool2d(...), BlurPool2D(...))`"
assert ndim==2, "MaxBlurPool is only implemented for 2D"
return MaxBlurPool2D(kernel_size=ks, stride=stride, ceil_mode=ceil_mode, max_pool_size=max_ks)
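# Minimal usage sketch (an illustrative example, not part of the exported notebook):
#
#   import torch
#   pool = MaxBlurPool(stride=2, ks=3, max_ks=2)
#   y = pool(torch.randn(1, 3, 32, 32))   # anti-aliased max pooling, roughly halving H and W
#
# BlurPool(stride=2, ks=3) can be dropped in the same way wherever a plain
# downsampling layer is expected.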
|
StarcoderdataPython
|
8167634
|
<gh_stars>0
import atexit
import RPi.GPIO as GPIO
import time
# PIN Setup -- set this to whatever pins you have the LED hooked up to
red_pin = 20
green_pin = 21
blue_pin = 22
min = 0 #start PWM at 0% duty cycle
max = 100 #maximum duty cycle
# GPIO Infrastructure for the LED
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_pin, GPIO.OUT)
GPIO.setup(green_pin, GPIO.OUT)
GPIO.setup(blue_pin, GPIO.OUT)
r = GPIO.PWM(red_pin, max) # second argument is the PWM frequency in Hz (100 Hz here)
g = GPIO.PWM(green_pin, max)
b = GPIO.PWM(blue_pin, max)
r.start(min)
g.start(min)
b.start(min)
# pulse at startup so we know you're ready
def cycle():
for x in range(min, max):
r.ChangeDutyCycle(x)
time.sleep(0.005)
for x in range(min, max):
g.ChangeDutyCycle(x)
r.ChangeDutyCycle(max - x)
time.sleep(0.005)
for x in range(min, max):
b.ChangeDutyCycle(x)
g.ChangeDutyCycle(max - x)
time.sleep(0.005)
for x in range(min, max):
r.ChangeDutyCycle(x)
b.ChangeDutyCycle(max - x)
time.sleep(0.005)
r.ChangeDutyCycle(min)
g.ChangeDutyCycle(min)
b.ChangeDutyCycle(min)
# Interrupt handler that turns the LED off and releases the hardware lock when the program exits (SIGTERM).
def interrupt():
r.ChangeDutyCycle(min)
r.stop()
g.ChangeDutyCycle(min)
g.stop()
b.ChangeDutyCycle(min)
b.stop()
GPIO.cleanup()
# Interface function to allow hueGPIO to control PWM signal on led pins
def setHueColor(color, bright):
print("color: ", color, "bright: ", bright)
red, green, blue = color
#adjust brightness
red = red * bright
green = green * bright
blue = blue * bright
#convert color from 0..255 RGB interval to 0..100 PWM signal
red = int(round((100 / 255 * red), 0))
green = int(round((100 / 255 * green), 0))
blue = int(round((100 / 255 * blue), 0))
r.ChangeDutyCycle(red)
g.ChangeDutyCycle(green)
b.ChangeDutyCycle(blue)
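# Illustrative call (example only, not invoked here): set the LED to half-brightness red.
# `color` is an (R, G, B) tuple in 0..255 and `bright` is a 0..1 factor, matching the
# conversion to 0..100 PWM duty cycles above.
#
#   setHueColor((255, 0, 0), 0.5)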
atexit.register(interrupt)
cycle()
# END.
|
StarcoderdataPython
|
4928712
|
<reponame>BlazeCode2/haddoc2
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##----------------------------------------------------------------------------
## Title : quartus
## Project : Haddoc2
##----------------------------------------------------------------------------
## File : quartus.py
## Author : <NAME>
## Company : Institut Pascal
## Last update: 07-07-2017
##----------------------------------------------------------------------------
## Description: Script to create a Quartus project using generated Haddoc2 VHDL
## and Haddoc2 IP library. FPGA here is Intel's Cyclone V 5CGXFC9E7F35C8
##----------------------------------------------------------------------------
import sys
import os
def gen_qsf (qsf_file,haddoc2_hdl_lib,top_level_dir):
#print("Haddoc2 lib in " + haddoc2_hdl_lib)
with open (qsf_file,'w') as f:
# Device
f.write("set_global_assignment -name FAMILY \"CYCLONE V\"\n")
f.write("set_global_assignment -name DEVICE 5CGXFC9E7F35C8\n")
f.write("set_global_assignment -name ORIGINAL_QUARTUS_VERSION 16.0\n")
f.write("set_global_assignment -name TOP_LEVEL_ENTITY cnn_process\n")
f.write("set_global_assignment -name MIN_CORE_JUNCTION_TEMP 0\n")
f.write("set_global_assignment -name MAX_CORE_JUNCTION_TEMP 85\n")
f.write("set_global_assignment -name PARTITION_NETLIST_TYPE SOURCE -section_id Top\n")
f.write("set_global_assignment -name PARTITION_FITTER_PRESERVATION_LEVEL PLACEMENT_AND_ROUTING -section_id Top\n")
f.write("set_global_assignment -name PARTITION_COLOR 16764057 -section_id Top\n")
f.write("set_global_assignment -name POWER_PRESET_COOLING_SOLUTION \"23 MM HEAT SINK WITH 200 LFPM AIRFLOW\"\n")
f.write("set_global_assignment -name POWER_BOARD_THERMAL_MODEL \"NONE (CONSERVATIVE)\"\n")
f.write("set_global_assignment -name PROJECT_OUTPUT_DIRECTORY build\n")
f.write("set_global_assignment -name NUM_PARALLEL_PROCESSORS 4\n")
# Logic Elements Based arithmetic
f.write("set_global_assignment -name DSP_BLOCK_BALANCING \"LOGIC ELEMENTS\"\n")
f.write("set_global_assignment -name AUTO_DSP_RECOGNITION OFF\n")
# Generated files
f.write("set_global_assignment -name VHDL_FILE " + top_level_dir + "/bitwidths.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/cnn_types.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + top_level_dir + "/params.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + top_level_dir + "/cnn_process.vhd\n")
# Haddoc2 lib
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/cnn_types.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/InputLayer.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/DisplayLayer.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/ConvLayer.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/MCM.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/MOA.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/DotProduct.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/Taps.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/NeighExtractor.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/TensorExtractor.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/TanhLayer.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/PoolLayer.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/poolV.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/poolH.vhd\n")
f.write("set_global_assignment -name VHDL_FILE " + haddoc2_hdl_lib + "/maxPool.vhd\n")
f.write("set_instance_assignment -name PARTITION_HIERARCHY root_partition -to | -section_id Top\n")
f.close()
def gen_qpf(qpf_file):
with open (qpf_file,'w') as f:
f.write("QUARTUS_VERSION = \"16.0\"\n")
f.write("PROJECT_REVISION = \"cnn_process\"\n")
f.close()
def generateProject(haddoc2_hdl_lib,
top_level_dir,
out_dir):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("Haddoc2 Quartus Project Generator:")
qsf_file = out_dir + "/cnn_process.qsf"
qpf_file = out_dir + "/cnn_process.qpf"
print("\tQSF File: " + qsf_file)
print("\tQPF File: " + qpf_file)
gen_qsf (qsf_file,haddoc2_hdl_lib,top_level_dir)
gen_qpf (qpf_file)
print("Succefully generated quartus project")
if __name__ == '__main__':
if (len(sys.argv) == 2):
cwd = os.getcwd()
cwd = '/'.join(cwd.split('\\'))
out_dir = sys.argv[1]
generateProject(haddoc2_hdl_lib = cwd + '/../lib/hdl',
top_level_dir = cwd + '/hdl_generated',
out_dir = out_dir)
else:
        print('Not enough arguments')
print('python quartus.py <hdl_directory>')
|
StarcoderdataPython
|
6630891
|
<gh_stars>1-10
from . import rman_operators_printer
from . import rman_operators_view3d
from . import rman_operators_render
from . import rman_operators_rib
from . import rman_operators_nodetree
from . import rman_operators_collections
from . import rman_operators_editors
from . import rman_operators_stylized
from . import rman_operators_mesh
def register():
rman_operators_printer.register()
rman_operators_view3d.register()
rman_operators_render.register()
rman_operators_rib.register()
rman_operators_nodetree.register()
rman_operators_collections.register()
rman_operators_editors.register()
rman_operators_stylized.register()
rman_operators_mesh.register()
def unregister():
rman_operators_printer.unregister()
rman_operators_view3d.unregister()
rman_operators_render.unregister()
rman_operators_rib.unregister()
rman_operators_nodetree.unregister()
rman_operators_collections.unregister()
rman_operators_editors.unregister()
rman_operators_stylized.unregister()
rman_operators_mesh.unregister()
|
StarcoderdataPython
|
8115313
|
import numpy as np
def normalize_to_range(array, R):
    """Returns array linearly rescaled to the range [R[0], R[1]]."""
    array = array - np.min(array)
    return array * ((R[1] - R[0]) / np.max(array)) + R[0]
def bound_errors(x, x1, x2):
if x1 >= x2:
raise ValueError('x2 must be greater than x1')
if x1 >= x[-1]:
raise ValueError('x1 is out of range.')
if x2 <= x[0]:
raise ValueError('x2 is out of range')
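# Quick sanity-check sketch (illustrative values, not part of the original module):
#
#   a = np.array([2.0, 4.0, 6.0])
#   normalize_to_range(a, (0.0, 1.0))   # -> array([0. , 0.5, 1. ])
#   bound_errors(a, 2.5, 5.0)           # no exception: the bounds overlap the data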
|
StarcoderdataPython
|
4847144
|
#!/usr/bin/env python
'''Class for plotting simulation data.
@author: <NAME>
@contact: <EMAIL>
@status: Development
'''
# Base python imports
import numpy as np
import os
import scipy.stats
import scipy.signal as signal
import warnings
import verdict
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as plt_colors
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as path_effects
import matplotlib.transforms as transforms
import galaxy_dive.utils.mp_utils as mp_utils
import galaxy_dive.utils.utilities as utilities
import galaxy_dive.plot_data.plotting as gen_plot
import galaxy_dive.plot_data.pu_colormaps as pu_cm
########################################################################
########################################################################
class GenericPlotter( object ):
@utilities.store_parameters
def __init__( self, data_object, label=None, color='black', ):
'''
Args:
data_object ( generic_data.GenericData object or subclass of such ) : The data container to use.
'''
pass
########################################################################
# Alternate inherent methods
########################################################################
def __getattr__( self, attr):
'''By replacing getattr with the following code, we allow automatically searching the data_object
for the appropriate attribute as well, while losing none of the original functionality.
'''
print( "Attribute {} not found in plotting object. Checking data object.".format( attr ) )
return getattr( self.data_object, attr )
########################################################################
# Specific Generic Plots
########################################################################
def histogram( self,
data_key,
provided_data = None,
provided_hist = None,
weight_key = None,
slices = None,
ax = None,
fix_invalid = False,
mask_zeros = False,
invalid_fix_method = None,
bins = 32,
normed = True,
norm_type = 'probability',
scaling = None,
smooth = False,
smoothing_window_length = 9,
smoothing_polyorder = 3,
histogram_style = 'step',
color = 'black',
linestyle = '-',
linewidth = 3.5,
alpha = 1.,
x_range = None, y_range = None,
x_label = None, y_label = None,
add_x_label = True, add_y_label = True,
add_plot_label = True,
plot_label = None,
line_label = None,
label_fontsize = 24,
x_scale = 'linear', y_scale = 'linear',
cdf = False,
vertical_line = None,
vertical_line_kwargs = { 'linestyle': '--', 'linewidth': 3, 'color': 'k', },
return_dist = False,
assert_contains_all_data = True,
data_kwargs = {},
*args, **kwargs ):
'''Make a histogram of the data. Extra arguments are passed to
self.data_object.get_selected_data.
Args:
data_key (str) :
Data key to plot.
            weight_key (str) :
                Data key for data to use as a weight. By default, no weight.
            slices (int or tuple of slices) :
                How to slice the data.
ax (axis) :
What axis to use. By default creates a figure and places the axis on it.
fix_invalid (bool) :
Throw away invalid values?
            invalid_fix_method (float or int) :
                How to handle invalid values. By default, throw them away. Providing a value to this argument instead
                replaces them with that value.
bins (int or array-like) :
bins argument to be passed to np.histogram
normed (bool) :
Normalize the histogram?
color (str) :
Color of histogram.
linestyle (str) :
Linestyle of histogram.
linewidth (float) :
Linewidth for histogram.
alpha (float) :
Alpha value for histogram
x_range, y_range ( [min,max] ) :
What are the minimum and maximum x- and y- values to include?
Defaults to matplotlib's automatic choices
x_label, ylabel (str) :
Axes labels. Defaults to the data_key for the x-axis and "Normalized Histogram" for the y-axis.
add_x_label, add_y_label (bool) :
Include axes labels?
            plot_label (str or dict) :
                What to label the plot with. By default, uses self.label.
line_label (str) :
What label to give the line.
label_fontsize (int) :
Fontsize for the labels.
x_scale, y_scale (str) :
What scales to use for the x and y axes.
cdf (bool) :
Plot a CDF instead.
vertical_line (float) :
Plot a vertical line at this value on the x-axis, if true.
            vertical_line_kwargs (dict) :
                Arguments to pass to ax.plot for the vertical line.
return_dist (bool) :
If True, return the data values and the edges for the histogram.
assert_contains_all_data (bool) :
If True, make sure that the histogram plots all selected data.
*args, **kwargs :
Extra arguments to pass to self.data_object.get_selected_data()
'''
print( "Plotting histogram for {}".format( data_key ) )
if provided_hist is None:
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
data_kwargs = utilities.merge_two_dicts( data_kwargs, kwargs )
if provided_data is None:
data = self.data_object.get_selected_data(
data_key,
sl=sl,
*args, **data_kwargs
).copy()
else:
data = provided_data.copy()
if weight_key is None:
weights = None
else:
if 'scale_key' in kwargs:
warnings.warn(
"Scaling weights by {}. Is this correct?".format(
kwargs['scale_key']
)
)
weights = self.data_object.get_selected_data( weight_key, sl=sl, *args, **kwargs )
if fix_invalid:
if invalid_fix_method is None:
data = np.ma.fix_invalid( data ).compressed()
else:
data = np.ma.fix_invalid( data )
data.fill_value = invalid_fix_method
data = data.filled()
# Make the histogram itself
hist, edges = np.histogram( data, bins=bins, weights=weights )
# Make sure we have all the data in the histogram
if assert_contains_all_data:
assert data.size == hist.sum()
if normed:
if norm_type == 'probability':
hist = hist.astype( float ) / ( hist.sum()*(edges[1] - edges[0]) )
elif norm_type == 'bin_width':
hist = hist.astype( float ) / (edges[1] - edges[0])
elif norm_type == 'outer_edge':
hist = hist.astype( float ) / hist[-1]
elif norm_type == 'max_value':
hist = hist.astype( float ) / hist.max()
else:
raise Exception(
"Unrecognized norm_type, {}".format( norm_type )
)
if scaling is not None:
hist *= scaling
if cdf:
hist = np.cumsum( hist )*(edges[1] - edges[0])
else:
hist = provided_hist
edges = bins
if mask_zeros:
hist = np.ma.masked_where(
hist < 1e-14,
hist,
)
if smooth:
hist = signal.savgol_filter(
hist,
window_length = smoothing_window_length,
polyorder = smoothing_polyorder,
)
if ax is None:
fig = plt.figure( figsize=(11,5), facecolor='white', )
ax = plt.gca()
if line_label is None:
line_label = self.label
if color is None:
color = self.color
# Inserting a 0 at the beginning allows plotting a numpy histogram with a step plot
if histogram_style == 'step':
ax.step(
edges,
np.insert(hist, 0, 0.),
color=color,
linestyle=linestyle,
linewidth=linewidth,
label=line_label,
alpha=alpha,
)
elif histogram_style == 'line':
x_values = 0.5 * ( edges[:-1] + edges[1:] )
ax.plot(
x_values,
hist,
color=color,
linestyle=linestyle,
linewidth=linewidth,
label=line_label,
alpha=alpha,
)
else:
raise KeyError(
"Unrecognized histogram_style, {}".format( histogram_style )
)
# Plot a vertical line?
if vertical_line is not None:
trans = transforms.blended_transform_factory( ax.transData, ax.transAxes )
ax.plot(
[ vertical_line, ]*2,
[ 0., 1., ],
transform = trans,
**vertical_line_kwargs
)
# Plot label
if add_plot_label:
if plot_label is None:
plt_label = ax.annotate(
s = self.label,
xy = (0.,1.0),
va = 'bottom',
xycoords = 'axes fraction',
fontsize = label_fontsize,
)
elif isinstance( plot_label, str ):
plt_label = ax.annotate(
s = plot_label,
xy = (0.,1.0),
va = 'bottom',
xycoords = 'axes fraction',
fontsize = label_fontsize,
)
elif isinstance( plot_label, dict ):
plt_label = ax.annotate( **plot_label )
else:
raise Exception( 'Unrecognized plot_label arguments, {}'.format( plot_label ) )
# Add axis labels
if add_x_label:
if x_label is None:
x_label = data_key
ax.set_xlabel( x_label, fontsize=label_fontsize )
if add_y_label:
if y_label is None:
if not cdf:
y_label = r'Normalized Histogram'
else:
y_label = r'CDF'
ax.set_ylabel( y_label, fontsize=label_fontsize )
if x_range is not None:
ax.set_xlim( x_range )
if y_range is not None:
ax.set_ylim( y_range )
ax.set_xscale( x_scale )
ax.set_yscale( y_scale )
if return_dist:
return hist, edges
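    # Illustrative call pattern (hypothetical data keys; assumes `plotter` is a
    # GenericPlotter wrapping a data object that understands these keys):
    #
    #   plotter.histogram(
    #       'T',
    #       weight_key = 'M',
    #       bins = 64,
    #       x_scale = 'log',
    #       line_label = 'gas',
    #   )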
########################################################################
def histogram2d( self,
x_key, y_key,
x_data = None, y_data = None,
weight_key = None,
weight_data = None,
x_data_args = {}, y_data_args = {},
weight_data_args = {},
slices = None,
ax = None,
x_range = None, y_range = None,
x_edges = None, y_edges = None,
x_scale = 'linear', y_scale = 'linear', z_scale = 'log',
n_bins = 128, n_bins_x = None, n_bins_y = None,
average = False,
normed = False,
hist_div_arr = None,
conditional_y = False,
y_div_function = None,
vmin = None, vmax = None,
min_bin_value_displayed = None,
zorder = 0,
add_colorbar = True,
cmap = pu_cm.magma,
colorbar_args = None,
x_label = None, y_label = None,
add_x_label = True, add_y_label = True,
plot_label = None,
outline_plot_label = False,
label_galaxy_cut = False,
label_redshift = False,
label_fontsize = 24,
tick_param_args = None,
out_dir = None,
save_file = None,
close_plot_after_saving = True,
fix_invalid = True,
line_slope = None,
cdf = False,
horizontal_line = None, vertical_line = None,
horizontal_line_kwargs = { 'linestyle': '--', 'linewidth': 5, 'color': '#337DB8', },
vertical_line_kwargs = { 'linestyle': '--', 'linewidth': 5, 'color': '#337DB8', },
return_dist = False,
*args, **kwargs ):
'''Make a 2D histogram of the data. Extra arguments are passed to get_selected_data.
Args:
x_key, y_key (str) : Data keys to plot.
            weight_key (str) : Data key for data to use as a weight. By default, no weight.
x_data_args, y_data_args (dicts) : Keyword arguments to be passed only to x or y.
            slices (int or tuple of slices) : How to slice the data.
            ax (axis) : What axis to use. By default creates a figure and places the axis on it.
x_range, y_range ( (float, float) ) : Histogram edges. If None, all data is enclosed. If list, set manually.
If float, is +- x_range*length scale at that snapshot.
n_bins (int) : Number of bins in the histogram.
vmin, vmax (float) : Limits for the colorbar.
aspect (str) : What should the aspect ratio of the plot be?
plot_halos (bool) : Whether or not to plot merger tree halos on top of the histogram.
Only makes sense for when dealing with positions.
add_colorbar (bool) : If True, add a colorbar to colorbar_args
            colorbar_args (axis) : What axis to add the colorbar to. By default, is ax.
x_label, ylabel (str) : Axes labels.
add_x_label, add_y_label (bool) : Include axes labels?
            plot_label (str or dict) : What to label the plot with. By default, uses self.label.
Can also pass a dict of full args.
outline_plot_label (bool) : If True, add an outline around the plot label.
label_galaxy_cut (bool) : If true, add a label that indicates how the galaxy was defined.
label_redshift (bool) : If True, add a label indicating the redshift.
label_fontsize (int) : Fontsize for the labels.
            tick_param_args (args) : Arguments to pass to ax.tick_params. By default, don't change inherent defaults.
out_dir (str) : If given, where to save the file.
fix_invalid (bool) : Fix invalid values.
line_slope (float) : If given, draw a line with the given slope.
'''
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
varying_kwargs = {
'x': x_data_args,
'y': y_data_args,
'weight': weight_data_args
}
data_kwargs = utilities.dict_from_defaults_and_variations( kwargs, varying_kwargs )
# Get data
if x_data is None:
x_data = self.data_object.get_selected_data( x_key, sl=sl, *args, **data_kwargs['x'] ).copy()
if y_data is None:
y_data = self.data_object.get_selected_data( y_key, sl=sl, *args, **data_kwargs['y'] ).copy()
if y_div_function is not None:
y_div_values = y_div_function( x_data )
y_data /= y_div_values
# Fix NaNs
if fix_invalid:
x_mask = np.ma.fix_invalid( x_data ).mask
y_mask = np.ma.fix_invalid( y_data ).mask
mask = np.ma.mask_or( x_mask, y_mask )
x_data = np.ma.masked_array( x_data, mask=mask ).compressed()
y_data = np.ma.masked_array( y_data, mask=mask ).compressed()
if weight_key is None:
weights = None
else:
if weight_data is None:
weights = self.data_object.get_selected_data(
weight_key,
sl=sl,
*args,
**data_kwargs['weight']
).flatten()
else:
weights = weight_data
if fix_invalid:
weights = np.ma.masked_array( weights, mask=mask ).compressed()
if n_bins_x is None:
n_bins_x = n_bins
if n_bins_y is None:
n_bins_y = n_bins
if x_range is None:
x_range = [ x_data.min(), x_data.max() ]
elif isinstance( x_range, float ):
x_range = np.array( [ -x_range, x_range ])*self.data_object.length_scale[slices]
if y_range is None:
y_range = [ y_data.min(), y_data.max() ]
elif isinstance( y_range, float ):
y_range = np.array( [ -y_range, y_range ])*self.data_object.length_scale[slices]
if x_edges is None:
if x_scale == 'log':
x_edges = np.logspace( np.log10( x_range[0] ), np.log10( x_range[1] ), n_bins_x )
else:
x_edges = np.linspace( x_range[0], x_range[1], n_bins_x )
if y_edges is None:
if y_scale == 'log':
y_edges = np.logspace( np.log10( y_range[0] ), np.log10( y_range[1] ), n_bins_y )
else:
y_edges = np.linspace( y_range[0], y_range[1], n_bins_y )
# Make the histogram
hist2d, x_edges, y_edges = np.histogram2d( x_data, y_data, [x_edges, y_edges], weights=weights, normed=normed )
# If doing an average, divide by the number in each bin
if average:
average_hist2d, x_edges, y_edges = np.histogram2d( x_data, y_data, [x_edges, y_edges], normed=normed )
hist2d /= average_hist2d
# If making the y-axis conditional, divide by the distribution of data for the x-axis.
if conditional_y:
hist_x, x_edges = np.histogram( x_data, x_edges, normed=normed )
hist2d /= hist_x[:,np.newaxis]
# Divide the histogram bins by this array
if hist_div_arr is not None:
hist2d /= hist_div_arr
# Mask bins below a specified value
if min_bin_value_displayed is not None:
hist2d = np.ma.masked_where(
hist2d < min_bin_value_displayed,
hist2d,
)
# Plot
if ax is None:
fig = plt.figure( figsize=(10,9), facecolor='white' )
ax = plt.gca()
if z_scale == 'linear':
norm = plt_colors.Normalize()
elif z_scale == 'log':
norm = plt_colors.LogNorm()
else:
norm = z_scale
if cdf:
raise Exception(
"Not implemented yet. When implementing, use utilities.cumsum2d"
)
im = ax.pcolormesh(
x_edges,
y_edges,
hist2d.transpose(),
cmap = cmap,
norm = norm,
vmin = vmin,
vmax = vmax,
zorder = zorder,
)
# Add a colorbar
if add_colorbar:
if colorbar_args is None:
colorbar_args = ax
cbar = gen_plot.add_colorbar( colorbar_args, im, method='ax' )
else:
colorbar_args['color_object'] = im
cbar = gen_plot.add_colorbar( **colorbar_args )
cbar.ax.tick_params( labelsize=20 )
# Plot Line for easier visual interpretation
if line_slope is not None:
line_x = np.array( [ x_data.min(), x_data.max() ] )
line_y = line_slope*line_x
ax.plot( line_x, line_y, linewidth=3, linestyle='dashed', )
if horizontal_line is not None:
trans = transforms.blended_transform_factory( ax.transAxes, ax.transData )
ax.plot( [ 0., 1. ], [ horizontal_line, ]*2, transform=trans, **horizontal_line_kwargs )
if vertical_line is not None:
trans = transforms.blended_transform_factory( ax.transData, ax.transAxes )
ax.plot( [ vertical_line, ]*2, [ 0., 1. ], transform=trans, **vertical_line_kwargs )
# Plot label
        if plot_label is not None:
            if isinstance( plot_label, str ):
plt_label = ax.annotate(
s = plot_label,
xy = (0.,1.0),
va = 'bottom',
xycoords = 'axes fraction',
fontsize = label_fontsize,
)
elif isinstance( plot_label, dict ):
plt_label = ax.annotate( **plot_label )
else:
raise Exception( 'Unrecognized plot_label arguments, {}'.format( plot_label ) )
if outline_plot_label:
plt_label.set_path_effects([ path_effects.Stroke(linewidth=3, foreground='black'), path_effects.Normal() ])
# Upper right label (info label)
info_label = ''
if label_galaxy_cut:
info_label = r'$r_{ \rm cut } = ' + '{:.3g}'.format( self.data_object.galids.parameters['galaxy_cut'] ) + 'r_{ s}$'
if label_redshift:
try:
info_label = r'$z=' + '{:.3f}'.format( self.data_object.redshift ) + '$'+ info_label
except ValueError:
info_label = r'$z=' + '{:.3f}'.format( self.data_object.redshift.values[sl[1]] ) + '$'+ info_label
if label_galaxy_cut or label_redshift:
ax.annotate( s=info_label, xy=(1.,1.0225), xycoords='axes fraction', fontsize=label_fontsize,
ha='right' )
# Add axis labels
if add_x_label:
if x_label is None:
x_label = x_key
ax.set_xlabel( x_label, fontsize=label_fontsize )
if add_y_label:
if y_label is None:
y_label = y_key
ax.set_ylabel( y_label, fontsize=label_fontsize )
# Limits
ax.set_xlim( x_range )
ax.set_ylim( y_range )
# Scale
ax.set_xscale( x_scale )
ax.set_yscale( y_scale )
# Set tick parameters
if tick_param_args is not None:
ax.tick_params( **tick_param_args )
# Save the file
if out_dir is not None:
if save_file is None:
save_file = '{}_{:03d}.png'.format( self.label, self.data_object.ptracks.snum[slices] )
gen_plot.save_fig( out_dir, save_file, fig=fig, dpi=75 )
if close_plot_after_saving:
plt.close()
# Return?
if return_dist:
return hist2d, x_edges, y_edges
########################################################################
def statistic_and_interval(
self,
x_key, y_key,
x_data = None, y_data = None,
weights = None,
statistic = 'median',
lower_percentile = 16,
upper_percentile = 84,
plot_interval = True,
x_data_args = {}, y_data_args = {},
ax = None,
slices = None,
fix_invalid = False,
bins = 64,
linewidth = 3,
linestyle = '-',
color = 'k',
label = None,
zorder = 100,
alpha = 0.5,
plot_label = None,
add_plot_label = True,
plot_label_kwargs = {
'xy': (0.,1.0),
'va': 'bottom',
'xycoords': 'axes fraction',
'fontsize': 22,
},
return_values = False,
*args, **kwargs
):
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
varying_kwargs = {
'x': x_data_args,
'y': y_data_args,
}
data_kwargs = utilities.dict_from_defaults_and_variations( kwargs, varying_kwargs )
# Get data
if x_data is None:
x_data = self.data_object.get_selected_data( x_key, sl=sl, *args, **data_kwargs['x'] ).copy()
if y_data is None:
y_data = self.data_object.get_selected_data( y_key, sl=sl, *args, **data_kwargs['y'] ).copy()
# Fix NaNs
if fix_invalid:
x_mask = np.ma.fix_invalid( x_data ).mask
y_mask = np.ma.fix_invalid( y_data ).mask
mask = np.ma.mask_or( x_mask, y_mask )
x_data = np.ma.masked_array( x_data, mask=mask ).compressed()
y_data = np.ma.masked_array( y_data, mask=mask ).compressed()
# Calculate the statistic
if statistic == 'weighted_mean':
assert weights is not None, "Need to provide weights."
weighted_sum, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = y_data * weights,
statistic = 'sum',
bins = bins,
)
weights_sum, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = weights,
statistic = 'sum',
bins = bins,
)
stat = weighted_sum / weights_sum
else:
assert weights is None, "weights only works with weighted_mean"
# Usual statistic
stat, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = y_data,
statistic = statistic,
bins = bins,
)
# Calculate the percentiles
def get_lower_percentile( data ):
return np.percentile( data, lower_percentile )
def get_upper_percentile( data ):
return np.percentile( data, upper_percentile )
low_p, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = y_data,
statistic = get_lower_percentile,
bins = bins,
)
high_p, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = y_data,
statistic = get_upper_percentile,
bins = bins,
)
# Get plotting axis
if ax is None:
fig = plt.figure( figsize=(10,9), facecolor='white' )
ax = plt.gca()
        # X values for the plot
x_values = bin_edges[:-1] + 0.5 * ( bin_edges[1] - bin_edges[0] )
# Plot statistic
ax.plot(
x_values,
stat,
linewidth = linewidth,
linestyle = linestyle,
color = color,
zorder = zorder,
label = label,
)
# Plot interval
if plot_interval:
ax.fill_between(
x_values,
low_p,
high_p,
color = color,
alpha = alpha,
)
# Add plot label
if add_plot_label:
if plot_label is None:
plot_label = self.label
if plot_label is not None:
plt_label = ax.annotate(
s = plot_label,
**plot_label_kwargs
)
if return_values:
return stat, low_p, high_p, bin_edges
########################################################################
def scatter(
self,
x_key, y_key,
slices = None,
n_subsample = None,
ax = None,
marker_size = 100,
color = 'k',
marker = '.',
zorder = -100,
x_range = None, y_range = None,
x_label = None, y_label = None,
add_x_label = True, add_y_label = True,
plot_label = None,
outline_plot_label = False,
label_galaxy_cut = False,
label_redshift = False,
label_fontsize = 24,
tick_param_args = None,
out_dir = None,
fix_invalid = True,
line_slope = None,
*args, **kwargs ):
'''Make a 2D scatter plot of the data. Extra arguments are passed to get_selected_data.
Args:
x_key, y_key (str) : Data keys to plot.
            weight_key (str) : Data key for data to use as a weight. By default, no weight.
            slices (int or tuple of slices) : How to slice the data.
            ax (axis) : What axis to use. By default creates a figure and places the axis on it.
x_range, y_range ( (float, float) ) : Histogram edges. If None, all data is enclosed. If list, set manually.
If float, is +- x_range*length scale at that snapshot.
n_bins (int) : Number of bins in the histogram.
vmin, vmax (float) : Limits for the colorbar.
plot_halos (bool) : Whether or not to plot merger tree halos on top of the histogram.
Only makes sense for when dealing with positions.
add_colorbar (bool) : If True, add a colorbar to colorbar_args
            colorbar_args (axis) : What axis to add the colorbar to. By default, is ax.
x_label, ylabel (str) : Axes labels.
add_x_label, add_y_label (bool) : Include axes labels?
            plot_label (str or dict) : What to label the plot with. By default, uses self.label.
Can also pass a dict of full args.
outline_plot_label (bool) : If True, add an outline around the plot label.
label_galaxy_cut (bool) : If true, add a label that indicates how the galaxy was defined.
label_redshift (bool) : If True, add a label indicating the redshift.
label_fontsize (int) : Fontsize for the labels.
            tick_param_args (args) : Arguments to pass to ax.tick_params. By default, don't change inherent defaults.
out_dir (str) : If given, where to save the file.
fix_invalid (bool) : Fix invalid values.
line_slope (float) : If given, draw a line with the given slope.
'''
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
# Get data
x_data = self.data_object.get_selected_data( x_key, sl=sl, *args, **kwargs )
y_data = self.data_object.get_selected_data( y_key, sl=sl, *args, **kwargs )
# Fix NaNs
if fix_invalid:
x_mask = np.ma.fix_invalid( x_data ).mask
y_mask = np.ma.fix_invalid( y_data ).mask
mask = np.ma.mask_or( x_mask, y_mask )
x_data = np.ma.masked_array( x_data, mask=mask ).compressed()
y_data = np.ma.masked_array( y_data, mask=mask ).compressed()
# Subsample
if n_subsample is not None:
sampled_inds = np.random.randint( 0, x_data.size, n_subsample )
x_data = x_data[sampled_inds]
y_data = y_data[sampled_inds]
if x_range is None:
x_range = [ x_data.min(), x_data.max() ]
elif isinstance( x_range, float ):
x_range = np.array( [ -x_range, x_range ])*self.data_object.ptracks.length_scale.iloc[slices]
if y_range is None:
y_range = [ y_data.min(), y_data.max() ]
elif isinstance( y_range, float ):
y_range = np.array( [ -y_range, y_range ])*self.data_object.ptracks.length_scale.iloc[slices]
# Plot
if ax is None:
fig = plt.figure( figsize=(10,9), facecolor='white' )
ax = plt.gca()
s = ax.scatter( x_data, y_data, s=marker_size, color=color, marker=marker )
# Change the z order
s.set_zorder( zorder )
# Halo Plot
if line_slope is not None:
line_x = np.array( [ x_data.min(), x_data.max() ] )
line_y = line_slope*line_x
ax.plot( line_x, line_y, linewidth=3, linestyle='dashed', )
# Plot label
if plot_label is None:
plt_label = ax.annotate( s=self.label, xy=(0.,1.0225), xycoords='axes fraction', fontsize=label_fontsize, )
elif isinstance( plot_label, str ):
plt_label = ax.annotate( s=plot_label, xy=(0.,1.0225), xycoords='axes fraction', fontsize=label_fontsize, )
elif isinstance( plot_label, dict ):
plt_label = ax.annotate( **plot_label )
else:
raise Exception( 'Unrecognized plot_label arguments, {}'.format( plot_label ) )
if outline_plot_label:
plt_label.set_path_effects([ path_effects.Stroke(linewidth=3, foreground='black'), path_effects.Normal() ])
# Upper right label (info label)
info_label = ''
if label_galaxy_cut:
info_label = r'$r_{ \rm cut } = ' + '{:.3g}'.format( self.data_object.galids.parameters['galaxy_cut'] ) + 'r_{ s}$'
if label_redshift:
info_label = r'$z=' + '{:.3f}'.format( self.data_object.ptracks.redshift.iloc[slices] ) + '$, '+ info_label
if label_galaxy_cut or label_redshift:
ax.annotate( s=info_label, xy=(1.,1.0225), xycoords='axes fraction', fontsize=label_fontsize,
ha='right' )
# Add axis labels
if add_x_label:
if x_label is None:
x_label = x_key
ax.set_xlabel( x_label, fontsize=label_fontsize )
if add_y_label:
if y_label is None:
y_label = y_key
ax.set_ylabel( y_label, fontsize=label_fontsize )
# Limits
ax.set_xlim( x_range )
ax.set_ylim( y_range )
# Set tick parameters
if tick_param_args is not None:
ax.tick_params( **tick_param_args )
# Save the file
if out_dir is not None:
save_file = '{}_{:03d}.png'.format( self.label, self.data_object.ptracks.snum[slices] )
gen_plot.save_fig( out_dir, save_file, fig=fig, dpi=75 )
plt.close()
########################################################################
def plot_stacked_data(
self,
x_key,
y_keys,
colors,
ax = None,
*args, **kwargs
):
if ax is None:
plt.figure( figsize=(11, 5), facecolor='white' )
ax = plt.gca()
        x_data = self.data_object.get_selected_data( x_key, *args, **kwargs )

        # Collect each y dataset before stacking
        y_datas = []
        for y_key in y_keys:

            y_data = self.data_object.get_selected_data(
                y_key,
                *args, **kwargs
            ).copy()

            y_datas.append( y_data )

        # Stack the filled regions, keeping virtual artists around for the legend.
        # The original draft referenced classification_colors and p_constants, which
        # aren't available in this module, so the passed-in colors and the y_keys
        # themselves are used for the colors and labels instead.
        y_prev = np.zeros( shape=y_datas[0].shape )
        color_objects = []
        labels = []
        for i, y_key in enumerate( y_keys ):

            y_next = y_prev + y_datas[i]

            ax.fill_between(
                x_data,
                y_prev,
                y_next,
                color = colors[i],
                alpha = 0.5,
            )

            # Make virtual artists to allow a legend to appear
            color_object = matplotlib.patches.Rectangle(
                (0, 0),
                1,
                1,
                fc = colors[i],
                ec = colors[i],
                alpha = 0.5,
            )
            color_objects.append( color_object )
            labels.append( y_key )

            y_prev = y_next
ax.annotate(
s=self.label,
xy=(0., 1.0225),
xycoords='axes fraction',
fontsize=22,
)
ax.legend(
color_objects,
labels,
prop={'size': 14.5},
ncol=5,
loc=(0., -0.28),
fontsize=20
)
########################################################################
def plot_time_dependent_data( self,
ax = None,
x_range = [ 0., np.log10(8.) ], y_range = None,
y_scale = 'log',
x_label = None, y_label = None,
):
'''Make a plot like the top panel of Fig. 3 in Angles-Alcazar+17
Args:
            ax (axis object) :
                What axis to put the plot on. By default, create a new one on a separate figure.
x_range, y_range (list-like) :
[ x_min, x_max ] or [ y_min, y_max ] for the displayed range.
            x_label, y_label (str) :
                Labels for axis. By default, redshift and f(M_star), respectively.
plot_dividing_line (bool) :
Whether or not to plot a line at the edge between stacked regions.
'''
if ax is None:
fig = plt.figure( figsize=(11,5), facecolor='white' )
ax = plt.gca()
x_data = np.log10( 1. + self.data_object.get_data( 'redshift' ) )
        y_datas = self.data_object.get_categories_galaxy_mass()
for key in p_constants.CLASSIFICATION_LIST_A[::-1]:
y_data = y_datas[key]
ax.plot(
x_data,
y_data,
linewidth = 3,
color = p_constants.CLASSIFICATION_COLORS[key],
label = p_constants.CLASSIFICATION_LABELS[key],
)
if x_range is not None:
ax.set_xlim( x_range )
if y_range is not None:
ax.set_ylim( y_range )
ax.set_yscale( y_scale )
tick_redshifts = np.array( [ 0.25, 0.5, 1, 2, 3, 4, 5, 6, 7, ] )
x_tick_values = np.log10( 1. + tick_redshifts )
plt.xticks( x_tick_values, tick_redshifts )
ax.set_xlabel( r'z', fontsize=22, )
ax.set_ylabel( r'$M_{\star} (M_{\odot})$', fontsize=22, )
ax.annotate( s=self.label, xy=(0.,1.0225), xycoords='axes fraction', fontsize=22, )
ax.legend( prop={'size':14.5}, ncol=5, loc=(0.,-0.28), fontsize=20 )
########################################################################
# Generic Plotting Methods
########################################################################
def same_axis_plot(
self,
axis_plotting_method_str,
variations,
ax = None,
figsize = (11, 5),
out_dir = None,
add_line_label = False,
legend_args = { 'prop': {'size': 16.5}, 'loc': 'upper right', 'fontsize': 20 },
*args, **kwargs
):
if ax is None:
fig = plt.figure( figsize=figsize, facecolor='white', )
ax = plt.gca()
all_plotting_kwargs = utilities.dict_from_defaults_and_variations( kwargs, variations )
axis_plotting_method = getattr( self, axis_plotting_method_str )
for key, plotting_kwargs in all_plotting_kwargs.items():
plotting_kwargs['ax'] = ax
if add_line_label:
plotting_kwargs['line_label'] = key
axis_plotting_method( *args, **plotting_kwargs )
ax.legend( **legend_args )
# Save the file
if out_dir is not None:
save_file = '{}_{:03d}.png'.format( self.label, self.data_object.ptracks.snum[kwargs['slices']] )
gen_plot.save_fig( out_dir, save_file, fig=fig, dpi=75 )
plt.close()
########################################################################
def panel_plot( self,
panel_plotting_method_str,
defaults,
variations,
slices = None,
n_rows = 2,
n_columns = 2,
plot_locations = [ (0,0), (0,1), (1,0), (1,1) ],
figsize = (10,9),
plot_label = None,
outline_plot_label = False,
label_galaxy_cut = False,
label_redshift = True,
label_fontsize = 24,
subplot_label_args = { 'xy': (0.075, 0.88), 'xycoords': 'axes fraction', 'fontsize': 20, 'color': 'k', },
subplot_spacing_args = { 'hspace': 0.0001, 'wspace': 0.0001, },
out_dir = None,
):
'''
Make a multi panel plot of the type of your choosing.
Args:
panel_plotting_method_str (str) : What type of plot to make.
defaults (dict) : Default arguments to pass to panel_plotting_method.
variations (dict of dicts) : Differences in plotting arguments per subplot.
            slices (slice) : What slices to select. By default, this doesn't pass any slices argument to panel_plotting_method
            plot_label (str or dict) : What to label the plot with. By default, uses self.label.
Can also pass a dict of full args.
outline_plot_label (bool) : If True, add an outline around the plot label.
label_galaxy_cut (bool) : If true, add a label that indicates how the galaxy was defined.
label_redshift (bool) : If True, add a label indicating the redshift.
label_fontsize (int) : Fontsize for the labels.
subplot_label_args (dict) : Label arguments to pass to each subplot for the label for the subplot.
The actual label string itself corresponds to the keys in variations.
subplot_spacing_args (dict) : How to space the subplots.
out_dir (str) : If given, where to save the file.
'''
fig = plt.figure( figsize=figsize, facecolor='white', )
ax = plt.gca()
fig.subplots_adjust( **subplot_spacing_args )
if slices is not None:
defaults['slices'] = slices
plotting_kwargs = utilities.dict_from_defaults_and_variations( defaults, variations )
# Setup axes
gs = gridspec.GridSpec(n_rows, n_columns)
axs = []
for plot_location in plot_locations:
axs.append( plt.subplot( gs[plot_location] ) )
# Setup arguments further
for i, key in enumerate( plotting_kwargs.keys() ):
ax_kwargs = plotting_kwargs[key]
ax_kwargs['ax'] = axs[i]
# Subplot label args
this_subplot_label_args = subplot_label_args.copy()
this_subplot_label_args['s'] = key
ax_kwargs['plot_label'] = this_subplot_label_args
if ax_kwargs['add_colorbar']:
ax_kwargs['colorbar_args'] = { 'fig_or_ax': fig, 'ax_location': [0.9, 0.125, 0.03, 0.775 ], }
# Clean up interior axes
ax_tick_parm_args = ax_kwargs['tick_param_args'].copy()
plot_location = plot_locations[i]
# Hide repetitive x labels
if plot_location[0] != n_rows -1 :
ax_kwargs['add_x_label'] = False
ax_tick_parm_args['labelbottom'] = False
# Hide repetitive y labels
if plot_location[1] != 0:
ax_kwargs['add_y_label'] = False
ax_tick_parm_args['labelleft'] = False
ax_kwargs['tick_param_args'] = ax_tick_parm_args
# Actual panel plots
panel_plotting_method = getattr( self, panel_plotting_method_str )
for key in plotting_kwargs.keys():
panel_plotting_method( **plotting_kwargs[key] )
# Main axes labels
# Plot label
if plot_label is None:
plt_label = axs[0].annotate( s=self.label, xy=(0.,1.0225), xycoords='axes fraction', fontsize=label_fontsize, )
elif isinstance( plot_label, str ):
plt_label = axs[0].annotate( s=plot_label, xy=(0.,1.0225), xycoords='axes fraction', fontsize=label_fontsize, )
elif isinstance( plot_label, dict ):
plt_label = axs[0].annotate( **plot_label )
else:
raise Exception( 'Unrecognized plot_label arguments, {}'.format( plot_label ) )
if outline_plot_label:
plt_label.set_path_effects([
path_effects.Stroke(linewidth=3, foreground='white', background='white'),
path_effects.Normal()
])
# Upper right label (info label)
info_label = ''
if label_galaxy_cut:
info_label = r'$r_{ \rm cut } = ' + '{:.3g}'.format( self.data_object.galids.parameters['galaxy_cut'] ) + 'r_{ s}$'
if label_redshift:
ind = defaults['slices']
info_label = r'$z=' + '{:.3f}'.format( self.data_object.ptracks.redshift.iloc[ind] ) + '$'+ info_label
if label_galaxy_cut or label_redshift:
label_ax = plt.subplot( gs[0,n_columns-1,] )
label_ax.annotate(
s=info_label,
xy=(1.,1.0225),
xycoords='axes fraction',
fontsize=label_fontsize,
ha='right'
)
# Save the file
if out_dir is not None:
save_file = '{}_{:03d}.png'.format( self.label, self.data_object.ptracks.snum[slices] )
gen_plot.save_fig( out_dir, save_file, fig=fig )
plt.close()
########################################################################
def make_multiple_plots( self,
plotting_method_str,
iter_args_key,
iter_args,
n_processors = 1,
out_dir = None,
make_out_dir_subdir = True,
make_movie = False,
clear_data = False,
*args, **kwargs ):
'''Make multiple plots of a selected type. *args and **kwargs are passed to plotting_method_str.
Args:
plotting_method_str (str) : What plotting method to use.
iter_args_key (str) : The name of the argument to iterate over.
iter_args (list) : List of argument values to change.
n_processors (int) : Number of processors to use. Should only be used when saving the data.
out_dir (str) : Where to save the data.
make_movie (bool) : Make a movie out of the plots, if True.
clear_data (bool) : If True, clear memory of the data after making the plots.
'''
plotting_method = getattr( self, plotting_method_str )
if ( out_dir is not None ) and make_out_dir_subdir:
out_dir = os.path.join( out_dir, self.label )
def plotting_method_wrapper( process_args ):
used_out_dir, used_args, used_kwargs = process_args
plotting_method( out_dir=used_out_dir, *used_args, **used_kwargs )
del used_out_dir, used_args, used_kwargs
return
all_process_args = []
for iter_arg in iter_args:
process_kwargs = dict( kwargs )
process_kwargs[iter_args_key] = iter_arg
all_process_args.append( ( out_dir, args, process_kwargs ) )
if n_processors > 1:
# For safety, make sure we've loaded the data already
self.data_object.ptracks, self.data_object.galids, self.data_object.classifications
mp_utils.parmap( plotting_method_wrapper, all_process_args, n_processors=n_processors, return_values=False )
else:
for i, iter_arg in enumerate( iter_args ):
plotting_method_wrapper( all_process_args[i] )
if make_movie:
gen_plot.make_movie( out_dir, '{}_*.png'.format( self.label ), '{}.mp4'.format( self.label ), )
if clear_data:
del self.data_object.ptracks
del self.data_object.galids
del self.data_object.classifications
########################################################################
########################################################################
class PlotterSet( verdict.Dict ):
'''Container for multiple plotters that is an enhanced dictionary.
'''
def __init__( self, data_object_cls, plotter_object_cls, defaults, variations ):
'''
Args:
data_object_cls (object) : Class for the data object.
plotter_object_cls (object) : Class for the plotter object.
            defaults (dict) : Set of default arguments for loading worldline data.
variations (dict of dicts) : Labels and differences in arguments to be passed to Worldlines
'''
# Load the worldline sets
storage = {}
for key in variations.keys():
kwargs = dict( defaults )
for var_key in variations[key].keys():
kwargs[var_key] = variations[key][var_key]
storage[key] = { 'data_object': data_object_cls( **kwargs ), 'label': key }
plotters_storage = utilities.SmartDict.from_class_and_args( plotter_object_cls, storage )
super( PlotterSet, self ).__init__( plotters_storage )
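# Illustrative construction sketch (all class names and keyword arguments below are
# hypothetical stand-ins; the concrete data classes live in other galaxy_dive modules):
#
#   plotters = PlotterSet(
#       data_object_cls = SomeDataClass,              # hypothetical data container class
#       plotter_object_cls = GenericPlotter,
#       defaults = { 'data_dir': '/path/to/data' },   # shared constructor arguments
#       variations = {
#           'run_A': { 'tag': 'A' },
#           'run_B': { 'tag': 'B' },
#       },
#   )
#   plotters['run_A'].histogram( 'some_data_key' )    # each value is a plotter instance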
|
StarcoderdataPython
|
12850483
|
<filename>swc/cli_repositories.bzl
"@generated by @aspect_rules_js//npm/private:npm_translate_lock.bzl from pnpm lock file @aspect_rules_swc@aspect_rules_swc//swc:pnpm-lock.yaml"
load("@aspect_rules_js//npm:npm_import.bzl", "npm_import")
def npm_repositories():
"Generated npm_import repository rules corresponding to npm packages in @aspect_rules_swc@aspect_rules_swc//swc:pnpm-lock.yaml"
npm_import(
name = "swc_cli__at_nodelib_fs.scandir__2.1.5",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "@nodelib/fs.scandir",
version = "2.1.5",
integrity = "<KEY>
deps = {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "1.2.0",
},
transitive_closure = {
"@nodelib/fs.scandir": ["2.1.5"],
"@nodelib/fs.stat": ["2.0.5"],
"run-parallel": ["1.2.0"],
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__at_nodelib_fs.stat__2.0.5",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "@nodelib/fs.stat",
version = "2.0.5",
integrity = "<KEY>
transitive_closure = {
"@nodelib/fs.stat": ["2.0.5"],
},
)
npm_import(
name = "swc_cli__at_nodelib_fs.walk__1.2.8",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "@nodelib/fs.walk",
version = "1.2.8",
integrity = "<KEY>
deps = {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "1.13.0",
},
transitive_closure = {
"@nodelib/fs.walk": ["1.2.8"],
"@nodelib/fs.scandir": ["2.1.5"],
"fastq": ["1.13.0"],
"reusify": ["1.0.4"],
"@nodelib/fs.stat": ["2.0.5"],
"run-parallel": ["1.2.0"],
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__at_swc_cli__0.1.57",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {
"swc": ["@swc/cli"],
},
package = "@swc/cli",
version = "0.1.57",
integrity = "<KEY>
deps = {
"commander": "7.2.0",
"fast-glob": "3.2.11",
"slash": "3.0.0",
"source-map": "0.7.3",
},
transitive_closure = {
"@swc/cli": ["0.1.57"],
"commander": ["7.2.0"],
"fast-glob": ["3.2.11"],
"slash": ["3.0.0"],
"source-map": ["0.7.3"],
"@nodelib/fs.stat": ["2.0.5"],
"@nodelib/fs.walk": ["1.2.8"],
"glob-parent": ["5.1.2"],
"merge2": ["1.4.1"],
"micromatch": ["4.0.5"],
"braces": ["3.0.2"],
"picomatch": ["2.3.1"],
"fill-range": ["7.0.1"],
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
"is-glob": ["4.0.3"],
"is-extglob": ["2.1.1"],
"@nodelib/fs.scandir": ["2.1.5"],
"fastq": ["1.13.0"],
"reusify": ["1.0.4"],
"run-parallel": ["1.2.0"],
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__braces__3.0.2",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "braces",
version = "3.0.2",
integrity = "<KEY>
deps = {
"fill-range": "7.0.1",
},
transitive_closure = {
"braces": ["3.0.2"],
"fill-range": ["7.0.1"],
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
},
)
npm_import(
name = "swc_cli__commander__7.2.0",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "commander",
version = "7.2.0",
integrity = "<KEY>
transitive_closure = {
"commander": ["7.2.0"],
},
)
npm_import(
name = "swc_cli__fast-glob__3.2.11",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "fast-glob",
version = "3.2.11",
integrity = "<KEY>
deps = {
"@nodelib/fs.stat": "2.0.5",
"@nodelib/fs.walk": "1.2.8",
"glob-parent": "5.1.2",
"merge2": "1.4.1",
"micromatch": "4.0.5",
},
transitive_closure = {
"fast-glob": ["3.2.11"],
"@nodelib/fs.stat": ["2.0.5"],
"@nodelib/fs.walk": ["1.2.8"],
"glob-parent": ["5.1.2"],
"merge2": ["1.4.1"],
"micromatch": ["4.0.5"],
"braces": ["3.0.2"],
"picomatch": ["2.3.1"],
"fill-range": ["7.0.1"],
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
"is-glob": ["4.0.3"],
"is-extglob": ["2.1.1"],
"@nodelib/fs.scandir": ["2.1.5"],
"fastq": ["1.13.0"],
"reusify": ["1.0.4"],
"run-parallel": ["1.2.0"],
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__fastq__1.13.0",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "fastq",
version = "1.13.0",
integrity = "<KEY>
deps = {
"reusify": "1.0.4",
},
transitive_closure = {
"fastq": ["1.13.0"],
"reusify": ["1.0.4"],
},
)
npm_import(
name = "swc_cli__fill-range__7.0.1",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "fill-range",
version = "7.0.1",
integrity = "<KEY>LWJ2USVpQ==",
deps = {
"to-regex-range": "5.0.1",
},
transitive_closure = {
"fill-range": ["7.0.1"],
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
},
)
npm_import(
name = "swc_cli__glob-parent__5.1.2",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "glob-parent",
version = "5.1.2",
integrity = "<KEY>
deps = {
"is-glob": "4.0.3",
},
transitive_closure = {
"glob-parent": ["5.1.2"],
"is-glob": ["4.0.3"],
"is-extglob": ["2.1.1"],
},
)
npm_import(
name = "swc_cli__is-extglob__2.1.1",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "is-extglob",
version = "2.1.1",
integrity = "<KEY>
transitive_closure = {
"is-extglob": ["2.1.1"],
},
)
npm_import(
name = "swc_cli__is-glob__4.0.3",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "is-glob",
version = "4.0.3",
integrity = "<KEY>
deps = {
"is-extglob": "2.1.1",
},
transitive_closure = {
"is-glob": ["4.0.3"],
"is-extglob": ["2.1.1"],
},
)
npm_import(
name = "swc_cli__is-number__7.0.0",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "is-number",
version = "7.0.0",
integrity = "<KEY>
transitive_closure = {
"is-number": ["7.0.0"],
},
)
npm_import(
name = "swc_cli__merge2__1.4.1",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "merge2",
version = "1.4.1",
integrity = "<KEY>
transitive_closure = {
"merge2": ["1.4.1"],
},
)
npm_import(
name = "swc_cli__micromatch__4.0.5",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "micromatch",
version = "4.0.5",
integrity = "<KEY>
deps = {
"braces": "3.0.2",
"picomatch": "2.3.1",
},
transitive_closure = {
"micromatch": ["4.0.5"],
"braces": ["3.0.2"],
"picomatch": ["2.3.1"],
"fill-range": ["7.0.1"],
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
},
)
npm_import(
name = "swc_cli__picomatch__2.3.1",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "picomatch",
version = "2.3.1",
integrity = "<KEY>
transitive_closure = {
"picomatch": ["2.3.1"],
},
)
npm_import(
name = "swc_cli__queue-microtask__1.2.3",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "queue-microtask",
version = "1.2.3",
integrity = "<KEY>
transitive_closure = {
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__reusify__1.0.4",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "reusify",
version = "1.0.4",
integrity = "<KEY>
transitive_closure = {
"reusify": ["1.0.4"],
},
)
npm_import(
name = "swc_cli__run-parallel__1.2.0",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "run-parallel",
version = "1.2.0",
integrity = "<KEY>
deps = {
"queue-microtask": "1.2.3",
},
transitive_closure = {
"run-parallel": ["1.2.0"],
"queue-microtask": ["1.2.3"],
},
)
npm_import(
name = "swc_cli__slash__3.0.0",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "slash",
version = "3.0.0",
integrity = "<KEY>
transitive_closure = {
"slash": ["3.0.0"],
},
)
npm_import(
name = "swc_cli__source-map__0.7.3",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "source-map",
version = "0.7.3",
integrity = "<KEY>
transitive_closure = {
"source-map": ["0.7.3"],
},
)
npm_import(
name = "swc_cli__to-regex-range__5.0.1",
root_package = "swc",
link_workspace = "aspect_rules_swc",
link_packages = {},
package = "to-regex-range",
version = "5.0.1",
integrity = "<KEY>
deps = {
"is-number": "7.0.0",
},
transitive_closure = {
"to-regex-range": ["5.0.1"],
"is-number": ["7.0.0"],
},
)
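# Illustrative WORKSPACE usage (a sketch; it assumes the usual rules_js/npm_import setup
# is already in place and that this file is loadable at the label shown):
#
#   load("@aspect_rules_swc//swc:cli_repositories.bzl", "npm_repositories")
#   npm_repositories()
#
# This instantiates one npm_import external repository per package in the pnpm lock file above.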
|
StarcoderdataPython
|
24203
|
#!venv/bin/python
# coding=UTF-8
# -*- coding: UTF-8 -*-
# vim: set fileencoding=UTF-8 :
"""
Double-deck bid euchre
Implementation is similar to the rules given by <NAME>
https://www.pagat.com/euchre/bideuch.html
Notable differences (to match how I learned in high school calculus) include:
* Minimum bid of 6 (which can be stuck to the dealer)
* Shooters and loners are separate bids (guessing as ±18 for shooter, similar to a loner)
* Shooters are a mandatory 2 card exchange with your partner
* Trump isn't announced until after bidding has concluded
* Winner of bid leads the first hand
* Winning your bid gives you (tricks earned + 2) points
Mothjab is a funny word with no current meaning.
"""
from cardstock import *
debug: Optional[bool] = False
o: Optional[TextIO] = None
log_dir: str = game_out_dir(os.path.basename(__file__).split(".py")[0])
def p(msg):
global o
click.echo(msg, o)
def px(msg) -> None:
global debug
if debug:
p(msg)
class EuchrePlayer(BasePlayer, abc.ABC):
desired_trump: Bid
def __init__(self, g: "GameType", /, name: str, is_bot: int = 1, **kwargs):
super().__init__(g, name, is_bot)
self.tricks: int = 0
self.bid_estimates: Dict[Bid, int] = {}
self.reset_bids()
def reset_bids(self) -> None:
for t in Bid:
self.bid_estimates[t] = 0
@property
def shoot_strength(self) -> int:
return self.in_game.shoot_strength
@property
def choose_trump(self) -> Bid:
return self.desired_trump
@abc.abstractmethod
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
pass
def trumpify_hand(self, trump_suit: Optional[Suit], is_lo: bool = False) -> None:
"""Marks the trump suit and sort the hands"""
if trump_suit:
self.hand.trumpify(trump_suit)
self.sort_hand(is_lo)
def receive_shooter(self, **kwargs) -> None:
shot = PassList(
list(self.teammates),
directions=[pass_shoot] * self.in_game.shoot_strength,
specific_destination=cycle([self]),
sort_low=self.in_game.low_win,
)
shot.collect_cards()
shot.distribute_cards()
class HumanPlayer(BaseHuman, EuchrePlayer):
def __init__(self, g: "GameType", /, name: str):
BaseHuman.__init__(self, g, name)
EuchrePlayer.__init__(self, g, name, 0)
@property
def choose_trump(self) -> Bid:
p(self.hand) # give a closer look at your hand before bidding
bids: List[str] = [c for c in Bid.__members__]
bids.extend([Bid[c].short_name for c in Bid.__members__])
bid: str = click.prompt(
"Declare Trump", type=click.Choice(bids, False), show_choices=False,
).upper()
return Bid[[b for b in Bid.__members__ if (bid in b)][0]]
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
self.hand.sort(key=key_display4human)
p(self.hand)
return int(
click.prompt(
"How much to bid",
type=click.Choice(
["0"] + [str(x) for x in valid_bids if (x >= min_bid)], False,
),
)
)
class ComputerPlayer(BaseComputer, EuchrePlayer):
sort_key = key_trump_power
def __init__(self, g: "GameType", /, name: str):
BaseComputer.__init__(self, g, name)
EuchrePlayer.__init__(self, g, name, 1)
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
if max(self.bid_estimates.values()) == 0:
self.bid_estimates = {
t: self.simulate_hand(
h_p=deepcopy(self.hand),
d_p=deepcopy(self.card_count),
handedness=self.in_game.handedness,
t=t,
)
for t in Bid
}
# pick the biggest
# any decisions based on the current winning bid should happen here
bid: int = max(self.bid_estimates.values())
self.desired_trump = random.choice(
[k for k in self.bid_estimates.keys() if (self.bid_estimates[k] == bid)]
)
# don't outbid your partner (within reason)
if leading_player in self.teammates and bid - min_bid < 2:
return 0
# can you do it by yourself?
if bid == len(self.hand) - 1:
return valid_bids[-2] # call a shooter
elif bid == len(self.hand):
return valid_bids[-1] # call a loner
# don't bid outrageously if you don't have to
# count on two tricks from your partner
return bid + self.shoot_strength * len(self.teammates)
def pick_card(self, valid_cards: Hand, **kwargs,) -> Card:
tp: Trick = kwargs.get("trick_in_progress")
is_low: bool = kwargs.get("is_low")
unplayed: Hand = self.card_count
broken: Dict[Suit, Union[Team, None, bool]] = self.in_game.suit_safety
# TODO be less stupid with large games (>4 players)
def winning_leads(ss: List[Suit], st: bool = True) -> List[Card]:
wl: List[Card] = []
for s in ss:
wl.extend(
self.estimate_tricks_by_suit(
follow_suit(s, valid_cards, True),
follow_suit(s, unplayed, True),
is_low,
strict=st,
)
)
return wl
if not tp: # you have the lead
safer_suits: List[Suit] = [
s for s in broken.keys() if broken[s] is False or broken[s] == self.team
] if broken else suits
w: List[Card] = []
if safer_suits: # unbroken suits to lead aces
px("Checking suits")
w += winning_leads(safer_suits)
else: # lead with good trump
px("Leading with a good trump")
w += winning_leads([Suit.TRUMP])
if not w: # try a risky ace
px("Risky bet")
w += winning_leads(suits, st=bool(self.teammates))
if not w and self.teammates: # seamless passing of the lead
is_low = not is_low
w += winning_leads(suits + [Suit.TRUMP], st=False)
px("Lead pass")
if not w: # YOLO time
px("YOLO")
return random.choice(valid_cards)
px(w)
return random.choice(w)
# you don't have the lead
# win if you can (and the current lead isn't on your team)
# play garbage otherwise
junk_ranks: Set[Rank] = (
{Rank.ACE_HI, Rank.KING} if is_low else {Rank.NINE, Rank.TEN, Rank.JACK}
) | {Rank.QUEEN}
wc, wp = tp.winner(is_low)
w = Hand(c for c in valid_cards if c.beats(wc, is_low))
junk_cards = Hand(h for h in valid_cards if h not in w)
if w: # you have something that can win
if wp in self.teammates and junk_cards: # your partner is winning
if wc.rank in junk_ranks: # but their card is rubbish
return random.choice(w)
return random.choice(junk_cards)
return random.choice(w)
return random.choice(junk_cards)
def simulate_hand(self, *, h_p: Hand, d_p: Hand, t: Bid, **kwargs) -> int:
def slice_by_suit(h: Hand, s: Suit) -> Hand:
return follow_suit(
s,
sorted(
h.trumpify(t.trump_suit), key=key_trump_power, reverse=not t.is_low,
),
strict=True,
ok_empty=True,
)
return sum(
[
len(
self.estimate_tricks_by_suit(
my_suit=slice_by_suit(h_p, s),
mystery_suit=slice_by_suit(d_p, s),
is_low=t.is_low,
is_trump=(s == Suit.TRUMP),
)
)
for s in suits + [Suit.TRUMP]
]
)
@staticmethod
def estimate_tricks_by_suit(
my_suit: Iterable[Card],
mystery_suit: Iterable[Card],
is_low: bool,
is_trump: Optional[bool] = False,
strict: bool = False,
) -> Hand:
"""
Slices up your hand and unplayed cards to estimate which suit has the most potential
:param my_suit: list of your cards presumed of the same suit
:param mystery_suit: unplayed cards of the suit
        :param is_low: True if this is a low (lo) game
:param is_trump: unused
:param strict: True to pick a trick, False to estimate total tricks in a hand
:return: winning cards for the suit
"""
est = Hand()
for rank in (
euchre_ranks
if is_low
else [Rank.RIGHT_BOWER, Rank.LEFT_BOWER] + list(reversed(euchre_ranks))
):
me: List[Card] = match_by_rank(my_suit, rank)
oth: List[Card] = match_by_rank(mystery_suit, rank)
# p(f"{me} {rank} {oth}") # debugging
est.extend(me)
if oth and (strict or not me and not strict):
break # there are mystery cards that beat your cards
return est
class Team(BaseTeam, MakesBid, WithScore):
def __init__(self, players: Iterable[BasePlayer]):
BaseTeam.__init__(self, players)
MakesBid.__init__(self)
WithScore.__init__(self)
self.bid_history: List[str] = []
self.tricks_taken: List[int] = []
def hand_tab(self, hand: Optional[int], tab: str = "\t") -> str:
return tab.join(
[
str(self.bid_history[hand]),
str(self.tricks_taken[hand]),
str(self.score_changes[hand]),
]
if hand is not None
else [
str(sum([1 for b in self.bid_history if b != str(None)])),
str(sum(self.tricks_taken)),
str(self.score),
]
)
class BidEuchre(BaseGame):
def __init__(self, *, minimum_bid: int = 6, **kwargs):
"""
A game of bid euchre
:param minimum_bid: minimum bid that will get stuck to the dealer
:param kwargs: things to pass along to BaseGame
"""
# setup for the super() call
if not kwargs.get("deck_replication"):
kwargs["deck_replication"] = 2
if not kwargs.get("team_size"):
kwargs["team_size"] = (
2 if (h := kwargs.get("handedness")) and not (h % 2) else 1
)
if kwargs.get("pass_size") is None:
kwargs["pass_size"] = 2
if kwargs.get("minimum_kitty_size") is None:
kwargs["minimum_kitty_size"] = 0
if not kwargs.get("minimum_hand_size"):
kwargs["minimum_hand_size"] = 8
super().__init__(
human_player_type=HumanPlayer,
computer_player_type=ComputerPlayer,
team_type=Team,
game_name="Euchre",
deck_generator=make_euchre_deck,
**kwargs,
)
self.trump: Optional[Suit] = None
self.low_win: bool = False
# set the bidding
c = configparser.ConfigParser()
c.read("constants.cfg")
minimum_bid: int = minimum_bid if minimum_bid else (
6 if self.handedness == 3 else (self.hand_size // 2)
)
self.valid_bids: List[int] = [
i for i in range(minimum_bid, self.hand_size + 1)
] + (
[round(self.hand_size * 1.5), self.hand_size * 2]
if len(self.teams) != len(self.players)
else []
)
if (
self.victory_threshold is not None and self.victory_threshold > 0
): # negative thresholds get dunked on
self.mercy_rule: int = -self.victory_threshold
self.bad_ai_end: int = -self.victory_threshold // 2
else:
self.victory_threshold: int = c["Scores"].getint("victory")
self.mercy_rule: int = c["Scores"].getint("mercy")
self.bad_ai_end: int = c["Scores"].getint("broken_ai")
@property
def shoot_strength(self) -> int:
"""Alias so I don't break existing code"""
return self.pass_size
def bidding(self, bid_order: List[EuchrePlayer]) -> EuchrePlayer:
first_round: bool = True
count: int = 1
hands: int = len(bid_order)
wp: Optional[EuchrePlayer] = None
wb: int = 0
bid_order = cycle(bid_order)
min_bid: int = min(self.valid_bids)
max_bid: int = max(self.valid_bids)
for pl in bid_order:
# everyone has passed
if count == hands:
if first_round: # stuck the dealer
wb = min_bid
p(f"Dealer {pl} got stuck with {min_bid}")
if pl.is_bot: # dealer picks suit
pl.make_bid(self.valid_bids, min_bid, pl)
wp = pl
else: # someone won the bid
wb = min_bid - 1
break
# end bidding early for a loner
if min_bid > max_bid:
wb = max_bid
break
# get the bid
bid: int = pl.make_bid(self.valid_bids, min_bid, wp)
# player passes
if bid < min_bid:
p(f"{pl} passes")
count += 1
continue
# bid successful
min_bid = bid + 1
wp = pl
count = 1
first_round = False
p(f"{pl} bids {bid}")
wp.team.bid = wb
return wp
def play_hand(self, dealer: EuchrePlayer) -> EuchrePlayer:
self.deal()
hn: int = len(dealer.team.score_changes) + 1
p(f"\nHand {hn}")
p(f"Dealer: {dealer}")
po: List[EuchrePlayer] = get_play_order(dealer)
po.append(po.pop(0)) # because the dealer doesn't lead bidding
# deal the cards
for pl in po:
pl.tricks = 0
pl.reset_bids()
# bidding
lead: EuchrePlayer = self.bidding(po)
# declare Trump
trump: Bid = lead.choose_trump
p(trump)
self.low_win = trump.is_low
p(f"{lead} bid {lead.team.bid} {trump.name}\n")
# modify hands if trump called
[player.trumpify_hand(trump.trump_suit, trump.is_low) for player in po]
self.unplayed_cards.trumpify(trump.trump_suit) # for card-counting
self.suit_safety[trump.trump_suit] = None
# check for shooters and loners
lone: Optional[EuchrePlayer] = None
if lead.team.bid > self.hand_size:
if lead.team.bid < 2 * self.hand_size:
lead.receive_shooter()
lone = lead
# play the tricks
for _ in range(self.hand_size):
lead = self.play_trick(lead, trump.is_low, lone)
# calculate scores
p(f"Hand {hn} scores:")
for t in self.teams:
tr_t: int = 0
ls: int = 0
bid: int = t.bid
for pl in t.players:
tr_t += pl.tricks
if bid:
# loners and shooters
if lone:
ls = bid
bid = self.hand_size
if tr_t < bid:
p(f"{t} got Euchred and fell {bid - tr_t} short of {bid}")
t.score = -bid if not ls else -bid * 3 // 2
elif ls:
p(f"{lone} won all alone, the absolute madman!")
t.score = ls
else:
p(f"{t} beat their bid of {bid} with {tr_t} tricks")
t.score = tr_t + 2
else: # tricks the non-bidding team earned
p(f"{t} earned {tr_t} tricks")
t.score = tr_t
# bookkeeping
t.bid_history.append(
f"{ls if ls else bid} {trump.name}" if bid else str(None)
)
t.tricks_taken.append(tr_t)
p(f"{t}: {t.score}")
t.bid = 0 # reset for next time
return dealer.next_player
def play_trick(
self,
lead: EuchrePlayer,
is_low: bool = False,
lone: Optional[EuchrePlayer] = None,
) -> EuchrePlayer:
pl: EuchrePlayer = lead
po: List[EuchrePlayer] = get_play_order(lead)
trick_in_progress: Trick = Trick()
# play the cards
for pl in po:
if lone and pl in lone.teammates:
continue
c: Card = pl.play_card(
trick_in_progress,
handedness=self.handedness,
is_low=is_low,
broken_suits=self.suit_safety,
trump=self.trump,
)
trick_in_progress.append(TrickPlay(c, pl))
p(f"{pl.name} played {repr(c)}")
# find the winner
w: TrickPlay = trick_in_progress.winner(is_low)
w.played_by.tricks += 1
p(f"{w.played_by.name} won the trick\n")
l_suit: Suit = trick_in_progress.lead_suit
if w.card.suit != l_suit:
self.suit_safety[l_suit] = (
True if self.suit_safety[l_suit] else w.played_by.team
)
return w.played_by
def write_log(self, ld: str, splitter: str = "\t|\t") -> None:
stop_time: str = str(datetime.now()).split(".")[0]
f: TextIO = open(os.path.join(ld, f"{self.start_time}.gamelog"), "w")
t_l: List[Team] = list(self.teams) # give a consistent ordering
def w(msg):
click.echo(msg, f)
# headers
w(splitter.join([self.start_time] + [f"{t}\t\t" for t in t_l]))
w(splitter.join([""] + ["Bid\tTricks Taken\tScore Change" for _ in t_l]))
w(splitter.join(["Hand"] + ["===\t===\t===" for _ in t_l]))
w( # body
"\n".join(
[
splitter.join([f"{hand + 1}"] + [t.hand_tab(hand) for t in t_l])
for hand in range(len(t_l[0].bid_history))
]
)
)
# totals
w(splitter.join([stop_time] + ["===\t===\t===" for _ in t_l]))
w(splitter.join(["Totals"] + [t.hand_tab(None) for t in t_l]))
f.close()
def victory_check(self) -> Tuple[int, Optional[Team]]:
scorecard: List[Team] = sorted(self.teams, key=score_key)
best_score: int = scorecard[-1].score
if best_score < self.bad_ai_end:
return -2, None # everyone went too far negative
if best_score == scorecard[-2].score:
return 0, None # keep playing for a tie
if best_score > self.victory_threshold: # a team won
return 1, scorecard[-1]
if scorecard[0].score < self.mercy_rule: # a team lost
return -1, scorecard[0] # should never tie for last
return 0, None
def play(self) -> None:
v: Tuple[int, Optional[Team]] = self.victory_check()
global o
while v[0] == 0:
self.current_dealer = self.play_hand(self.current_dealer)
v = self.victory_check()
def final_score(pf: Callable = print):
pf(f"\nFinal Scores")
for t in self.teams:
pf(f"{t}: {t.score}")
pf(f"({len(self.current_dealer.team.bid_history)} hands)")
final_score(p)
if o: # final scores to terminal
final_score()
def score_key(t: Team) -> int:
return t.score
@click.command()
@common_options
@click.option(
"--minimum-bid",
type=click.IntRange(0, None),
help="The minimum bid (will usually be 6 if not set)",
)
def main(**kwargs):
global o
global debug
global log_dir
if kwargs.get("all_bots"):
st: str = str(datetime.now()).split(".")[0]
o = open(os.path.join(log_dir, f"{st}.gameplay"), "w")
kwargs["start_time"] = st
debug = True
make_and_play_game(BidEuchre, log_dir, **kwargs)
if __name__ == "__main__":
Path(log_dir).mkdir(parents=True, exist_ok=True)
main()
|
StarcoderdataPython
|
9712723
|
<reponame>jiayushe/hnr-2021<gh_stars>1-10
from .healthcheck import HealthCheck
from .user import SignUp, SignIn, SignOut
__all__ = ["HealthCheck", "SignUp", "SignIn", "SignOut"]
|
StarcoderdataPython
|
125390
|
<filename>hc/front/tests/test_update_priority.py<gh_stars>0
from hc.api.models import Check
from hc.test import BaseTestCase
class UpdatePriorityTestCase(BaseTestCase):
def setUp(self):
super(UpdatePriorityTestCase, self).setUp()
self.check = Check(user=self.alice)
self.check.save()
def test_priority_is_updated(self):
"""
test the update method works
"""
url = "/checks/%s/priority/" % self.check.code
payload = {"priority": "high"}
self.client.login(username="<EMAIL>", password="password")
response = self.client.post(url, data=payload)
self.assertRedirects(response, "/checks/")
check = Check.objects.get(code=self.check.code)
assert check.priority == 3
def test_team_access_works(self):
"""
tests that team members have access to change priority
"""
url = "/checks/%s/priority/" % self.check.code
payload = {"priority": "medium"}
# Logging in as bob, not alice. Bob has team access so this
# should work.
self.client.login(username="<EMAIL>", password="password")
self.client.post(url, data=payload)
check = Check.objects.get(code=self.check.code)
assert check.priority == 2
def test_it_checks_ownership(self):
"""
test it checks for ownership of a check
"""
url = "/checks/%s/priority/" % self.check.code
payload = {"priority": "low"}
self.client.login(username="<EMAIL>", password="password")
response = self.client.post(url, data=payload)
assert response.status_code == 403
def test_it_handles_bad_uuid(self):
"""
tests bad uuid returns error
"""
url = "/checks/not-uuid/priority/"
payload = {"priority": "high"}
self.client.login(username="<EMAIL>", password="password")
response = self.client.post(url, data=payload)
assert response.status_code == 400
def test_it_handles_missing_uuid(self):
"""
Valid UUID but there is no check for it
"""
url = "/checks/6837d6ec-fc08-4da5-a67f-08a9ed1ccf62/priority/"
payload = {"priority": "medium"}
self.client.login(username="<EMAIL>", password="password")
response = self.client.post(url, data=payload)
assert response.status_code == 404
|
StarcoderdataPython
|
201193
|
<reponame>korenlev/calipso-cvim
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from abc import abstractmethod, ABC
from base.fetcher import Fetcher
from base.utils.inventory_mgr import InventoryMgr
from base.utils.origins import ScanOrigin, ScanOrigins
class EventResult:
def __init__(self,
result: bool, retry: bool = False, message: str = None,
related_object: str = None,
display_context: str = None):
self.result = result
self.retry = retry
self.message = message
self.related_object = related_object
self.display_context = display_context
self.origin = ScanOrigin(origin_id=None,
origin_type=ScanOrigins.EVENT)
class EventBase(Fetcher, ABC):
def __init__(self):
super().__init__()
self.inv = InventoryMgr()
@abstractmethod
def handle(self, env, values) -> EventResult:
pass
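# --- Hypothetical illustration (not part of the original module) --------------
# A minimal handler sketch showing the intended EventBase contract: subclasses
# implement handle() and report the outcome through an EventResult. The payload
# field names used below are assumptions for illustration only.
class ExampleInstanceEventSketch(EventBase):
    def handle(self, env, values) -> EventResult:
        # Pull the referenced object id out of the (assumed) event payload
        obj_id = values.get('payload', {}).get('instance_id')
        if not obj_id:
            return EventResult(result=False, retry=False,
                               message="payload missing instance_id")
        # Report success and point back at the object this event touched
        return EventResult(result=True, related_object=obj_id,
                           display_context=env)
# ------------------------------------------------------------------------------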
|
StarcoderdataPython
|
9725208
|
<reponame>affinis-lab/car-detection-module
import cv2
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Reshape, Lambda
from keras.layers import Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D, Dropout, Activation, \
GlobalAveragePooling2D, np
from keras.models import load_model
import tensorflow as tf
from keras.optimizers import Adam
from read_data import GroundTruth
from utils import decode_netout, compute_overlap, compute_ap
from preprocessing import BatchGenerator
class TinyYolo():
def __init__(self, input_size, config):
self.config = config
self.true_boxes = Input(shape=(1, 1, 1, self.config['model']['max_obj'], 4))
self.nb_box = len(self.config['model']['anchors']) // 2
self.class_wt = np.ones(self.config['model']['nb_class'], dtype='float32')
input_image = Input(shape=(input_size, input_size, 3))
# Layer 1
x = Conv2D(16, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2 - 5
for i in range(0,4):
x = Conv2D(32*(2**i), (3,3), strides=(1,1), padding='same', name='conv_' + str(i+2), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(i+2))(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
x = BatchNormalization(name='norm_6')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(1,1), padding='same')(x)
# Layer 7
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_' + str(7), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(7))(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_' + str(8), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(8))(x)
x = LeakyReLU(alpha=0.1)(x)
# Object detection layer
output = Conv2D(2 * (4 + 1 + self.config['model']['nb_class']),
(1, 1), strides=(1, 1),
padding='same',
name='DetectionLayer',
kernel_initializer='lecun_normal')(x)
output = Reshape((self.config['model']['grid_h'], self.config['model']['grid_w'], self.nb_box,
4 + 1 + self.config['model']['nb_class']))(output)
output = Lambda(lambda args: args[0])([output, self.true_boxes])
self.model = Model([input_image, self.true_boxes], output)
# Load pretrained model
pretrained = load_model('yolov2-tiny-coco.h5', custom_objects={'custom_loss': self.custom_loss, 'tf': tf})
idx = 0
for layer in self.model.layers:
if layer.name.startswith("DetectionLayer"):
break
if layer.name.startswith("class_conv") or layer.name.startswith("dropout"):
break
layer.set_weights(pretrained.get_layer(index=idx).get_weights())
idx += 1
for l in self.config['model']['frozen_layers']:
self.model.get_layer("conv_" + str(l)).trainable = False
self.model.get_layer("norm_" + str(l)).trainable = False
#self.model.summary()
def normalize(self, image):
return image / 255.
def custom_loss(self, y_true, y_pred):
mask_shape = tf.shape(y_true)[:4]
cell_x = tf.to_float(
tf.reshape(tf.tile(tf.range(self.config['model']['grid_w']), [self.config['model']['grid_h']]),
(1, self.config['model']['grid_h'], self.config['model']['grid_w'], 1, 1)))
cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))
cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [self.config['train']['batch_size'], 1, 1, self.nb_box, 1])
coord_mask = tf.zeros(mask_shape)
conf_mask = tf.zeros(mask_shape)
class_mask = tf.zeros(mask_shape)
seen = tf.Variable(0.)
total_loss = tf.Variable(0.)
total_recall = tf.Variable(0.)
total_boxes = tf.Variable(self.config['model']['grid_h'] * self.config['model']['grid_w'] *
self.config['model']['num_boxes'] * self.config['train']['batch_size'])
"""
Adjust prediction
"""
### adjust x and y
pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid
### adjust w and h tf.exp(
pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(self.config['model']['anchors'], [1, 1, 1, self.nb_box, 2])
### adjust confidence
pred_box_conf = tf.sigmoid(y_pred[..., 4])
### adjust class probabilities
pred_box_class = y_pred[..., 5:]
"""
Adjust ground truth
"""
### adjust x and y
true_box_xy = y_true[..., 0:2] # relative position to the containing cell
### adjust w and h
true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically
### adjust confidence
true_wh_half = true_box_wh / 2.
true_mins = true_box_xy - true_wh_half
true_maxes = true_box_xy + true_wh_half
pred_wh_half = pred_box_wh / 2.
pred_mins = pred_box_xy - pred_wh_half
pred_maxes = pred_box_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
true_box_conf = iou_scores * y_true[..., 4]
### adjust class probabilities
true_box_class = tf.argmax(y_true[..., 5:], -1)
"""
Determine the masks
"""
### coordinate mask: simply the position of the ground truth boxes (the predictors)
coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * self.config['model']['coord_scale']
### confidence mask: penelize predictors + penalize boxes with low IOU
# penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6
true_xy = self.true_boxes[..., 0:2]
true_wh = self.true_boxes[..., 2:4]
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = tf.expand_dims(pred_box_xy, 4)
pred_wh = tf.expand_dims(pred_box_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
best_ious = tf.reduce_max(iou_scores, axis=4)
#conf_mask = conf_mask + tf.to_float(best_ious < 0.5) * (1 - y_true[..., 4]) * self.no_object_scale
# penalize the confidence of the boxes, which are reponsible for corresponding ground truth box
#conf_mask = conf_mask + y_true[..., 4] * self.object_scale
conf_mask_neg = tf.to_float(best_ious < 0.4) * (1 - y_true[..., 4]) * self.config['model']['no_obj_scale']
conf_mask_pos = y_true[..., 4] * self.config['model']['obj_scale']
### class mask: simply the position of the ground truth boxes (the predictors)
class_mask = y_true[..., 4] * tf.gather(self.class_wt, true_box_class) * self.config['model']['class_scale']
"""
Warm-up training
"""
no_boxes_mask = tf.to_float(coord_mask < self.config['model']['coord_scale'] / 2.)
seen = tf.assign_add(seen, 1.)
true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
true_box_wh + tf.ones_like(true_box_wh) * \
np.reshape(self.config['model']['anchors'],
[1, 1, 1, self.nb_box, 2]) * no_boxes_mask,
tf.ones_like(coord_mask)],
lambda: [true_box_xy,
true_box_wh,
coord_mask])
"""
Finalize the loss
"""
nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
#nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
nb_conf_box_neg = tf.reduce_sum(tf.to_float(conf_mask_neg > 0.0))
nb_conf_box_pos = tf.subtract(tf.to_float(total_boxes), nb_conf_box_neg) #tf.reduce_sum(tf.to_float(conf_mask_pos > 0.0))
nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))
true_box_wh = tf.sqrt(true_box_wh)
pred_box_wh = tf.sqrt(pred_box_wh)
loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_conf_neg = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_neg) / (nb_conf_box_neg + 1e-6) / 2.
loss_conf_pos = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_pos) / (nb_conf_box_pos + 1e-6) / 2
loss_conf = loss_conf_neg + loss_conf_pos
loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)
loss = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
lambda: loss_xy + loss_wh + loss_conf + loss_class + 10,
lambda: loss_xy + loss_wh + loss_conf + loss_class)
if self.config['train']['debug']:
nb_true_box = tf.reduce_sum(y_true[..., 4])
nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.3) * tf.to_float(pred_box_conf > 0.25))
current_recall = nb_pred_box / (nb_true_box + 1e-6)
total_recall = tf.assign_add(total_recall, current_recall)
total_loss = tf.assign_add(total_loss, loss)
#loss = tf.Print(loss, [m2], message='\nPred box conf \t', summarize=1000)
loss = tf.Print(loss, [loss_xy], message='\nLoss XY \t', summarize=1000)
loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000)
loss = tf.Print(loss, [nb_conf_box_neg], message='Nb Conf Box Negative \t', summarize=1000)
loss = tf.Print(loss, [nb_conf_box_pos], message='Nb Conf Box Positive \t', summarize=1000)
loss = tf.Print(loss, [loss_conf_neg], message='Loss Conf Negative \t', summarize=1000)
loss = tf.Print(loss, [loss_conf_pos], message='Loss Conf Positive \t', summarize=1000)
loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000)
loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000)
loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000)
loss = tf.Print(loss, [total_loss / seen], message='Average Loss \t', summarize=1000)
#loss = tf.Print(loss, [y_true[..., 5:]], message='\nYtrue \t', summarize=1000)
#loss = tf.Print(loss, [true_box_class], message='True box class \t', summarize=1000)
#loss = tf.Print(loss, [pred_box_class], message=' Pred box class \t', summarize=1000)
loss = tf.Print(loss, [nb_pred_box], message='Number of pred boxes \t', summarize=1000)
loss = tf.Print(loss, [nb_true_box], message='Number of true boxes \t', summarize=1000)
loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000)
loss = tf.Print(loss, [total_recall / seen], message='Average Recall \t', summarize=1000)
return loss
def train(self):
############################################
# Make train and validation generators
############################################
objectReader = GroundTruth(self.config)
objectReader.load_json()
objectReader.objects_all()
data = objectReader.objects_all()
np.random.shuffle(data)
size = int(len(data) * 0.8)
train_instances, validation_instances = data[:size], data[size:]
np.random.shuffle(train_instances)
np.random.shuffle(validation_instances)
checkpoint = ModelCheckpoint('weights_coco.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto',
period=1)
train_generator = BatchGenerator(train_instances,
self.config['generator_config'],
norm=self.normalize)
valid_generator = BatchGenerator(validation_instances,
self.config['generator_config'],
norm=self.normalize,
jitter=False)
############################################
# Compile the model
############################################
optimizer = Adam(lr=self.config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
self.model.compile(loss=self.custom_loss, optimizer=optimizer)
############################################
# Start the training process
############################################
self.model.fit_generator(generator=train_generator,
steps_per_epoch=len(train_generator),
epochs= self.config['train']['nb_epochs'],
verbose=2 if self.config['train']['debug'] else 1,
validation_data=valid_generator,
validation_steps=len(valid_generator),
workers=3,
callbacks=[checkpoint],
max_queue_size=16)
############################################
# Compute mAP on the validation set
############################################
average_precisions = self.evaluate(valid_generator)
# print evaluation
for label, average_precision in average_precisions.items():
print('car', '{:.4f}'.format(average_precision))
print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))
def evaluate(self,
generator,
iou_threshold=0.3,
score_threshold=0.3,
max_detections=100,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
generator : The generator that represents the dataset to evaluate.
model : The model to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in range(generator.size()):
raw_image = generator.load_image(i)
raw_height, raw_width, raw_channels = raw_image.shape
# make the boxes and the labels
pred_boxes = self.predict(raw_image)
score = np.array([box.score for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
box.ymax * raw_height, box.score] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
return average_precisions
def predict(self, image):
image_h, image_w, _ = image.shape
image = cv2.resize(image, (416, 416))
image = self.normalize(image)
input_image = image[:, :, ::-1]
input_image = np.expand_dims(input_image, 0)
dummy_array = np.zeros((1, 1, 1, 1, self.config['model']['max_obj'], 4))
netout = self.model.predict([input_image, dummy_array])[0]
boxes = decode_netout(netout, self.config['model']['anchors'], self.config['model']['nb_class'])
return boxes
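# --- Hypothetical usage sketch (not part of the original module) --------------
# Inferred from the methods above; the config keys are assumptions based on the
# lookups in this file (model.anchors, model.nb_class, train.batch_size, ...):
#
#     model = TinyYolo(input_size=416, config=config)   # config loaded elsewhere
#     model.train()                                      # fit + mAP on validation split
#     boxes = model.predict(cv2.imread("example.jpg"))   # decoded bounding boxes
# ------------------------------------------------------------------------------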
|
StarcoderdataPython
|
5055065
|
<reponame>Helilysyt/tensorlayer
'''
Twin Delayed DDPG (TD3)
------------------------
DDPG suffers from problems like overestimation of Q-values and sensitivity to hyper-parameters.
Twin Delayed DDPG (TD3) is a variant of DDPG with several tricks:
* Trick One: Clipped Double-Q Learning. TD3 learns two Q-functions instead of one (hence “twin”),
and uses the smaller of the two Q-values to form the targets in the Bellman error loss functions.
* Trick Two: “Delayed” Policy Updates. TD3 updates the policy (and target networks) less frequently
than the Q-function.
* Trick Three: Target Policy Smoothing. TD3 adds noise to the target action, to make it harder for
the policy to exploit Q-function errors by smoothing out Q along changes in action.
The implementation of TD3 includes 6 networks: 2 Q-net, 2 target Q-net, 1 policy net, 1 target policy net
Actor policy in TD3 is deterministic, with Gaussian exploration noise.
Reference
---------
original paper: https://arxiv.org/pdf/1802.09477.pdf
Environment
---
Openai Gym Pendulum-v0, continuous action space
https://gym.openai.com/envs/Pendulum-v0/
Prerequisites
---
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
&&
pip install box2d box2d-kengz --user
'''
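# --- Hypothetical illustration (not part of the original file) ----------------
# A minimal, framework-free sketch of how the TD3 target value combines Trick
# One (clipped double-Q) with Trick Three (target policy smoothing), assuming
# scalar rewards and actions for clarity. The callables and default scales are
# assumptions for illustration; the real update below uses the TensorFlow
# networks defined in this module instead.
def _td3_target_value_sketch(reward, done, next_state, target_policy,
                             target_q1, target_q2, gamma=0.9,
                             noise_scale=0.2, noise_clip=0.5, action_range=1.0):
    """Return r + gamma * (1 - done) * min(Q1'(s', a'), Q2'(s', a'))."""
    import numpy as np  # local import keeps the sketch self-contained
    # Trick Three: smooth the target action with clipped Gaussian noise
    noise = np.clip(np.random.normal(0.0, noise_scale), -noise_clip, noise_clip)
    next_action = np.clip(target_policy(next_state) + noise,
                          -action_range, action_range)
    # Trick One: take the smaller of the two target Q estimates
    target_q = min(target_q1(next_state, next_action),
                   target_q2(next_state, next_action))
    return reward + gamma * (1.0 - done) * target_q
# ------------------------------------------------------------------------------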
import argparse
import math
import random
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from IPython.display import clear_output
import tensorlayer as tl
from common.buffer import *
from common.networks import *
from common.utils import *
from tensorlayer.layers import Dense
from tensorlayer.models import Model
tfd = tfp.distributions
Normal = tfd.Normal
tl.logging.set_verbosity(tl.logging.DEBUG)
############################### TD3 ####################################
class PolicyNetwork(Model):
    ''' the network for generating actions from the state input; the policy output is deterministic, with Gaussian exploration noise added in evaluate()/get_action() '''
def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3):
super(PolicyNetwork, self).__init__()
# w_init = tf.keras.initializers.glorot_normal(seed=None)
w_init = tf.random_uniform_initializer(-init_w, init_w)
self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=num_inputs, name='policy1')
self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy2')
self.linear3 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy3')
self.output_linear = Dense(n_units=num_actions, W_init=w_init, \
b_init=tf.random_uniform_initializer(-init_w, init_w), in_channels=hidden_dim, name='policy_output')
self.action_range = action_range
self.num_actions = num_actions
def forward(self, state):
x = self.linear1(state)
x = self.linear2(x)
x = self.linear3(x)
output = tf.nn.tanh(self.output_linear(x)) # unit range output [-1, 1]
return output
def evaluate(self, state, eval_noise_scale):
'''
generate action with state for calculating gradients;
eval_noise_scale: as the trick of target policy smoothing, for generating noisy actions.
'''
state = state.astype(np.float32)
action = self.forward(state)
action = self.action_range * action
# add noise
normal = Normal(0, 1)
eval_noise_clip = 2 * eval_noise_scale
noise = normal.sample(action.shape) * eval_noise_scale
noise = tf.clip_by_value(noise, -eval_noise_clip, eval_noise_clip)
action = action + noise
return action
def get_action(self, state, explore_noise_scale):
        ''' generate action with state for interaction with environment '''
action = self.forward([state])
action = action.numpy()[0]
# add noise
normal = Normal(0, 1)
noise = normal.sample(action.shape) * explore_noise_scale
action = self.action_range * action + noise
return action.numpy()
def sample_action(self, ):
''' generate random actions for exploration '''
a = tf.random.uniform([self.num_actions], -1, 1)
return self.action_range * a.numpy()
class TD3_Trainer():
def __init__(
self, replay_buffer, hidden_dim, state_dim, action_dim, action_range, policy_target_update_interval=1,
q_lr=3e-4, policy_lr=3e-4
):
self.replay_buffer = replay_buffer
# initialize all networks
self.q_net1 = QNetwork(state_dim, action_dim, hidden_dim)
self.q_net2 = QNetwork(state_dim, action_dim, hidden_dim)
self.target_q_net1 = QNetwork(state_dim, action_dim, hidden_dim)
self.target_q_net2 = QNetwork(state_dim, action_dim, hidden_dim)
self.policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range)
self.target_policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range)
print('Q Network (1,2): ', self.q_net1)
print('Policy Network: ', self.policy_net)
# initialize weights of target networks
self.target_q_net1 = self.target_ini(self.q_net1, self.target_q_net1)
self.target_q_net2 = self.target_ini(self.q_net2, self.target_q_net2)
self.target_policy_net = self.target_ini(self.policy_net, self.target_policy_net)
self.update_cnt = 0
self.policy_target_update_interval = policy_target_update_interval
self.q_optimizer1 = tf.optimizers.Adam(q_lr)
self.q_optimizer2 = tf.optimizers.Adam(q_lr)
self.policy_optimizer = tf.optimizers.Adam(policy_lr)
def target_ini(self, net, target_net):
''' hard-copy update for initializing target networks '''
for target_param, param in zip(target_net.trainable_weights, net.trainable_weights):
target_param.assign(param)
return target_net
def target_soft_update(self, net, target_net, soft_tau):
''' soft update the target net with Polyak averaging '''
for target_param, param in zip(target_net.trainable_weights, net.trainable_weights):
target_param.assign( # copy weight value into target parameters
target_param * (1.0 - soft_tau) + param * soft_tau
)
return target_net
def update(self, batch_size, eval_noise_scale, reward_scale=10., gamma=0.9, soft_tau=1e-2):
''' update all networks in TD3 '''
self.update_cnt += 1
state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)
reward = reward[:, np.newaxis] # expand dim
done = done[:, np.newaxis]
new_next_action = self.target_policy_net.evaluate(
next_state, eval_noise_scale=eval_noise_scale
) # clipped normal noise
reward = reward_scale * (reward - np.mean(reward, axis=0)) / (
np.std(reward, axis=0) + 1e-6
) # normalize with batch mean and std; plus a small number to prevent numerical problem
# Training Q Function
target_q_input = tf.concat([next_state, new_next_action], 1) # the dim 0 is number of samples
target_q_min = tf.minimum(self.target_q_net1(target_q_input), self.target_q_net2(target_q_input))
target_q_value = reward + (1 - done) * gamma * target_q_min # if done==1, only reward
q_input = tf.concat([state, action], 1) # input of q_net
with tf.GradientTape() as q1_tape:
predicted_q_value1 = self.q_net1(q_input)
q_value_loss1 = tf.reduce_mean(tf.square(predicted_q_value1 - target_q_value))
q1_grad = q1_tape.gradient(q_value_loss1, self.q_net1.trainable_weights)
self.q_optimizer1.apply_gradients(zip(q1_grad, self.q_net1.trainable_weights))
with tf.GradientTape() as q2_tape:
predicted_q_value2 = self.q_net2(q_input)
q_value_loss2 = tf.reduce_mean(tf.square(predicted_q_value2 - target_q_value))
q2_grad = q2_tape.gradient(q_value_loss2, self.q_net2.trainable_weights)
self.q_optimizer2.apply_gradients(zip(q2_grad, self.q_net2.trainable_weights))
# Training Policy Function
if self.update_cnt % self.policy_target_update_interval == 0:
with tf.GradientTape() as p_tape:
new_action = self.policy_net.evaluate(
state, eval_noise_scale=0.0
) # no noise, deterministic policy gradients
new_q_input = tf.concat([state, new_action], 1)
# ''' implementation 1 '''
# predicted_new_q_value = tf.minimum(self.q_net1(new_q_input),self.q_net2(new_q_input))
''' implementation 2 '''
predicted_new_q_value = self.q_net1(new_q_input)
policy_loss = -tf.reduce_mean(predicted_new_q_value)
p_grad = p_tape.gradient(policy_loss, self.policy_net.trainable_weights)
self.policy_optimizer.apply_gradients(zip(p_grad, self.policy_net.trainable_weights))
# Soft update the target nets
self.target_q_net1 = self.target_soft_update(self.q_net1, self.target_q_net1, soft_tau)
self.target_q_net2 = self.target_soft_update(self.q_net2, self.target_q_net2, soft_tau)
self.target_policy_net = self.target_soft_update(self.policy_net, self.target_policy_net, soft_tau)
def save_weights(self): # save trained weights
save_model(self.q_net1, 'model_q_net1', 'TD3')
save_model(self.q_net2, 'model_q_net2', 'TD3')
save_model(self.target_q_net1, 'model_target_q_net1', 'TD3')
save_model(self.target_q_net2, 'model_target_q_net2', 'TD3')
save_model(self.policy_net, 'model_policy_net', 'TD3')
save_model(self.target_policy_net, 'model_target_policy_net', 'TD3')
def load_weights(self): # load trained weights
load_model(self.q_net1, 'model_q_net1', 'TD3')
load_model(self.q_net2, 'model_q_net2', 'TD3')
load_model(self.target_q_net1, 'model_target_q_net1', 'TD3')
load_model(self.target_q_net2, 'model_target_q_net2', 'TD3')
load_model(self.policy_net, 'model_policy_net', 'TD3')
load_model(self.target_policy_net, 'model_target_policy_net', 'TD3')
def learn(env_id, train_episodes, test_episodes=1000, max_steps=150, batch_size=64, explore_steps=500, update_itr=3, hidden_dim=32, \
q_lr = 3e-4, policy_lr = 3e-4, policy_target_update_interval = 3, action_range = 1., \
replay_buffer_size = 5e5, reward_scale = 1. , seed=2, save_interval=500, explore_noise_scale = 1.0, eval_noise_scale = 0.5, mode='train'):
'''
parameters
----------
    env_id: id of the learning environment (passed to make_env)
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
    batch_size: update batch size
explore_steps: for random action sampling in the beginning of training
update_itr: repeated updates for single step
hidden_dim: size of hidden layers for networks
q_lr: q_net learning rate
policy_lr: policy_net learning rate
policy_target_update_interval: delayed update for the policy network and target networks
action_range: range of action value
replay_buffer_size: size of replay buffer
reward_scale: value range of reward
save_interval: timesteps for saving the weights and plotting the results
explore_noise_scale: range of action noise for exploration
eval_noise_scale: range of action noise for evaluation of action value
mode: train or test
'''
env = make_env(env_id) # make env with common.utils and wrappers
action_dim = env.action_space.shape[0]
state_dim = env.observation_space.shape[0]
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed) # reproducible
# initialization of buffer
replay_buffer = ReplayBuffer(replay_buffer_size)
# initialization of trainer
td3_trainer=TD3_Trainer(replay_buffer, hidden_dim=hidden_dim, state_dim=state_dim, action_dim=action_dim, policy_target_update_interval=policy_target_update_interval, \
action_range=action_range, q_lr=q_lr, policy_lr=policy_lr )
# set train mode
td3_trainer.q_net1.train()
td3_trainer.q_net2.train()
td3_trainer.target_q_net1.train()
td3_trainer.target_q_net2.train()
td3_trainer.policy_net.train()
td3_trainer.target_policy_net.train()
# training loop
if mode == 'train':
frame_idx = 0
rewards = []
t0 = time.time()
for eps in range(train_episodes):
state = env.reset()
state = state.astype(np.float32)
episode_reward = 0
if frame_idx < 1:
_ = td3_trainer.policy_net(
[state]
) # need an extra call here to make inside functions be able to use model.forward
_ = td3_trainer.target_policy_net([state])
for step in range(max_steps):
if frame_idx > explore_steps:
action = td3_trainer.policy_net.get_action(state, explore_noise_scale=1.0)
else:
action = td3_trainer.policy_net.sample_action()
next_state, reward, done, _ = env.step(action)
next_state = next_state.astype(np.float32)
env.render()
                done = 1 if done else 0
replay_buffer.push(state, action, reward, next_state, done)
state = next_state
episode_reward += reward
frame_idx += 1
if len(replay_buffer) > batch_size:
for i in range(update_itr):
td3_trainer.update(batch_size, eval_noise_scale=0.5, reward_scale=1.)
if done:
break
if eps % int(save_interval) == 0:
plot(rewards, Algorithm_name='TD3', Env_name=env_id)
td3_trainer.save_weights()
print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'\
.format(eps, train_episodes, episode_reward, time.time()-t0 ))
rewards.append(episode_reward)
td3_trainer.save_weights()
if mode == 'test':
frame_idx = 0
rewards = []
t0 = time.time()
td3_trainer.load_weights()
for eps in range(test_episodes):
state = env.reset()
state = state.astype(np.float32)
episode_reward = 0
if frame_idx < 1:
_ = td3_trainer.policy_net(
[state]
) # need an extra call to make inside functions be able to use forward
_ = td3_trainer.target_policy_net([state])
for step in range(max_steps):
action = td3_trainer.policy_net.get_action(state, explore_noise_scale=1.0)
next_state, reward, done, _ = env.step(action)
next_state = next_state.astype(np.float32)
env.render()
                done = 1 if done else 0
state = next_state
episode_reward += reward
frame_idx += 1
# if frame_idx % 50 == 0:
# plot(frame_idx, rewards)
if done:
break
print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'\
.format(eps, test_episodes, episode_reward, time.time()-t0 ) )
rewards.append(episode_reward)
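# --- Hypothetical usage sketch (not part of the original module) --------------
# A guarded example call, assuming a continuous-action Gym id such as
# 'Pendulum-v0' (the environment named in the module docstring); the arguments
# shown simply mirror the defaults documented above.
if __name__ == '__main__':
    learn('Pendulum-v0', train_episodes=100, max_steps=150, mode='train')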
|
StarcoderdataPython
|
3420388
|
import random
def test_shift_left():
n = random.randint(1, 100)
my_list = [random.randint(1, 100) for _ in range(n)]
my_list_before = my_list.copy()
for i in range(n):
my_list.append(my_list.pop(0))
assert my_list_before == my_list
# def test_key_schedule_core():
# pass
# def test_expand_key():
# pass
|
StarcoderdataPython
|
4971036
|
# IMPORTS ################################################################################ IMPORTS #
# Standard library
import http
import datetime
import json
import unittest
import time
# Installed
import pytest
import marshmallow
# Own
from dds_web import db
from dds_web.database import models
import tests
# CONFIG ################################################################################## CONFIG #
proj_data = {"pi": "piName", "title": "Test proj", "description": "A longer project description"}
proj_data_with_existing_users = {
**proj_data,
"users_to_add": [
{"email": "<EMAIL>", "role": "Project Owner"},
{"email": "<EMAIL>", "role": "Researcher"},
],
}
proj_data_with_nonexisting_users = {
**proj_data,
"users_to_add": [
{"email": "<EMAIL>", "role": "Project Owner"},
{"email": "<EMAIL>", "role": "Researcher"},
],
}
proj_data_with_unsuitable_user_roles = {
**proj_data,
"users_to_add": [
{"email": "<EMAIL>", "role": "Unit Admin"},
{"email": "<EMAIL>", "role": "Unit Personnel"},
],
}
# TESTS #################################################################################### TESTS #
def test_create_project_without_credentials(client):
"""Create project without valid user credentials."""
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["researchuser"]).token(client),
data=json.dumps(proj_data),
content_type="application/json",
)
assert response.status_code == http.HTTPStatus.FORBIDDEN
created_proj = models.Project.query.filter_by(
created_by="researchuser",
title=proj_data["title"],
pi=proj_data["pi"],
description=proj_data["description"],
).one_or_none()
assert created_proj is None
def test_create_project_with_credentials(client, boto3_session):
"""Create project with correct credentials."""
time_before_run = datetime.datetime.utcnow()
time.sleep(1)
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data),
content_type="application/json",
)
assert response.status_code == http.HTTPStatus.OK
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data["title"],
pi=proj_data["pi"],
description=proj_data["description"],
).one_or_none()
assert (
created_proj
and created_proj.date_created > time_before_run
and not created_proj.is_sensitive
)
def test_create_project_no_title(client):
"""Create project without a title specified."""
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps({"pi": "piName"}),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
pi=proj_data["pi"],
).one_or_none()
assert created_proj is None
def test_create_project_title_too_short(client):
"""Create a project with too short title."""
proj_data_short_title = proj_data.copy()
proj_data_short_title["title"] = ""
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_short_title),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_short_title["title"],
pi=proj_data_short_title["pi"],
description=proj_data_short_title["description"],
).one_or_none()
assert not created_proj
def test_create_project_with_malformed_json(client):
"""Create a project with malformed project info."""
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data="",
content_type="application/json",
)
assert response.status_code == http.HTTPStatus.BAD_REQUEST
created_proj = models.Project.query.filter_by(
created_by="unituser",
title="",
pi="",
description="",
).one_or_none()
assert created_proj is None
def test_create_project_sensitive(client, boto3_session):
"""Create a sensitive project."""
p_data = proj_data
p_data["is_sensitive"] = True
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(p_data),
content_type="application/json",
)
assert response.status == "200 OK"
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data["title"],
pi=proj_data["pi"],
description=proj_data["description"],
).one_or_none()
assert created_proj and created_proj.is_sensitive
def test_create_project_description_too_short(client):
"""Create a project with too short description."""
proj_data_short_description = proj_data.copy()
proj_data_short_description["description"] = ""
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_short_description),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_short_description["title"],
pi=proj_data_short_description["pi"],
description=proj_data_short_description["description"],
).one_or_none()
assert not created_proj
def test_create_project_pi_too_short(client):
"""Create a project with too short PI."""
proj_data_short_pi = proj_data.copy()
proj_data_short_pi["pi"] = ""
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_short_pi),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_short_pi["title"],
pi=proj_data_short_pi["pi"],
description=proj_data_short_pi["description"],
).one_or_none()
assert not created_proj
def test_create_project_pi_too_long(client):
"""Create a project with too long PI."""
proj_data_long_pi = proj_data.copy()
proj_data_long_pi["pi"] = "pi" * 128
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_long_pi),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_long_pi["title"],
pi=proj_data_long_pi["pi"],
description=proj_data_long_pi["description"],
).one_or_none()
assert not created_proj
def test_create_project_wrong_status(client, boto3_session):
"""Create a project with own status, should be overridden."""
proj_data_wrong_status = proj_data.copy()
proj_data_wrong_status["status"] = "Incorrect Status"
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_wrong_status),
content_type="application/json",
)
assert response.status_code == http.HTTPStatus.OK
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_wrong_status["title"],
pi=proj_data_wrong_status["pi"],
description=proj_data_wrong_status["description"],
).one_or_none()
assert created_proj and created_proj.current_status == "In Progress"
def test_create_project_sensitive_not_boolean(client):
"""Create project with incorrect is_sensitive format."""
proj_data_sensitive_not_boolean = proj_data.copy()
proj_data_sensitive_not_boolean["is_sensitive"] = "test"
with pytest.raises(marshmallow.ValidationError):
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_sensitive_not_boolean),
content_type="application/json",
)
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_sensitive_not_boolean["title"],
pi=proj_data_sensitive_not_boolean["pi"],
description=proj_data_sensitive_not_boolean["description"],
).one_or_none()
assert not created_proj
def test_create_project_date_created_overridden(client, boto3_session):
"""Create project with own date_created, should be overridden."""
proj_data_date_created_own = proj_data.copy()
proj_data_date_created_own["date_created"] = "test"
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_date_created_own),
content_type="application/json",
)
assert response.status_code == http.HTTPStatus.OK
created_proj = models.Project.query.filter_by(
created_by="unituser",
title=proj_data_date_created_own["title"],
pi=proj_data_date_created_own["pi"],
description=proj_data_date_created_own["description"],
).one_or_none()
assert created_proj and created_proj.date_created != proj_data_date_created_own["date_created"]
def test_create_project_with_users(client, boto3_session):
"""Create project and add users to the project."""
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_with_existing_users),
content_type="application/json",
)
assert response.status == "200 OK"
assert response.json and response.json.get("user_addition_statuses")
for x in response.json.get("user_addition_statuses"):
assert "associated with project" in x
resp_json = response.json
created_proj = models.Project.query.filter_by(public_id=resp_json["project_id"]).one_or_none()
assert created_proj
users = models.ProjectUsers.query.filter_by(project_id=created_proj.id).all()
users_dict_from_db = []
for user in users:
users_dict_from_db.append({"username": user.user_id, "owner": user.owner})
users_dict_from_email = []
for user in proj_data_with_existing_users["users_to_add"]:
email = models.Email.query.filter_by(email=user["email"]).one_or_none()
users_dict_from_email.append(
{
"username": email.user_id,
"owner": True if user.get("role") == "Project Owner" else False,
}
)
case = unittest.TestCase()
case.assertCountEqual(users_dict_from_db, users_dict_from_email)
def test_create_project_with_invited_users(client, boto3_session):
"""Create project and invite users to the project."""
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_with_nonexisting_users),
content_type="application/json",
)
assert response.status == "200 OK"
assert response.json and response.json.get("user_addition_statuses")
for x in response.json.get("user_addition_statuses"):
assert "Invitation sent" in x
def test_create_project_with_unsuitable_roles(client, boto3_session):
"""Create project and add users with unsuitable roles to the project."""
response = client.post(
tests.DDSEndpoint.PROJECT_CREATE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client),
data=json.dumps(proj_data_with_unsuitable_user_roles),
content_type="application/json",
)
assert response.status == "200 OK"
assert response.json and response.json.get("user_addition_statuses")
for x in response.json.get("user_addition_statuses"):
assert "User Role should be either 'Project Owner' or 'Researcher'" in x
|
StarcoderdataPython
|
8168328
|
<reponame>myles-novick/pysplice
"""
A set of utility functions for calculating drift of deployed models
"""
import matplotlib.pyplot as plt
from datetime import datetime
import pyspark.sql.functions as f
from itertools import count, islice
import warnings
try:
from pyspark_dist_explore import distplot
except ImportError:
warnings.warn("You need pyspark-dist-explore installed to use this function. Install it directly or the "
"splicemachine[stats] extra ")
def calculate_outlier_bounds(df, column_name):
"""
Calculates outlier bounds based on interquartile range of distribution of values in column 'column_name'
from data set in data frame 'df'.
:param df: data frame containing data to be analyzed
:param column_name: column name to analyze
:return: dictionary with keys min, max, q1 and q3 keys and corresponding values for outlier minimum, maximum
and 25th and 75th percentile values (q1,q3)
"""
bounds = dict(zip(["q1", "q3"], df.approxQuantile(column_name, [0.25, 0.75], 0)))
iqr = bounds['q3'] - bounds['q1']
bounds['min'] = bounds['q1'] - (iqr * 1.5)
bounds['max'] = bounds['q3'] + (iqr * 1.5)
return bounds
def remove_outliers(df, column_name):
"""
    Calculates outlier bounds on the distribution of 'column_name' values and returns a filtered data frame without
    outliers in the specified column.
:param df: data frame with data to remove outliers from
:param column_name: name of column to remove outliers from
:return: input data frame filtered to remove outliers
"""
import pyspark.sql.functions as f
bounds = calculate_outlier_bounds(df, column_name)
return df.filter((f.col(column_name) >= bounds['min']) & (f.col(column_name) <= bounds['max']))
def add_feature_plot(ax, train_df, model_df, feature, n_bins):
"""
Adds a distplot of the outlier free feature values from both train_df and model_df data frames which both
contain the feature.
:param ax: target subplot for chart
:param train_df: training data containing feature of interest
:param model_df: model input data also containing feature of interest
:param feature: name of feature to display in distribution histogram
:param n_bins: number of bins to use in histogram plot
:return: None
"""
distplot(ax, [remove_outliers(train_df.select(f.col(feature).alias('training')), 'training'),
remove_outliers(model_df.select(f.col(feature).alias('model')), 'model')], bins=n_bins)
ax.set_title(feature)
ax.legend()
def datetime_range_split( start: datetime, end: datetime, number: int):
"""
Subdivides the time frame defined by 'start' and 'end' parameters into 'number' equal time frames.
:param start: start date time
:param end: end date time
:param number: number of time frames to split into
:return: list of start/end date times
"""
start_secs = (start - datetime(1970, 1, 1)).total_seconds()
end_secs = (end - datetime(1970, 1, 1)).total_seconds()
dates = [datetime.fromtimestamp(el) for el in
islice(count(start_secs, (end_secs - start_secs) / number), number + 1)]
return zip(dates, dates[1:])
def build_feature_drift_plot(features, training_set_df, model_table_df):
"""
Displays feature by feature comparison of distributions between the training set and the model inputs.
:param features: list of features to analyze
:param training_set_df: the dataframe used for training the model that contains all the features to analyze
:param model_table_df: the dataframe with the content of the model table containing all input features
"""
final_features = [f for f in features if f in model_table_df.columns]
# prep plot area
n_bins = 15
num_features = len(final_features)
n_rows = int(num_features / 5)
if num_features % 5 > 0:
n_rows = n_rows + 1
fig, axes = plt.subplots(nrows=n_rows, ncols=5, figsize=(30, 10 * n_rows))
axes = axes.flatten()
# calculate combined plots for each feature
    for plot, feat in enumerate(final_features):
        add_feature_plot(axes[plot], training_set_df, model_table_df, feat, n_bins)
plt.show()
def build_model_drift_plot( model_table_df, time_intervals):
"""
Displays model prediction distribution plots split into multiple time intervals.
:param model_table_df: dataframe containing columns EVAL_TIME and PREDICTION
:param time_intervals: number of time intervals to display
:return:
"""
min_ts = model_table_df.first()['EVAL_TIME']
max_ts = model_table_df.orderBy(f.col("EVAL_TIME").desc()).first()['EVAL_TIME']
if max_ts > min_ts:
intervals = datetime_range_split(min_ts, max_ts, time_intervals)
n_rows = int(time_intervals / 5)
if time_intervals % 5 > 0:
n_rows = n_rows + 1
fig, axes = plt.subplots(nrows=n_rows, ncols=5, figsize=(30, 10 * n_rows))
axes = axes.flatten()
for i, time_int in enumerate(intervals):
df = model_table_df.filter((f.col('EVAL_TIME') >= time_int[0]) & (f.col('EVAL_TIME') < time_int[1]))
distplot(axes[i], [remove_outliers(df.select(f.col('PREDICTION')), 'PREDICTION')], bins=15)
axes[i].set_title(f"{time_int[0]}")
axes[i].legend()
else:
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
distplot(axes, [remove_outliers(model_table_df.select(f.col('PREDICTION')), 'PREDICTION')], bins=15)
axes.set_title(f"Predictions at {min_ts}")
axes.legend()
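# Minimal usage sketch (illustrative only): assumes a local SparkSession and a toy
# column named "value"; the real entry points build_feature_drift_plot /
# build_model_drift_plot expect training and model-input DataFrames instead.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.master("local[1]").appName("drift-demo").getOrCreate()
    toy_df = spark.createDataFrame([(float(v),) for v in [1, 2, 3, 4, 5, 100]], ["value"])
    print(calculate_outlier_bounds(toy_df, "value"))  # q1/q3 plus derived min/max fences
    print(remove_outliers(toy_df, "value").count())   # the extreme value 100 is filtered out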
|
StarcoderdataPython
|
1956584
|
<reponame>brianleungwh/signals<filename>signals/__init__.py
from signals.main import run_signals
__all__ = ['run_signals']
|
StarcoderdataPython
|
390851
|
<reponame>abh/salt
'''
Create virtualenv environments
'''
# Import python libs
from salt import utils
__opts__ = {
'venv_bin': 'virtualenv'
}
__pillar__ = {}
def create(path,
venv_bin=None,
no_site_packages=False,
system_site_packages=False,
distribute=False,
clear=False,
python='',
extra_search_dir='',
never_download=False,
prompt='',
runas=None):
'''
Create a virtualenv
path
The path to create the virtualenv
venv_bin : 'virtualenv'
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
no_site_packages : False
Passthrough argument given to virtualenv
system_site_packages : False
Passthrough argument given to virtualenv
distribute : False
Passthrough argument given to virtualenv
clear : False
Passthrough argument given to virtualenv
python : (default)
Passthrough argument given to virtualenv
extra_search_dir : (default)
Passthrough argument given to virtualenv
never_download : (default)
Passthrough argument given to virtualenv
prompt : (default)
Passthrough argument given to virtualenv
runas : None
Set ownership for the virtualenv
CLI Example::
salt '*' virtualenv.create /path/to/new/virtualenv
'''
if venv_bin is None:
venv_bin = __opts__.get('venv_bin') or __pillar__.get('venv_bin')
# raise CommandNotFoundError if venv_bin is missing
utils.check_or_die(venv_bin)
cmd = '{venv_bin} {args} {path}'.format(
venv_bin=venv_bin,
args=''.join([
' --no-site-packages' if no_site_packages else '',
' --system-site-packages' if system_site_packages else '',
' --distribute' if distribute else '',
' --clear' if clear else '',
' --python {0}'.format(python) if python else '',
' --extra-search-dir {0}'.format(extra_search_dir
) if extra_search_dir else '',
' --never-download' if never_download else '',
' --prompt {0}'.format(prompt) if prompt else '']),
path=path)
return __salt__['cmd.run_all'](cmd, runas=runas)
|
StarcoderdataPython
|
8002131
|
<filename>tests/performance_tests/common.py
import argparse
from enum import Enum
SEC_IN_A_YEAR = 3600 * 24 * 365
class TimeUnit(Enum):
second = 's'
year = 'y'
def get_avg_events_sec(avg_events, time_unit):
return avg_events / SEC_IN_A_YEAR if TimeUnit(time_unit) == TimeUnit.year else avg_events
def get_parser():
parser = argparse.ArgumentParser(description='Mockup producer of data to test dispatcher performance')
parser.add_argument('-d', '--doc_size', dest='doc_size', type=int, default=100000,
help='The mean of the gaussian of the document size in byte')
    parser.add_argument('-s', '--doc_size_sigma', dest='doc_size_sigma', type=float, default=2000,
                        help='The standard deviation of the gaussian of the document size in byte')
parser.add_argument('--avg_events', dest='avg_events', type=float, default=13,
help='Average events (per year by default, see time_unit args).')
parser.add_argument('--time_unit', dest='time_unit', type=str, default=TimeUnit.year.value,
choices=[u.value for u in list(TimeUnit)],
help='Time unit for average events')
return parser
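# Minimal usage sketch (illustrative only): the sample argv values are assumptions,
# chosen to show how the parser and the per-second rate conversion compose.
if __name__ == "__main__":
    args = get_parser().parse_args(["--avg_events", "13", "--time_unit", "y"])
    print(get_avg_events_sec(args.avg_events, args.time_unit))  # 13 events/year as events/second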
|
StarcoderdataPython
|
9607273
|
<reponame>anirudhakulkarni/codes<filename>codeforces/anirudhak47/1389/C.py
def ans(s,x):
ana=0
bo=0
for i in range(len(x)):
if(bo==0):
if x[i]==s[0]:
ana+=1
bo=1
continue
if(bo==1):
if x[i]==s[1]:
ana+=1
bo=0
return ana
def solve(x):
a=["00","01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36","37","38","39","40","41","42","43","44","45","46","47","48","49","50","51","52","53","54","55","56","57","58","59","60","61","62","63","64","65","66","67","68","69","70","71","72","73","74","75","76","77","78","79","80","81","82","83","84","85","86","87","88","89","90","91","92","93","94","95","96","97","98","99"]
res=0
for i in range(len(a)):
aaa=ans(a[i],x)
if(aaa%2 and a[i][0]!=a[i][1]):
aaa-=1
res=max(res,aaa)
print(len(x)-res)
for i in range(int(input())):
x=(input())
y=set(x)
if(len(x)==len(y)):
print(len(x)-2)
continue
else:
solve(x)
|
StarcoderdataPython
|
4915574
|
import keras
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras import regularizers
from keras.losses import mean_squared_error
def create_baseline_model(activation = 'relu', input_shape=(64, 64)):
model = Sequential()
model.add(Conv2D(100, (11,11), padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))
model.add(AveragePooling2D((6,6)))
model.add(Reshape([-1, 8100]))
model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
model.add(Reshape([-1, 32, 32]))
return model
def create_model_larger(activation = 'relu', input_shape=(64, 64)):
"""
Larger (more filters) convnet model : one convolution, one average pooling and one fully connected layer:
:param activation: None if nothing passed, e.g : ReLu, tanh, etc.
:return: Keras model
"""
model = Sequential()
model.add(Conv2D(200, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))
model.add(AveragePooling2D((6,6)))
model.add(Reshape([-1, 16200]))
model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
model.add(Reshape([-1, 32, 32]))
return model
def create_model_deeper(activation = 'relu', input_shape=(64, 64)):
"""
Deeper convnet model : two convolutions, two average pooling and one fully connected layer:
:param activation: None if nothing passed, e.g : ReLu, tanh, etc.
:return: Keras model
"""
model = Sequential()
model.add(Conv2D(64, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))
model.add(AveragePooling2D((2,2)))
model.add(Conv2D(128, (10, 10), activation=activation, padding='valid', strides=(1, 1)))
model.add(AveragePooling2D((2,2)))
model.add(Reshape([-1, 128*9*9]))
model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
model.add(Reshape([-1, 32, 32]))
return model
def create_maxpooling_model(activation = 'relu', input_shape = (64,64)):
"""
Simple convnet model with max pooling: one convolution, one max pooling and one fully connected layer
:param activation: None if nothing passed, e.g : ReLu, tanh, etc.
:return: Keras model
"""
model = Sequential()
model.add(Conv2D(100, (11,11), activation='relu', padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))
model.add(MaxPooling2D((6,6)))
model.add(Reshape([-1, 8100]))
model.add(Dense(1024, activation = 'sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
model.add(Reshape([-1, 32, 32]))
return model
def print_model(model):
print('Size for each layer :\nLayer, Input Size, Output Size')
for p in model.layers:
print(p.name.title(), p.input_shape, p.output_shape)
def run_cnn(data, train = False):
X_train = data['X_train']
Y_train = data['Y_train']
X_test = data['X_test']
Y_test = data['Y_test']
if train:
model = create_maxpooling_model()
print_model(model)
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['accuracy'])
h = training(model, X_train, Y_train, batch_size=16, epochs= 10, data_augm=False)
        metric = 'loss'
plt.plot(range(len(h.history[metric])), h.history[metric])
plt.ylabel(metric)
plt.xlabel('epochs')
plt.title("Learning curve")
model.save('cnn_model_saved.h5')
y_pred = model.predict(X_test, batch_size = 16)
else:
try:
model = load_model('cnn_model_saved.h5')
except IOError as e:
print "I/O Error ({0}): {1}".format(e.errno, e.strerror)
y_pred = model.predict(X_test, batch_size = 16)
del model
return y_pred
def run(X, Y, model, X_to_pred=None, history=False, verbose=0, activation=None, epochs=20, data_augm=False):
if model == 'simple':
m = create_baseline_model(activation = activation)
elif model == 'larger':
m = create_model_larger(activation=activation)
elif model == 'deeper':
m = create_model_deeper(activation=activation)
elif model == 'maxpooling':
        m = create_maxpooling_model(activation=activation)
m.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['accuracy'])
if verbose > 0:
print('Size for each layer :\nLayer, Input Size, Output Size')
for p in m.layers:
print(p.name.title(), p.input_shape, p.output_shape)
h = training(m, X, Y, batch_size=16, epochs=epochs, data_augm=data_augm)
    if X_to_pred is None:
        X_to_pred = X
y_pred = m.predict(X_to_pred, batch_size=16)
if history:
return h, m
else:
return m
def training(model, X, Y, batch_size=16, epochs= 10, data_augm=False):
"""
Training CNN with the possibility to use data augmentation
:param m: Keras model
:param X: training pictures
:param Y: training binary ROI mask
:return: history
"""
if data_augm:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=50, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False)
datagen.fit(X)
history = model.fit_generator(datagen.flow(X, Y,
batch_size=batch_size),
steps_per_epoch=X.shape[0] // batch_size,
epochs=epochs)
else:
history = model.fit(X, Y, batch_size=batch_size, epochs=epochs)
return history
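# Minimal usage sketch (illustrative only): random arrays stand in for real 64x64
# grayscale inputs and their 32x32 target masks; shapes follow the Reshape layers above.
if __name__ == "__main__":
    import numpy as np
    X = np.random.rand(8, 64, 64, 1)
    Y = np.random.rand(8, 1, 32, 32)
    model = create_maxpooling_model()
    print_model(model)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    training(model, X, Y, batch_size=4, epochs=1)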
|
StarcoderdataPython
|
12812789
|
import re
import sublime
import sublime_plugin
CLASSES = {
'ActionBar': 'android.app.ActionBar',
'Activity': 'android.app.Activity',
'AlertDialog': 'android.app.AlertDialog',
'ArrayAdapter': 'android.widget.ArrayAdapter',
'ArrayList': 'java.util.ArrayList',
'Build': 'android.os.Build',
'Bundle': 'android.os.Bundle',
'Button': 'android.widget.Button',
'CheckBox': 'android.widget.CheckBox',
'CompoundButton': 'android.widget.CompoundButton',
'Configuration': 'android.content.res.Configuration',
'Context': 'android.content.Context',
'Date': 'java.util.Date',
'DateFormat': 'java.text.DateFormat',
'DialogInterface': 'android.content.DialogInterface',
'Editable': 'android.text.Editable',
'EditText': 'android.widget.EditText',
'Fragment': 'android.app.Fragment',
'FragmentManager': 'android.app.FragmentManager',
'Gravity': 'android.view.Gravity',
'ImageButton': 'android.widget.ImageButton',
    'Intent': 'android.content.Intent',
'LayoutInflater': 'android.view.LayoutInflater',
'LayoutParams': 'android.widget.LinearLayout.LayoutParams',
    'LinearLayout': 'android.widget.LinearLayout',
'ListFragment': 'android.app.ListFragment',
'ListView': 'android.widget.ListView',
'Log': 'android.util.Log',
'Menu': 'android.view.Menu',
'MenuInflater': 'android.view.MenuInflater',
'MenuItem': 'android.view.MenuItem',
'OnCheckedChangeListener': 'android.widget.CompoundButton.OnCheckedChangeListener',
'OnClickListener': 'android.view.View.OnClickListener',
'OnInitListener': 'android.speech.tts.TextToSpeech.OnInitListener',
'RelativeLayout': 'android.widget.RelativeLayout',
'SimpleDateFormat': 'java.text.SimpleDateFormat',
'TextToSpeech': 'android.speech.tts.TextToSpeech',
'TextView': 'android.widget.TextView',
'TextWatcher': 'android.text.TextWatcher',
'Toast': 'android.widget.Toast',
'UUID': 'java.util.UUID',
'View': 'android.view.View',
'ViewGroup': 'android.view.ViewGroup',
'ViewPager': 'android.support.v4.view.ViewPager',
}
class AndroidAddImportsCommand(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
line_regions = self.view.lines(sublime.Region(0, self.view.size()))
imports = set()
classes = set()
final_point = None
insert_point = None
was_package = None
for region in line_regions:
line = self.view.substr(region)
import_match = re.search(r'^import (\w+\..*);', line)
if import_match:
if insert_point is None:
insert_point = region.a
# add 1 to select the newline
final_point = region.b + 1
imports |= {import_match.group(1),}
else:
for class_name in CLASSES.keys():
for class_match in re.finditer(r'\b{0}\b'.format(class_name), line):
classes |= {CLASSES[class_match.group(0)],}
if insert_point is None and not was_package:
empty_line_match = re.search(r'^$', line)
if empty_line_match:
insert_point = region.a
final_point = region.b
was_package = bool(re.search(r'^package', line))
to_import = list(classes | imports)
new_imports = classes - imports
to_import.sort()
if len(to_import):
msg = ''
for stmt in new_imports:
if len(msg):
msg += ', '
msg += stmt
if msg:
sublime.status_message('Adding: ' + msg)
else:
sublime.status_message('Nothing to add')
to_import_stmt = map(lambda stmt: "import " + stmt + ";\n", to_import)
to_insert = ''
insert_region = sublime.Region(insert_point, final_point)
for import_stmt in to_import_stmt:
to_insert += import_stmt
self.view.replace(edit, insert_region, to_insert)
class AndroidGenerateSettersCommand(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
line_regions = self.view.lines(sublime.Region(0, self.view.size()))
to_add = []
getters = []
setters = []
index = 0
last_line = None
for region in line_regions:
line = self.view.substr(region)
attr_match = re.search(r'^([ \t]+)(private|public) ((?!class)(?!static)[\w<>]+(?:\[\])?) [ms]?[_]{0,2}(\w+)\b(?!\()', line)
getter_match = re.search(r'^[ \t/]+(private|public) ([\w<>]+(?:\[\])?) (?:get|is)([A-Z]\w*)\b(?=\()', line)
setter_match = re.search(r'^[ \t/]+(private|public) ([\w<>]+(?:\[\])?) set([A-Z]\w*)\b(?=\()', line)
last_line_match = re.search(r'^}', line)
if last_line_match:
last_line = region.a
elif attr_match:
ws = attr_match.group(1)
scope = attr_match.group(2)
varclass = attr_match.group(3)
varname = attr_match.group(4)
entry = {
'scope': scope,
'whitespace': ws,
'varclass': varclass,
'varname': varname,
'getter': True,
'setter': True,
'index': index
}
index += 1
to_add.append(entry)
elif getter_match:
text = getter_match.group(3)
varname = text[0].lower() + text[1:]
getters.append(varname)
elif setter_match:
text = setter_match.group(3)
varname = text[0].lower() + text[1:]
setters.append(varname)
will_add = []
for entry in to_add:
for text in getters:
if entry['varname'] == text:
entry['getter'] = False
for text in setters:
if entry['varname'] == text:
entry['setter'] = False
if entry['getter'] or entry['setter']:
will_add.append(entry)
will_add.sort(key=lambda entry: -entry['index'])
for entry in will_add:
ws = entry['whitespace']
varclass = entry['varclass']
varname = entry['varname']
ucfirst = varname[0].upper() + varname[1:]
if entry['setter']:
setter = '{ws}public void set{ucfirst}({varclass} {varname}) {{ _{varname} = {varname}; }}\n'.format(varclass=varclass, varname=varname, ucfirst=ucfirst, ws=ws, tab=' ')
self.view.replace(edit, sublime.Region(last_line, last_line), setter)
if entry['getter']:
if entry['varclass'] == 'boolean':
prefix = 'is'
else:
prefix = 'get'
getter = '{ws}public {varclass} {prefix}{ucfirst}() {{ return _{varname}; }}\n'.format(varclass=varclass, prefix=prefix, varname=varname, ucfirst=ucfirst, ws=ws, tab=' ')
self.view.replace(edit, sublime.Region(last_line, last_line), getter)
|
StarcoderdataPython
|
6445066
|
#!/usr/bin/env python
# SCADA Simulator
#
# Copyright 2018 Carnegie Mellon University. All Rights Reserved.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Packery (https://packery.metafizzy.co/license.html) Copyright 2018 metafizzy.
# 2. Bootstrap (https://getbootstrap.com/docs/4.0/about/license/) Copyright 2011-2018 Twitter, Inc. and Bootstrap Authors.
# 3. JIT/Spacetree (https://philogb.github.io/jit/demos.html) Copyright 2013 Sencha Labs.
# 4. html5shiv (https://github.com/aFarkas/html5shiv/blob/master/MIT%20and%20GPL2%20licenses.md) Copyright 2014 <NAME>.
# 5. jquery (https://jquery.org/license/) Copyright 2018 jquery foundation.
# 6. CanvasJS (https://canvasjs.com/license/) Copyright 2018 fenopix.
# 7. Respond.js (https://github.com/scottjehl/Respond/blob/master/LICENSE-MIT) Copyright 2012 <NAME>.
# 8. Datatables (https://datatables.net/license/) Copyright 2007 SpryMedia.
# 9. jquery-bridget (https://github.com/desandro/jquery-bridget) Copyright 2018 <NAME>.
# 10. Draggabilly (https://draggabilly.desandro.com/) Copyright 2018 <NAME>.
# 11. Business Casual Bootstrap Theme (https://startbootstrap.com/template-overviews/business-casual/) Copyright 2013 Blackrock Digital LLC.
# 12. Glyphicons Fonts (https://www.glyphicons.com/license/) Copyright 2010 - 2018 GLYPHICONS.
# 13. Bootstrap Toggle (http://www.bootstraptoggle.com/) Copyright 2011-2014 <NAME>, The New York Times.
# DM18-1351
#
import sys
import yaml
from os import path
# (Default) open txt file to get config.yaml file name IF path of config file was not supplied as argument to master.py
# strip any trailing /n from string
if len(sys.argv) == 1:
f = open("/home/hp/Desktop/SCADASim/startup/config_file_name.txt", 'r')
file_name = f.read()
file_name = file_name.rstrip()
f.close()
else:
file_name = sys.argv[1]
# open yaml config file, build object
config_file = open(file_name, 'r')
config_yaml = yaml.safe_load(config_file)
config_file.close()
# get number of plc devices from MASTER section of the config file
num_of_plc = config_yaml['MASTER']['num_of_PLC']
# create backup files if they do not already exist - 1 for each PLC device
i = 0
while(i < num_of_plc):
num = str(i)
plc_device_name = 'PLC ' + num
# keep in the src repo, in format of "backup_N.yaml", where N is the ID of the PLC device
backup_file_name = '/home/hp/Desktop/SCADASim/backups/backup_' + num + '.yaml'
# collect num of register/coils for each plc device
hr_values = config_yaml[plc_device_name]['DATASTORE']['hr']['values']
co_values = config_yaml[plc_device_name]['DATASTORE']['co']['values']
di_values = config_yaml[plc_device_name]['DATASTORE']['di']['values']
ir_values = config_yaml[plc_device_name]['DATASTORE']['ir']['values']
    # create the backup only if it does not exist yet or is empty
    if not path.exists(backup_file_name) or path.getsize(backup_file_name) == 0:
        # create file - only storing the register starting address and values
backup_dict = {}
backup = open(backup_file_name, 'w+')
backup_dict['DATASTORE'] = {'hr': {'start_addr': 1, 'values': hr_values}, 'ir': {'start_addr': 1, 'values': ir_values}, 'co': {'start_addr': 1, 'values': co_values}, 'di': {'start_addr': 1, 'values': di_values}}
yaml.dump(backup_dict, backup)
backup.close()
i = i + 1
# return number of backup files created and the config filepath to bash startup script
print(str(num_of_plc)+' '+file_name)
|
StarcoderdataPython
|
9660784
|
<filename>djangorest_alchemy/tests/test_apibuilder.py
import unittest
import mock
from djangorest_alchemy.apibuilder import APIModelBuilder
class TestAPIBuilder(unittest.TestCase):
def test_urls(self):
"""
Test basic urls property
"""
class Model(object):
pass
class Model2(object):
pass
class SessionMixin(object):
def __init__(self):
self.session = mock.Mock()
builder = APIModelBuilder([Model, Model2], SessionMixin)
self.assertIsNotNone(builder.urls)
|
StarcoderdataPython
|
327865
|
import pywaves as pw
"""
pywaves is an open source library for waves. While it is not as stable as REST API,
we leave the test here if integration is desired for future dexbot cross-exchange strategies.
"""
if __name__ == '__main__':
try:
# set the asset pair
WAVES_BTC = pw.AssetPair(pw.WAVES, pw.BTC)
# get last price and volume
print(WAVES_BTC.last(), WAVES_BTC.volume(), sep=' ')
# get ticker
ticker = WAVES_BTC.ticker()
print(ticker['24h_open'])
print(ticker['24h_vwap'])
# get last 10 trades
trades = WAVES_BTC.trades(10)
for t in trades:
print(t['buyer'], t['seller'], t['price'], t['amount'], sep=' ')
# get last 10 daily OHLCV candles
ohlcv = WAVES_BTC.candles(1440, 10)
for t in ohlcv:
print(t['open'], t['high'], t['low'], t['close'], t['volume'], sep=' ')
except Exception as e:
print(type(e).__name__, e.args, 'Exchange Error (ignoring)')
|
StarcoderdataPython
|
8056688
|
# Delfino Mercenary (9390232)
|
StarcoderdataPython
|
3234100
|
from django.db import models
from psqlextra.fields import HStoreField
from psqlextra.expressions import HStoreRef
from .fake_model import get_fake_model
def test_annotate_hstore_key_ref():
"""Tests whether annotating using a :see:HStoreRef expression
works correctly.
This allows you to select an individual hstore key."""
model_fk = get_fake_model({
'title': HStoreField(),
})
model = get_fake_model({
'fk': models.ForeignKey(model_fk)
})
fk = model_fk.objects.create(title={'en': 'english', 'ar': 'arabic'})
model.objects.create(fk=fk)
queryset = (
model.objects
.annotate(english_title=HStoreRef('fk__title', 'en'))
.values('english_title')
.first()
)
assert queryset['english_title'] == 'english'
|
StarcoderdataPython
|
8142892
|
"""
FBDTools library - icecream package
This module contains helper methods that generate plot objects useful for
drawing PD/ICE/ALE plots using data objects containing features,
targets and predictions
"""
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from .config import options
from .discretizer import FeatureDiscretizer
FEATURE_CONST = "feature "
def detect_axis_range(*args: Union[pd.Series, pd.DataFrame]) -> Optional[List[float]]:
"""
Determines axis range based on content of data.
Returns [-0.05, 1.05] if data is contained in this interval, else None.
Parameters
----------
args : Union[pd.Series, pd.DataFrame]
Dataframes or series containing values shown on axis,
each arg should have an attribute `values` that returns a np.ndarray
Returns
-------
range : Optional[List[float]]
Axis range values, None to let the graph library decide
"""
# prevent problems if data is not numerical
try:
full_data = np.vstack([x.values for x in args if hasattr(x, "values")])
if (np.nanmin(full_data) >= 0) and (np.nanmax(full_data) <= 1):
return [-0.05, 1.05]
    except (TypeError, ValueError, AttributeError):
        return None
return None
def plotly_background_bars(
feature: FeatureDiscretizer,
opacity: float = 0.5,
marker: Dict[str, Any] = dict(
color=options.bars_color, line=dict(color=options.bars_color, width=1)
),
name: str = "histogram",
hbar: bool = False,
) -> go.Bar:
"""
Generates a background bar/histogram plot with Plotly using discretized
representation of the feature.
Wrapper around go.Bar with sane defaults with present usage.
Parameters
----------
feature : FeatureDiscretizer
Discretized representation of the feature
opacity : float
Opacity of the bars
marker : Dict[str, Any], optional
Plotly option: marker definition (the default is dict(size=10, line=dict(width=1)))
name : str, optional
Plotly option: Name given to the trace (the default is "output")
hbar : bool, optional
Option to plot bars horizontally (the default is False)
Returns
-------
bar : go.Bar
Bar plot representing the distribution of values of feature
"""
if hbar:
return go.Bar(
x=feature.counts,
y=feature.centers,
width=feature.widths,
opacity=opacity,
marker=marker,
xaxis="x2",
orientation="h",
name=name,
)
else:
return go.Bar(
x=feature.centers,
y=feature.counts,
width=feature.widths,
opacity=opacity,
marker=marker,
yaxis="y2",
name=name,
)
def plotly_line(
feature: FeatureDiscretizer,
outputs: pd.Series,
mode: str = "lines+markers",
marker: Dict[str, Any] = dict(size=10, line=dict(width=1)),
line: Optional[Dict[str, Any]] = None,
name: Optional[str] = "output",
showlegend: bool = True,
) -> go.Scatter:
"""
Generates a line plot with Plotly to show outputs for discretized feature.
Wrapper around go.Scatter with sane defaults with present usage.
Parameters
----------
feature : FeatureDiscretizer
Discretized representation of the feature, used for x values
outputs : pd.Series
Y values
mode : str, optional
Plotly option: line plot mode (the default is "lines+markers")
marker : Dict[str, Any], optional
Plotly option: marker definition (the default is dict(size=10, line=dict(width=1)))
line : Dict[str, Any], optional None
Plotly option: line definition, mainly useful if mode="lines"
(the default is None)
name : str, optional
Plotly option: Name given to the trace (the default is "output")
showlegend : bool, optional
Plotly option: Option to show name in legend or not (the default is True)
Returns
-------
line : go.Scatter
Line plot representing the output values
"""
return go.Scatter(
x=feature.centers,
y=outputs,
mode=mode,
marker=marker,
line=line,
name=name,
showlegend=showlegend,
)
def plotly_boxes(
outputs: pd.Series, marker: Optional[Dict[str, Any]] = None, name: str = "output"
) -> go.Box:
"""
Generates boxes with Plotly using 2D dataframe.
Uses dataframe columns as x values for boxes, and rows generate the boxes.
Parameters
----------
outputs : pd.Series
Dataframe where columns are bins of discretized feature,
rows are predictions
name : str, optional
Name given to the boxes in the legend (the default is "output")
Returns
-------
boxes : go.Box
Boxes plot representing the distribution of values
of rows of outputs for each column of outputs
"""
melted = outputs.melt()
return go.Box(x=melted.variable, y=melted.value, marker=marker, name=name)
def plotly_partial_dependency(
feature: FeatureDiscretizer,
agg_predictions: Optional[pd.Series],
agg_targets: Optional[pd.Series],
aggfunc: str = "",
use_ale: bool = False,
) -> go.FigureWidget:
"""
Generates a Partial Dependency Plot for given feature, predictions and targets.
Parameters
----------
feature : FeatureDiscretizer
Discretized representation of the feature, used for x values
agg_predictions : Optional[pd.Series])
Aggregated predictions for feature, used for y values
agg_targets : Optional[pd.Series]
Aggregated values of targets, used for y values
aggfunc : str = ""
Name of aggregation function for legend
use_ale : bool, optional
        Whether to plot accumulated local effects (ALE) instead of partial dependency (the default is False)
Returns
-------
figure : go.FigureWidget
Full partial dependency plot
"""
data = [plotly_background_bars(feature)]
if use_ale:
name = "ALE"
multi_name = "ALE {}"
else:
name = "{} effect".format(aggfunc)
multi_name = "effect {}"
if agg_predictions is not None:
# in case there are several prediction values (multiclass)
if len(agg_predictions.shape) > 1:
            for label, values in agg_predictions.iterrows():
                data.append(
                    plotly_line(feature, values, name=multi_name.format(label))
)
else:
data.append(plotly_line(feature, agg_predictions, name=name))
if agg_targets is not None and not use_ale:
if len(agg_targets.shape) > 1:
for name, values in agg_targets.iterrows():
data.append(
plotly_line(
feature, values, name="{} target {}".format(aggfunc, name)
)
)
else:
data.append(
plotly_line(
feature,
agg_targets,
name="{} target".format(aggfunc),
marker=dict(
size=10, color=options.targets_color, line=dict(width=1)
),
)
)
yaxis_range = detect_axis_range(agg_predictions, agg_targets)
layout = go.Layout(
xaxis=dict(title=FEATURE_CONST + feature.name),
yaxis=dict(range=yaxis_range, title="value", overlaying="y2"),
yaxis2=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
)
return go.FigureWidget(data=data, layout=layout)
def plotly_ice_box(
feature: FeatureDiscretizer,
predictions: Optional[pd.DataFrame],
agg_targets: Optional[pd.Series],
aggfunc: str = "",
) -> go.FigureWidget:
"""
Generates an ICE Box Plot with Plotly for given feature, predictions and targets.
Parameters
----------
feature : FeatureDiscretizer
Discretized representation of the feature, used for x values
predictions : pd.DataFrame
Dataframe of predictions for feature, columns must be bins of `feature`
agg_targets : pd.Series
Aggregated values of targets, used for y values
aggfunc : str = ""
Name of aggregation function for legend
Returns
-------
figure : go.FigureWidget
Full ICE Box plot
"""
data = [plotly_background_bars(feature)]
if predictions is not None:
data.append(
plotly_boxes(
predictions,
name="prediction",
marker=dict(color=options.predictions_color),
)
)
if agg_targets is not None:
data.append(
plotly_line(
feature,
agg_targets,
name="{} label".format(aggfunc),
marker=dict(size=10, color=options.targets_color, line=dict(width=1)),
)
)
yaxis_range = detect_axis_range(predictions, agg_targets)
layout = go.Layout(
xaxis=dict(title=FEATURE_CONST + feature.name),
yaxis=dict(range=yaxis_range, title="value", overlaying="y2"),
yaxis2=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
)
return go.FigureWidget(data=data, layout=layout)
def plotly_ice_lines(
feature: FeatureDiscretizer,
samples: pd.DataFrame,
counts: np.ndarray,
names: List[str],
colors: List[str],
agg_targets: Optional[pd.Series],
aggfunc: str = "",
) -> go.FigureWidget:
"""
Generates an ICE Plot with Plotly for given feature, predictions and targets.
Operates a sampling or clustering on predictions to draw a limited number
of lines.
Parameters
----------
feature : FeatureDiscretizer
Discretized representation of the feature, used for x values
samples : pd.DataFrame
Quantiles limits as a dataframe of shape (len(data), nb_rows)
counts : np.ndarray
Number of examples in each cluster, array of shape (nb_rows)
names : List[str]
Description of each row of samples
colors : List[str]
Colors for plotting each row of samples
agg_targets : pd.Series
Aggregated values of targets, used for y values
aggfunc : str = ""
Name of aggregation function for legend
Returns
-------
figure : go.FigureWidget
Full ICE plot
"""
data = [plotly_background_bars(feature)]
for (_, sample), count, name, color in zip(
samples.iterrows(), counts, names, colors
):
data.append(
plotly_line(
feature,
sample,
name=name,
showlegend=False,
mode="lines",
line=dict(color=color, width=count / counts.mean()),
)
)
if agg_targets is not None:
data.append(
plotly_line(
feature,
agg_targets,
name="{} label".format(aggfunc),
marker=dict(size=10, color=options.targets_color, line=dict(width=1)),
)
)
yaxis_range = detect_axis_range(samples, agg_targets)
layout = go.Layout(
xaxis=dict(title=FEATURE_CONST + feature.name),
yaxis=dict(range=yaxis_range, title="value", overlaying="y2"),
yaxis2=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
)
return go.FigureWidget(data=data, layout=layout)
def plotly_partial_dependency_2d_scatter(
feature_x: FeatureDiscretizer,
feature_y: FeatureDiscretizer,
counts: pd.DataFrame,
values: pd.DataFrame,
name: Optional[str] = "output",
) -> go.FigureWidget:
"""
Generates a heatmap + scatter with Plotly for given features, counts and values.
Heatmap represent predictions or target values, scatter represent histogram.
Parameters
----------
feature_x : FeatureDiscretizer
Discretized representation of the feature, used for x values
feature_y : FeatureDiscretizer
Discretized representation of the feature, used for y values
counts : pd.DataFrame
Pivot table of number of examples in each bin of features
values : pd.DataFrame
Pivot table of output values (predictions or targets) for each bin of features
name : str
Plotly option: Name given to the trace (the default is "output")
Returns
-------
figure : go.FigureWidget
Full heatmap + scatter plot
"""
zaxis_range = detect_axis_range(values)
zmin = None
zmax = None
if zaxis_range is not None:
zmin = zaxis_range[0]
zmax = zaxis_range[1]
heatmap = go.Heatmap(
x=feature_x.centers,
y=feature_y.centers,
z=values,
zmin=zmin,
zmax=zmax,
colorscale=options.heatmap_colorscale,
colorbar=dict(len=0.7),
name=name,
)
mesh_x, mesh_y = np.meshgrid(feature_x.centers, feature_y.centers)
counts_flat = counts.values.flatten()
scatter = go.Scatter(
x=mesh_x.flatten(),
y=mesh_y.flatten(),
mode="markers",
marker=dict(
opacity=1,
color="rgba(0,0,0,0)",
size=10 * counts_flat / counts_flat.max(),
line=dict(color="black", width=2),
),
name="histogram",
text=counts_flat,
)
data = [heatmap, scatter]
layout = go.Layout(
xaxis=dict(title=FEATURE_CONST + feature_x.name, showgrid=False),
yaxis=dict(title=FEATURE_CONST + feature_y.name, showgrid=False),
autosize=False,
showlegend=True,
hovermode="closest",
title=name,
)
return go.FigureWidget(data=data, layout=layout)
def plotly_partial_dependency_2d_hist(
feature_x: FeatureDiscretizer,
feature_y: FeatureDiscretizer,
counts: pd.DataFrame,
values: pd.DataFrame,
name: Optional[str] = "output",
) -> go.FigureWidget:
"""
Generates a heatmap + 2 histograms with Plotly for given features and values.
Heatmap represent predictions or target values.
Parameters
----------
feature_x : FeatureDiscretizer
Discretized representation of the feature, used for x values
feature_y : FeatureDiscretizer
Discretized representation of the feature, used for y values
counts : pd.DataFrame
Pivot table of number of examples in each bin of features
values : pd.DataFrame
Pivot table of output values (predictions or targets) for each bin of features
name : str
Plotly option: Name given to the trace (the default is "output")
Returns
-------
figure : go.FigureWidget
Full heatmap + hists plot
"""
zaxis_range = detect_axis_range(values)
zmin = None
zmax = None
if zaxis_range is not None:
zmin = zaxis_range[0]
zmax = zaxis_range[1]
heatmap = go.Heatmap(
x=feature_x.centers,
y=feature_y.centers,
z=values,
zmin=zmin,
zmax=zmax,
colorscale=options.heatmap_colorscale,
colorbar=dict(len=0.7),
name=name,
text=counts,
)
hist_x = plotly_background_bars(feature_x)
hist_y = plotly_background_bars(feature_y, hbar=True)
data = [heatmap, hist_x, hist_y]
layout = go.Layout(
xaxis=dict(
title=FEATURE_CONST + feature_x.name,
domain=[0, 0.85],
showgrid=False,
ticks="",
),
yaxis=dict(
title=FEATURE_CONST + feature_y.name,
domain=[0, 0.85],
showgrid=False,
ticks="",
),
xaxis2=dict(domain=[0.85, 1], showgrid=False, ticks="", showticklabels=False),
yaxis2=dict(domain=[0.85, 1], showgrid=False, ticks="", showticklabels=False),
autosize=False,
bargap=0,
showlegend=True,
hovermode="closest",
title=name,
)
return go.FigureWidget(data=data, layout=layout)
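# Minimal usage sketch (illustrative only): the sample series are assumptions showing
# that detect_axis_range pins probability-like data to [-0.05, 1.05] and otherwise
# defers to the plotting library by returning None.
if __name__ == "__main__":
    probs = pd.Series([0.1, 0.5, 0.9])
    raw = pd.Series([-3.0, 2.0, 7.5])
    print(detect_axis_range(probs))       # [-0.05, 1.05]
    print(detect_axis_range(probs, raw))  # None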
|
StarcoderdataPython
|
3357421
|
<reponame>pavalucas/Any2Some
__author__='thiagocastroferreira'
import argparse
from models.bartgen import BARTGen
from models.bert import BERTGen
from models.gportuguesegen import GPorTugueseGen
from models.t5gen import T5Gen
from models.gpt2 import GPT2
from torch.utils.data import DataLoader, Dataset
import nltk
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
nltk.download('punkt')
import os
import torch
from torch import optim
class Trainer:
'''
Module for training a generative neural model
'''
def __init__(self, model, trainloader, devdata, optimizer, epochs, \
batch_status, device, write_path, early_stop=5, verbose=True, language='english'):
'''
params:
---
model: model to be trained
trainloader: training data
devdata: dev data
optimizer
epochs: number of epochs
batch_status: update the loss after each 'batch_status' updates
            device: cpu or gpu
write_path: folder to save best model
early_stop
verbose
language
'''
self.model = model
self.optimizer = optimizer
self.epochs = epochs
self.batch_status = batch_status
self.device = device
self.early_stop = early_stop
self.verbose = verbose
self.trainloader = trainloader
self.devdata = devdata
self.write_path = write_path
self.language = language
if not os.path.exists(write_path):
os.mkdir(write_path)
def train(self):
'''
Train model based on the parameters specified in __init__ function
'''
max_bleu, repeat = 0, 0
for epoch in range(self.epochs):
self.model.model.train()
losses = []
for batch_idx, inp in enumerate(self.trainloader):
intents, texts = inp['X'], inp['y']
self.optimizer.zero_grad()
# generating
output = self.model(intents, texts)
# Calculate loss
loss = output.loss
losses.append(float(loss))
# Backpropagation
loss.backward()
self.optimizer.step()
# Display
if (batch_idx+1) % self.batch_status == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTotal Loss: {:.6f}'.format(epoch, \
batch_idx+1, len(self.trainloader), 100. * batch_idx / len(self.trainloader),
float(loss), round(sum(losses) / len(losses), 5)))
bleu, acc = self.evaluate()
print('BLEU: ', bleu, 'Accuracy: ', acc)
if bleu > max_bleu:
self.model.model.save_pretrained(os.path.join(self.write_path, 'model'))
max_bleu = bleu
repeat = 0
print('Saving best model...')
else:
repeat += 1
if repeat == self.early_stop:
break
def evaluate(self):
'''
Evaluating the model in devset after each epoch
'''
self.model.model.eval()
results = {}
for batch_idx, inp in enumerate(self.devdata):
intent, text = inp['X'], inp['y']
if intent not in results:
results[intent] = { 'hyp': '', 'refs': [] }
# predict
output = self.model([intent])
results[intent]['hyp'] = output[0]
# Display
if (batch_idx+1) % self.batch_status == 0:
print('Evaluation: [{}/{} ({:.0f}%)]'.format(batch_idx+1, \
len(self.devdata), 100. * batch_idx / len(self.devdata)))
results[intent]['refs'].append(text)
hyps, refs, acc = [], [], 0
for i, intent in enumerate(results.keys()):
if i < 20 and self.verbose:
print('Real: ', results[intent]['refs'][0])
print('Pred: ', results[intent]['hyp'])
print()
if self.language != 'english':
hyps.append(nltk.word_tokenize(results[intent]['hyp'], language=self.language))
refs.append([nltk.word_tokenize(ref, language=self.language) for ref in results[intent]['refs']])
else:
hyps.append(nltk.word_tokenize(results[intent]['hyp']))
refs.append([nltk.word_tokenize(ref) for ref in results[intent]['refs']])
if results[intent]['hyp'] in results[intent]['refs'][0]:
acc += 1
chencherry = SmoothingFunction()
bleu = corpus_bleu(refs, hyps, smoothing_function=chencherry.method3)
return bleu, float(acc) / len(results)
class NewsDataset(Dataset):
def __init__(self, data):
"""
Args:
data (string): data
"""
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def load_data(src_fname, trg_fname):
with open(src_fname) as f:
src = f.read().split('\n')
with open(trg_fname) as f:
trg = f.read().split('\n')
assert len(src) == len(trg)
data = [{ 'X': src[i], 'y': trg[i] } for i in range(len(src))]
return data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--tokenizer", help="path to the tokenizer", required=True)
parser.add_argument("--model", help="path to the model", required=True)
parser.add_argument("--src_train", help="path to the source train data", required=True)
parser.add_argument("--trg_train", help="path to the target train data", required=True)
parser.add_argument("--src_dev", help="path to the source dev data", required=True)
parser.add_argument("--trg_dev", help="path to the target dev data", required=True)
parser.add_argument("--epochs", help="number of epochs", type=int, default=5)
parser.add_argument("--learning_rate", help="learning rate", type=float, default=1e-5)
parser.add_argument("--batch_size", help="batch size", type=int, default=16)
parser.add_argument("--early_stop", help="earling stop", type=int, default=3)
parser.add_argument("--max_length", help="maximum length to be processed by the network", type=int, default=180)
parser.add_argument("--write_path", help="path to write best model", required=True)
parser.add_argument("--language", help="language", default='english')
parser.add_argument("--verbose", help="should display the loss?", action="store_true")
parser.add_argument("--batch_status", help="display of loss", type=int)
parser.add_argument("--cuda", help="use CUDA", action="store_true")
parser.add_argument("--src_lang", help="source language of mBART tokenizer", default='pt_XX')
parser.add_argument("--trg_lang", help="target language of mBART tokenizer", default='pt_XX')
args = parser.parse_args()
# settings
learning_rate = args.learning_rate
epochs = args.epochs
batch_size = args.batch_size
batch_status = args.batch_status
    early_stop = args.early_stop
language = args.language
try:
verbose = args.verbose
except:
verbose = False
try:
device = 'cuda' if args.cuda else 'cpu'
except:
device = 'cpu'
write_path = args.write_path
# model
max_length = args.max_length
tokenizer_path = args.tokenizer
model_path = args.model
if 'mbart' in tokenizer_path:
src_lang = args.src_lang
trg_lang = args.trg_lang
generator = BARTGen(tokenizer_path, model_path, max_length, device, True, src_lang, trg_lang)
elif 'bart' in tokenizer_path:
generator = BARTGen(tokenizer_path, model_path, max_length, device, False)
elif 'bert' in tokenizer_path:
generator = BERTGen(tokenizer_path, model_path, max_length, device)
elif 'mt5' in tokenizer_path:
generator = T5Gen(tokenizer_path, model_path, max_length, device, True)
elif 't5' in tokenizer_path:
generator = T5Gen(tokenizer_path, model_path, max_length, device, False)
elif 'gpt2-small-portuguese' in tokenizer_path:
generator = GPorTugueseGen(tokenizer_path, model_path, max_length, device)
elif tokenizer_path == 'gpt2':
generator = GPT2(tokenizer_path, model_path, max_length, device)
else:
raise Exception("Invalid model")
# train data
src_fname = args.src_train
trg_fname = args.trg_train
data = load_data(src_fname, trg_fname)
dataset = NewsDataset(data)
trainloader = DataLoader(dataset, batch_size=batch_size)
# dev data
src_fname = args.src_dev
trg_fname = args.trg_dev
devdata = load_data(src_fname, trg_fname)
# optimizer
optimizer = optim.AdamW(generator.model.parameters(), lr=learning_rate)
# trainer
trainer = Trainer(generator, trainloader, devdata, optimizer, epochs, batch_status, device, write_path, early_stop, verbose, language)
trainer.train()
|
StarcoderdataPython
|
1730343
|
<filename>src/rcpicar/car/NetworkStatisticsMessage.py
from __future__ import annotations
from ..message import IMessage
separator = ','
class NetworkStatisticsMessage(IMessage):
def __init__(self, expire_receive_count: int, receive_count: int, timeout_receive_count: int) -> None:
self.expire_receive_count = expire_receive_count
self.receive_count = receive_count
self.timeout_receive_count = timeout_receive_count
def encode(self) -> bytes:
return separator.join([
str(self.expire_receive_count),
str(self.receive_count),
str(self.timeout_receive_count),
]).encode()
@staticmethod
def decode(message: bytes) -> NetworkStatisticsMessage:
expire_receive_count, receive_count, timeout_receive_count = message.decode().split(separator)
return NetworkStatisticsMessage(int(expire_receive_count), int(receive_count), int(timeout_receive_count))
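# Minimal usage sketch (illustrative only): the counter values are assumptions used
# to show a simple encode/decode round trip.
if __name__ == "__main__":
    original = NetworkStatisticsMessage(expire_receive_count=2, receive_count=120, timeout_receive_count=1)
    restored = NetworkStatisticsMessage.decode(original.encode())
    assert restored.receive_count == 120 and restored.timeout_receive_count == 1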
|
StarcoderdataPython
|
6410096
|
<reponame>DinoSubbu/SmartEnergyManagementSystem
import requests
import json
from sqlalchemy import Column, Text, Integer, Float, ForeignKey, DateTime
from datetime import datetime
class WeatherAPI:
def __init__(self, api_key="3d34a9a9b0e544269a3ddbb97ec89ba7", api_current="https://api.weatherbit.io/v2.0/current", api_forecast = "https://api.weatherbit.io/v2.0/forecast/hourly?"):
""" inits weather request class """
# constants
self.API_KEY = api_key
# necessary data for request
self.API_CURRENT = api_current
self.API_FORECAST = api_forecast
def getCurrent(self, lat, lon):
"""
queries the api for the current weather and returns the relevant information
@return: weatherdata as dict:
timestamp
temp in °celsius
windSpeed in m/s
pressure in Pa
relativeHumidity in %
"""
payloadCurrent = {'lat': str(lat), 'lon':str(lon), 'key': self.API_KEY}
try:
r = requests.get(self.API_CURRENT, payloadCurrent)
if r.status_code != 200:
print("not successfull status code: ", r.status_code)
print(r.text)
return
print("current weather data gathered")
# returned data is in json
data = r.json()["data"][0]
# TODO timestamp
# timestamp with request is given in utc and not local time
# timestamp is encoded like "2017-08-28:17"
# timestampString = data["datetime"]
# # convert from string to datetime object
# timestamp = datetime.strptime(timestampString, '%Y-%m-%d:%H')
timestamp = datetime.now()
# timestamp has to be aligned to hourly basis
timestamp = timestamp.replace(minute=0, second=0, microsecond=0)
# get needed attributes of json object
tempData = {}
tempData["timestamp"] = timestamp
tempData["temp"] = data["temp"]
tempData["windSpeed"] = data["wind_spd"]
# pressure is returned in millibar
# needs to be converted to pascal
tempData["pressure"] = data["pres"] * 100
tempData["relativeHumidity"] = data["rh"]
tempData["s_horizontal"] = data["ghi"]
return tempData
except requests.RequestException as e:
print("request exception")
print(e)
def getForecast(self, lat, lon):
"""
queries the api for the forecast weather and returns the relevant information
@return: Array of weatherdata as dict:
timestamp
temp in °celsius
windSpeed in m/s
pressure in Pa
relativeHumidity in %
"""
payloadForecast = {'lat': str(lat), 'lon':str(lon), 'key': self.API_KEY, 'hours': '48'}
try:
r = requests.get(self.API_FORECAST, payloadForecast)
if r.status_code != 200:
print("not successfull status code: ", r.status_code)
print(r.text)
return
print("forecast weather data gathered")
# returned data is in json
data = r.json()["data"]
dataArray = []
for dataElement in data:
# get needed attributes of json object
# timestamp is encoded like "2018-04-02T00:00:00"
timestampString = dataElement["timestamp_local"]
# convert from string to datetime object
timestamp = datetime.strptime(timestampString, '%Y-%m-%dT%H:%M:%S')
tempData = {}
tempData["timestamp"] = timestamp
tempData["temp"] = dataElement["temp"]
tempData["windSpeed"] = dataElement["wind_spd"]
# pressure is returned in millibar
# needs to be converted to pascal
tempData["pressure"] = dataElement["pres"] * 100
tempData["relativeHumidity"] = dataElement["rh"]
tempData["s_horizontal"] = dataElement["ghi"]
dataArray.append(tempData)
return dataArray
except requests.RequestException as e:
print("request exception")
print(e)
if __name__ == "__main__":
weather = WeatherAPI()
print(weather.getForecast(48.78232, 9.17702))
|
StarcoderdataPython
|
9753343
|
<filename>shimmer/apps/BtStream/python/getShimmerVersion.py<gh_stars>0
#!/usr/bin/python
import sys, struct, array, time, serial
def wait_for_ack():
ddata = ""
ack = struct.pack('B', 0xff)
while ddata != ack:
ddata = ser.read(1)
return
if len(sys.argv) < 2:
print "no device specified"
print "You need to specifiy the serial port of the shimmer you wish to connect to"
print "example:"
print " getShimmerVersion.py Com5"
print " or"
print " getShimmerVersion.py /dev/rfcomm0"
print
else:
ser = serial.Serial(sys.argv[1], 115200)
ser.flushInput()
# send the get shimmer version command
ser.write(struct.pack('B', 0x24))
wait_for_ack()
print "Ack received for get shimmer version command"
ddata = ""
shimmerversionresponse = struct.pack('B', 0x25)
while ddata != shimmerversionresponse:
ddata = ser.read(1)
shimmerversion = struct.unpack('B', ser.read(1))
if shimmerversion[0] == 0:
print "Shimmer Version: shimmer 1.3"
elif shimmerversion[0] == 1:
print "Shimmer Version: shimmer 2"
elif shimmerversion[0] == 2:
print "Shimmer Version: shimmer 2r"
ser.close()
|
StarcoderdataPython
|
12841997
|
<reponame>matteocarde/asp2logic
class Result:
name: str
optimum: int
status: str
time: float
def __init__(self, name, optimum, status, time):
self.name = name
self.optimum = optimum
self.status = status
self.time = time
|
StarcoderdataPython
|
3454790
|
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'requirements.txt')) as f:
requirements = f.readlines()
requirements = [requirement.replace('\n', '') for requirement in requirements]
setup(
name='sshmanager',
version='0.1',
packages=['sshmanager'],
url='https://github.com/jenkins-zh/application-operation/apps/ssh-manager',
license='MIT',
author='Anxk',
author_email='<EMAIL>',
description='A tool to manage ssh authorized_keys files',
install_requires=requirements,
)
|
StarcoderdataPython
|
3526078
|
<gh_stars>0
from django.apps import AppConfig
class KstudentConfig(AppConfig):
name = 'kstudent'
|
StarcoderdataPython
|
1839461
|
from enum import Enum, unique
@unique
class TriviaQAType(Enum):
TEST = 1
|
StarcoderdataPython
|
3220372
|
<gh_stars>0
from nomenklatura.model.dataset import Dataset
from nomenklatura.model.entity import Entity
from nomenklatura.model.account import Account
__all__ = ['Dataset', 'Entity', 'Account']
|
StarcoderdataPython
|
9678176
|
<filename>how-to-use-azureml/azure-synapse/start_script.py
from pyspark.sql import SparkSession
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", default="")
parser.add_argument("--output", default="")
args, unparsed = parser.parse_known_args()
spark= SparkSession.builder.getOrCreate()
sc = spark.sparkContext
arr = sc._gateway.new_array(sc._jvm.java.lang.String, 2)
arr[0] = args.input
arr[1] = args.output
obj = sc._jvm.WordCount
obj.main(arr)
|
StarcoderdataPython
|
11364619
|
<gh_stars>0
from typing import List, NamedTuple
from .tomato_constants import *
class CropStates(NamedTuple):
carbohydrate_amount_Buf: float
    # lists of length FRUIT_DEVELOPMENT_STAGES_NUM
    carbohydrate_amount_Fruits: List[float]
    number_Fruits: List[float]
carbohydrate_amount_Leaf: float
carbohydrate_amount_Stem: float
dry_matter_Har: float
sum_canopy_t: float
last_24_canopy_t: float
|
StarcoderdataPython
|
11233415
|
<reponame>ICOS-Carbon-Portal/jupyter<filename>notebooks/icos_jupyter_notebooks/tools/visualization/bokeh_help_funcs/secondary_yaxis.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 16:40:00 2020
Function that aligns primary with secondary y-axis in
a Bokeh time-series plot.
"""
__author__ = ["<NAME>"]
__credits__ = "ICOS Carbon Portal"
__license__ = "GPL-3.0"
__version__ = "0.1.0"
__maintainer__ = "ICOS Carbon Portal, elaborated products team"
__email__ = ['<EMAIL>', '<EMAIL>']
__date__ = "2020-10-14"
######################################################################################################
def set_yranges_2y(y1_min, y1_max, y2_min, y2_max, y1_step, y2_step ,new_yrange_name):
"""
Project: 'ICOS Carbon Portal'
Created: Tue May 07 10:30:00 2018
Last Changed: Tue May 07 10:30:00 2019
Version: 1.0.0
Author(s): Karolina
Description: Function that takes the primary and secondary y-axis min/max values as well as
the step values for every y-axis and the secondary y-axis new range name as input
                 parameters, performs computations so that the two axes are aligned and returns
                 their corresponding Range1d objects. Works only for Bokeh plots.
Input parameters: 1. Min value of primary y-axis (var_name: 'y1_min', var_type: Integer or Float)
2. Max value of primary y-axis (var_name: 'y1_max', var_type: Integer or Float)
3. Min value of secondary y-axis (var_name: 'y2_min', var_type: Integer or Float)
4. Max value of secondary y-axis (var_name: 'y2_max', var_type: Integer or Float)
                      5. Step of primary y-axis (var_name: 'y1_step', var_type: Integer or Float)
                      6. Step of secondary y-axis (var_name: 'y2_step', var_type: Integer or Float)
                      7. Name of new yrange object for secondary y-axis
                         (var_name: "new_yrange_name", var_type: String)
Output: Bokeh Plot yrange objects for primary and secondary y-axes.
"""
#import modules:
import numpy as np
from bokeh.models import Range1d
#yrange and tick function for plot with primary and secondary y-axis:
yticks1 = np.arange(y1_min, y1_max + y1_step, y1_step)
yticks2 = np.arange(y2_min, y2_max + y2_step, y2_step)
#Get difference in total number of ticks between primary and secondary y-axis:
diff = abs(len(yticks2)-len(yticks1))
#Get how many times the step needs to be added to start and end:
num_of_steps = int(diff/2)
#If the primary and the secondary y-axis have the same number of ticks:
if(diff==0):
#Set the range of the 1st y-axis:
y_range = Range1d(start=y1_min, end=y1_max)
#Set the 2nd y-axis, range-name, range:
extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)}
#If the primary y-axis has fewer ticks than the secondary y-axis:
elif(len(yticks2)>len(yticks1)):
#If the difference in ticks between the two axes is an odd number:
if(diff%2==1):
#Set the range of the 1st y-axis:
y_range = Range1d(start=y1_min-(y1_step*(num_of_steps+1)),
end=y1_max+(y1_step*num_of_steps))
#Set the 2nd y-axis, range-name, range:
extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)}
#If the difference in ticks between the two axes is an even number:
else:
#Set the range of the 1st y-axis:
y_range = Range1d(start=y1_min-(y1_step*num_of_steps), end=y1_max+(y1_step*num_of_steps))
#Set the 2nd y-axis, range-name, range:
extra_y_ranges = {new_yrange_name: Range1d(start=y2_min, end=y2_max)}
    #If the primary y-axis has more ticks than the secondary y-axis, i.e. len(yticks1)>len(yticks2):
else:
#If the difference in ticks between the two axes is an odd number:
if(diff%2==1):
#Set the range of the 1st y-axis:
y_range = Range1d(start=y1_min, end=y1_max)
#Set the 2nd y-axis, range-name, range:
extra_y_ranges = {new_yrange_name: Range1d(start=y2_min - (y2_step*(num_of_steps)), end=y2_max + (y2_step*(num_of_steps+1)))}
#If the difference in ticks between the two axes is an even number:
else:
#Set the range of the 1st y-axis:
y_range = Range1d(start=y1_min, end=y1_max)
#Set the 2nd y-axis, range-name, range:
extra_y_ranges = {new_yrange_name: Range1d(start=y2_min - (y2_step*num_of_steps),
end=y2_max + (y2_step*num_of_steps))}
#Return y-range for primary and secondary y-axes:
return y_range, extra_y_ranges
######################################################################################################
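# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The data values, step sizes and the range name "co2" below are
# assumptions chosen only to show how the returned objects plug into a Bokeh
# figure with a secondary y-axis.
if __name__ == "__main__":
    from bokeh.plotting import figure
    from bokeh.models import LinearAxis
    # Align a 0-10 primary axis with a 380-420 secondary axis:
    y_range, extra_y_ranges = set_yranges_2y(0, 10, 380, 420, 1, 5, "co2")
    p = figure(y_range=y_range)
    p.extra_y_ranges = extra_y_ranges
    # Glyphs meant for the secondary axis are drawn with y_range_name="co2".
    p.add_layout(LinearAxis(y_range_name="co2"), "right")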
|
StarcoderdataPython
|
269256
|
<filename>todo/views/del_list.py<gh_stars>100-1000
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from todo.models import Task, TaskList
from todo.utils import staff_check
@login_required
@user_passes_test(staff_check)
def del_list(request, list_id: int, list_slug: str) -> HttpResponse:
"""Delete an entire list. Only staff members should be allowed to access this view.
"""
task_list = get_object_or_404(TaskList, id=list_id)
# Ensure user has permission to delete list. Get the group this list belongs to,
# and check whether current user is a member of that group AND a staffer.
if task_list.group not in request.user.groups.all():
raise PermissionDenied
if not request.user.is_staff:
raise PermissionDenied
if request.method == "POST":
TaskList.objects.get(id=task_list.id).delete()
messages.success(request, "{list_name} is gone.".format(list_name=task_list.name))
return redirect("todo:lists")
else:
task_count_done = Task.objects.filter(task_list=task_list.id, completed=True).count()
task_count_undone = Task.objects.filter(task_list=task_list.id, completed=False).count()
task_count_total = Task.objects.filter(task_list=task_list.id).count()
context = {
"task_list": task_list,
"task_count_done": task_count_done,
"task_count_undone": task_count_undone,
"task_count_total": task_count_total,
}
return render(request, "todo/del_list.html", context)
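# Hedged usage sketch (added; not part of the original file): one plausible
# way this view could be wired into the app's urls.py. The URL pattern and
# name below are illustrative assumptions, not the project's actual routes.
#
#   from django.urls import path
#   from todo.views.del_list import del_list
#
#   urlpatterns = [
#       path("<int:list_id>/<slug:list_slug>/delete/", del_list, name="del_list"),
#   ]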
|
StarcoderdataPython
|
9793378
|
<filename>lambdata_vdeb/__init__.py
"""
lambdata - a collection of encouragement
"""
import pandas as pd
import numpy as np
import random
from random import seed
from random import randint
from lambdata_vdeb.dataframe_Helper import report_missing_values
TEST = pd.DataFrame(np.ones(10))
|
StarcoderdataPython
|
6605472
|
<reponame>alffore/tileimagen
import requests
import cv2
import numpy as np
def analizaHisto(imagen):
"""
Analiza el histograma de imagenes
:param imagen:
:return:
"""
hist = cv2.calcHist([imagen], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist, hist).flatten()
return hist
def recuperaImagenSIC(id):
"""
Función que recupera una imagen del SIC via su URL en Internet
:param id:
:return:
"""
url = f'http://sic.gob.mx/images/{id}'
print(url)
response = requests.get(url)
if response.status_code == 200:
file = open(f'/tmp/{id}', "wb")
file.write(response.content)
file.close()
return cv2.cvtColor(cv2.imread(f'/tmp/{id}'), cv2.COLOR_BGR2RGB)
return None
def ajusteTam(imagen, tamx, tamy):
"""
:param imagen:
:param tamx:
:param tamy:
:return:
"""
return
def seccionaImagen(imagen, partesx, partesy):
"""
Función que fracciona una imagen regresa arreglo de fragmentos con histogramas
:param imagen:
:param partesx:
:param partesy:
:return:
"""
return
def cargaImagen(archivo):
"""
:param archivo:
:return:
"""
imagen = cv2.imread(archivo)
return cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB)
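# Hedged usage sketch (added; not part of the original module). The file name
# "sample.jpg" is an assumption; it only shows how cargaImagen and
# analizaHisto chain together.
if __name__ == "__main__":
    img = cargaImagen('sample.jpg')
    hist = analizaHisto(img)
    # 8x8x8 bins over three channels -> a flattened vector of 512 values
    print(hist.shape)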
|
StarcoderdataPython
|
3411293
|
from typing import List
from torch import Tensor
from torch.nn import Module
class ViewChange(Module):
def __init__(self, new_size: List[int]):
super().__init__()
self.new_size = new_size
def forward(self, x: Tensor):
n = x.shape[0]
return x.view([n] + self.new_size)
class ViewImageAsVector(Module):
def __init__(self):
super().__init__()
def forward(self, x: Tensor):
assert x.dim() == 4
n, c, w, h = x.shape
return x.view(n, c * w * h)
class ViewVectorAsMultiChannelImage(Module):
def __init__(self):
super().__init__()
def forward(self, x: Tensor):
assert x.dim() == 2
n, c = x.shape
return x.view(n, c, 1, 1)
class ViewVectorAsOneChannelImage(Module):
def __init__(self):
super().__init__()
def forward(self, x: Tensor):
assert x.dim() == 2
n, c = x.shape
return x.view(n, 1, c, 1)
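# Hedged usage sketch (added; not part of the original module): round-trip a
# random batch through ViewImageAsVector and ViewVectorAsMultiChannelImage.
if __name__ == "__main__":
    import torch
    x = torch.randn(4, 3, 8, 8)
    vec = ViewImageAsVector()(x)                 # shape (4, 192)
    img = ViewVectorAsMultiChannelImage()(vec)   # shape (4, 192, 1, 1)
    print(vec.shape, img.shape)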
|
StarcoderdataPython
|
3526890
|
#Find dynamically allocated string structures in Go binaries.
# type stringStruct struct {
# str unsafe.Pointer
# len int
# }
#Different instructions per architecture. Multiple solutions are possible.
#Future ToDo: add newly discovered instruction sequences.
#@author <EMAIL>
#@category goscripts
#@keybinding
#@menupath
#@toolbar
from ghidra.program.model.lang import OperandType
#x86
#LEA REG, [STRING_ADDRESS]
#MOV [ESP + ..], REG
#MOV [ESP + ..], STRING_SIZE
def string_rename_x86():
for block in getMemoryBlocks():
if block.getName() != ".text":
continue
start = block.getStart()
ins = getInstructionAt(start)
while ins:
op_type = ins.getOperandType(1)
reg = ins.getRegister(0)
#Check first instruction: LEA REG, [STRING_ADDRESS]
if ins.getMnemonicString() != "LEA" or reg is None or OperandType.isAddress(op_type) is False:
ins = getInstructionAfter(ins)
continue
ins_next = getInstructionAfter(ins)
#Check second instruction: MOV [ESP + ..], REG (where REG is the same as in previous instruction)
if ins_next.getMnemonicString() != "MOV" or ins_next.getRegister(1) != reg or ins_next.getOpObjects(0)[0].toString() != "ESP":
ins = getInstructionAfter(ins)
continue
ins_next2 = getInstructionAfter(ins_next)
op_type = ins_next2.getOperandType(1)
#Check third instruction: MOV [ESP + ..], STRING_SIZE
if ins_next2.getMnemonicString() != "MOV" or ins_next2.getOpObjects(0)[0].toString() != "ESP" or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
address = ins.getPrimaryReference(1).getToAddress()
length = ins_next2.getOpObjects(1)[0].getValue()
try:
#Create string.
createAsciiString(address, length)
#print "SUCCESS at %s" % address
except:
#print "ERROR at address %s" % ins.getAddress()
ins = getInstructionAfter(ins)
continue
ins = getInstructionAfter(ins)
#x86_64
#LEA REG, [STRING_ADDRESS]
#MOV [RSP + ..], REG
#MOV [RSP + ..], STRING_SIZE
def string_rename_x86_64():
for block in getMemoryBlocks():
if block.getName() != ".text":
continue
start = block.getStart()
ins = getInstructionAt(start)
while ins:
op_type = ins.getOperandType(1)
reg = ins.getRegister(0)
#Check first instruction: LEA REG, [STRING_ADDRESS]
if ins.getMnemonicString() != "LEA" or reg is None or OperandType.isAddress(op_type) is False:
ins = getInstructionAfter(ins)
continue
ins_next = getInstructionAfter(ins)
#Check second instruction: MOV [RSP + ..], REG (where REG is the same as in previous instruction)
if ins_next.getMnemonicString() != "MOV" or ins_next.getRegister(1) != reg or ins_next.getOpObjects(0)[0].toString() != "RSP":
ins = getInstructionAfter(ins)
continue
ins_next2 = getInstructionAfter(ins_next)
op_type = ins_next2.getOperandType(1)
#Check third instruction: MOV [RSP + ..], STRING_SIZE
if ins_next2.getMnemonicString() != "MOV" or ins_next2.getOpObjects(0)[0].toString() != "RSP" or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
address = ins.getPrimaryReference(1).getToAddress()
length = ins_next2.getOpObjects(1)[0].getValue()
try:
#Create string.
createAsciiString(address, length)
#print "SUCCESS at %s" % address
except:
#print "ERROR at address %s" % ins.getAddress()
ins = getInstructionAfter(ins)
continue
ins = getInstructionAfter(ins)
#ARM, 32-bit
#LDR REG, [STRING_ADDRESS_POINTER]
#STR REG, [SP, ..]
#MOV REG, STRING_SIZE
#STR REG, [SP, ..]
def string_rename_arm():
for block in getMemoryBlocks():
if block.getName() != ".text":
continue
start = block.getStart()
ins = getInstructionAt(start)
while ins:
op_type = ins.getOperandType(1)
#Check first instruction: LDR REG, [STRING_ADDRESS_POINTER]
if ins.getMnemonicString() != "ldr" or ins.getRegister(0) is None or OperandType.isAddress(op_type) is False or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
reg = ins.getRegister(0)
ins_next = getInstructionAfter(ins)
#Check second instruction: STR REG, [SP + ..] (where REG is the same as in previous instruction)
if ins_next.getMnemonicString() != "str" or ins_next.getRegister(0) != reg or ins_next.getOpObjects(1)[0].toString() != "sp":
ins = getInstructionAfter(ins)
continue
ins_next2 = getInstructionAfter(ins_next)
op_type = ins_next2.getOperandType(1)
#Check third instruction: MOV REG, STRING_SIZE
if ins_next2.getMnemonicString() != "mov" or ins_next2.getRegister(0) is None or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
reg = ins_next2.getRegister(0)
ins_next3 = getInstructionAfter(ins_next2)
#Check fourth instruction: STR REG, [SP + ..] (where REG is the same as in previous instruction)
if ins_next3.getMnemonicString() != "str" or ins_next3.getRegister(0) != reg or ins_next3.getOpObjects(1)[0].toString() != "sp":
ins = getInstructionAfter(ins)
continue
#print "ins: %s" % ins
address_pointer = getInt(ins.getPrimaryReference(1).getToAddress())
address = currentProgram.getAddressFactory().getAddress(hex(address_pointer))
length = ins_next2.getOpObjects(1)[0].getValue()
try:
#Create string.
createAsciiString(address, length)
#print "SUCCESS at %s" % address
except:
#print "ERROR at address %s" % ins.getAddress()
ins = getInstructionAfter(ins)
continue
ins = getInstructionAfter(ins)
#ARM, 64-bit - version 1
#ADRP REG, [STRING_ADDRESS_START]
#ADD REG, REG, INT
#STR REG, [SP, ..]
#ORR REG, REG, STRING_SIZE
#STR REG, [SP, ..]
#ARM, 64-bit - version 2
#ADRP REG, [STRING_ADDRESS_START]
#ADD REG, REG, INT
#STR REG, [SP, ..]
#MOV REG, STRING_SIZE
#STR REG, [SP, ..]
def string_rename_arm_64():
for block in getMemoryBlocks():
if block.getName() != ".text":
continue
start = block.getStart()
ins = getInstructionAt(start)
while ins:
op_type = ins.getOperandType(1)
reg = ins.getRegister(0)
#Check first instruction: ADRP REG, [STRING_ADDRESS_START]
if ins.getMnemonicString() != "adrp" or reg is None or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
ins_next = getInstructionAfter(ins)
op_type = ins_next.getOperandType(2)
#Check second instruction: ADD REG, REG, INT (where REG is the same as in previous instruction)
if ins_next.getMnemonicString() != "add" or ins_next.getRegister(0) != reg or OperandType.isScalar(op_type) is False:
ins = getInstructionAfter(ins)
continue
ins_next2 = getInstructionAfter(ins_next)
#Check third instruction: STR REG, [SP + ..] (where REG is the same as in previous instruction)
if ins_next2.getMnemonicString() != "str" or ins_next2.getRegister(0) != reg or ins_next2.getOpObjects(1)[0].toString() != "sp":
ins = getInstructionAfter(ins)
continue
ins_next3 = getInstructionAfter(ins_next2)
reg = ins_next3.getRegister(0)
#Check fourth instruction: ORR REG, REG, STRING_SIZE
if ins_next3.getMnemonicString() == "orr" and reg is not None and OperandType.isScalar(ins_next3.getOperandType(2)) is True:
length = ins_next3.getOpObjects(2)[0].getValue()
#Check fourth instruction: MOV REG, STRING_SIZE
elif ins_next3.getMnemonicString() == "mov" and reg is not None and OperandType.isScalar(ins_next3.getOperandType(1)) is True:
length = ins_next3.getOpObjects(1)[0].getValue()
else:
ins = getInstructionAfter(ins)
continue
ins_next4 = getInstructionAfter(ins_next3)
#Check fifth instruction: STR REG, [SP + ..] (where REG is the same as in previous instruction)
if ins_next4.getMnemonicString() != "str" or ins_next4.getRegister(0) != reg or ins_next4.getOpObjects(1)[0].toString() != "sp":
ins = getInstructionAfter(ins)
continue
#print "ins: %s" % ins
address_int = int(ins.getOpObjects(1)[0].getValue() + ins_next.getOpObjects(2)[0].getValue())
address = currentProgram.getAddressFactory().getAddress(hex(address_int))
try:
#Create string.
createAsciiString(address, length)
#print "SUCCESS at %s" % address
except:
#print "ERROR at address %s" % ins.getAddress()
ins = getInstructionAfter(ins)
continue
ins = getInstructionAfter(ins)
#Check program architecture.
language_id = currentProgram.getLanguageID()
print "lang: %s" % language_id
pointer_size = currentProgram.getDefaultPointerSize()
if language_id.toString().startswith("ARM"):
print "32 BIT ARM"
string_rename_arm()
elif language_id.toString().startswith("AARCH64"):
print "64 BIT ARM"
string_rename_arm_64()
elif language_id.toString().startswith("x86") and pointer_size == 4:
print "32 BIT x86"
string_rename_x86()
elif language_id.toString().startswith("x86") and pointer_size == 8:
print "64 BIT x86"
string_rename_x86_64()
else:
print "ERROR: unknown arch."
|
StarcoderdataPython
|
8145718
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-02 16:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
('invites', '0002_auto_20180101_0208'),
]
operations = [
migrations.RenameField(
model_name='sessioneventinvite',
old_name='invitee',
new_name='sub',
),
migrations.RemoveField(
model_name='sessioneventinvite',
name='event',
),
migrations.AddField(
model_name='sessioneventinvite',
name='captain',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='accounts.CustomUser'),
preserve_default=False,
),
]
|
StarcoderdataPython
|
12828909
|
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from apps.app import application
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^payfast/', include('payfast.urls'))
]
urlpatterns += i18n_patterns(
url(r'', include(application.urls)),
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
6426239
|
<filename>wms/models.py<gh_stars>10-100
# Keeping manage.py happy
|
StarcoderdataPython
|
1938173
|
from tkinter import *
from tkinter.tix import TEXT
from urllib import response
import requests
import ast
import subprocess
import os
from tkinter import messagebox
import pyttsx3
from pathlib import Path
# from tkinter.tix import *
# global so it can be accessed in the actions.py file when needed
# global category
# global blacklist
location = Path('actions\\stupid_settings_jokes.txt')
engine = pyttsx3.init()
# print(location)
BG_GREY = "#ABB289"
BG_COLOUR = "#17202A"
TEXT_COLOUR = "#EAECEE"
BG_DARK_BLUE = "#1D3557"
BG_BLUE = "#457B9D"
BG_LIGHT_BLUE = "#A8DADC"
BG_WHITE = "#F1FAEE"
BG_RED = "#E63946"
FONT = "Helvetica 14"
FONT_ENTRY = "Roboto 30"
FONT_BOLD = "Helvetica 13 bold"
FONT_BOLD_SEND = "Helvetica 25 bold"
FONT_BOLD_HEADING = "Helvetica 30 bold"
FONT_BOLD_CHECKBOX = "Helvetica 16 bold"
# class MenuBar(Menu):
# """Menubar in the ChattyBot application"""
# def __init__(self, window):
# Menu.__init__(self, window)
# file = Menu(self, tearoff=False)
# file.add_command(label="New")
# file.add_command(label="Open")
# file.add_command(label="Save")
# file.add_command(label="Save as")
# file.add_separator()
# file.add_command(label="Exit", underline=1, command=self.quit)
# self.add_cascade(label="File",underline=0, menu=file)
# edit = Menu(self, tearoff=0)
# edit.add_command(label="Undo")
# edit.add_separator()
# edit.add_command(label="Cut")
# edit.add_command(label="Copy")
# edit.add_command(label="Paste")
# self.add_cascade(label="Edit", menu=edit)
# help = Menu(self, tearoff=0)
# help.add_command(label="About", command=self.about)
# self.add_cascade(label="Help", menu=help)
# def exit(self):
# self.exit
# def about(self):
# messagebox.showinfo('PythonGuides', 'Python Guides aims at providing best practical tutorials')
class ChattyBot(Menu):
def __init__(self):
self.window = Tk()
self.users_name = "User"
        # Starts the NLU server via the shell; subprocess.Popen does not wait for it to finish, and "&&" chains multiple commands. UNCOMMENT THESE TWO WHEN TESTING / USING
NLU_server = subprocess.Popen('rasa run --enable-api', shell=True)
actions_server = subprocess.Popen('cd actions && rasa run actions', shell=True)
# subprocess.run(['NLU_server'], stdout=subprocess.PIPE, input='rasa run --enable-api')
# # Runs actions server
# subprocess.run(['Actions_server'], stdout=subprocess.PIPE, text=True, input='conda activate env', shell=True)
# subprocess.run(['Actions_server'], stdout=subprocess.PIPE, text=True, input='cd actions', shell=True)
# subprocess.run(['Actions_server'], stdout=subprocess.PIPE, text=True, input='rasa run actions', shell=True)
# Runs virtual environment and actions server
self.gender_engine_number = 0
# NLU server
self.url = "http://localhost:5005/webhooks/rest/webhook"
self.engine = pyttsx3.init()
self.voice = False
self.rate_value = 200
self.volume_value = 1.0
self.opened_settings = False
# Number of files in the "conversations" directory to keep track of conversations
        self.number = len(os.listdir(".\\conversations")) + 1
self.popup_name()
def run(self):
"""Runs application"""
self.window.mainloop()
def _setup_main_window(self):
"""Configures root window"""
self.window.title("ChattyBot")
# Icon in window
self.window.iconbitmap("little_robot_sh.ico")
# # create the file object)
# edit = Menu(menu)
# # adds a command to the menu option, calling it exit, and the
# # command it runs on event is client_exit
# edit.add_command(label="Undo")
# #added "file" to our menu
# menu.add_cascade(label="Edit", menu=edit)
# help = Menu(menu)
# help.add_cascade(label="Help", menu=help)
# send_button = Button(bottom_label, text="Send", font=FONT_BOLD, width=20, bg=BG_GREY, command=lambda: self._on_enter_pressed(None))
# send_button.place(relx=0.77, rely=0.008, relheight=0.06, relwidth=0.22)
# If true, then window can be resized
self.window.resizable(width=False, height=False)
# # head label
# # pady moves label down a bit
# head_label = Label(self.window, bg=BG_COLOUR, fg=TEXT_COLOUR, text="Welcome", font=FONT_BOLD, pady=10)
# # Number between 0 and 1. 1 indicates takes whole width of window
# head_label.place(relwidth=1)
# divider between head label and user inputs
line = Label(self.window, width=450, bg=BG_BLUE)
line.place(relwidth=1, rely=0.07, relheight=0.012)
# text widget - 20 characters wide, 2 characters high, padding
        # padding x in tuple is to stop the text sliding under the scroll bar
self.text_widget = Text(self.window, width=20, height=2, bg=BG_WHITE, fg=BG_DARK_BLUE, font=FONT, padx=5, pady=5)
self.text_widget.place(relheight=0.745, relwidth=0.974, rely=0.08)
# Disable so only text can be displayed
self.text_widget.configure(cursor="arrow", state=DISABLED)
# Different "container" for the scroll bar so text doesn't go behind it
self.scroll_widget = Frame(self.window, bg=BG_WHITE)
self.scroll_widget.place(relheight=0.745, relwidth=0.026, relx=0.974, rely=0.081)
# scroll bar - only for text widget
scrollbar = Scrollbar(self.scroll_widget)
scrollbar.place(relheight=1)
# command changes y-position of widget to view more
scrollbar.configure(command=self.text_widget.yview)
# bottom label - for entry box background
self.bottom_label = Label(self.window, bg=BG_BLUE, height=80)
self.bottom_label.place(relwidth=1, rely=0.825)
# Making a frame for entry box and cursor caret type
entry_frame = Frame(self.bottom_label, bg=BG_DARK_BLUE, cursor="xterm")
entry_frame.place(relwidth=0.74, relheight=0.06, rely=0.008, relx=0.011)
# text entry box
self.msg_entry = Entry(self.bottom_label, bg=BG_DARK_BLUE, fg=TEXT_COLOUR, font=FONT_ENTRY, borderwidth=0, insertbackground=BG_WHITE)
self.msg_entry.place(relwidth=0.69, relheight=0.06, rely=0.008, relx=0.03)
# Label for entry box - move cursor to the right
# self.entry_label = Label(self.window, bg=BG_BLUE)
# self.entry_label.place(relwidth=0.05, relheight=0.06, rely=0.008, relx=0)
# automatically selected when app is opened
self.msg_entry.focus()
# Allows message to be sent via enter key in addition send button
self.msg_entry.bind("<Return>", self._on_enter_pressed)
# send button - calls function via lambda
send_button = Button(self.bottom_label, text="Send", font=FONT_BOLD_SEND, fg=BG_WHITE, activeforeground=BG_DARK_BLUE, width=20, bg=BG_BLUE, command=lambda: self._on_enter_pressed(None))
send_button.place(relx=0.77, rely=0.008, relheight=0.06, relwidth=0.22)
# # Create value for an option in "Settings"
# setting_value = StringVar(self.window, "Settings")
# settings = OptionMenu(self.window, setting_value, "Export Conversation", "Exit")
# settings.place(relx=0, rely=0, relheight=0.1875, relwidth=0.25)
var = StringVar()
label = Label(self.window, textvariable=var)
# Menubutton automatically highlights when mouse hovers button, so active background makes it same colour
self.menubutton = Menubutton(self.window, text="Settings", borderwidth=1, relief="ridge", indicatoron=False, font=FONT_BOLD, bg=BG_BLUE, fg=BG_WHITE, activeforeground=BG_WHITE, activebackground=BG_BLUE)
self.menu = Menu(self.menubutton, tearoff=False)
self.menubutton.configure(menu=self.menu)
self.menu.add_radiobutton(label="Export Conversation", variable=var, value="Export Conversation", command=self._export_convo)
self.menu.add_radiobutton(label="Edit Joke Settings", variable=var, value="Edit Joke Settings", command=self._joke_settings)
self.menu.add_radiobutton(label="Edit Voice Settings", variable=var, value="Edit Voice Settings", command=self._voice_settings)
self.menu.add_radiobutton(label="Exit", variable=var, value="Exit", command=self.exitApp)
# label.pack(side="bottom", fill="x")
self.menubutton.place(relx=0, rely=0, relheight=0.07, relwidth=0.25)
# Help button for main menu
self.help = Button(self.window, text="Help", font=FONT_BOLD, width=20, bg=BG_BLUE, fg=BG_WHITE, borderwidth=1, activeforeground=BG_WHITE, relief="ridge", activebackground=BG_BLUE, command=self._help_screen)
self.help.place(relx=0.25, rely=0, relheight=0.07, relwidth=0.25)
# Voice button for main menu
self.voice_button = Button(self.window, text="Voice: Off", font=FONT_BOLD, width=20, bg=BG_BLUE, fg=BG_WHITE, activeforeground=BG_WHITE, borderwidth=1, relief="ridge", activebackground=BG_BLUE, command=self._toggle_voice)
self.voice_button.place(relx=0.5, rely=0, relheight=0.07, relwidth=0.25)
# Clear conversation button for main menu
self.convo_button = Button(self.window, text="Clear Conversation", font=FONT_BOLD, width=20, bg=BG_BLUE, fg=BG_WHITE, activeforeground=BG_WHITE, borderwidth=1, relief="ridge", activebackground=BG_BLUE, command=self._clear_text)
self.convo_button.place(relx=0.75, rely=0, relheight=0.07, relwidth=0.25)
# give widget attributes with .configure(). dimensions correspond with screen aspect ratio 4:3
self.window.configure(width=733.33, height=550, bg=BG_COLOUR)
def _voice_settings(self):
"""Shows voice settings to change gender of voice, voice volume, and voice rate"""
# Creates a window which can be destroyed
self.voice_settings_window = Toplevel()
self.voice_settings_window.wm_title("Change Voice Settings!")
self.voice_settings_window.resizable(width=False, height=False)
# Image icon on help screen in program - subtlety at its finest.
self.voice_settings_window.iconbitmap(r".\\duck.ico")
# Rate of voice heading
self.rate = Label(self.voice_settings_window, text="Rate of Voice:", font=FONT_ENTRY, fg=BG_DARK_BLUE, bg=BG_WHITE)
self.rate.place(rely=0.05, relx=0.05)
# Creating a slider for rate of voice
self.rate_slider = Scale(self.voice_settings_window, from_=0, to=400, resolution=2, orient="horizontal", length=500, showvalue=0, troughcolor=BG_BLUE, bg=BG_WHITE, bd=0, width=30, relief=FLAT, highlightbackground=BG_WHITE, activebackground=BG_WHITE)
self.rate_slider.place(rely=0.15, relx=0.05)
self.rate_slider.set(200)
# Volume of voice heading
self.volume = Label(self.voice_settings_window, text="Volume of Voice:", font=FONT_ENTRY, fg=BG_DARK_BLUE, bg=BG_WHITE)
self.volume.place(rely=0.25, relx=0.05)
# Creating a slider for volume of voice
self.volume_slider = Scale(self.voice_settings_window, from_=0, to=1, resolution=0.01, orient="horizontal", length=500, showvalue=0, troughcolor=BG_BLUE, bg=BG_WHITE, bd=0, width=30, relief=FLAT, highlightbackground=BG_WHITE, activebackground=BG_WHITE)
self.volume_slider.place(rely=0.35, relx=0.05)
self.volume_slider.set(1)
# changing gender of voice (male or female)
self.gender_value = StringVar(self.voice_settings_window, "1")
self.gender = Label(self.voice_settings_window, text="Gender of Voice:", font=FONT_ENTRY, fg=BG_DARK_BLUE, bg=BG_WHITE)
self.gender.place(rely=0.45, relx=0.05)
# Creating checkboxes for male and female options
# Male
self.male_checkbutton = Radiobutton(self.voice_settings_window, text="Male", value="1", variable=self.gender_value, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.male_checkbutton.place(rely=0.55, relx=0.05)
# Female
self.female_checkbutton = Radiobutton(self.voice_settings_window, text="Female", value="2", variable=self.gender_value, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.female_checkbutton.place(rely=0.6, relx=0.05)
# Disables menu button for "edit joke settings" so no more than 1 window appears
self.menu.entryconfig(2, state=DISABLED)
self.voice_settings_window.protocol("WM_DELETE_WINDOW", self._voice_settings_window_close)
self.voice_settings_window.configure(width=733.33, height=550, bg=BG_WHITE)
def _voice_settings_window_close(self):
"""Enables the voice index in main menu when corresponding window closes"""
self.volume_value = self.volume_slider.get()
self.rate_value = self.rate_slider.get()
self.opened_settings = True
self.voice_settings_window.destroy()
self.menu.entryconfig(2, state=NORMAL)
def _joke_settings(self):
"""Shows joke settings to change black list etc."""
# Creates a window which can be destroyed
self.jokes_setting_window = Toplevel()
self.jokes_setting_window.wm_title("Joke Settings!")
self.jokes_setting_window.resizable(width=False, height=False)
# Image icon on help screen in program
self.jokes_setting_window.iconbitmap(r".\\address_book.ico")
self.jokes_setting_window.configure(width=733.33, height=550, bg=BG_WHITE)
# Blacklist subtitle
self.blacklist = Label(self.jokes_setting_window, text="Blacklist:", font=FONT_ENTRY, fg=BG_DARK_BLUE, bg=BG_WHITE)
self.blacklist.place(rely=0.05, relx=0.05)
# Categories subtitle
self.categories = Label(self.jokes_setting_window, text="Categories:", font=FONT_ENTRY, fg=BG_DARK_BLUE, bg=BG_WHITE)
self.categories.place(rely=0.05, relx=0.5)
# Making a checkbox for types of jokes the user wants - Checkboxes are in intervals of 15.833...% vertically
# BLACKLIST
# NSFW checkbox
self.nsfw_value = IntVar()
self.nsfw_checkbutton = Checkbutton(self.jokes_setting_window, text="NSFW", variable=self.nsfw_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.nsfw_checkbutton.place(rely=0.1857142857, relx=0.05)
# Religious checkbox
self.religious_value = IntVar()
self.religious_checkbutton = Checkbutton(self.jokes_setting_window, text="Religious", variable=self.religious_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.religious_checkbutton.place(rely=0.3214285714, relx=0.05)
# Political checkbox
self.political_value = IntVar()
self.political_checkbutton = Checkbutton(self.jokes_setting_window, text="Political", variable=self.political_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.political_checkbutton.place(rely=0.4571428571, relx=0.05)
# Racist checkbox
self.racist_value = IntVar()
self.racist_checkbutton = Checkbutton(self.jokes_setting_window, text="Racist", variable=self.racist_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.racist_checkbutton.place(rely=0.5928571429, relx=0.05)
# Sexist checkbox
self.sexist_value = IntVar()
self.sexist_checkbutton = Checkbutton(self.jokes_setting_window, text="Sexist", variable=self.sexist_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.sexist_checkbutton.place(rely=0.7285714286, relx=0.05)
# Explicit checkbox
self.explicit_value = IntVar()
self.explicit_checkbutton = Checkbutton(self.jokes_setting_window, text="Explicit", variable=self.explicit_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.explicit_checkbutton.place(rely=0.8642857143, relx=0.05)
# CATEGORIES - automatically checked
# Programming checkbox
self.programming_value = IntVar()
self.programming_checkbutton = Checkbutton(self.jokes_setting_window, text="Programming", variable=self.programming_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.programming_checkbutton.place(rely=0.1857142857, relx=0.5)
# Miscellaneous checkbox
self.miscellaneous_value = IntVar()
self.miscellaneous_checkbutton = Checkbutton(self.jokes_setting_window, text="Miscellaneous", variable=self.miscellaneous_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.miscellaneous_checkbutton.place(rely=0.3214285714, relx=0.5)
# Pun checkbox
self.pun_value = IntVar()
self.pun_checkbutton = Checkbutton(self.jokes_setting_window, text="Pun", variable=self.pun_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.pun_checkbutton.place(rely=0.4571428571, relx=0.5)
# Spooky checkbox
self.spooky_value = IntVar()
self.spooky_checkbutton = Checkbutton(self.jokes_setting_window, text="Spooky", variable=self.spooky_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.spooky_checkbutton.place(rely=0.5928571429, relx=0.5)
# Christmas
self.christmas_value = IntVar()
self.christmas_checkbutton = Checkbutton(self.jokes_setting_window, text="Christmas", variable=self.christmas_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.christmas_checkbutton.place(rely=0.7285714286, relx=0.5)
# Dark checkbox
self.dark_value = IntVar()
self.dark_checkbutton = Checkbutton(self.jokes_setting_window, text="Dark", variable=self.dark_value, onvalue=0, offvalue=1, fg=BG_DARK_BLUE, bg=BG_WHITE, activeforeground=BG_DARK_BLUE, activebackground=BG_WHITE, padx=5, font=FONT_BOLD_CHECKBOX)
self.dark_checkbutton.place(rely=0.8642857143, relx=0.5)
self.dark_checkbutton.toggle()
# Disables menu button for "edit joke settings" so no more than 1 window appears
self.menu.entryconfig(1, state=DISABLED)
self.jokes_setting_window.protocol("WM_DELETE_WINDOW", self._jokes_settings_window_close)
# self.blacklist_choices = ("nsfw", "religious", "political", "racist", "sexist", "explicit") <-- CASE SENSITIVE
# self.category_choices = ("Programming", "Miscellaneous", "Pun", "Spooky", "Christmas")
def _jokes_settings_window_close(self):
"""Enables the jokes index in main menu when corresponding window closes"""
# global category
# global blacklist
# Checks the value of each checkbox and sends it to the jokes class in actions.py - updates blacklist and categories in jokes
# default values
#getting settings contents
with open(location, "r") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
category = ast.literal_eval(lines[0])
blacklist = ast.literal_eval(lines[1])
# print(category)
# Create category list to post to jokes api as necessary
category_copy = ""
blacklist_copy = ""
# Adds word to list if it's in string and turned off
if self.programming_value.get() == 0:
category_copy += "Programming," # Remove "Programming," etc.
if self.dark_value.get() == 0:
category_copy += "Dark,"
if self.miscellaneous_value.get() == 0:
category_copy += "Miscellaneous,"
if self.pun_value.get() == 0:
category_copy += "Pun,"
if self.spooky_value.get() == 0:
category_copy += "Spooky,"
if self.christmas_value.get() == 0:
category_copy += "Christmas"
if len(category_copy) > 0:
if category_copy[-1] == ",":
category_copy = category_copy[:-1]
# Adds word to list if it is on
# Create blacklist list to post to jokes api as needed
if self.nsfw_value.get() == 0:
blacklist_copy += "nsfw,"
if self.religious_value.get() == 0:
blacklist_copy += "religious,"
if self.political_value.get() == 0:
blacklist_copy += "political,"
if self.racist_value.get() == 0:
blacklist_copy += "racist,"
if self.sexist_value.get() == 0:
blacklist_copy += "sexist,"
if self.explicit_value.get() == 0:
blacklist_copy += "explicit"
if len(blacklist_copy) > 0:
if blacklist_copy[-1] == ",":
blacklist_copy = blacklist_copy[:-1]
# Changing global variables
blacklist.append(blacklist_copy)
blacklist.pop(0)
category.append(category_copy)
category.pop(0)
# Deleting settings contents, then updating it
with open(location, "w") as file:
file.write("{}\n{}".format(category, blacklist))
action_blacklist = blacklist
action_category = category
# print(action_blacklist)
# print(action_category)
# print(blacklist, category)
self.jokes_setting_window.destroy()
self.menu.entryconfig(1, state=NORMAL)
def _clear_text(self):
"""Clear the conversation on screen"""
self.text_widget.configure(state=NORMAL)
self.text_widget.delete("1.0", END)
self.text_widget.configure(state=DISABLED)
def _export_convo(self):
"""Export the conversation on screen into a text file - """
conversation = self.text_widget.get("1.0", END)
with open(".\conversations\conversation{}.txt".format(self.number), "w") as file:
file.write(conversation)
self.number += 1
def _help_screen(self):
"""Displays help screen with instructions to inform user how to use program"""
# Creates a window which can be destroyed
self.help_window = Toplevel()
self.help_window.wm_title("How to Use ChattyBot!")
self.help_window.resizable(width=False, height=False)
# Image icon on help screen in program
self.help_window.iconbitmap(r".\\address_book.ico")
self.help_window.configure(width=733.33, height=550, bg=BG_WHITE)
# Disable help button so as to not produce more "help" screens
self.help.configure(state=DISABLED)
# SCROLLBAR - using Canvas :)
self.container = Frame(self.help_window, bg=BG_WHITE)
canvas = Canvas(self.container, bg=BG_WHITE)
scrollbar = Scrollbar(self.container, orient="vertical", command=canvas.yview)
self.scrollable_frame = Frame(canvas, bg=BG_WHITE)
self.scrollable_frame.bind(
"<Configure>",
lambda e: canvas.configure(
scrollregion=canvas.bbox("all")
)
)
canvas.create_window((0, 0), window=self.scrollable_frame, anchor="nw")
canvas.configure(yscrollcommand=scrollbar.set)
self.container.place(relx=0, rely=0, width=733.33, height=550)
canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
# CUSTOMISE LABELS + TEXT ON HELP PAGE
# Heading
self.heading = Label(self.scrollable_frame, font=FONT_BOLD_HEADING, fg=BG_DARK_BLUE, bg=BG_WHITE, text="How to use ChattyBot!").pack()
# self.heading.place(relx=0.05, rely=0.05)
# Phrases text
self.phrase = Label(self.scrollable_frame, font=FONT_BOLD_CHECKBOX, fg=BG_DARK_BLUE, bg=BG_WHITE,
text="""Speak about any of these topics with ChattyBot and hit 'Enter' or\nthe Send Button:
o The time in any city! - "What's the time in Prague?"
o The time according to your IP address - "What time is it?"
o The weather in any place - "Is it cold in Djibouti?"
o Any definition - "Define "move"" (Use the quotes!)
o Country facts and demographics - "What is Nepal's currency?"
o The meaning of life
o COVID data anywhere in the world - "How many covid cases are in
Hawaii?"
o Jokes - "Tell me a joke NOW"
o Trivia - "Trivia"
o Play any song - "play The Temple Of The King" (use the keyword
"play"!)
o Music metadata through the artist's spotify link - "Link:"
Click the "Settings" button to:
o Export the current conversation, which is found in the folder
"conversations"
o Edit the Joke Settings to change the categories and blacklist
for jokes
o Edit Voice Settings
o Exit the application
Hit the "Voice: Off" button to toggle the AI's voice in responses.
Hit the "Clear Conversation" button to erase your conversation with
the AI and start over!
If you have any suggestions, email <EMAIL> or visit
my github: github.com\patrickdbrowne
""",
justify='left').pack()
# self.phrase.place(relx=0.05, rely=0.15)
self.help_window.protocol("WM_DELETE_WINDOW", self._help_window_close)
def _help_window_close(self):
""" when help window closes, the help button is enabled so it can be reopened.
Prevents multiple instances of the same window."""
self.help_window.destroy()
self.help.configure(state=NORMAL)
def _on_enter_pressed(self, event):
"""Retrieves message"""
msg = self.msg_entry.get()
self._insert_message(msg, self.users_name)
def _insert_message(self, msg, sender):
"""Displays user's message in text area"""
# If user returns ""
if not msg:
return
# Delete message in the textbox
self.msg_entry.delete(0, END)
# format user's message to display
msg1 = "{}: {}\n\n".format(sender, msg)
# change state of text box momentarily so string can be appended there, then disable it so it can't be typed on later
self.text_widget.configure(state=NORMAL)
self.text_widget.insert(END, msg1)
self.text_widget.configure(state=DISABLED)
# Connects to my RASA program
NLU_Object = {
"message": msg,
"sender": self.users_name,
}
# gives message to rasa server
str_message_data = requests.post(self.url, json=NLU_Object)
# turns returned stringed dictionary into a dictionary
message_data = ast.literal_eval(str_message_data.text)
# Iterates through multiple message objects that need to have it's texts displayed in succession
for response in range(len(message_data)):
bot_response = message_data[response]["text"]
msg2= "ChattyBot: {}\n\n".format(bot_response)
# change state of text box momentarily so string can be appended there, then disable it so it can't be typed on later
self.text_widget.configure(state=NORMAL)
self.text_widget.insert(END, msg2)
self.text_widget.configure(state=DISABLED)
# Always allows user to see last message sent (scrolls down)
self.text_widget.see(END)
# Orates the bot's message if the voice button is turned on
if self.voice:
if self.opened_settings:
# Get value from sliders and incorporates it. Changes gender of voice, too.
engine.setProperty("rate", self.rate_value)
engine.setProperty("volume", self.volume_value)
# If voice is turned on before settings, it will ignore the "self.gender_value.get()" which hasn't been defined (avoids much pain)
# sets gender to corresponding number in pyttsx3
if self.gender_value.get() == "1":
self.gender_engine_number = 0
if self.gender_value.get() == "2":
self.gender_engine_number = 1
engine.setProperty("voice", engine.getProperty("voices")[self.gender_engine_number].id)
self.engine.say(bot_response)
self.engine.runAndWait()
def _toggle_voice(self):
"""Toggles the voice in the chatbot and text in button"""
# print(self.volume_value)
# print(self.rate_value)
# print(self.gender_value.get())
if self.voice:
self.voice_button.config(text="Voice: Off")
self.voice = False
else:
self.voice_button.config(text="Voice: On")
self.voice = True
def popup_name(self):
"""A pop up window to get the user's name"""
# Creates a window which can be destroyed
self.win = Toplevel()
self.win.wm_title("Name")
# Name label
self.name_label = Label(self.win, text="What's your name?")
self.name_label.grid(row=0, column=0)
# Entry box
self.name_box = Entry(self.win, text="")
self.name_box.bind("<Return>", self._get_name)
self.name_box.grid(row=1,column=0)
def _get_name(self, event):
"""Gets user name then destroys window"""
self.name = self.name_box.get()
# Stops anything from happening if user doesn't enter anything
if self.name == "":
return
elif self.name != "":
self.users_name = self.name
# Creates main program once name has been entered
# creates layout of window widget
self._setup_main_window()
self.win.destroy()
def exitApp(self):
"""Exit app"""
exit()
if __name__ == "__main__":
app = ChattyBot()
app.run()
|
StarcoderdataPython
|
5111535
|
"""
This class provides functionality for managing a generic sqlite or mysql
database:
* reading specific fields (with the possibility to filter by field values)
* storing calculated values in the dataset
Created on May 11 2018
@author: <NAME>
"""
from __future__ import print_function    # For python 2 compatibility
import os
import pandas as pd
import MySQLdb
import sqlite3
import numpy as np
from tabulate import tabulate
import copy
import ipdb
class BaseDMsql(object):
"""
Data manager base class.
"""
def __init__(self, db_name, db_connector, path2project=None,
db_server=None, db_user=None, db_password=None):
"""
Initializes a DataManager object
Args:
db_name :Name of the DB
            db_connector :Connector. Available options are mysql or sqlite3
path2project :Path to the project folder (sqlite only)
db_server :Server (mysql only)
db_user :User (mysql only)
db_password :Password (mysql only)
"""
# Store paths to the main project folders and files
self._path2project = copy.copy(path2project)
self.dbname = db_name
self.connector = db_connector
self.server = db_server
self.user = db_user
self.password = db_password
# Other class variables
self.dbON = False # Will switch to True when the db was connected.
# Connector to database
self._conn = None
# Cursor of the database
self._c = None
# Try connection
try:
if self.connector == 'mysql':
self._conn = MySQLdb.connect(self.server, self.user,
self.password, self.dbname)
self._c = self._conn.cursor()
print("MySQL database connection successful")
self.dbON = True
self._conn.set_character_set('utf8')
elif self.connector == 'sqlite3':
# sqlite3
# sqlite file will be in the root of the project, we read the
# name from the config file and establish the connection
db_fname = os.path.join(self._path2project,
self.dbname + '.db')
print("---- Connecting to {}".format(db_fname))
self._conn = sqlite3.connect(db_fname)
self._c = self._conn.cursor()
self.dbON = True
else:
print("---- Unknown DB connector {}".format(self.connector))
except:
print("---- Error connecting to the database")
def __del__(self):
"""
When destroying the object, it is necessary to commit changes
in the database and close the connection
"""
try:
self._conn.commit()
self._conn.close()
except:
print("---- Error closing database")
def resetDBtables(self, tables=None):
"""
Delete existing database, and regenerate empty tables
Args:
tables: If string, name of the table to reset.
If list, list of tables to reset
                    If None (default), all tables are deleted and regenerated
                    (including those that might not exist previously)
"""
        # If tables is None, all tables are deleted and re-generated
if tables is None:
# Delete all existing tables
for table in self.getTableNames():
self._c.execute("DROP TABLE " + table)
            # Create tables. No table list is specified, so that tables that
            # did not exist previously are also created.
self.createDBtables()
else:
            # If tables is not a list, make the appropriate list
if type(tables) is str:
tables = [tables]
# Remove all selected tables (if exist in the database).
for table in set(tables) & set(self.getTableNames()):
self._c.execute("DROP TABLE " + table)
# All deleted tables are created again
self.createDBtables(tables)
self._conn.commit()
return
def resetDB(self):
"""
Deletes existing database, and regenerate empty tables
"""
if self.connector == 'mysql':
# In mysql we simply drop all existing tables
for tablename in self.getTableNames():
self._c.execute("DROP TABLE " + tablename)
self._conn.commit()
else:
# If sqlite3, we need to delete the file, and start over
try:
self._conn.commit()
self._conn.close()
except:
print("Error closing database")
# Delete sqlite3 file
db_fname = os.path.join(self._path2project, self.dbname + '.db')
os.remove(db_fname)
try:
self._conn = sqlite3.connect(db_fname)
self._c = self._conn.cursor()
except:
print("Error connecting to the database")
self.createDBtables()
def addTableColumn(self, tablename, columnname, columntype):
"""
Add a new column to the specified table.
Args:
tablename :Table to which the column will be added
columnname :Name of new column
columntype :Type of new column.
        Note that, for mysql, if type is TEXT or VARCHAR, the character set is
        forced to be utf8.
"""
# Check if the table exists
if tablename in self.getTableNames():
# Check that the column does not already exist
if columnname not in self.getColumnNames(tablename):
# Fit characters to the allowed format if necessary
fmt = ''
if (self.connector == 'mysql' and
('TEXT' in columntype or 'VARCHAR' in columntype) and
not ('CHARACTER SET' in columntype or
'utf8' in columntype)):
                    # We need to enforce utf8 for mysql
fmt = ' CHARACTER SET utf8'
sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +
columnname + ' ' + columntype + fmt)
self._c.execute(sqlcmd)
# Commit changes
self._conn.commit()
else:
print(("WARNING: Column {0} already exist in table {1}."
).format(columnname, tablename))
else:
print('Error adding column to table. Please, select a valid ' +
'table name from the list')
print(self.getTableNames())
def dropTableColumn(self, tablename, columnname):
"""
Remove column from the specified table
Args:
tablename :Table to which the column will be added
columnname :Name of column to be removed
"""
# Check if the table exists
if tablename in self.getTableNames():
# Check that the column does not already exist
if columnname in self.getColumnNames(tablename):
# ALTER TABLE DROP COLUMN IS ONLY SUPPORTED IN MYSQL
if self.connector == 'mysql':
sqlcmd = ('ALTER TABLE ' + tablename + ' DROP COLUMN ' +
columnname)
self._c.execute(sqlcmd)
# Commit changes
self._conn.commit()
else:
print('Column drop not yet supported for SQLITE')
else:
print('Error deleting column. The column does not exist')
print(tablename, columnname)
else:
print('Error deleting column. Please, select a valid table name' +
' from the list')
print(self.getTableNames())
return
def readDBtable(self, tablename, limit=None, selectOptions=None,
filterOptions=None, orderOptions=None):
"""
Read data from a table in the database can choose to read only some
specific fields
Args:
tablename : Table to read from
selectOptions: string with fields that will be retrieved
(e.g. 'REFERENCIA, Resumen')
filterOptions: string with filtering options for the SQL query
(e.g., 'WHERE UNESCO_cd=23')
orderOptions: string with field that will be used for sorting the
results of the query
(e.g, 'Cconv')
limit: The maximum number of records to retrieve
"""
try:
# Check that table name is valid
if tablename in self.getTableNames():
sqlQuery = 'SELECT '
if selectOptions:
sqlQuery = sqlQuery + selectOptions
else:
sqlQuery = sqlQuery + '*'
sqlQuery = sqlQuery + ' FROM ' + tablename + ' '
if filterOptions:
sqlQuery = sqlQuery + ' WHERE ' + filterOptions
if orderOptions:
sqlQuery = sqlQuery + ' ORDER BY ' + orderOptions
if limit:
sqlQuery = sqlQuery + ' LIMIT ' + str(limit)
# This is to update the connection to changes by other
# processes.
self._conn.commit()
                # Return the pandas dataframe. Note that numbers in text format
                # are not converted to numeric types (coerce_float=False).
return pd.read_sql(sqlQuery, con=self._conn,
coerce_float=False)
else:
print('Error in query. Please, select a valid table name ' +
'from the list')
print(self.getTableNames())
except Exception as E:
print(str(E))
def getTableNames(self):
"""
Returns a list with the names of all tables in the database
"""
# The specific command depends on whether we are using mysql or sqlite
if self.connector == 'mysql':
sqlcmd = ("SELECT table_name FROM INFORMATION_SCHEMA.TABLES " +
"WHERE table_schema='" + self.dbname + "'")
else:
sqlcmd = "SELECT name FROM sqlite_master WHERE type='table'"
self._c.execute(sqlcmd)
tbnames = [el[0] for el in self._c.fetchall()]
return tbnames
def getColumnNames(self, tablename):
"""
Returns a list with the names of all columns in the indicated table
Args:
tablename: the name of the table to retrieve column names
"""
# Check if tablename exists in database
if tablename in self.getTableNames():
# The specific command depends on whether we are using mysql or
# sqlite
if self.connector == 'mysql':
sqlcmd = "SHOW COLUMNS FROM " + tablename
self._c.execute(sqlcmd)
columnnames = [el[0] for el in self._c.fetchall()]
else:
sqlcmd = "PRAGMA table_info(" + tablename + ")"
self._c.execute(sqlcmd)
columnnames = [el[1] for el in self._c.fetchall()]
return columnnames
else:
print('Error retrieving column names: Table does not exist on ' +
'database')
return []
def getTableInfo(self, tablename):
# Get columns
cols = self.getColumnNames(tablename)
# Get number of rows
sqlcmd = "SELECT COUNT(*) FROM " + tablename
self._c.execute(sqlcmd)
n_rows = self._c.fetchall()[0][0]
return cols, n_rows
def showTable(self, tablename, max_rows=500, max_width=200):
""" A simple method to display the content of a single table.
Args:
max_rows: Maximum number of rows to display. It the size of
the table is higher, only the first max_rows rows
are shown
max_width: Maximum with of the table to display. If the size
of the table is higher, the tabulate environment
is not used and only a table heading is shown
"""
title = "= Database {} ====================".format(self.dbname)
print("="*len(title))
print(title)
print("="*len(title))
print("")
print("==== Table {} ".format(tablename))
cols, n_rows = self.getTableInfo(tablename)
df = self.readDBtable(tablename, limit=max_rows, selectOptions=None,
filterOptions=None, orderOptions=None)
txt = tabulate(df, headers='keys', tablefmt='psql')
txt_width = max(len(z) for z in txt.split('\n'))
if txt_width > max_width:
print('---- The table is too wide (up to {}'.format(txt_width) +
' characters per line). Showing a portion of the table ' +
'header only')
print(df.head(25))
else:
print(txt)
return
def insertInTable(self, tablename, columns, arguments):
"""
Insert new records into table
Args:
tablename: Name of table in which the data will be inserted
columns: Name of columns for which data are provided
arguments: A list of lists or tuples, each element associated
to one new entry for the table
"""
# Make sure columns is a list, and not a single string
if not isinstance(columns, (list,)):
columns = [columns]
ncol = len(columns)
if len(arguments[0]) == ncol:
# Make sure the tablename is valid
if tablename in self.getTableNames():
# Make sure we have a list of tuples; necessary for mysql
arguments = list(map(tuple, arguments))
# # Update DB entries one by one.
# for arg in arguments:
# # sd
# sqlcmd = ('INSERT INTO ' + tablename + '(' +
# ','.join(columns) + ') VALUES(' +
# ','.join('{}'.format(a) for a in arg) + ')'
# )
# try:
# self._c.execute(sqlcmd)
# except:
# import ipdb
# ipdb.set_trace()
sqlcmd = ('INSERT INTO ' + tablename +
'(' + ','.join(columns) + ') VALUES (')
if self.connector == 'mysql':
sqlcmd += '%s' + (ncol-1)*',%s' + ')'
else:
sqlcmd += '?' + (ncol-1)*',?' + ')'
self._c.executemany(sqlcmd, arguments)
# Commit changes
self._conn.commit()
else:
print('Error inserting data in table: number of columns mismatch')
return
def setField(self, tablename, keyfld, valueflds, values):
"""
Update records of a DB table
Args:
tablename: Table that will be modified
keyfld: string with the column name that will be used as key
(e.g. 'REFERENCIA')
valueflds: list with the names of the columns that will be updated
(e.g., 'Lemas')
values: A list of tuples in the format
(keyfldvalue, valuefldvalue)
(e.g., [('Ref1', 'gen celula'),
('Ref2', 'big_data, algorithm')])
"""
# Make sure valueflds is a list, and not a single string
if not isinstance(valueflds, (list,)):
valueflds = [valueflds]
ncol = len(valueflds)
if len(values[0]) == (ncol+1):
# Make sure the tablename is valid
if tablename in self.getTableNames():
# Update DB entries one by one.
                # WARNING: THIS VERSION MAY NOT WORK PROPERLY IF v
                # HAS A STRING CONTAINING DOUBLE QUOTES (").
for v in values:
sqlcmd = ('UPDATE ' + tablename + ' SET ' +
', '.join(['{0} ="{1}"'.format(f, v[i + 1])
for i, f in enumerate(valueflds)]) +
' WHERE {0}="{1}"'.format(keyfld, v[0]))
self._c.execute(sqlcmd)
# This is the old version: it might not have the problem of
# the above version, but did not work properly with sqlite.
# # Make sure we have a list of tuples; necessary for mysql
# # Put key value last in the tuples
# values = list(map(circ_left_shift, values))
# sqlcmd = 'UPDATE ' + tablename + ' SET '
# if self.connector == 'mysql':
# sqlcmd += ', '.join([el+'=%s' for el in valueflds])
# sqlcmd += ' WHERE ' + keyfld + '=%s'
# else:
# sqlcmd += ', '.join([el+'=?' for el in valueflds])
# sqlcmd += ' WHERE ' + keyfld + '=?'
# self._c.executemany(sqlcmd, values)
# Commit changes
self._conn.commit()
else:
print('Error updating table values: number of columns mismatch')
return
def upsert(self, tablename, keyfld, df):
"""
Update records of a DB table with the values in the df
This function implements the following additional functionality:
            * If there are columns in df that are not in the SQL table,
columns will be added
* New records will be created in the table if there are rows
in the dataframe without an entry already in the table. For this,
keyfld indicates which is the column that will be used as an
index
Args:
tablename: Table that will be modified
keyfld: string with the column name that will be used as key
(e.g. 'REFERENCIA')
df: Dataframe that we wish to save in table tablename
"""
# Check that table exists and keyfld exists both in the Table and the
# Dataframe
if tablename in self.getTableNames():
if not ((keyfld in df.columns) and
(keyfld in self.getColumnNames(tablename))):
print("Upsert function failed: Key field does not exist",
"in the selected table and/or dataframe")
return
else:
print('Upsert function failed: Table does not exist')
return
# Reorder dataframe to make sure that the key field goes first
flds = [keyfld] + [x for x in df.columns if x != keyfld]
df = df[flds]
# Create new columns if necessary
for clname in df.columns:
if clname not in self.getColumnNames(tablename):
if df[clname].dtypes == np.float64:
self.addTableColumn(tablename, clname, 'DOUBLE')
else:
if df[clname].dtypes == np.int64:
self.addTableColumn(tablename, clname, 'INTEGER')
else:
self.addTableColumn(tablename, clname, 'TEXT')
# Check which values are already in the table, and split
# the dataframe into records that need to be updated, and
# records that need to be inserted
keyintable = self.readDBtable(tablename, limit=None,
selectOptions=keyfld)
keyintable = keyintable[keyfld].tolist()
values = [tuple(x) for x in df.values]
values_insert = list(filter(lambda x: x[0] not in keyintable, values))
values_update = list(filter(lambda x: x[0] in keyintable, values))
if len(values_update):
self.setField(tablename, keyfld, df.columns[1:].tolist(),
values_update)
if len(values_insert):
self.insertInTable(tablename, df.columns.tolist(), values_insert)
return
def exportTable(self, tablename, fileformat, path, filename, cols=None):
"""
Export columns from a table to a file.
Args:
:tablename: Name of the table
:fileformat: Type of output file. Available options are
- 'xlsx'
- 'pkl'
            :path: Route to the output folder
:filename: Name of the output file
            :cols: Columns to save. It can be a list or a string
of comma-separated columns.
If None, all columns saved.
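        Example (illustrative; the table, path and file names are hypothetical):
            db.exportTable('projects', 'xlsx', '/tmp', 'projects.xlsx',
                           cols=['REFERENCIA', 'Lemas'])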
"""
# Path to the output file
fpath = os.path.join(path, filename)
# Read data:
        if isinstance(cols, list):
options = ','.join(cols)
else:
options = cols
df = self.readDBtable(tablename, selectOptions=options)
# ######################
# Export results to file
if fileformat == 'pkl':
df.to_pickle(fpath)
else:
df.to_excel(fpath)
return
|
StarcoderdataPython
|
5012926
|
#!/usr/bin/python3
from argparse import ArgumentParser
from os import getcwd
from pathlib import Path
from pprint import pprint
from InquirerPy import inquirer
from InquirerPy.base import Choice
from .git import (
get_extra_gitconfig_file,
gitconfig_parse_repotags,
gitconfig_add,
gitconfig_remove,
)
from .logger import get_logger
from .utils import validate_path
def get_arg_parser():
parser = ArgumentParser("git-repotag")
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v", "--verbose", action="store_true", help="verbose")
verbosity_group.add_argument("-q", "--quiet", action="store_true", help="quiet")
subparsers = parser.add_subparsers(title="commands", dest="command", required=True)
parser_add = subparsers.add_parser("add", help="Add tag to git repo")
parser_add.add_argument("tag", help="Gitconfig tag")
parser_add.add_argument("path", nargs="?", help="Repo path")
parser_remove = subparsers.add_parser("remove", help="Removes tag from repo")
parser_remove.add_argument("tag", help="Gitconfig tag")
parser_remove.add_argument("path", nargs="?", help="Repo path")
parser_interactive = subparsers.add_parser(
"interactive", help="Runs inquirerpy prompt"
)
parser_interactive.add_argument("path", nargs="?", help="Repo path")
parser_list = subparsers.add_parser("list", help="Lists objects")
subparsers_list = parser_list.add_subparsers(
title="list subcommand", dest="list_subcommand", required=True
)
parser_list_tags = subparsers_list.add_parser("tags", help="Lists tags")
parser_list_tags.add_argument(
"-p", "--pprint", action="store_true", help="Pretty print output"
)
parser_list_repos = subparsers_list.add_parser("repos", help="Lists repos")
parser_list_repos.add_argument(
"-p", "--pprint", action="store_true", help="Pretty print output"
)
subparsers.add_parser("validate", help="Checks for invalid paths in gitconfig")
parser_cleanup = subparsers.add_parser(
"cleanup", help="Cleanup of invalid paths in gitconfig"
)
parser_cleanup.add_argument(
"-y", help="Assume yes for prompts", dest="assume_yes", action="store_true"
)
return parser
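# Illustrative invocations of the CLI defined above (tag names and paths are
# hypothetical):
#   git-repotag add work ~/src/myrepo
#   git-repotag list tags --pprint
#   git-repotag validate
#   git-repotag cleanup -y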
def add(repotags, tag, repo_path, *, extra_gitconfig=None):
get_logger().info('Running "add" command')
validation_err = validate_path(repo_path)
if validation_err is not None:
raise Exception(validation_err)
gitconfig_add(repotags, tag, repo_path, extra_gitconfig)
def remove(repotags, tag, repo_path, *, extra_gitconfig=None):
validation_err = validate_path(repo_path)
if validation_err is not None:
raise Exception(validation_err)
gitconfig_remove(repotags, tag, repo_path, extra_gitconfig)
def interactive(repotags, repo_path, *, extra_gitconfig=None):
get_logger().info('Running "interactive" command')
# Value has to be stringified, otherwise list lookup fails
str_repo_path = str(repo_path)
validation_err = validate_path(repo_path)
if validation_err is not None:
raise Exception(validation_err)
modified_existing_repotags = inquirer.checkbox(
message="Select tags:",
choices=[
Choice(tag, enabled=str_repo_path in repos)
for tag, repos in repotags.items()
],
cycle=True,
).execute()
should_add_new_repotags = inquirer.confirm(
message="Do you want to add extra tags?", default=False
).execute()
completely_new_repotags = (
[
tag.strip()
for tag in inquirer.text(
message="Enter tags (comma separated):",
)
.execute()
            .split(",")
]
if should_add_new_repotags
else []
)
modified_existing_repotags = set(modified_existing_repotags)
previous_existing_repotags = set(
[tag for tag, repos in repotags.items() if str_repo_path in repos]
)
completely_new_repotags = set(completely_new_repotags)
repotags_defined_twice = (
modified_existing_repotags - previous_existing_repotags
) & completely_new_repotags
for tag in repotags_defined_twice:
get_logger().warning(f'Tag "{tag}" was defined on both inputs')
repotags_to_add = (
modified_existing_repotags - previous_existing_repotags
) | completely_new_repotags
repotags_to_remove = previous_existing_repotags - modified_existing_repotags
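    # Worked example (illustrative): if the repo previously had tags {'a', 'b'},
    # the checkbox now returns {'b', 'c'} and the free-text prompt adds {'d'},
    # then repotags_to_add == {'c', 'd'} and repotags_to_remove == {'a'}.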
if not repotags_to_add:
        get_logger().warning("No repository tags were added")
for tag in repotags_to_remove:
get_logger().info(f'Removing tag "{tag}"')
gitconfig_remove(repotags, tag, repo_path, extra_gitconfig)
for tag in repotags_to_add:
get_logger().info(f'Adding tag "{tag}"')
gitconfig_add(repotags, tag, repo_path, extra_gitconfig)
def get_repotags_by_repos(repotags):
tag_repo_items = [(tag, repo) for tag, repos in repotags.items() for repo in repos]
repotags_by_repos = {}
for tag, repo in tag_repo_items:
        if repo not in repotags_by_repos:
repotags_by_repos[repo] = []
repotags_by_repos[repo].append(tag)
return repotags_by_repos
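# Illustrative inversion performed by get_repotags_by_repos (hypothetical data):
#   {'work': ['/a', '/b'], 'oss': ['/a']} -> {'/a': ['work', 'oss'], '/b': ['work']}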
def list_tags(repotags, *, should_pprint=False):
get_logger().info('Running "list_tags" command')
if should_pprint:
pprint(repotags)
else:
print("\n".join(repotags.keys()))
def list_repos(repotags, *, should_pprint=False):
get_logger().info('Running "list_repos" command')
repotags_by_repos = get_repotags_by_repos(repotags)
if should_pprint:
pprint(repotags_by_repos)
else:
for repo, tags in repotags_by_repos.items():
print(f"{repo} tags: {tags}")
def validate(repotags):
get_logger().info('Running "validate" command')
repotags_by_repos = get_repotags_by_repos(repotags)
command_result = 0
for repo, repotags in repotags_by_repos.items():
validation_error = validate_path(Path(repo))
if validation_error:
get_logger().warning(f"{validation_error} (tags: {repotags})")
command_result = 1
return command_result
def cleanup(repotags, *, assume_yes=False, extra_gitconfig=None):
get_logger().info('Running "cleanup" command')
repotags_by_repos = get_repotags_by_repos(repotags)
for repo, tags in repotags_by_repos.items():
validation_error = validate_path(Path(repo))
if validation_error:
should_perform_cleanup = (
assume_yes
or inquirer.confirm(
message=f"{validation_error}\nRemove for tags: {tags}?",
default=False,
).execute()
)
if should_perform_cleanup:
for tag in tags:
gitconfig_remove(repotags, tag, repo, extra_gitconfig)
def get_path_from_args(args):
path = Path(args.path if args.path is not None else getcwd())
return path.expanduser().absolute()
def cli(args):
get_logger().info("Running in verbose mode")
extra_gitconfig = get_extra_gitconfig_file()
repotags = gitconfig_parse_repotags(extra_gitconfig=extra_gitconfig)
cli_result = 0
if args.command == "list":
if args.list_subcommand == "tags":
list_tags(repotags, should_pprint=args.pprint)
elif args.list_subcommand == "repos":
list_repos(repotags, should_pprint=args.pprint)
else:
raise Exception(
f'Unknown subcommand "{args.command} {args.list_subcommand}"'
)
elif args.command == "add":
path = get_path_from_args(args)
add(repotags, args.tag, path, extra_gitconfig=extra_gitconfig)
elif args.command == "remove":
path = get_path_from_args(args)
remove(repotags, args.tag, path, extra_gitconfig=extra_gitconfig)
elif args.command == "interactive":
path = get_path_from_args(args)
interactive(repotags, path, extra_gitconfig=extra_gitconfig)
elif args.command == "validate":
# Apply exit code from the result
cli_result = validate(repotags)
elif args.command == "cleanup":
cleanup(repotags, assume_yes=args.assume_yes, extra_gitconfig=extra_gitconfig)
else:
raise Exception(f'Unknown command "{args.command}"')
return cli_result
|
StarcoderdataPython
|
9662719
|
<reponame>wesselb/matrix
import lab as B
from matrix import (
Constant,
Dense,
Diagonal,
Kronecker,
LowerTriangular,
UpperTriangular,
Zero,
)
# noinspection PyUnresolvedReferences
from ..util import (
AssertDenseWarning,
approx,
check_un_op,
const1,
dense1,
diag1,
kron1,
lr1,
lt1,
ut1,
wb1,
zero1,
)
def power2(x):
return B.power(x, 2)
def test_power_zero(zero1):
check_un_op(power2, zero1, asserted_type=Zero)
def test_power_dense(dense1):
check_un_op(power2, dense1, asserted_type=Dense)
def test_power_diag(diag1):
check_un_op(power2, diag1, asserted_type=Diagonal)
def test_power_const(const1):
check_un_op(power2, const1, asserted_type=Constant)
def test_power_lt(lt1):
check_un_op(power2, lt1, asserted_type=LowerTriangular)
def test_power_ut(ut1):
check_un_op(power2, ut1, asserted_type=UpperTriangular)
def test_power_lr(lr1):
with AssertDenseWarning("power of <low-rank>"):
check_un_op(power2, lr1, asserted_type=Dense)
def test_power_wb(wb1):
with AssertDenseWarning("power of <woodbury>"):
check_un_op(power2, wb1, asserted_type=Dense)
def test_power_kron(kron1):
check_un_op(power2, kron1, asserted_type=Kronecker)
|
StarcoderdataPython
|
6693967
|
import configargparse
import requests
import logging
import getpass
from colorlog import ColoredFormatter
parser = configargparse.ArgumentParser(
description='Connect to a netExtender VPN',
default_config_files=['/etc/nxbender', '~/.nxbender'],
)
parser.add_argument('-c', '--conf', is_config_file=True)
parser.add_argument('-s', '--server', required=True)
parser.add_argument('-P', '--port', type=int, default=443, help='Server port - default 443')
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=False)
parser.add_argument('-d', '--domain', required=True)
parser.add_argument('-f', '--fingerprint', help='Verify server\'s SSL certificate has this fingerprint. Overrides all other certificate verification.')
parser.add_argument('--debug', action='store_true', help='Show debugging information')
parser.add_argument('-q', '--quiet', action='store_true', help='Don\'t output basic info whilst running')
parser.add_argument('--show-ppp-log', action='store_true', help='Print PPP log messages to stdout')
# Investigating
parser.add_argument('--use-swap', action='store_true', help='Use swap cookie as sessionID')
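# Illustrative ~/.nxbender config file (values are hypothetical); option names
# follow the long arguments defined above, in configargparse's key = value form:
#   server = vpn.example.com
#   username = alice
#   domain = LocalDomain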
def main():
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.quiet:
loglevel = logging.WARNING
else:
loglevel = logging.INFO
if not args.password:
        args.password = getpass.getpass()
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(message_log_color)s%(message)s",
secondary_log_colors={
'message': {
'ERROR': 'red',
'CRITICAL': 'red'
}
}
)
logging.basicConfig(level=loglevel)
logging.getLogger().handlers[0].setFormatter(formatter)
if args.debug:
try:
from http.client import HTTPConnection # py3
except ImportError:
from httplib import HTTPConnection # py2
HTTPConnection.debuglevel = 2
from . import nx, sslconn
sess = nx.NXSession(args)
try:
sess.run()
except requests.exceptions.SSLError as e:
logging.error("SSL error: %s" % e)
# print the server's fingerprint for the user to consider
sslconn.print_fingerprint(args.server)
except requests.exceptions.ConnectionError as e:
message = e.message.reason.message.split(':')[1:][-1] # yuk
logging.error("Error connecting to remote host: %s" % message)
|
StarcoderdataPython
|
8066412
|
from setuptools import setup
setup(
name="Verify",
description="Szyfrowanie hasła",
version="v1.0",
author="<NAME>",
author_email="",
    license="MIT",
install_requires=["Click"],
packages=['Verify'],
entry_points={
'console_scripts' : ['verify = Verify.main:main']
}
)
|
StarcoderdataPython
|
128172
|
import requests
from pytrello.decorators import as_json
from pytrello.decorators import authorized
@authorized
@as_json
def get(url, payload=None, **kwargs):
return requests.get(url.format(**kwargs), params=payload)
@authorized
@as_json
def post(url, payload=None, **kwargs):
return requests.post(url.format(**kwargs), data=payload)
@authorized
@as_json
def delete(url, payload=None, **kwargs):
return requests.delete(url.format(**kwargs), data=payload)
@authorized
@as_json
def put(url, payload=None, **kwargs):
return requests.put(url.format(**kwargs), params=payload)
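# Illustrative usage (the board id is hypothetical; authentication is presumably
# handled by the `authorized` decorator):
#   cards = get('https://api.trello.com/1/boards/{board_id}/cards', board_id='abc123')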
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.