| Column | Dtype | Range / values |
|---|---|---|
| blob_id | stringlengths | 40-40 |
| directory_id | stringlengths | 40-40 |
| path | stringlengths | 3-616 |
| content_id | stringlengths | 40-40 |
| detected_licenses | sequencelengths | 0-112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5-115 |
| snapshot_id | stringlengths | 40-40 |
| revision_id | stringlengths | 40-40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k-681M (nullable ⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3-10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3-10.2M |
| authors | sequencelengths | 1-1 |
| author_id | stringlengths | 1-132 |
41ccba0bac47fc1a2880eccf4cd69dcf9867580d | c42a085521cec895fac0021eb1638d6f077eadf7 | /PYTHON_FUNDAMENTALS_May_August_2020/Exam_Preparation_29_07_20_Python_Fundamentals/02. Registration.py | 30888da99efa5b5a9e2aafbbc8a56bc9b8c8d6ed | [] | no_license | vasil-panoff/Python_Fundamentals_SoftUni_May_2020 | f645ce85efa6db047b52a8b63d411d2e5bd5bd9a | daf1a27ff1a4684d51cf875ee0a4c0706a1a4404 | refs/heads/main | 2023-01-06T22:20:30.151249 | 2020-11-03T22:56:24 | 2020-11-03T22:56:24 | 309,818,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import re
username_pattern = r'U\$([A-Z][a-z]{2,})U\$'
password_pattern = r'P@\$([a-z]{5,}\d+)P@\$'
regex = fr'{username_pattern}{password_pattern}'
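# Illustrative match (made-up sample, not from the task statement):
#   "U$MariaU$P@$sevenx123P@$"  ->  Username: Maria, Password: sevenx123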
n = int(input())
total_count = 0
for i in range(n):
registration = input()
match = re.match(regex, registration)
if match is None:
print("Invalid username or password")
continue
total_count += 1
print(f'Username: {match[1]}, Password: {match[2]}')
print(f'Successful registrations: {total_count}') | [
"[email protected]"
] | |
b6ef00ae2eebd8c444b26cbe6dda215cb3a4043c | 2f36ee70b1224c5700eb9fd4d5b6d568a1c9fe9f | /dec.py | 7c8d7795bfc91e4682d4d875ad613b7bff00ef42 | [
"MIT"
] | permissive | laurelkeys/intimo | d70b4621f49937127c8ea9a72ff23495ba151a1e | f5c8200e52e4aeb9c04b4988a61dbc66c04f8255 | refs/heads/master | 2020-09-03T21:42:12.142312 | 2019-11-24T21:39:44 | 2019-11-24T21:39:44 | 219,578,931 | 0 | 0 | MIT | 2019-11-24T21:37:47 | 2019-11-04T19:22:15 | Python | UTF-8 | Python | false | false | 3,735 | py | import os, sys
import argparse
import warnings
import cv2
import numpy as np
import sounddevice as sd
from scipy.io import wavfile
from codec import decode
from converter import convert
def get_parser():
parser = argparse.ArgumentParser(
description="Retrieve WAV audio data from an image bit plane.")
parser.add_argument("enc_img_path", type=str,
help="File name (with path) of a PNG image with audio encoded")
parser.add_argument("--n_of_channels", "-ch", type=int, choices=[1, 2], default=1,
help="Number of audio channels (1=mono, 2=stereo) (defaults to %(default)d)")
parser.add_argument("--sample_rate", "-sr", type=int, choices=[8000, 44100], default=8000,
help="Sample rate of audio recording (defaults to %(default)dHz)")
parser.add_argument("--bit_plane", "-b", type=int, choices=range(0, 8), default=5,
help="Bit plane in which to hide the captured audio (defaults to %(default)d)")
parser.add_argument("--output_folder", "-o", type=str, default=".",
help="Output folder to store the decoded audio (defaults to '%(default)s/')")
parser.add_argument("--info_in_fname", "-iifn", action="store_true",
help="Get the number of channels, sample rate, and bit plane from the image file name "
"(other arguments will be ignored)")
parser.add_argument("--playback", action="store_true",
help="Play the decoded audio as well")
parser.add_argument("--verbose", "-v", action="store_true",
help="Increase verbosity")
return parser
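# Example invocation (file name is hypothetical):
#   python dec.py 1_8000_5_20201101-120000.png --info_in_fname -o decoded --playback
# With --info_in_fname the channels (1), sample rate (8000) and bit plane (5)
# are parsed straight out of the file name.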
###############################################################################
def main(args):
enc_img = cv2.imread(args.enc_img_path)
if args.info_in_fname:
# "channels_samplerate_bitplane_YYYYmmdd-HHMMSS"
fname, _ = os.path.splitext(os.path.basename(args.enc_img_path))
try:
ch, sr, b, *_ = fname.split('_')
args.n_of_channels = int(ch)
args.sample_rate = int(sr)
args.bit_plane = int(b)
if args.verbose:
print("Info taken from file name:")
print(" - channels:", args.n_of_channels)
print(" - samplerate:", args.sample_rate)
print(" - bitplane:", args.bit_plane)
        except ValueError:
print("When using --info_in_fname, the expected file name must be in the format: "
"'channels_samplerate_bitplane_YYYYmmdd-HHMMSS.png'")
exit()
decoded_audio = decode(enc_img, args.bit_plane)
assert decoded_audio.dtype == np.uint8
decoded_audio = convert(decoded_audio, to='int16')
if args.n_of_channels == 2:
warnings.warn("\nWarning: stereo audio isn't currently supported")
# TODO convert decoded_audio to a 2D array if it's stereo
fname, _ = os.path.splitext(os.path.basename(args.enc_img_path))
fname = os.path.join(args.output_folder, fname + "-decoded")
wavfile.write(filename=fname + ".wav", rate=args.sample_rate, data=decoded_audio)
if args.verbose:
print(f"\nSaved audio to '{fname}.wav'")
if args.playback:
if args.verbose:
print(f"\nPlaying (~{decoded_audio.size // args.sample_rate}s) audio..", end='')
sd.play(decoded_audio, args.sample_rate)
sd.wait() # wait until it is done playing
if args.verbose:
print(". done.")
###############################################################################
if __name__ == '__main__':
args = get_parser().parse_args()
main(args) | [
"[email protected]"
] | |
705a6e762c4b0a8c8ffcf7d70018b1b1cca90cc3 | d5f4b09c38cef1ae6ea22c70bd13316661fa1fcb | /Workspace/TestTxt.py | 2da291715b852521b603af99913a1607f0b41aa1 | [
"MIT"
] | permissive | ExtensiveAutomation/extensiveautomation-appclient | fa4ec42c762c0941c104b679374113b9eac8d0a0 | 66f65dd6e4a48909120f63239f630147c733df3f | refs/heads/master | 2023-08-31T00:04:03.766489 | 2023-08-18T08:30:01 | 2023-08-18T08:30:01 | 168,972,301 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,706 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2020 Denis Machard
# This file is part of the extensive automation project
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
# -------------------------------------------------------------------
"""
Test txt module
"""
import sys
import base64
# unicode = str with python3
if sys.version_info > (3,):
unicode = str
from PyQt5.QtWidgets import (QVBoxLayout, QApplication)
from Libs import QtHelper, Logger
try:
from PythonEditor import PyEditor
from PythonEditor import EditorWidget
except ImportError: # python3 support
from .PythonEditor import PyEditor
from .PythonEditor import EditorWidget
try:
import Document
except ImportError: # python3 support
from . import Document
import UserClientInterface as UCI
TYPE = 'txt'
class WTestTxt(Document.WDocument):
"""
Test txt widget
"""
TEST_TXT_EDITOR = 0
def __init__(self, parent = None, path = None, filename = None, extension = None,
nonameId = None, remoteFile=True, repoDest=None, project=0, isLocked=False):
"""
        Constructs WTestTxt widget
@param parent:
@type parent:
@param path:
@type path:
@param filename:
@type filename:
@param extension:
@type extension:
@param nonameId:
@type nonameId:
"""
Document.WDocument.__init__(self, parent, path, filename, extension,
nonameId, remoteFile, repoDest, project, isLocked)
self.srcEditor = None
self.createWidgets()
self.createConnections()
def createWidgets (self):
"""
QtWidgets creation
_______________________
| |
| PyEditor |
|_______________________|
|________QSplitter______|
| |
| PyEditor |
|_______________________|
"""
self.srcWidget = EditorWidget( editorId=self.TEST_TXT_EDITOR,
title="Txt Definition:", parent=self,
activePyLexer=False )
self.srcEditor = self.srcWidget.editor
layout = QVBoxLayout()
layout.addWidget(self.srcWidget)
layout.setContentsMargins(2,0,0,0)
self.setLayout(layout)
def createConnections (self):
"""
QtSignals connection
"""
self.srcEditor.FocusChanged.connect( self.focusChanged )
self.srcEditor.cursorPositionChanged.connect(self.onCursorPositionChanged)
self.srcEditor.textChanged.connect(self.setModify)
self.srcEditor.textChanged.connect(self.updateTotalLines)
def viewer(self):
"""
return the document viewer
"""
return self.parent
def setWrappingMode(self, wrap):
"""
Set wrap mode
"""
self.srcEditor.setWrappingMode(wrap=wrap)
def updateTotalLines(self):
"""
On total lines changed
"""
self.viewer().TotalLinesChanged.emit( self.editor().lines() )
def editor(self):
"""
Return the editor
"""
return self.srcEditor
def setDefaultCursorPosition(self):
"""
Set the default cursor position
"""
self.srcEditor.setFocus()
self.srcEditor.setCursorPosition(0,0)
def onCursorPositionChanged (self , ln, col):
"""
Emit signal from parent to update the position of the cursor
@param ln: line index
@type ln: Integer
@param col: column index
@type col: Integer
"""
self.viewer().CursorPositionChanged.emit( ln, col )
def setFolding (self, fold):
"""
        Activate or deactivate the code folding
        @param fold:
        @type fold: boolean
        """
        self.srcEditor.activeFolding(fold)
def setLinesNumbering (self, visible):
"""
        Activate or deactivate the lines numbering
@param visible:
@type visible: boolean
"""
if visible:
self.srcEditor.setMarginLineNumbers(1, visible)
self.srcEditor.onLinesChanged()
else:
self.srcEditor.setMarginLineNumbers(1, visible)
self.srcEditor.setMarginWidth(1, 0)
def setWhitespaceVisible (self, visible):
"""
        Activate or deactivate the whitespace visibility
        @param visible:
        @type visible: boolean
        """
        self.srcEditor.setWhitespaceVisible(visible)
def setIndentationGuidesVisible (self, visible):
"""
        Activate or deactivate indentation guides visibility
        @param visible:
        @type visible: boolean
        """
        self.srcEditor.setIndentationGuidesVisible(visible)
def currentEditor (self):
"""
Returns the editor that has the focus
@return: Focus editor
@rtype: PyEditor
"""
weditor = QApplication.focusWidget()
if isinstance(weditor, PyEditor):
if weditor.editorId == self.TEST_TXT_EDITOR:
return self.srcEditor
else:
return self.srcEditor
def focusChanged (self):
"""
Called when focus on editors
Emit the signal "focusChanged"
"""
weditor = QApplication.focusWidget()
if isinstance(weditor, PyEditor):
if weditor.editorId == self.TEST_TXT_EDITOR:
self.viewer().findWidget.setEditor( editor = self.srcEditor)
self.viewer().FocusChanged.emit(self)
def defaultLoad (self):
"""
Load default empty script
"""
self.srcEditor.setText( "" )
self.srcEditor.setFocus()
self.setReadOnly( readOnly=False )
def load (self, content=None):
"""
Open file
"""
self.srcEditor.setText( content.decode("utf-8") )
self.srcEditor.setFocus()
self.setReadOnly( readOnly=False )
return True
def getraw_encoded(self):
"""
Returns raw data encoded
"""
encoded = ""
try:
raw = unicode(self.srcEditor.text()).encode('utf-8')
encoded = base64.b64encode( raw )
if sys.version_info > (3,):
encoded = encoded.decode("utf-8")
except Exception as e:
self.error( "unable to encode: %s" % e )
return encoded
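    # Round-trip sketch (illustrative): editor text "abc" becomes
    # base64.b64encode("abc".encode("utf-8")) == b"YWJj";
    # base64.b64decode reverses it.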
| [
"[email protected]"
] | |
0e7b38386ab690b9218ba8a713e93c88f1be4acf | 6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5 | /katas/kyu_7/sorted_yes_no_how.py | 42671da2fe9a26ca20c19645cb1f6d79ec5555f4 | [
"MIT"
] | permissive | mveselov/CodeWars | e4259194bfa018299906f42cd02b8ef4e5ab6caa | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | refs/heads/master | 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def is_sorted_and_how(nums):
a_or_d = {'a': 'ascending', 'd': 'descending'}
diffs = {'d' if b - a < 0 else 'a' for a, b in zip(nums, nums[1:])}
return 'yes, {}'.format(a_or_d[diffs.pop()]) if len(diffs) == 1 else 'no'
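# Examples (verified against the implementation):
#   is_sorted_and_how([1, 2, 4])   -> 'yes, ascending'
#   is_sorted_and_how([15, 7, 3])  -> 'yes, descending'
#   is_sorted_and_how([4, 2, 30])  -> 'no'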
| [
"[email protected]"
] | |
7b3b80fb2fc11b92f63feacf8246e4800fda39ae | e78ce85bac254f720e021f5f0ad172189da3ab77 | /banco_de_dados/migrations/0002_exame_alt_anatomia.py | 6b51029e3f25268d8689fa4b2950356fd1de1666 | [] | no_license | lldenisll/backend_papaiz | 4d1a0a0b427e708551d57381591614b9c8bf9c55 | 71f1eceafadbaf120f8ef9c87741fa8e431d192a | refs/heads/master | 2023-07-29T10:28:09.634047 | 2021-09-10T13:01:50 | 2021-09-10T13:01:50 | 405,077,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 3.2.5 on 2021-08-11 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('banco_de_dados', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Exame_alt_anatomia',
fields=[
('id_exame', models.IntegerField(primary_key=True, serialize=False)),
('id_alteracao', models.IntegerField()),
],
),
]
| [
"[email protected]"
] | |
a7ae83cdc676a6fee62b948a17dfcb27194dca8a | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1297-Maximum-Number-of-Occurrences-of-a-Substring/1297.py | aee0221f3b7df218ccd20e3d5a2801d51b640b42 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 312 | py | class Solution:
    def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
        import collections  # implicit on LeetCode; imported here so the snippet runs standalone
        cnt = collections.Counter([s[i:i + minSize] for i in range(len(s) - minSize + 1)])
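        # Any valid substring longer than minSize contains a valid substring of
        # exactly minSize, so only minSize windows need counting; maxSize is
        # irrelevant to the answer.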
for k, v in cnt.most_common():
if len(set(k)) <= maxLetters:
return v
return 0 | [
"[email protected]"
] | |
fb96fd36ffe7f093778aed80d23b8f51c8119a10 | d8d6cb5563a71de354072096a5cabfa7a3748dda | /packages/vaex-arrow/vaex_arrow/convert.py | 333d0a7568453cc83f62f5c5093a03bdbc52b769 | [
"MIT"
] | permissive | tpoterba/vaex | f061ad443a1c8ed3f1c90e7206be316f87ab12df | a24e9c4cedc19753e8c7bacede9de37dfcee3c81 | refs/heads/master | 2020-04-11T08:42:52.703154 | 2018-12-13T10:03:13 | 2018-12-13T10:03:13 | 161,653,094 | 0 | 0 | MIT | 2018-12-13T14:48:19 | 2018-12-13T14:48:19 | null | UTF-8 | Python | false | false | 2,682 | py | import pyarrow
import numpy as np
def arrow_array_from_numpy_array(array):
dtype = array.dtype
mask = None
if np.ma.isMaskedArray(array):
mask = array.mask
if dtype.kind == 'S':
type = pyarrow.binary(dtype.itemsize)
arrow_array = pyarrow.array(array, type, mask=mask)
else:
if dtype.isnative:
arrow_array = pyarrow.array(array, mask=mask)
else:
# TODO: we copy here, but I guess we should not... or give some warning
arrow_array = pyarrow.array(array.astype(dtype.newbyteorder('=')), mask=mask)
return arrow_array
def numpy_array_from_arrow_array(arrow_array):
arrow_type = arrow_array.type
buffers = arrow_array.buffers()
assert len(buffers) == 2
bitmap_buffer = buffers[0]
data_buffer = buffers[1]
if isinstance(arrow_type, type(pyarrow.binary(1))): # todo, is there a better way to typecheck?
# mimics python/pyarrow/array.pxi::Array::to_numpy
buffers = arrow_array.buffers()
assert len(buffers) == 2
dtype = "S" + str(arrow_type.byte_width)
# arrow seems to do padding, check if it is all ok
expected_length = arrow_type.byte_width * len(arrow_array)
actual_length = len(buffers[-1])
if actual_length < expected_length:
raise ValueError('buffer is smaller (%d) than expected (%d)' % (actual_length, expected_length))
array = np.frombuffer(buffers[-1], dtype, len(arrow_array))# TODO: deal with offset ? [arrow_array.offset:arrow_array.offset + len(arrow_array)]
else:
dtype = arrow_array.type.to_pandas_dtype()
array = np.frombuffer(data_buffer, dtype, len(arrow_array))
if bitmap_buffer is not None:
# arrow uses a bitmap https://github.com/apache/arrow/blob/master/format/Layout.md
bitmap = np.frombuffer(bitmap_buffer, np.uint8, len(bitmap_buffer))
# we do have to change the ordering of the bits
mask = 1-np.unpackbits(bitmap).reshape((len(bitmap),8))[:,::-1].reshape(-1)[:len(arrow_array)]
array = np.ma.MaskedArray(array, mask=mask)
return array
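# Round-trip sketch (illustrative): masked entries survive conversion because
# the numpy mask maps onto arrow's validity bitmap and back:
#   masked = np.ma.MaskedArray([1, 2, 3], mask=[False, True, False])
#   back = numpy_array_from_arrow_array(arrow_array_from_numpy_array(masked))
#   assert back.mask.tolist() == [False, True, False]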
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
"""Implementation of Dataset.to_arrow_table"""
names = []
arrays = []
for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
names.append(name)
arrays.append(arrow_array_from_numpy_array(array))
return pyarrow.Table.from_arrays(arrays, names)
def vaex_df_from_arrow_table(table):
from .dataset import DatasetArrow
return DatasetArrow(table=table)
| [
"[email protected]"
] | |
ea5bf03da06892d8e97f87a9e30552535d42d37a | 6baeb1bcf18a442faa53a73000aa7cbfccffb0ce | /newv/Lib/site-packages/django/template/response.py | d06017c5209e7af0e21dbcc836d94a145bc4129d | [] | no_license | bipinmilan/HouseRental | 22502b2c796f083751f10a7b1724e2566343fbc6 | c047d0d20ad4255edcc493b3ae9d75d7819d73b6 | refs/heads/master | 2022-12-09T05:43:17.588485 | 2019-12-04T06:43:36 | 2019-12-04T06:43:36 | 225,795,647 | 0 | 0 | null | 2022-12-08T05:24:44 | 2019-12-04T06:28:27 | Python | UTF-8 | Python | false | false | 5,425 | py | from django.http import HttpResponse
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# api. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public api.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super().__init__('', content_type, status, charset=charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""
Raise an exception if trying to pickle an unrendered response. Pickle
only rendered data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"""Accept a template object, path-to-template, or list of paths."""
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, str):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Return the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Add a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Render (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Return the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be iterated over.'
)
return super().__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be accessed.'
)
return super().content
@content.setter
def content(self, value):
"""Set the content for the response."""
HttpResponse.content.fset(self, value)
self._is_rendered = True
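# Usage sketch (illustrative names): the response stays lazy until render(),
# so middleware can still swap template_name or context_data:
#   response = TemplateResponse(request, 'home.html', {'user': user})
#   response.add_post_render_callback(my_logging_hook)
#   response.render()  # bakes rendered_content into response.content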
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super().__init__(template, context, content_type, status, charset, using)
self._request = request
| [
"[email protected]"
] | |
638577ab2dbc394a7aacba4ef64f0e646d9b26e6 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/Aerodynamics/Aerodynamics_20210714153638.py | 9d8de4e6619d3956f350d237612b12fe713a8aee | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,839 | py | # author: Bao Li #
# Georgia Institute of Technology #
"""Reference:
1: (2.3.1) Mattingly, Jack D., William H. Heiser, and David T. Pratt. Aircraft engine design. American Institute of Aeronautics and Astronautics, 2002.
2. Wedderspoon, J. R. "The high lift development of the A320 aircraft." International Congress of the Aeronautical Sciences, Paper. Vol. 2. No. 2. 1986.
"""
import numpy as np
import Sizing_Method.Aerodynamics.MachNmuber as Ma
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
class aerodynamics_without_pd:
"""this is the class to generate aerodynamics model without dp based on Mattingly Equ 2.9 and section 2.3.1
1. SI Units
2. All assumptions, data, tables, and figures are based on large cargo and passenger aircraft,
where we use A320neo as the baseline.
"""
def __init__(self, altitude, velocity, AR=10.3):
"""
:input h (m): altitude
v (m/s): velocity
AR: wing aspect ratio, normally between 7 and 10
:output K1: 2nd Order Coefficient for Cd
K2: 1st Order Coefficient for Cd
CD_0: drag coefficient at zero lift
"""
self.v = velocity
self.h = altitude
h = 2.43 # height of winglets
b = 35.8
self.AR = AR * (1 + 1.9 * h / b) # equation 9-88, If the wing has winglets the aspect ratio should be corrected
# Mach number based on different altitude
# The Mach number is between 0 to 0.82
self.a = Ma.mach(self.h, self.v).mach_number()
if self.a > 0.82:
print("The Mach number is larger than 0.82, something going wrong!")
self.CL_min = 0.1 # Assume constant: for most large cargo and passenger, 0.1 < Cl_min < 0.3
self.CD_min = 0.02 # Assume constant: From Mattingly Figure 2.9
e = 0.75 # wing planform efficiency factor is between 0.75 and 0.85, no more than 1
self.K_apo1 = 1 / (np.pi * self.AR * e) # self.K_apo1 = 1 / (np.pi * self.AR * e) #
# K_apo2 is between 0.001 to 0.03 for most large cargo and passenger aircraft
# Increase with Mach number increase. Thus, assume they have linear relationship
K_apo2_max = 0.028
K_apo2_min = 0.001
a_max = 0.82
a_min = 0.001
        slope = (K_apo2_max - K_apo2_min) / (a_max - a_min)
        # K_apo2 is the viscous drag due to lift (skin friction and pressure drag)
        self.K_apo2 = K_apo2_max - ((a_max - self.a) * slope)
# K_apo2 = 0.03*self.a**0.5
# K_apo2 = 0.015*np.log(self.a)+0.03
# K_apo2 = 0.0345*self.a**0.25-0.005
def K1(self):
"""2nd Order Coefficient for Cd"""
return self.K_apo1 + self.K_apo2
def K2(self):
"""1st Order Coefficient for Cd"""
return -2 * self.K_apo2 * self.CL_min
def CD_0(self):
"""drag coefficient at zero lift"""
return self.CD_min + self.K_apo2 * self.CL_min ** 2
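# Worked example (illustrative numbers): Mattingly's lift-drag polar is
# CD = K1*CL**2 + K2*CL + CD_0, so with
#   model = aerodynamics_without_pd(altitude=10000, velocity=230)
# the drag coefficient at CL = 0.5 is
#   CD = model.K1() * 0.5**2 + model.K2() * 0.5 + model.CD_0()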
class aerodynamics_with_pd:
"""Estimation of ΔCL and ΔCD"""
def __init__(self, altitude, velocity, Hp, n, W_S, P_W=90, sweep_angle=25.0,
S=124.0, b=35.8, delta_b=0.5, delta_Dp=0.1, xp=0.5, beta=0.5, AOA_p=0.0, Cf=0.009):
"""
delta_b = 0.64
:param Hp: P_motor/P_total
:param n: number of motor
:param P_W:
:param W_S:
:param S: wing area
:param b: wingspan
:param delta_b:
:param delta_Dp:
:param xp:
:param beta: slipstream correction factor:0-1
:param CL: lift coefficient
:param AOA_p: propeller angle of attack
:param Cf: skin friction coefficient
:output: 1. ΔCD_0: zero lift drag coefficient changes because of the population distribution
2. ΔCL: lift coefficient changes because of the population distribution
"""
self.h = altitude
self.v = velocity
self.n = n
self.s = S
self.delta_y1 = delta_b
self.delta_y2 = delta_Dp
self.beta = beta
self.sp = sweep_angle * np.pi / 180
self.aoa_p = AOA_p
self.cf = Cf
self.ar = b ** 2 / self.s # aspect ratio
# the diameter of the propulsion, reference 1: Equation 21
dp = self.delta_y1 * b / (self.n * (1 + self.delta_y2))
# defining a parameter that indicates how much propulsion-disk
# area is needed per unit of aircraft weight
# reference 1: Equation 22
dp2w = self.delta_y1 ** 2 / (self.n * (1 + self.delta_y2)) ** 2 * self.ar / W_S
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.t_w = P_W / self.v
# thrust coefficient Tc of the DP propulsion
# reference 1: Equation 24
tc = 1 / self.n * Hp * self.t_w / (self.rho * self.v ** 2 * dp2w)
# Actuator disk theory shows that there is a maximum theoretical propulsive efficiency
# for a given thrust coefficient
ndp_isolated = 0.76
tc_max = np.pi / 8 * ((2 / ndp_isolated - 1) ** 2 - 1)
if tc >= tc_max:
tc = tc_max
# axial induction factor at the propeller disk (ap) as a
# function of the propeller thrust coefficient, from the actuator disk theory:
# reference 1: Equation 25
ap = 0.5 * ((1 + 8 / np.pi * tc) ** 0.5 - 1)
# the contraction ratio of the slipstream at the wing
# leading edge (Rw/RP) can be expressed as
# reference 1: Equation 26
rw_rp = ((1 + ap) / (
1 + ap * (1 + 2 * xp / dp) / ((2 * xp / dp) ** 2 + 1) ** 0.5)) ** 0.5
# from conservation of mass in incompressible flow: axial induction factor
self.aw = (ap + 1) / rw_rp ** 2 - 1
self.m = Ma.mach(self.h, self.v).mach_number() # Mach number
def delta_lift_coefficient(self, CL):
"""estimate the lift coefficient changes because of pd"""
aoa_w = (CL / (2 * np.pi * self.ar)) * (2 + (
self.ar ** 2 * (1 - self.m ** 2) * (1 + (np.tan(self.sp)) ** 2 / (1 - self.m ** 2)) + 4) ** 0.5)
delta_cl = 2 * np.pi * ((np.sin(aoa_w) - self.aw * self.beta * np.sin(self.aoa_p - aoa_w))
* ((self.aw * self.beta) ** 2 + 2 * self.aw * self.beta * np.cos(self.aoa_p) + 1) ** 0.5
- np.sin(aoa_w))
delta_cl = delta_cl * self.delta_y1
return delta_cl
def delta_CD_0(self):
"""estimate the zero lift drag coefficient changes because of the population distribution"""
delta_cd0 = self.delta_y1 * self.aw ** 2 * self.cf
return delta_cd0
| [
"[email protected]"
] | |
62b7554daa56c6497a148072ef4e2ba0e2acb777 | 56b4d00870af18752b4414495b08e2ec3adf3ae4 | /tests/clims/models/test_substance_visualize.py | 00928f5bc205cc98eb747892cc05a823427582ae | [
"BSD-2-Clause"
] | permissive | commonlims/commonlims | 26c3f937eaa18e6935c5d3fcec823053ab7fefd9 | 36a02ed244c7b59ee1f2523e64e4749e404ab0f7 | refs/heads/develop | 2021-07-01T17:20:46.586630 | 2021-02-02T08:53:22 | 2021-02-02T08:53:22 | 185,200,241 | 4 | 1 | NOASSERTION | 2021-02-02T08:53:23 | 2019-05-06T13:16:37 | Python | UTF-8 | Python | false | false | 1,206 | py | from __future__ import absolute_import
from tests.clims.models.test_substance import SubstanceTestCase
class TestSubstance(SubstanceTestCase):
def setUp(self):
self.has_context()
def test_can_render_substance_graph(self):
sample1 = self.create_gemstone() # sample1.v1
original_id = sample1.id
assert (sample1.id, sample1.version) == (original_id, 1) # sample1.v1
aliquot1 = sample1.create_child() # aliquot1.v1 (from sample1.v1)
aliquot1_id = aliquot1.id
assert (aliquot1.id, aliquot1.version) == (aliquot1_id, 1) # aliquot1.v1
sample1.color = 'red'
sample1.save()
assert (sample1.id, sample1.version) == (original_id, 2) # sample1.v2
sample1.color = 'blue'
sample1.save()
assert (sample1.id, sample1.version) == (original_id, 3) # sample1.v3
aliquot2 = sample1.create_child()
assert aliquot2.version == 1
assert len(aliquot2.parents) == 1
assert (aliquot2.parents[0].id, aliquot2.parents[0].version) == (original_id, 3)
ancestry = sample1.to_ancestry() # returns everything with the same origins (i.e. sample1)
ancestry.to_svg()
| [
"[email protected]"
] | |
b7eeb1f523e69ef574283ab11434784584907c21 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ipv6.py | aee77725a61a6f8228b403f3188140817c106249 | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 33,184 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Ipv6(Base):
"""The Ipv6 class encapsulates a user managed ipv6 node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Ipv6 property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
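    Illustrative use (parent objects and Multivalue helper names are assumptions):
        ipv6 = ethernet.Ipv6.add(Name='IPv6-1')
        ipv6.Address.Single('2000:0:0:1::1')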
"""
_SDM_NAME = 'ipv6'
def __init__(self, parent):
super(Ipv6, self).__init__(parent)
@property
def Bfdv6Interface(self):
"""An instance of the Bfdv6Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface.Bfdv6Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface import Bfdv6Interface
return Bfdv6Interface(self)
@property
def BgpIpv6Peer(self):
"""An instance of the BgpIpv6Peer class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer.BgpIpv6Peer)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer import BgpIpv6Peer
return BgpIpv6Peer(self)
@property
def Dhcpv6relayAgent(self):
"""An instance of the Dhcpv6relayAgent class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6relayagent.Dhcpv6relayAgent)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6relayagent import Dhcpv6relayAgent
return Dhcpv6relayAgent(self)
@property
def Dhcpv6server(self):
"""An instance of the Dhcpv6server class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6server.Dhcpv6server)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6server import Dhcpv6server
return Dhcpv6server(self)
@property
def Greoipv6(self):
"""An instance of the Greoipv6 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.greoipv6.Greoipv6)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.greoipv6 import Greoipv6
return Greoipv6(self)
@property
def Ipv6sr(self):
"""An instance of the Ipv6sr class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6sr.Ipv6sr)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6sr import Ipv6sr
return Ipv6sr(self)
@property
def LdpBasicRouterV6(self):
"""An instance of the LdpBasicRouterV6 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6.LdpBasicRouterV6)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6 import LdpBasicRouterV6
return LdpBasicRouterV6(self)
@property
def LdpTargetedRouterV6(self):
"""An instance of the LdpTargetedRouterV6 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6.LdpTargetedRouterV6)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6 import LdpTargetedRouterV6
return LdpTargetedRouterV6(self)
@property
def Ldpv6ConnectedInterface(self):
"""An instance of the Ldpv6ConnectedInterface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpv6connectedinterface.Ldpv6ConnectedInterface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpv6connectedinterface import Ldpv6ConnectedInterface
return Ldpv6ConnectedInterface(self)
@property
def MldHost(self):
"""An instance of the MldHost class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldhost.MldHost)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldhost import MldHost
return MldHost(self)
@property
def MldQuerier(self):
"""An instance of the MldQuerier class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldquerier.MldQuerier)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldquerier import MldQuerier
return MldQuerier(self)
@property
def Ntpclock(self):
"""An instance of the Ntpclock class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ntpclock.Ntpclock)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ntpclock import Ntpclock
return Ntpclock(self)
@property
def Ospfv3(self):
"""An instance of the Ospfv3 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3.Ospfv3)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3 import Ospfv3
return Ospfv3(self)
@property
def PimV6Interface(self):
"""An instance of the PimV6Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface.PimV6Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface import PimV6Interface
return PimV6Interface(self)
@property
def Ptp(self):
"""An instance of the Ptp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ptp.Ptp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ptp import Ptp
return Ptp(self)
@property
def Tag(self):
"""An instance of the Tag class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag.Tag)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag import Tag
return Tag(self)
@property
def Address(self):
"""IPv6 addresses of the devices
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('address')
@property
def ConnectedVia(self):
"""List of layers this layer used to connect to the wire
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('connectedVia')
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute('connectedVia', value)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def Errors(self):
"""A list of errors that have occurred
Returns:
list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
"""
return self._get_attribute('errors')
@property
def GatewayIp(self):
"""Gateways of the layer
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('gatewayIp')
@property
def ManualGatewayMac(self):
"""User specified Gateway MAC addresses
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('manualGatewayMac')
@property
def Multiplier(self):
"""Number of layer instances per parent instance (multiplier)
Returns:
number
"""
return self._get_attribute('multiplier')
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute('multiplier', value)
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def Prefix(self):
"""The length (in bits) of the mask to be used in conjunction with all the addresses created in the range
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('prefix')
@property
def ResolveGateway(self):
"""Enables the gateway MAC address discovery.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('resolveGateway')
@property
def ResolvedGatewayMac(self):
"""The resolved gateway's MAC addresses
Returns:
list(str)
"""
return self._get_attribute('resolvedGatewayMac')
@property
def SessionInfo(self):
"""Logs additional information about the session state.
Returns:
list(str[duplicateAddress|interfaceRemoved|none|resolveMacFailed])
"""
return self._get_attribute('sessionInfo')
@property
def SessionStatus(self):
"""Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
Returns:
list(str[down|notStarted|up])
"""
return self._get_attribute('sessionStatus')
@property
def StackedLayers(self):
"""List of secondary (many to one) child layer protocols
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('stackedLayers')
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute('stackedLayers', value)
@property
def StateCounts(self):
"""A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Returns:
dict(total:number,notStarted:number,down:number,up:number)
"""
return self._get_attribute('stateCounts')
@property
def Status(self):
"""Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
str(configured|error|mixed|notStarted|started|starting|stopping)
"""
return self._get_attribute('status')
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new ipv6 node on the server and retrieves it in this instance.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
Returns:
self: This instance with all currently retrieved ipv6 data using find and the newly added ipv6 data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the ipv6 data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, Multiplier=None, Name=None, ResolvedGatewayMac=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves ipv6 data from the server.
All named parameters support regex and can be used to selectively retrieve ipv6 data from the server.
By default the find method takes no parameters and will retrieve all ipv6 data from the server.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
ResolvedGatewayMac (list(str)): The resolved gateway's MAC addresses
SessionInfo (list(str[duplicateAddress|interfaceRemoved|none|resolveMacFailed])): Logs additional information about the session state.
SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
self: This instance with matching ipv6 data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of ipv6 data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the ipv6 data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Address=None, GatewayIp=None, ManualGatewayMac=None, Prefix=None, ResolveGateway=None):
"""Base class infrastructure that gets a list of ipv6 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Address (str): optional regex of address
GatewayIp (str): optional regex of gatewayIp
ManualGatewayMac (str): optional regex of manualGatewayMac
Prefix (str): optional regex of prefix
ResolveGateway (str): optional regex of resolveGateway
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
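    # Illustrative filter (regex values are made up):
    #   device_ids = ipv6.get_device_ids(Address='^2000:', ResolveGateway='true')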
def FetchAndUpdateConfigFromCloud(self, Mode):
"""Executes the fetchAndUpdateConfigFromCloud operation on the server.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
Mode (str):
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)
def RestartDown(self):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def SendNs(self):
"""Executes the sendNs operation on the server.
Send Neighbor Solicitation request to configured gateway IP address to resolve Gateway MAC for selected items.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('SendNs', payload=locals(), response_object=None)
def SendNs(self, SessionIndices):
"""Executes the sendNs operation on the server.
Send Neighbor Solicitation request to configured gateway IP address to resolve Gateway MAC for selected items.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('SendNs', payload=locals(), response_object=None)
def SendNs(self, SessionIndices):
"""Executes the sendNs operation on the server.
Send Neighbor Solicitation request to configured gateway IP address to resolve Gateway MAC for selected items.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('SendNs', payload=locals(), response_object=None)
def SendNsManual(self, DestIP):
"""Executes the sendNsManual operation on the server.
Send Neighbor Solicitation request to specified IP address for selected items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIP (str): This parameter requires a destIP of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendNsManual', payload=locals(), response_object=None)
def SendNsManual(self, DestIP, SessionIndices):
"""Executes the sendNsManual operation on the server.
Send Neighbor Solicitation request to specified IP address for selected items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIP (str): This parameter requires a destIP of type kString
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendNsManual', payload=locals(), response_object=None)
def SendNsManual(self, SessionIndices, DestIP):
"""Executes the sendNsManual operation on the server.
Send Neighbor Solicitation request to specified IP address for selected items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a destIP of type kString
DestIP (str): This parameter requires a string of session numbers 1-4;6;7-12
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendNsManual', payload=locals(), response_object=None)
def SendPing(self, DestIP):
"""Executes the sendPing operation on the server.
Send ping for selected IPv6 items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIP (str): This parameter requires a destIP of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def SendPing(self, DestIP, SessionIndices):
"""Executes the sendPing operation on the server.
Send ping for selected IPv6 items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIP (str): This parameter requires a destIP of type kString
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def SendPing(self, SessionIndices, DestIP):
"""Executes the sendPing operation on the server.
Send ping for selected IPv6 items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
            SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
            DestIP (str): This parameter requires a destIP of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def Start(self):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
| [
"[email protected]"
] | |
d29fc2511d456085db14c97843725d23a1057313 | b2c0517a0421c32f6782d76e4df842875d6ffce5 | /Algorithms/Tree/637. Average of Levels in Binary Tree.py | dc59a5deed5812ed84ce07c82b0f4ad35cf9ec86 | [] | no_license | SuYuxi/yuxi | e875b1536dc4b363194d0bef7f9a5aecb5d6199a | 45ad23a47592172101072a80a90de17772491e04 | refs/heads/master | 2022-10-04T21:29:42.017462 | 2022-09-30T04:00:48 | 2022-09-30T04:00:48 | 66,703,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
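        # BFS one level at a time: the inner loop drains every node currently
        # on `stack` (one full tree level) while collecting children into
        # `temp`, which then becomes the next level.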
res = list()
if(not root):
return res
stack = list()
stack.append(root)
while(stack):
temp = list()
Sum = 0
count = 0
while(stack):
node = stack.pop()
Sum += node.val
count += 1
if(node.left):
temp.append(node.left)
if(node.right):
temp.append(node.right)
stack = temp
            res.append(float(Sum) / count)  # true division keeps the average exact under Python 2 as well
return res
| [
"[email protected]"
] | |
a9c01d49f035fa98e11a770a4c38558c0ae757c1 | 2023cc7a0680af1758c0f667a1eae215c58a806b | /list_comprehension/list_comprehensions_test.py | b9355a9769baaa091f774f56ede82ba9443da35c | [] | no_license | EmpowerSecurityAcademy/master_repo | cc157be6441755903cbd7694e321c44a22d342aa | a2b189fe480e02fc9e83dcbbd3a6c84d480cf2b7 | refs/heads/master | 2021-05-01T12:30:05.921207 | 2016-08-31T22:56:13 | 2016-08-31T22:56:13 | 66,875,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import unittest
from list_comprehensions import *
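# The module under test is not shown here; a minimal sketch that would satisfy
# the active test below (an assumed implementation -- the real
# list_comprehensions.py may differ):
#
#     def even_numbers(numbers):
#         return [n for n in numbers if n % 2 == 0]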
class TestNumbers(unittest.TestCase):
def test_even_numbers(self):
result = even_numbers([1, 2, 4, 5, 7, 8])
self.assertEqual(result, [2, 4, 8])
# def test_start_with_a(self):
# result = start_with_a(["apple", "orange", "carrot"])
# self.assertEqual(result, ["apple"])
# def test_multiply_by_11_numbers_divisable_by_three(self):
# result = multiply_by_11_numbers_divisable_by_three([1, 2, 4, 9, 7, 12])
# self.assertEqual(result, [99, 132])
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
a554c517ba038a44869c29b36c0359276d048910 | 16b389c8dcace7f7d010c1fcf57ae0b3f10f88d3 | /docs/jnpr_healthbot_swagger/test/test_rule_schema_byoi_plugin_parameters.py | 26cd3b883961f232096d84c0eb6c999897aa28b2 | [
"Apache-2.0"
] | permissive | Juniper/healthbot-py-client | e4e376b074920d745f68f19e9309ede0a4173064 | 0390dc5d194df19c5845b73cb1d6a54441a263bc | refs/heads/master | 2023-08-22T03:48:10.506847 | 2022-02-16T12:21:04 | 2022-02-16T12:21:04 | 210,760,509 | 10 | 5 | Apache-2.0 | 2022-05-25T05:48:55 | 2019-09-25T05:12:35 | Python | UTF-8 | Python | false | false | 1,046 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rule_schema_byoi_plugin_parameters import RuleSchemaByoiPluginParameters # noqa: E501
from swagger_client.rest import ApiException
class TestRuleSchemaByoiPluginParameters(unittest.TestCase):
"""RuleSchemaByoiPluginParameters unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRuleSchemaByoiPluginParameters(self):
"""Test RuleSchemaByoiPluginParameters"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.rule_schema_byoi_plugin_parameters.RuleSchemaByoiPluginParameters() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
f221c808e80e79fbf7f47d9405fc2c19189e9814 | dec703232249a5b4b3c8c3ecac5e369cb9ed3b3e | /project/urls.py | 85a74567a92a4c7002a7a193a16f76e85509f87c | [] | no_license | mmasterenko/ricod | 4ab8a8bfd84095ee31021ca660b7d37a64f52d91 | 8b98a4393906ad1533266aa3d5c974916e49e0cc | refs/heads/master | 2021-01-12T08:57:08.807616 | 2016-09-04T07:09:22 | 2016-09-04T07:09:22 | 76,731,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from ricod import views
urlpatterns = [
url(r'^ricodadmin/', include(admin.site.urls)),
url(r'^$', views.home, name='home'),
url(r'^catalog/', views.catalog, name='catalog'),
]
| [
"[email protected]"
] | |
c5dba810e00817877a884148ffdbb50430204751 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /6CGomPbu3dK536PH2_19.py | 2e493421cb511fa5ca0f2bec4664db39a3154aeb | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | """
Create a function that takes in a list and returns a list of the accumulating
sum.
### Examples
accumulating_list([1, 2, 3, 4]) ➞ [1, 3, 6, 10]
# [1, 3, 6, 10] can be written as [1, 1 + 2, 1 + 2 + 3, 1 + 2 + 3 + 4]
accumulating_list([1, 5, 7]) ➞ [1, 6, 13]
accumulating_list([1, 0, 1, 0, 1]) ➞ [1, 1, 2, 2, 3]
accumulating_list([]) ➞ []
### Notes
An empty list input `[]` should return an empty list `[]`.
"""
def accumulating_list(lst):
return [sum(lst[:(i + 1)]) for i in range(0, len(lst), 1)]
| [
"[email protected]"
] | |
7f311b4afa86c8ca906e108d6d07a2e08cb12f38 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2214/60627/300861.py | 243f233a14661085464368b8fe5f50eb1b4e3ef2 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | a = input()[:-1].split('+')
b = input()[:-1].split('+')
a[0] = int(a[0])
a[1] = int(a[1])
b[0] = int(b[0])
b[1] = int(b[1])
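# Complex multiplication: (a0 + a1*i) * (b0 + b1*i)
#   = (a0*b0 - a1*b1) + (a1*b0 + a0*b1)*i, since i*i = -1.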
x = a[0]*b[0] - a[1]*b[1]
y = a[1]*b[0] + a[0]*b[1]
print(str(x) + '+' + str(y) + 'i') | [
"[email protected]"
] | |
20960e289520fe9cc0888be07971cf62745d8cf5 | 0c6024603ec197e66b759d006c7e8c8ddd8c6a27 | /tests/test_docker.py | fdaf5c2d3eb7d325503b0b9c81285ccb2f92e636 | [
"Apache-2.0"
] | permissive | cloudmesh/docker-url-provider | 31f374b19b260564283d5771f319a7ebdf743598 | 95703378962e6a3aef027c11a5a94401b3f14776 | refs/heads/main | 2023-01-19T09:24:33.393770 | 2014-12-24T05:32:16 | 2014-12-24T05:32:16 | 25,997,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """ run with
nosetests -v --nocapture
"""
from __future__ import print_function
from cloudmesh_common.util import HEADING
from cloudmesh_common.logger import LOGGER, LOGGING_ON, LOGGING_OFF
log = LOGGER(__file__)
class Test:
def setup(self):
pass
def tearDown(self):
pass
def test_01_sample(self):
HEADING()
a = 1
b = 2
assert (a + b == 3)
| [
"[email protected]"
] | |
4ba2261f15df0084b6779fbded58e3a84937c7f5 | e9bb31bf22eb1edcd890d3117dc3b1e5fa929644 | /keras.3-3.0.py | e6e50017a7cc32ac975fcdffe3866373903c3c3e | [] | no_license | paulcwlin/tf.keras-test | c6de79ca3ed49997269510ec85619974f583ed82 | 9914d4e67d1c0dbe5065676c3e25a8afd710f93b | refs/heads/main | 2023-04-29T13:42:38.065355 | 2021-05-21T02:10:44 | 2021-05-21T02:10:44 | 369,390,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten
cnn = Sequential()
cnn.add(Conv2D(filters=8,
kernel_size=[3,3],
padding='same',
input_shape=[100,100,1]))
cnn.add(MaxPooling2D(pool_size=[2,2],
strides=2))
cnn.add(Flatten())
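# Expected summary: Conv2D with 'same' padding keeps the 100x100 grid and
# outputs 8 channels -> (None, 100, 100, 8) with 3*3*1*8 + 8 = 80 parameters;
# 2x2 max pooling with stride 2 halves each side -> (None, 50, 50, 8);
# Flatten -> (None, 20000).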
cnn.summary() | [
"[email protected]"
] | |
0f4179f6a499762113ccfe90e72cff07a66e38bf | a31c21bcc4486fd44dd2c5b7f364e8f0320f7dd3 | /synth_controller/synth_controller/mujoco/wrapper/core_test.py | 269565967c0a4809814b3a53e7cf903e8b14740f | [
"Apache-2.0"
] | permissive | SynthAI/SynthAI | 0cb409a4f5eb309dfc6a22d21ac78447af075a33 | 4e28fdf2ffd0eaefc0d23049106609421c9290b0 | refs/heads/master | 2020-03-19T12:49:07.246339 | 2018-06-07T23:27:51 | 2018-06-07T23:27:51 | 136,542,297 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,003 | py | # Copyright 2017 The synth_controller Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for core.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import gc
import os
# Internal dependencies.
from absl.testing import absltest
from absl.testing import parameterized
from synth_controller.mujoco.testing import assets
from synth_controller.mujoco.wrapper import core
from synth_controller.mujoco.wrapper.mjbindings import enums
from synth_controller.mujoco.wrapper.mjbindings import mjlib
import mock
import numpy as np
from six.moves import cPickle
from six.moves import xrange # pylint: disable=redefined-builtin
HUMANOID_XML_PATH = assets.get_path("humanoid.xml")
MODEL_WITH_ASSETS = assets.get_contents("model_with_assets.xml")
ASSETS = {
"texture.png": assets.get_contents("synthai.png"),
"mesh.stl": assets.get_contents("cube.stl"),
"included.xml": assets.get_contents("sphere.xml")
}
SCALAR_TYPES = (int, float)
ARRAY_TYPES = (np.ndarray,)
OUT_DIR = absltest.get_default_test_tmpdir()
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR) # Ensure that the output directory exists.
class CoreTest(parameterized.TestCase):
def setUp(self):
self.model = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
self.data = core.MjData(self.model)
def _assert_attributes_equal(self, actual_obj, expected_obj, attr_to_compare):
for name in attr_to_compare:
actual_value = getattr(actual_obj, name)
expected_value = getattr(expected_obj, name)
try:
if isinstance(expected_value, np.ndarray):
np.testing.assert_array_equal(actual_value, expected_value)
else:
self.assertEqual(actual_value, expected_value)
except AssertionError as e:
self.fail("Attribute '{}' differs from expected value: {}"
.format(name, str(e)))
def _assert_structs_equal(self, expected, actual):
for name in set(dir(actual) + dir(expected)):
if not name.startswith("_"):
expected_value = getattr(expected, name)
actual_value = getattr(actual, name)
self.assertEqual(
expected_value,
actual_value,
msg="struct field '{}' has value {}, expected {}".format(
name, actual_value, expected_value))
def testLoadXML(self):
with open(HUMANOID_XML_PATH, "r") as f:
xml_string = f.read()
model = core.MjModel.from_xml_string(xml_string)
core.MjData(model)
with self.assertRaises(TypeError):
core.MjModel()
with self.assertRaises(core.Error):
core.MjModel.from_xml_path("/path/to/nonexistent/model/file.xml")
xml_with_warning = """
<mujoco>
<size njmax='2'/>
<worldbody>
<body pos='0 0 0'>
<geom type='box' size='.1 .1 .1'/>
</body>
<body pos='0 0 0'>
<joint type='slide' axis='1 0 0'/>
<geom type='box' size='.1 .1 .1'/>
</body>
</worldbody>
</mujoco>"""
with mock.patch.object(core, "logging") as mock_logging:
core.MjModel.from_xml_string(xml_with_warning)
mock_logging.warn.assert_called_once_with(
"Error: Pre-allocated constraint buffer is full. "
"Increase njmax above 2. Time = 0.0000.")
def testLoadXMLWithAssetsFromString(self):
core.MjModel.from_xml_string(MODEL_WITH_ASSETS, assets=ASSETS)
with self.assertRaises(core.Error):
# Should fail to load without the assets
core.MjModel.from_xml_string(MODEL_WITH_ASSETS)
def testVFSFilenameTooLong(self):
limit = core._MAX_VFS_FILENAME_CHARACTERS
contents = "fake contents"
valid_filename = "a" * limit
with core._temporary_vfs({valid_filename: contents}):
pass
invalid_filename = "a" * (limit + 1)
expected_message = core._VFS_FILENAME_TOO_LONG.format(
length=(limit + 1), limit=limit, filename=invalid_filename)
with self.assertRaisesWithLiteralMatch(ValueError, expected_message):
with core._temporary_vfs({invalid_filename: contents}):
pass
def testSaveLastParsedModelToXML(self):
save_xml_path = os.path.join(OUT_DIR, "tmp_humanoid.xml")
not_last_parsed = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
last_parsed = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
# Modify the model before saving it in order to confirm that the changes are
# written to the XML.
last_parsed.geom_pos.flat[:] = np.arange(last_parsed.geom_pos.size)
core.save_last_parsed_model_to_xml(save_xml_path, check_model=last_parsed)
loaded = core.MjModel.from_xml_path(save_xml_path)
self._assert_attributes_equal(last_parsed, loaded, ["geom_pos"])
core.MjData(loaded)
# Test that `check_model` results in a ValueError if it is not the most
# recently parsed model.
with self.assertRaisesWithLiteralMatch(
ValueError, core._NOT_LAST_PARSED_ERROR):
core.save_last_parsed_model_to_xml(save_xml_path,
check_model=not_last_parsed)
def testBinaryIO(self):
bin_path = os.path.join(OUT_DIR, "tmp_humanoid.mjb")
self.model.save_binary(bin_path)
core.MjModel.from_binary_path(bin_path)
byte_string = self.model.to_bytes()
core.MjModel.from_byte_string(byte_string)
def testDimensions(self):
self.assertEqual(self.data.qpos.shape[0], self.model.nq)
self.assertEqual(self.data.qvel.shape[0], self.model.nv)
self.assertEqual(self.model.body_pos.shape, (self.model.nbody, 3))
def testStep(self):
t0 = self.data.time
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertEqual(self.data.time, t0 + self.model.opt.timestep)
    self.assertTrue(np.all(np.isfinite(self.data.qpos[:])))
    self.assertTrue(np.all(np.isfinite(self.data.qvel[:])))
def testMultipleData(self):
data2 = core.MjData(self.model)
self.assertNotEqual(self.data.ptr, data2.ptr)
t0 = self.data.time
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertEqual(self.data.time, t0 + self.model.opt.timestep)
self.assertEqual(data2.time, 0)
def testMultipleModel(self):
model2 = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
self.assertNotEqual(self.model.ptr, model2.ptr)
self.model.opt.timestep += 0.001
self.assertEqual(self.model.opt.timestep, model2.opt.timestep + 0.001)
def testModelName(self):
self.assertEqual(self.model.name, "humanoid")
@parameterized.named_parameters(
("_copy", lambda x: x.copy()),
("_pickle_unpickle", lambda x: cPickle.loads(cPickle.dumps(x))),)
def testCopyOrPickleModel(self, func):
timestep = 0.12345
self.model.opt.timestep = timestep
body_pos = self.model.body_pos + 1
self.model.body_pos[:] = body_pos
model2 = func(self.model)
self.assertNotEqual(model2.ptr, self.model.ptr)
self.assertEqual(model2.opt.timestep, timestep)
np.testing.assert_array_equal(model2.body_pos, body_pos)
@parameterized.named_parameters(
("_copy", lambda x: x.copy()),
("_pickle_unpickle", lambda x: cPickle.loads(cPickle.dumps(x))),)
def testCopyOrPickleData(self, func):
for _ in xrange(10):
mjlib.mj_step(self.model.ptr, self.data.ptr)
data2 = func(self.data)
attr_to_compare = ("time", "energy", "qpos", "xpos")
self.assertNotEqual(data2.ptr, self.data.ptr)
self._assert_attributes_equal(data2, self.data, attr_to_compare)
for _ in xrange(10):
mjlib.mj_step(self.model.ptr, self.data.ptr)
mjlib.mj_step(data2.model.ptr, data2.ptr)
self._assert_attributes_equal(data2, self.data, attr_to_compare)
@parameterized.named_parameters(
("_copy", lambda x: x.copy()),
("_pickle_unpickle", lambda x: cPickle.loads(cPickle.dumps(x))),)
def testCopyOrPickleStructs(self, func):
for _ in xrange(10):
mjlib.mj_step(self.model.ptr, self.data.ptr)
data2 = func(self.data)
self.assertNotEqual(data2.ptr, self.data.ptr)
for name in ["warning", "timer", "solver"]:
self._assert_structs_equal(getattr(self.data, name), getattr(data2, name))
for _ in xrange(10):
mjlib.mj_step(self.model.ptr, self.data.ptr)
mjlib.mj_step(data2.model.ptr, data2.ptr)
for expected, actual in zip(self.data.timer, data2.timer):
self._assert_structs_equal(expected, actual)
@parameterized.parameters(
("right_foot", "body", 6),
("right_foot", enums.mjtObj.mjOBJ_BODY, 6),
("left_knee", "joint", 11),
("left_knee", enums.mjtObj.mjOBJ_JOINT, 11))
def testNamesIds(self, name, object_type, object_id):
output_id = self.model.name2id(name, object_type)
self.assertEqual(object_id, output_id)
output_name = self.model.id2name(object_id, object_type)
self.assertEqual(name, output_name)
def testNamesIdsExceptions(self):
with self.assertRaisesRegexp(core.Error, "does not exist"):
self.model.name2id("nonexistent_body_name", "body")
with self.assertRaisesRegexp(core.Error, "is not a valid object type"):
self.model.name2id("right_foot", "nonexistent_type_name")
def testNamelessObject(self):
# The model in humanoid.xml contains a single nameless camera.
name = self.model.id2name(0, "camera")
self.assertEqual("", name)
def testWarningCallback(self):
self.data.qpos[0] = np.inf
with mock.patch.object(core, "logging") as mock_logging:
mjlib.mj_step(self.model.ptr, self.data.ptr)
mock_logging.warn.assert_called_once_with(
"Nan, Inf or huge value in QPOS at DOF 0. The simulation is unstable. "
"Time = 0.0000.")
def testErrorCallback(self):
with mock.patch.object(core, "logging") as mock_logging:
mjlib.mj_activate(b"nonexistent_activation_key")
mock_logging.fatal.assert_called_once_with(
"Could not open activation key file nonexistent_activation_key")
def testSingleCallbackContext(self):
callback_was_called = [False]
def callback(unused_model, unused_data):
callback_was_called[0] = True
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertFalse(callback_was_called[0])
class DummyError(RuntimeError):
pass
try:
with core.callback_context("mjcb_passive", callback):
# Stepping invokes the `mjcb_passive` callback.
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertTrue(callback_was_called[0])
# Exceptions should not prevent `mjcb_passive` from being reset.
raise DummyError("Simulated exception.")
except DummyError:
pass
# `mjcb_passive` should have been reset to None.
callback_was_called[0] = False
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertFalse(callback_was_called[0])
def testNestedCallbackContexts(self):
last_called = [None]
outer_called = "outer called"
inner_called = "inner called"
def outer(unused_model, unused_data):
last_called[0] = outer_called
def inner(unused_model, unused_data):
last_called[0] = inner_called
with core.callback_context("mjcb_passive", outer):
# This should execute `outer` a few times.
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertEqual(last_called[0], outer_called)
with core.callback_context("mjcb_passive", inner):
# This should execute `inner` a few times.
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertEqual(last_called[0], inner_called)
# When we exit the inner context, the `mjcb_passive` callback should be
# reset to `outer`.
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertEqual(last_called[0], outer_called)
# When we exit the outer context, the `mjcb_passive` callback should be
# reset to None, and stepping should not affect `last_called`.
last_called[0] = None
mjlib.mj_step(self.model.ptr, self.data.ptr)
self.assertIsNone(last_called[0])
def testDisableFlags(self):
xml_string = """
<mujoco>
<option gravity="0 0 -9.81"/>
<worldbody>
<geom name="floor" type="plane" pos="0 0 0" size="10 10 0.1"/>
<body name="cube" pos="0 0 0.1">
<geom type="box" size="0.1 0.1 0.1" mass="1"/>
<site name="cube_site" type="box" size="0.1 0.1 0.1"/>
<joint type="slide"/>
</body>
</worldbody>
<sensor>
<touch name="touch_sensor" site="cube_site"/>
</sensor>
</mujoco>
"""
model = core.MjModel.from_xml_string(xml_string)
data = core.MjData(model)
for _ in xrange(100): # Let the simulation settle for a while.
mjlib.mj_step(model.ptr, data.ptr)
# With gravity and contact enabled, the cube should be stationary and the
# touch sensor should give a reading of ~9.81 N.
self.assertAlmostEqual(data.qvel[0], 0, places=4)
self.assertAlmostEqual(data.sensordata[0], 9.81, places=2)
# If we disable both contacts and gravity then the cube should remain
# stationary and the touch sensor should read zero.
with model.disable("contact", "gravity"):
mjlib.mj_step(model.ptr, data.ptr)
self.assertAlmostEqual(data.qvel[0], 0, places=4)
self.assertEqual(data.sensordata[0], 0)
# If we disable contacts but not gravity then the cube should fall through
# the floor.
with model.disable(enums.mjtDisableBit.mjDSBL_CONTACT):
for _ in xrange(10):
mjlib.mj_step(model.ptr, data.ptr)
self.assertLess(data.qvel[0], -0.1)
def testDisableFlagsExceptions(self):
with self.assertRaisesRegexp(ValueError, "not a valid flag name"):
with self.model.disable("invalid_flag_name"):
pass
with self.assertRaisesRegexp(ValueError,
"not a value in `enums.mjtDisableBit`"):
with self.model.disable(-99):
pass
@parameterized.named_parameters(
("MjModel",
lambda _: core.MjModel.from_xml_path(HUMANOID_XML_PATH),
"mj_deleteModel"),
("MjData",
lambda self: core.MjData(self.model),
"mj_deleteData"),
("MjvScene",
lambda _: core.MjvScene(),
"mjv_freeScene"))
def testFree(self, constructor, destructor_name):
for _ in xrange(5):
destructor = getattr(mjlib, destructor_name)
with mock.patch.object(
core.mjlib, destructor_name, wraps=destructor) as mock_destructor:
wrapper = constructor(self)
expected_address = ctypes.addressof(wrapper.ptr.contents)
wrapper.free()
self.assertIsNone(wrapper.ptr)
mock_destructor.assert_called_once()
pointer = mock_destructor.call_args[0][0]
actual_address = ctypes.addressof(pointer.contents)
self.assertEqual(expected_address, actual_address)
# Explicit freeing should not break any automatic GC triggered later.
del wrapper
gc.collect()
def _get_attributes_test_params():
model = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
data = core.MjData(model)
# Get the names of the non-private attributes of model and data through
# introspection. These are passed as parameters to each of the test methods
# in AttributesTest.
array_args = []
scalar_args = []
skipped_args = []
for parent_name, parent_obj in zip(("model", "data"), (model, data)):
for attr_name in dir(parent_obj):
if not attr_name.startswith("_"): # Skip 'private' attributes
args = (parent_name, attr_name)
attr = getattr(parent_obj, attr_name)
if isinstance(attr, ARRAY_TYPES):
array_args.append(args)
elif isinstance(attr, SCALAR_TYPES):
scalar_args.append(args)
elif callable(attr):
# Methods etc. should be covered specifically in CoreTest.
continue
else:
skipped_args.append(args)
return array_args, scalar_args, skipped_args
_array_args, _scalar_args, _skipped_args = _get_attributes_test_params()
class AttributesTest(parameterized.TestCase):
"""Generic tests covering attributes of MjModel and MjData."""
# Iterates over ('parent_name', 'attr_name') tuples
@parameterized.parameters(*_array_args)
def testReadWriteArray(self, parent_name, attr_name):
attr = getattr(getattr(self, parent_name), attr_name)
if not isinstance(attr, ARRAY_TYPES):
raise TypeError("{}.{} has incorrect type {!r} - must be one of {!r}."
.format(parent_name, attr_name, type(attr), ARRAY_TYPES))
# Check that we can read the contents of the array
old_contents = attr[:]
# Don't write to integer arrays since these might contain pointers.
if not np.issubdtype(old_contents.dtype, int):
# Write unique values to the array, check that we can read them back.
new_contents = np.arange(old_contents.size, dtype=old_contents.dtype)
new_contents.shape = old_contents.shape
attr[:] = new_contents
np.testing.assert_array_equal(new_contents, attr[:])
self._take_steps() # Take a few steps, check that we don't get segfaults.
@parameterized.parameters(*_scalar_args)
def testReadWriteScalar(self, parent_name, attr_name):
parent_obj = getattr(self, parent_name)
# Check that we can read the value.
attr = getattr(parent_obj, attr_name)
if not isinstance(attr, SCALAR_TYPES):
raise TypeError("{}.{} has incorrect type {!r} - must be one of {!r}."
.format(parent_name, attr_name, type(attr), SCALAR_TYPES))
# Don't write to integers since these might be pointers.
if not isinstance(attr, int):
# Set the value of this attribute, check that we can read it back.
new_value = type(attr)(99)
setattr(parent_obj, attr_name, new_value)
self.assertEqual(new_value, getattr(parent_obj, attr_name))
self._take_steps() # Take a few steps, check that we don't get segfaults.
@parameterized.parameters(*_skipped_args)
@absltest.unittest.skip("No tests defined for attributes of this type.")
def testSkipped(self, *unused_args):
# This is a do-nothing test that indicates where we currently lack coverage.
pass
def setUp(self):
self.model = core.MjModel.from_xml_path(HUMANOID_XML_PATH)
self.data = core.MjData(self.model)
def _take_steps(self, n=5):
for _ in xrange(n):
mjlib.mj_step(self.model.ptr, self.data.ptr)
if __name__ == "__main__":
absltest.main()
| [
"[email protected]"
] | |
01905e300f6adc6e85e2b45520152fcab72bb167 | 4a27c69443bbbc44b8a826e059fcbb0ae38e0b09 | /src/apps/plant/api/serializers.py | adcf8b3b25e6ac48d37473cac21743f61b8aff69 | [] | no_license | amir-khakshour/3meg | 3b102e694fb9e07221e106ada4e51083ef97738f | 9573797c252480dca266d320ea2e97372c2c2b7a | refs/heads/master | 2022-12-11T12:54:15.340521 | 2020-02-19T09:21:16 | 2020-02-19T09:21:16 | 240,120,884 | 0 | 0 | null | 2022-12-08T03:36:48 | 2020-02-12T21:39:46 | Python | UTF-8 | Python | false | false | 927 | py | from django.conf import settings
from rest_framework import serializers
from ..models import DataPoint, Plant
class PlantSerializer(serializers.ModelSerializer):
class Meta:
model = Plant
fields = '__all__'
class DataPointSerializer(serializers.ModelSerializer):
date_created = serializers.DateTimeField(read_only=True)
class Meta:
model = DataPoint
fields = '__all__'
class DataPointUpdateSerializer(serializers.Serializer):
after = serializers.DateField(format=settings.DATAPOINT_DATE_FILTER_FORMAT,
input_formats=[settings.DATAPOINT_DATE_FILTER_FORMAT, 'iso-8601'], required=True)
before = serializers.DateField(format=settings.DATAPOINT_DATE_FILTER_FORMAT,
input_formats=[settings.DATAPOINT_DATE_FILTER_FORMAT, 'iso-8601'], required=True)
class Meta:
fields = ('after', 'before',)
| [
"[email protected]"
] | |
466bcae133a79639ffac7ea43c69d6dddece5cc3 | 1764780f3bd3cc23b537cb5c59efa08725495c73 | /pjt-back/accounts/migrations/0001_initial.py | de77de9945891d66010ccf66f5cde05117a07fbd | [] | no_license | GaYoung87/GEEG | c048c420a266ed621d66bcd74070953f7e56e12d | 6e31d86e8165e611d16f21cb0ac10ae8c2081f5b | refs/heads/master | 2020-09-15T05:46:13.464753 | 2019-11-29T06:31:35 | 2019-11-29T06:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | # Generated by Django 2.2.7 on 2019-11-28 06:48
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('age', models.IntegerField(null=True)),
('birthday', models.DateField(default='1993-06-16')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
904d4ab48a948edeb19066e5b38eef81900b58bc | 78d23de227a4c9f2ee6eb422e379b913c06dfcb8 | /LeetCode/963.py | 505d68a6549dbbab3bb2fa8d985f20898927548e | [] | no_license | siddharthcurious/Pythonic3-Feel | df145293a3f1a7627d08c4bedd7e22dfed9892c0 | 898b402b7a65073d58c280589342fc8c156a5cb1 | refs/heads/master | 2020-03-25T05:07:42.372477 | 2019-09-12T06:26:45 | 2019-09-12T06:26:45 | 143,430,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | from itertools import combinations
class Solution(object):
    def findArea(self, points):
        # The four vertices arrive in arbitrary order (itertools.combinations),
        # so a shoelace walk around them is not safe.  For a rectangle, the two
        # smallest squared distances from one vertex to the other three are the
        # squared side lengths (the largest is the diagonal), so the area is
        # the square root of their product.
        x1, y1 = points[0]
        dd = sorted(self.sqr(x - x1) + self.sqr(y - y1) for x, y in points[1:])
        return (dd[0] * dd[1]) ** 0.5
def sqr(self, num):
return num * num
def isRectangle(self, points):
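        # Rectangle test: four points form a rectangle exactly when they are
        # all equidistant from their common centroid, i.e. they lie on a
        # circle centered at the vertex average, which forces the diagonals
        # to be equal and to bisect each other.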
x1, y1 = points[0]
x2, y2 = points[1]
x3, y3 = points[2]
x4, y4 = points[3]
cx = (x1 + x2 + x3 + x4)/4
cy = (y1 + y2 + y3 + y4)/4
dd1 = self.sqr(cx - x1) + self.sqr(cy - y1)
dd2 = self.sqr(cx - x2) + self.sqr(cy - y2)
dd3 = self.sqr(cx - x3) + self.sqr(cy - y3)
dd4 = self.sqr(cx - x4) + self.sqr(cy - y4)
return dd1 == dd2 and dd1 == dd3 and dd1 == dd4
def minAreaFreeRect(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
min_area = float("inf")
points_combs = combinations(points, 4)
for p in points_combs:
r = self.isRectangle(p)
            if r:
a = self.findArea(p)
if min_area > a:
min_area = a
        return min_area if min_area != float("inf") else 0
if __name__ == "__main__":
s = Solution()
points = [[1,2],[2,1],[1,0],[0,1], [2,3]]
points = [[0,1],[2,1],[1,1],[1,0],[2,0]]
points = [[3,1],[1,1],[0,1],[2,1],[3,3],[3,2],[0,2],[2,3]]
r = s.minAreaFreeRect(points)
print(r) | [
"[email protected]"
] | |
375e9fa42abf66b0ce774aa92bfab88473ddbff4 | 21963071945c7bb54a7f126da536da3c2ff40cbe | /Lesson05/binaryTree.py | 8d9b632d330ef46fba9f1bbe6d99dd55096c4eb4 | [] | no_license | SaretMagnoslove/Data_structures_and_Algorithms_python-Udacity | f05770c112b91206b798305da3dd5f3e2a93a7d9 | 640c30bd39645bbddad10ac50823434ab82b4354 | refs/heads/master | 2020-03-21T08:03:41.210949 | 2018-06-29T17:43:42 | 2018-06-29T17:43:42 | 138,317,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | class Node(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
def search(self, find_val):
"""Return True if the value
is in the tree, return
False otherwise."""
return self.preorder_search(self.root, find_val)
def print_tree(self):
"""Print out all tree nodes
as they are visited in
a pre-order traversal."""
return self.preorder_print(self.root, '')[:-1]
def preorder_search(self, start, find_val):
"""Helper method - use this to create a
recursive search solution."""
if start:
return True if start.value == find_val else self.preorder_search(start.left, find_val) or self.preorder_search(start.right, find_val)
return False
def preorder_print(self, start, traversal):
"""Helper method - use this to create a
recursive print solution."""
if start:
traversal += str(start.value) + '-'
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right,traversal)
return traversal
# Set up tree
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
# Test search
# Should be True
print (tree.search(4))
# Should be False
print (tree.search(6))
# Test print_tree
# Should be 1-2-4-5-3
print( tree.print_tree()) | [
"[email protected]"
] | |
1f89fc3c868206f312b83b1732337ece61d67b5a | 8813b9e9894ead566efc0ea192a88cd6546ae29e | /ninjag/tk/ioTK/save_text.py | 26cae83c4e3b3846fb06a20ec983e3eb86400daf | [
"MIT"
] | permissive | yuhangwang/ninjag-python | 1638f396711533c2b540dee2d70240fa25009c86 | b42b447260eebdd6909246a5f7bb4098bfa3c0e1 | refs/heads/master | 2021-01-12T04:04:13.395305 | 2017-02-20T19:12:52 | 2017-02-20T19:12:52 | 77,489,214 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | def save_text(f_out, *text):
"""Save text to file
Args:
f_out (str): output file name
text (str): variable number of strings of
file content
"""
with open(f_out, 'w') as OUT:
OUT.write("\n".join(text) + "\n")
| [
"[email protected]"
] | |
c780f7566c732bdec5336ac8598da93c5a0a1b5b | 87bae60470bbe5316d7da8bc4a8709e33b40e2b5 | /setup.py | e5bd575e22fb71ebd994629da6b018f29618bf9b | [] | no_license | saxix/django-whatsnew | c11f0d5fa87e5e1c5c7648e8162bd39c64e69302 | 68b33e5e2599a858e00eda53e1c13a503e1b3856 | refs/heads/develop | 2021-01-19T12:39:41.876635 | 2015-01-28T16:18:29 | 2015-01-28T16:18:29 | 18,416,313 | 0 | 2 | null | 2015-01-28T16:18:30 | 2014-04-03T20:00:33 | Python | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
dirname = 'whatsnew'
app = __import__(dirname)
setup(
name=app.NAME,
version=app.get_version(),
url='https://github.com/saxix/django-whatsnew',
description="Simple application to manage `what's new` screen.",
author='sax',
author_email='[email protected]',
license='BSD',
packages=find_packages('.'),
include_package_data=True,
platforms=['linux'],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers'
]
)
| [
"[email protected]"
] | |
8fd2b111a6bb6157ab8fc2e2c901b7dcf47cbf51 | ad129f7fc03f10ef2b4734fa2c2b9cb9367c84fa | /Aula 15 - BREAK/Exe070.py | 574b69c6195435654ca96c47aef7bde33477767d | [] | no_license | LucasDatilioCarderelli/Exercises_CursoemVideo | c6dc287d7c08a0349867a17185474744513dbaac | 67c2d572a4817a52dababbca80513e4b977de670 | refs/heads/master | 2022-03-31T20:20:52.827370 | 2020-01-27T13:15:19 | 2020-01-27T13:15:19 | 236,491,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # Exe070 - Read the name and price of several products and report:
# the purchase total, how many products cost over R$1,000.00, and which product is the cheapest.
cont = contn = soma = menor = 0
menorn = ' '
print(f'{"LOJINHA":-^40}')
while True:
nome = str(input('Nome: '))
preço = float(input('Preço: R$').strip())
soma += preço
contn += 1
if contn == 1 or preço < menor:
menor = preço
menorn = nome
if preço >= 1000:
cont += 1
parada = ' '
while parada not in 'SN':
parada = str(input('Mais 1 produto [S/N]?: ').strip().upper()[0])
if parada == 'N':
break
print(f'Total: R${soma:.2f}')
print(f'Acima de R$1.000,00: {cont}')
print(f'O menor produto custou R${menor} ({menorn}).')
print(f'{"VOLTE SEMPRE":-^40}')
| [
"[email protected]"
] | |
177ace6fc3932a9be8e88516fc4952b327b9c170 | 68a0a4a92b6d609dbc6da04798e66209cc48a3c3 | /tqdm-auto-recommended.py | 91c24db3fd3751930bd82f96a6b7df2743f7305c | [] | no_license | bertcuyugan/loading_bar | 639ff55927fe06cbec7697510933f68c06a6f15e | ace86d757cb5115caf0b366801d03f764d59246b | refs/heads/main | 2023-07-03T12:01:27.255687 | 2021-08-09T06:06:44 | 2021-08-09T06:06:44 | 311,962,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | #!/usr/bin/python3
from tqdm.auto import tqdm
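# tqdm.auto picks the rich notebook widget when running under Jupyter and
# falls back to the plain console progress bar otherwise.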
for i in tqdm(range(100001)):
print(" ", end='\r')
| [
"[email protected]"
] | |
a8e051e4166dba43f6cc50ba51c3b0cf0686feb4 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /HLTrigger/Configuration/python/HLT_75e33/modules/hltDiEG3023IsoCaloIdHcalIsoL1SeededFilter_cfi.py | aabfec4938f1f470ccd88a2dc9ca5286f84e2174 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 1,303 | py | import FWCore.ParameterSet.Config as cms
hltDiEG3023IsoCaloIdHcalIsoL1SeededFilter = cms.EDFilter("HLTEgammaGenericQuadraticEtaFilter",
absEtaLowEdges = cms.vdouble(0.0, 0.8, 1.479, 2.0),
candTag = cms.InputTag("hltDiEG3023IsoCaloIdHgcalIsoL1SeededFilter"),
doRhoCorrection = cms.bool(False),
effectiveAreas = cms.vdouble(0.2, 0.2, 0.4, 0.5),
energyLowEdges = cms.vdouble(0.0),
etaBoundaryEB12 = cms.double(0.8),
etaBoundaryEE12 = cms.double(2.0),
l1EGCand = cms.InputTag("hltEgammaCandidatesL1Seeded"),
lessThan = cms.bool(True),
ncandcut = cms.int32(2),
rhoMax = cms.double(99999999.0),
rhoScale = cms.double(1.0),
rhoTag = cms.InputTag("hltFixedGridRhoFastjetAllCaloForEGamma"),
saveTags = cms.bool(True),
thrOverE2EB1 = cms.vdouble(0.0),
thrOverE2EB2 = cms.vdouble(0.0),
thrOverE2EE1 = cms.vdouble(0.0),
thrOverE2EE2 = cms.vdouble(0.0),
thrOverEEB1 = cms.vdouble(0.02),
thrOverEEB2 = cms.vdouble(0.02),
thrOverEEE1 = cms.vdouble(0.02),
thrOverEEE2 = cms.vdouble(0.02),
thrRegularEB1 = cms.vdouble(22),
thrRegularEB2 = cms.vdouble(22),
thrRegularEE1 = cms.vdouble(22),
thrRegularEE2 = cms.vdouble(22),
useEt = cms.bool(True),
varTag = cms.InputTag("hltEgammaHcalPFClusterIsoL1Seeded")
)
| [
"[email protected]"
] | |
8e47efee6b734ae3afc39185eb157680455e8b0f | def78b4f5764e77a12c2ba01cbeb0d41ec7dbc2b | /tests/test_wsgi_interface.py | 98e4b3cf3e98f1d65eaf7c2d03d770d939990d2d | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | aelol/falcon | e7e789cb92c75146eea4a1c627fdda278655594b | 14a9c056542a7e4a99663ee0fe298c81c27a8cdd | refs/heads/master | 2021-01-20T01:13:29.044522 | 2017-04-21T21:49:30 | 2017-04-21T21:49:30 | 89,238,339 | 2 | 0 | null | 2017-04-24T12:36:37 | 2017-04-24T12:36:37 | null | UTF-8 | Python | false | false | 1,478 | py | import re
import sys
import falcon
import falcon.testing as testing
class TestWSGIInterface(object):
def test_srmock(self):
mock = testing.StartResponseMock()
mock(falcon.HTTP_200, ())
assert mock.status == falcon.HTTP_200
assert mock.exc_info is None
mock = testing.StartResponseMock()
exc_info = sys.exc_info()
mock(falcon.HTTP_200, (), exc_info)
assert mock.exc_info == exc_info
def test_pep3333(self):
api = falcon.API()
mock = testing.StartResponseMock()
        # Simulate a web request (normally done through a WSGI server)
response = api(testing.create_environ(), mock)
# Verify that the response is iterable
assert _is_iterable(response)
# Make sure start_response was passed a valid status string
assert mock.call_count == 1
assert isinstance(mock.status, str)
        assert re.match(r'^\d+[a-zA-Z\s]+$', mock.status)
# Verify headers is a list of tuples, each containing a pair of strings
assert isinstance(mock.headers, list)
if len(mock.headers) != 0:
header = mock.headers[0]
assert isinstance(header, tuple)
assert len(header) == 2
assert isinstance(header[0], str)
assert isinstance(header[1], str)
def _is_iterable(thing):
try:
for i in thing:
break
return True
    except TypeError:
return False
| [
"[email protected]"
] | |
91d7a854f623fbb87725ebf17db1832f05482334 | 1207e317fa2837fa4cdb49150b9b2ca99dada2f3 | /sdfs/newReporting/agriculture/migrations/0001_initial.py | 71ede2ca969b0bbe65e49dd7042d57aebba46622 | [] | no_license | ericniyon/all_in_one_repo | d14cb715776f5c23851d23930145fcb707aaca1d | 9080315fbe9e8226a21bf35c49ff7662b4b095b4 | refs/heads/master | 2022-12-16T17:04:48.602534 | 2020-01-12T00:40:54 | 2020-01-12T00:40:54 | 233,317,032 | 0 | 0 | null | 2022-12-08T01:50:51 | 2020-01-12T00:30:03 | Python | UTF-8 | Python | false | false | 10,835 | py | # Generated by Django 2.2.2 on 2019-12-14 23:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('dashboard', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Insemination',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('insemination', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Insemination',
'verbose_name_plural': 'Insemination',
},
),
migrations.CreateModel(
name='InkaZizakurikiranwa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inka_zizakurikiranwa', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Inka Zizakurikiranwa',
'verbose_name_plural': 'Inka Zizakurikiranwa',
},
),
migrations.CreateModel(
name='Girinka',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('girinka', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Girinka',
'verbose_name_plural': 'Girinka',
},
),
migrations.CreateModel(
name='Vaccination',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vaccination_name', models.CharField(max_length=100)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Vaccination',
'verbose_name_plural': 'Vaccinations',
'unique_together': {('vaccination_name', 'sector')},
},
),
migrations.CreateModel(
name='UnusedTerassis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Unused Terrasis',
'verbose_name_plural': 'Unused Terrassis',
'unique_together': {('name', 'sector')},
},
),
migrations.CreateModel(
name='Umuhigo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('umuhigo', models.CharField(max_length=100)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Umuhigo',
'verbose_name_plural': 'Umuhigo',
'unique_together': {('umuhigo', 'sector')},
},
),
migrations.CreateModel(
name='Ubwanikiro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ubwanikiro', models.CharField(max_length=100)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Ubwanikiro',
'verbose_name_plural': 'Ubwanikiro',
'unique_together': {('ubwanikiro', 'sector')},
},
),
migrations.CreateModel(
name='Trainings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('training_name', models.CharField(max_length=100)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Trainings',
'verbose_name_plural': 'Trainings',
'unique_together': {('training_name', 'sector')},
},
),
migrations.CreateModel(
name='Seeds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('seed_name', models.CharField(max_length=50)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Seeds',
'verbose_name_plural': 'Seeds',
'unique_together': {('seed_name', 'sector')},
},
),
migrations.CreateModel(
name='Pumps_in_Sector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number_of_pumps', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Pumps in Sector',
'verbose_name_plural': 'Pumps in Sectors',
'unique_together': {('number_of_pumps', 'sector')},
},
),
migrations.CreateModel(
name='Ha_irrigated',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ha_irrigated', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Ha Irrigated',
'verbose_name_plural': 'Ha Irrigated',
'unique_together': {('ha_irrigated', 'sector')},
},
),
migrations.CreateModel(
name='Fertilizers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Fertilizers',
'verbose_name_plural': 'Fertilizers',
'unique_together': {('name', 'sector')},
},
),
migrations.CreateModel(
name='FertilizerImprovedSeeds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Fertilizer and Improved Seeds',
'verbose_name_plural': 'Fertilizer and Improved Seeds',
'unique_together': {('target', 'sector')},
},
),
migrations.CreateModel(
name='Crops',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('crop_name', models.CharField(max_length=50)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Crops',
'verbose_name_plural': 'Crops',
'unique_together': {('crop_name', 'sector')},
},
),
migrations.CreateModel(
name='Banana_and_Rehabilitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('target', models.PositiveIntegerField()),
('achieved', models.PositiveIntegerField(default=0)),
('sector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.Sector')),
],
options={
'verbose_name': 'Banana and Rehabilitation',
'verbose_name_plural': 'Banana and Rehabilitation',
'unique_together': {('name', 'sector')},
},
),
]
| [
"[email protected]"
] | |
a21f4e0d8bc75227d7a7e081134be283a036133b | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /20200215Python-China/0392. Is Subsequence.py | 772e0d436bb37843470e51a166049300a942b2c3 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | '''Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
Credits:
Special thanks to @pbrother for adding this problem and creating all test cases.
'''
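# A minimal sketch of the follow-up (an assumed approach, not part of the
# submitted solution below): index t once, then answer each incoming s with
# binary searches over per-character position lists.
import bisect
from collections import defaultdict
def make_subsequence_checker(t):
    # Map each character of t to the sorted list of positions where it occurs.
    index_map = defaultdict(list)
    for i, ch in enumerate(t):
        index_map[ch].append(i)
    def check(s):
        pos = 0  # smallest position in t still available for matching
        for ch in s:
            idxs = index_map[ch]
            j = bisect.bisect_left(idxs, pos)
            if j == len(idxs):
                return False
            pos = idxs[j] + 1
        return True
    return check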
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
k_s=0
k_t=0
while k_s<len(s):
while k_t<len(t) and s[k_s]!=t[k_t]:
k_t+=1
if k_t==len(t):
return False
k_s+=1
k_t+=1
return True
| [
"[email protected]"
] | |
125a74be48b8ad0a0ee346339d421c491c8a8abb | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/6b4d8efbe16141d1b280e11117525144.py | 57e2f3eea9084a568523dbd855b76d0242f6a9e3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 262 | py | #
# Skeleton file for the Python "Leap" exercise.
#
def is_leap_year(year):
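    # Gregorian rule: leap years are divisible by 4, except century years,
    # which must also be divisible by 400 (1900 -> False, 2000 -> True,
    # 2016 -> True).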
year = int(year)
    cond_1 = year % 400 == 0  # divisibility by 400 already implies 4 and 100
    cond_2 = (year % 4 == 0) and (year % 100 != 0)
return cond_1 or cond_2
| [
"[email protected]"
] | |
fe94db721f7597ce5e2e998690ce8d136cefda09 | c2e15137cd3ddfe574d06ed313f4c4039835a48b | /ACA/update_slot_stat_table.py | 970189c228d04aa09f6b271e0f0dfa3ea2ecfc89 | [] | no_license | chandra-mta/MTA | df57845577ac50f21c4b1775a441804d78060e97 | 60015d4fbbcc7e00595152fb21a8c55e2366a982 | refs/heads/master | 2023-08-18T13:05:33.397195 | 2023-08-11T14:28:40 | 2023-08-11T14:28:40 | 4,586,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,244 | py | #!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
#           update_slot_stat_table.py: update slot related stat table data files           #
# #
# author: t. isobe ([email protected]) #
# #
# last update: Feb 23, 2021 #
# #
#############################################################################################
import os
import sys
import re
import time
import random
import Chandra.Time
import numpy
#--- reading directory list
#
path = '/data/mta/Script/ACA/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(mta_dir)
import mta_common_functions as mcf
import robust_linear as rlf
#
#--- temp writing file name
#
rfname = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rfname)
oneyear = 86400.0 * 365.0
#----------------------------------------------------------------------------------
#-- update_slot_stat_table: update slot related stat table data files --
#----------------------------------------------------------------------------------
def update_slot_stat_table(this_year='', this_mon=''):
"""
update slot related stat table data files
input: none, but read from:
pos_err_mtatr, diff_mtatr, acacent_mtatr
output:
<name>_year_slope --- yearly stats
<name>_recent_slope --- most recent one year
<name>_full_slope --- entire period
stats are fitted linear slope and std of the data
"""
#
#--- find the current year and month
#
if this_year == '':
out = time.strftime('%Y:%m:%d', time.gmtime())
atemp = re.split(':', out)
this_year = int(float(atemp[0]))
this_mon = int(float(atemp[1]))
this_day = int(float(atemp[2]))
if this_day < 5:
this_mon -= 1
if this_mon < 1:
this_mon = 12
this_year -= 1
#
#--- there are four data sets to process ('acacent2' reads the upper slot columns of the acacent file)
#
for head in ['pos_err', 'diff', 'acacent', 'acacent2']:
thead = head.replace('2', '')
ifile = data_dir + thead + '_mtatr'
#
#--- initialize lists for yearly data saving
#
ytime = []
yslope = []
ystd = []
#
#--- there are 8 slots
#
for k in range(0, 8):
yslope.append([])
ystd.append([])
#
#--- initialize for recent one year stats saving
#
rslope = ''
rstd = ''
#
#--- go through each slot
#
for k in range(0, 8):
if head == 'acacent2':
pos = k + 8
else:
pos = k + 1
[t_array, d_array] = read_in_data(ifile, col=pos)
#
#--- recent one year
#
if len(t_array) > 3:
r_cut = t_array[-1] - oneyear
[slope, std] = get_slope_and_str(t_array, d_array, r_cut, t_array[-1])
rslope = rslope + slope + '\t'
rstd = rstd + std + '\t'
else:
rslope = rslope + 'na\t'
rstd = rstd + 'na\t'
#
#--- yearly
#
for year in range(1999, this_year + 1):
if k == 1:
ytime.append(str(year))
start = int(Chandra.Time.DateTime(str(year) + ':001:00:00:00').secs)
stop = int(Chandra.Time.DateTime(str(year+1) + ':001:00:00:00').secs)
if len(t_array) > 3:
[slope, std] = get_slope_and_str(t_array, d_array, start, stop)
yslope[k].append(slope)
ystd[k].append(std)
else:
yslope[k].append('nan')
ystd[k].append('nan')
#
#--- now update the data files
#
#
#--- most recent one year
#
line = rslope + rstd + '\n'
rout = data_dir + head + '_mtatr_recent_slope'
with open(rout, 'w') as fo:
fo.write(line)
#
#--- yearly
#
line = ''
for k in range(0, len(ytime)):
line = line + ytime[k] + '\t'
for m in range(0, 8):
line = line + yslope[m][k] + '\t'
for m in range(0, 8):
line = line + ystd[m][k] + '\t'
line = line + '\n'
yout = data_dir + head + '_mtatr_year_slope'
with open(yout, 'w') as fo:
fo.write(line)
#
#--- full range stats use a different set of data files in which values are already averaged over each month
#
for head in ['pos_err', 'diff', 'acacent', 'acacent2']:
thead = head.replace('2', '')
ifile = data_dir + thead + '_mtatr_month'
slp_line = ''
std_line = ''
for k in range(0, 8):
pos = k + 2
[t_array, d_array] = read_in_data(ifile, col=pos)
if len(t_array) > 3:
t_array = convert_to_fyear_list(t_array)
out = rlf.least_sq(t_array, d_array)
std = numpy.std(d_array)
slp_line = slp_line + '%2.3e\t' % out[1]
std_line = std_line + '%2.3e\t' %std
else:
slp_line = slp_line + 'na\t'
std_line = std_line + 'na\t'
line = slp_line + std_line + '\n'
fout = data_dir + head + '_mtatr_full_slope'
with open(fout, 'w') as fo:
fo.write(line)
#----------------------------------------------------------------------------------
#-- read_in_data: read the data and return the cleaned up arrays of time and data -
#----------------------------------------------------------------------------------
def read_in_data(ifile, col=1):
"""
read the data and return the cleaned up arrays of time and data
input: ifile --- a file name
col --- a column position of the data set; default: 1
we assume that the first column (0) is time in seconds from 1998.1.1
output: t_array --- an array of time
            d_array --- an array of data
"""
data = mcf.read_data_file(ifile)
if len(data) < 1:
return [[],[]]
data_set = mcf.separate_data_to_arrays(data)
t_array = numpy.array(data_set[0])
d_array = numpy.array(data_set[col])
#
#--- get rid of nan data points
#
idx = ~numpy.isnan(d_array)
t_array = t_array[idx]
d_array = d_array[idx]
#
#--- get rid of bad data (usually -999.0)
#
idx = d_array > -10
t_array = t_array[idx]
d_array = d_array[idx]
return [t_array, d_array]
#----------------------------------------------------------------------------------
#-- convert_mday_to_stime: convert year, month, mday into Chandra time --
#----------------------------------------------------------------------------------
def convert_mday_to_stime(year, month, mday):
"""
convert year, month, mday into Chandra time
input: year --- year
month --- month
            mday    --- day of the month
    output: ltime   --- time in seconds from 1998.1.1
"""
ltime = str(year) + ':' + mcf.add_leading_zero(month) + ':' + mcf.add_leading_zero(mday)
ltime = time.strftime('%Y:%j:00:00:00', time.strptime(ltime, '%Y:%m:%d'))
ltime = int(Chandra.Time.DateTime(ltime).secs)
return ltime
#----------------------------------------------------------------------------------
#-- get_slope_and_str: compute fitted slope and std of the data --
#----------------------------------------------------------------------------------
def get_slope_and_str(t_array, d_array, start, stop, cind=0):
"""
compute fitted slope and std of the data
input: t_array --- an array of time
            d_array --- an array of data
start --- period starting time in seconds from 1998.1.1
stop --- period stopping time in seconds from 1998.1.1
output: slope --- slope
std --- standard deviation of d_array
"""
#
#--- select data for the given time period
#
idx = (t_array >= start) & (t_array < stop)
t_array = t_array[idx]
d_array = d_array[idx]
#
#--- compute the stats only when there are more than 3 data points
#
if len(t_array) > 3:
#
#--- convert to ydate
#
if cind == 0:
t_array = convert_to_ydate_list(t_array)
#
#--- convert to fractional year
#
else:
t_array = convert_to_fyear_list(t_array)
#
#--- rlf.least_sq returns [<intercept>, <slope>, <err of slope>]
#
out = rlf.least_sq(t_array, d_array)
std = numpy.std(d_array)
slope = '%2.3e' % out[1]
std = '%2.3e' % std
#
#--- otherwise return 'nan'
#
else:
slope = 'nan'
std = 'nan'
return [slope, std]
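#
#--- usage sketch (added; synthetic numbers, times in seconds from 1998.1.1):
#
#       t = numpy.array([63072000.0, 63075600.0, 63079200.0, 63082800.0, 63086400.0])
#       d = numpy.array([1.00, 1.10, 0.90, 1.20, 1.05])
#       slope, std = get_slope_and_str(t, d, t[0], t[-1] + 1.0)   # both returned as strings
#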
#----------------------------------------------------------------------------------
#-- convert_to_ydate_list: convert time data in a list from time in seconds to y date
#----------------------------------------------------------------------------------
def convert_to_ydate_list(t_list):
"""
convert time data in a list from time in seconds to y date
input: t_list --- a list/an array of time data in seconds from 1998.1.1
output: t_list --- an array of time data in y date
"""
save = []
[byear, ydate] = chandratime_to_yday(t_list[0])
if mcf.is_leapyear(byear):
base = 366
else:
base = 365
for ent in t_list:
[year, ydate] = chandratime_to_yday(ent)
#
#--- if the year rolls over, extend the day count past the end of the base year
#
if year > byear:
ydate += base
save.append(ydate)
save = numpy.array(save)
return save
#--------------------------------------------------------------------------
#-- chandratime_to_yday: convert chandra time into a day of year --
#--------------------------------------------------------------------------
def chandratime_to_yday(ctime):
"""
convert chandra time into a day of year
input: ctime --- time in seconds from 1998.1.1
    output: year    --- year
            ydate   --- a day of year (fractional)
"""
atime = mcf.convert_date_format(ctime, ofmt='%Y:%j:%H:%M:%S')
btemp = re.split(':', atime)
year = float(btemp[0])
ydate = float(btemp[1])
hour = float(btemp[2])
mins = float(btemp[3])
sec = float(btemp[4])
ydate = ydate + (hour/24.0 + mins/1440.0 + sec/86400.0)
return [year, ydate]
#----------------------------------------------------------------------------------
#-- convert_to_fyear_list: convert time data in seconds in a list to fractional year in the list
#----------------------------------------------------------------------------------
def convert_to_fyear_list(t_list):
"""
convert time data in seconds in a list to fractional year in the list
input: t_list --- a list of time data in seconds from 1998.1.1
output: t_list --- an array of time data in fractional year
"""
save = []
for ent in t_list:
save.append(mcf.chandratime_to_fraq_year(ent))
save = numpy.array(save)
return save
#----------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 2:
year = int(float(sys.argv[1]))
mon = int(float(sys.argv[2]))
else:
year = ''
mon = ''
update_slot_stat_table(year, mon)
| [
"[email protected]"
] | |
81ecc501d76374282b768dd904e912cc7b87eda4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_34/782.py | 314631cc964f6be8ced98e9b41079f0b36341c92 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | #! /usr/bin/python
import sys
if len(sys.argv) != 2:
print 'USAGE: q1.py input.in'
sys.exit()
fIn = open(sys.argv[1], 'r')
param = fIn.readline().split()
L = int(param[0])
D = int(param[1])
N = int(param[2])
#print str(L)+str(D)+str(N)
words = []
for i in range(D):
    words.append(fIn.readline()[:-1])
#print words
tokens = ['' for i in range(L)]
#print len(tokens)
for i in range(N):
line = fIn.readline()[:-1]
pos = 0
for j in range(L):
m = line[pos]
if m != '(' and m != ')':
tokens[j] = m
pos = pos + 1
        if m == '(':
            while True:
                pos = pos + 1
                m = line[pos]
                if m == ')':
                    pos = pos + 1
                    break
                tokens[j] += m
# print tokens
count = 0
for j in range(D):
tag = 1
        word = words[j]
# print word
for pos in range(L):
if tokens[pos].count(word[pos]) == 0:
tag = 0
# print 'NOT'
break
if tag == 1:
count = count + 1
print 'Case #'+str(i+1)+': '+str(count)
tokens = ['' for i in range(L)]
# print '\n'
| [
"[email protected]"
] | |
c91e3350c5dd6e4d67b9b480a11112fe1ec6d399 | 190f56bb215311c293e2c07e40be3d7cc3a5e189 | /addresses/migrations/0012_auto_20201005_2323.py | 3535fdcd5265d806be49d98d077cb8380602e716 | [] | no_license | Omsinha017/Ecommerce | 457dec1c5dcc313d4fae6dff0b1d7c7f43874655 | d4e57576eef18626d458f0c06b186d0b8a6bc753 | refs/heads/master | 2023-06-21T11:21:46.495736 | 2021-07-24T09:09:16 | 2021-07-24T09:09:16 | 305,804,444 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Generated by Django 3.1 on 2020-10-05 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addresses', '0011_auto_20201005_2317'),
]
operations = [
migrations.AlterField(
model_name='address',
name='address_type',
field=models.CharField(choices=[('shipping', 'Shipping'), ('billing', 'Billing')], max_length=120),
),
]
| [
"[email protected]"
] | |
05d56ee57fb8108d00d0c956207892d6f5fd29ce | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/409/usersdata/317/79303/submittedfiles/av1_programa1.py | 3f2948ad76a641790398b54e2d495c53608bdaf4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | # ENTRADA
n = int(input('enter a number: '))
if n % 2 == 0:
    print('the number is even')
else:
    print('the number is odd')
print('----END----')
| [
"[email protected]"
] | |
4b7bc8f4343f81177c562ce45bd65b4be418f47f | c2758f58b29917e5f00cdf19389a0f55b2975eae | /examples/analysis_md2.py | d720cbf028ecc04f0d1914b4b2ce5424d136a8a5 | [] | no_license | rdemaria/pyoptics | 4cf0b59e3524996de84c38a98a1444509246e9d4 | 6679a7589c751285757b8166c76aeaa1359742da | refs/heads/master | 2023-07-08T19:55:50.364373 | 2023-06-30T09:50:12 | 2023-06-30T09:50:12 | 25,566,323 | 6 | 11 | null | 2023-06-14T12:31:15 | 2014-10-22T06:47:52 | Python | UTF-8 | Python | false | false | 1,538 | py |
# NOTE: this file is a pasted interactive (IPython) analysis session; the
# imports below are assumptions added so the names used further down resolve.
import cernlogdb                      # CERN logging-database client (dbget, t2num, plot_data)
import optics                         # pyoptics twiss-file reader (optics.open)
from glob import glob
from pylab import *                   # plot_date, ylim, twinx, gca, axvline, figure, plot
from numpy import linspace
from numpy.fft import rfft
from numpy.linalg import svd
t_start='2011-05-07 10:00:00.000'
t_end='2011-05-07 20:00:00.000'
data=cernlogdb.dbget('RPTE.UA23.RB.A12:I_MEAS',t1=t_start,t2=t_end,conf='ldb.conf')
data1=cernlogdb.dbget(bctdc,t1=t_start,t2=t_end,conf='ldb.conf')
data2=cernlogdb.dbget(bctfr,t1=t_start,t2=t_end,conf='ldb.conf')
data3=cernlogdb.dbget(rffreq,t1=t_start,t2=t_end,conf='ldb.conf')
bpmb1_fns=sorted(glob('data_ats_md2/fill_data/*/BPM/Beam1@Turn@*.sdds.gz'))
bpmb2_fns=sorted(glob('data_ats_md2/fill_data/*/BPM/Beam2@Turn@*.sdds.gz'))
bpmb1_t= cernlogdb.t2num([ bpmdata_date(fn) for fn in bpmb1_fns])
bpmb2_t= cernlogdb.t2num([ bpmdata_date(fn) for fn in bpmb2_fns])
vn=data['datavars'][0]
t,v=data[0]
t=cernlogdb.t2num(t)
plot_date(t,v,'k',label=vn)
ylim(0,8000)
twinx()
cernlogdb.plot_data(data2)
ylim(0,2e10)
twinx()
cernlogdb.plot_data(data3)
ylim(0,2e10)
ax=gca()
[ axvline(t,color='c') for t in bpmb1_t ]
[ axvline(t,color='y') for t in bpmb2_t ]
# The next two lines were IPython magics in the original session; they (re)load
# helper definitions used here (e.g. bpmdata_date, bctdc, bctfr, rffreq, LHCBPM):
# %run harmonic_fit.py
# %run __init__.py
m.__class__=LHCBPM
bpmb1_fns=sorted(glob('data_ats_md2/fill_data/*/BPM/Beam1@Turn@*.sdds.gz'))
bpmb2_fns=sorted(glob('data_ats_md2/fill_data/*/BPM/Beam2@Turn@*.sdds.gz'))
m=LHCBPM(bpmb1_fns[-7])
m.mk_fitlsq()
t1=optics.open('twiss_lhcb1.tfs')
t2=optics.open('twiss_lhcb2.tfs')
m.mk_spos(t1)
goodx=(~m.badxy) & m.xidx
goody=(~m.badxy) & m.yidx
sum((m.tune*m.res)[goodx])/sum(m.res[goodx])
sum((m.tune*m.res)[goody])/sum(m.res[goody])
u,s,v=svd(m.data[~m.badxy])
u,s,v=svd(m.data[goodx])
f=linspace(0,0.5,2250//2+1)
figure();plot(f,abs(rfft(v[:,:5],axis=0)))
| [
"[email protected]"
] | |
66a874aeab5071d8d192da698ca71a914f216eed | 1a642f40e88f05075c64da1256901d1b796f33fd | /06. Dictionaries/person.py | 34b14cd3b24a14714daa6f94de222abd56a7eb41 | [] | no_license | Mart1nDimtrov/Python-Crash-Course | 357ca4a015929b455395807dfeb260191342e360 | 326fdedc96e3d3e2ae9597349b54dd9e31a8fb4f | refs/heads/master | 2021-01-26T07:07:53.844047 | 2020-09-27T20:59:17 | 2020-09-27T20:59:17 | 243,358,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #6-1. Person: Use a dictionary to store information about a person you know.
#Store their first name, last name, age, and the city in which they live. You
#should have keys such as first_name, last_name, age, and city. Print each
#piece of information stored in your dictionary.
person = {
'first_name':'stoyan',
'last_name':'filipov',
'age':25,
'city':'Varna',
}
print(f'First name: {person["first_name"].title()}.')
print(f'Last name: {person["last_name"].title()}.')
print(f'Age: {person["age"]}.')
print(f'City: {person["city"]}.') | [
"[email protected]"
] | |
1a14569986273f76fc063f439ab0fb87663f2adf | 47aa27752421393451ebed3389b5f3a52a57577c | /src/Lib/test/test_json/test_recursion.py | 72bccd31ccd7ec23fed0c9ab2c62a5c435638ec7 | [
"MIT"
] | permissive | NUS-ALSET/ace-react-redux-brython | e66db31046a6a3cd621e981977ed0ca9a8dddba9 | d009490263c5716a145d9691cd59bfcd5aff837a | refs/heads/master | 2021-08-08T08:59:27.632017 | 2017-11-10T01:34:18 | 2017-11-10T01:34:18 | 110,187,226 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | from test.test_json import PyTest, CTest
class JSONTestObject:
pass
class TestRecursion:
def test_listrecursion(self):
x = []
x.append(x)
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on list recursion")
x = []
y = [x]
x.append(y)
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on alternating list recursion")
y = []
x = [y, y]
# ensure that the marker is cleared
self.dumps(x)
def test_dictrecursion(self):
x = {}
x["test"] = x
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on dict recursion")
x = {}
y = {"a": x, "b": x}
# ensure that the marker is cleared
self.dumps(x)
def test_defaultrecursion(self):
class RecursiveJSONEncoder(self.json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
                return super().default(o)
enc = RecursiveJSONEncoder()
self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
enc.recurse = True
try:
enc.encode(JSONTestObject)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on default recursion")
def test_highly_nested_objects_decoding(self):
# test that loading highly-nested objects doesn't segfault when C
# accelerations are used. See #12017
with self.assertRaises(RuntimeError):
self.loads('{"a":' * 100000 + '1' + '}' * 100000)
with self.assertRaises(RuntimeError):
self.loads('{"a":' * 100000 + '[1]' + '}' * 100000)
with self.assertRaises(RuntimeError):
self.loads('[' * 100000 + '1' + ']' * 100000)
def test_highly_nested_objects_encoding(self):
# See #12051
l, d = [], {}
for x in range(100000):
l, d = [l], {'k':d}
with self.assertRaises(RuntimeError):
self.dumps(l)
with self.assertRaises(RuntimeError):
self.dumps(d)
def test_endless_recursion(self):
# See #12051
class EndlessJSONEncoder(self.json.JSONEncoder):
def default(self, o):
"""If check_circular is False, this will keep adding another list."""
return [o]
with self.assertRaises(RuntimeError):
EndlessJSONEncoder(check_circular=False).encode(5j)
class TestPyRecursion(TestRecursion, PyTest): pass
class TestCRecursion(TestRecursion, CTest): pass
| [
"[email protected]"
] | |
7061e0aabffe58d1301cd83d5b44595f4f605b73 | 2aa21b0d818397d5299bee411aa4df9058c6369e | /atcoder/abc130_c.py | f2db33e09de9a6f10eda0fc8a7f7dcdd7fe5cdca | [] | no_license | YuheiNakasaka/leetcode | ef4a0c04c44c9e9a727773b7d4a1bed0cbc17cba | 9109f35a20b5a36e1bd611dbe5ad56ad724e0c96 | refs/heads/master | 2020-05-16T08:04:34.523056 | 2020-04-21T00:55:33 | 2020-04-21T00:55:33 | 182,897,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | W, H, x, y = list(map(int, input().split()))
square = (W * H) / 2
cnt = '0'
if x + x == W and y + y == H:
cnt = '1'
print(square, cnt) | [
"[email protected]"
] | |
41b8308911f0359ad34e366f655fdeeddf5b3ed0 | cca5ceb42b09e567d79fcb46f298757c1ff04447 | /Async/Async_4.py | cdf82e563fb9572cf18c1dc4350d6062eba95012 | [] | no_license | NishantGhanate/PythonScripts | 92933237720e624a0f672729743a98557bea79d6 | 60b92984d21394002c0d3920bc448c698e0402ca | refs/heads/master | 2022-12-13T11:56:14.442286 | 2022-11-18T14:26:33 | 2022-11-18T14:26:33 | 132,910,530 | 25 | 15 | null | 2022-12-09T09:03:58 | 2018-05-10T14:18:33 | Python | UTF-8 | Python | false | false | 377 | py | import asyncio
import time
class A:
async def fucn_1(self):
await asyncio.sleep(3)
print('hello')
async def fucn_2(self):
await asyncio.sleep(1)
print('yellow')
a = A()
loop = asyncio.get_event_loop()
task1 = loop.create_task(a.fucn_1())
task2 = loop.create_task(a.fucn_2())
loop.run_until_complete(asyncio.gather(task1, task2))
| [
"[email protected]"
] | |
a0a448f4f413e67c688e869495f5dd1e476d5794 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03623/s414511746.py | 255735db787058e6f8122bb55e8033b5d199aa1e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | def solve():
x, a, b = map(int, input().split())
if abs(x-a) > abs(x-b):
print('B')
else:
print('A')
if __name__ == "__main__":
solve()
| [
"[email protected]"
] | |
54498748abaf38e7ebcd28ac38c33e0bbd96b11c | 70f2fa6eaf1843ea8fc5351c4951babea27f1b77 | /bin/django-admin.py | 59517a60bf2585fa43584bfcd81961175fcd8977 | [] | no_license | zalun/FlightDeck-lib | 223d69cccd9b5f91bc0648792b6bd981176a48ae | 12f047b43a9b89d183aa75ca448bceea2082d11b | refs/heads/master | 2020-12-25T08:59:52.058399 | 2011-11-18T18:09:25 | 2011-11-18T18:09:25 | 1,005,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/home/zalun/Environment/flightdeck/bin/python2.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
ac5a3b92fd58d9f527030ff4fcb8246f03858bc0 | ce8d151a075931f2af5c4e2bcc5498fda9dbd1b1 | /foundation/organisation/migrations/0015_remove_networkgroup_position.py | 41256b78b311cff20d00d3ef0f23cf7082ef1a98 | [
"MIT"
] | permissive | okfn/website | d089dfad786b11813c2cad6912cb40e4d277b6e8 | 1055300216619c30cb06d58e51d78f739beb6483 | refs/heads/develop | 2023-08-30T23:43:11.515725 | 2023-08-29T07:41:06 | 2023-08-29T07:41:06 | 15,168,170 | 83 | 110 | MIT | 2023-09-13T04:52:25 | 2013-12-13T16:20:09 | Python | UTF-8 | Python | false | false | 407 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-08-24 10:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organisation', '0014_auto_20200824_1033'),
]
operations = [
migrations.RemoveField(
model_name='networkgroup',
name='position',
),
]
| [
"[email protected]"
] | |
40147c0abe5fcae15c46d9e2ca12cbd6e7d09e8e | 841b8d707cf42dbb26089c89c83fd3238f7f56cf | /root_numpy/_graph.py | 66e59f7497e70a264117e98aedd252c74946b253 | [
"BSD-3-Clause"
] | permissive | ndawe/root_numpy | d6682976e78acacd25b331ecd9958a270d0eb9eb | 34625988547e8a462cc8e10cba6459e9fa2fa65e | refs/heads/master | 2020-04-05T04:37:47.389235 | 2017-10-23T03:20:18 | 2017-10-23T03:20:18 | 6,723,352 | 1 | 0 | null | 2012-12-16T05:49:48 | 2012-11-16T15:13:52 | C++ | UTF-8 | Python | false | false | 1,369 | py | import numpy as np
from . import _librootnumpy
__all__ = [
'fill_graph',
]
def fill_graph(graph, array):
"""Fill a ROOT graph with a NumPy array.
Parameters
----------
    graph : a ROOT TGraph or TGraph2D
The ROOT graph to fill.
array : numpy array of shape [n_samples, n_dimensions]
The values to fill the graph with. The number of columns must match the
dimensionality of the graph.
"""
import ROOT
array = np.asarray(array, dtype=np.double)
if isinstance(graph, ROOT.TGraph):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 2:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the graph")
return _librootnumpy.fill_g1(
ROOT.AsCObject(graph), array)
elif isinstance(graph, ROOT.TGraph2D):
if array.ndim != 2:
raise ValueError("array must be 2-dimensional")
if array.shape[1] != 3:
raise ValueError(
"length of the second dimension must equal "
"the dimension of the graph")
return _librootnumpy.fill_g2(
ROOT.AsCObject(graph), array)
else:
raise TypeError(
"hist must be an instance of ROOT.TGraph or ROOT.TGraph2D")
| [
"[email protected]"
] | |
225faa5c992ef25e33ee5472cd08d2040d303a31 | 6733716dcdcacfcc739ae5c4af976db81ead852b | /ROOT/Project/functions/rootTree_rootHist/just_test/test4_auto.py | 9611e7290189a404df65fa6a16ef108b1c744053 | [] | no_license | StudyGroupPKU/fruit_team | 45202a058d59057081670db97b9229ee720fa77e | 9f9f673f5ce22ce6d25736871f3d7a5bd232c29d | refs/heads/master | 2021-01-24T08:15:37.909327 | 2018-05-11T08:53:06 | 2018-05-11T08:53:06 | 122,975,404 | 0 | 5 | null | 2018-04-05T02:37:14 | 2018-02-26T13:41:24 | Python | UTF-8 | Python | false | false | 4,051 | py | IBin = -2.0
FBin = 2.0
NBins = 100
from ROOT import TFile, TH1F, TH1D, TTree
import numpy
import os
filename = "/Users/leejunho/Desktop/git/python3Env/group_study/fruit_team/ROOT/Project/root_generator/tree/root2_tree.root"
f = TFile(filename,"READ")
outfile = TFile("outfile_please_modify_correspondingly_test4.root","RECREATE")
if(filename[0]=="/"):
filename = filename
else:
filename = os.getcwd() + "/" + filename # get the path included filename
loca=len(filename)
for i in range (1,len(filename)+1): # find the "/" location
if(filename[-i] == "/"):
loca = i-1
break
FILENAME = filename.replace(filename[:-loca],"") # this is the shorten input filename, excluded path
print(FILENAME)
############################################## Getting Name list and Dictionary
DirTreeBranchNameList = {}
SetBranchNameList = set()
dirlist = f.GetListOfKeys()
#dirlist.Print(); #print(dirlist.GetSize())
ITER = dirlist.MakeIterator()
key = ITER.Next()
#key.Print(); print(key.GetName())
while key:
BranchNameList = []
tree = key.ReadObj()
# print(tree.GetName())
branchlist = tree.GetListOfBranches()
if(branchlist.IsEmpty()):
continue
ITER_b = branchlist.MakeIterator()
key_b = ITER_b.Next()
while key_b:
BranchNameList.append(key_b.GetName())
SetBranchNameList.add(key_b.GetName())
# print(key_b.GetName())
key_b = ITER_b.Next()
DirTreeBranchNameList[tree.GetName()] = BranchNameList
key = ITER.Next()
#print(DirTreeBranchNameList) # seperate branch of each tree
#print(SetBranchNameList) # take advantage of no double element in set, define branch variables
#############################################################
################## prepare for SetBranchAddress for each tree. variables definiton for branch ###########
DirNumpyArray_branch = {}
for NumpyArray in SetBranchNameList: ## prepare for SetBranchAddress for each tree. variables definiton for branch
# print(NumpyArray); print(type(NumpyArray))
a = numpy.array([0],'d')
DirNumpyArray_branch[NumpyArray] = a
#print(type(DirNumpyArray_branch.values()[1][0]))
#print(DirNumpyArray_branch)
#############################################################
##########Need to get into tree again, for each tree, do SetBranchAddress(), Setting histograms for each branch!!! ## below ::
dirlist = f.GetListOfKeys()
ITER = dirlist.MakeIterator()
key = ITER.Next()
DirhistList = {} ##### histolist for each tree
while key:
histList = []
NamehistList = []
tree = key.ReadObj()
    for branch_name, branch_arr in DirNumpyArray_branch.items():
        tree.SetBranchAddress(branch_name, branch_arr) #### SetBranchAddress of every branch for each tree
branchlist = tree.GetListOfBranches()
if(branchlist.IsEmpty()):
continue
ITER_b = branchlist.MakeIterator()
key_b = ITER_b.Next()
while key_b:
ENTRY = tree.GetEntries()
Namehist = FILENAME.replace(".root","") + "_" + tree.GetName() + "_" + key_b.GetName()+"_hist"
NamehistList.append(Namehist)
hist = TH1D(Namehist, Namehist, NBins, IBin, FBin)
histList.append(hist)
KEY_B = key_b.GetName()
key_b = ITER_b.Next()
    branch_names = list(DirNumpyArray_branch.keys()) # list() so it can be indexed under python3
    for i in range(ENTRY):
        tree.GetEntry(i)
        for j in range(len(branch_names)):
            for k in range(len(histList)):
                if branch_names[j] in histList[k].GetName():
                    histList[k].Fill(DirNumpyArray_branch[branch_names[j]][0])
for i in range(len(histList)):
histList[i].Write()
# print("\n")
DirhistList[tree.GetName()] = histList
key = ITER.Next()
#print(DirhistList)
#print(DirNumpyArray_branch)
################################################################
outfile.Close()
| [
"[email protected]"
] | |
d184e3a98bf8510e166e21b881fec6aa57d581ef | f5f4a0e2dcdcc5ee89bd86480d52390878fe612b | /utils/gap_configs/python/ips/interco/router.py | 1b682cfc9733009a15860010d5c5c4490097348c | [
"Apache-2.0"
] | permissive | MIT-AI-Accelerator/gap_sdk | 0751735b2b7d5a47be234e010eb9f72ebe8f81ef | 6d255c70883cf157d76d006b2dbf55bc6974b21f | refs/heads/master | 2023-09-05T21:23:00.379129 | 2021-11-03T18:37:37 | 2021-11-03T18:37:37 | 400,571,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py | #
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gsystree as st
class Router(st.Component):
def __init__(self, parent, name, latency=0, bandwidth=4, remove_offset=0, config=None):
super(Router, self).__init__(parent, name)
self.add_property('mappings', {})
self.add_property('vp_component', 'interco.router_impl')
self.add_property('latency', latency)
self.add_property('bandwidth', bandwidth)
self.add_property('remove_offset', remove_offset)
if config is not None:
self.add_properties(config)
def add_mapping(self, name, base=None, size=None, remove_offset=None, add_offset=None, id=None):
mapping = {}
if base is not None:
mapping['base'] = base
if size is not None:
mapping['size'] = size
if remove_offset is not None:
mapping['remove_offset'] = remove_offset
if add_offset is not None:
mapping['add_offset'] = add_offset
if id is not None:
mapping['id'] = id
self.get_property('mappings')[name] = mapping
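# Usage sketch (added; target names and addresses below are illustrative):
#
#   ico = Router(soc, 'ico', latency=1, bandwidth=8)
#   ico.add_mapping('l2',  base=0x1C000000, size=0x00190000)
#   ico.add_mapping('apb', base=0x1A100000, size=0x00100000, id=1)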
| [
"[email protected]"
] | |
81adc41f2d08345806384aa1c2e0de279ea5afdf | b12e93c2dde41cc43d30fdd9ffbda968abb8e40e | /HearthStone/HearthStone/ext/card_compiler.py | f96e6a058a02c87bb2bb8155ab21d306050a5ab4 | [
"MIT"
] | permissive | wkhunter/MiniGames | ec16a22dfc31e7a910466ffe65a3b4961e653724 | 910fddce17795c51c3e6a232bd98744865f984dc | refs/heads/master | 2021-04-30T23:39:18.958443 | 2017-01-18T12:42:56 | 2017-01-18T12:42:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,795 | py | #! /usr/bin/python
# -*- encoding: utf-8 -*-
"""A simple compiler of card definition language, using PLY.
Example:
Minion 侏儒发明家 { # Define a new minion
{% id = 0, name = '侏儒发明家', type = 0, CAH = [4, 2, 4], klass = 0 %}
bc { d 1 }
dr { d 1 }
}
"""
import re
from types import new_class
from collections import namedtuple
from ply.lex import lex
from ply.yacc import yacc
from ..game_entities.card import Minion, Spell, Weapon
__author__ = 'fyabc'
#########
# Lexer #
#########
# Reserved words.
ReservedWords = {
'Minion': ('CARD_TYPE', Minion),
'Spell': ('CARD_TYPE', Spell),
'Weapon': ('CARD_TYPE', Weapon),
'bc': ('SKILL', 'battle_cry'),
'battle_cry': ('SKILL', None),
'dr': ('SKILL', 'death_rattle'),
'death_rattle': ('SKILL', None),
'play': ('SKILL', None),
'def': ('DEF', None),
}
# Token list.
tokens = ['DICT', 'LINE_CODE', 'NUM', 'ID', 'CARD_TYPE', 'SKILL', 'DEF']
# Literals list.
literals = ['{', '}', '(', ')']
# Ignored characters.
t_ignore = ' \t\r\n'
# Token specifications (as Regex).
# Token processing functions.
def t_DICT(t):
r"""\{%.*?%}"""
t.value = eval('dict({})'.format(t.value[2:-2]))
return t
def t_LINE_CODE(t):
r"""\$.*?\n"""
t.value = t.value[1:].strip()
return t
def t_NUM(t):
r"""\d+"""
t.value = int(t.value)
return t
def t_ID(t):
# r"""[a-zA-Z_][a-zA-Z_0-9]*"""
r"""[^\W0-9]\w*"""
token = ReservedWords.get(t.value, ('ID', None))
t.type = token[0]
if token[1] is not None:
t.value = token[1]
return t
def t_COMMENT(t):
r"""\#.*"""
pass
# Error handler.
def t_error(t):
print('Bad character: {!r}'.format(t.value[0]))
t.lexer.skip(1)
# Build the lexer
lexer = lex(reflags=re.UNICODE)
##########
# Parser #
##########
Action = namedtuple('Action', ['type', 'value'])
def p_card(p):
"""card : CARD_TYPE ID '{' card_contents '}'
| CARD_TYPE '{' card_contents '}'
"""
if len(p) == 5:
cls_name = ''
cls_dict = p[3]
else:
cls_name = p[2]
cls_dict = p[4]
p[0] = new_class(cls_name, (p[1],), {}, lambda ns: ns.update(cls_dict))
def p_card_contents(p):
"""card_contents : empty
| card_contents content_entry
"""
if len(p) == 2:
p[0] = {}
else:
p[0] = p[1]
k, v = p[2]
p[0][k] = v
def p_content_entry(p):
"""content_entry : data_definition
| skill_definition
| func_definition
"""
p[0] = p[1]
def p_data_definition(p):
"""data_definition : DICT"""
p[0] = '_data', p[1]
def p_func_definition(p):
"""func_definition : DEF ID '(' ')' '{' statements '}'"""
exec('''\
def __func_do_not_override_this_name(self):
{}
pass
'''.format('\n '.join(s.value for s in p[6])), {}, get_skill_locals())
p[0] = p[2], get_skill_locals().pop('__func_do_not_override_this_name')
def p_statements(p):
"""statements : empty
| statements statement
"""
if len(p) == 2:
p[0] = []
else:
p[0] = p[1]
p[0].append(p[2])
def p_statement(p):
"""statement : LINE_CODE"""
p[0] = Action('statement', p[1])
def p_skill_definition(p):
"""skill_definition : SKILL '{' actions '}'"""
# Parse skills.
if p[1] == 'battle_cry':
skill_name = 'run_battle_cry'
args_string = 'self, player_id, index'
elif p[1] == 'death_rattle':
skill_name = 'run_death_rattle'
args_string = 'self, player_id, index'
elif p[1] == 'play':
skill_name = 'play'
args_string = 'self, player_id, target'
else:
skill_name = p[1]
args_string = 'self'
result_statements = []
for action in p[3]:
if action.type == 'statement':
result_statements.append(action.value)
else:
# todo: add more actions
pass
exec('''\
def __skill_do_not_override_this_name({}):
{}
pass
'''.format(args_string, '\n '.join(result_statements)), {}, get_skill_locals())
p[0] = skill_name, get_skill_locals().pop('__skill_do_not_override_this_name')
def p_actions(p):
"""actions : empty
| actions action
"""
if len(p) == 2:
p[0] = []
else:
p[0] = p[1]
p[0].append(p[2])
def p_action(p):
"""action : statement"""
p[0] = p[1]
def p_empty(p):
"""empty :"""
pass
# Error rule for syntax errors
def p_error(p):
print("Syntax error in input {}!".format(p))
_default_locals = None
def get_skill_locals():
global _default_locals
if _default_locals is None:
from HearthStone.ext import Minion, Spell, Weapon, set_description
from HearthStone.ext.card_creator import m_blank, w_blank, m_summon
from HearthStone.ext.card_creator import validator_minion, validator_enemy_minion
from HearthStone.ext.card_creator import action_damage, action_destroy
from HearthStone.ext import DrawCard, AddCardToHand
from HearthStone.ext import Damage, SpellDamage, RestoreHealth, GetArmor
from HearthStone.ext import RandomTargetDamage
from HearthStone.ext import GameHandler, DeskHandler, FreezeOnDamage
from HearthStone.ext import AddMinionToDesk
from HearthStone.ext import TurnBegin
from HearthStone.ext import MinionDeath
from HearthStone.ext import constants
from HearthStone.utils.debug_utils import verbose
_default_locals = locals()
return _default_locals
# Build the parser
parser = yacc()
parse_card = parser.parse
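# Usage sketch (added; the card source is illustrative). Note that the
# '{% ... %}' data block must stay on a single line because t_DICT is not
# compiled with re.DOTALL, and skill bodies only accept '$'-prefixed
# statement lines (LINE_CODE), unlike the shorthand in the module docstring:
#
#   FooMinion = parse_card(
#       "Minion Foo { {% id = 1, name = 'Foo', type = 0, CAH = [1, 1, 1] %} }")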
__all__ = [
'lexer',
'parser',
'parse_card',
]
| [
"[email protected]"
] | |
b4a0ad7d276fde1a54a3717d0468ce45efaff51d | d23dab09b21553353ad85246ebafaea790f2afbd | /src/python/pants/backend/python/goals/setup_py_test.py | beeb5859feb6a84d67dd8201b8ea41d45050b1ab | [
"Apache-2.0"
] | permissive | asherf/pants | 00e8c64b7831f814bac3c4fa8c342d2237fef17d | c94d9e08f65e9baf3793dff0ec2c571d682f6b90 | refs/heads/master | 2023-05-28T14:45:35.325999 | 2023-01-18T15:16:07 | 2023-01-18T15:16:07 | 185,082,662 | 0 | 0 | Apache-2.0 | 2023-01-18T15:15:46 | 2019-05-05T21:09:43 | Python | UTF-8 | Python | false | false | 50,932 | py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from typing import Iterable
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals.setup_py import (
AmbiguousOwnerError,
DependencyOwner,
DistBuildChroot,
DistBuildChrootRequest,
DistBuildSources,
ExportedTarget,
ExportedTargetRequirements,
FinalizedSetupKwargs,
FirstPartyDependencyVersionScheme,
GenerateSetupPyRequest,
InvalidEntryPoint,
InvalidSetupPyArgs,
NoDistTypeSelected,
NoOwnerError,
OwnedDependencies,
OwnedDependency,
SetupKwargs,
SetupKwargsRequest,
SetupPyError,
SetupPyGeneration,
declares_pkg_resources_namespace_package,
determine_explicitly_provided_setup_kwargs,
determine_finalized_setup_kwargs,
generate_chroot,
generate_setup_py,
get_exporting_owner,
get_owned_dependencies,
get_requirements,
get_sources,
merge_entry_points,
package_python_dist,
validate_commands,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.target_types import (
PexBinary,
PythonDistribution,
PythonProvidesField,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.util_rules import dists, python_sources
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.base.exceptions import IntrinsicError
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FileTarget, ResourcesGeneratorTarget, ResourceTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import SubsystemRule, rule
from pants.engine.target import InvalidFieldException
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import QueryRule, RuleRunner, engine_error
from pants.util.strutil import softwrap
_namespace_decl = "__import__('pkg_resources').declare_namespace(__name__)"
def create_setup_py_rule_runner(*, rules: Iterable) -> RuleRunner:
rule_runner = RuleRunner(
rules=rules,
target_types=[
PexBinary,
PythonDistribution,
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
ResourceTarget,
ResourcesGeneratorTarget,
FileTarget,
],
objects={"python_artifact": PythonArtifact},
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
# We use a trivial test that our SetupKwargs plugin hook works.
class PluginSetupKwargsRequest(SetupKwargsRequest):
@classmethod
def is_applicable(cls, _) -> bool:
return True
@rule
def setup_kwargs_plugin(request: PluginSetupKwargsRequest) -> SetupKwargs:
kwargs = {**request.explicit_kwargs, "plugin_demo": "hello world"}
return SetupKwargs(kwargs, address=request.target.address)
@pytest.fixture
def chroot_rule_runner() -> RuleRunner:
return create_setup_py_rule_runner(
rules=[
*core_target_types_rules(),
determine_explicitly_provided_setup_kwargs,
generate_chroot,
generate_setup_py,
determine_finalized_setup_kwargs,
get_sources,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
*python_sources.rules(),
*target_types_rules.rules(),
setup_kwargs_plugin,
SubsystemRule(SetupPyGeneration),
UnionRule(SetupKwargsRequest, PluginSetupKwargsRequest),
QueryRule(DistBuildChroot, (DistBuildChrootRequest,)),
QueryRule(DistBuildSources, (DistBuildChrootRequest,)),
QueryRule(FinalizedSetupKwargs, (GenerateSetupPyRequest,)),
]
)
def assert_chroot(
rule_runner: RuleRunner,
expected_files: list[str],
expected_setup_kwargs,
addr: Address,
interpreter_constraints: InterpreterConstraints | None = None,
) -> None:
if interpreter_constraints is None:
interpreter_constraints = InterpreterConstraints(
PythonSetup.default_interpreter_constraints
)
tgt = rule_runner.get_target(addr)
req = DistBuildChrootRequest(
ExportedTarget(tgt), interpreter_constraints=interpreter_constraints
)
chroot = rule_runner.request(DistBuildChroot, [req])
snapshot = rule_runner.request(Snapshot, [chroot.digest])
assert sorted(expected_files) == sorted(snapshot.files)
if expected_setup_kwargs is not None:
sources = rule_runner.request(DistBuildSources, [req])
setup_kwargs = rule_runner.request(
FinalizedSetupKwargs,
[GenerateSetupPyRequest(ExportedTarget(tgt), sources, interpreter_constraints)],
)
assert expected_setup_kwargs == setup_kwargs.kwargs
def assert_chroot_error(rule_runner: RuleRunner, addr: Address, exc_cls: type[Exception]) -> None:
tgt = rule_runner.get_target(addr)
with pytest.raises(ExecutionError) as excinfo:
rule_runner.request(
DistBuildChroot,
[
DistBuildChrootRequest(
ExportedTarget(tgt),
InterpreterConstraints(PythonSetup.default_interpreter_constraints),
)
],
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def test_use_existing_setup_script(chroot_rule_runner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/bar/BUILD": "python_sources()",
"src/python/foo/bar/__init__.py": "",
"src/python/foo/bar/bar.py": "",
# Add a `.pyi` stub file to ensure we include it in the final result.
"src/python/foo/bar/bar.pyi": "",
"src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
"src/python/foo/resources/js/code.js": "",
"files/BUILD": 'file(source="README.txt")',
"files/README.txt": "",
"BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[
':setup',
],
generate_setup=False,
provides=python_artifact(
name='foo', version='1.2.3',
)
)
python_sources(name="setup", dependencies=["src/python/foo"])
"""
),
"setup.py": textwrap.dedent(
"""
from setuptools import setup
setup(
name = "foo",
version = "1.2.3",
package_dir={"": "src/python"},
packages = ["foo"],
)
"""
),
"src/python/foo/BUILD": textwrap.dedent(
"""
python_sources(
dependencies=[
'src/python/foo/bar',
'src/python/foo/resources',
'files',
]
)
"""
),
"src/python/foo/__init__.py": _namespace_decl,
"src/python/foo/foo.py": "",
}
)
assert_chroot(
chroot_rule_runner,
[
"setup.py",
"files/README.txt",
"src/python/foo/bar/__init__.py",
"src/python/foo/bar/bar.py",
"src/python/foo/bar/bar.pyi",
"src/python/foo/resources/js/code.js",
"src/python/foo/__init__.py",
"src/python/foo/foo.py",
],
None,
Address("", target_name="foo-dist"),
)
def test_use_generate_setup_script_package_provenance_agnostic(chroot_rule_runner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""
python_sources(
dependencies=[
'src/python/resources',
]
)
"""
),
"src/python/foo/bar.py": "",
# Here we have a Python package of resources.js defined via files owned by a resources
# target. From a packaging perspective, we should be agnostic to what targets own a
# python package when calculating package_data, we just need to know which packages are
# defined by Python files in the distribution.
"src/python/resources/BUILD": 'resources(sources=["**/*.py", "**/*.js"])',
"src/python/resources/js/__init__.py": "",
"src/python/resources/js/code.js": "",
"src/python/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[
'src/python/foo',
],
generate_setup=True,
provides=python_artifact(
name='foo', version='1.2.3',
)
)
"""
),
}
)
assert_chroot(
chroot_rule_runner,
[
"foo/bar.py",
"resources/js/__init__.py",
"resources/js/code.js",
"setup.py",
"MANIFEST.in",
],
{
"name": "foo",
"version": "1.2.3",
"plugin_demo": "hello world",
"packages": ("foo", "resources.js"),
"namespace_packages": (),
"package_data": {
"resources.js": (
"__init__.py",
"code.js",
)
},
"install_requires": (),
"python_requires": "<4,>=3.7",
},
Address("src/python", target_name="foo-dist"),
)
def test_merge_entry_points() -> None:
sources = {
"src/python/foo:foo-dist `entry_points`": {
"console_scripts": {"foo_tool": "foo.bar.baz:Tool.main"},
"foo_plugins": {"qux": "foo.qux"},
},
"src/python/foo:foo-dist `provides.entry_points`": {
"console_scripts": {"foo_qux": "foo.baz.qux"},
"foo_plugins": {"foo-bar": "foo.bar:plugin"},
},
}
expect = {
"console_scripts": {
"foo_tool": "foo.bar.baz:Tool.main",
"foo_qux": "foo.baz.qux",
},
"foo_plugins": {
"qux": "foo.qux",
"foo-bar": "foo.bar:plugin",
},
}
assert merge_entry_points(*list(sources.items())) == expect
conflicting_sources = {
"src/python/foo:foo-dist `entry_points`": {"console_scripts": {"my-tool": "ep1"}},
"src/python/foo:foo-dist `provides.entry_points`": {"console_scripts": {"my-tool": "ep2"}},
}
err_msg = softwrap(
"""
Multiple entry_points registered for console_scripts my-tool in:
src/python/foo:foo-dist `entry_points`,
src/python/foo:foo-dist `provides.entry_points`
"""
)
with pytest.raises(ValueError, match=err_msg):
merge_entry_points(*list(conflicting_sources.items()))
def test_generate_chroot(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/bar/baz/BUILD": textwrap.dedent(
"""
python_distribution(
name="baz-dist",
dependencies=[':baz'],
provides=python_artifact(
name='baz',
version='1.1.1'
)
)
python_sources()
"""
),
"src/python/foo/bar/baz/baz.py": "",
"src/python/foo/qux/BUILD": textwrap.dedent(
"""
python_sources()
pex_binary(name="bin", entry_point="foo.qux.bin:main")
"""
),
"src/python/foo/qux/__init__.py": "",
"src/python/foo/qux/qux.py": "",
# Add a `.pyi` stub file to ensure we include it in the final result.
"src/python/foo/qux/qux.pyi": "",
"src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
"src/python/foo/resources/js/code.js": "",
"files/BUILD": 'file(source="README.txt")',
"files/README.txt": "",
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[
':foo',
],
provides=python_artifact(
name='foo', version='1.2.3'
),
entry_points={
"console_scripts":{
"foo_main": "src/python/foo/qux:bin",
},
},
)
python_sources(
dependencies=[
'src/python/foo/bar/baz',
'src/python/foo/qux',
'src/python/foo/resources',
'files',
]
)
"""
),
"src/python/foo/__init__.py": _namespace_decl,
"src/python/foo/foo.py": "",
}
)
assert_chroot(
chroot_rule_runner,
[
"files/README.txt",
"foo/qux/__init__.py",
"foo/qux/qux.py",
"foo/qux/qux.pyi",
"foo/resources/js/code.js",
"foo/__init__.py",
"foo/foo.py",
"setup.py",
"MANIFEST.in",
],
{
"name": "foo",
"version": "1.2.3",
"plugin_demo": "hello world",
"packages": ("foo", "foo.qux"),
"namespace_packages": ("foo",),
"package_data": {"foo": ("resources/js/code.js",), "foo.qux": ("qux.pyi",)},
"install_requires": ("baz==1.1.1",),
"python_requires": "<4,>=3.7",
"entry_points": {"console_scripts": ["foo_main = foo.qux.bin:main"]},
},
Address("src/python/foo", target_name="foo-dist"),
)
def test_generate_chroot_entry_points(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/qux/BUILD": textwrap.dedent(
"""
python_sources()
pex_binary(name="bin", entry_point="foo.qux.bin:main")
"""
),
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
entry_points={
"console_scripts":{
"foo_main": "src/python/foo/qux:bin",
"foo_tool":"foo.bar.baz:Tool.main",
"bin_tool":"//src/python/foo/qux:bin",
"bin_tool2":"src/python/foo/qux:bin",
"hello":":foo-bin",
},
"foo_plugins":{
"qux":"foo.qux",
},
},
provides=python_artifact(
name='foo',
version='1.2.3',
entry_points={
"console_scripts":{
"foo_qux":"foo.baz.qux:main",
"foo_bin":":foo-bin",
},
"foo_plugins":[
"foo-bar=foo.bar:plugin",
],
},
)
)
python_sources(
dependencies=[
'src/python/foo/qux',
]
)
pex_binary(name="foo-bin", entry_point="foo.bin:main")
"""
),
}
)
assert_chroot(
chroot_rule_runner,
[
"setup.py",
"MANIFEST.in",
],
{
"name": "foo",
"version": "1.2.3",
"plugin_demo": "hello world",
"packages": tuple(),
"namespace_packages": tuple(),
"package_data": {},
"install_requires": tuple(),
"python_requires": "<4,>=3.7",
"entry_points": {
"console_scripts": [
"foo_main = foo.qux.bin:main",
"foo_tool = foo.bar.baz:Tool.main",
"bin_tool = foo.qux.bin:main",
"bin_tool2 = foo.qux.bin:main",
"hello = foo.bin:main",
"foo_qux = foo.baz.qux:main",
"foo_bin = foo.bin:main",
],
"foo_plugins": [
"qux = foo.qux",
"foo-bar = foo.bar:plugin",
],
},
},
Address("src/python/foo", target_name="foo-dist"),
)
def test_generate_long_description_field_from_file(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
long_description_path="src/python/foo/readme.md",
provides=python_artifact(
name='foo',
version='1.2.3',
)
)
"""
),
"src/python/foo/readme.md": "Some long description.",
}
)
assert_chroot(
chroot_rule_runner,
[
"setup.py",
"MANIFEST.in",
],
{
"name": "foo",
"version": "1.2.3",
"plugin_demo": "hello world",
"packages": tuple(),
"namespace_packages": tuple(),
"package_data": {},
"install_requires": tuple(),
"python_requires": "<4,>=3.7",
"long_description": "Some long description.",
},
Address("src/python/foo", target_name="foo-dist"),
)
def test_generate_long_description_field_from_file_already_having_it(
chroot_rule_runner: RuleRunner,
) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
long_description_path="src/python/foo/readme.md",
provides=python_artifact(
name='foo',
version='1.2.3',
long_description="Some long description.",
)
)
"""
),
"src/python/foo/readme.md": "Some long description.",
}
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/foo", target_name="foo-dist"),
InvalidFieldException,
)
def test_generate_long_description_field_from_non_existing_file(
chroot_rule_runner: RuleRunner,
) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
long_description_path="src/python/foo/readme.md",
provides=python_artifact(
name='foo',
version='1.2.3',
)
)
"""
),
}
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/foo", target_name="foo-dist"),
IntrinsicError,
)
def test_invalid_binary(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/invalid_binary/lib.py": "",
"src/python/invalid_binary/app1.py": "",
"src/python/invalid_binary/app2.py": "",
"src/python/invalid_binary/BUILD": textwrap.dedent(
"""\
python_sources(name='not_a_binary', sources=['lib.py'])
pex_binary(name='invalid_entrypoint_unowned1', entry_point='app1.py')
pex_binary(name='invalid_entrypoint_unowned2', entry_point='invalid_binary.app2')
python_distribution(
name='invalid_bin1',
provides=python_artifact(
name='invalid_bin1', version='1.1.1'
),
entry_points={
"console_scripts":{
"foo": ":not_a_binary",
},
},
)
python_distribution(
name='invalid_bin2',
provides=python_artifact(
name='invalid_bin2', version='1.1.1'
),
entry_points={
"console_scripts":{
"foo": ":invalid_entrypoint_unowned1",
},
},
)
python_distribution(
name='invalid_bin3',
provides=python_artifact(
name='invalid_bin3', version='1.1.1'
),
entry_points={
"console_scripts":{
"foo": ":invalid_entrypoint_unowned2",
},
},
)
"""
),
}
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin1"),
InvalidEntryPoint,
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin2"),
InvalidEntryPoint,
)
assert_chroot_error(
chroot_rule_runner,
Address("src/python/invalid_binary", target_name="invalid_bin3"),
InvalidEntryPoint,
)
def test_binary_shorthand(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/project/app.py": "",
"src/python/project/BUILD": textwrap.dedent(
"""
python_sources()
pex_binary(name='bin', entry_point='app.py:func')
python_distribution(
name='dist',
provides=python_artifact(
name='bin', version='1.1.1'
),
entry_points={
"console_scripts":{
"foo": ":bin",
},
},
)
"""
),
}
)
assert_chroot(
chroot_rule_runner,
["project/app.py", "setup.py", "MANIFEST.in"],
{
"name": "bin",
"version": "1.1.1",
"plugin_demo": "hello world",
"packages": ("project",),
"namespace_packages": (),
"install_requires": (),
"python_requires": "<4,>=3.7",
"package_data": {},
"entry_points": {"console_scripts": ["foo = project.app:func"]},
},
Address("src/python/project", target_name="dist"),
)
def test_get_sources() -> None:
def assert_sources(
expected_files,
expected_packages,
expected_namespace_packages,
expected_package_data,
addrs,
):
rule_runner = create_setup_py_rule_runner(
rules=[
get_sources,
get_owned_dependencies,
get_exporting_owner,
*target_types_rules.rules(),
*python_sources.rules(),
QueryRule(OwnedDependencies, (DependencyOwner,)),
QueryRule(DistBuildSources, (DistBuildChrootRequest,)),
]
)
rule_runner.write_files(
{
"src/python/foo/bar/baz/BUILD": textwrap.dedent(
"""
python_sources(name='baz1', sources=['baz1.py'])
python_sources(name='baz2', sources=['baz2.py'])
"""
),
"src/python/foo/bar/baz/baz1.py": "",
"src/python/foo/bar/baz/baz2.py": "",
"src/python/foo/bar/__init__.py": _namespace_decl,
"src/python/foo/qux/BUILD": "python_sources()",
"src/python/foo/qux/__init__.py": "",
"src/python/foo/qux/qux.py": "",
"src/python/foo/resources/BUILD": 'resource(source="js/code.js")',
"src/python/foo/resources/js/code.js": "",
"src/python/foo/__init__.py": "",
# We synthesize an owner for the addrs, so we have something to put in SetupPyChrootRequest.
"src/python/foo/BUILD": textwrap.dedent(
f"""
python_distribution(
name="dist",
dependencies=["{'","'.join(addr.spec for addr in addrs)}"],
provides=python_artifact(name="foo", version="3.2.1"),
)
"""
),
}
)
owner_tgt = rule_runner.get_target(Address("src/python/foo", target_name="dist"))
srcs = rule_runner.request(
DistBuildSources,
[
DistBuildChrootRequest(
ExportedTarget(owner_tgt),
InterpreterConstraints(PythonSetup.default_interpreter_constraints),
)
],
)
chroot_snapshot = rule_runner.request(Snapshot, [srcs.digest])
assert sorted(expected_files) == sorted(chroot_snapshot.files)
assert sorted(expected_packages) == sorted(srcs.packages)
assert sorted(expected_namespace_packages) == sorted(srcs.namespace_packages)
assert expected_package_data == dict(srcs.package_data)
assert_sources(
expected_files=["foo/bar/baz/baz1.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=[Address("src/python/foo/bar/baz", target_name="baz1")],
)
assert_sources(
expected_files=["foo/bar/baz/baz2.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=[Address("src/python/foo/bar/baz", target_name="baz2")],
)
assert_sources(
expected_files=["foo/qux/qux.py", "foo/qux/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.qux"],
expected_namespace_packages=[],
expected_package_data={},
addrs=[Address("src/python/foo/qux")],
)
assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=[
Address("src/python/foo/bar/baz", target_name="baz1"),
Address("src/python/foo/qux"),
Address("src/python/foo/resources"),
],
)
assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/baz/baz2.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=[
Address("src/python/foo/bar/baz", target_name="baz1"),
Address("src/python/foo/bar/baz", target_name="baz2"),
Address("src/python/foo/qux"),
Address("src/python/foo/resources"),
],
)
def test_get_requirements() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
determine_explicitly_provided_setup_kwargs,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
*target_types_rules.rules(),
SubsystemRule(SetupPyGeneration),
QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
]
)
rule_runner.write_files(
{
"3rdparty/BUILD": textwrap.dedent(
"""
python_requirement(name='ext1', requirements=['ext1==1.22.333'])
python_requirement(name='ext2', requirements=['ext2==4.5.6'])
python_requirement(name='ext3', requirements=['ext3==0.0.1'])
"""
),
"src/python/foo/bar/baz/a.py": "",
"src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
"src/python/foo/bar/qux/a.py": "",
"src/python/foo/bar/qux/BUILD": "python_sources(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'])",
"src/python/foo/bar/a.py": "",
"src/python/foo/bar/BUILD": textwrap.dedent(
"""
python_distribution(
name='bar-dist',
dependencies=[':bar'],
provides=python_artifact(name='bar', version='9.8.7'),
)
python_sources(dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'])
"""
),
"src/python/foo/corge/a.py": "",
"src/python/foo/corge/BUILD": textwrap.dedent(
"""
python_distribution(
name='corge-dist',
# Tests having a 3rdparty requirement directly on a python_distribution.
dependencies=[':corge', '3rdparty:ext3'],
provides=python_artifact(name='corge', version='2.2.2'),
)
python_sources(dependencies=['src/python/foo/bar'])
"""
),
}
)
assert_requirements(
rule_runner,
["ext1==1.22.333", "ext2==4.5.6"],
Address("src/python/foo/bar", target_name="bar-dist"),
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar==9.8.7"],
Address("src/python/foo/corge", target_name="corge-dist"),
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar~=9.8.7"],
Address("src/python/foo/corge", target_name="corge-dist"),
version_scheme=FirstPartyDependencyVersionScheme.COMPATIBLE,
)
assert_requirements(
rule_runner,
["ext3==0.0.1", "bar"],
Address("src/python/foo/corge", target_name="corge-dist"),
version_scheme=FirstPartyDependencyVersionScheme.ANY,
)
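# The three assertions above cover each FirstPartyDependencyVersionScheme value:
# EXACT pins the first-party dep as "bar==9.8.7", COMPATIBLE relaxes it to
# "bar~=9.8.7", and ANY drops the version specifier entirely ("bar").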
def test_get_requirements_with_exclude() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
determine_explicitly_provided_setup_kwargs,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
*target_types_rules.rules(),
SubsystemRule(SetupPyGeneration),
QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
]
)
rule_runner.write_files(
{
"3rdparty/BUILD": textwrap.dedent(
"""
python_requirement(name='ext1', requirements=['ext1==1.22.333'])
python_requirement(name='ext2', requirements=['ext2==4.5.6'])
python_requirement(name='ext3', requirements=['ext3==0.0.1'])
"""
),
"src/python/foo/bar/baz/a.py": "",
"src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
"src/python/foo/bar/qux/a.py": "",
"src/python/foo/bar/qux/BUILD": "python_sources(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'])",
"src/python/foo/bar/a.py": "",
"src/python/foo/bar/BUILD": textwrap.dedent(
"""
python_distribution(
name='bar-dist',
dependencies=['!!3rdparty:ext2',':bar'],
provides=python_artifact(name='bar', version='9.8.7'),
)
python_sources(dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'])
"""
),
}
)
assert_requirements(
rule_runner, ["ext1==1.22.333"], Address("src/python/foo/bar", target_name="bar-dist")
)
def test_get_requirements_with_override_dependency_issue_17593() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
determine_explicitly_provided_setup_kwargs,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
*target_types_rules.rules(),
SubsystemRule(SetupPyGeneration),
QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
]
)
rule_runner.write_files(
{
"3rdparty/BUILD": textwrap.dedent(
"""
python_requirement(name='ext1', requirements=['ext1==1.22.333'], dependencies=[':ext2'])
python_requirement(name='ext2', requirements=['ext2==4.5.6'])
"""
),
"src/python/foo/bar/baz/a.py": "",
"src/python/foo/bar/baz/BUILD": "python_sources(dependencies=['3rdparty:ext1'])",
"src/python/foo/bar/a.py": "",
"src/python/foo/bar/BUILD": textwrap.dedent(
"""
python_distribution(
name='bar-dist',
dependencies=[':bar'],
provides=python_artifact(name='bar', version='9.8.7'),
)
python_sources(dependencies=['src/python/foo/bar/baz'])
"""
),
}
)
assert_requirements(
rule_runner,
["ext1==1.22.333", "ext2==4.5.6"],
Address("src/python/foo/bar", target_name="bar-dist"),
)
def assert_requirements(
rule_runner,
expected_req_strs,
addr: Address,
*,
version_scheme: FirstPartyDependencyVersionScheme = FirstPartyDependencyVersionScheme.EXACT,
):
rule_runner.set_options(
[f"--setup-py-generation-first-party-dependency-version-scheme={version_scheme.value}"],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
tgt = rule_runner.get_target(addr)
reqs = rule_runner.request(
ExportedTargetRequirements,
[DependencyOwner(ExportedTarget(tgt))],
)
assert sorted(expected_req_strs) == list(reqs)
def test_owned_dependencies() -> None:
rule_runner = create_setup_py_rule_runner(
rules=[
get_owned_dependencies,
get_exporting_owner,
*target_types_rules.rules(),
QueryRule(OwnedDependencies, (DependencyOwner,)),
]
)
rule_runner.write_files(
{
"src/python/foo/bar/baz/BUILD": textwrap.dedent(
"""
python_sources(name='baz1')
python_sources(name='baz2')
"""
),
"src/python/foo/bar/resource.txt": "",
"src/python/foo/bar/bar1.py": "",
"src/python/foo/bar/bar2.py": "",
"src/python/foo/bar/BUILD": textwrap.dedent(
"""
python_distribution(
name='bar1-dist',
dependencies=[':bar1'],
provides=python_artifact(name='bar1', version='1.1.1'),
)
python_sources(
name='bar1',
sources=['bar1.py'],
dependencies=['src/python/foo/bar/baz:baz1'],
)
python_sources(
name='bar2',
sources=['bar2.py'],
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resource(name='bar-resources', source='resource.txt')
"""
),
"src/python/foo/foo.py": "",
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
dependencies=[':foo'],
provides=python_artifact(name='foo', version='3.4.5'),
)
python_sources(
sources=['foo.py'],
dependencies=['src/python/foo/bar:bar1', 'src/python/foo/bar:bar2'],
)
"""
),
}
)
def assert_owned(owned: Iterable[str], exported: Address):
tgt = rule_runner.get_target(exported)
assert sorted(owned) == sorted(
od.target.address.spec
for od in rule_runner.request(
OwnedDependencies,
[DependencyOwner(ExportedTarget(tgt))],
)
)
assert_owned(
[
"src/python/foo/bar/bar1.py:bar1",
"src/python/foo/bar:bar1-dist",
"src/python/foo/bar/baz:baz1",
],
Address("src/python/foo/bar", target_name="bar1-dist"),
)
assert_owned(
[
"src/python/foo/bar/bar2.py:bar2",
"src/python/foo/foo.py",
"src/python/foo:foo-dist",
"src/python/foo/bar:bar-resources",
"src/python/foo/bar/baz:baz2",
],
Address("src/python/foo", target_name="foo-dist"),
)
@pytest.fixture
def exporting_owner_rule_runner() -> RuleRunner:
return create_setup_py_rule_runner(
rules=[
get_exporting_owner,
*target_types_rules.rules(),
QueryRule(ExportedTarget, (OwnedDependency,)),
]
)
def assert_is_owner(rule_runner: RuleRunner, owner: str, owned: Address):
tgt = rule_runner.get_target(owned)
assert (
owner
== rule_runner.request(
ExportedTarget,
[OwnedDependency(tgt)],
).target.address.spec
)
def assert_owner_error(rule_runner, owned: Address, exc_cls: type[Exception]):
tgt = rule_runner.get_target(owned)
with pytest.raises(ExecutionError) as excinfo:
rule_runner.request(
ExportedTarget,
[OwnedDependency(tgt)],
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def assert_no_owner(rule_runner: RuleRunner, owned: Address):
assert_owner_error(rule_runner, owned, NoOwnerError)
def assert_ambiguous_owner(rule_runner: RuleRunner, owned: Address):
assert_owner_error(rule_runner, owned, AmbiguousOwnerError)
def test_get_owner_simple(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.write_files(
{
"src/python/foo/bar/baz/BUILD": textwrap.dedent(
"""
python_sources(name='baz1')
python_sources(name='baz2')
"""
),
"src/python/foo/bar/resource.ext": "",
"src/python/foo/bar/bar2.py": "",
"src/python/foo/bar/BUILD": textwrap.dedent(
"""
python_distribution(
name='bar1',
dependencies=['src/python/foo/bar/baz:baz1'],
provides=python_artifact(name='bar1', version='1.1.1'),
)
python_sources(
name='bar2',
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resource(name='bar-resources', source='resource.ext')
"""
),
"src/python/foo/foo2.py": "",
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo1',
dependencies=['src/python/foo/bar/baz:baz2'],
provides=python_artifact(name='foo1', version='0.1.2'),
)
python_sources(name='foo2')
python_distribution(
name='foo3',
dependencies=['src/python/foo/bar:bar2'],
provides=python_artifact(name='foo3', version='3.4.5'),
)
"""
),
}
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo/bar:bar1",
Address("src/python/foo/bar", target_name="bar1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo/bar:bar1",
Address("src/python/foo/bar/baz", target_name="baz1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo1",
Address("src/python/foo", target_name="foo1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo", target_name="foo3"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo/bar", target_name="bar2", relative_file_path="bar2.py"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/foo:foo3",
Address("src/python/foo/bar", target_name="bar-resources"),
)
assert_no_owner(exporting_owner_rule_runner, Address("src/python/foo", target_name="foo2"))
assert_ambiguous_owner(
exporting_owner_rule_runner, Address("src/python/foo/bar/baz", target_name="baz2")
)
def test_get_owner_siblings(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.write_files(
{
"src/python/siblings/BUILD": textwrap.dedent(
"""
python_sources(name='sibling1')
python_distribution(
name='sibling2',
dependencies=['src/python/siblings:sibling1'],
provides=python_artifact(name='siblings', version='2.2.2'),
)
"""
),
}
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/siblings:sibling2",
Address("src/python/siblings", target_name="sibling1"),
)
assert_is_owner(
exporting_owner_rule_runner,
"src/python/siblings:sibling2",
Address("src/python/siblings", target_name="sibling2"),
)
def test_get_owner_not_an_ancestor(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.write_files(
{
"src/python/notanancestor/aaa/BUILD": textwrap.dedent(
"""
python_sources(name='aaa')
"""
),
"src/python/notanancestor/bbb/BUILD": textwrap.dedent(
"""
python_distribution(
name='bbb',
dependencies=['src/python/notanancestor/aaa'],
provides=python_artifact(name='bbb', version='11.22.33'),
)
"""
),
}
)
assert_no_owner(exporting_owner_rule_runner, Address("src/python/notanancestor/aaa"))
assert_is_owner(
exporting_owner_rule_runner,
"src/python/notanancestor/bbb:bbb",
Address("src/python/notanancestor/bbb"),
)
def test_get_owner_multiple_ancestor_generations(exporting_owner_rule_runner: RuleRunner) -> None:
exporting_owner_rule_runner.write_files(
{
"src/python/aaa/bbb/ccc/BUILD": textwrap.dedent(
"""
python_sources(name='ccc')
"""
),
"src/python/aaa/bbb/BUILD": textwrap.dedent(
"""
python_distribution(
name='bbb',
dependencies=['src/python/aaa/bbb/ccc'],
provides=python_artifact(name='bbb', version='1.1.1'),
)
"""
),
"src/python/aaa/BUILD": textwrap.dedent(
"""
python_distribution(
name='aaa',
dependencies=['src/python/aaa/bbb/ccc'],
provides=python_artifact(name='aaa', version='2.2.2'),
)
"""
),
}
)
assert_is_owner(
exporting_owner_rule_runner, "src/python/aaa/bbb:bbb", Address("src/python/aaa/bbb/ccc")
)
assert_is_owner(
exporting_owner_rule_runner, "src/python/aaa/bbb:bbb", Address("src/python/aaa/bbb")
)
assert_is_owner(exporting_owner_rule_runner, "src/python/aaa:aaa", Address("src/python/aaa"))
def test_validate_args() -> None:
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("bdist_wheel", "upload"))
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("sdist", "-d", "new_distdir/"))
with pytest.raises(InvalidSetupPyArgs):
validate_commands(("--dist-dir", "new_distdir/", "sdist"))
validate_commands(("sdist",))
validate_commands(("bdist_wheel", "--foo"))
@pytest.mark.parametrize(
"python_src",
[
"__import__('pkg_resources').declare_namespace(__name__)",
"\n__import__('pkg_resources').declare_namespace(__name__) # type: ignore[attr-defined]",
"import pkg_resources; pkg_resources.declare_namespace(__name__)",
"from pkg_resources import declare_namespace; declare_namespace(__name__)",
],
)
def test_declares_pkg_resources_namespace_package(python_src: str) -> None:
assert declares_pkg_resources_namespace_package(python_src)
@pytest.mark.parametrize(
"python_src",
[
"",
"import os\n\nos.getcwd()",
"__path__ = 'foo'",
"import pkg_resources",
"add(1, 2); foo(__name__); self.shoot(__name__)",
"declare_namespace(bonk)",
"just nonsense, not even parseable",
],
)
def test_does_not_declare_pkg_resources_namespace_package(python_src: str) -> None:
assert not declares_pkg_resources_namespace_package(python_src)
def test_no_dist_type_selected() -> None:
rule_runner = RuleRunner(
rules=[
determine_explicitly_provided_setup_kwargs,
generate_chroot,
generate_setup_py,
determine_finalized_setup_kwargs,
get_sources,
get_requirements,
get_owned_dependencies,
get_exporting_owner,
package_python_dist,
*dists.rules(),
*python_sources.rules(),
*target_types_rules.rules(),
SubsystemRule(SetupPyGeneration),
QueryRule(BuiltPackage, (PythonDistributionFieldSet,)),
],
target_types=[PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
rule_runner.write_files(
{
"src/python/aaa/BUILD": textwrap.dedent(
"""
python_distribution(
name='aaa',
provides=python_artifact(name='aaa', version='2.2.2'),
wheel=False,
sdist=False
)
"""
),
}
)
address = Address("src/python/aaa", target_name="aaa")
with pytest.raises(ExecutionError) as exc_info:
rule_runner.request(
BuiltPackage,
inputs=[
PythonDistributionFieldSet(
address=address,
provides=PythonProvidesField(
PythonArtifact(name="aaa", version="2.2.2"), address
),
)
],
)
assert 1 == len(exc_info.value.wrapped_exceptions)
wrapped_exception = exc_info.value.wrapped_exceptions[0]
assert isinstance(wrapped_exception, NoDistTypeSelected)
assert (
"In order to package src/python/aaa:aaa at least one of 'wheel' or 'sdist' must be `True`."
== str(wrapped_exception)
)
def test_too_many_interpreter_constraints(chroot_rule_runner: RuleRunner) -> None:
chroot_rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""
python_distribution(
name='foo-dist',
provides=python_artifact(
name='foo',
version='1.2.3',
)
)
"""
),
}
)
addr = Address("src/python/foo", target_name="foo-dist")
tgt = chroot_rule_runner.get_target(addr)
err = softwrap(
"""
Expected a single interpreter constraint for src/python/foo:foo-dist,
got: CPython<3,>=2.7 OR CPython<3.10,>=3.8.
"""
)
with engine_error(SetupPyError, contains=err):
chroot_rule_runner.request(
DistBuildChroot,
[
DistBuildChrootRequest(
ExportedTarget(tgt),
InterpreterConstraints([">=2.7,<3", ">=3.8,<3.10"]),
)
],
)
| [
"[email protected]"
] | |
e7144c5ff44d0c156034aa78713708d4b8e205c2 | aeeb89d02db3e617fc118605f5464fbfa6ba0d2a | /comp.py | 2008ceba9b3de5e917142d1e88560354e219f930 | [] | no_license | nadhiyap/begin | 477f837e9ec3f06b9a2151c233dcf6281cf416c5 | 8d5b8b8ffe7e728b5191912b22737945df625c75 | refs/heads/master | 2021-04-18T22:07:07.674250 | 2018-04-15T07:37:19 | 2018-04-15T07:37:19 | 126,681,009 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | n=int(input("enter the num"))
b = 1  # flag: stays 1 while no divisor of n has been found
for i in range(2, n):
    if n % i == 0 and n != 2:
        b = 0  # a divisor exists, so n is composite
        break  # one divisor is enough; no need to keep scanning
if b == 0:
    print("yes")  # n is composite
else:
    print("no")  # no divisor found, so n is not composite
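# A reusable sketch of the same check (our own addition, not in the original
# script); it returns True exactly when the loop above would set b to 0.
def is_composite(num):
    for d in range(2, num):
        if num % d == 0 and num != 2:
            return True
    return False
# e.g. is_composite(9) -> True, is_composite(7) -> False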
| [
"[email protected]"
] | |
b350add2dff22ede8b47a2b661cda8cea8f733fd | 6c57b1694817d1710335429c12c2d9774ff446e3 | /2017-08-19/2/AS4-DA3-DB3_case56/generated_files/LEMS_c302_C2_AS4_DA3_DB3_nrn.py | ed1aa310632783f46c2d179ad6f34462fbaaee7e | [] | no_license | lungd/openworm-experiments | cd3875e8071c35eacb919c318344bac56d0fe379 | 065f481fbb445ef12b8ab2110f501686d26c213c | refs/heads/master | 2021-01-01T04:41:38.397726 | 2017-09-12T13:55:40 | 2017-09-12T13:55:40 | 97,220,679 | 1 | 1 | null | 2017-09-01T17:10:28 | 2017-07-14T10:07:56 | Python | UTF-8 | Python | false | false | 40,414 | py | '''
Neuron simulator export for:
Components:
Leak (Type: ionChannelPassive: conductance=1.0E-11 (SI conductance))
k_fast (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
k_slow (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
ca_boyle (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
ca_simple (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
k_muscle (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
ca_muscle (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
null (Type: notes)
CaPool (Type: fixedFactorConcentrationModel: restingConc=0.0 (SI concentration) decayConstant=0.013811870945509265 (SI time) rho=2.38919E-4 (SI rho_factor))
AVAL_to_DA3_elec_syn (Type: gapJunction: conductance=1.2000000000000002E-13 (SI conductance))
AVAR_to_DA3_elec_syn (Type: gapJunction: conductance=1.2000000000000002E-13 (SI conductance))
AVBL_to_DB3_elec_syn (Type: gapJunction: conductance=5.2E-13 (SI conductance))
AVBR_to_DB3_elec_syn (Type: gapJunction: conductance=5.2E-13 (SI conductance))
DA3_to_AVAL_elec_syn (Type: gapJunction: conductance=1.2000000000000002E-13 (SI conductance))
DA3_to_AVAR_elec_syn (Type: gapJunction: conductance=1.2000000000000002E-13 (SI conductance))
DB3_to_AVBL_elec_syn (Type: gapJunction: conductance=5.2E-13 (SI conductance))
DB3_to_AVBR_elec_syn (Type: gapJunction: conductance=5.2E-13 (SI conductance))
silent (Type: silentSynapse)
AVAL_to_DA3_exc_syn (Type: gradedSynapse: conductance=3.0000000000000004E-9 (SI conductance) delta=0.005 (SI voltage) k=500.0 (SI per_time) Vth=0.0 (SI voltage) erev=-0.01 (SI voltage))
AVAR_to_DA3_exc_syn (Type: gradedSynapse: conductance=3.0000000000000004E-9 (SI conductance) delta=0.005 (SI voltage) k=500.0 (SI per_time) Vth=0.0 (SI voltage) erev=-0.01 (SI voltage))
DA3_to_DB3_exc_syn (Type: gradedSynapse: conductance=2.6E-9 (SI conductance) delta=0.005 (SI voltage) k=500.0 (SI per_time) Vth=0.0 (SI voltage) erev=-0.01 (SI voltage))
DB3_to_AS4_inh_syn (Type: gradedSynapse: conductance=4.2E-9 (SI conductance) delta=0.005 (SI voltage) k=15.0 (SI per_time) Vth=0.0 (SI voltage) erev=-0.07 (SI voltage))
AS4_to_DA3_exc_syn (Type: gradedSynapse: conductance=2.0000000000000003E-10 (SI conductance) delta=0.005 (SI voltage) k=500.0 (SI per_time) Vth=0.0 (SI voltage) erev=-0.01 (SI voltage))
GenericMuscleCell (Type: cell)
GenericNeuronCell (Type: cell)
offset_current (Type: pulseGenerator: delay=0.0 (SI time) duration=2.0 (SI time) amplitude=0.0 (SI current))
stim_AVBL_1 (Type: pulseGenerator: delay=0.0 (SI time) duration=1.0 (SI time) amplitude=1.0E-12 (SI current))
stim_AVBR_1 (Type: pulseGenerator: delay=0.0 (SI time) duration=1.0 (SI time) amplitude=1.0E-12 (SI current))
stim_AVAL_1 (Type: pulseGenerator: delay=1.0 (SI time) duration=1.0 (SI time) amplitude=1.0E-12 (SI current))
stim_AVAR_1 (Type: pulseGenerator: delay=1.0 (SI time) duration=1.0 (SI time) amplitude=1.0E-12 (SI current))
stim_AVBL_2 (Type: pulseGenerator: delay=2.0 (SI time) duration=0.9 (SI time) amplitude=1.0E-12 (SI current))
stim_AVBR_2 (Type: pulseGenerator: delay=2.0 (SI time) duration=0.9 (SI time) amplitude=1.0E-12 (SI current))
stim_AS4_1 (Type: pulseGenerator: delay=0.0 (SI time) duration=2.9 (SI time) amplitude=1.5E-11 (SI current))
c302_C2_AS4_DA3_DB3 (Type: network)
sim_c302_C2_AS4_DA3_DB3 (Type: Simulation: length=3.0 (SI time) step=5.0E-5 (SI time))
This NEURON file has been generated by org.neuroml.export (see https://github.com/NeuroML/org.neuroml.export)
org.neuroml.export v1.5.3
org.neuroml.model v1.5.3
jLEMS v0.9.9.0
'''
import neuron
import time
import hashlib
h = neuron.h
h.load_file("stdlib.hoc")
h.load_file("stdgui.hoc")
h("objref p")
h("p = new PythonObject()")
class NeuronSimulation():
def __init__(self, tstop, dt, seed=123456789):
print("\n Starting simulation in NEURON generated from NeuroML2 model...\n")
self.seed = seed
self.randoms = []
self.next_global_id = 0 # Used in Random123 classes for elements using random(), etc.
self.next_spiking_input_id = 0 # Used in Random123 classes for elements using random(), etc.
'''
Adding simulation Component(id=sim_c302_C2_AS4_DA3_DB3 type=Simulation) of network/component: c302_C2_AS4_DA3_DB3 (Type: network)
'''
# ###################### Population: AS4
print("Population AS4 contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_AS4 = []
h("{ n_AS4 = 1 }")
h("objectvar a_AS4[n_AS4]")
for i in range(int(h.n_AS4)):
h("a_AS4[%i] = new GenericNeuronCell()"%i)
h("access a_AS4[%i].soma"%i)
self.next_global_id+=1
h("{ a_AS4[0].position(-1.8750001, -90.200005000000004, -65.375) }")
h("proc initialiseV_AS4() { for i = 0, n_AS4-1 { a_AS4[i].set_initial_v() } }")
h("objref fih_AS4")
h('{fih_AS4 = new FInitializeHandler(0, "initialiseV_AS4()")}')
h("proc initialiseIons_AS4() { for i = 0, n_AS4-1 { a_AS4[i].set_initial_ion_properties() } }")
h("objref fih_ion_AS4")
h('{fih_ion_AS4 = new FInitializeHandler(1, "initialiseIons_AS4()")}')
# ###################### Population: AVAL
print("Population AVAL contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_AVAL = []
h("{ n_AVAL = 1 }")
h("objectvar a_AVAL[n_AVAL]")
for i in range(int(h.n_AVAL)):
h("a_AVAL[%i] = new GenericNeuronCell()"%i)
h("access a_AVAL[%i].soma"%i)
self.next_global_id+=1
h("{ a_AVAL[0].position(-0.55, -271.5, 37.982999999999997) }")
h("proc initialiseV_AVAL() { for i = 0, n_AVAL-1 { a_AVAL[i].set_initial_v() } }")
h("objref fih_AVAL")
h('{fih_AVAL = new FInitializeHandler(0, "initialiseV_AVAL()")}')
h("proc initialiseIons_AVAL() { for i = 0, n_AVAL-1 { a_AVAL[i].set_initial_ion_properties() } }")
h("objref fih_ion_AVAL")
h('{fih_ion_AVAL = new FInitializeHandler(1, "initialiseIons_AVAL()")}')
# ###################### Population: AVAR
print("Population AVAR contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_AVAR = []
h("{ n_AVAR = 1 }")
h("objectvar a_AVAR[n_AVAR]")
for i in range(int(h.n_AVAR)):
h("a_AVAR[%i] = new GenericNeuronCell()"%i)
h("access a_AVAR[%i].soma"%i)
self.next_global_id+=1
h("{ a_AVAR[0].position(-3.5, -271.5, 37.982999999999997) }")
h("proc initialiseV_AVAR() { for i = 0, n_AVAR-1 { a_AVAR[i].set_initial_v() } }")
h("objref fih_AVAR")
h('{fih_AVAR = new FInitializeHandler(0, "initialiseV_AVAR()")}')
h("proc initialiseIons_AVAR() { for i = 0, n_AVAR-1 { a_AVAR[i].set_initial_ion_properties() } }")
h("objref fih_ion_AVAR")
h('{fih_ion_AVAR = new FInitializeHandler(1, "initialiseIons_AVAR()")}')
# ###################### Population: AVBL
print("Population AVBL contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_AVBL = []
h("{ n_AVBL = 1 }")
h("objectvar a_AVBL[n_AVBL]")
for i in range(int(h.n_AVBL)):
h("a_AVBL[%i] = new GenericNeuronCell()"%i)
h("access a_AVBL[%i].soma"%i)
self.next_global_id+=1
h("{ a_AVBL[0].position(0.225, -269.793999999999983, 37.863002999999999) }")
h("proc initialiseV_AVBL() { for i = 0, n_AVBL-1 { a_AVBL[i].set_initial_v() } }")
h("objref fih_AVBL")
h('{fih_AVBL = new FInitializeHandler(0, "initialiseV_AVBL()")}')
h("proc initialiseIons_AVBL() { for i = 0, n_AVBL-1 { a_AVBL[i].set_initial_ion_properties() } }")
h("objref fih_ion_AVBL")
h('{fih_ion_AVBL = new FInitializeHandler(1, "initialiseIons_AVBL()")}')
# ###################### Population: AVBR
print("Population AVBR contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_AVBR = []
h("{ n_AVBR = 1 }")
h("objectvar a_AVBR[n_AVBR]")
for i in range(int(h.n_AVBR)):
h("a_AVBR[%i] = new GenericNeuronCell()"%i)
h("access a_AVBR[%i].soma"%i)
self.next_global_id+=1
h("{ a_AVBR[0].position(-4.581, -269.793999999999983, 37.863002999999999) }")
h("proc initialiseV_AVBR() { for i = 0, n_AVBR-1 { a_AVBR[i].set_initial_v() } }")
h("objref fih_AVBR")
h('{fih_AVBR = new FInitializeHandler(0, "initialiseV_AVBR()")}')
h("proc initialiseIons_AVBR() { for i = 0, n_AVBR-1 { a_AVBR[i].set_initial_ion_properties() } }")
h("objref fih_ion_AVBR")
h('{fih_ion_AVBR = new FInitializeHandler(1, "initialiseIons_AVBR()")}')
# ###################### Population: DA3
print("Population DA3 contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_DA3 = []
h("{ n_DA3 = 1 }")
h("objectvar a_DA3[n_DA3]")
for i in range(int(h.n_DA3)):
h("a_DA3[%i] = new GenericNeuronCell()"%i)
h("access a_DA3[%i].soma"%i)
self.next_global_id+=1
h("{ a_DA3[0].position(-1.65, -123.650000000000006, -58.350002000000003) }")
h("proc initialiseV_DA3() { for i = 0, n_DA3-1 { a_DA3[i].set_initial_v() } }")
h("objref fih_DA3")
h('{fih_DA3 = new FInitializeHandler(0, "initialiseV_DA3()")}')
h("proc initialiseIons_DA3() { for i = 0, n_DA3-1 { a_DA3[i].set_initial_ion_properties() } }")
h("objref fih_ion_DA3")
h('{fih_ion_DA3 = new FInitializeHandler(1, "initialiseIons_DA3()")}')
# ###################### Population: DB3
print("Population DB3 contains 1 instance(s) of component: GenericNeuronCell of type: cell")
print("Setting the default initial concentrations for ca (used in GenericNeuronCell) to 0.0 mM (internal), 2.0 mM (external)")
h("cai0_ca_ion = 0.0")
h("cao0_ca_ion = 2.0")
h.load_file("GenericNeuronCell.hoc")
a_DB3 = []
h("{ n_DB3 = 1 }")
h("objectvar a_DB3[n_DB3]")
for i in range(int(h.n_DB3)):
h("a_DB3[%i] = new GenericNeuronCell()"%i)
h("access a_DB3[%i].soma"%i)
self.next_global_id+=1
h("{ a_DB3[0].position(-1.85, -195.275000000000006, -18.524999999999999) }")
h("proc initialiseV_DB3() { for i = 0, n_DB3-1 { a_DB3[i].set_initial_v() } }")
h("objref fih_DB3")
h('{fih_DB3 = new FInitializeHandler(0, "initialiseV_DB3()")}')
h("proc initialiseIons_DB3() { for i = 0, n_DB3-1 { a_DB3[i].set_initial_ion_properties() } }")
h("objref fih_ion_DB3")
h('{fih_ion_DB3 = new FInitializeHandler(1, "initialiseIons_DB3()")}')
# ###################### Electrical Projection: NC_AVAL_DA3_Generic_GJ
print("Adding electrical projection: NC_AVAL_DA3_Generic_GJ from AVAL to DA3, with 1 connection(s)")
h("objectvar syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_A[1]")
h("objectvar syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVAL[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma], weight: 6.0
h("a_AVAL[0].soma { syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_A[0] = new AVAL_to_DA3_elec_syn(0.5) }")
h("a_DA3[0].soma { syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_B[0] = new AVAL_to_DA3_elec_syn(0.5) }")
h("a_AVAL[0].soma { syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_A[0].weight = 6.0 }")
h("a_DA3[0].soma { syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_B[0].weight = 6.0 }")
h("setpointer syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_A[0].vpeer, a_DA3[0].soma.v(0.5)")
h("setpointer syn_NC_AVAL_DA3_Generic_GJ_AVAL_to_DA3_elec_syn_B[0].vpeer, a_AVAL[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_AVAR_DA3_Generic_GJ
print("Adding electrical projection: NC_AVAR_DA3_Generic_GJ from AVAR to DA3, with 1 connection(s)")
h("objectvar syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_A[1]")
h("objectvar syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVAR[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma], weight: 7.0
h("a_AVAR[0].soma { syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_A[0] = new AVAR_to_DA3_elec_syn(0.5) }")
h("a_DA3[0].soma { syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_B[0] = new AVAR_to_DA3_elec_syn(0.5) }")
h("a_AVAR[0].soma { syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_A[0].weight = 7.0 }")
h("a_DA3[0].soma { syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_B[0].weight = 7.0 }")
h("setpointer syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_A[0].vpeer, a_DA3[0].soma.v(0.5)")
h("setpointer syn_NC_AVAR_DA3_Generic_GJ_AVAR_to_DA3_elec_syn_B[0].vpeer, a_AVAR[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_AVBL_DB3_Generic_GJ
print("Adding electrical projection: NC_AVBL_DB3_Generic_GJ from AVBL to DB3, with 1 connection(s)")
h("objectvar syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_A[1]")
h("objectvar syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVBL[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma], weight: 9.0
h("a_AVBL[0].soma { syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_A[0] = new AVBL_to_DB3_elec_syn(0.5) }")
h("a_DB3[0].soma { syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_B[0] = new AVBL_to_DB3_elec_syn(0.5) }")
h("a_AVBL[0].soma { syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_A[0].weight = 9.0 }")
h("a_DB3[0].soma { syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_B[0].weight = 9.0 }")
h("setpointer syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_A[0].vpeer, a_DB3[0].soma.v(0.5)")
h("setpointer syn_NC_AVBL_DB3_Generic_GJ_AVBL_to_DB3_elec_syn_B[0].vpeer, a_AVBL[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_AVBR_DB3_Generic_GJ
print("Adding electrical projection: NC_AVBR_DB3_Generic_GJ from AVBR to DB3, with 1 connection(s)")
h("objectvar syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_A[1]")
h("objectvar syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVBR[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma], weight: 2.0
h("a_AVBR[0].soma { syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_A[0] = new AVBR_to_DB3_elec_syn(0.5) }")
h("a_DB3[0].soma { syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_B[0] = new AVBR_to_DB3_elec_syn(0.5) }")
h("a_AVBR[0].soma { syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_A[0].weight = 2.0 }")
h("a_DB3[0].soma { syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_B[0].weight = 2.0 }")
h("setpointer syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_A[0].vpeer, a_DB3[0].soma.v(0.5)")
h("setpointer syn_NC_AVBR_DB3_Generic_GJ_AVBR_to_DB3_elec_syn_B[0].vpeer, a_AVBR[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_DA3_AVAL_Generic_GJ
print("Adding electrical projection: NC_DA3_AVAL_Generic_GJ from DA3 to AVAL, with 1 connection(s)")
h("objectvar syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_A[1]")
h("objectvar syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_AVAL[0].soma], weight: 6.0
h("a_DA3[0].soma { syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_A[0] = new DA3_to_AVAL_elec_syn(0.5) }")
h("a_AVAL[0].soma { syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_B[0] = new DA3_to_AVAL_elec_syn(0.5) }")
h("a_DA3[0].soma { syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_A[0].weight = 6.0 }")
h("a_AVAL[0].soma { syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_B[0].weight = 6.0 }")
h("setpointer syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_A[0].vpeer, a_AVAL[0].soma.v(0.5)")
h("setpointer syn_NC_DA3_AVAL_Generic_GJ_DA3_to_AVAL_elec_syn_B[0].vpeer, a_DA3[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_DA3_AVAR_Generic_GJ
print("Adding electrical projection: NC_DA3_AVAR_Generic_GJ from DA3 to AVAR, with 1 connection(s)")
h("objectvar syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_A[1]")
h("objectvar syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_AVAR[0].soma], weight: 7.0
h("a_DA3[0].soma { syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_A[0] = new DA3_to_AVAR_elec_syn(0.5) }")
h("a_AVAR[0].soma { syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_B[0] = new DA3_to_AVAR_elec_syn(0.5) }")
h("a_DA3[0].soma { syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_A[0].weight = 7.0 }")
h("a_AVAR[0].soma { syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_B[0].weight = 7.0 }")
h("setpointer syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_A[0].vpeer, a_AVAR[0].soma.v(0.5)")
h("setpointer syn_NC_DA3_AVAR_Generic_GJ_DA3_to_AVAR_elec_syn_B[0].vpeer, a_DA3[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_DB3_AVBL_Generic_GJ
print("Adding electrical projection: NC_DB3_AVBL_Generic_GJ from DB3 to AVBL, with 1 connection(s)")
h("objectvar syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_A[1]")
h("objectvar syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_AVBL[0].soma], weight: 9.0
h("a_DB3[0].soma { syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_A[0] = new DB3_to_AVBL_elec_syn(0.5) }")
h("a_AVBL[0].soma { syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_B[0] = new DB3_to_AVBL_elec_syn(0.5) }")
h("a_DB3[0].soma { syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_A[0].weight = 9.0 }")
h("a_AVBL[0].soma { syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_B[0].weight = 9.0 }")
h("setpointer syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_A[0].vpeer, a_AVBL[0].soma.v(0.5)")
h("setpointer syn_NC_DB3_AVBL_Generic_GJ_DB3_to_AVBL_elec_syn_B[0].vpeer, a_DB3[0].soma.v(0.5)")
# ###################### Electrical Projection: NC_DB3_AVBR_Generic_GJ
print("Adding electrical projection: NC_DB3_AVBR_Generic_GJ from DB3 to AVBR, with 1 connection(s)")
h("objectvar syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_A[1]")
h("objectvar syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_B[1]")
# Elect Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_AVBR[0].soma], weight: 2.0
h("a_DB3[0].soma { syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_A[0] = new DB3_to_AVBR_elec_syn(0.5) }")
h("a_AVBR[0].soma { syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_B[0] = new DB3_to_AVBR_elec_syn(0.5) }")
h("a_DB3[0].soma { syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_A[0].weight = 2.0 }")
h("a_AVBR[0].soma { syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_B[0].weight = 2.0 }")
h("setpointer syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_A[0].vpeer, a_AVBR[0].soma.v(0.5)")
h("setpointer syn_NC_DB3_AVBR_Generic_GJ_DB3_to_AVBR_elec_syn_B[0].vpeer, a_DB3[0].soma.v(0.5)")
# ###################### Continuous Projection: NC_AVAL_DA3_Acetylcholine
print("Adding continuous projection: NC_AVAL_DA3_Acetylcholine from AVAL to DA3, with 1 connection(s)")
h("objectvar syn_NC_AVAL_DA3_Acetylcholine_silent_pre[1]")
h("objectvar syn_NC_AVAL_DA3_Acetylcholine_AVAL_to_DA3_exc_syn_post[1]")
# Continuous Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVAL[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma], weight: 12.0
h("a_AVAL[0].soma { syn_NC_AVAL_DA3_Acetylcholine_silent_pre[0] = new silent(0.500000) }")
h("a_DA3[0].soma { syn_NC_AVAL_DA3_Acetylcholine_AVAL_to_DA3_exc_syn_post[0] = new AVAL_to_DA3_exc_syn(0.500000) }")
h("a_AVAL[0].soma { syn_NC_AVAL_DA3_Acetylcholine_silent_pre[0].weight = 12.0 }")
h("a_DA3[0].soma { syn_NC_AVAL_DA3_Acetylcholine_AVAL_to_DA3_exc_syn_post[0].weight = 12.0 }")
h("setpointer syn_NC_AVAL_DA3_Acetylcholine_silent_pre[0].vpeer, a_DA3[0].soma.v(0.500000)")
h("setpointer syn_NC_AVAL_DA3_Acetylcholine_AVAL_to_DA3_exc_syn_post[0].vpeer, a_AVAL[0].soma.v(0.500000)")
# ###################### Continuous Projection: NC_AVAR_DA3_Acetylcholine
print("Adding continuous projection: NC_AVAR_DA3_Acetylcholine from AVAR to DA3, with 1 connection(s)")
h("objectvar syn_NC_AVAR_DA3_Acetylcholine_silent_pre[1]")
h("objectvar syn_NC_AVAR_DA3_Acetylcholine_AVAR_to_DA3_exc_syn_post[1]")
# Continuous Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AVAR[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma], weight: 8.0
h("a_AVAR[0].soma { syn_NC_AVAR_DA3_Acetylcholine_silent_pre[0] = new silent(0.500000) }")
h("a_DA3[0].soma { syn_NC_AVAR_DA3_Acetylcholine_AVAR_to_DA3_exc_syn_post[0] = new AVAR_to_DA3_exc_syn(0.500000) }")
h("a_AVAR[0].soma { syn_NC_AVAR_DA3_Acetylcholine_silent_pre[0].weight = 8.0 }")
h("a_DA3[0].soma { syn_NC_AVAR_DA3_Acetylcholine_AVAR_to_DA3_exc_syn_post[0].weight = 8.0 }")
h("setpointer syn_NC_AVAR_DA3_Acetylcholine_silent_pre[0].vpeer, a_DA3[0].soma.v(0.500000)")
h("setpointer syn_NC_AVAR_DA3_Acetylcholine_AVAR_to_DA3_exc_syn_post[0].vpeer, a_AVAR[0].soma.v(0.500000)")
# ###################### Continuous Projection: NC_DA3_DB3_Acetylcholine
print("Adding continuous projection: NC_DA3_DB3_Acetylcholine from DA3 to DB3, with 1 connection(s)")
h("objectvar syn_NC_DA3_DB3_Acetylcholine_silent_pre[1]")
h("objectvar syn_NC_DA3_DB3_Acetylcholine_DA3_to_DB3_exc_syn_post[1]")
# Continuous Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma], weight: 2.0
h("a_DA3[0].soma { syn_NC_DA3_DB3_Acetylcholine_silent_pre[0] = new silent(0.500000) }")
h("a_DB3[0].soma { syn_NC_DA3_DB3_Acetylcholine_DA3_to_DB3_exc_syn_post[0] = new DA3_to_DB3_exc_syn(0.500000) }")
h("a_DA3[0].soma { syn_NC_DA3_DB3_Acetylcholine_silent_pre[0].weight = 2.0 }")
h("a_DB3[0].soma { syn_NC_DA3_DB3_Acetylcholine_DA3_to_DB3_exc_syn_post[0].weight = 2.0 }")
h("setpointer syn_NC_DA3_DB3_Acetylcholine_silent_pre[0].vpeer, a_DB3[0].soma.v(0.500000)")
h("setpointer syn_NC_DA3_DB3_Acetylcholine_DA3_to_DB3_exc_syn_post[0].vpeer, a_DA3[0].soma.v(0.500000)")
# ###################### Continuous Projection: NC_DB3_AS4_Acetylcholine
print("Adding continuous projection: NC_DB3_AS4_Acetylcholine from DB3 to AS4, with 1 connection(s)")
h("objectvar syn_NC_DB3_AS4_Acetylcholine_silent_pre[1]")
h("objectvar syn_NC_DB3_AS4_Acetylcholine_DB3_to_AS4_inh_syn_post[1]")
# Continuous Connection 0: cell 0, seg 0 (0.5) [0.5 on a_DB3[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_AS4[0].soma], weight: 2.0
h("a_DB3[0].soma { syn_NC_DB3_AS4_Acetylcholine_silent_pre[0] = new silent(0.500000) }")
h("a_AS4[0].soma { syn_NC_DB3_AS4_Acetylcholine_DB3_to_AS4_inh_syn_post[0] = new DB3_to_AS4_inh_syn(0.500000) }")
h("a_DB3[0].soma { syn_NC_DB3_AS4_Acetylcholine_silent_pre[0].weight = 2.0 }")
h("a_AS4[0].soma { syn_NC_DB3_AS4_Acetylcholine_DB3_to_AS4_inh_syn_post[0].weight = 2.0 }")
h("setpointer syn_NC_DB3_AS4_Acetylcholine_silent_pre[0].vpeer, a_AS4[0].soma.v(0.500000)")
h("setpointer syn_NC_DB3_AS4_Acetylcholine_DB3_to_AS4_inh_syn_post[0].vpeer, a_DB3[0].soma.v(0.500000)")
# ###################### Continuous Projection: NC_AS4_DA3_Acetylcholine
print("Adding continuous projection: NC_AS4_DA3_Acetylcholine from AS4 to DA3, with 1 connection(s)")
h("objectvar syn_NC_AS4_DA3_Acetylcholine_silent_pre[1]")
h("objectvar syn_NC_AS4_DA3_Acetylcholine_AS4_to_DA3_exc_syn_post[1]")
# Continuous Connection 0: cell 0, seg 0 (0.5) [0.5 on a_AS4[0].soma] -> cell 0, seg 0 (0.5) [0.5 on a_DA3[0].soma], weight: 2.0
h("a_AS4[0].soma { syn_NC_AS4_DA3_Acetylcholine_silent_pre[0] = new silent(0.500000) }")
h("a_DA3[0].soma { syn_NC_AS4_DA3_Acetylcholine_AS4_to_DA3_exc_syn_post[0] = new AS4_to_DA3_exc_syn(0.500000) }")
h("a_AS4[0].soma { syn_NC_AS4_DA3_Acetylcholine_silent_pre[0].weight = 2.0 }")
h("a_DA3[0].soma { syn_NC_AS4_DA3_Acetylcholine_AS4_to_DA3_exc_syn_post[0].weight = 2.0 }")
h("setpointer syn_NC_AS4_DA3_Acetylcholine_silent_pre[0].vpeer, a_DA3[0].soma.v(0.500000)")
h("setpointer syn_NC_AS4_DA3_Acetylcholine_AS4_to_DA3_exc_syn_post[0].vpeer, a_AS4[0].soma.v(0.500000)")
# ###################### Input List: Input_AVBL_stim_AVBL_1
print("Adding input list: Input_AVBL_stim_AVBL_1 to AVBL, with 1 inputs of type stim_AVBL_1")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVBL_stim_AVBL_1_0")
h("a_AVBL[0].soma { Input_AVBL_stim_AVBL_1_0 = new stim_AVBL_1(0.5) } ")
# ###################### Input List: Input_AVBR_stim_AVBR_1
print("Adding input list: Input_AVBR_stim_AVBR_1 to AVBR, with 1 inputs of type stim_AVBR_1")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVBR_stim_AVBR_1_0")
h("a_AVBR[0].soma { Input_AVBR_stim_AVBR_1_0 = new stim_AVBR_1(0.5) } ")
# ###################### Input List: Input_AVAL_stim_AVAL_1
print("Adding input list: Input_AVAL_stim_AVAL_1 to AVAL, with 1 inputs of type stim_AVAL_1")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVAL_stim_AVAL_1_0")
h("a_AVAL[0].soma { Input_AVAL_stim_AVAL_1_0 = new stim_AVAL_1(0.5) } ")
# ###################### Input List: Input_AVAR_stim_AVAR_1
print("Adding input list: Input_AVAR_stim_AVAR_1 to AVAR, with 1 inputs of type stim_AVAR_1")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVAR_stim_AVAR_1_0")
h("a_AVAR[0].soma { Input_AVAR_stim_AVAR_1_0 = new stim_AVAR_1(0.5) } ")
# ###################### Input List: Input_AVBL_stim_AVBL_2
print("Adding input list: Input_AVBL_stim_AVBL_2 to AVBL, with 1 inputs of type stim_AVBL_2")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVBL_stim_AVBL_2_0")
h("a_AVBL[0].soma { Input_AVBL_stim_AVBL_2_0 = new stim_AVBL_2(0.5) } ")
# ###################### Input List: Input_AVBR_stim_AVBR_2
print("Adding input list: Input_AVBR_stim_AVBR_2 to AVBR, with 1 inputs of type stim_AVBR_2")
# Adding single input: Component(id=0 type=input)
h("objref Input_AVBR_stim_AVBR_2_0")
h("a_AVBR[0].soma { Input_AVBR_stim_AVBR_2_0 = new stim_AVBR_2(0.5) } ")
# ###################### Input List: Input_AS4_stim_AS4_1
print("Adding input list: Input_AS4_stim_AS4_1 to AS4, with 1 inputs of type stim_AS4_1")
# Adding single input: Component(id=0 type=input)
h("objref Input_AS4_stim_AS4_1_0")
h("a_AS4[0].soma { Input_AS4_stim_AS4_1_0 = new stim_AS4_1(0.5) } ")
trec = h.Vector()
trec.record(h._ref_t)
h.tstop = tstop
h.dt = dt
h.steps_per_ms = 1/h.dt
# ###################### File to save: c302_C2_AS4_DA3_DB3.activity.dat (neurons_activity)
# Column: AS4/0/GenericNeuronCell/caConc
h(' objectvar v_AS4_v_neurons_activity ')
h(' { v_AS4_v_neurons_activity = new Vector() } ')
h(' { v_AS4_v_neurons_activity.record(&a_AS4[0].soma.cai(0.5)) } ')
h.v_AS4_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVAL/0/GenericNeuronCell/caConc
h(' objectvar v_AVAL_v_neurons_activity ')
h(' { v_AVAL_v_neurons_activity = new Vector() } ')
h(' { v_AVAL_v_neurons_activity.record(&a_AVAL[0].soma.cai(0.5)) } ')
h.v_AVAL_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVAR/0/GenericNeuronCell/caConc
h(' objectvar v_AVAR_v_neurons_activity ')
h(' { v_AVAR_v_neurons_activity = new Vector() } ')
h(' { v_AVAR_v_neurons_activity.record(&a_AVAR[0].soma.cai(0.5)) } ')
h.v_AVAR_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVBL/0/GenericNeuronCell/caConc
h(' objectvar v_AVBL_v_neurons_activity ')
h(' { v_AVBL_v_neurons_activity = new Vector() } ')
h(' { v_AVBL_v_neurons_activity.record(&a_AVBL[0].soma.cai(0.5)) } ')
h.v_AVBL_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVBR/0/GenericNeuronCell/caConc
h(' objectvar v_AVBR_v_neurons_activity ')
h(' { v_AVBR_v_neurons_activity = new Vector() } ')
h(' { v_AVBR_v_neurons_activity.record(&a_AVBR[0].soma.cai(0.5)) } ')
h.v_AVBR_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: DA3/0/GenericNeuronCell/caConc
h(' objectvar v_DA3_v_neurons_activity ')
h(' { v_DA3_v_neurons_activity = new Vector() } ')
h(' { v_DA3_v_neurons_activity.record(&a_DA3[0].soma.cai(0.5)) } ')
h.v_DA3_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# Column: DB3/0/GenericNeuronCell/caConc
h(' objectvar v_DB3_v_neurons_activity ')
h(' { v_DB3_v_neurons_activity = new Vector() } ')
h(' { v_DB3_v_neurons_activity.record(&a_DB3[0].soma.cai(0.5)) } ')
h.v_DB3_v_neurons_activity.resize((h.tstop * h.steps_per_ms) + 1)
# ###################### File to save: c302_C2_AS4_DA3_DB3.dat (neurons_v)
# Column: AS4/0/GenericNeuronCell/v
h(' objectvar v_AS4_v_neurons_v ')
h(' { v_AS4_v_neurons_v = new Vector() } ')
h(' { v_AS4_v_neurons_v.record(&a_AS4[0].soma.v(0.5)) } ')
h.v_AS4_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVAL/0/GenericNeuronCell/v
h(' objectvar v_AVAL_v_neurons_v ')
h(' { v_AVAL_v_neurons_v = new Vector() } ')
h(' { v_AVAL_v_neurons_v.record(&a_AVAL[0].soma.v(0.5)) } ')
h.v_AVAL_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVAR/0/GenericNeuronCell/v
h(' objectvar v_AVAR_v_neurons_v ')
h(' { v_AVAR_v_neurons_v = new Vector() } ')
h(' { v_AVAR_v_neurons_v.record(&a_AVAR[0].soma.v(0.5)) } ')
h.v_AVAR_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVBL/0/GenericNeuronCell/v
h(' objectvar v_AVBL_v_neurons_v ')
h(' { v_AVBL_v_neurons_v = new Vector() } ')
h(' { v_AVBL_v_neurons_v.record(&a_AVBL[0].soma.v(0.5)) } ')
h.v_AVBL_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: AVBR/0/GenericNeuronCell/v
h(' objectvar v_AVBR_v_neurons_v ')
h(' { v_AVBR_v_neurons_v = new Vector() } ')
h(' { v_AVBR_v_neurons_v.record(&a_AVBR[0].soma.v(0.5)) } ')
h.v_AVBR_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: DA3/0/GenericNeuronCell/v
h(' objectvar v_DA3_v_neurons_v ')
h(' { v_DA3_v_neurons_v = new Vector() } ')
h(' { v_DA3_v_neurons_v.record(&a_DA3[0].soma.v(0.5)) } ')
h.v_DA3_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# Column: DB3/0/GenericNeuronCell/v
h(' objectvar v_DB3_v_neurons_v ')
h(' { v_DB3_v_neurons_v = new Vector() } ')
h(' { v_DB3_v_neurons_v.record(&a_DB3[0].soma.v(0.5)) } ')
h.v_DB3_v_neurons_v.resize((h.tstop * h.steps_per_ms) + 1)
# ###################### File to save: time.dat (time)
# Column: time
h(' objectvar v_time ')
h(' { v_time = new Vector() } ')
h(' { v_time.record(&t) } ')
h.v_time.resize((h.tstop * h.steps_per_ms) + 1)
self.initialized = False
self.sim_end = -1 # will be overwritten
def run(self):
self.initialized = True
sim_start = time.time()
print("Running a simulation of %sms (dt = %sms; seed=%s)" % (h.tstop, h.dt, self.seed))
h.run()
self.sim_end = time.time()
sim_time = self.sim_end - sim_start
print("Finished NEURON simulation in %f seconds (%f mins)..."%(sim_time, sim_time/60.0))
self.save_results()
def advance(self):
if not self.initialized:
h.finitialize()
self.initialized = True
h.fadvance()
###############################################################################
# Hash function to use in generation of random value
# This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
###############################################################################
def _id32 (self,obj):
return int(hashlib.md5(obj).hexdigest()[0:8],16) # convert 8 first chars of md5 hash in base 16 to int
###############################################################################
# Initialize the stim randomizer
# This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
###############################################################################
def _init_stim_randomizer(self,rand, stimType, gid, seed):
rand.Random123(self._id32(stimType), gid, seed)
def save_results(self):
print("Saving results at t=%s..."%h.t)
if self.sim_end < 0: self.sim_end = time.time()
# ###################### File to save: time.dat (time)
py_v_time = [ t/1000 for t in h.v_time.to_python() ] # Convert to Python list for speed...
f_time_f2 = open('time.dat', 'w')
num_points = len(py_v_time) # Simulation may have been stopped before tstop...
for i in range(num_points):
            f_time_f2.write('%f'% py_v_time[i] + '\n') # Save in SI units...
f_time_f2.close()
print("Saved data to: time.dat")
# ###################### File to save: c302_C2_AS4_DA3_DB3.activity.dat (neurons_activity)
py_v_AS4_v_neurons_activity = [ float(x ) for x in h.v_AS4_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_AVAL_v_neurons_activity = [ float(x ) for x in h.v_AVAL_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_AVAR_v_neurons_activity = [ float(x ) for x in h.v_AVAR_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_AVBL_v_neurons_activity = [ float(x ) for x in h.v_AVBL_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_AVBR_v_neurons_activity = [ float(x ) for x in h.v_AVBR_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_DA3_v_neurons_activity = [ float(x ) for x in h.v_DA3_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
py_v_DB3_v_neurons_activity = [ float(x ) for x in h.v_DB3_v_neurons_activity.to_python() ] # Convert to Python list for speed, variable has dim: concentration
f_neurons_activity_f2 = open('c302_C2_AS4_DA3_DB3.activity.dat', 'w')
num_points = len(py_v_time) # Simulation may have been stopped before tstop...
for i in range(num_points):
f_neurons_activity_f2.write('%e\t'% py_v_time[i] + '%e\t'%(py_v_AS4_v_neurons_activity[i]) + '%e\t'%(py_v_AVAL_v_neurons_activity[i]) + '%e\t'%(py_v_AVAR_v_neurons_activity[i]) + '%e\t'%(py_v_AVBL_v_neurons_activity[i]) + '%e\t'%(py_v_AVBR_v_neurons_activity[i]) + '%e\t'%(py_v_DA3_v_neurons_activity[i]) + '%e\t'%(py_v_DB3_v_neurons_activity[i]) + '\n')
f_neurons_activity_f2.close()
print("Saved data to: c302_C2_AS4_DA3_DB3.activity.dat")
# ###################### File to save: c302_C2_AS4_DA3_DB3.dat (neurons_v)
py_v_AS4_v_neurons_v = [ float(x / 1000.0) for x in h.v_AS4_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_AVAL_v_neurons_v = [ float(x / 1000.0) for x in h.v_AVAL_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_AVAR_v_neurons_v = [ float(x / 1000.0) for x in h.v_AVAR_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_AVBL_v_neurons_v = [ float(x / 1000.0) for x in h.v_AVBL_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_AVBR_v_neurons_v = [ float(x / 1000.0) for x in h.v_AVBR_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_DA3_v_neurons_v = [ float(x / 1000.0) for x in h.v_DA3_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
py_v_DB3_v_neurons_v = [ float(x / 1000.0) for x in h.v_DB3_v_neurons_v.to_python() ] # Convert to Python list for speed, variable has dim: voltage
f_neurons_v_f2 = open('c302_C2_AS4_DA3_DB3.dat', 'w')
num_points = len(py_v_time) # Simulation may have been stopped before tstop...
for i in range(num_points):
f_neurons_v_f2.write('%e\t'% py_v_time[i] + '%e\t'%(py_v_AS4_v_neurons_v[i]) + '%e\t'%(py_v_AVAL_v_neurons_v[i]) + '%e\t'%(py_v_AVAR_v_neurons_v[i]) + '%e\t'%(py_v_AVBL_v_neurons_v[i]) + '%e\t'%(py_v_AVBR_v_neurons_v[i]) + '%e\t'%(py_v_DA3_v_neurons_v[i]) + '%e\t'%(py_v_DB3_v_neurons_v[i]) + '\n')
f_neurons_v_f2.close()
print("Saved data to: c302_C2_AS4_DA3_DB3.dat")
save_end = time.time()
save_time = save_end - self.sim_end
print("Finished saving results in %f seconds"%(save_time))
print("Done")
quit()
if __name__ == '__main__':
ns = NeuronSimulation(tstop=3000, dt=0.05, seed=123456789)
ns.run()
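# A manual-stepping sketch (our own; the generated script itself only calls run()).
# The class also exposes advance(), so an equivalent driver loop would be:
#
#   ns = NeuronSimulation(tstop=3000, dt=0.05, seed=123456789)
#   while h.t < h.tstop:
#       ns.advance()
#   ns.save_results()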
| [
"[email protected]"
] | |
11fc96ab7fc8484473a073ad362d9bb2a834d499 | fc049ef8172ed2f63147612d89b4125fe865441c | /scrape/common.py | 4ba416818b8fa9bf4233e789a9a5ed9f8be6436c | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | resistbot/people | a13b5f934cd6f498b349550f8978268648fb509b | a337c4b4d86bbb7b1911c0ec4cac0010c092867a | refs/heads/main | 2023-02-22T11:37:23.600635 | 2021-01-16T22:38:32 | 2021-01-16T22:38:32 | 320,011,736 | 2 | 0 | CC0-1.0 | 2020-12-09T16:16:44 | 2020-12-09T16:16:43 | null | UTF-8 | Python | false | false | 3,409 | py | import re
import uuid
from collections import OrderedDict
from utils import get_jurisdiction_id, reformat_phone_number
def clean_spaces(text):
return re.sub(r"\s+", " ", text).strip()
PARTIES = {
"d": "Democratic",
"r": "Republican",
"dem": "Democratic",
"rep": "Republican",
"democrat": "Democratic",
"republican": "Republican",
}
class ContactDetail:
def __init__(self, note):
self.note = note
self.voice = None
self.fax = None
self.address = None
def to_dict(self):
d = {}
for key in ("voice", "fax", "address"):
val = getattr(self, key)
if val:
if key in ("voice", "fax"):
val = reformat_phone_number(val)
d[key] = val
if d:
d["note"] = self.note
return d
class Person:
def __init__(
self,
name,
*,
state,
party,
district,
chamber,
image=None,
email=None,
given_name=None,
family_name=None,
suffix=None,
):
self.name = clean_spaces(name)
self.party = party
self.district = str(district)
self.chamber = chamber
self.state = state
self.given_name = given_name
self.family_name = family_name
self.suffix = suffix
self.image = image
self.email = email
self.links = []
self.sources = []
self.capitol_office = ContactDetail("Capitol Office")
self.district_office = ContactDetail("District Office")
self.ids = {}
self.extras = {}
def to_dict(self):
party = PARTIES.get(self.party.lower(), self.party)
d = OrderedDict(
{
"id": f"ocd-person/{uuid.uuid4()}",
"name": str(self.name),
"party": [{"name": party}],
"roles": [
{
"district": self.district,
"type": self.chamber,
"jurisdiction": get_jurisdiction_id(self.state),
}
],
"links": self.links,
"sources": self.sources,
}
)
if self.given_name:
d["given_name"] = str(self.given_name)
if self.family_name:
d["family_name"] = str(self.family_name)
if self.suffix:
d["suffix"] = str(self.suffix)
if self.image:
d["image"] = str(self.image)
if self.email:
d["email"] = str(self.email)
if self.ids:
d["ids"] = self.ids
if self.extras:
d["extras"] = self.extras
# contact details
d["contact_details"] = []
if self.district_office.to_dict():
d["contact_details"].append(self.district_office.to_dict())
if self.capitol_office.to_dict():
d["contact_details"].append(self.capitol_office.to_dict())
return d
def add_link(self, url, note=None):
if note:
self.links.append({"url": url, "note": note})
else:
self.links.append({"url": url})
def add_source(self, url, note=None):
if note:
self.sources.append({"url": url, "note": note})
else:
self.sources.append({"url": url})
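if __name__ == "__main__":
    # Minimal usage sketch (our own; the values are made up). Assumes
    # utils.get_jurisdiction_id and reformat_phone_number accept these inputs.
    p = Person("Jane  Doe", state="nc", party="dem", district=12, chamber="upper")
    p.capitol_office.voice = "919-555-0100"
    p.add_source("https://example.com/legislators")
    d = p.to_dict()
    print(d["name"], d["party"])  # -> Jane Doe [{'name': 'Democratic'}]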
| [
"[email protected]"
] | |
d92a0924366fbeb47f4ab4a24bc9d72fd62f997a | 13a6b6bc9327fa6128fb0c4687e0fbc2eb80fa5f | /poo_herencia.py | 74ea9e372367bd4838e621418dbd4dd4f3f37855 | [] | no_license | jkaalexkei/poo_python | 1df5d06f30ab5199b0c8c529cfc3faf5b8d97a61 | 1be51074c6eb7818770c34ef4c09cae36f1fa0f9 | refs/heads/master | 2023-05-06T22:52:21.577667 | 2021-05-27T20:43:47 | 2021-05-27T20:43:47 | 370,861,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | #LA HERENCIA:
# Inheritance consists of reusing code when creating similar objects:
# - attributes the objects have in common
# - behaviour the objects have in common
# The parent class (superclass) groups the shared attributes and behaviour.
class Vehiculos():
    def __init__(self,marca,modelo):
        self.marca = marca
        self.modelo = modelo
        self.enmarcha = False
        self.acelera = False
        self.frena = False
    def arrancar(self):
        self.enmarcha = True
    def acelerar(self):
        self.acelera = True
    def frenar(self):
        self.frena = True
    def estado(self):
        print("Brand: ", self.marca, "\n Model: ", self.modelo, "\n Running: ", self.enmarcha, "\n Accelerating: ", self.acelera, "\n Braking: ", self.frena)
class Moto(Vehiculos): # This is how to inherit: the class name, with the parent class to inherit from in parentheses
    pass
miMoto = Moto("Honda","CBR")
miMoto.estado()
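# Illustrative extension (our own addition): a further subclass inherits the
# same attributes and methods without redefining anything.
class Coche(Vehiculos):
    pass

miCoche = Coche("Toyota", "Corolla")
miCoche.arrancar()
miCoche.estado()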
| [
"[email protected]"
] | |
c352f3ffcc5682a0ea7ce13f0b7df3c947e19472 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /eMRXLJLpaSTxZvsKN_20.py | 492ac2498c5298a0703a9eaded759d7c74284e3b | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py |
def is_ladder_safe(ladder):
    index, spacing, counter, new_array = 0, [], 0, []
    # Every line of the ladder must be at least 5 characters wide;
    # record the number of '#' characters on each line as we go.
    for rung in ladder:
        if len(rung) < 5:
            return False
        hashes = 0
        for ch in rung:
            if ch == "#":
                hashes += 1
        spacing.append(hashes)
    # Each line's '#' count must match one of the first two counts seen.
    # `counter` tallies lines matching spacing[0]; a line matching
    # spacing[1] closes a run and records its length in new_array.
    for count in spacing:
        if count != spacing[0] and count != spacing[1]:
            return False
        if count == spacing[0] and index != 0:
            counter += 1
        if count == spacing[1] and index != 1:
            new_array.append(counter)
            counter = 0
        index += 1
    # Every recorded run must have the same length, and none may exceed 2.
    for run in new_array:
        if run != new_array[0] or run > 2:
            return False
    return True
| [
"[email protected]"
] | |
e4ca77f6d1e62a1fc5f27e605bacdd2ab5979bcd | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/pythondatastructures/console/print.py | acf395827d9e1beb9f219f3f5c828e7e7c2a5473 | [] | no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py |
# Python program that demonstrates the print statement
# Print a string literal.
print("Hello")
# Print two arguments.
print("Hello", "there")
# Print the value of a string variable.
a = "Python"
print(a)
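# Added illustration: print also accepts keyword arguments controlling the
# separator between arguments and the line ending.
print("Hello", "there", sep=", ", end="!\n")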
| [
"[email protected]"
] | |
21df4008c8aa3af325373649228fad8b24379e2f | c80f780b62c76a8a59100721ee5beb1333bc787d | /migrations/0002_tvprogram.py | 826743db7d0c8788e369a76630469eff007b6cb2 | [
"BSD-3-Clause"
] | permissive | alexisbellido/demo-app | 73c5391287038102114dcc5a29399ae5cb31bc84 | 5f855a0fa7813ad830f420dc0e3f3a3d338cdb22 | refs/heads/master | 2020-04-16T03:58:19.869373 | 2019-01-12T00:52:43 | 2019-01-12T00:52:43 | 165,251,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 2.1.5 on 2019-01-11 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TVProgram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
],
),
]
| [
"[email protected]"
] | |
2b452d1a68cb30c3d6de9fc38e074bf3066cb96f | 11aaeaeb55d587a950456fd1480063e1aed1d9e5 | /.history/ex45-test_20190611164111.py | eb3aae04bb43638fe8b27d57963ad7fb95e726e4 | [] | no_license | Gr4cchus/Learn-Python-3-The-Hard-Way | 8ce9e68f6a91ea33ea45fe64bfff82d65422c4a8 | f5fa34db16cdd6377faa7fcf45c70f94bb4aec0d | refs/heads/master | 2020-05-17T23:18:29.483160 | 2019-06-26T18:42:52 | 2019-06-26T18:42:52 | 184,023,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py |
class Scenes(object):
# def __init__(self):
# # self.starting_room = starting_room
# # self.locations = {
# # 'room1': Room1(),
# # 'room2': Room2()
# # }
def start(self):
print("You are at the start")
print("Where would you like to go")
self.locations()
def room1(self):
print("You enter room 1")
print("Where would you like to go")
def room2(self):
print("You enter room 2")
print("Where would you like to go")
def finish(self):
print("You have finished")
exit(0)
def locations(self):
map_list = [
'room1',
'room2',
'finish'
]
print(map_list)
for i in map_list:
            print(i)
# def locations(self):
# dict_locations = {
# 'room1': room1(),
# 'room2': room2()
# }
# return dict_locations
# dict_locations = {
# 'room1': room1(),
# 'room2': room2()
# }
# class Locations(Scenes):
# pass
# def map(self):
# dict_locations = {
# 'room1': room1(),
# 'room2': room2()
# }
# return dict_locations
# class Engine():
# def __init__(self, map):
# self.map = map
# def play(self):
# while True:
# # a = self.map.dict_locations
# print('yes')
thescenes = Scenes()
# thelocations = Locations()
# thedict = thelocations.map()
# while True:
# print("loop")
# thelocations.map.dict_locations.get('room1')
thescenes.start()
action = input("> ")
| [
"[email protected]"
] | |
6a8da79d9928bf4ded71044ae06701d5ce3ada3d | d6cf604d393a22fc5e071a0d045a4fadcaf128a6 | /ABC/183/183_E.py | b8a7fda6b2a115a4e139f632014b51c19d30bfb9 | [] | no_license | shikixyx/AtCoder | bb400dfafd3745c95720b9009881e07bf6b3c2b6 | 7e402fa82a96bc69ce04b9b7884cb9a9069568c7 | refs/heads/master | 2021-08-03T21:06:45.224547 | 2021-07-24T11:58:02 | 2021-07-24T11:58:02 | 229,020,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | import sys
sys.setrecursionlimit(10 ** 7)
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
MOD = 10 ** 9 + 7
def main():
H, W = map(int, input().split())
S = [list(input()) for _ in range(H)]
T = [[[0, 0, 0, 0] for _ in range(W)] for _ in range(H)]
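    # DP table: T[r][c][0] is the number of paths from (0, 0) to (r, c);
    # slots 1-3 carry running sums of those counts along the column, row and
    # diagonal through (r, c), so each cell is filled in O(1) without
    # rescanning whole rays of the grid.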
T[0][0][0] = 1
T[0][0][1] = 1
T[0][0][2] = 1
T[0][0][3] = 1
for r in range(H):
for c in range(W):
if r == 0 and c == 0:
continue
if S[r][c] == "#":
continue
            # pull the running sums in from the neighbors and add them
            # 1: vertical (column)
            # 2: horizontal (row)
            # 3: diagonal
cnt = 0
            # from above
if 0 <= (r - 1):
cnt += T[r - 1][c][1]
cnt %= MOD
            # from the left
if 0 <= (c - 1):
cnt += T[r][c - 1][2]
cnt %= MOD
            # from the upper-left
if 0 <= (r - 1) and 0 <= (c - 1):
cnt += T[r - 1][c - 1][3]
cnt %= MOD
T[r][c][0] = cnt
T[r][c][1] = cnt
T[r][c][2] = cnt
T[r][c][3] = cnt
            # update the running sums
            # above
if 0 <= (r - 1):
T[r][c][1] += T[r - 1][c][1]
            # left
if 0 <= (c - 1):
T[r][c][2] += T[r][c - 1][2]
            # upper-left
if 0 <= (r - 1) and 0 <= (c - 1):
T[r][c][3] += T[r - 1][c - 1][3]
T[r][c][1] %= MOD
T[r][c][2] %= MOD
T[r][c][3] %= MOD
ans = T[H - 1][W - 1][0] % MOD
print(ans)
return
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
2a21c9269596e7383db3bf89612249d1c740c959 | 776f52fd8e7c8504373d234b1f453ebfbb252ad9 | /tests/test_lightcurve.py | 794bd09d0af3fb2b2ea6a4aeee65a0a7912cf0d6 | [
"GPL-3.0-only",
"MIT"
] | permissive | jpdeleon/chronos | ae2481f504ef5e7b91d06ad73ba7e7bd7ede6fa6 | 330ab380040944689145a47ab060ee041491d54e | refs/heads/master | 2023-02-05T07:35:38.883328 | 2023-01-31T09:50:13 | 2023-01-31T09:50:13 | 236,907,132 | 7 | 2 | MIT | 2020-06-26T02:12:25 | 2020-01-29T05:05:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,710 | py | # -*- coding: utf-8 -*-
"""
test methods of lightcurve module
"""
import pytest
import lightkurve as lk
import pandas as pd
# from matplotlib.figure import Figure
from matplotlib.axes import Axes
from chronos import Tess, ShortCadence, LongCadence
TOIID = 837
TICID = 460205581
SECTOR = 10
CUTOUT_SIZE = (15, 15)
QUALITY_BITMASK = "default"
def test_tess_methods():
t = Tess(toiid=TOIID)
ax = t.plot_pdc_sap_comparison()
assert isinstance(ax, Axes)
lcs = t.get_lightcurves()
assert isinstance(lcs, lk.LightCurve)
def test_sc_pipeline():
sc = ShortCadence(
ticid=TICID, sap_mask="pipeline", quality_bitmask=QUALITY_BITMASK
)
_ = sc.get_lc()
assert isinstance(sc.lc_pdcsap, lk.LightCurve)
assert isinstance(sc.lc_sap, lk.LightCurve)
def test_sc_square():
sc = ShortCadence(
ticid=TICID,
sap_mask="square",
aper_radius=1,
threshold_sigma=5,
percentile=95,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "square"
def test_sc_round():
sc = ShortCadence(
ticid=TICID,
sap_mask="round",
aper_radius=1,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "round"
def test_sc_threshold():
sc = ShortCadence(
ticid=TICID,
sap_mask="threshold",
threshold_sigma=5,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "threshold"
def test_sc_percentile():
sc = ShortCadence(
ticid=TICID,
sap_mask="percentile",
percentile=90,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "percentile"
def test_lc():
lc = LongCadence(
ticid=TICID,
sap_mask="square",
aper_radius=1,
cutout_size=CUTOUT_SIZE,
quality_bitmask=QUALITY_BITMASK,
)
_ = lc.make_custom_lc()
assert isinstance(lc.lc_custom, lk.LightCurve)
@pytest.mark.skip
def test_sc_triceratops():
sc = ShortCadence(ticid=TICID, calc_fpp=True)
df = sc.get_NEB_depths()
# df = sc.get_fpp(flat=flat, plot=False)
assert sc.triceratops is not None
assert isinstance(df, pd.DataFrame)
@pytest.mark.skip
def test_lc_triceratops():
lc = LongCadence(ticid=TICID, calc_fpp=True)
# df = sc.get_NEB_depths()
# df = sc.get_fpp(flat=flat, plot=False)
assert lc.triceratops is not None
| [
"[email protected]"
] | |
aefd0927cb89585b4ba4e7058ed6d5f417ad28ba | 9d90b664ebbd11a57ee6156c528081551b98055b | /wsgi/local_data/brython_programs/string_and_in1.py | 8152f92fcc9ddaa939f87d1c34a58f2639fc7a70 | [] | no_license | 2014cdag21/c21 | d4f85f91ba446feb6669a39903dda38c21e8b868 | faf4b354f7d1d4abec79c683d7d02055c6bab489 | refs/heads/master | 2020-06-03T17:54:16.144118 | 2014-06-20T09:29:02 | 2014-06-20T09:29:02 | 19,724,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | print(3 in [1, 2, 3])
print("3" in [1, 2, 3])
print("3" in [1, 2, "3"])
VOWELS = ['a', 'e', 'i', 'o', 'u']
def is_a_vowel(c):
# check if c is a vowel
lowercase_c = c.lower()
if lowercase_c in VOWELS:
# Return (BOOLEAN!) True if c is a vowel
return True
else:
# c must not be a vowel; return (BOOLEAN!) False
return False
def only_vowels(phrase):
# Takes a phrase, and returns a string of all the vowels
# Initalize an empty string to hold all of the vowels
vowel_string = ''
for letter in phrase:
# check if each letter is a vowel
if is_a_vowel(letter):
# If it's a vowel, we append the letter to the vowel string
vowel_string = vowel_string + letter
        # if not a vowel, we don't care about it, so do nothing!
return vowel_string
# Code after a "return" doesn't print
print("A line of code after the return!")
print(only_vowels("Takes a phrase, and returns a string of all the vowels")) | [
"[email protected]"
] | |
371e15298db1711c8e04520f405b43a1b83d271f | b972faf032590c9722dc240c45fc60157d5a1bee | /1.py | 39ce810ba84b1a8edf3516f6fd5ba14add3290e2 | [] | no_license | kih1024/codingStudy | 3a91b628bc301d1777d954595e93bf1f9246aca3 | 3e8a6fe86d3861613a85d3e75991f4bc7cd1e716 | refs/heads/master | 2022-12-09T04:58:55.264433 | 2020-09-22T07:29:44 | 2020-09-22T07:29:44 | 269,874,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | def solution(inputString):
answer = -1
arr = [False] * 4
count = [0] * 4
figure = [("(", ")"), ("{", "}"), ("[", "]"), ("<", ">")]
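    # arr[j] marks whether an opener of bracket pair j is currently open;
    # count[j] counts completed opener/closer pairs for that bracket type.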
for i in range(len(inputString)):
for j in range(len(figure)):
if inputString[i] == figure[j][0]:
arr[j] = True
break
if inputString[i] == figure[j][1] and arr[j] == True:
count[j] += 1
arr[j] = False
break
for i in arr:
if i == True:
return -1
answer = sum(count)
return answer
ans = solution(">_<")
print(ans)
| [
"[email protected]"
] | |
75b691bc34a70cb9c0af18d31d51db0d33f27155 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5751500831719424_1/Python/phire/1.py | 2394b63ef96ce2f57ad745fc11ff7cc9c0530fec | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sys
import os
def compress(s):
r = []
c = ''
count = 0
for x in s + " ":
if x == c:
count += 1
else:
if count:
r.append((c, count))
c = x
count = 1
return r
def test(arr, x):
r = 0
for y in arr:
r += abs(y - x)
return r
def match(arr):
avg = float(sum(arr)) / len(arr)
avg = int(avg) - 1
avg = max(1, avg)
return min(test(arr, avg), test(arr, avg+1), test(arr, avg+2), test(arr, avg+3))
def main():
T = int(sys.stdin.readline())
for t in xrange(1, T+1):
N = int(sys.stdin.readline())
a = []
for i in xrange(N):
a.append(compress(sys.stdin.readline().strip()))
signature = [x[0] for x in a[0]]
good = True
for r in a:
if [x[0] for x in r] != signature:
print "Case #" + str(t) + ": Fegla Won"
good = False
break
if not good:
continue
ret = sum(match([x[i][1] for x in a]) for i in xrange(len(signature)))
print "Case #" + str(t) + ": " + str(ret)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4b4439a60a195a76dc373548c6f58dc5a457595b | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/bgp/peerroute1qtr.py | 477b368c64206019a405e5370eedb3dc83296afd | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 38,939 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class PeerRoute1qtr(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.bgp.PeerRoute1qtr", "BGP Peer Route")
counter = CounterMeta("capabilityRcvd", CounterCategory.COUNTER, "packets", "Number of Capability Messages Received")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "capabilityRcvdLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "capabilityRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "capabilityRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "capabilityRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "capabilityRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "capabilityRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "capabilityRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "capabilityRcvdBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "capabilityRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "capabilityRcvdTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "capabilityRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "capabilityRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("capabilitySent", CounterCategory.COUNTER, "packets", "Number of Capability Messages Sent")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "capabilitySentLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "capabilitySentCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "capabilitySentPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "capabilitySentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "capabilitySentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "capabilitySentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "capabilitySentSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "capabilitySentBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "capabilitySentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "capabilitySentTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "capabilitySentTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "capabilitySentRate"
meta._counters.append(counter)
counter = CounterMeta("routeRefreshRcvd", CounterCategory.COUNTER, "packets", "Number of Route Refresh Messages Received")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "routeRefreshRcvdLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "routeRefreshRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "routeRefreshRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "routeRefreshRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "routeRefreshRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "routeRefreshRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "routeRefreshRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "routeRefreshRcvdBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "routeRefreshRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "routeRefreshRcvdTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "routeRefreshRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "routeRefreshRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("routeRefreshSent", CounterCategory.COUNTER, "packets", "Number of Route Refresh Messages Sent")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "routeRefreshSentLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "routeRefreshSentCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "routeRefreshSentPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "routeRefreshSentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "routeRefreshSentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "routeRefreshSentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "routeRefreshSentSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "routeRefreshSentBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "routeRefreshSentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "routeRefreshSentTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "routeRefreshSentTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "routeRefreshSentRate"
meta._counters.append(counter)
meta.moClassName = "bgpPeerRoute1qtr"
meta.rnFormat = "CDbgpPeerRoute1qtr"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current BGP Peer Route stats in 1 quarter"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.bgp.Peer")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.bgp.PeerRoute")
meta.superClasses.add("cobra.model.stats.Item")
meta.rnPrefixes = [
('CDbgpPeerRoute1qtr', False),
]
prop = PropMeta("str", "capabilityRcvdAvg", "capabilityRcvdAvg", 48197, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Capability Messages Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdAvg", prop)
prop = PropMeta("str", "capabilityRcvdBase", "capabilityRcvdBase", 48192, PropCategory.IMPLICIT_BASELINE)
prop.label = "Number of Capability Messages Received baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdBase", prop)
prop = PropMeta("str", "capabilityRcvdCum", "capabilityRcvdCum", 48193, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Capability Messages Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdCum", prop)
prop = PropMeta("str", "capabilityRcvdLast", "capabilityRcvdLast", 48191, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Number of Capability Messages Received current value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdLast", prop)
prop = PropMeta("str", "capabilityRcvdMax", "capabilityRcvdMax", 48196, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Capability Messages Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdMax", prop)
prop = PropMeta("str", "capabilityRcvdMin", "capabilityRcvdMin", 48195, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Capability Messages Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdMin", prop)
prop = PropMeta("str", "capabilityRcvdPer", "capabilityRcvdPer", 48194, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Capability Messages Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdPer", prop)
prop = PropMeta("str", "capabilityRcvdRate", "capabilityRcvdRate", 48202, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Capability Messages Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdRate", prop)
prop = PropMeta("str", "capabilityRcvdSpct", "capabilityRcvdSpct", 48198, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Capability Messages Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdSpct", prop)
prop = PropMeta("str", "capabilityRcvdThr", "capabilityRcvdThr", 48199, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Capability Messages Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("capabilityRcvdThr", prop)
prop = PropMeta("str", "capabilityRcvdTr", "capabilityRcvdTr", 48201, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Capability Messages Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdTr", prop)
prop = PropMeta("str", "capabilityRcvdTrBase", "capabilityRcvdTrBase", 48200, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Number of Capability Messages Received trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilityRcvdTrBase", prop)
prop = PropMeta("str", "capabilitySentAvg", "capabilitySentAvg", 48218, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Capability Messages Sent average value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentAvg", prop)
prop = PropMeta("str", "capabilitySentBase", "capabilitySentBase", 48213, PropCategory.IMPLICIT_BASELINE)
prop.label = "Number of Capability Messages Sent baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentBase", prop)
prop = PropMeta("str", "capabilitySentCum", "capabilitySentCum", 48214, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Capability Messages Sent cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentCum", prop)
prop = PropMeta("str", "capabilitySentLast", "capabilitySentLast", 48212, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Number of Capability Messages Sent current value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentLast", prop)
prop = PropMeta("str", "capabilitySentMax", "capabilitySentMax", 48217, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Capability Messages Sent maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentMax", prop)
prop = PropMeta("str", "capabilitySentMin", "capabilitySentMin", 48216, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Capability Messages Sent minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentMin", prop)
prop = PropMeta("str", "capabilitySentPer", "capabilitySentPer", 48215, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Capability Messages Sent periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentPer", prop)
prop = PropMeta("str", "capabilitySentRate", "capabilitySentRate", 48223, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Capability Messages Sent rate"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentRate", prop)
prop = PropMeta("str", "capabilitySentSpct", "capabilitySentSpct", 48219, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Capability Messages Sent suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentSpct", prop)
prop = PropMeta("str", "capabilitySentThr", "capabilitySentThr", 48220, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Capability Messages Sent thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("capabilitySentThr", prop)
prop = PropMeta("str", "capabilitySentTr", "capabilitySentTr", 48222, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Capability Messages Sent trend"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentTr", prop)
prop = PropMeta("str", "capabilitySentTrBase", "capabilitySentTrBase", 48221, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Number of Capability Messages Sent trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("capabilitySentTrBase", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "routeRefreshRcvdAvg", "routeRefreshRcvdAvg", 48239, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Route Refresh Messages Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdAvg", prop)
prop = PropMeta("str", "routeRefreshRcvdBase", "routeRefreshRcvdBase", 48234, PropCategory.IMPLICIT_BASELINE)
prop.label = "Number of Route Refresh Messages Received baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdBase", prop)
prop = PropMeta("str", "routeRefreshRcvdCum", "routeRefreshRcvdCum", 48235, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Route Refresh Messages Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdCum", prop)
prop = PropMeta("str", "routeRefreshRcvdLast", "routeRefreshRcvdLast", 48233, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Number of Route Refresh Messages Received current value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdLast", prop)
prop = PropMeta("str", "routeRefreshRcvdMax", "routeRefreshRcvdMax", 48238, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Route Refresh Messages Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdMax", prop)
prop = PropMeta("str", "routeRefreshRcvdMin", "routeRefreshRcvdMin", 48237, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Route Refresh Messages Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdMin", prop)
prop = PropMeta("str", "routeRefreshRcvdPer", "routeRefreshRcvdPer", 48236, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Route Refresh Messages Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdPer", prop)
prop = PropMeta("str", "routeRefreshRcvdRate", "routeRefreshRcvdRate", 48244, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Route Refresh Messages Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdRate", prop)
prop = PropMeta("str", "routeRefreshRcvdSpct", "routeRefreshRcvdSpct", 48240, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Route Refresh Messages Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdSpct", prop)
prop = PropMeta("str", "routeRefreshRcvdThr", "routeRefreshRcvdThr", 48241, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Route Refresh Messages Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("routeRefreshRcvdThr", prop)
prop = PropMeta("str", "routeRefreshRcvdTr", "routeRefreshRcvdTr", 48243, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Route Refresh Messages Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdTr", prop)
prop = PropMeta("str", "routeRefreshRcvdTrBase", "routeRefreshRcvdTrBase", 48242, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Number of Route Refresh Messages Received trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshRcvdTrBase", prop)
prop = PropMeta("str", "routeRefreshSentAvg", "routeRefreshSentAvg", 48260, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Route Refresh Messages Sent average value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentAvg", prop)
prop = PropMeta("str", "routeRefreshSentBase", "routeRefreshSentBase", 48255, PropCategory.IMPLICIT_BASELINE)
prop.label = "Number of Route Refresh Messages Sent baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentBase", prop)
prop = PropMeta("str", "routeRefreshSentCum", "routeRefreshSentCum", 48256, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Route Refresh Messages Sent cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentCum", prop)
prop = PropMeta("str", "routeRefreshSentLast", "routeRefreshSentLast", 48254, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Number of Route Refresh Messages Sent current value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentLast", prop)
prop = PropMeta("str", "routeRefreshSentMax", "routeRefreshSentMax", 48259, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Route Refresh Messages Sent maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentMax", prop)
prop = PropMeta("str", "routeRefreshSentMin", "routeRefreshSentMin", 48258, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Route Refresh Messages Sent minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentMin", prop)
prop = PropMeta("str", "routeRefreshSentPer", "routeRefreshSentPer", 48257, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Route Refresh Messages Sent periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentPer", prop)
prop = PropMeta("str", "routeRefreshSentRate", "routeRefreshSentRate", 48265, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Route Refresh Messages Sent rate"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentRate", prop)
prop = PropMeta("str", "routeRefreshSentSpct", "routeRefreshSentSpct", 48261, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Route Refresh Messages Sent suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentSpct", prop)
prop = PropMeta("str", "routeRefreshSentThr", "routeRefreshSentThr", 48262, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Route Refresh Messages Sent thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("routeRefreshSentThr", prop)
prop = PropMeta("str", "routeRefreshSentTr", "routeRefreshSentTr", 48264, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Route Refresh Messages Sent trend"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentTr", prop)
prop = PropMeta("str", "routeRefreshSentTrBase", "routeRefreshSentTrBase", 48263, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Number of Route Refresh Messages Sent trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("routeRefreshSentTrBase", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
230890be29b4a007df5076f82d5dff5baaac23ec | d3c673dcb339570eee580f2029d179e5c9dd2535 | /venv/bin/pilconvert.py | a76f7dfb1c2173c16cf0168bd9db8b838f308478 | [
"MIT"
] | permissive | zubeir-Abubakar/zgram | 0304e9a21d4a7976d211a1f6692d7bb1bf5fba2b | 33ed713f758ba86642ce8cb3b68a835bf07c29b5 | refs/heads/master | 2020-06-25T18:56:07.770251 | 2020-03-24T07:51:31 | 2020-03-24T07:51:31 | 199,394,542 | 1 | 0 | MIT | 2020-03-24T07:51:32 | 2019-07-29T06:44:57 | Python | UTF-8 | Python | false | false | 2,394 | py | #!/Users/saadiaomar/Documents/zgram/venv/bin/python3.6
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt
import sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
output_format = None
convert = None
options = {}
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
output_format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if output_format:
im.save(argv[1], output_format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"[email protected]"
] | |
3fc0d2ac19f0f38ceac5d69162a3f2264facf48a | 8f5ce9cb41649cfcfe4026200c3ec48630cec7fa | /Interview/DSA/Strings/string_permutation.py | 7bfd73a3b18fe04f1a829ffb7464afc5b43e1098 | [] | no_license | karbekk/Python_Data_Structures | 2d44ca0e12355b9b587eefa8a6beeba0ffcf3407 | 87e0ece2893d19ee92f6e72194cab6dcb4a3c4e7 | refs/heads/master | 2020-03-28T07:32:41.649465 | 2019-08-08T03:37:09 | 2019-08-08T03:37:09 | 147,908,721 | 0 | 0 | null | 2018-09-08T06:40:50 | 2018-09-08T06:40:50 | null | UTF-8 | Python | false | false | 534 | py | # def permute1(start, rest):
# res = []
# if len(rest) <= 1:
# res += [start + rest, rest + start]
# else:
# for i, c in enumerate(rest):
# s = rest[:i] + rest[i+1:]
# for perm in permute1(c, s):
# res += [start + perm]
# return res
def permute2(s):
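    # Recursively build permutations: pick each character c as the head and
    # prepend it to every permutation of the remaining characters.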
res = []
if len(s) == 1:
res = [s]
else:
for i, c in enumerate(s):
for perm in permute2(s[:i] + s[i+1:]):
res += perm
return res
print permute2('ab') | [
"[email protected]"
] | |
ba704e0b2cc6f8d894c58413e8e9b44815276ea3 | 4127a99269737c4640e53bad9b32c2c2f7f172d3 | /iptw/old/screen_med_old.py | 0a3e746a326001f267d885ae076b80e2bb0863c5 | [] | no_license | calvin-zcx/pasc_phenotype | 0401d920b3cc441405abe9e689672415d57fd984 | 40efce36581721cd91e599ea6e61429fe7ac1f67 | refs/heads/master | 2023-08-31T04:41:34.823658 | 2023-08-30T16:07:52 | 2023-08-30T16:07:52 | 446,634,747 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,536 | py | import sys
# for linux env.
sys.path.insert(0, '..')
import time
import pickle
import argparse
from evaluation import *
import os
import random
import pandas as pd
import json
import matplotlib.pyplot as plt
import numpy as np
from PSModels import ml
from misc import utils
import itertools
import functools
from tqdm import tqdm
print = functools.partial(print, flush=True)
def parse_args():
parser = argparse.ArgumentParser(description='process parameters')
# Input
parser.add_argument('--dataset', choices=['COL', 'MSHS', 'MONTE', 'NYU', 'WCM', 'ALL'], default='COL',
help='site dataset')
parser.add_argument("--random_seed", type=int, default=0)
parser.add_argument('--negative_ratio', type=int, default=2)
# parser.add_argument('--run_model', choices=['LSTM', 'LR', 'MLP', 'XGBOOST', 'LIGHTGBM'], default='MLP')
args = parser.parse_args()
# More args
args.data_file = r'../data/V15_COVID19/output/character/pcr_cohorts_covariate_elixh_encoding_{}.csv'.format(
args.dataset)
args.data_file_outcome = r'../data/V15_COVID19/output/character/pcr_cohorts_outcome_atcl3_encoding_{}.csv'.format(
args.dataset)
if args.random_seed < 0:
from datetime import datetime
args.random_seed = int(datetime.now())
# args.save_model_filename = os.path.join(args.output_dir, '_S{}{}'.format(args.random_seed, args.run_model))
# utils.check_and_mkdir(args.save_model_filename)
return args
def _evaluation_helper(X, T, PS_logits, loss):
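    # Score a fitted propensity model: AUC of the predicted PS plus balance
    # diagnostics (max SMD and the number of covariates with SMD above
    # SMD_THRESHOLD), both before and after inverse-probability weighting.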
y_pred_prob = logits_to_probability(PS_logits, normalized=False)
auc = roc_auc_score(T, y_pred_prob)
max_smd, smd, max_smd_weighted, smd_w, before, after = cal_deviation(X, T, PS_logits, normalized=False,
verbose=False)
n_unbalanced_feature = len(np.where(smd > SMD_THRESHOLD)[0])
n_unbalanced_feature_weighted = len(np.where(smd_w > SMD_THRESHOLD)[0])
result = (loss, auc, max_smd, n_unbalanced_feature, max_smd_weighted, n_unbalanced_feature_weighted)
return result
def _loss_helper(v_loss, v_weights):
return np.dot(v_loss, v_weights) / np.sum(v_weights)
def summary_covariate(df, label, weights, smd, smd_weighted, before, after):
# (covariates_treated_mu, covariates_treated_var, covariates_controlled_mu, covariates_controlled_var), \
# (covariates_treated_w_mu, covariates_treated_w_var, covariates_controlled_w_mu, covariates_controlled_w_var)
columns = df.columns
df_pos = df.loc[label == 1, :]
df_neg = df.loc[label == 0, :]
df_pos_mean = df_pos.mean()
df_neg_mean = df_neg.mean()
df_pos_sum = df_pos.sum()
df_neg_sum = df_neg.sum()
df_summary = pd.DataFrame(index=df.columns, data={
'Positive Total Patients': df_pos.sum(),
'Negative Total Patients': df_neg.sum(),
'Positive Percentage/mean': df_pos.mean(),
'Positive std': before[1],
'Negative Percentage/mean': df_neg.mean(),
'Negative std': before[3],
'Positive mean after re-weighting': after[0],
'Negative mean after re-weighting': after[2],
'Positive std after re-weighting': before[1],
'Negative std after re-weighting': before[3],
'SMD before re-weighting': smd,
'SMD after re-weighting': smd_weighted,
})
return df_summary
if __name__ == "__main__":
# python screen_med.py --dataset ALL 2>&1 | tee log/screen_med.txt
start_time = time.time()
args = parse_args()
np.random.seed(args.random_seed)
random.seed(args.random_seed)
print('args: ', args)
print('random_seed: ', args.random_seed)
# print('save_model_filename', args.save_model_filename)
# %% 1. Load Data
# Load Covariates Data
print('Load data covariates file:', args.data_file)
df = pd.read_csv(args.data_file, dtype={'patid': str, 'covid': int})
# because a patid id may occur in multiple sites. patid were site specific
df_info = df[['Unnamed: 0', 'patid', 'site']]
df_label = df['covid']
df_covs = df.iloc[:, df.columns.get_loc('age20-39'):]
df_covs_array = df_covs.astype('float')
    # Comorbidities were set as true if at least 2 instances were found in the history
df_covs_array.iloc[:, df_covs_array.columns.get_loc('AIDS'):] = (
df_covs_array.iloc[:, df_covs_array.columns.get_loc('AIDS'):] >= 2).astype('float')
print('df.shape:', df.shape)
print('df_covs_array.shape:', df_covs_array.shape)
# Load outcome Data
print('Load data outcome file:', args.data_file_outcome)
df_outcome = pd.read_csv(args.data_file_outcome)
df_outcome = df_outcome.iloc[:, 1:]
print('df_outcome.shape:', df_outcome.shape)
# Load index information
with open(r'../data/mapping/rxnorm_ingredient_mapping_combined.pkl', 'rb') as f:
rxnorm_ing = pickle.load(f)
print('Load rxRNOM_CUI to ingredient mapping done! len(rxnorm_atc):', len(rxnorm_ing))
record_example = next(iter(rxnorm_ing.items()))
print('e.g.:', record_example)
with open(r'../data/mapping/rxnorm_atc_mapping.pkl', 'rb') as f:
rxnorm_atc = pickle.load(f)
print('Load rxRNOM_CUI to ATC mapping done! len(rxnorm_atc):', len(rxnorm_atc))
record_example = next(iter(rxnorm_atc.items()))
print('e.g.:', record_example)
with open(r'../data/mapping/atcL3_index_mapping.pkl', 'rb') as f:
atcl3_encoding = pickle.load(f)
print('Load to ATC-Level-3 to encoding mapping done! len(atcl3_encoding):', len(atcl3_encoding))
record_example = next(iter(atcl3_encoding.items()))
print('e.g.:', record_example)
# %% 2. PASC specific cohorts for causal inference
causal_results = []
for i, pasc in tqdm(enumerate(atcl3_encoding.keys(), start=1), total=len(atcl3_encoding)):
# bulid specific cohorts:
print('\n In screening:', i, pasc)
pasc_flag = df_outcome['flag@' + pasc]
pasc_t2e = df_outcome['t2e@' + pasc].astype('float')
pasc_baseline = df_outcome['baseline@' + pasc]
# Select population free of outcome at baseline
idx = (pasc_baseline < 1)
# Select negative: pos : neg = 1:2 for IPTW
covid_label = df_label[idx]
n_covid_pos = covid_label.sum()
n_covid_neg = (covid_label == 0).sum()
sampled_neg_index = covid_label[(covid_label == 0)].sample(n=args.negative_ratio * n_covid_pos,
replace=False,
random_state=args.random_seed).index
pos_neg_selected = pd.Series(False, index=pasc_baseline.index)
pos_neg_selected[sampled_neg_index] = True
pos_neg_selected[covid_label[covid_label == 1].index] = True
#
covid_label = df_label[pos_neg_selected]
covs_array = df_covs_array.loc[pos_neg_selected, :]
pasc_flag = pasc_flag[pos_neg_selected]
pasc_t2e = pasc_t2e[pos_neg_selected]
print(i, pasc, '-- Selected cohorts {}/{} ({:.2f}%), covid pos:neg = {}:{} sample ratio -/+={}, pasc pos:neg '
'= {}:{}'.format(
pos_neg_selected.sum(), len(df_outcome), pos_neg_selected.sum() / len(df_outcome) * 100,
covid_label.sum(), (covid_label == 0).sum(), args.negative_ratio,
pasc_flag.sum(), (pasc_flag == 0).sum()))
model = ml.PropensityEstimator(learner='LR', random_seed=args.random_seed, ).cross_validation_fit(covs_array, covid_label, verbose=0)
# paras_grid = {
# 'penalty': 'l2',
# 'C': 0.03162277660168379,
# 'max_iter': 200,
# 'random_state': 0}
ps = model.predict_ps(covs_array)
model.report_stats()
iptw = model.predict_inverse_weight(covs_array, covid_label, stabilized=True, clip=False)
smd, smd_weighted, before, after = model.predict_smd(covs_array, covid_label, abs=False, verbose=True)
# plt.scatter(range(len(smd)), smd)
# plt.scatter(range(len(smd)), smd_weighted)
# plt.show()
print('n unbalanced covariates before:after = {}:{}'.format(
(smd > SMD_THRESHOLD).sum(),
(smd_weighted > SMD_THRESHOLD).sum())
)
out_file_balance = r'../data/V15_COVID19/output/character/specificMed/{}-{}-{}-covariates_balance_elixhauser.csv'.format(i, pasc, atcl3_encoding[pasc][2])
utils.check_and_mkdir(out_file_balance)
model.results.to_csv(out_file_balance) # args.save_model_filename +
df_summary = summary_covariate(covs_array, covid_label, iptw, smd, smd_weighted, before, after)
df_summary.to_csv('../data/V15_COVID19/output/character/specificMed/{}-{}-{}-evaluation_elixhauser_encoding_balancing.csv'.format(i, pasc, atcl3_encoding[pasc][2]))
km, km_w, cox, cox_w = weighted_KM_HR(covid_label, iptw, pasc_flag, pasc_t2e,
fig_outfile=r'../data/V15_COVID19/output/character/specificMed/{}-{}-{}-km.png'.format(i, pasc, atcl3_encoding[pasc][2]))
try:
_results = [i, pasc, atcl3_encoding.get(pasc, ''),
covid_label.sum(), (covid_label == 0).sum(),
pasc_flag[covid_label==1].sum(), pasc_flag[covid_label==0].sum(),
pasc_flag[covid_label == 1].mean(), pasc_flag[covid_label == 0].mean(),
(smd > SMD_THRESHOLD).sum(), (smd_weighted > SMD_THRESHOLD).sum(),
np.abs(smd).max(), np.abs(smd_weighted).max(),
km[2], km[3], km[6].p_value,
km_w[2], km_w[3], km_w[6].p_value,
cox[0], cox[1], cox[3].summary.p.treatment if pd.notna(cox[3]) else np.nan, cox[2],
cox_w[0], cox_w[1], cox_w[3].summary.p.treatment if pd.notna(cox_w[3]) else np.nan, cox_w[2]]
causal_results.append(_results)
print(causal_results[-1])
        except Exception as e:
            print('Error in', i, pasc, ':', e)
df_causal = pd.DataFrame(causal_results, columns=[
'i', 'pasc', 'pasc-med','covid+', 'covid-', 'no. pasc in +', 'no. pasc in -', 'mean pasc in +', 'mean pasc in -',
'no. unbalance', 'no. unbalance iptw',
'max smd', 'max smd iptw',
'km-diff', 'km-diff-time', 'km-diff-p',
'km-w-diff', 'km-w-diff-time', 'km-w-diff-p',
'hr', 'hr-CI', 'hr-p', 'hr-logrank-p',
'hr-w', 'hr-w-CI', 'hr-w-p', 'hr-w-logrank-p'])
df_causal.to_csv(r'../data/V15_COVID19/output/character/specificMed/causal_effects_specific_med-ERRORSAVE.csv')
print('done one pasc, time:', time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
df_causal = pd.DataFrame(causal_results, columns=[
'i', 'pasc', 'pasc-med', 'covid+', 'covid-', 'no. pasc in +', 'no. pasc in -', 'mean pasc in +', 'mean pasc in -',
'no. unbalance', 'no. unbalance iptw', 'max smd', 'max smd iptw',
'km-diff', 'km-diff-time', 'km-diff-p', 'km-w-diff', 'km-w-diff-time', 'km-w-diff-p',
'hr', 'hr-CI', 'hr-p', 'hr-logrank-p', 'hr-w', 'hr-w-CI', 'hr-w-p', 'hr-w-logrank-p'])
df_causal.to_csv(r'../data/V15_COVID19/output/character/specificMed/causal_effects_specific_med.csv')
print('Done! Total Time used:', time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
| [
"[email protected]"
] | |
871c0c6dadbd1a53cd22800b987b3bebc634cb07 | 84263fd1391de079c5447359f1a7cd1abfb47126 | /pythonprog/file_error.py | d221c20dd7b401b5b5f0bd1a8204da7831fea511 | [] | no_license | Shilpa-T/Python | b19259b1be17182b1a9f86a42c0dd8134e749304 | 280fc16e9c7c0f38b33c59381457fcbbd42b8ae3 | refs/heads/master | 2020-04-19T00:13:38.706605 | 2019-01-27T18:57:52 | 2019-01-27T18:57:52 | 167,841,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | """
Handle a file error (IOError) when opening and printing a file.
"""
def printfile(name):
try:
open_file = open(name)
except IOError:
print "I canr find file",name
else:
for line in open_file:
print line
printfile(raw_input('enter file name'))
| [
"[email protected]"
] | |
b44b3746f25797f37789e6aa68d7f851c31294f2 | 26f78ba56388765f2fe2dc8fa23ddea097209ec5 | /Leetcode/二叉搜索树/124二叉树中的最大路径和.py | 1b5552f812083ca1dbedefc848c2aa4e370f02c4 | [] | no_license | johnkle/FunProgramming | 3ef2ff32a1a378e1c780138ec9bab630c9ba83c7 | a60e0d17a1e9f0bc1959d7a95737fc4a0362d735 | refs/heads/master | 2023-07-18T16:05:56.493458 | 2021-09-08T19:01:19 | 2021-09-08T19:01:19 | 402,861,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxPathSum(self, root: TreeNode) -> int:
self.res = float('-inf')
        # compute each node's maximum contribution, and record the maximum path sum rooted at that node
def helper(node):
if not node:
return 0
leftGain = max(helper(node.left),0)
rightGain = max(helper(node.right),0)
path = node.val + leftGain + rightGain
self.res = max(self.res,path)
return node.val+max(leftGain,rightGain)
helper(root)
return self.res
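# Quick sanity check (illustrative, not part of the original submission): for
# a root 1 with children 2 and 3, the best path 2 -> 1 -> 3 sums to 6, i.e.
#   Solution().maxPathSum(TreeNode(1, TreeNode(2), TreeNode(3))) == 6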
class Solution2:
def maxPathSum(self, root):
res = []
def dfs(node):
if not node:
return 0
leftgain = max(dfs(node.left),0)
rightgain = max(dfs(node.right),0)
res.append(node.val+leftgain+rightgain)
return node.val + max(leftgain,rightgain)
dfs(root)
return max(res) | [
"[email protected]"
] | |
0ab6b8e2ed471f92d7c1b8b6dd7a90eca1c1cc92 | 9055b8f8b1ca2e357473179a5ff551f69541bd34 | /Pandas/VISUALIZATION/2. bar.py | 724ba66ecf640e22130e0f309412b788a6b2bc6e | [] | no_license | YanlinWang128/PyhonStudy | 879c72cbdc1f467a7b4721561692e2deb6a665e9 | 6b9de9afcfa7ba0b4c55025ddcf87664b170c6e7 | refs/heads/master | 2020-03-30T18:42:11.520471 | 2018-11-09T08:38:29 | 2018-11-09T08:38:29 | 151,511,393 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # @Time : 2018/11/6 9:52
# @Author : Yanlin Wang
# @Email : [email protected]
# @File : 2. bar.py
from time import clock
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
start = clock()
df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
df2.plot.bar()
df2.plot.bar(stacked=True)
plt.show()
end = clock()
print('time: {:.8f}s'.format(end - start)) | [
"[email protected]"
] | |
0563cfeb4ab8f4e39101eaf8fb8c0873253a877e | 51575eeda79a6e12c8839046721168e5cc5b6774 | /experiments/classification/run_classification.py | cd3dd6ff195b4a873ad6fc3a305673ccec6afceb | [] | no_license | rfeinman/GNS-Modeling | 59ad26efea4045c7dae98e98263d1193d53052b8 | 2c6b3400bfbb30f8f117042722fbcca2a8e9cb98 | refs/heads/master | 2023-06-08T21:22:27.914054 | 2021-07-08T14:17:56 | 2021-07-08T14:17:56 | 274,778,209 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | import os
import argparse
import pickle
import submitit
from get_base_parses import get_base_parses
from optimize_parses import optimize_parses
from refit_parses_multi import refit_parses_multi
def array_step(executor, func, jobs, inputs, errors):
filt = lambda y : not errors[y]
# submit jobs
with executor.batch():
for x in filter(filt, inputs):
jobs[x] = executor.submit(func, x)
# wait for completion
for x in filter(filt, inputs):
try:
jobs[x].result()
except Exception as err:
errors[x] = err
return jobs, errors
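# Note: every pipeline stage below reuses array_step, and the `filt` predicate
# skips any run that already errored in an earlier stage, so one bad run does
# not block the rest of the job array.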
def save_errors(errors):
with open("./logs/errors.pkl", "wb") as f:
pickle.dump(errors, f)
def main(args):
if not os.path.exists('./results'):
os.mkdir('./results')
run_IDs = list(range(20))
errors = {r:None for r in run_IDs}
jobs = {r:None for r in run_IDs}
# initialize job executor
executor = submitit.AutoExecutor(folder="./logs")
executor.update_parameters(
nodes=1,
tasks_per_node=1,
cpus_per_task=3,
slurm_mem='20GB',
slurm_gres='gpu:1',
slurm_time='8:00:00',
slurm_job_name='osc',
slurm_array_parallelism=20
)
# execute 3-step process sequentially
print('step 1: parsing')
fn = lambda r : get_base_parses(r, reverse=args.reverse)
jobs, errors = array_step(executor, fn, jobs, run_IDs, errors)
save_errors(errors)
print('step 2: optimization')
fn = lambda r : optimize_parses(r, reverse=args.reverse)
jobs, errors = array_step(executor, fn, jobs, run_IDs, errors)
save_errors(errors)
print('step 3: re-fitting')
executor.update_parameters(slurm_time='48:00:00') # more compute time needed for this step
fn = lambda r : refit_parses_multi(r, reverse=args.reverse)
jobs, errors = array_step(executor, fn, jobs, run_IDs, errors)
save_errors(errors)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--reverse', action='store_true')
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
88a23680fff66f1933d8f7c3cb647302f881af78 | e736f413ce7a287c2d0d084c051a637b52b4bb8a | /tools/testing/python/hybrid_encrypt_cli.py | ab507b8a19fc2fbb43e6418bf9865c891ea27bad | [
"Apache-2.0"
] | permissive | anaghvj/tink | b9e7041307ce250ee9a8bfbcd0df10401718bc24 | 50150573bd1d6f05e818cc3706cfba3fe9ed490a | refs/heads/master | 2022-06-17T10:11:19.413016 | 2020-04-30T23:04:57 | 2020-04-30T23:05:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command-line utility for testing HybridEncrypt-primitives.
It requires 4 arguments:
keyset-file: name of the file with the keyset to be used for encrypting
plaintext-file: name of the file that contains plaintext to be encrypted
contextinfo-file: name of the file that contains contextinfo used for
encryption
  output-file: name of the output file for the resulting ciphertext
"""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
# Special imports
from absl import app
from absl import flags
from absl import logging
import tink
from tink import cleartext_keyset_handle
from tink import hybrid
FLAGS = flags.FLAGS
def read_keyset(keyset_filename):
"""Load a keyset from a file.
Args:
keyset_filename: A path to a keyset file
Returns:
A KeysetHandle of the file's keyset
Raises:
TinkError: if the file is not valid
IOError: if the file does not exist
"""
with open(keyset_filename, 'rb') as keyset_file:
text = keyset_file.read()
keyset = cleartext_keyset_handle.read(tink.BinaryKeysetReader(text))
return keyset
def main(argv):
if len(argv) != 5:
raise app.UsageError(
'Expected 4 arguments, got %d.\n'
'Usage: %s keyset-file plaintext-file contextinfo-file output-file' %
(len(argv) - 1, argv[0]))
keyset_filename = argv[1]
plaintext_filename = argv[2]
contextinfo_filename = argv[3]
output_filename = argv[4]
logging.info(
'Using keyset from file %s to HybridEncrypt file %s using context '
'info %s\n.The resulting output will be written to file %s',
keyset_filename, plaintext_filename, contextinfo_filename,
output_filename)
# Initialise Tink
try:
hybrid.register()
except tink.TinkError as e:
logging.error('Error initialising Tink: %s', e)
return 1
# Read the keyset into keyset_handle
try:
keyset_handle = read_keyset(keyset_filename)
except tink.TinkError as e:
logging.error('Error reading key: %s', e)
return 1
# Get the primitive
try:
cipher = keyset_handle.primitive(hybrid.HybridEncrypt)
except tink.TinkError as e:
logging.error('Error creating primitive: %s', e)
return 1
# Read the input files
with open(plaintext_filename, 'rb') as plaintext_file:
plaintext_data = plaintext_file.read()
with open(contextinfo_filename, 'rb') as contextinfo_file:
contextinfo_data = contextinfo_file.read()
try:
output_data = cipher.encrypt(plaintext_data, contextinfo_data)
  except tink.TinkError as e:
    logging.error('Error encrypting the input: %s', e)
    return 1
with open(output_filename, 'wb') as output_file:
output_file.write(output_data)
logging.info('All done.')
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
4ded37c8c6cba07ee5ecc42d4954c609e42e7dbe | 16c41ed01feddb99a0f6e34e7b4ef5f5d83a2848 | /fb_post/views/reactions_to_post/tests/snapshots/snap_test_case_01.py | fb19d3e1492d1d6289233e88483d3eee290f9aaa | [] | no_license | karthik018/fb_post_learning | d6d9363aefd152886b4a466179c407135878dd25 | bc45c937006c35e7ac09cf038e35f3a40f9c8cf8 | refs/heads/master | 2022-04-28T08:18:11.852236 | 2019-07-30T04:19:49 | 2019-07-30T04:19:49 | 197,510,185 | 0 | 0 | null | 2022-04-22T21:59:07 | 2019-07-18T04:14:35 | Python | UTF-8 | Python | false | false | 1,086 | py | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TestCase01ReactionsToPostAPITestCase::test_case status'] = 200
snapshots['TestCase01ReactionsToPostAPITestCase::test_case body'] = {
'reactions': [
{
'profile_pic': '',
'reaction': 'LIKE',
'userid': 1,
'username': 'user1'
},
{
'profile_pic': '',
'reaction': 'LIKE',
'userid': 3,
'username': 'user3'
}
]
}
snapshots['TestCase01ReactionsToPostAPITestCase::test_case header_params'] = {
'content-language': [
'Content-Language',
'en'
],
'content-length': [
'149',
'Content-Length'
],
'content-type': [
'Content-Type',
'application/json'
],
'vary': [
'Accept-Language, Origin, Cookie',
'Vary'
],
'x-frame-options': [
'SAMEORIGIN',
'X-Frame-Options'
]
}
| [
"[email protected]"
] | |
cb7b7cdc8f0c56a1752b15e8b2c877d8149cd557 | 31b3ac7cc2f0cf43a4979e53d43002a9c5fb2038 | /student attendence record1.py | 0e932d0ad4fd5f0b398b54391dff536610e2a32e | [] | no_license | shreyansh-tyagi/leetcode-problem | ed31ada9608a1526efce6178b4fe3ee18da98902 | f8679a7b639f874a52cf9081b84e7c7abff1d100 | refs/heads/master | 2023-08-26T13:50:27.769753 | 2021-10-29T17:39:41 | 2021-10-29T17:39:41 | 378,711,844 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | '''
You are given a string s representing an attendance record for a student where each character signifies whether the student was absent, late, or present on that day. The record only contains the following three characters:
'A': Absent.
'L': Late.
'P': Present.
The student is eligible for an attendance award if they meet both of the following criteria:
The student was absent ('A') for strictly fewer than 2 days total.
The student was never late ('L') for 3 or more consecutive days.
Return true if the student is eligible for an attendance award, or false otherwise.
Example 1:
Input: s = "PPALLP"
Output: true
Explanation: The student has fewer than 2 absences and was never late 3 or more consecutive days.
Example 2:
Input: s = "PPALLL"
Output: false
Explanation: The student was late 3 consecutive days in the last 3 days, so is not eligible for the award.
Constraints:
1 <= s.length <= 1000
s[i] is either 'A', 'L', or 'P'.
'''
class Solution:
def checkRecord(self, s: str) -> bool:
return not(s.count('A')>=2 or s.count('LLL')>0)
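# Illustrative checks mirroring the docstring examples (not part of the
# original submission):
if __name__ == '__main__':
    assert Solution().checkRecord("PPALLP")
    assert not Solution().checkRecord("PPALLL")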
| [
"[email protected]"
] | |
1d06c11d4263225e3f0db3098eada829c8ad1be8 | 75519d2a9bf55e2d9376ea08a36676948a8b232c | /t/TopTab4.py | ca2efe64e5b125c5eaaf47f742e32f3765ff91e6 | [
"MIT"
] | permissive | CGFanTuan/damgteam | 9c32d59cbd0ecb9d3acffd9b902b918c40797e14 | aec414f084f6ab6ec5897314390605aaa8380d62 | refs/heads/master | 2020-09-17T00:29:24.832648 | 2019-11-25T09:51:13 | 2019-11-25T09:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,954 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script Name: TopTab4.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
# Python
import random
import sys
# PyQt5
from PyQt5.QtCore import pyqtSignal, pyqtSlot, pyqtProperty, Qt, QPointF, QTimer, QSize, QRectF, QSizeF
from PyQt5.QtGui import QRadialGradient, QColor, QPainter, QBrush, QPen
from PyQt5.QtWidgets import QApplication
# PLM
from bin.data.damg import DAMG
from ui.uikits.Widget import Widget
from ui.uikits.GridLayout import GridLayout
from ui.uikits.GroupBox import GroupGrid
# -------------------------------------------------------------------------------------------------------------
""" TopTab4 """
class TopTab4(Widget):
key = 'TopTab4'
def __init__(self, parent=None):
super(TopTab4, self).__init__(parent)
self.layout = GridLayout()
self.buildUI()
self.setLayout(self.layout)
self.signals.regisLayout.emit(self)
def buildUI(self):
sec1Grp, sec1Grid = GroupGrid('Test Layout')
# sec1Grid.addWidget(rc.Label("Update later"), 0, 0, 6, 9)
sec1Grid.addWidget(BubblesWidget(), 0, 0, 6, 9)
self.layout.addWidget(sec1Grp, 0, 0, 6, 9)
class BaseClass(Widget):
def __init__(self, parent=None):
super(BaseClass, self).__init__(parent)
self.resetAuthor()
def getAuthor(self):
return self._author
def setAuthor(self, name):
self._author = name
def resetAuthor(self):
self._author = "David Boddie"
author = pyqtProperty(str, getAuthor, setAuthor, resetAuthor)
class Bubble(DAMG):
def __init__(self, position, radius, speed, innerColor, outerColor):
self.position = position
self.radius = radius
self.speed = speed
self.innerColor = innerColor
self.outerColor = outerColor
self.updateBrush()
def updateBrush(self):
gradient = QRadialGradient(QPointF(self.radius, self.radius), self.radius, QPointF(self.radius * 0.5, self.radius * 0.5))
gradient.setColorAt(0, QColor(255, 255, 255, 255))
gradient.setColorAt(0.25, self.innerColor)
gradient.setColorAt(1, self.outerColor)
self.brush = QBrush(gradient)
def drawBubble(self, painter):
painter.save()
painter.translate(self.position.x() - self.radius,
self.position.y() - self.radius)
painter.setBrush(self.brush)
painter.drawEllipse(0.0, 0.0, 2 * self.radius, 2 * self.radius)
painter.restore()
class BubblesWidget(BaseClass):
bubbleLeft = pyqtSignal()
bubblesRemaining = pyqtSignal(int)
def __init__(self, parent=None):
super(BubblesWidget, self).__init__(parent)
self.pen = QPen(QColor("#cccccc"))
self.bubbles = []
self.backgroundColor1 = self.randomColor()
self.backgroundColor2 = self.randomColor().darker(150)
self.newBubble = None
random.seed()
self.animation_timer = QTimer(self)
self.animation_timer.setSingleShot(False)
self.animation_timer.timeout.connect(self.animate)
self.animation_timer.start(25)
self.bubbleTimer = QTimer()
self.bubbleTimer.setSingleShot(False)
self.bubbleTimer.timeout.connect(self.expandBubble)
self.setMouseTracking(True)
self.setMinimumSize(QSize(200, 200))
self.setWindowTitle("Bubble Maker")
def paintEvent(self, event):
background = QRadialGradient(QPointF(self.rect().topLeft()), 500, QPointF(self.rect().bottomRight()))
background.setColorAt(0, self.backgroundColor1)
background.setColorAt(1, self.backgroundColor2)
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(background))
painter.setPen(self.pen)
for bubble in self.bubbles:
if QRectF(bubble.position - QPointF(bubble.radius, bubble.radius),
QSizeF(2 * bubble.radius, 2 * bubble.radius)).intersects(QRectF(event.rect())):
bubble.drawBubble(painter)
if self.newBubble:
self.newBubble.drawBubble(painter)
painter.end()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton and self.newBubble is None:
self.newBubble = Bubble(QPointF(event.pos()), 4.0, 1.0 + random.random() * 7, self.randomColor(),
self.randomColor())
self.bubbleTimer.start(50)
event.accept()
def mouseMoveEvent(self, event):
if self.newBubble:
self.update(QRectF(self.newBubble.position - QPointF(self.newBubble.radius + 1, self.newBubble.radius + 1),
QSizeF(2 * self.newBubble.radius + 2, 2 * self.newBubble.radius + 2)).toRect())
self.newBubble.position = QPointF(event.pos())
self.update(
QRectF(self.newBubble.position - QPointF(self.newBubble.radius + 1, self.newBubble.radius + 1),
QSizeF(2 * self.newBubble.radius + 2, 2 * self.newBubble.radius + 2)).toRect())
event.accept()
def mouseReleaseEvent(self, event):
if self.newBubble:
self.bubbles.append(self.newBubble)
self.newBubble = None
self.bubbleTimer.stop()
self.bubblesRemaining.emit(len(self.bubbles))
event.accept()
def expandBubble(self):
if self.newBubble:
self.newBubble.radius = min(self.newBubble.radius + 4.0, self.width() / 8.0, self.height() / 8.0)
self.update(QRectF(self.newBubble.position - QPointF(self.newBubble.radius + 1, self.newBubble.radius + 1),
QSizeF(2 * self.newBubble.radius + 2, 2 * self.newBubble.radius + 2)).toRect())
self.newBubble.updateBrush()
def randomColor(self):
red = 205 + random.random() * 50
green = 205 + random.random() * 50
blue = 205 + random.random() * 50
alpha = 91 + random.random() * 100
        return QColor(int(red), int(green), int(blue), int(alpha))
def animate(self):
bubbles = []
left = False
for bubble in self.bubbles:
bubble.position = bubble.position + QPointF(0, -bubble.speed)
self.update(QRectF(bubble.position - QPointF(bubble.radius + 1, bubble.radius + 1),
QSizeF(2 * bubble.radius + 2, 2 * bubble.radius + 2 + bubble.speed)).toRect())
if bubble.position.y() + bubble.radius > 0:
bubbles.append(bubble)
else:
self.bubbleLeft.emit()
left = True
if self.newBubble:
self.update(
QRectF(self.newBubble.position - QPointF(
self.newBubble.radius + 1,
self.newBubble.radius + 1),
QSizeF(2 * self.newBubble.radius + 2, 2 * self.newBubble.radius + 2)).toRect())
self.bubbles = bubbles
if left:
self.bubblesRemaining.emit(len(self.bubbles))
def sizeHint(self):
return QSize(200, 200)
def getBubbles(self):
return len(self.bubbles)
@pyqtSlot(int)
def setBubbles(self, value):
value = max(0, value)
while len(self.bubbles) < value:
newBubble = Bubble(QPointF(random.random() * self.width(), random.random() * self.height()),
4.0 + random.random() * 20, 1.0 + random.random() * 7, self.randomColor(),
self.randomColor())
newBubble.updateBrush()
self.bubbles.append(newBubble)
self.bubbles = self.bubbles[:value]
self.bubblesRemaining.emit(value)
self.update()
numberOfBubbles = pyqtProperty(int, getBubbles, setBubbles)
def getColor1(self):
return self.backgroundColor1
def setColor1(self, value):
self.backgroundColor1 = QColor(value)
self.update()
color1 = pyqtProperty(QColor, getColor1, setColor1)
def getColor2(self):
return self.backgroundColor2
def setColor2(self, value):
self.backgroundColor2 = QColor(value)
self.update()
color2 = pyqtProperty(QColor, getColor2, setColor2)
@pyqtSlot()
def stop(self):
self.animation_timer.stop()
@pyqtSlot()
def start(self):
self.animation_timer.start(25)
def main():
app = QApplication(sys.argv)
layout = TopTab4()
layout.show()
app.exec_()
if __name__ == '__main__':
main()
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 25/05/2018 | [
"[email protected]"
] | |
51567e19a19b9c35630147e1ff764e37af61effa | 3cedc2e0867a53ed2f36e01624f369693d1a050d | /rnn/rnn42_females_original.py | 2ff1d39d4a2b26fb8955a0843919eaeae5b424ff | [] | no_license | lkpiel/mastersthesis | a471d8c6a5881e13599b22965dd3f437c83fc967 | 71c723b435b347d2805e159b6e10828f89541e98 | refs/heads/master | 2023-02-20T11:57:45.266361 | 2018-05-06T11:17:43 | 2018-05-06T11:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,811 | py | #! /usr/bin/python3
import sys
print(sys.version)
import pandas
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Average, Merge, Layer, Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalAveragePooling2D, AveragePooling2D, Reshape, BatchNormalization
from keras.optimizers import SGD, Adam
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from IPython.core.debugger import Tracer
from keras.layers import Masking, LSTM, TimeDistributed, Bidirectional, Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras import callbacks
from keras.constraints import maxnorm, unitnorm
from sklearn.preprocessing import OneHotEncoder
import pdb
import keras
#FORMAT DATA
#ONE HOT ENCODES A GIVEN COLUMN
def onehot(x): return np.array(OneHotEncoder().fit_transform(x.values.reshape(-1,1)).todense())
def format(data):
del data['Unnamed: 605']
mask = data['AgeGroup'] == 'ag1'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 0
mask = data['AgeGroup'] == 'ag2'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 1
mask = data['AgeGroup'] == 'ag3'
column_name = 'AgeGroup'
data.loc[mask, column_name] = 2
mask = data['Gender'] == 'm'
column_name = 'Gender'
data.loc[mask, column_name] = 0
mask = data['Gender'] == 'f'
column_name = 'Gender'
data.loc[mask, column_name] = 1
return data
def smooth_labels(y, smooth_factor):
'''Convert a matrix of one-hot row-vector labels into smoothed versions.
# Arguments
y: matrix of one-hot row-vector labels to be smoothed
smooth_factor: label smoothing factor (between 0 and 1)
# Returns
A matrix of smoothed labels.
'''
assert len(y.shape) == 2
if 0 <= smooth_factor <= 1:
# label smoothing ref: https://www.robots.ox.ac.uk/~vgg/rg/papers/reinception.pdf
y *= 1 - smooth_factor
y += smooth_factor / y.shape[1]
else:
raise Exception('Invalid label smoothing factor: ' + str(smooth_factor))
return y
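# Worked example (illustrative, not called by the pipeline below): with
# smooth_factor=0.1 over 3 classes, a one-hot row [1, 0, 0] becomes
# [0.9 + 0.1/3, 0.1/3, 0.1/3] ~ [0.933, 0.033, 0.033]:
#   smooth_labels(np.array([[1., 0., 0.]]), 0.1)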
def dot_product(x, kernel):
"""
Wrapper for dot product operation, in order to be compatible with both
Theano and Tensorflow
Args:
x (): input
kernel (): weights
Returns:
"""
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
class AttentionWithContext(Layer):
"""
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
How to use:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(AttentionWithContext())
# next add a Dense layer (for classification/regression) or whatever...
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
print(input_shape)
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
uit = dot_product(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
ait = dot_product(uit, self.u)
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number epsilon to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
#LOAD LABELS
train_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/train/export.csv", sep=" ")
val_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/dev/export.csv", sep=" ")
test_data = pandas.read_csv("/storage/tanel/child_age_gender/exp/ivectors_2048/test/export.csv", sep=" ")
train_data = format(train_data)
val_data = format(val_data)
test_data = format(test_data)
trainFemaleIndexes = train_data.index[train_data['Gender'] == 1].tolist()
valFemaleIndexes = val_data.index[val_data['Gender'] == 1].tolist()
testFemaleIndexes = test_data.index[test_data['Gender'] == 1].tolist()
train_data_females = train_data[train_data['Gender'] == 1]
val_data_females = val_data[val_data['Gender'] == 1]
test_data_females = test_data[test_data['Gender'] == 1]
test_data_males = test_data[test_data['Gender'] == 0]
train_labels_females = onehot(train_data_females['AgeGroup'])
val_labels_females = onehot(val_data_females['AgeGroup'])
test_labels_females = onehot(test_data_females['AgeGroup'])
test_labels_males = onehot(test_data_males['AgeGroup'])
train_i_vectors_females = train_data_females.iloc[:, 5:].as_matrix()
val_i_vectors_females = val_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_females = test_data_females.iloc[:, 5:].as_matrix()
test_i_vectors_males = test_data_males.iloc[:, 5:].as_matrix()
#testMaleIndexes = test_data_i_vectors.index[test_data_i_vectors['Gender'] == 1].tolist()
print ("LABELS LOADED")
train_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_train_data_padded.npy", encoding="bytes")[..., np.newaxis]
val_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_val_data_padded.npy", encoding="bytes")[..., np.newaxis]
test_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_test_data_padded.npy", encoding="bytes")[..., np.newaxis]
train_data_padded = train_data_padded[np.array(trainFemaleIndexes)]
val_data_padded = val_data_padded[np.array(valFemaleIndexes)]
test_data_padded = test_data_padded[np.array(testFemaleIndexes)]
print ("DATA LOADED")
################################################################################################
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.7,
patience=2, min_lr=0.0001, verbose=1)
kernel_regularizer = regularizers.l2(0.0001)
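# Rough shape walk-through (assuming the 'valid' borders configured below):
# the (3, 20) conv collapses the 20 filterbank channels to width 1, and the
# four stride-3 (5, 1) convs shrink the 1107-frame time axis to roughly a
# dozen steps, which Reshape hands to the BiLSTM + attention head as
# (steps, 128).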
model_42 = Sequential([
Conv2D(128, (3, 20), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid', input_shape=(1107, 20, 1)),
Conv2D(128, (5, 1), strides=(3,1), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid'),
Conv2D(128, (5, 1), strides=(3,1), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid'),
Conv2D(128, (5, 1), strides=(3,1), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid'),
Conv2D(128, (5, 1), strides=(3,1), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid'),
Reshape((-1, 128)),
Bidirectional(LSTM(128, return_sequences=True)),
AttentionWithContext(),
Dense(3, activation='softmax')
])
model_42.summary()
print ("model_42 BUILT")
model_42.compile(loss='categorical_crossentropy', optimizer=SGD(0.01), metrics=['accuracy'])
print ("model_42 COMPILED")
model_42.load_weights('/models/model_42.hdf5')
checkpoint = ModelCheckpoint(filepath='/models/model_42.hdf5', monitor='val_acc', save_best_only=True)
'''
history = model_42.fit(x=train_data_padded,
        y=train_labels_females,
        validation_data=(val_data_padded, val_labels_females),
epochs=25,
verbose=1,
batch_size=64,
callbacks=[checkpoint]
)
np.save('../history/history_model_42.npy', history.history)
modelHistory = np.load('../history/history_model_42.npy').item()
print ("HISTORY: ")
print (modelHistory)
'''
model_42.load_weights('/models/model_42.hdf5')
val_predictions = model_42.predict(val_data_padded)
print ("VAL PREDICTED")
test_predictions = model_42.predict(test_data_padded)
print ("TEST PREDICTED")
np.save('/home/hpc_lkpiel/predictions/val/model_42_females_original.npy', val_predictions)
print ("VAL SAVED")
np.save('/home/hpc_lkpiel/predictions/test/model_42_females_original.npy', test_predictions)
print ("WROTE TO FILE model_42")
######################################## | [
"[email protected]"
] | |
bb37f68c663d109d60aceb15879fe86829f874b1 | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/miscmodels/count.py | 2f2fd127a947c39dea7981e4684d4668d4dbab95 | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 11,173 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 08:34:59 2010
Author: josef-pktd
changes:
added offset and zero-inflated version of Poisson
- kind of ok, need better test cases,
- a nan in ZIP bse, need to check hessian calculations
- found error in ZIP loglike
- all tests pass with
Issues
------
* If true model is not zero-inflated then numerical Hessian for ZIP has zeros
for the inflation probability and is not invertible.
-> hessian inverts and bse look ok if row and column are dropped, pinv also works
* GenericMLE: still get somewhere (where?)
"CacheWriteWarning: The attribute 'bse' cannot be overwritten"
* bfgs is too fragile, doesn't come back
* `nm` is slow but seems to work
* need good start_params and their use in genericmle needs to be checked for
consistency, set as attribute or method (called as attribute)
* numerical hessian needs better scaling
* check taking parts out of the loop, e.g. factorial(endog) could be precalculated
"""
from __future__ import print_function
import numpy as np
from scipy import stats
from scipy.misc import factorial
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class NonlinearDeltaCov(object):
'''Asymptotic covariance by Deltamethod
the function is designed for 2d array, with rows equal to
the number of equations and columns equal to the number
of parameters. 1d params work by chance ?
fun: R^{m*k) -> R^{m} where m is number of equations and k is
the number of parameters.
equations follow Greene
'''
def __init__(self, fun, params, cov_params):
self.fun = fun
self.params = params
self.cov_params = cov_params
def grad(self, params=None, **kwds):
if params is None:
params = self.params
kwds.setdefault('epsilon', 1e-4)
from statsmodels.tools.numdiff import approx_fprime
return approx_fprime(params, self.fun, **kwds)
def cov(self):
g = self.grad()
covar = np.dot(np.dot(g, self.cov_params), g.T)
return covar
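    # Sketch of the identity being used: if params ~ N(theta, cov_params),
    # the delta method gives fun(params) an approximate covariance of
    # G * cov_params * G', where G is the numerical Jacobian from grad().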
def expected(self):
# rename: misnomer, this is the MLE of the fun
return self.fun(self.params)
def wald(self, value):
m = self.expected()
v = self.cov()
df = np.size(m)
diff = m - value
lmstat = np.dot(np.dot(diff.T, np.linalg.inv(v)), diff)
return lmstat, stats.chi2.sf(lmstat, df)
class PoissonGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = np.dot(self.exog, params)
endog = self.endog
return np.exp(XB) - endog*XB + np.log(factorial(endog))
def predict_distribution(self, exog):
'''return frozen scipy.stats distribution with mu at estimated prediction
'''
        if not hasattr(self, 'result'):
            raise ValueError('fit must be called before predict_distribution')
        else:
            mu = np.exp(np.dot(exog, self.result.params))
            return stats.poisson(mu, loc=0)
class PoissonOffsetGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson but adds offset
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def __init__(self, endog, exog=None, offset=None, missing='none', **kwds):
# let them be none in case user wants to use inheritance
if not offset is None:
if offset.ndim == 1:
offset = offset[:,None] #need column
self.offset = offset.ravel()
else:
self.offset = 0.
super(PoissonOffsetGMLE, self).__init__(endog, exog, missing=missing,
**kwds)
#this was added temporarily for bug-hunting, but shouldn't be needed
# def loglike(self, params):
# return -self.nloglikeobs(params).sum(0)
# original copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = self.offset + np.dot(self.exog, params)
endog = self.endog
nloglik = np.exp(XB) - endog*XB + np.log(factorial(endog))
return nloglik
class PoissonZiGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same statistical model
as discretemod.Poisson but adds offset and zero-inflation.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
There are numerical problems if there is no zero-inflation.
'''
def __init__(self, endog, exog=None, offset=None, missing='none', **kwds):
# let them be none in case user wants to use inheritance
super(PoissonZiGMLE, self).__init__(endog, exog, missing=missing,
**kwds)
if not offset is None:
if offset.ndim == 1:
offset = offset[:,None] #need column
self.offset = offset.ravel() #which way?
else:
self.offset = 0.
#TODO: it's not standard pattern to use default exog
if exog is None:
self.exog = np.ones((self.nobs,1))
self.nparams = self.exog.shape[1]
#what's the shape in regression for exog if only constant
self.start_params = np.hstack((np.ones(self.nparams), 0))
self.cloneattr = ['start_params']
#needed for t_test and summary
self.exog_names.append('zi')
# original copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
beta = params[:-1]
gamm = 1 / (1 + np.exp(params[-1])) #check this
# replace with np.dot(self.exogZ, gamma)
#print(np.shape(self.offset), self.exog.shape, beta.shape
XB = self.offset + np.dot(self.exog, beta)
endog = self.endog
nloglik = -np.log(1-gamm) + np.exp(XB) - endog*XB + np.log(factorial(endog))
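        # Zero-inflation mixture (sketch): exp(-nloglik) above equals
        # (1 - gamm) * Poisson(y | lambda), so for y == 0 the line below
        # yields -log(gamm + (1 - gamm) * Poisson(0 | lambda)).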
nloglik[endog==0] = - np.log(gamm + np.exp(-nloglik[endog==0]))
return nloglik
if __name__ == '__main__':
#Example:
np.random.seed(98765678)
nobs = 1000
rvs = np.random.randn(nobs,6)
data_exog = rvs
data_exog = sm.add_constant(data_exog, prepend=False)
xbeta = 1 + 0.1*rvs.sum(1)
data_endog = np.random.poisson(np.exp(xbeta))
#print(data_endog
    modp = PoissonGMLE(data_endog, data_exog)
resp = modp.fit()
print(resp.params)
print(resp.bse)
    from statsmodels.discrete.discrete_model import Poisson
resdp = Poisson(data_endog, data_exog).fit()
print('\ncompare with discretemod')
print('compare params')
print(resdp.params - resp.params)
print('compare bse')
print(resdp.bse - resp.bse)
gmlp = sm.GLM(data_endog, data_exog, family=sm.families.Poisson())
resgp = gmlp.fit()
''' this creates a warning, bug bse is double defined ???
c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py:105: CacheWriteWarning: The attribute 'bse' cannot be overwritten
warnings.warn(errmsg, CacheWriteWarning)
'''
print('\ncompare with GLM')
print('compare params')
print(resgp.params - resp.params)
print('compare bse')
print(resgp.bse - resp.bse)
lam = np.exp(np.dot(data_exog, resp.params))
'''mean of Poisson distribution'''
predmean = stats.poisson.stats(lam,moments='m')
print(np.max(np.abs(predmean - lam)))
fun = lambda params: np.exp(np.dot(data_exog.mean(0), params))
lamcov = NonlinearDeltaCov(fun, resp.params, resdp.cov_params())
print(lamcov.cov().shape)
print(lamcov.cov())
print('analytical')
xm = data_exog.mean(0)
print(np.dot(np.dot(xm, resdp.cov_params()), xm.T) * \
np.exp(2*np.dot(data_exog.mean(0), resp.params)))
''' cov of linear transformation of params
>>> np.dot(np.dot(xm, resdp.cov_params()), xm.T)
0.00038904130127582825
>>> resp.cov_params(xm)
0.00038902428119179394
>>> np.dot(np.dot(xm, resp.cov_params()), xm.T)
0.00038902428119179394
'''
print(lamcov.wald(1.))
print(lamcov.wald(2.))
print(lamcov.wald(2.6))
do_bootstrap = False
if do_bootstrap:
m,s,r = resp.bootstrap(method='newton')
print(m)
print(s)
print(resp.bse)
print('\ncomparison maxabs, masabsrel')
print('discr params', maxabs(resdp.params, resp.params), maxabsrel(resdp.params, resp.params))
print('discr bse ', maxabs(resdp.bse, resp.bse), maxabsrel(resdp.bse, resp.bse))
print('discr bsejac', maxabs(resdp.bse, resp.bsejac), maxabsrel(resdp.bse, resp.bsejac))
print('discr bsejhj', maxabs(resdp.bse, resp.bsejhj), maxabsrel(resdp.bse, resp.bsejhj))
print('')
print('glm params ', maxabs(resdp.params, resp.params), maxabsrel(resdp.params, resp.params))
print('glm bse ', maxabs(resdp.bse, resp.bse), maxabsrel(resdp.bse, resp.bse))
| [
"[email protected]"
] | |
481c79a58047713cf46ebefb97f59c98252341c6 | ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29 | /code/practice/abc/abc067/c.py | d99b0f3ace785fe8942c0c3286d0397a845e0657 | [] | no_license | mollinaca/ac | e99bb5d5c07159b3ef98cd7067424fa2751c0256 | 2f40dd4333c2b39573b75b45b06ad52cf36d75c3 | refs/heads/master | 2020-12-22T11:02:13.269855 | 2020-09-18T01:02:29 | 2020-09-18T01:02:29 | 236,757,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
N = int(input())
a = list(map(int,input().split()))
total = sum(a)
ans = float('inf')
x = 0
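# x becomes the prefix sum of the first i+1 elements; cutting there leaves
# parts summing to x and total - x, whose difference is |total - 2*x|.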
for i in range(N-1):
x += a[i]
ans = min(abs(total-2*x),ans)
print (ans)
| [
"[email protected]"
] | |
535eb8c77ca7155e6ad95784ac8ecceb6a0f0798 | 95863ef4e8dfcce24dc7e565950728ba4e95d702 | /7510_고급수학.py | 8ff04a52d3aff48263a3f7671e0ee0c8a039dd83 | [] | no_license | choijaehyeokk/BAEKJOON | ee007cd1f06724872fb2359930b26f32a0d646da | 1fa57407a2d981ddd851c135e81c9861afa2dd81 | refs/heads/master | 2023-05-18T01:59:00.753233 | 2021-06-12T15:50:12 | 2021-06-12T15:50:12 | 329,467,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | import sys, math
test_case = int(sys.stdin.readline().rstrip())
for i in range(test_case):
numbers = sorted(list(map(int, sys.stdin.readline().split())))
    if numbers[2] ** 2 == pow(numbers[0], 2) + pow(numbers[1], 2): print('Scenario #{0}:\nyes'.format(i+1))
else: print('Scenario #{0}:\nno'.format(i+1))
if i != test_case-1: print('')
| [
"[email protected]"
] | |
fee3248f8aaf9dcb88181e2bab42d4549d71b8f1 | 5504f5488f9b2a07c600b556f6a14cb6f08c9b12 | /recursion.py | 71d1c26e17e3e2945f0f46e7c391fa6df06c3e25 | [] | no_license | tt-n-walters/saturday-python | 4087028e24ff1c3e80b705b5a49c381f02bc1d84 | 2ad53feb45b5a0e21b927bce25d52c8d2c679793 | refs/heads/master | 2020-12-26T21:46:18.240026 | 2020-04-18T17:40:23 | 2020-04-18T17:40:23 | 237,655,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from json import dumps
moves = []
def move(origin, destination):
# print("Moving disk from {} to {}".format(origin, destination))
moves.append([origin, destination])
def hanoi(num_of_disks, origin, temporary, destination):
if num_of_disks == 0:
pass
else:
hanoi(num_of_disks - 1, origin, destination, temporary)
move(origin, destination)
hanoi(num_of_disks - 1, temporary, origin, destination)
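# The recurrence T(n) = 2*T(n-1) + 1 gives 2**n - 1 moves, so the 20-disk
# call below should record 2**20 - 1 = 1048575 moves.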
hanoi(20, 0, 1, 2)
print(len(moves))
with open("moves.json", "w") as file:
file.write(dumps(moves))
| [
"[email protected]"
] | |
18fb98673882c07988251ea832ca145505d88fab | 53818da6c5a172fe8241465dcbbd34fba382820d | /PythonProgram/chapter_08/8-13.py | 7be5c6f769b32133352770e2e829a2d8b1551e2b | [] | no_license | Lethons/PythonExercises | f4fec3bcbfea4c1d8bc29dfed5b770b6241ad93b | 81d588ffecf543ec9de8c1209c7b26c3d6a423b3 | refs/heads/master | 2021-04-15T11:36:08.991028 | 2018-07-07T09:20:40 | 2018-07-07T09:20:40 | 126,686,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def build_profile(first, last, **user_info):
profile = {}
profile['first_name'] = first
profile['last_name'] = last
for key, value in user_info.items():
profile[key] = value
return profile
user_profile = build_profile('lethons', 'jiang', county='China', city='wuhu')
print(user_profile)
| [
"[email protected]"
] | |
4d1c6700d7a06ef2289c860c73dcb0131b8d2bce | 46a62c499faaa64fe3cce2356c8b229e9c4c9c49 | /taobao-sdk-python-standard/top/api/rest/TraderateListAddRequest.py | e953ad2c46b9045655a3494289e86978fe0d1523 | [] | no_license | xjwangliang/learning-python | 4ed40ff741051b28774585ef476ed59963eee579 | da74bd7e466cd67565416b28429ed4c42e6a298f | refs/heads/master | 2021-01-01T15:41:22.572679 | 2015-04-27T14:09:50 | 2015-04-27T14:09:50 | 5,881,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | '''
Created by auto_sdk on 2012-09-23 16:46:13
'''
from top.api.base import RestApi
class TraderateListAddRequest(RestApi):
def __init__(self,domain,port):
RestApi.__init__(self,domain, port)
self.anony = None
self.content = None
self.result = None
self.role = None
self.tid = None
def getapiname(self):
return 'taobao.traderate.list.add'
| [
"[email protected]"
] | |
5cd97dcf42249f45f58866521562fd46459ffa15 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /purchase/models/res_config_settings.py | fdb001621637fe5fcce1f7f621580d11a93ca173 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
lock_confirmed_po = fields.Boolean("Lock Confirmed Orders", default=lambda self: self.env.user.company_id.po_lock == 'lock')
po_lock = fields.Selection(related='company_id.po_lock', string="Purchase Order Modification *", readonly=False)
po_order_approval = fields.Boolean("Purchase Order Approval", default=lambda self: self.env.user.company_id.po_double_validation == 'two_step')
po_double_validation = fields.Selection(related='company_id.po_double_validation', string="Levels of Approvals *", readonly=False)
po_double_validation_amount = fields.Monetary(related='company_id.po_double_validation_amount', string="Minimum Amount", currency_field='company_currency_id', readonly=False)
company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id', string="Company Currency", readonly=True,
help='Utility field to express amount currency')
default_purchase_method = fields.Selection([
('purchase', 'Ordered quantities'),
('receive', 'Delivered quantities'),
], string="Bill Control", default_model="product.template",
help="This default value is applied to any new product created. "
"This can be changed in the product detail form.", default="receive")
group_warning_purchase = fields.Boolean("Purchase Warnings", implied_group='purchase.group_warning_purchase')
group_manage_vendor_price = fields.Boolean("Vendor Pricelists",
implied_group="purchase.group_manage_vendor_price")
module_account_3way_match = fields.Boolean("3-way matching: purchases, receptions and bills")
module_purchase_requisition = fields.Boolean("Purchase Agreements")
po_lead = fields.Float(related='company_id.po_lead', readonly=False)
use_po_lead = fields.Boolean(
string="Security Lead Time for Purchase",
oldname='default_new_po_lead',
config_parameter='purchase.use_po_lead',
help="Margin of error for vendor lead times. When the system generates Purchase Orders for reordering products,they will be scheduled that many days earlier to cope with unexpected vendor delays.")
@api.onchange('use_po_lead')
def _onchange_use_po_lead(self):
if not self.use_po_lead:
self.po_lead = 0.0
def set_values(self):
super(ResConfigSettings, self).set_values()
self.po_lock = 'lock' if self.lock_confirmed_po else 'edit'
self.po_double_validation = 'two_step' if self.po_order_approval else 'one_step'
| [
"[email protected]"
] | |
fb7a9f1737d29effabc4820243aaec5e5ab2d8d2 | 323f58ecefddd602431eeb285b60ac81316b774a | /aioreactive/operators/pipe.py | 55e16feeb7bc68f8ac14fa5e8e23066505346f21 | [
"MIT"
] | permissive | tr11/aioreactive | aa9798ee5c2f98c0f5301111732e72093232ab8e | 6219f9a0761f69fa1765129b990762affdf661c8 | refs/heads/master | 2021-01-25T13:58:51.892021 | 2018-03-02T22:01:23 | 2018-03-02T22:01:23 | 123,635,129 | 0 | 0 | MIT | 2018-03-02T21:56:46 | 2018-03-02T21:56:46 | null | UTF-8 | Python | false | false | 249 | py | from typing import Callable
from aioreactive.core import AsyncObservable
def pipe(source: AsyncObservable, *args: Callable[[AsyncObservable], AsyncObservable]) -> AsyncObservable:
for op in args:
source = op(source)
return source
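# Usage sketch (operator names are illustrative, assuming partially applied
# aioreactive-style operators `op.filter` and `op.map`):
#   ys = pipe(xs, op.filter(lambda x: x > 0), op.map(lambda x: x * 10))
# which is equivalent to op.map(...)(op.filter(...)(xs)).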
| [
"[email protected]"
] | |
819501b2dc2d02295834d523fa81eb7da09526f6 | 9fd3e5f04baf33cdb913fb34d544c35d94d9d397 | /tests/unit_tests/cx_core/integration/state_test.py | 06c2c574c92d4f17a057320664a5f9f8f3d9a8f2 | [
"MIT"
] | permissive | xaviml/controllerx | dfa56b7005af8212d074544eca8542d8d665b9e0 | 387e130b8489282bf3abb5e847ef16dfe88615c7 | refs/heads/main | 2023-09-01T11:19:25.754886 | 2023-04-18T16:13:13 | 2023-04-18T16:13:13 | 222,056,780 | 280 | 83 | MIT | 2023-07-17T11:37:12 | 2019-11-16T06:23:03 | Python | UTF-8 | Python | false | false | 1,329 | py | from typing import Optional
import pytest
from appdaemon.plugins.hass.hassapi import Hass
from cx_core.controller import Controller
from cx_core.integration.state import StateIntegration
from pytest_mock.plugin import MockerFixture
@pytest.mark.parametrize("attribute", ["sensor", "entity_id", None])
async def test_listen_changes(
fake_controller: Controller, mocker: MockerFixture, attribute: Optional[str]
) -> None:
kwargs = {}
if attribute is not None:
kwargs["attribute"] = attribute
controller_id = "controller_id"
state_event_mock = mocker.patch.object(Hass, "listen_state")
state_integration = StateIntegration(fake_controller, kwargs)
await state_integration.listen_changes(controller_id)
state_event_mock.assert_called_once_with(
fake_controller,
state_integration.state_callback,
controller_id,
attribute=attribute,
)
async def test_callback(
fake_controller: Controller,
mocker: MockerFixture,
) -> None:
handle_action_patch = mocker.patch.object(fake_controller, "handle_action")
state_integration = StateIntegration(fake_controller, {})
await state_integration.state_callback("test", None, "old_state", "new_state", {})
handle_action_patch.assert_called_once_with("new_state", previous_state="old_state")
| [
"[email protected]"
] | |
89bd43e6ff9c99d7fc9f7dbcff19a209e1dcfe51 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/googlecloudsdk/api_lib/compute/iam_base_classes.py | b7e49a6ddc6b2c345b3642ce1a29bd27f31dea8e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 7,876 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal base classes for abstracting away common logic."""
import abc
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.core.iam import iam_util
# TODO(user): Investigate sharing more code with BaseDescriber command.
class BaseGetIamPolicy(base_classes.BaseCommand):
"""Base class for getting the Iam Policy for a resource."""
__metaclass__ = abc.ABCMeta
@staticmethod
def Args(parser, resource=None, list_command_path=None):
BaseGetIamPolicy.AddArgs(parser, resource, list_command_path)
@staticmethod
def AddArgs(parser, resource=None, list_command_path=None):
"""Add required flags for set Iam policy."""
parser.add_argument(
'name',
metavar='NAME',
completion_resource=resource,
list_command_path=list_command_path,
help='The resources whose IAM policy to fetch.')
@property
def method(self):
return 'GetIamPolicy'
def ScopeRequest(self, ref, request):
"""Adds a zone or region to the request object if necessary."""
def SetResourceName(self, ref, request):
"""Adds a the name of the resource to the request object."""
resource_name = self.service.GetMethodConfig(self.method).ordered_params[-1]
setattr(request, resource_name, ref.Name())
@abc.abstractmethod
def CreateReference(self, args):
pass
def Run(self, args):
ref = self.CreateReference(args)
request_class = self.service.GetRequestType(self.method)
request = request_class(project=self.project)
self.ScopeRequest(ref, request)
self.SetResourceName(ref, request)
get_policy_request = (self.service, self.method, request)
errors = []
objects = request_helper.MakeRequests(
requests=[get_policy_request],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
    # Converting the objects generator to a list triggers the
# logic that actually populates the errors list.
resources = list(objects)
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch resource:')
# TODO(user): determine how this output should look when empty.
# GetIamPolicy always returns either an error or a valid policy.
# If no policy has been set it returns a valid empty policy (just an etag.)
# It is not possible to have multiple policies for one resource.
return resources[0]
def GetIamPolicyHelp(resource_name):
return {
'brief': 'Get the IAM Policy for a Google Compute Engine {0}.'.format(
resource_name),
'DESCRIPTION': """\
*{{command}}* displays the Iam Policy associated with a Google Compute
Engine {0} in a project.
""".format(resource_name)}
class ZonalGetIamPolicy(BaseGetIamPolicy):
"""Base class for zonal iam_get_policy commands."""
@staticmethod
def Args(parser, resource=None, command=None):
BaseGetIamPolicy.AddArgs(parser, resource, command)
flags.AddZoneFlag(
parser,
resource_type='resource',
operation_type='fetch')
def CreateReference(self, args):
return self.CreateZonalReference(args.name, args.zone)
def ScopeRequest(self, ref, request):
request.zone = ref.zone
class GlobalGetIamPolicy(BaseGetIamPolicy):
"""Base class for global iam_get_policy commands."""
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
class BaseSetIamPolicy(base_classes.BaseCommand):
"""Base class for setting the Iam Policy for a resource."""
__metaclass__ = abc.ABCMeta
@staticmethod
def Args(parser, resource=None, list_command_path=None):
BaseSetIamPolicy.AddArgs(parser, resource, list_command_path)
@staticmethod
def AddArgs(parser, resource=None, list_command_path=None):
"""Add required flags for set Iam policy."""
parser.add_argument(
'name',
metavar='NAME',
completion_resource=resource,
list_command_path=list_command_path,
help='The resources whose IAM policy to set.')
policy_file = parser.add_argument(
'policy_file',
metavar='POLICY_FILE',
        help='Path to a local JSON formatted file containing a valid policy.')
policy_file.detailed_help = """\
Path to a local JSON formatted file containing a valid policy.
"""
# TODO(user): fill in detailed help.
@property
def method(self):
return 'SetIamPolicy'
def ScopeRequest(self, ref, request):
"""Adds a zone or region to the request object if necessary."""
def SetResourceName(self, ref, request):
"""Adds a the name of the resource to the request object."""
resource_name = self.service.GetMethodConfig(self.method).ordered_params[-1]
setattr(request, resource_name, ref.Name())
@abc.abstractmethod
def CreateReference(self, args):
pass
def Run(self, args):
policy = iam_util.ParseJsonPolicyFile(
args.policy_file, self.messages.Policy)
ref = self.CreateReference(args)
request_class = self.service.GetRequestType(self.method)
request = request_class(project=self.project)
self.ScopeRequest(ref, request)
self.SetResourceName(ref, request)
request.policy = policy
set_policy_request = (self.service, self.method, request)
errors = []
objects = request_helper.MakeRequests(
requests=[set_policy_request],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
    # Converting the objects generator to a list triggers the
# logic that actually populates the errors list.
resources = list(objects)
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch resource:')
# TODO(user): determine how this output should look when empty.
# SetIamPolicy always returns either an error or the newly set policy.
# If the policy was just set to the empty policy it returns a valid empty
# policy (just an etag.)
# It is not possible to have multiple policies for one resource.
return resources[0]
def SetIamPolicyHelp(resource_name):
return {
'brief': 'Set the IAM Policy for a Google Compute Engine {0}.'.format(
resource_name),
'DESCRIPTION': """\
*{{command}}* sets the Iam Policy associated with a Google Compute
Engine {0} in a project.
""".format(resource_name)}
class ZonalSetIamPolicy(BaseSetIamPolicy):
"""Base class for zonal iam_get_policy commands."""
@staticmethod
def Args(parser, resource=None, command=None):
BaseSetIamPolicy.AddArgs(parser, resource, command)
flags.AddZoneFlag(
parser,
resource_type='resource',
operation_type='fetch')
def CreateReference(self, args):
return self.CreateZonalReference(args.name, args.zone)
def ScopeRequest(self, ref, request):
request.zone = ref.zone
class GlobalSetIamPolicy(BaseSetIamPolicy):
"""Base class for global iam_get_policy commands."""
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
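# Hypothetical usage sketch, not part of the SDK: a concrete zonal command
# would subclass the helpers above roughly like this. The collection name
# 'compute.instances' and the self.compute attribute are illustrative
# assumptions about the surrounding command surface, not verified API.
class _ExampleInstancesGetIamPolicy(ZonalGetIamPolicy):
  """Get the IAM policy for a Compute Engine instance (sketch only)."""

  @staticmethod
  def Args(parser):
    ZonalGetIamPolicy.Args(parser, resource='compute.instances')

  @property
  def service(self):
    return self.compute.instances  # assumed attribute from BaseCommand

  @property
  def resource_type(self):
    return 'instances'

_ExampleInstancesGetIamPolicy.detailed_help = GetIamPolicyHelp('instance')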
| [
"[email protected]"
] | |
83d5bfd11fddb5fa5a6e64902de5730ad1d8156d | d5b48163d236ca770be8e687f92192e2971397e8 | /set_natural.py | d00f9c0afd9a0e53b4a09e7a94182bae516ee384 | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | num=int(input("Enter your number: "))
x=set()
for i in range(1,num+1):
x.add(i)
print(x)
| [
"[email protected]"
] | |
7d948050caa7731a5f9087f1a5117cfa2f185d2d | 179d753991d2578750dc058f1b963f80eab787c8 | /deeppavlov/dataset_readers/faq_reader.py | 826fb4088302a5a406eee9f8c0b5d852d5247b17 | [
"Apache-2.0",
"Python-2.0"
] | permissive | yoptar/DeepPavlov | 89ebd280db22e732bc942490e316d0588baf3803 | 3e7c8821db6d63b3aaac9abdfd8a478104371cb9 | refs/heads/master | 2020-04-07T12:54:56.279903 | 2019-01-09T09:23:44 | 2019-01-09T09:23:44 | 158,386,034 | 1 | 0 | Apache-2.0 | 2019-01-09T08:03:45 | 2018-11-20T12:26:53 | Python | UTF-8 | Python | false | false | 2,021 | py | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import read_csv
from deeppavlov.core.data.dataset_reader import DatasetReader
from deeppavlov.core.common.registry import register
@register('faq_reader')
class FaqDatasetReader(DatasetReader):
"""Reader for FAQ dataset"""
def read(self, data_path: str = None, data_url: str = None, x_col_name: str = 'x', y_col_name: str = 'y') -> Dict:
"""
Read FAQ dataset from specified csv file or remote url
Parameters:
data_path: path to csv file of FAQ
data_url: url to csv file of FAQ
x_col_name: name of Question column in csv file
y_col_name: name of Answer column in csv file
Returns:
A dictionary containing training, validation and test parts of the dataset obtainable via
``train``, ``valid`` and ``test`` keys.
"""
if data_url is not None:
data = read_csv(data_url)
elif data_path is not None:
data = read_csv(data_path)
else:
raise ValueError("Please specify data_path or data_url parameter")
x = data[x_col_name]
y = data[y_col_name]
train_xy_tuples = [(x[i].strip(), y[i].strip()) for i in range(len(x))]
dataset = dict()
dataset["train"] = train_xy_tuples
dataset["valid"] = []
dataset["test"] = []
return dataset
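# Minimal usage sketch (not part of the original module); the CSV path and
# column names below are illustrative assumptions only.
if __name__ == '__main__':
    reader = FaqDatasetReader()
    data = reader.read(data_path='faq.csv', x_col_name='Question', y_col_name='Answer')
    print('{} question-answer pairs loaded'.format(len(data['train'])))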
| [
"[email protected]"
] | |
19c2262d2a16a689f5af5323727a094140599163 | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stubs/psutil/psutil/_psbsd.pyi | 6a4df8e9afc90de47c4ec9f2dae8b0a9e9a27ef9 | [
"Apache-2.0",
"MIT"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 3,616 | pyi | from contextlib import AbstractContextManager
from typing import Any, NamedTuple
from ._common import (
FREEBSD as FREEBSD,
NETBSD as NETBSD,
OPENBSD as OPENBSD,
AccessDenied as AccessDenied,
NoSuchProcess as NoSuchProcess,
ZombieProcess as ZombieProcess,
conn_tmap as conn_tmap,
conn_to_ntuple as conn_to_ntuple,
memoize as memoize,
usage_percent as usage_percent,
)
__extra__all__: Any
PROC_STATUSES: Any
TCP_STATUSES: Any
PAGESIZE: Any
AF_LINK: Any
HAS_PER_CPU_TIMES: Any
HAS_PROC_NUM_THREADS: Any
HAS_PROC_OPEN_FILES: Any
HAS_PROC_NUM_FDS: Any
kinfo_proc_map: Any
class svmem(NamedTuple):
total: Any
available: Any
percent: Any
used: Any
free: Any
active: Any
inactive: Any
buffers: Any
cached: Any
shared: Any
wired: Any
class scputimes(NamedTuple):
user: Any
nice: Any
system: Any
idle: Any
irq: Any
class pmem(NamedTuple):
rss: Any
vms: Any
text: Any
data: Any
stack: Any
pfullmem = pmem
class pcputimes(NamedTuple):
user: Any
system: Any
children_user: Any
children_system: Any
class pmmap_grouped(NamedTuple):
path: Any
rss: Any
private: Any
ref_count: Any
shadow_count: Any
class pmmap_ext(NamedTuple):
addr: Any
perms: Any
path: Any
rss: Any
private: Any
ref_count: Any
shadow_count: Any
class sdiskio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_time: Any
write_time: Any
busy_time: Any
def virtual_memory(): ...
def swap_memory(): ...
def cpu_times(): ...
def per_cpu_times(): ...
def cpu_count_logical(): ...
def cpu_count_physical(): ...
def cpu_stats(): ...
def disk_partitions(all: bool = ...): ...
disk_usage: Any
disk_io_counters: Any
net_io_counters: Any
net_if_addrs: Any
def net_if_stats(): ...
def net_connections(kind): ...
def sensors_battery(): ...
def sensors_temperatures(): ...
def cpu_freq(): ...
def boot_time(): ...
def users(): ...
def pids(): ...
def pid_exists(pid): ...
def is_zombie(pid): ...
def wrap_exceptions(fun): ...
def wrap_exceptions_procfs(inst) -> AbstractContextManager[None]: ...
class Process:
pid: Any
def __init__(self, pid) -> None: ...
def oneshot(self): ...
def oneshot_enter(self) -> None: ...
def oneshot_exit(self) -> None: ...
def name(self): ...
def exe(self): ...
def cmdline(self): ...
def environ(self): ...
def terminal(self): ...
def ppid(self): ...
def uids(self): ...
def gids(self): ...
def cpu_times(self): ...
def cpu_num(self): ...
def memory_info(self): ...
memory_full_info: Any
def create_time(self): ...
def num_threads(self): ...
def num_ctx_switches(self): ...
def threads(self): ...
def connections(self, kind: str = ...): ...
def wait(self, timeout: Any | None = ...): ...
def nice_get(self): ...
def nice_set(self, value): ...
def status(self): ...
def io_counters(self): ...
def cwd(self): ...
class nt_mmap_grouped(NamedTuple):
path: Any
rss: Any
private: Any
ref_count: Any
shadow_count: Any
class nt_mmap_ext(NamedTuple):
addr: Any
perms: Any
path: Any
rss: Any
private: Any
ref_count: Any
shadow_count: Any
def open_files(self): ...
def num_fds(self): ...
def cpu_affinity_get(self): ...
def cpu_affinity_set(self, cpus) -> None: ...
def memory_maps(self): ...
def rlimit(self, resource, limits: Any | None = ...): ...
| [
"[email protected]"
] | |
532452741987cd2de3241c15dc9dca0f6317a145 | 2cce44d102fc03cdfae7edbf81011c0f9fbbf92a | /_unittests/ut_notebooks/test_LONG_2A_notebook_1.py | 3ece67c44bed8494b39381c71d83d2270845ffa5 | [
"MIT"
] | permissive | AlexisEidelman/ensae_teaching_cs | ddbbccd0f732563d34ca6b6c2e389a8805dc60df | 421d183c1fe6b5c3af9a5acedf1e3ad8f15b02d2 | refs/heads/master | 2021-01-20T08:29:33.182139 | 2017-08-03T15:06:49 | 2017-08-03T15:06:49 | 90,151,574 | 0 | 0 | null | 2017-05-03T13:23:37 | 2017-05-03T13:23:36 | null | UTF-8 | Python | false | false | 2,080 | py | """
@brief test log(time=170s)
notebook test
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
class TestNotebookRunner2a_1 (unittest.TestCase):
def setUp(self):
fLOG("add missing dependencies", OutputPrint=__name__ == "__main__")
add_missing_development_version(
["pyensae", "pymyinstall", "pymmails", "jyquickhelper"], __file__)
def test_notebook_runner(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks
from src.ensae_teaching_cs.automation.notebook_test_helper import unittest_raise_exception_notebook, clean_function_1a
temp = get_temp_folder(__file__, "temp_notebook2a_1")
keepnote = ls_notebooks("td2a")
assert len(keepnote) > 0
for k in keepnote:
if "_1" in k:
fLOG("*********", k)
res = execute_notebooks(
temp,
keepnote,
lambda i,
n: "_1" in n,
clean_function=clean_function_1a,
fLOG=fLOG)
unittest_raise_exception_notebook(res, fLOG)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
804d16fb294af1e2167aa7cbfe225cbea1e4a12b | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /ABC073/ABC073_C.py | d47da98fa19e5b10d9df3438b09a9f9619022de0 | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n=int(input())
dic={}
for i in range(n):
v=int(input())
dic[v]=dic.get(v,0)+1
count=0
for k in dic.keys():
if dic[k]%2!=0:
count+=1
print(count)
| [
"[email protected]"
] | |
c92a6964dce48eca3cd0395daa70a1cda211616a | 8395ffb48750359d1bd51a201a41c7fe124998bc | /apc2015/perception/single_utils/src/common_utils.py | 8da0757fb5b69454a2ac8aacbf9b00f6599b6e48 | [] | no_license | duke-iml/ece490-s2016 | ab6c3d3fb159a28a9c38487cdb1ad3993008b854 | f9cc992fbaadedc8a69678ba39f0c9d108e6910d | refs/heads/master | 2020-04-12T09:03:56.601000 | 2016-11-29T21:36:48 | 2016-11-29T21:36:48 | 49,226,568 | 2 | 6 | null | 2016-11-29T21:36:49 | 2016-01-07T19:42:34 | Python | UTF-8 | Python | false | false | 13,463 | py |
from __future__ import division
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import struct
import networkx as nx
from networkx import Graph
from math import sqrt
from sklearn.linear_model import RANSACRegressor, LinearRegression
import numpy as np
import colorsys
def argmin(list):
'''
return the index of the smallest item in the list
'''
return sorted([[val, idx] for idx, val in enumerate(list)])[0][1]
def argmax(list):
'''
return the index of the largest item in the list
'''
return sorted([[val, idx] for idx, val in enumerate(list)], reverse=True)[0][1]
def local_min(list, idx):
'''
return the index and the value of the local minimum following descent initialized at the specified index
'''
if idx==0 and list[0]>list[1]:
idx += 1
length = len(list)
if idx==length-1 and list[length-2]<list[length-1]:
idx -= 1
while (not idx==0) and (not idx==length-1) and not(list[idx-1]>=list[idx] and list[idx+1]>=list[idx]):
if list[idx]>list[idx-1]:
idx -= 1
else:
idx += 1
return idx, list[idx]
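# Tiny illustration of the descent behaviour (arbitrary values): starting at
# index 3 of [5, 2, 4, 3, 1, 6], the walk moves right and stops on the 1.
def _demo_local_min():
    assert local_min([5, 2, 4, 3, 1, 6], 3) == (4, 1)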
def select(list, idxs):
'''
idxs: a list of indices
return a list that consists of only those in the original list with indices in idxs
'''
return [list[i] for i in idxs]
def select_each(list_of_list, idxs):
'''
apply select on each list in list_of_list
return the new list_of_list with each list being selected by idxs
'''
return [select(l, idxs) for l in list_of_list]
def filter_val_idx(predicate, list):
qualified = filter(lambda p: predicate(p[1]), enumerate(list))
idxs = [i for (i, _) in qualified]
vals = [v for (_, v) in qualified]
return vals, idxs
def quantile(list, q):
'''
q: a float between 0 and 1 specifying the quantile
    return the element in the list at the (q*100)th quantile
    (the list is assumed to be sorted already)
'''
return list[int(len(list)*q)]
def f_addr_to_i(f):
return struct.unpack('I', struct.pack('f', f))[0]
def i_addr_to_f(i):
return struct.unpack('f', struct.pack('I', i))[0]
def rgb_to_pcl_float(r, g, b):
i = r<<16 | g<<8 | b
return i_addr_to_f(i)
def pcl_float_to_rgb(f):
i = f_addr_to_i(f)
r = i >> 16 & 0x0000ff
g = i >> 8 & 0x0000ff
b = i >> 0 & 0x0000ff
return r,g,b
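# Quick round-trip sanity check for the two PCL packing helpers above; the
# sample color is arbitrary.
def _demo_rgb_roundtrip():
    f = rgb_to_pcl_float(200, 120, 30)
    assert pcl_float_to_rgb(f) == (200, 120, 30)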
def render_3d_scatter(points, proportion=1, xlabel="x", ylabel="y", zlabel="z"):
'''
render 3d points. the points are represented by a list of lists (points) i.e. [[x1,y1,z1],[x2,y2,z2],...,[xn,yn,zn]]
return the axis handler of the image (which, for example, can be used to change window limit by set_xlim, set_ylim, set_zlim)
'''
if len(points[0])==4:
ax = render_3d_scatter_with_rgb(points, proportion, xlabel, ylabel, zlabel)
return ax
every = int(1/proportion)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter([x for i,(x,_,_) in enumerate(points) if i%every==0],
[y for i,(_,y,_) in enumerate(points) if i%every==0], zs=[z for i,(_,_,z) in enumerate(points) if i%every==0])
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
ax.set_aspect('equal')
return ax
def render_3d_scatter_with_rgb(points, proportion=1, xlabel="x", ylabel="y", zlabel="z"):
'''
render 3d points. the points are represented by a list of lists (points with rgb) i.e. [[x1,y1,z1,rgb1],[x2,y2,z2,rgb2],...,[xn,yn,zn,rgbn]]
return the axis handler of the image (which, for example, can be used to change window limit by set_xlim, set_ylim, set_zlim)
'''
every = int(1/proportion)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
rgb = [c for _,_,_,c in points]
rgb_int = [struct.unpack('I', struct.pack('f', c))[0] for c in rgb]
r = [c >> 16 & 0x0000ff for c in rgb_int]
g = [c >> 8 & 0x0000ff for c in rgb_int]
b = [c >> 0 & 0x0000ff for c in rgb_int]
rgb = [[r[i]/255, g[i]/255, b[i]/255] for i in xrange(len(r))]
x_selected = [x for i,(x,_,_,_) in enumerate(points) if i%every==0]
y_selected = [y for i,(_,y,_,_) in enumerate(points) if i%every==0]
z_selected = [z for i,(_,_,z,_) in enumerate(points) if i%every==0]
rgb_selected = [c for i,c in enumerate(rgb) if i%every==0]
ax.scatter(x_selected, y_selected, zs=z_selected, c=rgb_selected, linewidths=0)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
ax.set_aspect('equal')
return ax
def remove_plane(point_cloud, coord, plane, tolerance=0.03):
'''
point_cloud format: [[x1,y1,z1],[x2,y2,z2],...,[xn,yn,zn]]
'''
if plane=="xy":
return [(x,y,z) for x,y,z in point_cloud if abs(z-coord)>tolerance]
elif plane=="yz":
return [(x,y,z) for x,y,z in point_cloud if abs(x-coord)>tolerance]
elif plane=="xz":
return [(x,y,z) for x,y,z in point_cloud if abs(y-coord)>tolerance]
else:
raise Exception("Unrecognized plane name")
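# Minimal illustration with made-up coordinates: points within the default
# 3 cm tolerance of the z=0 plane are dropped.
def _demo_remove_plane():
    cloud = [[0, 0, 0.0], [0, 0, 0.01], [0, 0, 0.5]]
    assert remove_plane(cloud, 0.0, "xy") == [(0, 0, 0.5)]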
def remove_plane_rgb(point_cloud, coord, plane, tolerance=0.03):
'''
point_cloud format: [[x1,y1,z1,rgb1],[x2,y2,z2,rgb2],...,[xn,yn,zn,rgbn]]
rgb is a float packed from three numbers using PCL's encoding scheme
'''
if plane=="xy":
return [(x,y,z,rgb) for x,y,z,rgb in point_cloud if abs(z-coord)>tolerance]
elif plane=="yz":
return [(x,y,z,rgb) for x,y,z,rgb in point_cloud if abs(x-coord)>tolerance]
elif plane=="xz":
return [(x,y,z,rgb) for x,y,z,rgb in point_cloud if abs(y-coord)>tolerance]
else:
raise Exception("Unrecognized plane name")
def remove_plane_idx(point_cloud, coord, plane, fit=True, tolerance=0.03):
'''
    return the indices of the points that remain after the plane is removed
'''
if fit:
if plane=="xy":
points_to_be_removed = [p for p in point_cloud if abs(p[2]-coord)<=tolerance]
a, b, c, d = fit_plane(points_to_be_removed)
return [i for i,p in enumerate(point_cloud) if dist_point_plane(p, a, b, c, d)>tolerance]
elif plane=="yz":
points_to_be_removed = [p for p in point_cloud if abs(p[0]-coord)<=tolerance]
a, b, c, d = fit_plane(points_to_be_removed)
return [i for i,p in enumerate(point_cloud) if dist_point_plane(p, a, b, c, d)>tolerance]
elif plane=="xz":
points_to_be_removed = [p for p in point_cloud if abs(p[1]-coord)<=tolerance]
a, b, c, d = fit_plane(points_to_be_removed)
return [i for i,p in enumerate(point_cloud) if dist_point_plane(p, a, b, c, d)>tolerance]
else:
raise Exception("Unrecognized plane name")
else:
if plane=="xy":
return [i for i,p in enumerate(point_cloud) if abs(p[2]-coord)>tolerance]
elif plane=="yz":
return [i for i,p in enumerate(point_cloud) if abs(p[0]-coord)>tolerance]
elif plane=="xz":
return [i for i,p in enumerate(point_cloud) if abs(p[1]-coord)>tolerance]
else:
raise Exception("Unrecognized plane name")
def brighten_point_cloud(xyzrgb):
'''
brighten a point cloud
the s and v of the hsv color of each point are increased to maximum scale. the object is fully saturated and illuminated
this function does not change the original point cloud but instead returns a new point cloud
'''
new_cloud = []
for i in xrange(len(xyzrgb)):
x,y,z,rgb = xyzrgb[i]
r,g,b = pcl_float_to_rgb(rgb)
r /= 255
g /= 255
b /= 255
h,_,_ = colorsys.rgb_to_hsv(r,g,b)
r,g,b = colorsys.hsv_to_rgb(h,1,1)
r = int(r*255)
g = int(g*255)
b = int(b*255)
rgb = rgb_to_pcl_float(r,g,b)
        new_cloud.append([x,y,z,rgb])
    return new_cloud
def read_pcd_file(f, data):
if isinstance(f, basestring):
f = open(f)
pointsxyzrgb = []
pointsxyz = []
pointsxy = []
pointsyz = []
pointsxz = []
all_x = []
all_y = []
all_z = []
for l in f:
try:
float(l.strip().split()[0])
except:
continue
x, y, z, rgb = map(float, l.strip().split())
pointsxyzrgb.append([x,y,z,rgb])
pointsxyz.append([x,y,z])
pointsxy.append([x,y])
pointsyz.append([y,z])
pointsxz.append([x,z])
all_x.append(x)
all_y.append(y)
all_z.append(z)
ret = []
for d in data:
if d=='rgb' or d=='xyzrgb':
ret.append(pointsxyzrgb)
elif d=='xyz':
ret.append(pointsxyz)
elif d=='xy':
ret.append(pointsxy)
elif d=='yz':
ret.append(pointsyz)
elif d=='xz':
ret.append(pointsxz)
elif d=='x':
ret.append(all_x)
elif d=='y':
ret.append(all_y)
elif d=='z':
ret.append(all_z)
else:
raise Exception("Unrecgonized data format"+str(d))
return ret
def write_pcd_file(point_cloud, f):
if isinstance(f, basestring):
f = open(f, 'w')
tot_num = len(point_cloud)
has_rgb = len(point_cloud[0])==4
if has_rgb:
f.write("VERSION .7\nFIELDS x y z rgb\nSIZE 4 4 4 4\nTYPE F F F F\nCOUNT 1 1 1 1\n")
f.write("WIDTH "+str(tot_num)+"\n")
f.write("HEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0\n")
f.write("POINTS "+str(tot_num)+"\n")
f.write("DATA ascii\n")
else:
f.write("VERSION .7\nFIELDS x y z\nSIZE 4 4 4\nTYPE F F F\nCOUNT 1 1 1\n")
f.write("WIDTH "+str(tot_num)+"\n")
f.write("HEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0\n")
f.write("POINTS "+str(tot_num)+"\n")
f.write("DATA ascii\n")
for p in point_cloud:
f.write(" ".join(map(str,p))+"\n")
f.close()
def euclidian_2d_dist(p1, p2):
return sqrt( (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 )
def euclidian_3d_dist(p1, p2):
return sqrt( (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2 )
def make_graph(points, neighbor_max_dist=0.01):
graph = Graph()
graph.add_nodes_from(range(len(points)))
for i in xrange(len(points)):
for j in xrange(i+1, len(points)):
if euclidian_3d_dist(points[i], points[j])<neighbor_max_dist:
graph.add_edge(i,j)
return graph
def get_largest_cc(points, neighbor_max_dist=0.01, eligible_condition=None):
graph = make_graph(points, neighbor_max_dist)
if eligible_condition is None:
idxs = sorted(nx.connected_components(graph), key=len, reverse=True)[0]
return select(points, idxs)
else:
ccs = list(nx.connected_components(graph))
max_count = None
max_cc_idx = None
for i in xrange(len(ccs)):
idxs = ccs[i]
tmp_points = select(points, idxs)
eligible_count = len(filter(eligible_condition, tmp_points))
if max_count is None or eligible_count>max_count:
max_count = eligible_count
max_cc_idx = i
return select(points, ccs[max_cc_idx])
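# Illustrative check with two made-up, well-separated clusters: the larger
# one (3 points) should be returned with the default 1 cm neighbour radius.
def _demo_largest_cc():
    cluster_a = [[0, 0, 0], [0.005, 0, 0], [0.005, 0.005, 0]]
    cluster_b = [[1, 1, 1], [1.005, 1, 1]]
    assert len(get_largest_cc(cluster_a + cluster_b)) == 3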
def fit_plane(points):
'''
fit a plane through a list of 3d points and return a, b, c, d that represents the plane as ax+by+cz+d=0
'''
X = [[p[0], p[1]] for p in points]
X = np.matrix(X)
y = [p[2] for p in points]
model = RANSACRegressor(LinearRegression())
model.fit(X, y)
d = list(model.estimator_.intercept_.flatten())[0]
a, b = list(model.estimator_.coef_.flatten())
c = -1
return a, b, c, d
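# Noise-free sanity sketch for fit_plane: points on z = 2x + 3y + 1 should be
# recovered almost exactly (assumes scikit-learn's default RANSAC settings,
# which keep every point as an inlier here).
def _demo_fit_plane():
    pts = [[x, y, 2 * x + 3 * y + 1] for x in range(5) for y in range(5)]
    a, b, c, d = fit_plane(pts)
    assert abs(a - 2) < 1e-6 and abs(b - 3) < 1e-6 and abs(d - 1) < 1e-6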
def dist_point_plane(p, a, b, c, d):
'''
distance between point p: (p[0], p[1], p[2]) and plane ax+by+cz+d=0
'''
return abs(a*p[0]+b*p[1]+c*p[2]+d)/sqrt(a**2+b**2+c**2)
def normalize(vec):
'''
normalize vector
'''
return list(np.divide(vec, float(euclidian_3d_dist(vec, [0,0,0]))).flatten())
def project(vec1, vec2):
'''
project vec1 down to vec2 and return the projected vector
'''
assert len(vec1)==3 and len(vec2)==3
coef = np.dot(vec1, vec2)/np.dot(vec2, vec2)
return list(np.multiply(vec2, coef).flatten())
def project_to_plane(point, a, b, c):
'''
project point down to plane ax+by+cz=0
return the coordinate of the point of projection in the plane
'''
assert 3<=len(point)<=4
point_3d = [point[0], point[1], point[2]]
n = (a, b, c)
projected = project(point_3d, n)
if len(point)==3:
return [point[0]-projected[0], point[1]-projected[1], point[2]-projected[2]]
else:
return [point[0]-projected[0], point[1]-projected[1], point[2]-projected[2], point[3]]
def get_basis_for_plane(a, b, c):
'''
find a set of basis vectors for the plane ax+by+cz=0
'''
basis1 = normalize(project_to_plane([1,0,0], a, b, c))
attempt = [0, 1, 0]
attempt_on_plane = project_to_plane(attempt, a, b, c)
attempt_on_plane_on_basis1 = project(attempt_on_plane, basis1)
basis2 = np.array(attempt_on_plane)-np.array(attempt_on_plane_on_basis1)
basis2 = normalize(list(basis2.flatten()))
return basis1, basis2
def basis_decompose(vec, basis1, basis2):
'''
decompose vec into a*basis1+b*basis2 and return (a, b)
basis1 and basis2 must be perpendicular to each other
'''
assert np.dot(basis1, basis2)<1e-6
assert 3<=len(vec)<=4
if len(vec)==4:
v = [vec[0], vec[1], vec[2]]
else:
v = vec
a = np.dot(v, basis1) / np.dot(basis1, basis1)
b = np.dot(v, basis2) / np.dot(basis2, basis2)
return a, b
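# End-to-end sketch tying the plane helpers together (illustrative values):
# project a point onto the plane x+y+z=0, express it in the plane basis and
# check that the reconstruction matches.
def _demo_plane_basis():
    p = project_to_plane([1.0, 2.0, 3.0], 1, 1, 1)
    b1, b2 = get_basis_for_plane(1, 1, 1)
    u, v = basis_decompose(p, b1, b2)
    recon = np.add(np.multiply(b1, u), np.multiply(b2, v))
    assert euclidian_3d_dist(recon, p) < 1e-6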
| [
"[email protected]"
] | |
67cc58df811c8468e0e57e2bfa9176ab97cede56 | c5a7003de780b3b92f4dff39d5b9d8364bdd28a8 | /HW5/python/q4.py | e9bd110e454a44955f8d3bc41d2cc414271c3393 | [
"ICU"
] | permissive | rainmiku/16720-19Spring-Homework | 961bb00e1ba46de7acc9884ec61c32389d5a6f4a | 9ebc8e178bd2cca85ada52f0cb8ea5f22b47d57e | refs/heads/master | 2020-06-18T15:29:02.340995 | 2019-04-22T03:57:41 | 2019-04-22T03:57:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | import numpy as np
import skimage
import skimage.measure
import skimage.color
import skimage.restoration
import skimage.filters
import skimage.morphology
import skimage.segmentation
# takes a color image
# returns a list of bounding boxes and black_and_white image
def findLetters(image):
bboxes = []
bw = None
# insert processing in here
# one idea estimate noise -> denoise -> greyscale -> threshold -> morphology -> label -> skip small boxes
# this can be 10 to 15 lines of code using skimage functions
denoise_image = skimage.restoration.denoise_bilateral(image, multichannel = True)
    greyscale_image = skimage.color.rgb2gray(denoise_image)
threshold = skimage.filters.threshold_otsu(greyscale_image)
bw = greyscale_image < threshold
bw = skimage.morphology.closing(bw, skimage.morphology.square(5))
label_image = skimage.morphology.label(bw, connectivity = 2)
props = skimage.measure.regionprops(label_image)
mean_size = sum([prop.area for prop in props]) / len(props)
bboxes = [prop.bbox for prop in props if prop.area > mean_size / 3]
bw = (~bw).astype(np.float)
return bboxes, bw | [
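# Hedged usage sketch (not part of the assignment file): the image path is an
# assumption; boxes are drawn with matplotlib for quick visual inspection.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import matplotlib.patches
    import skimage.io
    img = skimage.io.imread('images/01_list.jpg')  # hypothetical path
    bboxes, bw = findLetters(img)
    plt.imshow(bw, cmap='gray')
    for minr, minc, maxr, maxc in bboxes:
        plt.gca().add_patch(matplotlib.patches.Rectangle(
            (minc, minr), maxc - minc, maxr - minr,
            fill=False, edgecolor='red'))
    plt.show()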
"[email protected]"
] | |
677777ac7e27e0ebebf55cb1e8df3142b5de111f | f5dd918e0b98bfb72def6c6fc5d903d07f56a6ab | /3/task3.2_.py | cfaaa983b3a837f0f7d3d176ae0e56e287041a21 | [] | no_license | kuzovkov/python_labs | c0c250a6a514202d798ee4176321279b87f1c318 | 503c01024461629f18ad9846b5ed9f57a7f74980 | refs/heads/master | 2021-01-11T03:41:17.510860 | 2016-10-19T21:17:02 | 2016-10-19T21:17:02 | 71,400,114 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 1,455 | py | #Кузовков Александр Владимирович
import time
import random
n1=range(10000)
n2=range(12000)
list1=[]
for i in n1:
list1.append(random.randrange(1,50,1))
list2=[]
for i in n2:
list2.append(random.randrange(1,50,1))
cor1=tuple(list1)
cor2=tuple(list2)
#1
print "1 -for"
#print list1
#print list2
start=time.clock()
for item2 in list2:
flag=False
for item1 in list1:
if item1==item2:
flag=True
if flag ==False:
list1.append(item2)
end=time.clock()
#print list1
print 'Time: %s'%(end-start)
#2
print "2 - in"
list1=list(cor1)
#print list1
#print list2
start=time.clock()
for item2 in list2:
if item2 not in list1:
list1.append(item2)
end=time.clock()
#print list1
print 'Time: %s'%(end-start)
#3
print "3 - dict"
list1=list(cor1)
#print list1
#print list2
d1=dict(zip(list1,range(len(list1))))
start=time.clock()
for item2 in list2:
if not d1.has_key(item2):
list1.append(item2)
end=time.clock()
#print list1
print 'Time: %s'%(end-start)
#4
print "4 - set"
list1=list(cor1)
#print list1
#print list2
set1=set(list1)
start=time.clock()
for item2 in list2:
if item2 not in set1:
list1.append(item2)
end=time.clock()
#print list1
print 'Time: %s'%(end-start)
| [
"[email protected]"
] | |
28275d76fc7068091da1f1482abe349efcc37d25 | fa7c302f7df6b1773b27de3b742d551bd54aa4e2 | /test/test_full_project.py | e13c489a7f2a9a9010537ebe73affc9fb3c1d668 | [] | no_license | cons3rt/cons3rt-python-sdk | d01b3b174c295491130fba0d76d046b16492e9f7 | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | refs/heads/master | 2021-11-04T02:31:54.485541 | 2021-10-26T19:28:57 | 2021-10-26T19:28:57 | 241,673,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.full_project import FullProject # noqa: E501
from openapi_client.rest import ApiException
class TestFullProject(unittest.TestCase):
"""FullProject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFullProject(self):
"""Test FullProject"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.full_project.FullProject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
cccda1d3bd2bf6bddebd293ef21833236a18e6eb | 28c80b58099d467e1f54f798e91cd8d495de4a1c | /Hunter_1_1.py | 1f9ef72e245d2449592068b4ad01b9570b5a296f | [] | no_license | Raagini539/programs | a01a17bcf5bec2ae5717beb877f1f7f55d6520f2 | 62767573a21ffd8e8d697eca42685e6fc2a96e0a | refs/heads/master | 2020-04-15T22:12:05.095611 | 2019-06-12T13:33:43 | 2019-06-12T13:33:43 | 165,063,410 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | #raagini
n = int(input())                        # number of values (digits 0-9)
n1 = list(map(int, input().split()))
b = []                                  # digits with the highest frequency
l = [0] * 10                            # frequency count per digit
for i in range(0, len(n1)):
    l[n1[i]] += 1
a = max(l)                              # the highest frequency
for i in range(0, len(l)):
    if a == l[i]:
        b.append(i)
if b == n1:                             # modes list matches the input exactly
    print("unique")
else:
    for i in range(0, len(b)):          # print the modes space-separated
        if i == len(b) - 1:
            print(b[i])
        else:
            print(b[i], end=' ')
| [
"[email protected]"
] | |
c9062b511a27050875abdd2ce5566e8cb651ffd5 | bc441bb06b8948288f110af63feda4e798f30225 | /ucpro_sdk/model/tool/extra_info_pb2.py | 2aa19a3002490b7f1718426fdc634f5975076749 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 17,904 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: extra_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ucpro_sdk.model.tool import callback_pb2 as ucpro__sdk_dot_model_dot_tool_dot_callback__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='extra_info.proto',
package='tool',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/tool'),
serialized_pb=_b('\n\x10\x65xtra_info.proto\x12\x04tool\x1a#ucpro_sdk/model/tool/callback.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xf2\x05\n\tExtraInfo\x12\x10\n\x08toolName\x18\x01 \x01(\t\x12\x10\n\x08\x65xecMode\x18\x02 \x01(\t\x12\x0f\n\x07outputs\x18\x03 \x03(\t\x12\x0e\n\x06origin\x18\x04 \x01(\t\x12\x0f\n\x07trigger\x18\x05 \x01(\t\x12&\n\x06\x64\x65tail\x18\x06 \x01(\x0b\x32\x16.tool.ExtraInfo.Detail\x12\x0e\n\x06toolId\x18\x07 \x01(\t\x12\x10\n\x08\x65xecUser\x18\x08 \x01(\t\x12\'\n\x06inputs\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x1a\x9b\x04\n\x06\x44\x65tail\x12 \n\x08\x63\x61llback\x18\x01 \x01(\x0b\x32\x0e.tool.Callback\x12+\n\x0btoolOutputs\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12(\n\x07toolEnv\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x35\n\noutputDefs\x18\x04 \x03(\x0b\x32!.tool.ExtraInfo.Detail.OutputDefs\x12\x33\n\ttableDefs\x18\x05 \x03(\x0b\x32 .tool.ExtraInfo.Detail.TableDefs\x12\x13\n\x0bsubscribers\x18\x06 \x03(\t\x1a&\n\nOutputDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a\xee\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12?\n\ndimensions\x18\x03 \x03(\x0b\x32+.tool.ExtraInfo.Detail.TableDefs.Dimensions\x12\x39\n\x07\x63olumns\x18\x04 \x03(\x0b\x32(.tool.ExtraInfo.Detail.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\tB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/toolb\x06proto3')
,
dependencies=[ucpro__sdk_dot_model_dot_tool_dot_callback__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_EXTRAINFO_DETAIL_OUTPUTDEFS = _descriptor.Descriptor(
name='OutputDefs',
full_name='tool.ExtraInfo.Detail.OutputDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ExtraInfo.Detail.OutputDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ExtraInfo.Detail.OutputDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=569,
serialized_end=607,
)
_EXTRAINFO_DETAIL_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
name='Dimensions',
full_name='tool.ExtraInfo.Detail.TableDefs.Dimensions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ExtraInfo.Detail.TableDefs.Dimensions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ExtraInfo.Detail.TableDefs.Dimensions.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=773,
serialized_end=811,
)
_EXTRAINFO_DETAIL_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='tool.ExtraInfo.Detail.TableDefs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ExtraInfo.Detail.TableDefs.Columns.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ExtraInfo.Detail.TableDefs.Columns.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=813,
serialized_end=848,
)
_EXTRAINFO_DETAIL_TABLEDEFS = _descriptor.Descriptor(
name='TableDefs',
full_name='tool.ExtraInfo.Detail.TableDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tool.ExtraInfo.Detail.TableDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tool.ExtraInfo.Detail.TableDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='tool.ExtraInfo.Detail.TableDefs.dimensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='columns', full_name='tool.ExtraInfo.Detail.TableDefs.columns', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EXTRAINFO_DETAIL_TABLEDEFS_DIMENSIONS, _EXTRAINFO_DETAIL_TABLEDEFS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=610,
serialized_end=848,
)
_EXTRAINFO_DETAIL = _descriptor.Descriptor(
name='Detail',
full_name='tool.ExtraInfo.Detail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='callback', full_name='tool.ExtraInfo.Detail.callback', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolOutputs', full_name='tool.ExtraInfo.Detail.toolOutputs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolEnv', full_name='tool.ExtraInfo.Detail.toolEnv', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputDefs', full_name='tool.ExtraInfo.Detail.outputDefs', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableDefs', full_name='tool.ExtraInfo.Detail.tableDefs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribers', full_name='tool.ExtraInfo.Detail.subscribers', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EXTRAINFO_DETAIL_OUTPUTDEFS, _EXTRAINFO_DETAIL_TABLEDEFS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=848,
)
_EXTRAINFO = _descriptor.Descriptor(
name='ExtraInfo',
full_name='tool.ExtraInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='toolName', full_name='tool.ExtraInfo.toolName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execMode', full_name='tool.ExtraInfo.execMode', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='tool.ExtraInfo.outputs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='origin', full_name='tool.ExtraInfo.origin', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trigger', full_name='tool.ExtraInfo.trigger', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='tool.ExtraInfo.detail', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='toolId', full_name='tool.ExtraInfo.toolId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='execUser', full_name='tool.ExtraInfo.execUser', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='tool.ExtraInfo.inputs', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_EXTRAINFO_DETAIL, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=94,
serialized_end=848,
)
_EXTRAINFO_DETAIL_OUTPUTDEFS.containing_type = _EXTRAINFO_DETAIL
_EXTRAINFO_DETAIL_TABLEDEFS_DIMENSIONS.containing_type = _EXTRAINFO_DETAIL_TABLEDEFS
_EXTRAINFO_DETAIL_TABLEDEFS_COLUMNS.containing_type = _EXTRAINFO_DETAIL_TABLEDEFS
_EXTRAINFO_DETAIL_TABLEDEFS.fields_by_name['dimensions'].message_type = _EXTRAINFO_DETAIL_TABLEDEFS_DIMENSIONS
_EXTRAINFO_DETAIL_TABLEDEFS.fields_by_name['columns'].message_type = _EXTRAINFO_DETAIL_TABLEDEFS_COLUMNS
_EXTRAINFO_DETAIL_TABLEDEFS.containing_type = _EXTRAINFO_DETAIL
_EXTRAINFO_DETAIL.fields_by_name['callback'].message_type = ucpro__sdk_dot_model_dot_tool_dot_callback__pb2._CALLBACK
_EXTRAINFO_DETAIL.fields_by_name['toolOutputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_EXTRAINFO_DETAIL.fields_by_name['toolEnv'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_EXTRAINFO_DETAIL.fields_by_name['outputDefs'].message_type = _EXTRAINFO_DETAIL_OUTPUTDEFS
_EXTRAINFO_DETAIL.fields_by_name['tableDefs'].message_type = _EXTRAINFO_DETAIL_TABLEDEFS
_EXTRAINFO_DETAIL.containing_type = _EXTRAINFO
_EXTRAINFO.fields_by_name['detail'].message_type = _EXTRAINFO_DETAIL
_EXTRAINFO.fields_by_name['inputs'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['ExtraInfo'] = _EXTRAINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ExtraInfo = _reflection.GeneratedProtocolMessageType('ExtraInfo', (_message.Message,), {
'Detail' : _reflection.GeneratedProtocolMessageType('Detail', (_message.Message,), {
'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
'DESCRIPTOR' : _EXTRAINFO_DETAIL_OUTPUTDEFS,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo.Detail.OutputDefs)
})
,
'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {
'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
'DESCRIPTOR' : _EXTRAINFO_DETAIL_TABLEDEFS_DIMENSIONS,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo.Detail.TableDefs.Dimensions)
})
,
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _EXTRAINFO_DETAIL_TABLEDEFS_COLUMNS,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo.Detail.TableDefs.Columns)
})
,
'DESCRIPTOR' : _EXTRAINFO_DETAIL_TABLEDEFS,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo.Detail.TableDefs)
})
,
'DESCRIPTOR' : _EXTRAINFO_DETAIL,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo.Detail)
})
,
'DESCRIPTOR' : _EXTRAINFO,
'__module__' : 'extra_info_pb2'
# @@protoc_insertion_point(class_scope:tool.ExtraInfo)
})
_sym_db.RegisterMessage(ExtraInfo)
_sym_db.RegisterMessage(ExtraInfo.Detail)
_sym_db.RegisterMessage(ExtraInfo.Detail.OutputDefs)
_sym_db.RegisterMessage(ExtraInfo.Detail.TableDefs)
_sym_db.RegisterMessage(ExtraInfo.Detail.TableDefs.Dimensions)
_sym_db.RegisterMessage(ExtraInfo.Detail.TableDefs.Columns)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
f316f1b6031ef6cf0a93f051abf8f7a09d1dc624 | 4823d075d43af119dd65d7031f7611b269d8fab1 | /servo_example.py | 26e91c918e6761ffbdd05f41f1991eff78d6f518 | [] | no_license | mpdevilleres/quadcopter | 0588039659ca4b1c9b282ee1d02f044709608712 | 8c91e722b08be6f85a6ef6d5f2a7d58ed4cef828 | refs/heads/master | 2021-01-10T06:11:46.903986 | 2015-12-02T14:19:07 | 2015-12-02T14:19:07 | 47,264,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | #!/usr/bin/python
from pcA9685 import PWM
import time
# ===========================================================================
# Example Code
# ===========================================================================
# Initialise the PWM device using the default address
pwm = PWM(0x40)
# Note if you'd like more debug output you can instead run:
#pwm = PWM(0x40, debug=True)
servoMin = 150 # Min pulse length out of 4096
servoMax = 600 # Max pulse length out of 4096
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.setPWM(channel, 0, pulse)
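# Illustrative call for the helper above, kept commented out so the demo loop
# below keeps running unchanged: a 1.5 ms pulse on channel 0 is a typical
# neutral position for hobby servos driven at 60 Hz.
#setServoPulse(0, 1.5)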
pwm.setPWMFreq(60) # Set frequency to 60 Hz
while (True):
  # Change speed of continuous servo on channel 0
pwm.setPWM(0, 0, servoMin)
time.sleep(1)
pwm.setPWM(0, 0, servoMax)
time.sleep(1)
| [
"[email protected]"
] | |
251789d10b58bdaddd1fa7f2b3837d547323ace7 | 6879a8596df6f302c63966a2d27f6b4d11cc9b29 | /abc/problems070/062/a.py | 385a7ee280b6c28a6aea6253a7839fbc1054c3aa | [] | no_license | wkwkgg/atcoder | 41b1e02b88bf7a8291b709306e54cb56cb93e52a | 28a7d4084a4100236510c05a88e50aa0403ac7cd | refs/heads/master | 2020-07-26T03:47:19.460049 | 2020-03-01T18:29:57 | 2020-03-01T18:29:57 | 208,523,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | x, y = map(int, input().split())
A = [1,3,5,7,8,10,12]
B = [4,6,9,11]
ans = "No"
if x in A and y in A:
ans = "Yes"
elif x in B and y in B:
ans = "Yes"
print(ans) | [
"[email protected]"
] | |
dd6bcf1ef05674887d1083e99174ba463f169eb5 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L28/28-47_MD_NVT_rerun/set_1ns_equi_1_m.py | af38002c6274aeca880c96a0e6b883ada530dcf5 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L28/MD_NVT_rerun/ti_one-step/28_47/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_1_m.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1_m.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1_m.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../28-47_merged.prmtop .")
os.system("cp ../0.5_equi_0_3.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] |