Dataset schema (one row per source file; "nullable" marks columns shown with ⌀ in the viewer):

| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | 0–112 entries |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | 1 entry |
| author_id | string | length 1–132 |
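A minimal sketch of streaming a few records from a corpus with this schema, assuming it is published on the Hugging Face Hub and loadable with the `datasets` library; the repository id `"example/source-code-corpus"` is a placeholder, not taken from the table above:

```python
from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset name.
ds = load_dataset("example/source-code-corpus", split="train", streaming=True)

for row in ds.take(3):
    # Each record is one source file plus its repository metadata.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```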
blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb9254defb1dc4344f4dc02075844f7a20a3bc07 | 69729ce2a0d2147b7b52e14008d8fc9960e3c099 | /fast_rl/core/metrics.py | 3233e2fc611672b7ffd218e9a04ec813f65eb1a7 | [
"Apache-2.0"
] | permissive | swelchm/fast-reinforcement-learning | 2f5d5aa51830f774ca0e6814833a736029e88f4d | 9649b6d1bb931c4e4b7200a73b172325a1d8346f | refs/heads/master | 2020-07-29T16:04:10.926035 | 2019-09-18T02:43:50 | 2019-09-18T02:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import torch
from fastai.basic_train import LearnerCallback
from fastai.callback import Callback, is_listy, add_metrics
class EpsilonMetric(LearnerCallback):
_order = -20 # Needs to run before the recorder
def __init__(self, learn):
super().__init__(learn)
self.epsilon = 0
if not hasattr(self.learn.model, 'exploration_strategy'):
raise ValueError('Your model is not using an exploration strategy! Please use epsilon based exploration')
if not hasattr(self.learn.model.exploration_strategy, 'epsilon'):
raise ValueError('Please use epsilon based exploration (should have an epsilon field)')
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['epsilon'])
def on_epoch_end(self, last_metrics, **kwargs):
self.epsilon = self.learn.model.exploration_strategy.epsilon
return add_metrics(last_metrics, [self.epsilon]) | [
"[email protected]"
] | |
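The `EpsilonMetric` callback above hooks into fastai v1's recorder. A usage sketch, assuming a hypothetical `learn` object from fast_rl whose model exposes an epsilon-based `exploration_strategy` (the construction of `learn` itself is elided and illustrative):

```python
from fast_rl.core.metrics import EpsilonMetric

# `learn` is assumed to be a fast_rl learner whose model carries an
# epsilon-based exploration strategy; its creation is elided here.
learn.callbacks.append(EpsilonMetric(learn))
learn.fit(10)  # the recorder now reports an 'epsilon' column per epoch
```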
fb4eb2d3fc2b6b557ef5f486e64a77a51611a0bc | 87a6d7e83a25cb3b1696fb6094fda88858754c19 | /src/review/views.py | a2a5efdfd814e12518e46fac60f1fd21ab2a9492 | [
"BSD-3-Clause"
] | permissive | tegarty/socialrating | 20b45f8eb233fed0b69ae0fd8110cf8a73f1f782 | b80888ee8e637bd0a5517614c78235d563fead2e | refs/heads/master | 2020-04-20T08:42:52.231718 | 2018-12-06T17:57:43 | 2018-12-06T17:57:43 | 168,747,496 | 1 | 0 | BSD-3-Clause | 2019-02-01T19:11:19 | 2019-02-01T19:11:19 | null | UTF-8 | Python | false | false | 4,163 | py | from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django import forms
from django.contrib import messages  # needed by ReviewDeleteView.delete() below
from django.shortcuts import redirect, reverse
from item.mixins import ItemViewMixin
from rating.models import Vote
from team.mixins import TeamViewMixin
from context.models import Context
from .models import Review
class ReviewListView(ItemViewMixin, ListView):
model = Review
paginate_by = 100
template_name = 'review_list.html'
def get_queryset(self):
return super().get_queryset().filter(item=self.item)
class ReviewCreateView(TeamViewMixin, ItemViewMixin, CreateView):
model = Review
template_name = 'review_form.html'
fields = ['headline', 'body', 'context']
def get_context_data(self):
"""
Add Item to the context
"""
context = super().get_context_data()
context['item'] = self.item
return context
def get_form(self, form_class=None):
"""
Add ratings to the form and set initial Context
QuerySet
"""
form = super().get_form(form_class)
for rating in self.item.category.ratings.all():
choices = []
for choice in range(1, rating.max_rating+1):
choices.append((choice, choice))
form.fields["%s_vote" % rating.slug] = forms.TypedChoiceField(
choices=choices,
coerce=int,
widget=forms.widgets.RadioSelect,
required=False,
label='%s: Please vote between 1-%s' % (rating.name, rating.max_rating),
)
form.fields["%s_comment" % rating.slug] = forms.CharField(
label='%s: A short comment for the Vote above' % rating.name,
required=False,
)
form.fields['context'].queryset = Context.objects.filter(team=self.team)
return form
def form_valid(self, form):
"""
First save the new Review,
then save any Votes, Attachments and Tags.
"""
review = form.save(commit=False)
review.item = self.item
review.actor = self.request.user.actor
review.save()
# loop over ratings available for this item,
# saving a new Vote for each as needed
for rating in self.item.category.ratings.all():
votefield = "%s_vote" % rating.slug
commentfield = "%s_comment" % rating.slug
if votefield in form.fields and form.cleaned_data[votefield]:
Vote.objects.create(
review=review,
rating=rating,
vote=form.cleaned_data[votefield],
comment=form.cleaned_data[commentfield] if commentfield in form.cleaned_data else '',
)
return redirect(reverse(
'team:category:item:review:detail',
kwargs={
'team_slug': self.team.slug,
'category_slug': self.item.category.slug,
'item_slug': self.item.slug,
'review_uuid': review.pk
}
))
class ReviewDetailView(ItemViewMixin, DetailView):
model = Review
template_name = 'review_detail.html'
pk_url_kwarg = 'review_uuid'
class ReviewUpdateView(ItemViewMixin, UpdateView):
model = Review
template_name = 'review_form.html'
pk_url_kwarg = 'review_uuid'
fields = ['headline', 'body', 'context']
class ReviewDeleteView(ItemViewMixin, DeleteView):
model = Review
template_name = 'review_delete.html'
pk_url_kwarg = 'review_uuid'
def delete(self, request, *args, **kwargs):
messages.success(self.request, "Review %s has been deleted, along with all Votes that related to it." % self.get_object())
return super().delete(request, *args, **kwargs)
def get_success_url(self):
        # The original referenced a non-existent 'camp' attribute; the URL
        # pattern used elsewhere in this module takes the team slug instead.
        return reverse('team:category:item:detail', kwargs={
            'team_slug': self.team.slug,
            'category_slug': self.item.category.slug,
            'item_slug': self.item.slug,
        })
| [
"[email protected]"
] | |
1d01b17589e954f3dd2578ee3bc07e5bbed380dc | ff99c677aba11e27c252f773b52cd54f5de79279 | /ctt-server/openapi_server/models/project.py | 0220631032783c2d8b7da9e44e5e0a94cbfdbdab | [
"Apache-2.0"
] | permissive | radon-h2020/radon-ctt | b7eeb82f59e36e2a258d0a2ba9cd9483eb3dd247 | 97fcf5e800a0129d24e119b430d94f07ca248ba9 | refs/heads/master | 2023-01-04T23:44:49.611599 | 2021-09-15T15:34:41 | 2021-09-15T15:34:41 | 235,379,642 | 0 | 7 | Apache-2.0 | 2022-12-27T15:56:38 | 2020-01-21T15:48:45 | Python | UTF-8 | Python | false | false | 2,758 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class Project(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, uuid=None, name=None, repository_url=None): # noqa: E501
"""Project - a model defined in OpenAPI
:param uuid: The uuid of this Project. # noqa: E501
:type uuid: str
:param name: The name of this Project. # noqa: E501
:type name: str
:param repository_url: The repository_url of this Project. # noqa: E501
:type repository_url: str
"""
self.openapi_types = {
'uuid': str,
'name': str,
'repository_url': str
}
self.attribute_map = {
'uuid': 'uuid',
'name': 'name',
'repository_url': 'repository_url'
}
self._uuid = uuid
self._name = name
self._repository_url = repository_url
@classmethod
def from_dict(cls, dikt) -> 'Project':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Project of this Project. # noqa: E501
:rtype: Project
"""
return util.deserialize_model(dikt, cls)
@property
def uuid(self):
"""Gets the uuid of this Project.
:return: The uuid of this Project.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this Project.
:param uuid: The uuid of this Project.
:type uuid: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this Project.
:return: The name of this Project.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Project.
:param name: The name of this Project.
:type name: str
"""
self._name = name
@property
def repository_url(self):
"""Gets the repository_url of this Project.
:return: The repository_url of this Project.
:rtype: str
"""
return self._repository_url
@repository_url.setter
def repository_url(self, repository_url):
"""Sets the repository_url of this Project.
:param repository_url: The repository_url of this Project.
:type repository_url: str
"""
self._repository_url = repository_url
| [
"[email protected]"
] | |
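A brief usage sketch of the generated model above (values are illustrative); `from_dict` relies on the package's own `util.deserialize_model` helper seen in the imports:

```python
from openapi_server.models.project import Project

p = Project(uuid="123e4567-e89b-12d3-a456-426614174000",
            name="demo",
            repository_url="https://github.com/example/demo")
p.name = "renamed"  # routed through the @name.setter
assert p.openapi_types["repository_url"] is str
```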
5dc3e5eb54602009e6f8a02450af13bf34566f0c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_workers.py | 925498db6457cf5ab2857092165fbc8709111a52 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._worker import _WORKER
# class header
class _WORKERS(_WORKER):
def __init__(self,):
_WORKER.__init__(self)
self.name = "WORKERS"
self.specie = 'nouns'
self.basic = "worker"
self.jsondata = {}
| [
"[email protected]"
] | |
ebf9b4a30f7ce8099e5020d7dc4df985c9055dc2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2138/60585/257214.py | 6a0f930bf2e12114cdb52468e38c6cf2a97dc12e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | arr=list(map(int,input().strip().split(',')))
k = int(input())
n = len(arr)
isM = False
# Brute force: test every contiguous subarray of length >= 2 for a sum
# divisible by k.
for i in range(0, n - 1):
    temp = arr[i]
    for j in range(i + 1, n):
        temp += arr[j]
        if temp % k == 0:
            isM = True
            break
    if isM:
        break
print(isM) | [
"[email protected]"
] | |
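The record above answers the check by brute force in O(n²): does any contiguous subarray of length ≥ 2 sum to a multiple of k? The standard O(n) alternative tracks prefix sums modulo k — a sketch, assuming k != 0:

```python
def has_divisible_subarray(arr, k):
    # Map each prefix-sum remainder to the earliest index it was seen at.
    # Two prefixes with equal remainder bound a subarray whose sum % k == 0.
    seen = {0: -1}
    total = 0
    for i, x in enumerate(arr):
        total += x
        r = total % k
        if r in seen:
            if i - seen[r] >= 2:  # enforce subarray length >= 2
                return True
        else:
            seen[r] = i
    return False
```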
f5053d55e7dfd80861b7d268d991e8ad2d4ff27e | 780a18c55af7a8744b408e1efd4aaf08a0d3a3e7 | /passbook/api/__init__.py | d5deace33843d920f8be361ce93e1885635aed8c | [] | no_license | squidnee/passbook | 86507c6675122f1b67333f55048eb55f3dff664a | 551de76b95049185820a3fc8729fbc126c423994 | refs/heads/master | 2020-03-22T05:27:31.718897 | 2018-07-21T12:30:32 | 2018-07-21T12:30:32 | 139,567,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | def dict_factory(cursor, row):
    # Build a {column_name: value} mapping from the cursor description.
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d | [
"[email protected]"
] | |
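`dict_factory` matches the `(cursor, row)` signature that Python's built-in `sqlite3` module expects for `Connection.row_factory`; a minimal sketch of wiring it up:

```python
import sqlite3

from passbook.api import dict_factory  # the helper defined above

conn = sqlite3.connect(":memory:")
conn.row_factory = dict_factory  # query results now come back as dicts
conn.execute("CREATE TABLE users (id INTEGER, name TEXT)")
conn.execute("INSERT INTO users VALUES (1, 'ada')")
print(conn.execute("SELECT id, name FROM users").fetchone())
# -> {'id': 1, 'name': 'ada'}
```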
f5a0e695f1a50cbc20de0463e14d5f362bb054ee | 6499d0b71b19fd4416bfd74fa9fd88e3d0b0618a | /king_phisher/client/dialogs/exception.py | 70b21f7817f9aa453b632b5a60c493afdd5eccd9 | [
"BSD-3-Clause"
] | permissive | Meatballs1/king-phisher | dfb0a539a2d0455113b40698f7151521774addb1 | a16b1de055260f6f33d8c1fd0765bd06ffb733c2 | refs/heads/master | 2020-05-20T17:55:30.441239 | 2015-10-15T19:21:22 | 2015-10-15T19:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,018 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/exception.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import platform
import sys
import traceback
from king_phisher import its
from king_phisher import utilities
from king_phisher import version
from king_phisher.client import gui_utilities
from king_phisher.third_party import AdvancedHTTPServer
from gi.repository import Gtk
__all__ = ['ExceptionDialog']
EXCEPTION_DETAILS_TEMPLATE = """
Error Type: {error_type}
Error Details: {error_details}
Error UID: {error_uid}
RPC Error: {rpc_error_details}
King Phisher Version: {king_phisher_version}
Platform Version: {platform_version}
Python Version: {python_version}
Gtk Version: {gtk_version}
{stack_trace}
"""
class ExceptionDialog(gui_utilities.GladeGObject):
"""
Display a dialog which shows an error message for a python exception.
The dialog includes useful details for reporting and debugging the exception
which occurred.
"""
gobject_ids = ('linkbutton_github_issues',)
top_gobject = 'dialog'
def __init__(self, application, exc_info=None, error_uid=None):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
:param tuple exc_info: The exception information as provided by :py:func:`sys.exc_info`.
:param str error_uid: An optional unique identifier for the exception that can be provided for tracking purposes.
"""
super(ExceptionDialog, self).__init__(application)
self.error_description = self.gtk_builder_get('label_error_description')
self.error_details = self.gtk_builder_get('textview_error_details')
self.exc_info = exc_info or sys.exc_info()
self.error_uid = error_uid
linkbutton = self.gobjects['linkbutton_github_issues']
linkbutton.set_label('Project Issue Tracker')
linkbutton.connect('activate-link', lambda _: utilities.open_uri(linkbutton.get_property('uri')))
def interact(self):
exc_type, exc_value, exc_traceback = self.exc_info
pversion = 'UNKNOWN'
if its.on_linux:
pversion = 'Linux: ' + ' '.join(platform.linux_distribution())
elif its.on_windows:
pversion = 'Windows: ' + ' '.join(platform.win32_ver())
if its.frozen:
pversion += ' (Frozen=True)'
else:
pversion += ' (Frozen=False)'
exc_name = "{0}.{1}".format(exc_type.__module__, exc_type.__name__)
rpc_error_details = 'N/A (Not a remote RPC error)'
if isinstance(exc_value, AdvancedHTTPServer.AdvancedHTTPServerRPCError) and exc_value.is_remote_exception:
rpc_error_details = "Name: {0}".format(exc_value.remote_exception['name'])
if exc_value.remote_exception.get('message'):
rpc_error_details += " Message: '{0}'".format(exc_value.remote_exception['message'])
details = EXCEPTION_DETAILS_TEMPLATE.format(
error_details=repr(exc_value),
error_type=exc_name,
error_uid=(self.error_uid or 'N/A'),
rpc_error_details=rpc_error_details,
king_phisher_version=version.version,
platform_version=pversion,
python_version="{0}.{1}.{2}".format(*sys.version_info),
gtk_version="{0}.{1}.{2}".format(Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()),
stack_trace=''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
)
details = details.strip() + '\n'
if exc_name.startswith('king_phisher.third_party.'):
exc_name = exc_name[25:]
self.error_description.set_text("Error type: {0}".format(exc_name))
self.error_details.get_buffer().set_text(details)
self.dialog.show_all()
self.dialog.run()
self.dialog.destroy()
return
| [
"[email protected]"
] | |
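A usage sketch for the dialog above; `app` stands in for the running client's `Gtk.Application` instance and `risky_operation` is a hypothetical call:

```python
import sys

try:
    risky_operation()  # hypothetical: any client action that may raise
except Exception:
    # Build the dialog from the live exception info and block until closed.
    ExceptionDialog(app, exc_info=sys.exc_info()).interact()
```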
7d4ad877ef0674f0248f5b402f5ca2ec0fbca0b5 | 83932f1d956a6b7818c6e58a31205e6e26f2fb5c | /0x11-python-network_1/2-post_email.py | ae506265afc23c32cdffd2a0428200f828ddb688 | [] | no_license | Nzparra/holbertonschool-higher_level_programming | a17834b8239e477a7284119acac69da0e7d7261e | 6cf7a44a10db7a10be3c3c02cbacfea9a7b897f2 | refs/heads/master | 2020-09-29T02:45:04.458850 | 2020-05-14T21:12:45 | 2020-05-14T21:12:45 | 226,930,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!/usr/bin/python3
""" sends a POST request to the passed URL with the email as a parameter """
from urllib import request, parse
import sys
if __name__ == "__main__":
req = parse.urlencode({'email': sys.argv[2]})
req = req.encode('ascii')
reqst = request.Request(sys.argv[1], req)
with request.urlopen(reqst) as response:
html = response.read()
print(html.decode('utf-8'))
| [
"[email protected]"
] | |
99b9f127259fa1b88da83c73c1b13ae51336a33c | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /old/s3c/explosion.py | d214948ae8a509ca9fcf7d2f5cbf3d133373c71a | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | #attempting to create an excitation-based explosion (I don't think it's possible)
hat_h = [1., 1.]
alpha = [.01 , .01]
W = [ -1., 1. ]
beta = 1
w = [ beta * (weight ** 2) for weight in W ]
init_hat_s = [ 1., 1.5 ]
hat_s = [ val for val in init_hat_s ]
#like mu in our current model, except that it isn't gated by h
always_on_mu = [ 0., 0. ]
v = 1
def update():
rval = []
for i in xrange(2):
scaleback = alpha[i] + w[i]
mean_term = always_on_mu[i]
data_term = beta * v * W[i]
j = 1 - i
interaction_term = - W[i] * W[j] * beta * hat_h[j] * hat_s[j]
hat_s_i = (mean_term + data_term + interaction_term) / scaleback
rval.append(hat_s_i)
return rval
for iter in xrange(100):
print hat_s
hat_s = update()
| [
"[email protected]"
] | |
6b8caec8ea96cb715c6ece897689627a1d8ec789 | eb5c7ddab43fbb24b91c29a4560749a2386490fb | /tensorflow/contrib/learn/python/learn/estimators/head_test.py | 99bdcc7d8fe281c928257d70ff31d4db8ac7f386 | [
"Apache-2.0"
] | permissive | ivankreso/tensorflow | 8fb3f599564a6afe8c054bf05ea5a63aa8f4ec3d | 38d8238edb041b9fbf3b2762b09df450937a5b40 | refs/heads/master | 2021-01-21T16:27:58.859552 | 2017-01-31T10:33:22 | 2017-01-31T10:33:22 | 80,117,846 | 0 | 1 | null | 2017-01-26T13:42:27 | 2017-01-26T13:42:27 | null | UTF-8 | Python | false | false | 41,317 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# pylint: disable=g-bad-todo,g-import-not-at-top
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-bad-todo,g-import-not-at-top
def _assert_variables(test_case,
expected_global=None,
expected_model=None,
expected_trainable=None):
test_case.assertItemsEqual(
tuple([] if expected_global is None else expected_global),
tuple([k.name for k in variables.global_variables()]))
test_case.assertItemsEqual(
tuple([] if expected_model is None else expected_model),
tuple([k.name for k in variables.model_variables()]))
test_case.assertItemsEqual(
tuple([] if expected_trainable is None else expected_trainable),
tuple([k.name for k in variables.trainable_variables()]))
def _assert_no_variables(test_case):
_assert_variables(test_case)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
model_fn_ops):
test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
for k in six.iterkeys(expected_eval_metrics):
test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
variables.initialize_local_variables().run()
for key, expected_value in six.iteritems(expected_eval_metrics):
value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
update = update_tensor.eval()
test_case.assertAlmostEqual(
expected_value,
update,
places=4,
msg="%s: update, expected %s, got %s." % (key, expected_value, update))
value = value_tensor.eval()
test_case.assertAlmostEqual(
expected_value,
value,
places=4,
msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
actual_tags = []
for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):
summ = summary_pb2.Summary()
summ.ParseFromString(summary_op.eval())
actual_tags.append(summ.value[0].tag)
test_case.assertItemsEqual(expected_tags or [], actual_tags)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
class RegressionModelHeadTest(test.TestCase):
# TODO(zakaria): test multilabel regression.
def testRegressionWithLogits(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithInvalidLogits(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1., 1.), (1., 1.), (3., 1.)))
def testRegressionWithLogitsInput(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits_input=((0., 0.), (0., 0.), (0., 0.)))
w = ("logits/weights:0", "logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)
def testRegressionWithLogitsAndLogitsInput(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits_input=((0., 0.), (0., 0.), (0., 0.)),
logits=((1.,), (1.,), (3.,)))
def testRegressionEvalMode(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((1.,), (1.,), (3.,)),
mode=model_fn.ModeKeys.EVAL,
train_op_fn=_noop_train_op,
logits=((0.,), (1.,), (1.,)))
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithLabelName(self):
label_name = "my_label"
head = head_lib._regression_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels={label_name: ((0.,), (1.,), (1.,))},
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithWeights(self):
head = head_lib._regression_head(weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = ((2.,), (5.,), (0.,))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weights},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
model_fn_ops)
def testRegressionWithCenteredBias(self):
head = head_lib._regression_head(enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
_assert_variables(
self,
expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",),
expected_trainable=("centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionErrorInSparseTensorLabels(self):
head = head_lib._regression_head()
with ops.Graph().as_default():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (2, 0)),
values=(0., 1., 1.),
dense_shape=(3, 1))
with self.assertRaisesRegexp(ValueError,
"SparseTensor is not supported as labels."):
head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
class MultiLabelModelHeadTest(test.TestCase):
def setUp(self):
self._logits = ((1., 0., 0.),)
self._labels = ((0, 0, 1),)
def _expected_eval_metrics(self, expected_loss):
return {
"accuracy": 1. / 3,
"auc": 1. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": self._labels[0][0],
"labels/actual_label_mean/class1": self._labels[0][1],
"labels/actual_label_mean/class2": self._labels[0][2],
"labels/logits_mean/class0": self._logits[0][0],
"labels/logits_mean/class1": self._logits[0][1],
"labels/logits_mean/class2": self._logits[0][2],
"labels/prediction_mean/class0": self._logits[0][0],
"labels/prediction_mean/class1": self._logits[0][1],
"labels/prediction_mean/class2": self._logits[0][2],
"labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
"labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
"labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
}
def testMultiLabelWithLogits(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithInvalidLogits(self):
head = head_lib._multi_label_head(n_classes=len(self._labels[0]) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
def testMultiLabelWithLogitsInput(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.),))
w = ("logits/weights:0", "logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = .69314718
_assert_metrics(self, expected_loss, {
"accuracy": 2. / 3,
"auc": 2. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": self._labels[0][0],
"labels/actual_label_mean/class1": self._labels[0][1],
"labels/actual_label_mean/class2": self._labels[0][2],
"labels/logits_mean/class0": 0.,
"labels/logits_mean/class1": 0.,
"labels/logits_mean/class2": 0.,
"labels/prediction_mean/class0": 0.,
"labels/prediction_mean/class1": 0.,
"labels/prediction_mean/class2": 0.,
"labels/probability_mean/class0": .5,
"labels/probability_mean/class1": .5,
"labels/probability_mean/class2": .5,
}, model_fn_ops)
def testMultiLabelWithLogitsAndLogitsInput(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.),), logits=self._logits)
def testMultiLabelEvalMode(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
logits=self._logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithLabelName(self):
n_classes = 3
label_name = "my_label"
head = head_lib._multi_label_head(
n_classes=n_classes,
label_name=label_name,
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, {label_name: self._labels}, model_fn.ModeKeys.TRAIN,
_noop_train_op, logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithWeight(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes,
weight_column_name="label_weight",
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": .1},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, .089985214,
self._expected_eval_metrics(2.69956), model_fn_ops)
def testMultiLabelWithCenteredBias(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes,
enable_centered_bias=True,
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
_assert_variables(
self,
expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",),
expected_trainable=("centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, (
"loss", "centered_bias/bias_0", "centered_bias/bias_1",
"centered_bias/bias_2"
))
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
class BinaryClassificationModelHeadTest(test.TestCase):
def setUp(self):
self._logits = ((1.,), (1.,))
self._labels = ((1.,), (0.,))
def _expected_eval_metrics(self, expected_loss):
label_mean = np.mean(self._labels)
return {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .731059, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 1. / 2,
"recall/positive_threshold_0.500000_mean": 1. / 1,
}
def testBinaryClassificationWithLogits(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithInvalidLogits(self):
head = head_lib._multi_class_head(n_classes=len(self._labels) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
def testBinaryClassificationWithLogitsInput(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.), (0., 0.)))
w = ("logits/weights:0", "logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = .69314718
label_mean = np.mean(self._labels)
_assert_metrics(self, expected_loss, {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .5, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 0. / 2,
"recall/positive_threshold_0.500000_mean": 0. / 1,
}, model_fn_ops)
def testBinaryClassificationWithLogitsAndLogitsInput(self):
head = head_lib._multi_class_head(n_classes=2)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.), (0., 0.)), logits=self._logits)
def testBinaryClassificationEvalMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
logits=self._logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationInferMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.INFER, _noop_train_op,
logits=self._logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
self.assertEquals(1, len(model_fn_ops.output_alternatives))
self.assertEquals(constants.ProblemType.LOGISTIC_REGRESSION,
model_fn_ops.output_alternatives[None][0])
def testErrorInSparseTensorLabels(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 1),
dense_shape=(3, 1))
with self.assertRaisesRegexp(ValueError,
"SparseTensor is not supported as labels."):
head.create_model_fn_ops(
{},
labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=((1.,), (1.,), (3.,)))
def testBinaryClassificationWithLabelName(self):
label_name = "my_label"
head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{},
labels={label_name: self._labels},
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithWeights(self):
n_classes = 2
head = head_lib._multi_class_head(
n_classes=n_classes, weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = ((1.,), (0.,))
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weights},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_total_loss = .31326166
_assert_metrics(
self,
expected_total_loss / len(weights),
{
"accuracy": 1. / 1,
"accuracy/baseline_label_mean": 1. / 1,
"accuracy/threshold_0.500000_mean": 1. / 1,
"auc": 0. / 1,
"labels/actual_label_mean": 1. / 1,
"labels/prediction_mean": .731059, # softmax
# TODO(ptucker): Is this the correct eval loss, sum not average?
"loss": expected_total_loss,
"precision/positive_threshold_0.500000_mean": 1. / 1,
"recall/positive_threshold_0.500000_mean": 1. / 1,
},
model_fn_ops)
def testBinaryClassificationWithCenteredBias(self):
head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
_assert_variables(
self,
expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",),
expected_trainable=("centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassModelHeadTest(test.TestCase):
def setUp(self):
self._logits = ((1., 0., 0.),)
self._labels = (2,)
def _expected_eval_metrics(self, expected_loss):
return {
"accuracy": 0.,
"auc": 1. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": 0. / 1,
"labels/actual_label_mean/class1": 0. / 1,
"labels/actual_label_mean/class2": 1. / 1,
"labels/logits_mean/class0": self._logits[0][0],
"labels/logits_mean/class1": self._logits[0][1],
"labels/logits_mean/class2": self._logits[0][2],
"labels/prediction_mean/class0": self._logits[0][0],
"labels/prediction_mean/class1": self._logits[0][1],
"labels/prediction_mean/class2": self._logits[0][2],
"labels/probability_mean/class0": 0.576117, # softmax
"labels/probability_mean/class1": 0.211942, # softmax
"labels/probability_mean/class2": 0.211942, # softmax
}
def testMultiClassWithLogits(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiClassWithInvalidLogits(self):
head = head_lib._multi_class_head(n_classes=len(self._logits[0]) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=self._logits)
def testMultiClassWithLogitsInput(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.),))
w = ("logits/weights:0", "logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = 1.0986123
_assert_metrics(self, expected_loss, {
"accuracy": 0.,
"auc": 2. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": 0. / 1,
"labels/actual_label_mean/class1": 0. / 1,
"labels/actual_label_mean/class2": 1. / 1,
"labels/logits_mean/class0": 0.,
"labels/logits_mean/class1": 0.,
"labels/logits_mean/class2": 0.,
"labels/prediction_mean/class0": 1.,
"labels/prediction_mean/class1": 0.,
"labels/prediction_mean/class2": 0.,
"labels/probability_mean/class0": 0.333333, # softmax
"labels/probability_mean/class1": 0.333333, # softmax
"labels/probability_mean/class2": 0.333333, # softmax
}, model_fn_ops)
def testMultiClassWithLogitsAndLogitsInput(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits_input=((0., 0.),), logits=self._logits)
def testMultiClassEvalMode(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
logits=self._logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiClassWithWeight(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes,
weight_column_name="label_weight",
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
weight = .1
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weight},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss * weight,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testInvalidNClasses(self):
for n_classes in (None, -1, 0, 1):
with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
head_lib._multi_class_head(n_classes=n_classes)
class BinarySvmModelHeadTest(test.TestCase):
def setUp(self):
# Prediction for first example is in the right side of the hyperplane
# (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
# incurred by this example. The 2nd prediction is outside the margin so it
# incurs no loss at all.
self._predictions = ((-.5,), (1.2,))
self._labels = (0, 1)
self._expected_losses = (.5, 0.)
def testBinarySVMWithLogits(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
self._labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=self._predictions)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithInvalidLogits(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
logits=np.ones((2, 2)))
def testBinarySVMWithLogitsInput(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
self._labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits_input=((0., 0.), (0., 0.)))
w = ("logits/weights:0", "logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = 1.
_assert_metrics(self, expected_loss, {
"accuracy": .5,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLogitsAndLogitsInput(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{},
self._labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits_input=((0., 0.), (0., 0.)),
logits=self._predictions)
def testBinarySVMEvalMode(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
self._labels,
model_fn.ModeKeys.EVAL,
_noop_train_op,
logits=self._predictions)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLabelName(self):
label_name = "my_label"
head = head_lib._binary_svm_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
{label_name: self._labels},
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=self._predictions)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithWeights(self):
head = head_lib._binary_svm_head(weight_column_name="weights")
with ops.Graph().as_default(), session.Session():
weights = (7., 11.)
model_fn_ops = head.create_model_fn_ops(
features={"weights": weights},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._predictions)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_weighted_sum = np.sum(
np.multiply(weights, self._expected_losses))
_assert_metrics(self, expected_weighted_sum / len(weights), {
"accuracy": 1.,
"loss": expected_weighted_sum / np.sum(weights),
}, model_fn_ops)
def testBinarySVMWithCenteredBias(self):
head = head_lib._binary_svm_head(enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
self._labels,
model_fn.ModeKeys.TRAIN,
_noop_train_op,
logits=self._predictions)
_assert_variables(
self,
expected_global=(
"centered_bias_weight:0",
"centered_bias_weight/Adagrad:0",),
expected_trainable=("centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
class MultiHeadTest(test.TestCase):
def testTrain_withNoHeadWeights(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNotNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
with session.Session() as sess:
self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
def testTrain_withHeadWeights(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNotNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
with session.Session() as sess:
self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)
def testInfer(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.INFER,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
self.assertEquals(2, len(model_fn_ops.output_alternatives))
# Tests predictions keys.
pred_keys = model_fn_ops.predictions.keys()
self.assertIn(
("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
self.assertIn(
("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
self.assertIn(
("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
self.assertIn(
("head2", prediction_key.PredictionKey.CLASSES), pred_keys)
# Tests output alternative.
out_alts = model_fn_ops.output_alternatives
self.assertEquals(constants.ProblemType.CLASSIFICATION,
out_alts["head1"][0])
self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
out_alts["head1"][1].keys())
self.assertIn(
prediction_key.PredictionKey.CLASSES, out_alts["head1"][1].keys())
self.assertEquals(constants.ProblemType.CLASSIFICATION,
out_alts["head2"][0])
self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
out_alts["head2"][1].keys())
self.assertIn(
prediction_key.PredictionKey.CLASSES, out_alts["head2"][1].keys())
def testEval(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.EVAL,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
self.assertIsNotNone(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
metric_ops = model_fn_ops.eval_metric_ops
# Tests eval keys.
self.assertIn("accuracy/head1", metric_ops.keys())
self.assertIn("accuracy/head2", metric_ops.keys())
def _noop_train_op(unused_loss):
return control_flow_ops.no_op()
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
42de88eb553c0e4e996822b8763fa6c13507faa7 | e5eeb6d9e7c2d7a53f864f8b9df7ca0cb79932ef | /sa/profiles/Alstec/MSPU/__init__.py | b7d0c1ced7d78897c77243798ea9274c7900e37e | [
"BSD-3-Clause"
] | permissive | 0pt1on/noc | aa583a6684f8299467c665e303f7ffa47ad6b88a | 4eb26dd44002a0a4a562973815567237d979cab5 | refs/heads/master | 2020-06-20T08:51:11.653330 | 2019-07-12T07:13:45 | 2019-07-12T07:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: Alstec
# OS: MSPU
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Alstec.MSPU"
pattern_prompt = r"^\S+\$> "
pattern_more = r"^--More-- or \(q\)uit$"
pattern_syntax_error = r"\^ error"
command_exit = "exit"
| [
"[email protected]"
] | |
13a4e0a500494230f0b097836ef8e1748b2c0f01 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /tools/grit/grit/format/resource_map_unittest.py | ecc997a180675ab38dc887c768b9bb3b395cddb7 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 12,456 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.resource_map'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import StringIO
import unittest
from grit import grd_reader
from grit import util
from grit.format import resource_map
class FormatResourceMapUnittest(unittest.TestCase):
def testFormatResourceMap(self):
grd = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir=".">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="menu" name="IDC_KLONKMENU"
file="grit\\testdata\\klonk.rc" encoding="utf-16" />
</structures>
<includes first_id="10000">
<include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
<if expr="False">
<include type="foo" file="def" name="IDS_MISSING" />
</if>
<if expr="lang != 'es'">
<include type="foo" file="ghi" name="IDS_LANGUAGESPECIFIC" />
</if>
<if expr="lang == 'es'">
<include type="foo" file="jkl" name="IDS_LANGUAGESPECIFIC" />
</if>
<include type="foo" file="mno" name="IDS_THIRDPRESENT" />
</includes>
</release>
</grit>'''), util.PathFromRoot('.'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDC_KLONKMENU", IDC_KLONKMENU},
{"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
{"IDS_MISSING", IDS_MISSING},
{"IDS_LANGUAGESPECIFIC", IDS_LANGUAGESPECIFIC},
{"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"grit/testdata/klonk.rc", IDC_KLONKMENU},
{"abc", IDS_FIRSTPRESENT},
{"def", IDS_MISSING},
{"ghi", IDS_LANGUAGESPECIFIC},
{"jkl", IDS_LANGUAGESPECIFIC},
{"mno", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
def testFormatResourceMapWithOutputAllEqualsFalseForStructures(self):
grd = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir="." output_all_resource_defines="false">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
<output type="resource_map_source"
filename="the_resource_map_header.cc" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="chrome_scaled_image" name="IDR_KLONKMENU"
file="foo.png" />
<if expr="False">
<structure type="chrome_scaled_image" name="IDR_MISSING"
file="bar.png" />
</if>
<if expr="True">
<structure type="chrome_scaled_image" name="IDR_BLOB"
file="blob.png" />
</if>
<if expr="True">
<then>
<structure type="chrome_scaled_image" name="IDR_METEOR"
file="meteor.png" />
</then>
<else>
<structure type="chrome_scaled_image" name="IDR_METEOR"
file="roetem.png" />
</else>
</if>
<if expr="False">
<structure type="chrome_scaled_image" name="IDR_LAST"
file="zyx.png" />
</if>
<if expr="True">
<structure type="chrome_scaled_image" name="IDR_LAST"
file="xyz.png" />
</if>
</structures>
</release>
</grit>'''), util.PathFromRoot('.'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDR_KLONKMENU", IDR_KLONKMENU},
{"IDR_BLOB", IDR_BLOB},
{"IDR_METEOR", IDR_METEOR},
{"IDR_LAST", IDR_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
def testFormatResourceMapWithOutputAllEqualsFalseForIncludes(self):
grd = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir="." output_all_resource_defines="false">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="menu" name="IDC_KLONKMENU"
file="grit\\testdata\\klonk.rc" encoding="utf-16" />
</structures>
<includes first_id="10000">
<include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
<if expr="False">
<include type="foo" file="def" name="IDS_MISSING" />
</if>
<include type="foo" file="mno" name="IDS_THIRDPRESENT" />
<if expr="True">
<include type="foo" file="blob" name="IDS_BLOB" />
</if>
<if expr="True">
<then>
<include type="foo" file="meteor" name="IDS_METEOR" />
</then>
<else>
<include type="foo" file="roetem" name="IDS_METEOR" />
</else>
</if>
<if expr="False">
<include type="foo" file="zyx" name="IDS_LAST" />
</if>
<if expr="True">
<include type="foo" file="xyz" name="IDS_LAST" />
</if>
</includes>
</release>
</grit>'''), util.PathFromRoot('.'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDC_KLONKMENU", IDC_KLONKMENU},
{"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
{"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
{"IDS_BLOB", IDS_BLOB},
{"IDS_METEOR", IDS_METEOR},
{"IDS_LAST", IDS_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"grit/testdata/klonk.rc", IDC_KLONKMENU},
{"abc", IDS_FIRSTPRESENT},
{"mno", IDS_THIRDPRESENT},
{"blob", IDS_BLOB},
{"meteor", IDS_METEOR},
{"xyz", IDS_LAST},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
def testFormatStringResourceMap(self):
grd = grd_reader.Parse(StringIO.StringIO(
'''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir=".">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header" filename="the_rc_map_header.h" />
<output type="resource_map_source" filename="the_rc_map_source.cc" />
</outputs>
<release seq="1" allow_pseudo="false">
<messages fallback_to_english="true">
<message name="IDS_PRODUCT_NAME" desc="The application name">
Application
</message>
<if expr="True">
<message name="IDS_DEFAULT_TAB_TITLE_TITLE_CASE"
desc="In Title Case: The default title in a tab.">
New Tab
</message>
</if>
<if expr="False">
<message name="IDS_DEFAULT_TAB_TITLE"
desc="The default title in a tab.">
New tab
</message>
</if>
</messages>
</release>
</grit>'''), util.PathFromRoot('.'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
output = util.StripBlankLinesAndComments(''.join(
resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
self.assertEqual('''\
#include "the_rc_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDS_PRODUCT_NAME", IDS_PRODUCT_NAME},
{"IDS_DEFAULT_TAB_TITLE_TITLE_CASE", IDS_DEFAULT_TAB_TITLE_TITLE_CASE},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5360f0f0d9b911bb3033292064920cc4edcb718e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/urllib3/util/__init__.py | 130a48f4f4e13e706d68fa3f49aa7081eb6997c7 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e916a7e28015232e340aefe810f5a7355f5bc05e6b5f1e86d43519ee87a18cf6
size 1044
| [
"[email protected]"
] | |
a8fa9c19001403543360c111212013e80ce6d390 | 45799ccc3a16c785ab3c65f3296d66f8463590dc | /docs/_downloads/7dfb273e58ce9eea02e428696e9a9672/q108.py | de3417bd434085db9999cb7596c5a4dfcfe82b2f | [
"MIT"
] | permissive | odys-z/hello | 9d29b7af68ea8c490b43994cf16d75c0e8ace08e | fedd0aec7273f3170aa77316d0d5f317cc18a979 | refs/heads/master | 2023-08-19T03:25:58.684050 | 2023-08-18T08:07:27 | 2023-08-18T08:07:27 | 154,006,292 | 0 | 0 | MIT | 2023-04-18T22:50:56 | 2018-10-21T12:34:12 | C++ | UTF-8 | Python | false | false | 1,496 | py | '''
108. Convert Sorted Array to Binary Search Tree
https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
Given an integer array nums where the elements are sorted in ascending
order, convert it to a height-balanced binary search tree.
A height-balanced binary tree is a binary tree in which the depth of
the two subtrees of every node never differs by more than one.
Created on 17 Apr 2021
@author: Odys Zhou
'''
from unittest import TestCase
from typing import List
from utils.treehelper2 import TreeNode, list2tree
# Definition for a binary tree node.
class Solution:
'''
64.58%
'''
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
'''
nums: sorted
'''
l, r = 0, len(nums) - 1
def buildTree(lix, rix) -> 'root':
'''
0, 0 0, 1 0, 2 0, 3 0, 4
0 0 1 1 2
'''
if lix > rix: return None
m = (lix + rix) // 2
root = TreeNode(nums[m])
root.left = buildTree(lix, m-1)
root.right = buildTree(m+1, rix)
return root
return buildTree(l, r)
if __name__ == "__main__":
t = TestCase()
s = Solution()
    res = s.sortedArrayToBST([-10, -3, 0, 5, 9]).print()
    t.assertTrue(list2tree([0, -3, 9, -10, None, 5]).print() == res or
                 list2tree([0, -10, 5, None, -3, None, 9]).print() == res)
print('q108 OK!')
| [
"[email protected]"
] | |
72228121b096510616532a4edb9408df229e04ab | 5b9485c4ad9db15ff3e535085092fb45057f7364 | /src/nuxeo/javascript/cpsskins/tests/functional/treeview/browser.py | 6162bef9cd7e0d77dbb40da6b1080e30ac097356 | [
"ZPL-2.1"
] | permissive | nuxeo-cps/zope3--nuxeo.javascript | 06109541949c1e612b232efeddec3aa04ecb7d84 | 3ac03c8c46daf75ae7b3ff2fba308cba8caff245 | refs/heads/main | 2023-01-24T06:54:13.659442 | 2009-12-22T09:24:26 | 2009-12-22T09:24:26 | 317,995,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py |
from urllib import unquote
from zope.app.publisher.browser import BrowserView
from cpsskins import minjson as json
tree_data = {
'items': [
{'id': '1', 'title': 'item 1', 'depth': 1, 'type': 'inner'},
{'id': '2', 'title': 'item 2', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '3', 'title': 'item 3', 'depth': 2, 'type': 'leaf'},
{'id': '4', 'title': 'item 4', 'depth': 2, 'type': 'inner'},
{'id': '5', 'title': 'item 5', 'depth': 3, 'type': 'leaf',
'position': 'last'},
{'id': '6', 'title': 'item 6', 'depth': 1, 'type': 'inner'},
{'id': '7', 'title': 'item 7', 'depth': 2, 'type': 'inner',
'empty': True},
{'id': '8', 'title': 'item 8', 'depth': 2, 'type': 'leaf',
'position': 'last'},
]
}
MAX_DEPTH = 10
class Views(BrowserView):
def getTreeData(self):
local_data = self._getLocalStorageData(1)
if local_data is None:
local_data = {}
tree_state = local_data.get('state', {})
filtered_items = []
filter_depth = MAX_DEPTH
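        # Walk the flat item list in document order: when an inner node is not
        # marked 'open' in the stored tree state, skip every deeper item until
        # the depth climbs back to that node's level.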
for item in tree_data['items']:
depth = item['depth']
if depth > filter_depth:
continue
else:
filter_depth = MAX_DEPTH
if item['type'] == 'inner':
state = tree_state.get(item['id'])
if state != 'open':
filter_depth = depth
filtered_items.append(item)
self.request.response.setHeader('content-type', 'text/x-json')
return json.write({'items': filtered_items})
def setTreeData(self, data):
return self.getTreeData()
    # TODO: move this to an API
def _getLocalStorageData(self, id):
value = self.request.cookies.get('cpsskins_local_storage_%s' % id)
if value is not None:
return json.read(unquote(value))
return None
| [
"devnull@localhost"
] | devnull@localhost |
9b80d82c0f685c41a834444780cd8207ebb71348 | 9f9b19a26ed931207878364d395e47a3d986751b | /dmam/migrations/0006_auto_20181022_2230.py | 6b6b882350caefc90a3b9690311255482d54076e | [] | no_license | lishulincug/waterwork | 6697f5264dc880a92d9b91e91b703eda3818d7a3 | 690fb344e7f271a3ded66f0cdf4c9161811ed1f4 | refs/heads/master | 2020-09-09T13:19:21.301200 | 2019-07-25T09:37:04 | 2019-07-25T09:37:04 | 221,456,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 2.0 on 2018-10-22 22:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dmam', '0005_auto_20181022_1555'),
]
operations = [
migrations.AlterField(
model_name='station',
name='dmaid',
field=models.ManyToManyField(to='dmam.DMABaseinfo'),
),
]
| [
"[email protected]"
] | |
52875bb3e78a1bda06ee3642b5d4c9156ba64ac9 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/tepitopepan/mat/DRB1_0480_9.py | f6f1ff603742b33032918eb4444a0a117596dd3a | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,169 | py | DRB1_0480_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -0.33143, 'D': -0.36514, 'G': -2.0591, 'F': 0.36585, 'I': 0.95537, 'H': -0.2281, 'K': -1.4856, 'M': 1.4546, 'L': 0.86213, 'N': -0.17568, 'Q': 0.23244, 'P': -1.3948, 'S': -0.23788, 'R': -2.0746, 'T': -0.3733, 'W': -0.34023, 'V': -0.00016686, 'Y': -0.77543}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.3798, 'D': -1.1245, 'G': -1.483, 'F': -1.1051, 'I': -0.084718, 'H': -1.3744, 'K': -2.3275, 'M': -1.0952, 'L': -1.0742, 'N': 1.263, 'Q': -1.4765, 'P': 0.0093511, 'S': 0.97855, 'R': -2.3322, 'T': 1.8771, 'W': -1.0072, 'V': 0.90462, 'Y': -1.4966}, 6: {'A': 0.0, 'E': -0.70382, 'D': -1.2017, 'G': -1.1093, 'F': -0.366, 'I': 0.042256, 'H': -0.31036, 'K': -0.86425, 'M': 0.6483, 'L': 0.42064, 'N': 0.34102, 'Q': -0.25364, 'P': -0.85722, 'S': -0.020483, 'R': -0.81007, 'T': -0.15788, 'W': -0.83077, 'V': -0.16921, 'Y': -0.51701}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.29158, 'D': -0.43532, 'G': -0.37531, 'F': -0.55063, 'I': -0.19134, 'H': 0.63181, 'K': -0.62119, 'M': 0.13433, 'L': -0.5213, 'N': -0.46339, 'Q': 0.85581, 'P': -1.0975, 'S': 0.70702, 'R': -0.97291, 'T': -0.77221, 'W': -0.58512, 'V': -0.54344, 'Y': -0.46934}} | [
"[email protected]"
] | |
a9dbd0cdcd940053789e278ea1754c00d7bcc81d | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/diff/DiffProgramManager.pyi | a0a10fe1025a5187fd858ff298fe56649ee228e8 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | pyi | from typing import List
from typing import overload
import ghidra.app.plugin.core.diff
import ghidra.app.services
import ghidra.framework.model
import ghidra.program.model.address
import ghidra.program.model.listing
import java.awt
import java.lang
import java.net
class DiffProgramManager(object, ghidra.app.services.ProgramManager):
OPEN_CURRENT: int = 1
OPEN_HIDDEN: int = 0
OPEN_VISIBLE: int = 2
def __init__(self, __a0: ghidra.app.plugin.core.diff.ProgramDiffPlugin): ...
def closeAllPrograms(self, __a0: bool) -> bool: ...
def closeOtherPrograms(self, __a0: bool) -> bool: ...
@overload
def closeProgram(self) -> bool: ...
@overload
def closeProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> bool: ...
def equals(self, __a0: object) -> bool: ...
def getAllOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
def getClass(self) -> java.lang.Class: ...
def getCurrentProgram(self) -> ghidra.program.model.listing.Program: ...
def getProgram(self, __a0: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Program: ...
def hashCode(self) -> int: ...
def isLocked(self) -> bool: ...
def isVisible(self, __a0: ghidra.program.model.listing.Program) -> bool: ...
def lockDown(self, __a0: bool) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: int) -> None: ...
@overload
def openProgram(self, __a0: java.net.URL, __a1: int) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.program.model.listing.Program, __a1: bool) -> None: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: java.awt.Component) -> ghidra.program.model.listing.Program: ...
@overload
def openProgram(self, __a0: ghidra.framework.model.DomainFile, __a1: int, __a2: int) -> ghidra.program.model.listing.Program: ...
def releaseProgram(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> None: ...
def setCurrentProgram(self, __a0: ghidra.program.model.listing.Program) -> None: ...
def setPersistentOwner(self, __a0: ghidra.program.model.listing.Program, __a1: object) -> bool: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def allOpenPrograms(self) -> List[ghidra.program.model.listing.Program]: ...
@property
def currentProgram(self) -> ghidra.program.model.listing.Program: ...
@currentProgram.setter
def currentProgram(self, value: ghidra.program.model.listing.Program) -> None: ...
@property
def locked(self) -> bool: ...
| [
"[email protected]"
] | |
d5fc867cf915437ad5f65f07e94dd1e3c0cf089d | 101ffbee515a5b8f23d77361558dea1e42794dbd | /pip_save/toml/tests/test_writer/test_statement_nodes.py | 87c6eb7bd5d88df7645289d2751f38fa6795af0e | [] | no_license | mkurnikov/pip-save | 0a841710c28983c1c769d87e18f2e584a554e1a1 | e1e2fb9b0404a25790edcb5fd134267b92675470 | refs/heads/master | 2021-01-12T16:49:50.163661 | 2016-10-21T11:13:37 | 2016-10-21T11:13:37 | 71,442,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from collections import OrderedDict
from unittest import TestCase
from pip_save.toml.model import TomlStatementNodes, Table
class TestStatementNodes(TestCase):
def test_append(self):
toml_nodes = TomlStatementNodes()
toml_nodes[('keyword',)] = '1'
self.assertEqual(len(toml_nodes), 1)
self.assertTrue(('keyword',) in toml_nodes)
def test_insert_after(self):
od = OrderedDict()
od[('deps',)] = Table()
od[('django',)] = '1.10.2'
toml_nodes = TomlStatementNodes(od)
toml_nodes.insert_after(('deps',), ('flask',), '1.3')
self.assertEqual(toml_nodes.keys(), [('deps',), ('flask',), ('django',)])
def test_insert_before(self):
od = OrderedDict()
od[('deps',)] = Table()
od[('django',)] = '1.10.2'
toml_nodes = TomlStatementNodes(od)
toml_nodes.insert_before(('django',), ('flask',), '1.3')
self.assertEqual(toml_nodes.keys(), [('deps',), ('flask',), ('django',)])
| [
"[email protected]"
] | |
03141166a5bda5723952bf49f8c51c0d52f15fc7 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/assign_empty_list.py | a0b6e7f7ede213462c6b0daa209159a69c6f24cb | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 8 | py | [] = []
| [
"[email protected]"
] | |
020f91f4d1a8a9caa5c59fe28145b52c554f09ff | bf4178e73f0f83781be6784d7587cb34a38d6edd | /platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/common/calculators/calc_modulator.py | d1572f6a1abc18443087ee2ab822b05f0a10716a | [] | no_license | kolbertv/ZigbeeSiliconV3 | 80d70515e93be1413c24cdcb3485f50c65a1564b | ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9 | refs/heads/master | 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,642 | py | """Core CALC_Modulator Calculator Package
Calculator functions are pulled by using their names.
Calculator functions must start with "calc_" if they are to be consumed by the framework,
or they should be returned by overriding the function:
def getCalculationList(self):
"""
#import math
from pyradioconfig.calculator_model_framework.Utils.CustomExceptions import CalculationException
from pyradioconfig.calculator_model_framework.interfaces.icalculator import ICalculator
from pyradioconfig.parts.common.calculators.calc_utilities import CALC_Utilities
from enum import Enum
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
from py_2_and_3_compatibility import *
class CALC_Modulator(ICalculator):
"""
Init internal variables
"""
def __init__(self):
self._major = 1
self._minor = 0
self._patch = 0
def buildVariables(self, model):
"""Populates a list of needed variables for this calculator
Args:
model (ModelRoot) : Builds the variables specific to this calculator
"""
# symbol_encoding
var = self._addModelVariable(model, 'symbol_encoding', Enum, ModelVariableFormat.DECIMAL, 'Symbol Encoding Options')
member_data = [
['NRZ', 0, 'Non Return Zero Coding'],
['Manchester', 1, 'Manchester Coding'],
['DSSS', 2, 'Direct Sequence Spread Spectrum Coding'],
]
# Only Nerio (and Panther) support LINECODE Encoding, used for BLE Long Range
if model.part_family.lower() not in ["dumbo","jumbo","nixi"]:
member_data.append(['LINECODE', 3, 'Maps 0 to 0011 symbol and 1 to 1100 symbol'])
var.var_enum = CreateModelVariableEnum(
'SymbolEncodingEnum',
'List of supported symbol encoding options',
member_data)
# symbol_encoding
var = self._addModelVariable(model, 'manchester_mapping', Enum, ModelVariableFormat.DECIMAL, 'Manchester Code Mapping Options for packet payload')
member_data = [
['Default', 0, '0-bit corresponds to a 0 to 1 transition and 1-bit corresponds to 1 to 0 transition'],
['Inverted', 1, '0-bit corresponds to a 1 to 0 transition and 1-bit corresponds to 0 to 1 transition'],
]
var.var_enum = CreateModelVariableEnum(
'ManchesterMappingEnum',
'List of supported Manchester Code options',
member_data)
def calc_tx_baud_rate_actual(self, model):
"""
calculate actual TX baud rate from register settings
Args:
model (ModelRoot) : Data model to read and write variables from
"""
fxo = model.vars.xtal_frequency.value
txbr_ratio = model.vars.txbr_ratio_actual.value
tx_baud_rate = fxo / (8.0 * txbr_ratio)
model.vars.tx_baud_rate_actual.value = tx_baud_rate
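        # Worked example with assumed inputs (illustrative, not from any PHY):
        # fxo = 38.4e6 Hz and txbr_ratio = 48.0 give
        # tx_baud_rate = 38.4e6 / (8 * 48.0) = 100e3 baud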
def calc_symbol_rates_actual(self, model):
encoding = model.vars.symbol_encoding.value
encodingEnum = model.vars.symbol_encoding.var_enum
baud_per_symbol = 1
if model.part_family.lower() in ["nerio", "panther", "lynx", "ocelot"]:
if model.vars.MODEM_LONGRANGE_LRBLE.value == 1:
# In case of BLE LR 125 kps, baud_per_symbol is 8
if model.vars.FRC_CTRL_RATESELECT.value == 0:
baud_per_symbol = 8
# In case of BLE LR 500 kps, baud_per_symbol is 2
elif model.vars.FRC_CTRL_RATESELECT.value == 2:
baud_per_symbol = 2
else:
raise ValueError("Invalid FRC_CTRL_RATESELECT value used in LONGRANGE configuration")
if model.vars.FRC_CTRL_RATESELECT.value == 1:
                encoding = model.vars.MODEM_CTRL6_CODINGB.value
if encoding == encodingEnum.LINECODE:
baud_per_symbol *= 4
if encoding == encodingEnum.DSSS:
baud_per_symbol *= model.vars.dsss_len.value
elif encoding == encodingEnum.Manchester:
baud_per_symbol *= 2
model.vars.baud_per_symbol_actual.value = baud_per_symbol
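        # Illustrative examples: DSSS with dsss_len = 8 yields
        # baud_per_symbol = 8 (one baud per chip); Manchester alone yields 2.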
if encoding == encodingEnum.DSSS:
bits_per_symbol = model.vars.dsss_bits_per_symbol.value
else:
modFormat = model.vars.modulation_type.value
modFormatEnum = model.vars.modulation_type.var_enum
if modFormat in [modFormatEnum.FSK4, modFormatEnum.OQPSK]:
bits_per_symbol = 2
else:
bits_per_symbol = 1
model.vars.bits_per_symbol_actual.value = bits_per_symbol
#TODO: add support for ASK modulation
def calc_modindex_value(self, model):
"""
calculate MODINDEX value
Equations from Table 5.25 in EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
fxo = model.vars.xtal_frequency.value * 1.0
modformat = model.vars.modulation_type.value
freq_dev_hz = model.vars.deviation.value * 1.0
synth_res = model.vars.synth_res_actual.value
shaping_filter_gain = model.vars.shaping_filter_gain_actual.value
interpolation_gain = model.vars.interpolation_gain_actual.value
if modformat == model.vars.modulation_type.var_enum.FSK2 or \
modformat == model.vars.modulation_type.var_enum.FSK4:
modindex = freq_dev_hz * 16.0 / (synth_res * shaping_filter_gain * interpolation_gain)
elif modformat == model.vars.modulation_type.var_enum.OQPSK or \
modformat == model.vars.modulation_type.var_enum.MSK:
modindex = fxo / (synth_res * 2 * shaping_filter_gain * interpolation_gain)
elif modformat == model.vars.modulation_type.var_enum.BPSK or \
modformat == model.vars.modulation_type.var_enum.OOK or \
modformat == model.vars.modulation_type.var_enum.DBPSK:
modindex = 150.0 * 16 / (shaping_filter_gain * interpolation_gain)
else:
raise CalculationException("ERROR: %s modulation not yet supported!" % modformat)
model.vars.modindex.value = modindex
def calc_modindex_field(self, model):
"""
convert desired modindex fractional value to MODINDEXM * 2^MODINDEXE
Equations (5.13) of EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
modindex = model.vars.modindex.value
# convert fractional modindex into m * 2^e format
m, e = CALC_Utilities().frac2exp(31, modindex)
        # MODINDEXE is a signed value
if e < 0:
e += 32
# verify number fits into register
if m > 31:
m = 31
if e > 31:
e = 31
if m < 0:
m = 0
self._reg_write(model.vars.MODEM_MODINDEX_MODINDEXM, int(m))
self._reg_write(model.vars.MODEM_MODINDEX_MODINDEXE, int(e))
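        # Illustrative coding (the exact split is up to frac2exp):
        # modindex = 0.5 can be stored as m = 16, e = -5, since 16 * 2**-5 = 0.5;
        # the negative exponent is then written two's-complement style as 32 - 5 = 27.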
def calc_modindex_actual(self, model):
"""
given register settings return actual MODINDEX as fraction
Equations (5.13) of EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
m = model.vars.MODEM_MODINDEX_MODINDEXM.value
e = model.vars.MODEM_MODINDEX_MODINDEXE.value
        # MODINDEXE is a signed value
if e > 15:
e -= 32
model.vars.modindex_actual.value = 1.0 * m * 2**e
def calc_modulation_index_actual(self, model):
"""
calculate the actual modulation index for given PHY
This is the traditional modulation index as 2 * deviation / baudrate
the one above we call modindex and is specific value used by EFR32
Args:
model (ModelRoot) : Data model to read and write variables from
"""
baudrate_hz = model.vars.tx_baud_rate_actual.value
tx_deviation = model.vars.tx_deviation_actual.value
model.vars.modulation_index_actual.value = tx_deviation * 2.0 / baudrate_hz
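        # Worked example with assumed numbers: 50 kHz deviation at 100 kBaud
        # gives h = 2 * 50e3 / 100e3 = 1.0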
def calc_tx_freq_dev_actual(self, model):
"""
given register setting return actual frequency deviation used in the modulator
Using Equations in Table 5.25 of EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
modformat = model.vars.modulation_type.value
modindex = model.vars.modindex_actual.value
synth_res = model.vars.synth_res_actual.value
shaping_filter_gain = model.vars.shaping_filter_gain_actual.value
interpolation_gain = model.vars.interpolation_gain_actual.value
if modformat == model.vars.modulation_type.var_enum.FSK2 or \
modformat == model.vars.modulation_type.var_enum.FSK4:
freq_dev_hz = modindex * (synth_res * shaping_filter_gain * interpolation_gain) / 16.0
else:
freq_dev_hz = 0.0
model.vars.tx_deviation_actual.value = freq_dev_hz
# calculate TX baudrate ratio
# Using Equation (5.7) of EFR32 Reference Manual (internal.pdf)
def calc_txbr_value(self, model):
"""
calculate TX baudrate ratio
Using Equation (5.7) of EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
fxo = model.vars.xtal_frequency.value
baudrate = model.vars.baudrate.value
# calculate baudrate to fxo ratio
ratio = fxo / (baudrate * 8.0)
model.vars.txbr_ratio.value = ratio
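        # Worked example with assumed inputs: fxo = 38.4e6 Hz at 100 kBaud
        # gives ratio = 38.4e6 / (100e3 * 8.0) = 48.0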
def calc_txbr_reg(self, model):
"""
given desired TX baudrate ratio calculate TXBRNUM and TXBRDEN
that gets as close as possible to the ratio.
Note that we start from the highest possible value for TXBRDEN
and go down since having largest possible values in these register
to have better phase resolution in OQPSK and MSK (see end of section
5.6.5 in the manual)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
ratio = model.vars.txbr_ratio.value
# find best integer ratio to match desired ratio
for den in xrange(255, 0, -1):
num = ratio * den
if abs(round(num) - num) < 0.003 and num < 32768:
break
self._reg_write(model.vars.MODEM_TXBR_TXBRNUM, int(round(num)))
self._reg_write(model.vars.MODEM_TXBR_TXBRDEN, int(den))
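        # Illustrative result: ratio = 48.0 is matched immediately as
        # num/den = 12240/255; starting from the largest denominator keeps
        # both register fields large, for finer phase resolution.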
def calc_txbr_actual(self, model):
"""
given register values calculate actual TXBR ratio implemented
Args:
model (ModelRoot) : Data model to read and write variables from
"""
num = model.vars.MODEM_TXBR_TXBRNUM.value * 1.0
den = model.vars.MODEM_TXBR_TXBRDEN.value
ratio = num / den
model.vars.txbr_ratio_actual.value = ratio
def calc_txbases_reg(self, model):
"""
set TXBASES based on preamble length and base bits value
Args:
model (ModelRoot) : Data model to read and write variables from
"""
txbases = model.vars.preamble_length.value / model.vars.preamble_pattern_len_actual.value
# Some input combinations can produce values out of range for the register fields,
# such as applying ludicrously long preamble lengths.
# MCUW_RADIO_CFG-793
        # TODO: it would be best to query the register model to determine that these two fields are 7 bits wide
if (txbases) > 0xffff:
raise CalculationException("Calculated TX preamble sequences (TXBASE) value of %s exceeds limit of 65535! Adjust preamble inputs." % txbases )
self._reg_write(model.vars.MODEM_PRE_TXBASES, int(txbases))
def calc_symbol_encoding(self, model):
"""
set CODING register
Args:
model (ModelRoot) : Data model to read and write variables from
"""
encoding = model.vars.symbol_encoding.value
if encoding == model.vars.symbol_encoding.var_enum.DSSS:
coding = 2
elif encoding == model.vars.symbol_encoding.var_enum.Manchester:
coding = 1
else:
coding = 0
self._reg_write(model.vars.MODEM_CTRL0_CODING, coding)
def calc_mapfsk_reg(self, model):
"""
program MAPFSK register based on input
Args:
model (ModelRoot) : Data model to read and write variables from
"""
mod_format = model.vars.modulation_type.value
manchester_map = model.vars.manchester_mapping.value
fsk_map = model.vars.fsk_symbol_map.value
encoding = model.vars.symbol_encoding.value
FSKMAP_LOOKUP = {
model.vars.fsk_symbol_map.var_enum.MAP0.value: 0,
model.vars.fsk_symbol_map.var_enum.MAP1.value: 1,
model.vars.fsk_symbol_map.var_enum.MAP2.value: 2,
model.vars.fsk_symbol_map.var_enum.MAP3.value: 3,
model.vars.fsk_symbol_map.var_enum.MAP4.value: 4,
model.vars.fsk_symbol_map.var_enum.MAP5.value: 5,
model.vars.fsk_symbol_map.var_enum.MAP6.value: 6,
model.vars.fsk_symbol_map.var_enum.MAP7.value: 7,
}
mapfsk = FSKMAP_LOOKUP[fsk_map.value]
if mod_format != model.vars.modulation_type.var_enum.FSK4:
# if we're using Manchester encoding (or any FSK modulation actually),
# then only MAP0 and MAP1 are valid
if mapfsk > 1:
raise CalculationException("Invalid fsk symbol map value for modulation type selected.")
if encoding == model.vars.symbol_encoding.var_enum.Manchester:
# if we're using Manchester encoding,
# then only MAP0 and MAP1 are valid
if mapfsk > 1:
raise CalculationException("Invalid fsk_symbol_map value for Manchester encoding")
# if we're using inverted Manchester encoding, then flip the polarity of the fsk
# map. This flips the polarity of the entire transmission, including the preamble
# and syncword. We don't want the preamble and syncword flipped, so we'll invert those
# registers elsewhere
if manchester_map != model.vars.manchester_mapping.var_enum.Default:
mapfsk ^= 1
self._reg_write(model.vars.MODEM_CTRL0_MAPFSK, mapfsk)
| [
"[email protected]"
] | |
8ce29595818ea2d4b7f8186cbb954cbdb7739d39 | a3ff13ecac60f891a3ebdcb4c72bf6a4b581a2d8 | /YCD/10.16公开课红心代码heart_3d.py | 307b46ff179ed773578ac66438fc7032e575e55a | [] | no_license | kekirk/pycode | 75533afc3018cba30d0abd3c29ab1c945b85504b | 06dab1a61d7b445cc19b41e4d281f62251e2583b | refs/heads/master | 2020-04-01T17:13:53.828118 | 2019-01-04T09:02:00 | 2019-01-04T09:02:00 | 153,419,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
from pyecharts import Scatter3D
import numpy as np
# # Implicit equation of the heart surface
# # (x^2+9/4*y^2+z^2-1)^3-x^2*z^3-9/80*y^2*z^3=0
# In[5]:
scatter3D = Scatter3D("I Love You", width=1700, height=1000)
data = list()
x = list(np.linspace(-1.5, 1.5, 150))
y = list(np.linspace(-1, 1, 100))
z = list(np.linspace(-1.5, 1.5, 100))
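# Keep the grid points where the implicit heart function falls in a thin band
# just below zero (-0.05 <= f(x, y, z) <= 0); those points trace the surface
# that is rendered as a 3-D scatter plot.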
for a in x:
for b in y:
for c in z:
            if -0.05 <= (a**2 + 9.0/4.0*b**2 + c**2 - 1)**3 - a**2*c**3 - 9.0/80.0*b**2*c**3 <= 0:
data.append([a,b,c])
scatter3D.add("", data, is_visualmap=True, visual_range_color="red")
scatter3D.render()
scatter3D
| [
"[email protected]"
] | |
b1b5d4c9662f948f6cb0351194e8af4c4eab7524 | acc9d729e0182b17023e9660457eed0e19f4f828 | /test/test_exception_scope.py | 22945ab9e2fae2a05406475b22a8ad88e8dbef90 | [] | no_license | secuwave/nexpose_client | 2f00907ef3ffea33c8e9f5cc2543e708f349de6c | 5ceff219ae03cadb5407dc48d8858ffa56bb3463 | refs/heads/master | 2020-05-22T13:54:22.675479 | 2019-05-13T09:12:09 | 2019-05-13T09:12:09 | 186,369,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,879 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"api.json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nexpose_client
from nexpose_client.models.exception_scope import ExceptionScope # noqa: E501
from nexpose_client.rest import ApiException
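# Illustrative constant (not part of the generated client): one search-criteria
# filter in the shape documented in the module docstring above. The field name
# comes from its Fields table and the hyphenated operator spelling from its
# Operator Properties table; the numeric threshold is an arbitrary example.
EXAMPLE_SEARCH_FILTER = {
    "field": "risk-score",
    "operator": "is-greater-than",
    "value": 5000,
}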
class TestExceptionScope(unittest.TestCase):
"""ExceptionScope unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExceptionScope(self):
"""Test ExceptionScope"""
# FIXME: construct object with mandatory attributes with example values
# model = nexpose_client.models.exception_scope.ExceptionScope() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
63d35cddd89c965242e94321cf091a8e71be87ec | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_swivels.py | ee26ae2f31371bd1bbcf01ad3ec765b20b2961cb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._swivel import _SWIVEL
# class header
class _SWIVELS(_SWIVEL):
    def __init__(self):
        _SWIVEL.__init__(self)
        self.name = "SWIVELS"
        self.specie = 'verbs'
        self.basic = "swivel"
        self.jsondata = {}
| [
"[email protected]"
] | |
917caa8803de6237510c15044bfbe71ebee37d83 | 4564fd0cfb9009f0b85d15c3b9164b865c4c86e7 | /tests/test_model.py | 6640508bf3a1581cded6a9fe52d2d2d572937326 | [
"Apache-2.0"
] | permissive | rajaramcomputers/client | 0188a1cf8e989dcd180c280a4db4d00c44bac390 | 65badf61fb9a5430596d6d2c0b9b7833cf30ec06 | refs/heads/master | 2021-01-16T21:59:35.657394 | 2016-02-01T22:08:18 | 2016-02-01T22:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | """ run with
nosetests -v --nocapture tests/test_model.py
or
nosetests -v tests/test_model.py
"""
from __future__ import print_function
from pprint import pprint
from cloudmesh_base.util import HEADING
import cloudmesh_client.db
import cloudmesh_client.db.model
class Test_model:
def setup(self):
pass
def tearDown(self):
pass
def test_001(self):
HEADING()
pprint(cloudmesh_client.db.tables())
assert True
def test_002(self):
HEADING()
print(cloudmesh_client.db.tablenames())
assert True
def test_003(self):
HEADING()
for name in cloudmesh_client.db.tablenames():
print(cloudmesh_client.db.table(name))
assert True
| [
"[email protected]"
] | |
b113b1db3bfe5f8e92de554cc4f803a2b126bac7 | 902e8b6f2c39c0a7baa8abd9637aa43f4be27e27 | /Code/Chapter 1/src/blueblog/urls.py | 9e80e946967b3d343885c7d48be82d6ec68c8c7b | [] | no_license | PacktPublishing/Django-Projects-Blueprints | 8151e611ae5cf95dc985ac7d08ce503bd41e0c4a | 7d2409ea1b43b057d1e4c337e348cb6e102f75d6 | refs/heads/master | 2023-02-08T13:34:22.658965 | 2023-01-30T10:17:40 | 2023-01-30T10:17:40 | 59,006,898 | 32 | 30 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login
from django.contrib.auth.views import logout
from accounts.views import UserRegistrationView
from blog.views import NewBlogView
from blog.views import HomeView
from blog.views import UpdateBlogView
from blog.views import NewBlogPostView
from blog.views import UpdateBlogPostView
from blog.views import BlogPostDetailsView
from blog.views import SharePostWithBlog
from blog.views import StopSharingPostWithBlog
from blog.views import ShareBlogPostView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', HomeView.as_view(), name='home'),
url(r'^new-user/$', UserRegistrationView.as_view(), name='user_registration'),
url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', logout, {'next_page': '/login/'}, name='logout'),
url(r'^blog/new/$', NewBlogView.as_view(), name='new-blog'),
url(r'^blog/(?P<pk>\d+)/update/$', UpdateBlogView.as_view(), name='update-blog'),
    url(r'^blog/post/new/$', NewBlogPostView.as_view(), name='new-blog-post'),
    url(r'^blog/post/(?P<pk>\d+)/update/$', UpdateBlogPostView.as_view(), name='update-blog-post'),
    url(r'^blog/post/(?P<pk>\d+)/$', BlogPostDetailsView.as_view(), name='blog-post-details'),
    url(r'^blog/post/(?P<pk>\d+)/share/$', ShareBlogPostView.as_view(), name='share-blog-post-with-blog'),
    url(r'^blog/post/(?P<post_pk>\d+)/share/to/(?P<blog_pk>\d+)/$', SharePostWithBlog.as_view(), name='share-post-with-blog'),
    url(r'^blog/post/(?P<post_pk>\d+)/stop/share/to/(?P<blog_pk>\d+)/$', StopSharingPostWithBlog.as_view(), name='stop-sharing-post-with-blog'),
]
| [
"[email protected]"
] | |
a9e2cbb4176684f4ffa52c1888fae3102c5fa7b6 | 9b59f76f3b312951519a15651290476c34a54174 | /QUANTAXIS_Test/QABacktest_Test/QABacktestSimple_Test.py | 37e9feda4909d833898186c4d41be55ad36d35fd | [
"MIT"
] | permissive | sjtututu/QUANTAXIS | b8d9ba35d20159680f25cd3e583ebcfc7ff34c75 | e9e20cdeda8b8d132433037b639a7e60f286a190 | refs/heads/master | 2020-08-16T11:19:19.689925 | 2020-02-22T01:21:57 | 2020-02-22T01:21:57 | 215,495,655 | 1 | 0 | MIT | 2019-12-28T08:13:57 | 2019-10-16T08:22:54 | Python | UTF-8 | Python | false | false | 4,471 | py | import unittest
import numpy as np
import pandas as pd
import QUANTAXIS as QA
class QABacktestSimple_Test(unittest.TestCase):
# define the MACD strategy
def MACD_JCSC(self, dataframe, SHORT=12, LONG=26, M=9):
"""
        1. DIF crossing above DEA is a buy signal reference (golden cross).
        2. DIF crossing below DEA is a sell signal reference (dead cross).
"""
CLOSE = dataframe.close
DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
DEA = QA.EMA(DIFF, M)
MACD = 2 * (DIFF - DEA)
CROSS_JC = QA.CROSS(DIFF, DEA)
CROSS_SC = QA.CROSS(DEA, DIFF)
ZERO = 0
return pd.DataFrame(
{'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
def setUp(self):
# 准备数据
# create account
self.Account = QA.QA_Account()
self.Broker = QA.QA_BacktestBroker()
self.Account.reset_assets(1000000)
self.Account.account_cookie = 'user_admin_macd'
# get data from mongodb
self.data = QA.QA_fetch_stock_day_adv(
['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
self.data = self.data.to_qfq()
# add indicator
self.ind = self.data.add_func(self.MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
self.data_forbacktest = self.data.select_time(
'2018-01-01', '2018-05-20')
def tearDown(self):
print(self.Account.history)
print(self.Account.history_table)
print(self.Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(self.Account)
print(Risk.message)
print(Risk.assets)
Risk.plot_assets_curve()
Risk.plot_dailyhold()
Risk.plot_signal()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
self.Account.save()
Risk.save()
account_info = QA.QA_fetch_account(
{'account_cookie': 'user_admin_macd'})
account = QA.QA_Account().from_message(account_info[0])
print(account)
def test_simpleQABacktest(self):
for items in self.data_forbacktest.panel_gen:
for item in items.security_gen:
daily_ind = self.ind.loc[item.index]
if daily_ind.CROSS_JC.iloc[0] > 0:
order = self.Account.send_order(
code=item.code[0],
time=item.date[0],
amount=1000,
towards=QA.ORDER_DIRECTION.BUY,
price=0,
order_model=QA.ORDER_MODEL.CLOSE,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
if order:
self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
res = trade_mes.loc[order.account_cookie, order.realorder_id]
order.trade(res.trade_id, res.trade_price,
res.trade_amount, res.trade_time)
elif daily_ind.CROSS_SC.iloc[0] > 0:
if self.Account.sell_available.get(item.code[0], 0) > 0:
order = self.Account.send_order(
code=item.code[0],
time=item.date[0],
amount=self.Account.sell_available.get(
item.code[0], 0),
towards=QA.ORDER_DIRECTION.SELL,
price=0,
order_model=QA.ORDER_MODEL.MARKET,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
if order:
self.Broker.receive_order(QA.QA_Event(order=order, market_data=item))
trade_mes = self.Broker.query_orders(self.Account.account_cookie, 'filled')
res = trade_mes.loc[order.account_cookie, order.realorder_id]
order.trade(res.trade_id, res.trade_price,
res.trade_amount, res.trade_time)
self.Account.settle()
| [
"[email protected]"
] | |
6366520696258a461f3115d86f78471be03fe8ae | 7c3a2a44536779d711349f38a18c0edd95ff5b1f | /algolia_places/__init__.py | 7b9dcb29453e2ba317b160934479cabb2fbb81ee | [
"MIT"
] | permissive | m-vdb/algolia-places-python | f0c566d5801a9397406cce32bbc8593da85cf769 | 84fcbf93abf35ad4c42ade0415fdafa2674639f7 | refs/heads/master | 2021-07-11T13:38:00.898814 | 2018-08-20T12:54:11 | 2018-08-20T12:54:11 | 145,402,115 | 2 | 1 | MIT | 2020-06-16T07:53:03 | 2018-08-20T10:20:15 | Python | UTF-8 | Python | false | false | 69 | py | """Algolia places module."""
from .client import AlgoliaPlacesClient
| [
"[email protected]"
] | |
922632bd7fd107d2f4b5713afca0a914316f2f55 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/195/48791/submittedfiles/testes.py | 697187f7d52f0e988ed1e5a2cdacc5e64b225503 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # -*- coding: utf-8 -*-
a=int(input('digite a:'))
b=int(input('digite b:'))
c=int(input('digite c:'))
d=int(input('digite d:'))
if a>b and a>c and a>d:
print(a)
if b>a and b>c and b>d:
print(b)
if c>a and c>b and c>d:
print(c)
if d>a and d>b and d>c:
print(d)
if a<b and a<c and a<d:
print(a)
if b<a and b<c and b<d:
print(b)
if c<a and c<b and c<d:
print(c)
if d<a and d<b and d<c:
print(d) | [
"[email protected]"
] | |
695b0730a411e071970885a7c9a14c7fb5b55754 | 444ef2c07e05cf6b2c85ee33535f228d7c5b384e | /allenact/embodiedai/mapping/mapping_utils/map_builders.py | 1d31f53f59e84ca4263e6e57f69e0ff2ca84cf30 | [
"MIT"
] | permissive | zcczhang/allenact | 4d92d771e31868c3e6909c358787b46d2ff995fa | 4657479e8127393f5996e70649da2e2a7eae7332 | refs/heads/main | 2023-08-21T19:30:19.397165 | 2021-10-06T17:33:58 | 2021-10-06T17:33:58 | 396,886,059 | 2 | 0 | NOASSERTION | 2021-08-16T16:52:59 | 2021-08-16T16:52:58 | null | UTF-8 | Python | false | false | 23,116 | py | # MIT License
#
# Original Copyright (c) 2020 Devendra Chaplot
#
# Modified work Copyright (c) 2021 Allen Institute for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
from typing import Optional, Sequence, Union, Dict
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from allenact.embodiedai.mapping.mapping_utils.point_cloud_utils import (
depth_frame_to_world_space_xyz,
project_point_cloud_to_map,
)
class BinnedPointCloudMapBuilder(object):
"""Class used to iteratively construct a map of "free space" based on input
depth maps (i.e. pointclouds).
Adapted from https://github.com/devendrachaplot/Neural-SLAM
This class can be used to (iteratively) construct a metric map of free space in an environment as
an agent moves around. After every step the agent takes, you should call the `update` function and
pass the agent's egocentric depth image along with the agent's new position. This depth map will
be converted into a pointcloud, binned along the up/down axis, and then projected
onto a 3-dimensional tensor of shape (HxWxC) whose where HxW represent the ground plane
and where C equals the number of bins the up-down coordinate was binned into. This 3d map counts the
number of points in each bin. Thus a lack of points within a region can be used to infer that
that region is free space.
# Attributes
fov : FOV of the camera used to produce the depth images given when calling `update`.
vision_range_in_map_units : The maximum distance (in number of rows/columns) that will
be updated when calling `update`, points outside of this map vision range are ignored.
map_size_in_cm : Total map size in cm.
resolution_in_cm : Number of cm per row/column in the map.
height_bins : The bins used to bin the up-down coordinate (for us the y-coordinate). For example,
if `height_bins = [0.1, 1]` then
all y-values < 0.1 will be mapped to 0, all y values in [0.1, 1) will be mapped to 1, and
all y-values >= 1 will be mapped to 2.
**Importantly:** these y-values will first be recentered by the `min_xyz` value passed when
calling `reset(...)`.
device : A `torch.device` on which to run computations. If this device is a GPU you can potentially
obtain significant speed-ups.
"""
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
height_bins: Sequence[float],
device: torch.device = torch.device("cpu"),
):
assert vision_range_in_cm % resolution_in_cm == 0
self.fov = fov
self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.height_bins = height_bins
self.device = device
self.binned_point_cloud_map = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
len(self.height_bins) + 1,
),
dtype=np.float32,
)
self.min_xyz: Optional[np.ndarray] = None
def update(
self,
depth_frame: np.ndarray,
camera_xyz: np.ndarray,
camera_rotation: float,
camera_horizon: float,
) -> Dict[str, np.ndarray]:
"""Updates the map with the input depth frame from the agent.
See the `allenact.embodiedai.mapping.mapping_utils.point_cloud_utils.project_point_cloud_to_map`
function for more information input parameter definitions. **We assume that the input
`depth_frame` has depths recorded in meters**.
# Returns
Let `map_size = self.map_size_in_cm // self.resolution_in_cm`. Returns a dictionary with keys-values:
* `"egocentric_update"` - A tensor of shape
`(vision_range_in_map_units)x(vision_range_in_map_units)x(len(self.height_bins) + 1)` corresponding
to the binned pointcloud after having been centered on the agent and rotated so that
points ahead of the agent correspond to larger row indices and points further to the right of the agent
correspond to larger column indices. Note that by "centered" we mean that one can picture
the agent as being positioned at (0, vision_range_in_map_units/2) and facing downward. Each entry in this tensor
is a count equaling the number of points in the pointcloud that, once binned, fell into this
entry. This is likely the output you want to use if you want to build a model to predict free space from an image.
* `"allocentric_update"` - A `(map_size)x(map_size)x(len(self.height_bins) + 1)` corresponding
to `"egocentric_update"` but rotated to the world-space coordinates. This `allocentric_update`
is what is used to update the internally stored representation of the map.
* `"map"` - A `(map_size)x(map_size)x(len(self.height_bins) + 1)` tensor corresponding
to the sum of all `"allocentric_update"` values since the last `reset()`.
"""
with torch.no_grad():
assert self.min_xyz is not None, "Please call `reset` before `update`."
camera_xyz = (
torch.from_numpy(camera_xyz - self.min_xyz).float().to(self.device)
)
depth_frame = torch.from_numpy(depth_frame).to(self.device)
depth_frame[
depth_frame
> self.vision_range_in_map_units * self.resolution_in_cm / 100
] = np.NaN
world_space_point_cloud = depth_frame_to_world_space_xyz(
depth_frame=depth_frame,
camera_world_xyz=camera_xyz,
rotation=camera_rotation,
horizon=camera_horizon,
fov=self.fov,
)
world_binned_map_update = project_point_cloud_to_map(
xyz_points=world_space_point_cloud,
bin_axis="y",
bins=self.height_bins,
map_size=self.binned_point_cloud_map.shape[0],
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
# Center the cloud on the agent
recentered_point_cloud = world_space_point_cloud - (
torch.FloatTensor([1.0, 0.0, 1.0]).to(self.device) * camera_xyz
).reshape((1, 1, 3))
# Rotate the cloud so that positive-z is the direction the agent is looking
theta = (
np.pi * camera_rotation / 180
) # No negative since THOR rotations are already backwards
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rotation_transform = torch.FloatTensor(
[
[cos_theta, 0, -sin_theta],
[0, 1, 0], # unchanged
[sin_theta, 0, cos_theta],
]
).to(self.device)
rotated_point_cloud = recentered_point_cloud @ rotation_transform.T
xoffset = (self.map_size_in_cm / 100) / 2
agent_centric_point_cloud = rotated_point_cloud + torch.FloatTensor(
[xoffset, 0, 0]
).to(self.device)
allocentric_update_numpy = world_binned_map_update.cpu().numpy()
self.binned_point_cloud_map = (
self.binned_point_cloud_map + allocentric_update_numpy
)
agent_centric_binned_map = project_point_cloud_to_map(
xyz_points=agent_centric_point_cloud,
bin_axis="y",
bins=self.height_bins,
map_size=self.binned_point_cloud_map.shape[0],
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
vr = self.vision_range_in_map_units
vr_div_2 = self.vision_range_in_map_units // 2
width_div_2 = agent_centric_binned_map.shape[1] // 2
agent_centric_binned_map = agent_centric_binned_map[
:vr, (width_div_2 - vr_div_2) : (width_div_2 + vr_div_2), :
]
return {
"egocentric_update": agent_centric_binned_map.cpu().numpy(),
"allocentric_update": allocentric_update_numpy,
"map": self.binned_point_cloud_map,
}
def reset(self, min_xyz: np.ndarray):
"""Reset the map.
Resets the internally stored map.
# Parameters
min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
will have been normalized so the (0,0,:) entry corresponds to these minimum values.
"""
self.min_xyz = min_xyz
self.binned_point_cloud_map = np.zeros_like(self.binned_point_cloud_map)
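# Hedged usage sketch for the class above. Every concrete value here (FOV,
# ranges, frame size, camera pose) is an illustrative assumption rather than
# a module default; depth values are in meters, as `update` requires.
def _binned_map_usage_sketch():
    builder = BinnedPointCloudMapBuilder(
        fov=90.0,
        vision_range_in_cm=500,
        map_size_in_cm=2000,
        resolution_in_cm=5,
        height_bins=[0.2, 1.5],
    )
    builder.reset(min_xyz=np.zeros(3, dtype=np.float32))
    fake_depth = np.full((224, 224), 1.5, dtype=np.float32)  # 1.5 m everywhere
    out = builder.update(
        depth_frame=fake_depth,
        camera_xyz=np.array([5.0, 0.9, 5.0], dtype=np.float32),
        camera_rotation=0.0,
        camera_horizon=0.0,
    )
    # egocentric update is (vision_range)x(vision_range)x(bins+1); the full
    # map is (map_size)x(map_size)x(bins+1)
    return out["egocentric_update"].shape, out["map"].shape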
class ObjectHull2d:
def __init__(
self,
object_id: str,
object_type: str,
hull_points: Union[np.ndarray, Sequence[Sequence[float]]],
):
"""A class used to represent 2d convex hulls of objects when projected
to the ground plane.
# Parameters
object_id : A unique id for the object.
object_type : The type of the object.
hull_points : A Nx2 matrix with `hull_points[:, 0]` being the x coordinates and `hull_points[:, 1]` being
the `z` coordinates (this is using the Unity game engine conventions where the `y` axis is up/down).
"""
self.object_id = object_id
self.object_type = object_type
self.hull_points = (
hull_points
if isinstance(hull_points, np.ndarray)
else np.array(hull_points)
)
class SemanticMapBuilder(object):
"""Class used to iteratively construct a semantic map based on input depth
maps (i.e. pointclouds).
Adapted from https://github.com/devendrachaplot/Neural-SLAM
This class can be used to (iteratively) construct a semantic map of objects in the environment.
This map is similar to that generated by `BinnedPointCloudMapBuilder` (see its documentation for
more information) but the various channels correspond to different object types. Thus
if the `(i,j,k)` entry of a map generated by this function is `True`, this means that an
object of type `k` is present in position `i,j` in the map. In particular, by "present" we mean that,
after projecting the object to the ground plane and taking the convex hull of the resulting
2d object, a non-trivial portion of this convex hull overlaps the `i,j` position.
For attribute information, see the documentation of the `BinnedPointCloudMapBuilder` class. The
only attribute present in this class that is not present in `BinnedPointCloudMapBuilder` is
`ordered_object_types` which corresponds to a list of unique object types where
object type `ordered_object_types[i]` will correspond to the `i`th channel of the map
generated by this class.
"""
def __init__(
self,
fov: float,
vision_range_in_cm: int,
map_size_in_cm: int,
resolution_in_cm: int,
ordered_object_types: Sequence[str],
device: torch.device = torch.device("cpu"),
):
self.fov = fov
self.vision_range_in_map_units = vision_range_in_cm // resolution_in_cm
self.map_size_in_cm = map_size_in_cm
self.resolution_in_cm = resolution_in_cm
self.ordered_object_types = tuple(ordered_object_types)
self.device = device
self.object_type_to_index = {
ot: i for i, ot in enumerate(self.ordered_object_types)
}
self.ground_truth_semantic_map = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
len(self.ordered_object_types),
),
dtype=np.uint8,
)
self.explored_mask = np.zeros(
(
self.map_size_in_cm // self.resolution_in_cm,
self.map_size_in_cm // self.resolution_in_cm,
1,
),
dtype=bool,
)
self.min_xyz: Optional[np.ndarray] = None
@staticmethod
def randomly_color_semantic_map(
map: Union[np.ndarray, torch.Tensor], threshold: float = 0.5, seed: int = 1
) -> np.ndarray:
if not isinstance(map, np.ndarray):
map = np.array(map)
rnd = random.Random(seed)
semantic_int_mat = (
(map >= threshold)
* np.array(list(range(1, map.shape[-1] + 1))).reshape((1, 1, -1))
).max(-1)
# noinspection PyTypeChecker
return np.uint8(
np.array(
[(0, 0, 0)]
+ [
tuple(rnd.randint(0, 256) for _ in range(3))
for _ in range(map.shape[-1])
]
)[semantic_int_mat]
)
def _xzs_to_colrows(self, xzs: np.ndarray):
height, width, _ = self.ground_truth_semantic_map.shape
return np.clip(
np.int32(
(
(100 / self.resolution_in_cm)
* (xzs - np.array([[self.min_xyz[0], self.min_xyz[2]]]))
)
),
a_min=0,
a_max=np.array(
[width - 1, height - 1]
), # width then height as we're returns cols then rows
)
def build_ground_truth_map(self, object_hulls: Sequence[ObjectHull2d]):
self.ground_truth_semantic_map.fill(0)
height, width, _ = self.ground_truth_semantic_map.shape
for object_hull in object_hulls:
ot = object_hull.object_type
if ot in self.object_type_to_index:
ind = self.object_type_to_index[ot]
self.ground_truth_semantic_map[
:, :, ind : (ind + 1)
] = cv2.fillConvexPoly(
img=np.array(
self.ground_truth_semantic_map[:, :, ind : (ind + 1)],
dtype=np.uint8,
),
points=self._xzs_to_colrows(np.array(object_hull.hull_points)),
color=255,
)
def update(
self,
depth_frame: np.ndarray,
camera_xyz: np.ndarray,
camera_rotation: float,
camera_horizon: float,
) -> Dict[str, np.ndarray]:
"""Updates the map with the input depth frame from the agent.
See the documentation for `BinnedPointCloudMapBuilder.update`,
the inputs and outputs are similar except that channels are used
to represent the presence/absence of objects of given types.
Unlike `BinnedPointCloudMapBuilder.update`, this function also
returns two masks with keys `"egocentric_mask"` and `"mask"`
that can be used to determine what portions of the map have been
observed by the agent so far in the egocentric and world-space
reference frames respectively.
"""
with torch.no_grad():
assert self.min_xyz is not None
camera_xyz = torch.from_numpy(camera_xyz - self.min_xyz).to(self.device)
map_size = self.ground_truth_semantic_map.shape[0]
depth_frame = torch.from_numpy(depth_frame).to(self.device)
depth_frame[
depth_frame
> self.vision_range_in_map_units * self.resolution_in_cm / 100
] = np.NaN
world_space_point_cloud = depth_frame_to_world_space_xyz(
depth_frame=depth_frame,
camera_world_xyz=camera_xyz,
rotation=camera_rotation,
horizon=camera_horizon,
fov=self.fov,
)
world_newly_explored = (
project_point_cloud_to_map(
xyz_points=world_space_point_cloud,
bin_axis="y",
bins=[],
map_size=map_size,
resolution_in_cm=self.resolution_in_cm,
flip_row_col=True,
)
> 0.001
)
world_update_and_mask = torch.cat(
(
torch.logical_and(
torch.from_numpy(self.ground_truth_semantic_map).to(
self.device
),
world_newly_explored,
),
world_newly_explored,
),
dim=-1,
).float()
world_update_and_mask_for_sample = world_update_and_mask.unsqueeze(
0
).permute(0, 3, 1, 2)
# We now use grid sampling to rotate world_update_for_sample into the egocentric coordinate
# frame of the agent so that the agent's forward direction is downwards in the tensor
# (and it's right side is to the right in the image, this means that right/left
# when taking the perspective of the agent in the image). This convention aligns with
# what's expected by grid_sample where +x corresponds to +cols and +z corresponds to +rows.
# Here also the rows/cols have been normalized so that the center of the image is at (0,0)
# and the bottom right is at (1,1).
# Mentally you can think of the output from the F.affine_grid function as you wanting
# rotating/translating an axis-aligned square on the image-to-be-sampled and then
# copying whatever is in this square to a new image. Note that the translation always
# happens in the global reference frame after the rotation. We'll start by rotating
# the square so that the the agent's z direction is downwards in the image.
# Since the global axis of the map and the grid sampling are aligned, this requires
# rotating the square by the rotation of the agent. As rotation is negative the usual
# standard in THOR, we need to negate the rotation of the agent.
theta = -np.pi * camera_rotation / 180
# Here form the rotation matrix
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rot_mat = torch.FloatTensor(
[[cos_theta, -sin_theta], [sin_theta, cos_theta]]
).to(self.device)
# Now we need to figure out the translation. For an intuitive understanding, we break this
# translation into two different "offsets". The first offset centers the square on the
# agent's current location:
scaler = 2 * (100 / (self.resolution_in_cm * map_size))
offset_to_center_the_agent = (
scaler
* torch.FloatTensor([camera_xyz[0], camera_xyz[2]])
.unsqueeze(-1)
.to(self.device)
- 1
)
# The second offset moves the square in the direction of the agent's z direction
# so that the output image will have the agent's view starting directly at the
# top of the image.
offset_to_top_of_image = rot_mat @ torch.FloatTensor([0, 1.0]).unsqueeze(
1
).to(self.device)
rotation_and_translate_mat = torch.cat(
(rot_mat, offset_to_top_of_image + offset_to_center_the_agent,), dim=1,
)
ego_update_and_mask = F.grid_sample(
world_update_and_mask_for_sample.to(self.device),
F.affine_grid(
rotation_and_translate_mat.to(self.device).unsqueeze(0),
world_update_and_mask_for_sample.shape,
align_corners=False,
),
align_corners=False,
)
# All that's left now is to crop out the portion of the transformed tensor that we actually
# care about (i.e. the portion corresponding to the agent's `self.vision_range_in_map_units`.
vr = self.vision_range_in_map_units
half_vr = vr // 2
center = self.map_size_in_cm // (2 * self.resolution_in_cm)
cropped = ego_update_and_mask[
:, :, :vr, (center - half_vr) : (center + half_vr)
]
np.logical_or(
self.explored_mask,
world_newly_explored.cpu().numpy(),
out=self.explored_mask,
)
return {
"egocentric_update": cropped[0, :-1].permute(1, 2, 0).cpu().numpy(),
"egocentric_mask": (cropped[0, -1:].view(vr, vr, 1) > 0.001)
.cpu()
.numpy(),
"explored_mask": np.array(self.explored_mask),
"map": np.logical_and(
self.explored_mask, (self.ground_truth_semantic_map > 0)
),
}
def reset(self, min_xyz: np.ndarray, object_hulls: Sequence[ObjectHull2d]):
"""Reset the map.
Resets the internally stored map.
# Parameters
min_xyz : An array of size (3,) corresponding to the minimum possible x, y, and z values that will be observed
as a point in a pointcloud when calling `.update(...)`. The (world-space) maps returned by calls to `update`
will have been normalized so the (0,0,:) entry corresponds to these minimum values.
object_hulls : The object hulls corresponding to objects in the scene. These will be used to
construct the map.
"""
self.min_xyz = min_xyz
self.build_ground_truth_map(object_hulls=object_hulls)
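# Quick visualization sketch for the static helper above (the map shape and
# filled region are arbitrary examples): color one object channel of a
# boolean semantic map and report the resulting RGB image shape.
def _semantic_map_color_sketch():
    dummy_map = np.zeros((64, 64, 3), dtype=bool)
    dummy_map[10:20, 10:20, 0] = True  # one object of type 0 present
    colored = SemanticMapBuilder.randomly_color_semantic_map(dummy_map)
    return colored.shape  # (64, 64, 3) uint8 RGB image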
| [
"[email protected]"
] | |
d80dd720858dc042a2f195293139c38d8a080e38 | 50de54517ef5e157b43598e412c477fd66890a3e | /Assignment 05/Problem 04.py | b0883029d93e9bfb1ca023132749cea0e5ea3943 | [] | no_license | Shihabsarker93/BRACU-CSE111 | f530be247bebaaee9cc5e85948dc070adae0c6ae | 17c95c76f84abffe9d9bdcb5861fbacbc510b5a6 | refs/heads/main | 2023-08-13T15:33:57.331850 | 2021-10-07T10:56:09 | 2021-10-07T10:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | class Color:
def __init__(self, color):
self.clr = color
def __add__(self, other):
self.clr = self.clr + other.clr
if self.clr == "redyellow" or self.clr == "yellowred":
self.clr = "Orange"
elif self.clr == "redblue" or self.clr == "bluered":
self.clr = "Violet"
elif self.clr == "yellowblue" or self.clr == "blueyellow":
self.clr = "Green"
return Color(self.clr)
C1 = Color(input("First Color: ").lower())
C2 = Color(input("Second Color: ").lower())
C3 = C1 + C2
print("Color formed:", C3.clr)
| [
"[email protected]"
] | |
51856a03ef40020ac8c9e0586c08bcf06f66111d | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003471.py | 2cabb575ba5e4c06ee0f115ab7c8fc7bab070e46 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher16359(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.4.1.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher16359._instance is None:
CommutativeMatcher16359._instance = CommutativeMatcher16359()
return CommutativeMatcher16359._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 16358
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.4.1.1.0', S(1))
except ValueError:
pass
else:
pass
# State 16360
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 16361
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst2
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp4 = subjects.popleft()
associative1 = tmp4
associative_type1 = type(tmp4)
subjects5 = deque(tmp4._args)
matcher = CommutativeMatcher16363.get()
tmp6 = subjects5
subjects5 = []
for s in tmp6:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp6, subst0):
pass
if pattern_index == 0:
pass
# State 16364
if len(subjects) == 0:
pass
# 0: f*x
yield 0, subst1
subjects.appendleft(tmp4)
return
yield
from .generated_part003472 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
1119b7cb0f9c85f7fa3a4421a3c00101eb810077 | 1595b644191c9c18a5503379703347a853b63348 | /investpy/stocks.py | bf4c06d8a6d8d0ebe499d46de5747c6612ac4bfb | [
"MIT"
] | permissive | ben-haim/investpy | d26c05c3e957d3ba623f408076746edbf5a8107b | 7ace4ac7693f505c199074de3333f56e6b89cfef | refs/heads/master | 2022-05-30T00:43:00.473082 | 2019-11-20T15:45:37 | 2019-11-20T15:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,164 | py | #!/usr/bin/python3
# Copyright 2018-2019 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
from datetime import datetime, date
import json
from random import randint
import pandas as pd
import pkg_resources
import requests
import unidecode
from lxml.html import fromstring
from investpy.utils.user_agent import get_random
from investpy.utils.data import Data
from investpy.data.stocks_data import stocks_as_df, stocks_as_list, stocks_as_dict
from investpy.data.stocks_data import stock_countries_as_list
def get_stocks(country=None):
"""
This function retrieves all the stock data stored in `stocks.csv` file, which previously was
retrieved from Investing.com. Since the resulting object is a matrix of data, the stock data is properly
structured in rows and columns, where columns are the stock data attribute names. Additionally, country
filtering can be specified, which will make this function return not all the stored stock data, but just
the stock data of the stocks from the introduced country.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.
Returns:
:obj:`pandas.DataFrame` - stocks_df:
The resulting :obj:`pandas.DataFrame` contains all the stock data from the introduced country if specified,
or from every country if None was specified, as indexed in Investing.com from the information previously
retrieved by investpy and stored on a csv file.
So on, the resulting :obj:`pandas.DataFrame` will look like::
country | name | full name | isin | currency | symbol
--------|------|-----------|------|----------|--------
xxxxxxx | xxxx | xxxxxxxxx | xxxx | xxxxxxxx | xxxxxx
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if stocks file was not found.
IOError: raised when stocks file is missing or empty.
"""
return stocks_as_df(country)
def get_stocks_list(country=None):
"""
This function retrieves all the stock symbols stored in `stocks.csv` file, which contains all the
data from the stocks as previously retrieved from Investing.com. So on, this function will just return
the stock symbols which will be one of the input parameters when it comes to stock data retrieval functions
from investpy. Additionally, note that the country filtering can be applied, which is really useful since
this function just returns the symbols and in stock data retrieval functions both the symbol and the country
must be specified and they must match.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.
Returns:
:obj:`list` - stocks_list:
The resulting :obj:`list` contains the all the stock symbols from the introduced country if specified,
or from every country if None was specified, as indexed in Investing.com from the information previously
retrieved by investpy and stored on a csv file.
In case the information was successfully retrieved, the :obj:`list` of stock symbols will look like::
stocks_list = ['TS', 'APBR', 'GGAL', 'TXAR', 'PAMP', ...]
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if stocks file was not found.
IOError: raised when stocks file is missing or empty.
"""
return stocks_as_list(country)
def get_stocks_dict(country=None, columns=None, as_json=False):
"""
This function retrieves all the stock information stored in the `stocks.csv` file and formats it as a
Python dictionary which contains the same information as the file, but every row is a :obj:`dict` and
all of them are contained in a :obj:`list`. Note that the dictionary structure is the same one as the
    JSON structure. Some optional parameters can be specified such as the country, columns or as_json, which
are a filtering by country so not to return all the stocks but just the ones from the introduced country,
the column names that want to be retrieved in case of needing just some columns to avoid unnecessary information
load, and whether the information wants to be returned as a JSON object or as a dictionary; respectively.
Args:
country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.
        columns (:obj:`list`, optional): column names of the stock data to retrieve, can be: <country, name, full_name, isin, currency, symbol>
as_json (:obj:`bool`, optional): if True the returned data will be a :obj:`json` object, if False, a :obj:`list` of :obj:`dict`.
Returns:
:obj:`list` of :obj:`dict` OR :obj:`json` - stocks_dict:
The resulting :obj:`list` of :obj:`dict` contains the retrieved data from every stock as indexed in Investing.com from
the information previously retrieved by investpy and stored on a csv file.
In case the information was successfully retrieved, the :obj:`list` of :obj:`dict` will look like::
stocks_dict = {
'country': country,
'name': name,
'full_name': full_name,
'tag': tag,
'isin': isin,
'id': id,
'currency': currency,
'symbol': symbol,
}
Raises:
ValueError: raised whenever any of the introduced arguments is not valid.
FileNotFoundError: raised if stocks file was not found.
IOError: raised when stocks file is missing or empty.
"""
return stocks_as_dict(country=country, columns=columns, as_json=as_json)
def get_stock_countries():
"""
This function returns a listing with all the available countries from where stocks can be retrieved, so to
let the user know which of them are available, since the parameter country is mandatory in every stock retrieval
function. Also, not just the available countries, but the required name is provided since Investing.com has a
certain country name standard and countries should be specified the same way they are in Investing.com.
Returns:
:obj:`list` - countries:
The resulting :obj:`list` contains all the available countries with stocks as indexed in Investing.com
Raises:
FileNotFoundError: raised if stock countries file was not found.
IOError: raised when stock countries file is missing or empty.
"""
return stock_countries_as_list()
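# Hedged usage sketch for the listing helpers above ('spain' is only an example
# country; the calls need the stocks.csv resources bundled with the package):
def _stock_listing_sketch():
    print(get_stock_countries()[:5])
    print(get_stocks_list(country='spain')[:5])
    print(get_stocks(country='spain').head())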
def get_stock_recent_data(stock, country, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves recent historical data from the introduced stock from Investing.com. So on, the recent data
of the introduced stock from the specified country will be retrieved and returned as a :obj:`pandas.DataFrame` if
the parameters are valid and the request to Investing.com succeeds. Note that additionally some optional parameters
can be specified: as_json and order, which let the user decide if the data is going to be returned as a
:obj:`json` or not, and if the historical data is going to be ordered ascending or descending (where the index is the
date), respectively.
Args:
stock (:obj:`str`): symbol of the stock to retrieve recent historical data from.
country (:obj:`str`): name of the country from where the stock is.
as_json (:obj:`bool`, optional):
to determine the format of the output data, either a :obj:`pandas.DataFrame` if False and a :obj:`json` if True.
order (:obj:`str`, optional): to define the order of the retrieved data which can either be ascending or descending.
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
The function can return either a :obj:`pandas.DataFrame` or a :obj:`json` object, containing the retrieved
recent data of the specified stock from the specified country. So on, the resulting dataframe contains the
open, high, low, close and volume values for the selected stock on market days and the currency in which those
values are presented.
The resulting recent data, in case that the default parameters were applied, will look like::
Date || Open | High | Low | Close | Volume | Currency
-----||------|------|-----|-------|--------|----------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx
but in case that as_json parameter was defined as True, then the output will be::
{
name: name,
recent: [
{
date: 'dd/mm/yyyy',
open: x,
high: x,
low: x,
close: x,
volume: x,
currency: x
},
...
]
}
Raises:
ValueError: raised whenever any of the introduced arguments is not valid or errored.
IOError: raised if stocks object/file was not found or unable to retrieve.
RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
ConnectionError: raised if connection to Investing.com could not be established.
IndexError: raised if stock recent data was unavailable or not found in Investing.com.
Examples:
>>> investpy.get_stock_recent_data(stock='bbva', country='spain')
Open High Low Close Volume Currency
Date
2019-08-13 4.263 4.395 4.230 4.353 27250000 EUR
2019-08-14 4.322 4.325 4.215 4.244 36890000 EUR
2019-08-15 4.281 4.298 4.187 4.234 21340000 EUR
2019-08-16 4.234 4.375 4.208 4.365 46080000 EUR
2019-08-19 4.396 4.425 4.269 4.269 18950000 EUR
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if interval not in ['Daily', 'Weekly', 'Monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if unidecode.unidecode(country.lower()) not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
stock = stock.strip()
stock = stock.lower()
if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
symbol = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'symbol']
id_ = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'id']
name = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'name']
stock_currency = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'currency']
header = symbol + ' Historical Data'
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"interval_sec": interval,
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
if elements_.xpath(".//td")[0].text_content() == 'No results found':
raise IndexError("ERR#0007: stock information unavailable or not found.")
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
stock_date = datetime.fromtimestamp(int(info[0]))
stock_date = date(stock_date.year, stock_date.month, stock_date.day)
stock_close = float(info[1].replace(',', ''))
stock_open = float(info[2].replace(',', ''))
stock_high = float(info[3].replace(',', ''))
stock_low = float(info[4].replace(',', ''))
stock_volume = 0
            if 'K' in info[5]:
                stock_volume = int(float(info[5].replace('K', '').replace(',', '')) * 1e3)
            elif 'M' in info[5]:
                stock_volume = int(float(info[5].replace('M', '').replace(',', '')) * 1e6)
            elif 'B' in info[5]:
                stock_volume = int(float(info[5].replace('B', '').replace(',', '')) * 1e9)
result.insert(len(result),
Data(stock_date, stock_open, stock_high, stock_low,
stock_close, stock_volume, stock_currency))
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_ = {
'name': name,
'recent':
[value.stock_as_json() for value in result]
}
return json.dumps(json_, sort_keys=False)
elif as_json is False:
df = pd.DataFrame.from_records([value.stock_to_dict() for value in result])
df.set_index('Date', inplace=True)
return df
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
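# NOTE (editor's example): the volume column scraped above is scaled with a
# K/M/B suffix before being stored as an int. A self-contained sketch of that
# parsing step; the helper name `_parse_volume` is hypothetical and is not
# part of the original investpy module:
def _parse_volume(raw_volume):
    """Turn strings such as '1.2K', '3M' or '0.5B' into a plain int."""
    for suffix, factor in (('K', 1e3), ('M', 1e6), ('B', 1e9)):
        if suffix in raw_volume:
            return int(float(raw_volume.replace(suffix, '').replace(',', '')) * factor)
    return int(float(raw_volume.replace(',', '')))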
def get_stock_historical_data(stock, country, from_date, to_date, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves historical data from the introduced stock from Investing.com. So on, the historical data
of the introduced stock from the specified country in the specified data range will be retrieved and returned as
a :obj:`pandas.DataFrame` if the parameters are valid and the request to Investing.com succeeds. Note that additionally
some optional parameters can be specified: as_json and order, which let the user decide if the data is going to
be returned as a :obj:`json` or not, and if the historical data is going to be ordered ascending or descending (where the
index is the date), respectively.
Args:
stock (:obj:`str`): symbol of the stock to retrieve historical data from.
country (:obj:`str`): name of the country from where the stock is.
from_date (:obj:`str`): date formatted as `dd/mm/yyyy`, since when data is going to be retrieved.
to_date (:obj:`str`): date formatted as `dd/mm/yyyy`, until when data is going to be retrieved.
as_json (:obj:`bool`, optional):
            to determine the format of the output data, either a :obj:`pandas.DataFrame` if False or a :obj:`json` if True.
order (:obj:`str`, optional): to define the order of the retrieved data which can either be ascending or descending.
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
The function can return either a :obj:`pandas.DataFrame` or a :obj:`json` object, containing the retrieved
historical data of the specified stock from the specified country. So on, the resulting dataframe contains the
open, high, low, close and volume values for the selected stock on market days and the currency in which those
values are presented.
            The returned data, in case we use default arguments, will look like::
Date || Open | High | Low | Close | Volume | Currency
-----||------|------|-----|-------|--------|----------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
historical: [
{
date: 'dd/mm/yyyy',
open: x,
high: x,
low: x,
close: x,
volume: x,
currency: x
},
...
]
}
Raises:
ValueError: raised whenever any of the introduced arguments is not valid or errored.
IOError: raised if stocks object/file was not found or unable to retrieve.
RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
ConnectionError: raised if connection to Investing.com could not be established.
IndexError: raised if stock historical data was unavailable or not found in Investing.com.
Examples:
>>> investpy.get_stock_historical_data(stock='bbva', country='spain', from_date='01/01/2010', to_date='01/01/2019')
Open High Low Close Volume Currency
Date
2010-01-04 12.73 12.96 12.73 12.96 0 EUR
2010-01-05 13.00 13.11 12.97 13.09 0 EUR
2010-01-06 13.03 13.17 13.02 13.12 0 EUR
2010-01-07 13.02 13.11 12.93 13.05 0 EUR
2010-01-08 13.12 13.22 13.04 13.18 0 EUR
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if interval not in ['Daily', 'Weekly', 'Monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
try:
datetime.strptime(from_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect from_date date format, it should be 'dd/mm/yyyy'.")
try:
datetime.strptime(to_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0012: incorrect to_date format, it should be 'dd/mm/yyyy'.")
start_date = datetime.strptime(from_date, '%d/%m/%Y')
end_date = datetime.strptime(to_date, '%d/%m/%Y')
if start_date >= end_date:
raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")
date_interval = {
'intervals': [],
}
flag = True
while flag is True:
diff = end_date.year - start_date.year
if diff > 20:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': start_date.replace(year=start_date.year + 20).strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
start_date = start_date.replace(year=start_date.year + 20)
else:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': end_date.strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
flag = False
interval_limit = len(date_interval['intervals'])
interval_counter = 0
data_flag = False
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if unidecode.unidecode(country.lower()) not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
stock = stock.strip()
stock = stock.lower()
if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
symbol = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'symbol']
id_ = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'id']
name = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'name']
stock_currency = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'currency']
final = list()
header = symbol + ' Historical Data'
for index in range(len(date_interval['intervals'])):
interval_counter += 1
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"st_date": date_interval['intervals'][index]['start'],
"end_date": date_interval['intervals'][index]['end'],
"interval_sec": interval,
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
if not req.text:
continue
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
if elements_.xpath(".//td")[0].text_content() == 'No results found':
if interval_counter < interval_limit:
data_flag = False
else:
raise IndexError("ERR#0007: stock information unavailable or not found.")
else:
data_flag = True
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
if data_flag is True:
stock_date = datetime.fromtimestamp(int(info[0]))
stock_date = date(stock_date.year, stock_date.month, stock_date.day)
stock_close = float(info[1].replace(',', ''))
stock_open = float(info[2].replace(',', ''))
stock_high = float(info[3].replace(',', ''))
stock_low = float(info[4].replace(',', ''))
stock_volume = 0
                    if 'K' in info[5]:
                        stock_volume = int(float(info[5].replace('K', '').replace(',', '')) * 1e3)
                    elif 'M' in info[5]:
                        stock_volume = int(float(info[5].replace('M', '').replace(',', '')) * 1e6)
                    elif 'B' in info[5]:
                        stock_volume = int(float(info[5].replace('B', '').replace(',', '')) * 1e9)
result.insert(len(result),
Data(stock_date, stock_open, stock_high, stock_low,
stock_close, stock_volume, stock_currency))
if data_flag is True:
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_ = {
'name': name,
'historical':
[value.stock_as_json() for value in result]
}
final.append(json_)
elif as_json is False:
df = pd.DataFrame.from_records([value.stock_to_dict() for value in result])
df.set_index('Date', inplace=True)
final.append(df)
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if as_json is True:
return json.dumps(final[0], sort_keys=False)
elif as_json is False:
return pd.concat(final)
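# NOTE (editor's example): the function above splits [from_date, to_date] into
# chunks of at most 20 years before looping, presumably an Investing.com
# request limit. A standalone sketch of that splitting logic; the helper name
# is hypothetical and both inputs are assumed to be datetime objects:
def _split_date_range(start_date, end_date, max_years=20):
    intervals = []
    while end_date.year - start_date.year > max_years:
        split = start_date.replace(year=start_date.year + max_years)
        intervals.append((start_date, split))
        start_date = split
    intervals.append((start_date, end_date))
    return intervals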
def get_stock_company_profile(stock, country='spain', language='english'):
"""
This function retrieves the company profile of a stock company in the specified language. This
function is really useful if NLP techniques want to be applied to stocks, since the company profile
is a short description of what the company does and since it is written by the company, it can give
    the user an overview of what the company does. The company profile can be retrieved either in english
    or in spanish; the only thing that changes is the source from where the data is retrieved, but the
    resulting object will be the same. Note that this functionality, as described in the docs, is currently
    just supported for spanish stocks, so if a stock from any other country is introduced as parameter,
    the function will raise an exception.
Args:
stock (:obj:`str`): symbol of the stock to retrieve its company profile from.
country (:obj:`str`): name of the country from where the stock is.
language (:obj:`str`, optional): language in which the company profile is going to be retrieved, can either be english or spanish.
Returns:
:obj:`dict` - company_profile:
The resulting :obj:`dict` contains the retrieved company profile from the selected source depending
on the specified language in the function parameters, which can be either Investing.com (english)
or Bolsa de Madrid (spanish); and the URL from where it was retrieved, so to have both the source
and the description of the company_profile.
So the resulting :obj:`dict` should look like::
company_profile = {
url: 'https://www.investing.com/equities/bbva-company-profile',
desc: 'Banco Bilbao Vizcaya Argentaria, S.A. (BBVA) is a ...'
}
Raises:
ValueError: raised whenever any of the introduced arguments is not valid or errored.
FileNotFound: raised if the `stocks.csv` file was not found or unable to retrieve.
IOError: raised if stocks object/file was not found or unable to retrieve.
RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
ConnectionError: raised if connection to Investing.com could not be established.
Examples:
>>> investpy.get_stock_company_profile(stock='bbva', country='spain', language='english')
company_profile = {
url: 'https://www.investing.com/equities/bbva-company-profile',
desc: 'Banco Bilbao Vizcaya Argentaria, S.A. (BBVA) is a ...'
}
"""
available_sources = {
'english': 'Investing',
'en': 'Investing',
'spanish': 'Bolsa de Madrid',
'es': 'Bolsa de Madrid',
}
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if language.lower() not in available_sources.keys():
raise ValueError(
"ERR#0014: the specified language is not valid, it can just be either spanish (es) or english (en).")
if unidecode.unidecode(country.lower()) not in ['spain']:
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
selected_source = available_sources[language.lower()]
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
stock = stock.strip()
if unidecode.unidecode(stock.lower()) not in [unidecode.unidecode(value.lower()) for value in
stocks['symbol'].tolist()]:
raise RuntimeError("ERR#0018: stock " + stock.lower() + " not found, check if it is correct.")
company_profile = {
'url': None,
'desc': None
}
if selected_source == 'Bolsa de Madrid':
isin = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'isin']
url = "http://www.bolsamadrid.es/esp/aspx/Empresas/FichaValor.aspx?ISIN=" + isin
company_profile['url'] = url
head = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//td[contains(@class, 'Perfil')]/p")
if path_:
text = list()
for element_ in path_:
if not element_.xpath(".//a"):
text.append(element_.text_content())
text = ''.join(text)
company_profile['desc'] = ' '.join(text.replace('\n', ' ').replace('\xa0', ' ').split())
return company_profile
else:
return company_profile
elif selected_source == 'Investing':
tag = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'tag']
url = "https://www.investing.com/equities/" + tag + "-company-profile"
company_profile['url'] = url
head = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//*[@id=\"profile-fullStory-showhide\"]")
if path_:
company_profile['desc'] = str(path_[0].text_content())
return company_profile
else:
return company_profile
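# NOTE (editor's example): a quick usage sketch for the function above; the
# output shape matches the docstring, the actual text depends on the source:
#
#     profile = get_stock_company_profile(stock='bbva', country='spain',
#                                         language='english')
#     print(profile['url'])   # source page the description was scraped from
#     print(profile['desc'])  # short company description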
def get_stock_dividends(stock, country):
"""
    This function retrieves the stock dividends of the introduced stock, which are rewards paid to the
    shareholders for their investment in a company's stock/equity. Dividends data include the date of the
dividend, dividend value, type, payment date and yield. This information is really useful when it comes
to creating portfolios.
Args:
stock (:obj:`str`): symbol of the stock to retrieve its dividends from.
        country (:obj:`str`): name of the country from where the stock is.
Returns:
:obj:`pandas.DataFrame` - stock_dividends:
Returns a :obj:`pandas.DataFrame` containing the retrieved information of stock dividends for every stock
symbol introduced as parameter.
So on, the resulting :obj:`pandas.DataFrame` will look like::
Date Dividend Type Payment Date Yield
0 2019-10-11 0.2600 trailing_twelve_months 2019-10-15 5,67%
1 2019-04-08 0.2600 trailing_twelve_months 2019-04-10 5,53%
2 2018-06-11 0.3839 trailing_twelve_months 2018-06-13 3,96%
3 2018-04-06 0.2400 trailing_twelve_months 2018-04-10 4,41%
4 2017-10-06 0.3786 trailing_twelve_months 2017-10-10 4,45%
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock symbol.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if unidecode.unidecode(country.lower()) not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
stocks = stocks[stocks['country'].str.lower() == unidecode.unidecode(country.lower())]
stock = stock.strip()
stock = stock.lower()
if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
tag_ = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'tag']
headers = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = 'https://es.investing.com/equities/' + str(tag_) + '-dividends'
req = requests.get(url=url, headers=headers)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[contains(@id, 'dividendsHistoryData')]")
if path_:
more_results_id = path_[0].get('id').replace('dividendsHistoryData', '')
path_ = root_.xpath(".//table[@id='dividendsHistoryData" + str(more_results_id) + "']/tbody/tr")
objs = list()
type_values = {
'1': 'monthly',
'2': 'quarterly',
'3': 'semi_annual',
'4': 'annual',
'5': 'trailing_twelve_months',
}
if path_:
last_timestamp = path_[-1].get('event_timestamp')
for elements_ in path_:
dividend_date = dividend_value = dividend_type = dividend_payment_date = dividend_yield = None
for element_ in elements_.xpath(".//td"):
if element_.get('class'):
                    if 'first' in element_.get('class'):
dividend_date = datetime.strptime(element_.text_content().strip().replace('.', '-'), '%d-%m-%Y')
dividend_value = float(element_.getnext().text_content().replace('.', '').replace(',', '.'))
if element_.get('data-value') in type_values.keys():
dividend_type = type_values[element_.get('data-value')]
dividend_payment_date = datetime.strptime(element_.getnext().text_content().strip().replace('.', '-'), '%d-%m-%Y')
next_element_ = element_.getnext()
dividend_yield = next_element_.getnext().text_content()
obj = {
'Date': dividend_date,
'Dividend': dividend_value,
'Type': dividend_type,
'Payment Date': dividend_payment_date,
'Yield': dividend_yield,
}
objs.append(obj)
flag = True
while flag is True:
headers = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
params = {
'pairID': int(more_results_id),
'last_timestamp': int(last_timestamp)
}
url = 'https://es.investing.com/equities/MoreDividendsHistory'
req = requests.post(url=url, headers=headers, params=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
res = req.json()
if res['hasMoreHistory'] is False:
flag = False
root_ = fromstring(res['historyRows'])
path_ = root_.xpath(".//tr")
if path_:
last_timestamp = path_[-1].get('event_timestamp')
for elements_ in path_:
dividend_date = dividend_value = dividend_type = dividend_payment_date = dividend_yield = None
for element_ in elements_.xpath(".//td"):
if element_.get('class'):
                        if 'first' in element_.get('class'):
dividend_date = datetime.strptime(element_.text_content().strip().replace('.', '-'), '%d-%m-%Y')
dividend_value = float(
element_.getnext().text_content().replace('.', '').replace(',', '.'))
if element_.get('data-value') in type_values.keys():
dividend_type = type_values[element_.get('data-value')]
dividend_payment_date = datetime.strptime(element_.getnext().text_content().strip().replace('.', '-'), '%d-%m-%Y')
next_element_ = element_.getnext()
dividend_yield = next_element_.getnext().text_content()
obj = {
'Date': dividend_date,
'Dividend': dividend_value,
'Type': dividend_type,
'Payment Date': dividend_payment_date,
'Yield': dividend_yield,
}
objs.append(obj)
df = pd.DataFrame(objs)
return df
else:
raise RuntimeError("ERR#0061: introduced stock has no dividend's data to display.")
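# NOTE (editor's example): the dividends come back as a plain DataFrame, so
# they compose with the usual pandas idioms; a usage sketch (the symbol and
# the filter value are illustrative):
#
#     dividends = get_stock_dividends(stock='bbva', country='spain')
#     annual_only = dividends[dividends['Type'] == 'annual']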
def search_stocks(by, value):
"""
This function searches stocks by the introduced value for the specified field. This means that this function
is going to search if there is a value that matches the introduced one for the specified field which is the
`stocks.csv` column name to search in. Available fields to search stocks are 'name', 'full_name' and 'isin'.
Args:
by (:obj:`str`): name of the field to search for, which is the column name which can be: 'name', 'full_name' or 'isin'.
value (:obj:`str`): value of the field to search for, which is the value that is going to be searched.
Returns:
:obj:`pandas.DataFrame` - search_result:
The resulting :obj:`pandas.DataFrame` contains the search results from the given query, which is
any match of the specified value in the specified field. If there are no results for the given query,
an error will be raised, but otherwise the resulting :obj:`pandas.DataFrame` will contain all the
available stocks that match the introduced query.
Raises:
ValueError: raised if any of the introduced parameters is not valid or errored.
IOError: raised if data could not be retrieved due to file error.
RuntimeError: raised if no results were found for the introduced value in the introduced field.
"""
available_search_fields = ['name', 'full_name', 'isin']
if not by:
raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
if not isinstance(by, str):
raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
if isinstance(by, str) and by not in available_search_fields:
raise ValueError('ERR#0026: the introduced field to search can either just be '
+ ' or '.join(available_search_fields))
if not value:
raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
if not isinstance(value, str):
raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
stocks['matches'] = stocks[by].str.contains(value, case=False)
search_result = stocks.loc[stocks['matches'] == True].copy()
if len(search_result) == 0:
raise RuntimeError('ERR#0043: no results were found for the introduced ' + str(by) + '.')
search_result.drop(columns=['tag', 'id', 'matches'], inplace=True)
search_result.reset_index(drop=True, inplace=True)
return search_result
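# NOTE (editor's example): `search_stocks` does a case-insensitive substring
# match over the chosen column, so partial names work; a usage sketch:
#
#     matches = search_stocks(by='name', value='bank')
#     print(matches[['country', 'name', 'symbol']].head())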
| [
"[email protected]"
] | |
c3e9e1cc8cff9dfb3fae569dd6c04fa4f03eb0c9 | b1fbe7460427dbb891d4b1951e43e551e86b1e3b | /arcnlp/torch/nn/encoders/rnn_encoder.py | 650f222a3a14fcc7307c3081e46773c751216295 | [] | no_license | linhx13/arc-nlp | 88a45601e09deb7883ddf4583f6f2f4607fb85d0 | 760cca0d44958fb4011eaa039263575388a858ae | refs/heads/master | 2023-05-04T12:59:21.232168 | 2021-05-18T17:38:28 | 2021-05-18T17:38:28 | 230,442,944 | 1 | 0 | null | 2021-04-17T03:41:42 | 2019-12-27T12:48:02 | Python | UTF-8 | Python | false | false | 3,606 | py | import torch
import torch.nn as nn
from ...nn.utils import get_sequence_lengths
__all__ = ["RNNEncoder", "LSTMEncoder", "GRUEncoder"]
class _RNNBaseEncoder(nn.Module):
def __init__(self, module, return_sequences):
super(_RNNBaseEncoder, self).__init__()
self.module = module
self.return_sequences = return_sequences
@property
def input_dim(self) -> int:
return self.module.input_size
@property
def output_dim(self) -> int:
return self.module.hidden_size * (
2 if self.module.bidirectional else 1
)
def forward(
self,
inputs: torch.Tensor,
mask: torch.BoolTensor = None,
hidden_state: torch.Tensor = None,
) -> torch.Tensor:
if mask is None:
outputs, _ = self.module(inputs, hidden_state)
if self.return_sequences:
return outputs
else:
return outputs[:, -1, :]
total_length = inputs.size(1)
lengths = get_sequence_lengths(mask)
packed_inputs = nn.utils.rnn.pack_padded_sequence(
inputs, lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed_outputs, state = self.module(packed_inputs, hidden_state)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
packed_outputs, batch_first=True, total_length=total_length
)
if self.return_sequences:
return outputs
else:
if isinstance(state, tuple):
state = state[0]
state = state.transpose(0, 1)
num_directions = 2 if self.module.bidirectional else 1
last_state = state[:, -num_directions:, :]
return last_state.contiguous().view([-1, self.output_dim])
class RNNEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class LSTMEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
module = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
class GRUEncoder(_RNNBaseEncoder):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
return_sequences: bool = False,
):
        module = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module, return_sequences)
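# NOTE (editor's example): a minimal usage sketch for the encoders above; the
# demo function is illustrative and not part of the original module. The mask
# is a boolean tensor with True on real (non-padding) positions.
def _demo_lstm_encoder():
    encoder = LSTMEncoder(input_size=100, hidden_size=64,
                          bidirectional=True, return_sequences=False)
    inputs = torch.randn(8, 20, 100)            # (batch, seq_len, features)
    mask = torch.ones(8, 20, dtype=torch.bool)  # every position is valid
    return encoder(inputs, mask)                # shape: (8, 128) == output_dim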
| [
"[email protected]"
] | |
c5b7c0831380a9b4fd9effc5cea7908430770144 | 92f6ffb240a1fbaa52ae23f614663b2b915e4187 | /backend/home/migrations/0002_load_initial_data.py | 79e2423fce37abb55ff579976a85fe906d1c2f41 | [] | no_license | crowdbotics-apps/msgs-sghsg56-dev-12782 | 3b196351f5ff932916802912c7740c7455a78459 | 10f95c9e897dcad50e21950879adc97b9fe689f4 | refs/heads/master | 2022-12-24T00:35:35.056481 | 2020-10-06T09:11:48 | 2020-10-06T09:11:48 | 301,672,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "MSGS-sghsg56"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">MSGS-sghsg56</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "msgs-sghsg56-dev-12782.botics.co"
site_params = {
"name": "MSGS-sghsg56",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
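# NOTE (editor's example): RunPython migrations like the ones above are
# irreversible by default; if rollbacks are needed, Django accepts a reverse
# callable, e.g. migrations.RunPython(create_site, migrations.RunPython.noop).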
| [
"[email protected]"
] | |
47c023614d7d2ba1c4b4f921d42350aec154cb40 | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/openstackclient/common/clientmanager.py | 3e1a50e3e6423cbe6c7010004e2266d04e7627b8 | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 5,435 | py | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Manage access to the clients, including authenticating when needed."""
import logging
import pkg_resources
import sys
from osc_lib import clientmanager
from osc_lib import shell
LOG = logging.getLogger(__name__)
PLUGIN_MODULES = []
USER_AGENT = 'python-openstackclient'
class ClientManager(clientmanager.ClientManager):
"""Manages access to API clients, including authentication
Wrap osc_lib's ClientManager to maintain compatibility for the existing
plugin V2 interface. Some currently private attributes become public
in osc-lib so we need to maintain a transition period.
"""
# A simple incrementing version for the plugin to know what is available
PLUGIN_INTERFACE_VERSION = "2"
def __init__(
self,
cli_options=None,
api_version=None,
):
super(ClientManager, self).__init__(
cli_options=cli_options,
api_version=api_version,
# TODO(dtroyer): Remove this when osc-lib 1.2 is released
pw_func=shell.prompt_for_password,
)
# TODO(dtroyer): For compatibility; mark this for removal when plugin
# interface v2 is removed
self._region_name = self.region_name
self._interface = self.interface
self._cacert = self.cacert
self._insecure = not self.verify
# store original auth_type
self._original_auth_type = cli_options.auth_type
def setup_auth(self):
"""Set up authentication"""
if self._auth_setup_completed:
return
# NOTE(dtroyer): Validate the auth args; this is protected with 'if'
# because openstack_config is an optional argument to
# CloudConfig.__init__() and we'll die if it was not
# passed.
if self._cli_options._openstack_config is not None:
self._cli_options._openstack_config._pw_callback = \
shell.prompt_for_password
try:
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
except TypeError as e:
self._fallback_load_auth_plugin(e)
return super(ClientManager, self).setup_auth()
def _fallback_load_auth_plugin(self, e):
# NOTES(RuiChen): Hack to avoid auth plugins choking on data they don't
# expect, delete fake token and endpoint, then try to
# load auth plugin again with user specified options.
# We know it looks ugly, but it's necessary.
if self._cli_options.config['auth']['token'] == 'x':
# restore original auth_type
self._cli_options.config['auth_type'] = \
self._original_auth_type
del self._cli_options.config['auth']['token']
del self._cli_options.config['auth']['endpoint']
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
else:
raise e
def is_network_endpoint_enabled(self):
"""Check if the network endpoint is enabled"""
# NOTE(dtroyer): is_service_available() can also return None if
# there is no Service Catalog, callers here are
# not expecting that so fold None into True to
# use Network API by default
return self.is_service_available('network') is not False
# Plugin Support
def get_plugin_modules(group):
"""Find plugin entry points"""
mod_list = []
for ep in pkg_resources.iter_entry_points(group):
LOG.debug('Found plugin %r', ep.name)
__import__(ep.module_name)
module = sys.modules[ep.module_name]
mod_list.append(module)
init_func = getattr(module, 'Initialize', None)
if init_func:
init_func('x')
# Add the plugin to the ClientManager
setattr(
clientmanager.ClientManager,
module.API_NAME,
clientmanager.ClientCache(
getattr(sys.modules[ep.module_name], 'make_client', None)
),
)
return mod_list
def build_plugin_option_parser(parser):
"""Add plugin options to the parser"""
# Loop through extensions to get parser additions
for mod in PLUGIN_MODULES:
parser = mod.build_option_parser(parser)
return parser
# Get list of base plugin modules
PLUGIN_MODULES = get_plugin_modules(
'openstack.cli.base',
)
# Append list of external plugin modules
PLUGIN_MODULES.extend(get_plugin_modules(
'openstack.cli.extension',
))
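# NOTE (editor's example): get_plugin_modules() above imports each entry-point
# module and reads module.API_NAME unconditionally; make_client() is looked up
# with getattr, Initialize() is called if present, and build_option_parser()
# is invoked by build_plugin_option_parser(). A hypothetical minimal plugin
# module would therefore look like:
#
#     API_NAME = 'myservice'
#
#     def make_client(instance):
#         """Return a configured client for this API."""
#         ...
#
#     def build_option_parser(parser):
#         return parser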
| [
"[email protected]"
] | |
24a54a7565b8d38155fddd08742ae1389e50ac05 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5695413893988352_0/Python/algomaus/b.py | 2d3892eda43ade0e73e3d0638dc64a9dc402c531 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | #! /usr/bin/env python
def parse(lines):
n = int(lines[0])
words = []
for i in range(n):
words.append(lines[i+1])
return words
def asInt(string, lastPosition):
if lastPosition == -1:
return 0
#if lastPosition == 0 and string[0] == '?':
#return 0
lis = []
for i in range(lastPosition+1):
lis.append(string[i])
return int(''.join(lis).replace(' ','').replace('[','').replace(']','').replace(',',''))
def solve(word):
splitted = word.split(' ')
coder = []
jammer = []
for i in splitted[0]:
coder.append(i)
for i in splitted[1]:
jammer.append(i)
coder_add = []
jammer_add = []
for i in range(len(coder)):
if coder[i] == '?' and jammer[i] == '?':
if i == 0 or (asInt(coder, i-1) == asInt(jammer, i-1)):
if i+1 < len(coder) and coder[i+1] != '?' and jammer[i+1] != '?':
if coder[i+1] > jammer[i+1]:
coder[i] = '0'
coder_add.append('0')
jammer[i] = '1'
jammer_add.append('1')
elif coder[i+1] < jammer[i+1]:
coder[i] = '1'
coder_add.append('1')
jammer[i] = '0'
jammer_add.append('0')
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
jammer[i] = '9'
jammer_add.append(9)
else:
coder[i] = '9'
coder_add.append(9)
jammer[i] = '0'
jammer_add.append(0)
elif coder[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
coder[i] = jammer[i]
coder_add.append(jammer[i])
#if int(jammer[i]) <= 5:
#coder[i] = '0'
#coder_add.append(0)
#else:
#coder[i] = '9'
#coder_add.append(9)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
else:
coder[i] = '9'
coder_add.append(9)
elif jammer[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
jammer[i] = coder[i]
jammer_add.append(coder[i])
#if int(coder[i]) <= 5:
# jammer[i] = '0'
# jammer_add.append(0)
#else:
# jammer[i] = '9'
# jammer_add.append(9)
elif asInt(coder, i-1) < asInt(jammer, i-1):
jammer[i] = '0'
jammer_add.append(0)
else:
jammer[i] = '9'
jammer_add.append(9)
coder_add_str = str(coder).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
jammer_add_str = str(jammer).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
return coder_add_str + " " + jammer_add_str
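# NOTE (editor's explanation): the greedy idea in solve(): while the two
# prefixes are tied, copy the opponent's known digit (or look one digit ahead
# when both positions are '?'); once one prefix is strictly larger, give the
# leading side '0' and the trailing side '9' to shrink the gap as fast as
# possible, which keeps the final difference |C - J| minimal digit by digit.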
#with open('A-large.in', 'r') as f:
with open('B-small-attempt1.in', 'r') as f:
words = parse(f.read().splitlines())
for i in range(len(words)):
wordSorted = solve(words[i])
print "Case #" + str(i+1) + ": " + wordSorted
| [
"[email protected]"
] | |
1ebf50f2fe945bd4d55d54c13e76a24165a05cf2 | a0f0efaaaf69d6ccdc2a91596db29f04025f122c | /build/botcmd_msgs/devel/lib/python2.7/dist-packages/botcmd_msgs/srv/_bot_getenabledi_cmd.py | 56881c75882d7bfcd72f305eeff5b2ca7dffd6bc | [] | no_license | chiuhandsome/ros_ws_test-git | 75da2723154c0dadbcec8d7b3b1f3f8b49aa5cd6 | 619909130c23927ccc902faa3ff6d04ae0f0fba9 | refs/heads/master | 2022-12-24T05:45:43.845717 | 2020-09-22T10:12:54 | 2020-09-22T10:12:54 | 297,582,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,723 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdRequest(genpy.Message):
_md5sum = "481ac5a494c3140a2539020bd74c82c7"
_type = "botcmd_msgs/bot_getenabledi_cmdRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """int8 command
"""
__slots__ = ['command']
_slot_types = ['int8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
command
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(bot_getenabledi_cmdRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.command is None:
self.command = 0
else:
self.command = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.command
buff.write(_get_struct_b().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.command,) = _get_struct_b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.command
buff.write(_get_struct_b().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.command,) = _get_struct_b().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_b = None
def _get_struct_b():
global _struct_b
if _struct_b is None:
_struct_b = struct.Struct("<b")
return _struct_b
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdResponse(genpy.Message):
_md5sum = "01a64608314d5f77b6df20caba78d455"
_type = "botcmd_msgs/bot_getenabledi_cmdResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool result
int32 status
"""
__slots__ = ['result','status']
_slot_types = ['bool','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
result,status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(bot_getenabledi_cmdResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.result is None:
self.result = False
if self.status is None:
self.status = 0
else:
self.result = False
self.status = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.result, _x.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.result, _x.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Bi = None
def _get_struct_Bi():
global _struct_Bi
if _struct_Bi is None:
_struct_Bi = struct.Struct("<Bi")
return _struct_Bi
class bot_getenabledi_cmd(object):
_type = 'botcmd_msgs/bot_getenabledi_cmd'
_md5sum = 'c310784b062f6ef0f7752130ef306c28'
_request_class = bot_getenabledi_cmdRequest
_response_class = bot_getenabledi_cmdResponse
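# NOTE (editor's example): a typical client-side call for this generated ROS
# service; the service name 'bot_getenabledi_cmd' is hypothetical and depends
# on how the node advertises it:
#
#     import rospy
#     rospy.wait_for_service('bot_getenabledi_cmd')
#     proxy = rospy.ServiceProxy('bot_getenabledi_cmd', bot_getenabledi_cmd)
#     resp = proxy(command=1)
#     print(resp.result, resp.status)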
| [
"[email protected]"
] | |
4570702ee558fd5356cbb6e61347d548044dc91f | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_velkoz/na_velkoz_jng.py | 03ed1057207505472af839e30740f1e89491e018 | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | from getratings.models.ratings import Ratings
class NA_Velkoz_Jng_Aatrox(Ratings):
pass
class NA_Velkoz_Jng_Ahri(Ratings):
pass
class NA_Velkoz_Jng_Akali(Ratings):
pass
class NA_Velkoz_Jng_Alistar(Ratings):
pass
class NA_Velkoz_Jng_Amumu(Ratings):
pass
class NA_Velkoz_Jng_Anivia(Ratings):
pass
class NA_Velkoz_Jng_Annie(Ratings):
pass
class NA_Velkoz_Jng_Ashe(Ratings):
pass
class NA_Velkoz_Jng_AurelionSol(Ratings):
pass
class NA_Velkoz_Jng_Azir(Ratings):
pass
class NA_Velkoz_Jng_Bard(Ratings):
pass
class NA_Velkoz_Jng_Blitzcrank(Ratings):
pass
class NA_Velkoz_Jng_Brand(Ratings):
pass
class NA_Velkoz_Jng_Braum(Ratings):
pass
class NA_Velkoz_Jng_Caitlyn(Ratings):
pass
class NA_Velkoz_Jng_Camille(Ratings):
pass
class NA_Velkoz_Jng_Cassiopeia(Ratings):
pass
class NA_Velkoz_Jng_Chogath(Ratings):
pass
class NA_Velkoz_Jng_Corki(Ratings):
pass
class NA_Velkoz_Jng_Darius(Ratings):
pass
class NA_Velkoz_Jng_Diana(Ratings):
pass
class NA_Velkoz_Jng_Draven(Ratings):
pass
class NA_Velkoz_Jng_DrMundo(Ratings):
pass
class NA_Velkoz_Jng_Ekko(Ratings):
pass
class NA_Velkoz_Jng_Elise(Ratings):
pass
class NA_Velkoz_Jng_Evelynn(Ratings):
pass
class NA_Velkoz_Jng_Ezreal(Ratings):
pass
class NA_Velkoz_Jng_Fiddlesticks(Ratings):
pass
class NA_Velkoz_Jng_Fiora(Ratings):
pass
class NA_Velkoz_Jng_Fizz(Ratings):
pass
class NA_Velkoz_Jng_Galio(Ratings):
pass
class NA_Velkoz_Jng_Gangplank(Ratings):
pass
class NA_Velkoz_Jng_Garen(Ratings):
pass
class NA_Velkoz_Jng_Gnar(Ratings):
pass
class NA_Velkoz_Jng_Gragas(Ratings):
pass
class NA_Velkoz_Jng_Graves(Ratings):
pass
class NA_Velkoz_Jng_Hecarim(Ratings):
pass
class NA_Velkoz_Jng_Heimerdinger(Ratings):
pass
class NA_Velkoz_Jng_Illaoi(Ratings):
pass
class NA_Velkoz_Jng_Irelia(Ratings):
pass
class NA_Velkoz_Jng_Ivern(Ratings):
pass
class NA_Velkoz_Jng_Janna(Ratings):
pass
class NA_Velkoz_Jng_JarvanIV(Ratings):
pass
class NA_Velkoz_Jng_Jax(Ratings):
pass
class NA_Velkoz_Jng_Jayce(Ratings):
pass
class NA_Velkoz_Jng_Jhin(Ratings):
pass
class NA_Velkoz_Jng_Jinx(Ratings):
pass
class NA_Velkoz_Jng_Kalista(Ratings):
pass
class NA_Velkoz_Jng_Karma(Ratings):
pass
class NA_Velkoz_Jng_Karthus(Ratings):
pass
class NA_Velkoz_Jng_Kassadin(Ratings):
pass
class NA_Velkoz_Jng_Katarina(Ratings):
pass
class NA_Velkoz_Jng_Kayle(Ratings):
pass
class NA_Velkoz_Jng_Kayn(Ratings):
pass
class NA_Velkoz_Jng_Kennen(Ratings):
pass
class NA_Velkoz_Jng_Khazix(Ratings):
pass
class NA_Velkoz_Jng_Kindred(Ratings):
pass
class NA_Velkoz_Jng_Kled(Ratings):
pass
class NA_Velkoz_Jng_KogMaw(Ratings):
pass
class NA_Velkoz_Jng_Leblanc(Ratings):
pass
class NA_Velkoz_Jng_LeeSin(Ratings):
pass
class NA_Velkoz_Jng_Leona(Ratings):
pass
class NA_Velkoz_Jng_Lissandra(Ratings):
pass
class NA_Velkoz_Jng_Lucian(Ratings):
pass
class NA_Velkoz_Jng_Lulu(Ratings):
pass
class NA_Velkoz_Jng_Lux(Ratings):
pass
class NA_Velkoz_Jng_Malphite(Ratings):
pass
class NA_Velkoz_Jng_Malzahar(Ratings):
pass
class NA_Velkoz_Jng_Maokai(Ratings):
pass
class NA_Velkoz_Jng_MasterYi(Ratings):
pass
class NA_Velkoz_Jng_MissFortune(Ratings):
pass
class NA_Velkoz_Jng_MonkeyKing(Ratings):
pass
class NA_Velkoz_Jng_Mordekaiser(Ratings):
pass
class NA_Velkoz_Jng_Morgana(Ratings):
pass
class NA_Velkoz_Jng_Nami(Ratings):
pass
class NA_Velkoz_Jng_Nasus(Ratings):
pass
class NA_Velkoz_Jng_Nautilus(Ratings):
pass
class NA_Velkoz_Jng_Nidalee(Ratings):
pass
class NA_Velkoz_Jng_Nocturne(Ratings):
pass
class NA_Velkoz_Jng_Nunu(Ratings):
pass
class NA_Velkoz_Jng_Olaf(Ratings):
pass
class NA_Velkoz_Jng_Orianna(Ratings):
pass
class NA_Velkoz_Jng_Ornn(Ratings):
pass
class NA_Velkoz_Jng_Pantheon(Ratings):
pass
class NA_Velkoz_Jng_Poppy(Ratings):
pass
class NA_Velkoz_Jng_Quinn(Ratings):
pass
class NA_Velkoz_Jng_Rakan(Ratings):
pass
class NA_Velkoz_Jng_Rammus(Ratings):
pass
class NA_Velkoz_Jng_RekSai(Ratings):
pass
class NA_Velkoz_Jng_Renekton(Ratings):
pass
class NA_Velkoz_Jng_Rengar(Ratings):
pass
class NA_Velkoz_Jng_Riven(Ratings):
pass
class NA_Velkoz_Jng_Rumble(Ratings):
pass
class NA_Velkoz_Jng_Ryze(Ratings):
pass
class NA_Velkoz_Jng_Sejuani(Ratings):
pass
class NA_Velkoz_Jng_Shaco(Ratings):
pass
class NA_Velkoz_Jng_Shen(Ratings):
pass
class NA_Velkoz_Jng_Shyvana(Ratings):
pass
class NA_Velkoz_Jng_Singed(Ratings):
pass
class NA_Velkoz_Jng_Sion(Ratings):
pass
class NA_Velkoz_Jng_Sivir(Ratings):
pass
class NA_Velkoz_Jng_Skarner(Ratings):
pass
class NA_Velkoz_Jng_Sona(Ratings):
pass
class NA_Velkoz_Jng_Soraka(Ratings):
pass
class NA_Velkoz_Jng_Swain(Ratings):
pass
class NA_Velkoz_Jng_Syndra(Ratings):
pass
class NA_Velkoz_Jng_TahmKench(Ratings):
pass
class NA_Velkoz_Jng_Taliyah(Ratings):
pass
class NA_Velkoz_Jng_Talon(Ratings):
pass
class NA_Velkoz_Jng_Taric(Ratings):
pass
class NA_Velkoz_Jng_Teemo(Ratings):
pass
class NA_Velkoz_Jng_Thresh(Ratings):
pass
class NA_Velkoz_Jng_Tristana(Ratings):
pass
class NA_Velkoz_Jng_Trundle(Ratings):
pass
class NA_Velkoz_Jng_Tryndamere(Ratings):
pass
class NA_Velkoz_Jng_TwistedFate(Ratings):
pass
class NA_Velkoz_Jng_Twitch(Ratings):
pass
class NA_Velkoz_Jng_Udyr(Ratings):
pass
class NA_Velkoz_Jng_Urgot(Ratings):
pass
class NA_Velkoz_Jng_Varus(Ratings):
pass
class NA_Velkoz_Jng_Vayne(Ratings):
pass
class NA_Velkoz_Jng_Veigar(Ratings):
pass
class NA_Velkoz_Jng_Velkoz(Ratings):
pass
class NA_Velkoz_Jng_Vi(Ratings):
pass
class NA_Velkoz_Jng_Viktor(Ratings):
pass
class NA_Velkoz_Jng_Vladimir(Ratings):
pass
class NA_Velkoz_Jng_Volibear(Ratings):
pass
class NA_Velkoz_Jng_Warwick(Ratings):
pass
class NA_Velkoz_Jng_Xayah(Ratings):
pass
class NA_Velkoz_Jng_Xerath(Ratings):
pass
class NA_Velkoz_Jng_XinZhao(Ratings):
pass
class NA_Velkoz_Jng_Yasuo(Ratings):
pass
class NA_Velkoz_Jng_Yorick(Ratings):
pass
class NA_Velkoz_Jng_Zac(Ratings):
pass
class NA_Velkoz_Jng_Zed(Ratings):
pass
class NA_Velkoz_Jng_Ziggs(Ratings):
pass
class NA_Velkoz_Jng_Zilean(Ratings):
pass
class NA_Velkoz_Jng_Zyra(Ratings):
pass
| [
"[email protected]"
] | |
e878483efb96ff6a75498766da8723c34864fa39 | 694d3929b23a8434cab14ddab623030a0fe4ac38 | /apps/reports/views.py | f3a408b99c91eae03444b5863ff332d455c98ab2 | [] | no_license | gehongming/django_api | 03fec87a25c2ad3cb603aad2f1b5d9b680debf12 | fb8e0623e9171deb8706ed258cc5d5bd0d9fe6aa | refs/heads/main | 2023-09-01T06:57:22.677374 | 2021-10-12T05:39:07 | 2021-10-12T05:39:07 | 415,173,097 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | import json
import re
import os
from datetime import datetime
from django.http import StreamingHttpResponse
from django.utils.encoding import escape_uri_path
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions, status
from rest_framework.decorators import action
from rest_framework.settings import settings
from .models import Reports
from .serializer import ReportsSerializer
from .utils import format_output, get_file_contents
class ReportsViewSet(ModelViewSet):
"""
    list:
    Return the list data of test reports (multiple)
    create:
    Create a test report
    update:
    Update a test report
    partial_update:
    Update (part of) a test report
    destroy:
    Logically delete a test report
    retrieve:
    Get test report details
"""
queryset = Reports.objects.filter(is_delete=0)
serializer_class = ReportsSerializer
ordering_fields = ['name']
    # Permission classes
permission_classes = [permissions.IsAuthenticated]
def list(self, request, *args, **kwargs):
        # Call the parent class's list() method.
response = super().list(request, *args, **kwargs)
response.data['results'] = format_output(response.data['results'])
return response
    # Logical delete, overridden: the original destroy is a physical delete
def perform_destroy(self, instance):
        # Update the is_delete field
instance.is_delete = 1
instance.save()
@action(detail=True)
def download(self, request, pk=None):
        # 1. Get the HTML source
instance = self.get_object()
html = instance.html
name = instance.name
        # Use a regex to extract the stored file name
mtch = re.match(r'(.*_)\d+', name)
if mtch:
mtch = mtch.group(1)
report_filename = mtch + datetime.strftime(datetime.now(), '%Y%m%d%H%M%S' + '.html')
            # Build the file path; the project root is available via settings.BASE_DIR
else:
report_filename = name+'.html'
        # settings.REPORTS_DIR sets the reports directory.
report_path = os.path.join(settings.REPORTS_DIR, report_filename)
        # Write the HTML data into the saved HTML file.
with open(report_path, 'w+', encoding='utf-8') as one_file:
one_file.write(html)
        # Response format dedicated to file downloads
response = StreamingHttpResponse(get_file_contents(report_path))
report_path_final = escape_uri_path(report_filename)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = f"attachment; filename*=UTF-8''{report_path_final}"
return response
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
try:
            data['summary'] = json.loads(data['summary'])
return Response(data)
except Exception as e:
return Response({
                'err': 'The test report summary is malformed'
}, status=status.HTTP_400_BAD_REQUEST)
# def retrieve(self, request, *args, **kwargs):
# instance = self.get_object()
# try:
# summary = json.loads(instance.summary, encoding='utf-8')
# return Response({
# 'id': instance.id,
# 'summary': summary
# }, status=status.HTTP_200_OK)
# except Exception:
# return Response({
    #             'err': 'The test report summary is malformed'
# }, status=status.HTTP_400_BAD_REQUEST)
| [
"[email protected]"
] | |
397da806a95f70217bf79901c8e1ad9ffe4fcefe | e0ed932fc2e4edb953cc4e423362dabc19083008 | /python/002_note/learn_with/002_有异常的例子.py | 8704a23fa87700b015cb24d95bd2053e1d7f4bde | [] | no_license | glfAdd/note | 90baee45003ac3998d898dcfbc618caa28f33b74 | 19a9aff61450be25904bff0fe672f660d49d90ff | refs/heads/main | 2023-05-27T13:28:36.092352 | 2023-05-24T03:35:58 | 2023-05-24T03:35:58 | 240,066,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | class Test:
def __enter__(self):
        print('__enter__() was called!')
return self
@staticmethod
def start():
print('------------------------------ test')
return 1 / 0
def __exit__(self, exc_type, exc_value, traceback):
"""
@param exc_type:
@param exc_value:
@param traceback:
@return:
True: 不抛出异常
False: 抛出异常
"""
        print('__exit__() was called!')
print(f'exc_type:{exc_type}')
print(f'exc_value:{exc_value}')
print(f'traceback:{traceback}')
        print('__exit__() was called!')
return True
# return False
with Test() as t:
print('------------ 1')
t.start()
print('------------ 2')
raise TypeError
print('------------ 3')
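# Minimal sketch (my addition): had __exit__ returned False instead, the
# ZeroDivisionError raised by start() would propagate to the caller:
#
#     try:
#         with Test() as t:
#             t.start()
#     except ZeroDivisionError:
#         print('propagated because __exit__ returned False')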
| [
"[email protected]"
] | |
2e56820469786281eea6a55179cfaa0fae7337b3 | 5635a3b02f7695a50471c8c08970520858d2277c | /venv/bin/pyrsa-sign | 12ff831eef1cc1f5b697b68f04379992425ffe5c | [] | no_license | BethMwangi/Flask-social | 358325ea09b143c2aaa059594607d0a872fcabd1 | 4d0d902ee959054a95f0d7ab0dbfee3692521f91 | refs/heads/master | 2020-04-02T06:13:40.307975 | 2016-06-13T17:16:11 | 2016-06-13T17:16:11 | 60,806,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/home/beth/Documents/Github/Flask-social/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(sign())
| [
"[email protected]"
] | ||
e299c60be9d53012b8b77da119af0d359f1e54d0 | c4ffab6cc6b5470a212d1b6a0d241de9427266ee | /test/functional/rpc_bind.py | ee454df4bb87a910eb3f5749321d750a3a4c467f | [
"MIT"
] | permissive | Upsidedoge/upsidedoge | 1b8d49787eedb84cb7c5aff77549d7d1239ab807 | 32dd022d43b8b90ae1aa1ad7d81c0dfeb89611a2 | refs/heads/main | 2023-04-26T16:56:17.024158 | 2021-05-21T21:12:57 | 2021-05-21T21:12:57 | 369,643,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,428 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running upsidedoged with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| [
"[email protected]"
] | |
e496f6a4b65e3fb3ed5cffda376a44cc1e6829cb | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /py-appscript/tags/py-appscript-0.18.0/Lib/aem/types/objectspecifiers/testclause.py | cd023de05f848f30e64af15fc725899e0d6eb54c | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | """testclause -- Used to construct test expressions for use in by-filter references.
(C) 2005 HAS
"""
from CarbonX import kAE
import base
class Test(base.BASE):
"""Base class for all comparison and logic test classes (Equals, NotEquals, AND, OR, etc.)."""
def AND(self, operand2, *operands):
"""AND(test,...) --> logical AND test"""
return AND((self, operand2) + operands)
def OR(self, operand2, *operands):
"""OR(test,...) --> logical OR test"""
return OR((self, operand2) + operands)
NOT = property(lambda self: NOT((self,)), doc="NOT --> logical NOT test")
class _ComparisonTest(Test):
"""Subclassed by comparison test classes."""
def __init__(self, operand1, operand2):
self._operand1 = operand1
self._operand2 = operand2
def __repr__(self):
return "%r.%s(%r)" % (self._operand1, self._name, self._operand2)
def AEM_resolve(self, obj):
return getattr(self._operand1.AEM_resolve(obj), self._name)(self._operand2)
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand1)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand2))])
class GreaterThan(_ComparisonTest):
_name = "gt"
_operator = base.packEnum(kAE.kAEGreaterThan)
class GreaterOrEquals(_ComparisonTest):
_name = "ge"
_operator = base.packEnum(kAE.kAEGreaterThanEquals)
class Equals(_ComparisonTest):
_name = "eq"
_operator = base.packEnum(kAE.kAEEquals)
class NotEquals(Equals):
_name = "ne"
_operatorNOT = base.packEnum(kAE.kAENOT)
def AEM_packSelf(self, codecs):
return self._operand1.eq(self._operand2).NOT.AEM_packSelf(codecs)
class LessThan(_ComparisonTest):
_name = "lt"
_operator = base.packEnum(kAE.kAELessThan)
class LessOrEquals(_ComparisonTest):
_name = "le"
_operator = base.packEnum(kAE.kAELessThanEquals)
class BeginsWith(_ComparisonTest):
_name = "beginswith"
_operator = base.packEnum(kAE.kAEBeginsWith)
class EndsWith(_ComparisonTest):
_name = "endswith"
_operator = base.packEnum(kAE.kAEEndsWith)
class Contains(_ComparisonTest):
_name = "contains"
_operator = base.packEnum(kAE.kAEContains)
class IsIn(Contains):
_name = "isin"
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand2)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand1))])
class _LogicalTest(Test):
"""Subclassed by logical test classes."""
def __init__(self, operands):
self._operands = operands
def __repr__(self):
return "%r.%s(%s)" % (self._operands[0], self._name, repr(list(self._operands[1:]))[1:-1])
def AEM_resolve(self, obj):
return getattr(self._operands[0].AEM_resolve(obj), self._name)(*self._operands[1:])
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeLogicalDescriptor, [(kAE.keyAELogicalOperator, self._operator), (kAE.keyAELogicalTerms, codecs.pack(self._operands))])
class AND(_LogicalTest):
_operator = base.packEnum(kAE.kAEAND)
_name = "AND"
class OR(_LogicalTest):
_operator = base.packEnum(kAE.kAEOR)
_name = "OR"
class NOT(_LogicalTest):
_operator = base.packEnum(kAE.kAENOT)
_name = "NOT"
def __repr__(self):
return "%r.NOT" % self._operands[0]
def AEM_resolve(self, obj):
return self._operands[0].AEM_resolve(obj).NOT | [
"[email protected]"
] | |
e60acfc6dfaaa850aa14c36de95d0f2dd9dbd345 | baefee5fbbc015cdc0b71ffc8956fad2d7d93683 | /openstack_dashboard/dashboards/admin/routers/ports/forms.py | 6010f5c792917a435eb64386f99e60d176fda8e1 | [
"Apache-2.0"
] | permissive | dsullivanwr/stx-horizon | 8312fa01bf28a6bfad175e66f4172add6cabf60c | ee6c9b17e34d1dc310790b9d5e0252361c86b8fb | refs/heads/master | 2020-03-29T06:51:49.902050 | 2018-10-11T19:37:40 | 2018-10-11T19:37:40 | 149,643,878 | 0 | 0 | Apache-2.0 | 2018-10-10T16:02:36 | 2018-09-20T17:11:28 | Python | UTF-8 | Python | false | false | 2,970 | py | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.ports \
import forms as project_forms
LOG = logging.getLogger(__name__)
class SetGatewayForm(project_forms.SetGatewayForm):
network_id = forms.ChoiceField(label=_("External Network"))
ip_address = forms.IPField(
label=_("IP Address (optional)"),
required=False,
initial="",
help_text=_("IP address of gateway interface (e.g. 192.168.0.254). "
"Specify an explicit address to use when creating the "
"gateway interface. If one is not specified an address "
"will be allocated from the external subnet."),
version=forms.IPv4 | forms.IPv6,
mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
enable_snat = forms.BooleanField(label=_("Enable SNAT"),
initial=True, required=False)
failure_url = 'horizon:admin:routers:index'
def handle(self, request, data):
try:
ip_address = data.get('ip_address') or None
enable_snat = data.get('enable_snat', True)
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'],
ip_address=ip_address,
enable_snat=enable_snat)
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
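# Note (my addition): enable_snat toggles source NAT on the gateway and is
# forwarded unchanged to api.neutron.router_add_gateway() above.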
| [
"[email protected]"
] | |
cbb5b5e0a29153cfef89be24a515e1b90dbd5ce0 | 2a1e2c298773148983805f1e0fba62bc2bf79267 | /lib/network/vgg_base.py | 7057d7ce930283300e3f9abeacd0c7ce46869275 | [] | no_license | copperdong/CTPN | 42fde81010ba5c0bff193b4132d4c397c251dedd | 3d559406c7ad2a02ac54b07ff1cc3603b3c5b6c9 | refs/heads/master | 2020-11-25T10:51:23.753733 | 2019-07-22T12:29:15 | 2019-07-22T12:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | import tensorflow.contrib.slim as slim
from lib.utils.config import cfg
def vgg_base(inputs, scope=None):
featuremap_scale = 1
net = slim.conv2d(inputs, 64, [3, 3], scope='conv1_1')
net = slim.conv2d(net, 64, [3, 3], scope='conv1_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool1')
featuremap_scale *= 2
net = slim.conv2d(net, 128, [3, 3], scope='conv2_1')
net = slim.conv2d(net, 128, [3, 3], scope='conv2_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool2')
featuremap_scale *= 2
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool3')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv4_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_3')
if featuremap_scale != cfg["ANCHOR_WIDTH"]:
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool4')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv5_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_3')
return net, featuremap_scale
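# Minimal usage sketch (my addition; the placeholder shape is illustrative):
#   import tensorflow as tf
#   images = tf.placeholder(tf.float32, [1, 608, 608, 3])
#   features, scale = vgg_base(images, scope='vgg_base')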
| [
"[email protected]"
] | |
39f9f6cb12e59735ebe32a3c579294e54cc3f58e | 9039f309649d0b7c6dd974706fc507938ed0e47a | /03. Logistics.py | 51054e067ae37313a5cfc1e9833e3de6735c07c5 | [] | no_license | antondelchev/For-Loop---More-Exercises | 2b5dadb31c273611c15e6523b536f994a0353a52 | 891266ff8b931e19d179b22dd33647887814555e | refs/heads/main | 2023-03-03T11:59:16.990004 | 2021-02-16T15:01:02 | 2021-02-16T15:01:02 | 335,062,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | number_of_loads = int(input())
tonnes_total = 0
bus_tonnes_total = 0
truck_tonnes_total = 0
train_tonnes_total = 0
bus_price_total = 0
truck_price_total = 0
train_price_total = 0
for i in range(1, number_of_loads + 1):
tonnes = int(input())
tonnes_total += tonnes
if tonnes <= 3:
bus_tonnes_total += tonnes
bus_price_total += tonnes * 200
elif 4 <= tonnes <= 11:
truck_tonnes_total += tonnes
truck_price_total += tonnes * 175
elif tonnes >= 12:
train_tonnes_total += tonnes
train_price_total += tonnes * 120
average_ton_price = (bus_price_total + truck_price_total + train_price_total) / tonnes_total
percent_tonnes_bus = bus_tonnes_total / tonnes_total * 100
percent_tonnes_truck = truck_tonnes_total / tonnes_total * 100
percent_tonnes_train = train_tonnes_total / tonnes_total * 100
print(f"{average_ton_price:.2f}")
print(f"{percent_tonnes_bus:.2f}%")
print(f"{percent_tonnes_truck:.2f}%")
print(f"{percent_tonnes_train:.2f}%")
| [
"[email protected]"
] | |
964b812d02375eb43441088299f997192ca9d36b | 894b290b4f4f47b5eb523c23efd7bd6110d91b2f | /116_fang_shop/fang_shop/fang_shop/spiders/fang_shop_spider.py | c3f9547cad1f9b1ec5db7c8618dd0e8ddbf53a24 | [] | no_license | wliustc/SpiderS | 6650c00616d11239de8c045828bafdc5a299b1ce | 441f309c50d28c1a3917bed19321cd5cbe7c2861 | refs/heads/master | 2020-03-27T06:15:39.495785 | 2018-06-14T07:55:44 | 2018-06-14T07:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,684 | py | # -*- coding: utf-8 -*-
import scrapy
import re
from fang_shop.items import FangShopItem
import web
import urlparse
import hashlib
import json
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36'
}
dbo2o = web.database(dbn='mysql', db='o2o', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
db = web.database(dbn='mysql', db='hillinsight', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
class Fang_Shop_Spider(scrapy.Spider):
name = 'fang_shop_spider'
def start_requests(self):
sql = '''select city,city_link,province from t_hh_fang_city_list'''
results = db.query(sql)
for result in results:
if result['city'] == '北京':
url = 'http://shop.fang.com/loupan/house/'
yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
'city': result['city'], 'province': result['province']
}, dont_filter=True)
else:
pattern = re.search('(.*?)\.fang', result['city_link'])
city_code = pattern.group(1)
url = 'http://shop.%s.fang.com/loupan/house/' % city_code
yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
'city': result['city'], 'province': result['province']
}, dont_filter=True)
def list_parse(self, response):
content = str(response.body).decode('gb18030').encode('utf-8')
pattern = re.compile('class="title"><a target="_blank" href="(.*?)"')
city = response.meta['city']
province = response.meta['province']
url_list = re.findall(pattern, content)
for url in url_list:
url = re.sub('/esf/', '/', url)
url_new = url + 'xiangqing/'
yield scrapy.Request(url_new, headers=headers, callback=self.detail_parse, meta={
'city': city, 'province': province
}, dont_filter=True)
pattern_next = re.search('id="PageControl1_hlk_next" href="(.*?)"', content)
url_domain = urlparse.urlparse(response.url).netloc
if pattern_next:
url_next = 'http://' + url_domain + pattern_next.group(1)
yield scrapy.Request(url_next, headers=headers, callback=self.list_parse, meta={
'city': city, 'province': province
}, dont_filter=True)
def detail_parse(self, response):
content = str(response.body).decode('gb18030').encode('utf-8')
city = response.meta['city']
province = response.meta['province']
items = FangShopItem()
base_info = {}
pattern1 = re.search('所属区域:([\s\S]*?)<', content)
base_info['所属区域'] = pattern1.group(1)
pattern2 = re.search('楼盘地址:<span title="([\s\S]*?)"', content)
base_info['楼盘地址'] = pattern2.group(1)
pattern3 = re.search('环线位置:([\s\S]*?)<', content)
base_info['环线位置'] = pattern3.group(1)
pattern4 = re.search('物业类别:([\s\S]*?)<', content)
base_info['物业类别'] = pattern4.group(1)
pattern5 = re.search('建筑类别:([\s\S]*?)<', content)
base_info['建筑类别'] = pattern5.group(1)
pattern6 = re.search('总 层 数:([\s\S]*?)<', content)
base_info['总层数'] = pattern6.group(1)
pattern7 = re.search('开 发 商:([\s\S]*?)<', content)
base_info['开发商'] = pattern7.group(1)
pattern8 = re.search('竣工时间:([\s\S]*?)<', content)
base_info['竣工时间'] = pattern8.group(1)
pattern9 = re.search('物 业 费:([\s\S]*?)<', content)
base_info['物业费'] = pattern9.group(1)
pattern10 = re.search('物业公司:([\s\S]*?)<', content)
base_info['物业公司'] = pattern10.group(1)
pattern11 = re.search('占地面积:([\s\S]*?)<', content)
base_info['占地面积'] = pattern11.group(1)
pattern12 = re.search('建筑面积:([\s\S]*?)<', content)
base_info['建筑面积'] = pattern12.group(1)
pattern13 = re.search('开间面积:([\s\S]*?)<', content)
base_info['开间面积'] = pattern13.group(1)
pattern14 = re.search('是否可分割:([\s\S]*?)<', content)
base_info['是否可分割'] = pattern14.group(1)
pattern15 = re.search('电梯数量:([\s\S]*?)<', content)
base_info['电梯数量'] = pattern15.group(1)
pattern16 = re.search('空 调:([\s\S]*?)<', content)
base_info['空调'] = pattern16.group(1)
pattern17 = re.search('装修状况:([\s\S]*?)<', content)
base_info['装修状况'] = pattern17.group(1)
pattern18 = re.search('停 车 位:([\s\S]*?)<', content)
base_info['停车位'] = pattern18.group(1)
base_info = json.dumps(base_info, ensure_ascii=False, encoding='utf-8')
items['base_info'] = base_info
pattern19 = re.search('交通状况</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
traffic_con = pattern19.group(1)
if '暂无资料' in traffic_con:
items['traffic_info'] = '暂无资料'
# print traffic_con
# raw_input('enter')
else:
traffic_info = {}
pattern19_1 = re.search('公交:([\s\S]*?)<', traffic_con)
if pattern19_1:
traffic_info['公交'] = pattern19_1.group(1)
pattern19_2 = re.search('地铁:([\s\S]*?)<', traffic_con)
if pattern19_2:
traffic_info['地铁'] = pattern19_2.group(1)
traffic_info = json.dumps(traffic_info, ensure_ascii=False, encoding='utf-8')
items['traffic_info'] = traffic_info
pattern20 = re.search('周边信息</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
around_con = pattern20.group(1)
if '暂无资料' in around_con:
items['around_info'] = '暂无资料'
else:
around_info = {}
pattern20_1 = re.search('商场:([\s\S]*?)<', around_con)
if pattern20_1:
around_info['商场'] = pattern20_1.group(1)
pattern20_2 = re.search('医院:([\s\S]*?)<', around_con)
if pattern20_2:
around_info['医院'] = pattern20_2.group(1)
pattern20_3 = re.search('邮局:([\s\S]*?)<', around_con)
if pattern20_3:
around_info['邮局'] = pattern20_3.group(1)
pattern20_4 = re.search('银行:([\s\S]*?)<', around_con)
if pattern20_4:
around_info['银行'] = pattern20_4.group(1)
pattern20_5 = re.search('餐饮:([\s\S]*?)<', around_con)
if pattern20_5:
around_info['餐饮'] = pattern20_5.group(1)
around_info = json.dumps(around_info, ensure_ascii=False, encoding='utf-8')
items['around_info'] = around_info
pattern21 = re.search('class="biaoti">([\s\S]*?)<', content)
pattern22 = re.search('newcode=(\d+)"', content)
items['shop_name'] = pattern21.group(1)
if pattern22:
items['src_uid'] = pattern22.group(1)
else:
md5 = hashlib.md5()
md5.update(response.url)
items['src_uid'] = md5.hexdigest()
items['city'] = city
items['province'] = province
items['url'] = response.url
yield items
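# Note (my addition): responses are decoded from gb18030 (a GBK superset)
# because the site's pages are not served as UTF-8; the spider re-encodes to
# UTF-8 before applying the regexes above.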
| [
"[email protected]"
] | |
0a61d3455c62c56d19a40625fbc67c86684cf673 | de64b143a346585f51590bd674e8d13bbc672386 | /algorithm/Intermediate_Class/뉴스 클러스터링/Wooseong.py | a3809d890839206d63713c42df2c288ccf43d48e | [] | no_license | ai-kmu/etc | 304ec20f59e4026025abdcbcae21863c80630dcb | 9c29941e19b7dd2a2037b110dd6e16690e9a0cc2 | refs/heads/master | 2023-08-21T16:30:31.149956 | 2023-08-21T16:26:19 | 2023-08-21T16:26:19 | 199,843,899 | 3 | 24 | null | 2023-05-31T09:56:59 | 2019-07-31T11:36:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,976 | py | import copy
def solution(str1, str2):
    # Ignore case: lower-case both strings.
str1 = str1.lower()
str2 = str2.lower()
    # Build a multiset of two-letter pairs from each string (alphabetic pairs only).
    # -> .isalpha() is True only when every character in the string is a letter.
set1 = []
for i in range(len(str1) - 1):
temp = str1[i:i+2]
if temp.isalpha():
set1.append(temp)
set2 = []
for i in range(len(str2) - 1):
temp = str2[i:i+2]
if temp.isalpha():
set2.append(temp)
    # If both multisets are empty, the similarity is defined as 1.
if (not set1) and (not set2):
return 65536
    # Intersection and union of the multisets:
    # when an element appears in both,
    # the intersection takes the smaller count and the union the larger;
    # elements found in only one multiset go to the union alone.
set1_copy = copy.deepcopy(set1)
set2_copy = copy.deepcopy(set2)
inter = []
union = []
    # Once either multiset runs out, no further overlap is possible.
while set1_copy and set2_copy:
elem = set1_copy.pop()
if elem in set2_copy:
            # set1 already had one occurrence popped, so add 1 back.
in_set1 = set1_copy.count(elem) + 1
in_set2 = set2_copy.count(elem)
            # Intersection gets the smaller count.
inter += [elem] * min(in_set1, in_set2)
            # Union gets the larger count.
union += [elem] * max(in_set1, in_set2)
            # Drop the consumed element from both copies.
set1_copy = [i for i in set1_copy if i != elem]
set2_copy = [i for i in set2_copy if i != elem]
        # Elements with no match go to the union only.
else:
union.append(elem)
    # Add the leftovers to the union (one of the two lists is empty).
union += set1_copy + set2_copy
# print("교", inter)
# print("합", union)
return ((len(inter) / len(union)) * 655360) // 10
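# Sanity checks (my addition; expected values follow the original news
# clustering problem statement):
#   solution('FRANCE', 'french')          # 16384
#   solution('handshake', 'shake hands')  # 65536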
| [
"[email protected]"
] | |
77ea35da65f61abce7c44b9a46ee137770cc95ec | fc5becca3e2e48a444b512e059df1cd21601829b | /Aulas/Aula23A.py | 3baac0f53fa2d741ffa7e4838bd99fbeb5af6205 | [
"MIT"
] | permissive | Felix-xilef/Curso-de-Python | c44bf8c22b393aefaed3a2bb3127ef7999e27fb8 | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | refs/heads/master | 2021-05-19T11:09:22.644638 | 2020-04-01T22:09:02 | 2020-04-01T22:09:02 | 251,665,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | try:
    a = int(input('\n\tNumerator: '))
    b = int(input('\tDenominator: '))
    r = a / b
# except: - only redirects when an error occurs (GENERIC)
#    print('\n\tProblem found') # erro - shows the error | erro.__class__ - shows the error's class
except (ValueError, TypeError): # except SomeClass: - only redirects on errors of the given class; *use parentheses, comma-separated, for more than one class*
    print('\n\tWe had a problem with the types of data you entered')
except ZeroDivisionError:
    print('\n\tIt is not possible to divide a number by zero!')
except KeyboardInterrupt:
    print('\n\tThe user chose not to provide the data!')
except Exception as erro: # stores the exception in the variable erro (GENERIC)
    print(f'\n\tProblem found:\n\t{erro.__class__}') # erro - shows the error | erro.__class__ - shows the error's class
else: # optional (runs when no error occurs)
    print(f'\n\t{a}/{b} = {r}')
finally: # optional (always executed, whether or not an error occurs)
    print('\n\tCome back soon! Thank you very much!')
input('\n\nPress <enter> to continue')
| [
"[email protected]"
] | |
37f9ffe43f45931ee39051d3b509924093639327 | 33af6185b48bd76f97f0a74390a3a812ee216c78 | /angr/angr/procedures/libc/fseek.py | 12804e949829a38007056038d366ae0bb5839ae7 | [
"BSD-2-Clause"
] | permissive | Ruide/angr-dev | dab0cabd907fce47ac698f890c3f3a8b80ab7e2a | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | refs/heads/master | 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 | BSD-2-Clause | 2022-10-16T04:48:10 | 2017-09-22T01:35:12 | C | UTF-8 | Python | false | false | 1,201 | py | import angr
from . import io_file_data_for_arch
######################################
# fseek
######################################
class fseek(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, file_ptr, offset, whence):
# TODO: Support symbolic file_ptr, offset, and whence
# Make sure whence can only be one of the three values: SEEK_SET(0), SEEK_CUR(1), and SEEK_END(2)
if self.state.se.symbolic(whence) and len(self.state.se.eval_upto(whence, 2)) > 1:
raise angr.SimProcedureError('multi-valued "whence" is not supported in fseek.')
else:
# Get all possible values
all_whence = self.state.se.eval_upto(whence, 2)
if not all_whence:
raise angr.SimProcedureError('"whence" has no satisfiable value.')
# There is only one value left
whence_int = all_whence[0]
if whence_int not in (0, 1, 2):
return 22 # EINVAL
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fd = self.state.mem[file_ptr + fd_offset : ].int.resolved
r = self.state.posix.seek(fd, offset, whence_int)
return r
| [
"[email protected]"
] | |
fc1594f425c1a54f1e64a6aef2c262b5c450c273 | 736730d72c24470a0c9ba58309ee3a95fe09d5e4 | /projeto/feriados/feriados/urls.py | d23c9df344b2022619b6e27648331c943d93a479 | [] | no_license | orlandosaraivajr/FATEC_2021_1SEM_Topicos3 | 3f9c6b983c8b012330527848862f9f22649f0f5a | 83610f798510e1bad69eedaed6b3b4ed08e2014e | refs/heads/master | 2023-05-02T10:24:05.865947 | 2021-05-19T00:20:38 | 2021-05-19T00:20:38 | 339,551,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('feriado.urls')),
]
| [
"[email protected]"
] | |
6e4abc00113d6b561e5acc7b39de395f44ae02c3 | 388ff52dec8f4780a2d1cfd3f07f9228373a6b03 | /0x0A-python-inheritance/6-base_geometry.py | 1f8561cd5c07e6e9c0738b8ac8295dfb5d3a7038 | [] | no_license | dairof7/holbertonschool-higher_level_programming | 6bbbb0eb2f2c13553e63056e0cee0ade7e028afe | 6de0ea30c02a69f9721b4304eb0d48fca626e2df | refs/heads/master | 2023-01-14T09:58:13.327692 | 2020-11-10T16:51:50 | 2020-11-10T16:51:50 | 259,339,091 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/python3
""" this module createa class BaseGeometry"""
class BaseGeometry():
"""empty BaseGeometry class"""
pass
def area(self):
"""method area
return a Exception"""
raise Exception("area() is not implemented")
| [
"[email protected]"
] | |
d1447815d97faff47b44f8a1895258fb69c4f969 | 2c8d3e341e813c1b1b88ae824edeaadb366aec0a | /Parser/SW4/SW4/bin/Debug/smo2-25-path-31.py | ebf93cad9a6ff9ab926373c9abce13ff101bc0cb | [] | no_license | kiriphorito/MoveAndTag-Manticore | 2e24a958f4941556b2d2714563718069cc5b208f | d07a3d8c0bacf34cf5f433384a6fd45170896b7a | refs/heads/master | 2021-01-20T11:40:49.232449 | 2017-02-26T14:08:48 | 2017-02-26T14:08:48 | 82,548,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,842 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
u"""
@brief: Path Planning Sample Code with Randomized Rapidly-Exploring Random Trees (RRT)
@author: AtsushiSakai
@license: MIT
"""
import shapely
from shapely.geometry import Polygon, LineString, Point, MultiPoint, GeometryCollection
import matplotlib.pyplot as plt
from ast import literal_eval
import datetime
import random
import math
import copy
def drawRobots(robots):
for (x,y) in robots:
plt.plot(x,y,"o")
def drawPolygonNoFill(points,color):
polygon = plt.Polygon(points,color=color,fill=False)
plt.gca().add_patch(polygon)
def drawPolygon(points):
polygon = plt.Polygon(points)
plt.gca().add_patch(polygon)
def drawPolygons(polygons):
try:
for xs in polygons:
drawPolygon(xs)
except ValueError:
print ("no polygons specified")
def drawPolygonsNoFill(polygons):
try:
for xs in polygons:
drawPolygonNoFill(xs,'red')
except ValueError:
print ("no polygons specified")
class RRT():
u"""
Class for RRT Planning
"""
def __init__(self, start, goal, obstacleList,randArea,expandDis=1.0,goalSampleRate=5,maxIter=500):
u"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
        randArea:Random Sampling Area [min,max]
"""
self.start=Node(start[0],start[1])
self.end=Node(goal[0],goal[1])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.expandDis = expandDis
self.goalSampleRate = goalSampleRate
self.maxIter = maxIter
def Planning(self,animation=True):
u"""
        Path planning
animation: flag for animation on or off
"""
self.nodeList = [self.start]
while True:
# Random Sampling
if random.randint(0, 100) > self.goalSampleRate:
rnd = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
else:
rnd = [self.end.x, self.end.y]
# Find nearest node
nind = self.GetNearestListIndex(self.nodeList, rnd)
# print(nind)
# expand tree
nearestNode =self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = copy.deepcopy(nearestNode)
newNode.x += self.expandDis * math.cos(theta)
newNode.y += self.expandDis * math.sin(theta)
newNode.parent = nind
if not self.__CollisionCheck(newNode, obstacleList,nearestNode):
continue
self.nodeList.append(newNode)
# check goal
dx = newNode.x - self.end.x
dy = newNode.y - self.end.y
d = math.sqrt(dx * dx + dy * dy)
if d <= self.expandDis:
if not self.__CollisionCheck(newNode, obstacleList,self.end):
continue
else:
#print("Goal!!")
break
if animation:
self.DrawGraph(rnd)
path=[[self.end.x,self.end.y]]
lastIndex = len(self.nodeList) - 1
while self.nodeList[lastIndex].parent is not None:
node = self.nodeList[lastIndex]
path.append([node.x,node.y])
lastIndex = node.parent
path.append([self.start.x, self.start.y])
return path
def DrawGraph(self,rnd=None):
u"""
Draw Graph
"""
import matplotlib.pyplot as plt
plt.clf()
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
for node in self.nodeList:
if node.parent is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [node.y, self.nodeList[node.parent].y], "-g")
# plt.plot([ox for (ox,oy,size) in obstacleList],[oy for (ox,oy,size) in obstacleList], "ok", ms=size * 20)
drawPolygons(obstacleList)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.end.x, self.end.y, "xr")
plt.axis()
plt.grid(True)
plt.pause(0.01)
def GetNearestListIndex(self, nodeList, rnd):
dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in nodeList]
minind = dlist.index(min(dlist))
return minind
def __CollisionCheck(self, node,obstacleList,nearestNode):
x1 = nearestNode.x
y1 = nearestNode.y
x2 = node.x
y2 = node.y
first = [x1,y1]
second = [x2,y2]
return LineCollisionCheck(first,second,obstacleList)
def LineCollisionCheck(first,second, obstacleList):
from shapely import geometry,wkt
EPS = 1.2e-16 #======= may need to change this value depending on precision
x1 = first[0]
y1 = first[1]
x2 = second[0]
y2 = second[1]
line = geometry.LineString([(x1,y1),(x2,y2)])
#============ changed here =======
# for p1 in obstacleList:
#
# poly = geometry.Polygon(p1)
# ips = line.intersection(poly.boundary)
## print ips
# if type(ips) is Point:
## print "hello"
# if ips.distance(poly) < EPS:
## print "INTERSECT"
# return False
# elif type(ips) is MultiPoint:
# for i in ips:
# if (i.distance(poly) <EPS):
## print "INTERSECT2"
# return False
# elif type(ips) is GeometryCollection:
# continue
# else:
# print (ips,type(ips))
# return False
# return True
#============ changed here =======
for poly in obstacleList:
p1 = Polygon(poly)
if p1.buffer(EPS).intersects(line):
# print "collision"
return False
# print "safe"
return True
#============ changed here =======
def supersmoothie(smoothie,obstacleList):
path = smoothie
state = True
counter1 = 0
counter2 = len(path)-1
while state:
counter2 = len(path)-1
if counter1 == counter2:
state = False
break
coord1 = path[counter1]
for counter in range(counter2,0,-1):
coord2 = path[counter]
if LineCollisionCheck(coord1,coord2,obstacleList): #if no obstacle
del path[(counter1+1):(counter)]
break
counter1 += 1
return path
class Node():
u"""
RRT Node
"""
def __init__(self, x, y):
self.x = x
self.y = y
self.parent = None
def rrtpath(obstacles,startcoord,goalcoord,randAreas):
rrt = RRT(start=startcoord, goal=goalcoord,randArea = randAreas, obstacleList=obstacles)
path= rrt.Planning(animation=False)
# rrt.DrawGaph()
# plt.plot([x for (x,y) in path], [y for (x,y) in path],'-r')
# print path
smoothiePath = supersmoothie(path,obstacles)
plt.plot([x for (x,y) in smoothiePath], [y for (x,y) in smoothiePath],'-r')
smoothiePath.reverse()
#print smoothiePath
return smoothiePath
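# Note (my addition): rrtpath() grows the tree until a collision-free edge
# reaches the goal, then supersmoothie() greedily removes intermediate
# waypoints that keep line-of-sight through obstacleList.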
obstacleList = [[(0.11019254188514943,-0.033252198573121027),(0.08691070491878392,2.9666574594111776),(-0.913059181075982,2.958896847089056),(-0.9208197933981044,3.9588667330838216),(1.0791199785914285,3.9743879577280654),(1.1567261018126462,-6.025310902219596),(3.1566658738021784,-6.009789677575353),(3.07905975058096,3.9899091823723096),(4.079029636575727,3.9976697946944317),(4.257523719984532,-19.00163758318519),(2.2575839479949953,-19.017158807829432),(2.179977824773777,-9.017459947881774),(0.1800380527842449,-9.032981172526018),(0.2576441760054621,-19.032680032473678),(-0.7423257099892999,-19.040440644795797),(-0.7888893839220328,-13.040621328827205),(-2.7888291559115723,-13.056142553471448),(-2.773307931267319,-15.056082325460983),(-4.773247703256854,-15.071603550105223),(-4.765487090934737,-16.071573436099985),(-2.7655473189452007,-16.056052211455746),(-2.7577867066230746,-17.056022097450512),(-5.757696364607374,-17.079303934416878),(-5.780978201573743,-14.07939427643258),(-6.78094808756851,-14.0871548887547),(-6.757666250602139,-17.087064546738997),(-7.757636136596918,-17.094825159061116),(-7.749875524274789,-18.094795045055882),(-2.75002609430096,-18.055991983445278),(-2.742265481978834,-19.055961869440043),(-25.741572859858454,-19.234455952848847),(-25.695009185925723,-25.23427526881744),(-18.695219983962367,-25.179950982562588),(-18.291668143212032,-77.17838505429044),(-5.292059625280101,-77.07749709410284),(-5.695611466030389,-25.079063022375006),(4.304087393917248,-25.00145689915379),(4.350651067849983,-31.001276215122388),(9.35050049782383,-30.962473153511773),(9.078879066549558,4.036472856305042),(11.078818838539092,4.051994080949282),(11.063297613894846,6.051933852938816),(5.063478297926248,6.0053701790060865),(5.016914623993522,12.005189494974681),(4.0169447379987515,11.99742888265256),(4.063508411931484,5.997609566683963),(2.06356863994195,5.98208834203972),(2.02476557833134,10.981937772013552),(1.0247956923365782,10.974177159691429),(1.063598753947185,5.974327729717598),(-0.9363410180423481,5.958806505073355),(-0.9441016303644705,6.958776391068121),(-1.9440715163592361,6.951015778745999),(-1.928550291714992,4.951076006756467),(-3.9284900637045257,4.935554782112222),(-4.006096186925744,14.935253642059884),(-6.006035958915275,14.91973241741564),(-5.990514734271034,12.919792645426108),(-8.990424392255333,12.896510808459741),(-8.982663779933207,11.896540922464977),(-5.98275412194891,11.919822759431343),(-5.9284298356940575,4.920033557467978),(-8.928339493678356,4.896751720501613),(-8.920578881356233,3.896781834506848),(-1.9207896793928703,3.9511061207617),(-1.9130290670707477,2.9511362347669343),(-2.9129989530655145,2.9433756224448118),(-2.9052383407433933,1.9434057364500452),(-6.905117884722457,1.9123632871615595),(-6.897357272400336,0.9123934011667933),(-4.897417500410803,0.9279146258110347),(-4.881896275766559,-1.0720251461784969),(-7.881805933750857,-1.0953069831448634),(-7.912848383039345,2.904572560834203),(-8.912818269034112,2.896811948512081),(-8.89729704438987,0.8968721765225512),(-12.897176588368934,0.8658297272340604),(-12.889415976046811,-0.13414015876070418),(-8.889536432067747,-0.10309770947221705),(-8.881775819745625,-1.1030675954669855),(-12.88165536372469,-1.1341100447554688),(-12.866134139080446,-3.134049816745008),(-4.866375051122317,-3.0719649181680286),(-4.858614438800193,-4.071934804162796),(-3.8586445528054285,-4.0641741918406735),(-3.8974476144160373,0.9356752381331583),(-2.897477728421271,0.9434358504552797),(-2.8897171160991495,-0.05653403553948637),(
-1.889747230104383,-0.04877342321736459),(-1.8431835561716523,-6.0485927391859615),(-0.8432136701768869,-6.040832126863839),(-0.8897773441096168,-0.04101281089524281)],[(17.400649320507974,-33.71949229515208),(18.356933127973114,-34.011932510546565),(18.06449291257861,-34.96821631801171),(16.151925297648344,-34.38333588722271),(15.859485082253842,-35.33961969468785),(17.772052697184144,-35.92450012547683),(17.187172266395162,-37.83706774040712),(11.44946942160431,-36.08242644804017),(11.157029206209828,-37.03871025550529),(13.06959682114011,-37.62359068629431),(12.777156605745628,-38.57987449375945),(9.908305183350187,-37.702553847575956),(11.078066044928146,-33.87741861771543),(12.990633659858421,-34.46229904850438),(13.283073875252924,-33.506015241039265),(11.370506260322653,-32.921134810250265),(11.662946475717142,-31.96485100278512),(10.706662668252001,-31.67241078739064),(9.536901806674017,-35.49754601725115),(4.755482769348346,-34.03534494027877),(4.463042553953832,-34.991628747743846),(9.244461591279531,-36.453829824716315),(8.952021375885046,-37.41011363218147),(1.3017509161639325,-35.07059190902549),(0.716870485374951,-36.98315952395579),(8.367140945096075,-39.32268124711176),(5.442738791151182,-48.8855193217632),(7.3553064060814926,-49.47039975255218),(7.6477466214759655,-48.514115945087),(30.59855800063948,-55.532681114554634),(32.35319929300634,-49.79497826976387),(9.402387913842906,-42.776413100296196),(10.27970856002635,-39.907561677900745),(12.192276174956648,-40.49244210868974),(11.607395744167675,-42.40500972362003),(12.563679551632866,-42.697449939014525),(14.025880628605243,-37.9160309016888),(14.982164436070397,-38.2084711170833),(13.519963359097959,-42.989890154409004),(14.47624716656309,-43.28233036980349),(15.938448243535541,-38.500911332477784),(16.89473205100068,-38.79335154787228),(16.0174114048172,-41.66220297026767),(16.973695212282358,-41.95464318566218),(17.558575643071347,-40.0420755707319),(31.90283275504851,-44.4286788016491),(32.195272970442986,-43.47239499418396),(35.0641243928384,-44.349715640367435),(35.35656460823287,-43.39343183290226),(32.48771318583744,-42.516111186718845),(32.780153401231956,-41.559827379253676),(18.435896289254792,-37.17322414833647),(18.728336504649285,-36.21694034087133),(25.422323156905275,-38.26402184863272),(25.714763372299764,-37.307738041167596),(19.02077672004377,-35.26065653340619),(19.313216935438252,-34.30437272594105),(20.26950074290339,-34.596812941335536),(20.561940958297885,-33.640529133870395),(23.4307923806933,-34.517849780053844),(23.723232596087787,-33.56156597258868),(22.766948788622653,-33.26912575719423),(23.351829219411623,-31.35655814226395),(28.133248256737332,-32.81875921923636),(27.255927610553876,-35.6876106416318),(28.212211418019017,-35.98005085702621),(29.089532064202466,-33.11119943463082),(30.045815871667585,-33.40363965002528),(30.33825608706207,-32.4473558425602),(28.4256884721318,-31.86247541177121),(36.61401450317736,-5.086528802747274),(35.65773069571233,-4.794088587352828),(37.412371988079315,0.9436142574379787),(35.499804373148976,1.5284946882269566),(33.745163080782035,-4.209208156563875),(32.78887927331691,-3.9167679411693292),(24.600553242271232,-30.692714550193266),(23.644269434806105,-30.40027433479883),(24.229149865595083,-28.487706719868523),(23.27286605812995,-28.195266504474027),(21.810664981157505,-32.97668554179973),(20.85438117369236,-32.684245326405254),(21.14682138908686,-31.72796151894012),(20.190537581621708,-31.435521303545634),(21.36029844319967,-27.61038607368509),(20.40401
4635734516,-27.317945858290575),(19.819134204945556,-29.23051347322086),(15.993998975085022,-28.06075261164291),(15.7015587596905,-29.01703641910807),(19.526693989551056,-30.186797280686015),(19.234253774156574,-31.14308108815115),(18.277969966691433,-30.850640872756657),(17.98552975129694,-31.8069246802218),(16.072962136366687,-31.222044249432816),(15.780521920972184,-32.17832805689796),(17.69308953590246,-32.763208487686946)],[(-27.8904877290891,34.81391505870865),(-28.279435699813977,35.73517478108286),(-25.515656532691374,36.90201869325751),(-24.348812620516718,34.13823952613488),(-23.427552898142515,34.52718749685978),(-24.59439681031717,37.29096666398239),(-23.673137087942976,37.67991463470728),(-24.062085058667854,38.601174357081476),(-24.98334478104205,38.212226386356605),(-25.761240722491845,40.054745831105016),(-20.233682388246606,42.38843365545433),(-17.899994563897323,36.860875321209086),(-16.978734841523107,37.249823291934),(-19.31242266587241,42.77738162617921),(-18.39116294349821,43.16632959690406),(-15.279579177699128,35.79625181791047),(-14.358319455324942,36.18519978863538),(-15.914111338224446,39.87023867813215),(-6.701514114482432,43.75971838538102),(-7.479410055932242,45.60223783012951),(-16.692007279674264,41.71275812288061),(-17.469903221124042,43.55527756762901),(-16.548643498749826,43.9442255383539),(-17.326539440199557,45.78674498310227),(-26.539136663941616,41.8972652758534),(-26.928084634666504,42.81852499822757),(-22.32178602279548,44.76326485185205),(-22.710733993520368,45.684524574226245),(-27.31703260539138,43.739784720601826),(-27.705980576116247,44.661044442976014),(-28.627240298490452,44.27209647225115),(-25.90460450341627,37.82327841563171),(-26.82586422579045,37.43433044490683),(-29.54850002086466,43.88314850152625),(-30.469759743238875,43.49420053080138),(-27.74712394816466,37.04538247418194),(-28.668383670538866,36.65643450345706),(-29.05733164126375,37.57769422583126),(-29.978591363637953,37.18874625510637),(-31.534383246537498,40.873785144603175),(-32.455642968911704,40.48483717387829),(-32.06669499818682,39.56357745150411),(-32.987954720561014,39.17462948077922),(-34.93269457418548,43.78092809265021),(-35.85395429655964,43.39198012192535),(-35.465006325834764,42.47072039955114),(-38.228785492957385,41.303876487376456),(-41.34036925875644,48.67395426637012),(-2.647460919039897,65.00976903681531),(-4.981148743389372,70.53732737106061),(-43.67405708310577,54.20151260061539),(-44.451953024555564,56.04403204536375),(-46.29447246930391,55.26613610391398),(-40.07130493770577,40.52598054592673),(-40.99256466007997,40.13703257520186),(-42.54835654297955,43.82207146469866),(-43.46961626535371,43.43312349397376),(-41.9138243824542,39.74808460447685),(-42.8350841048284,39.359136633751994),(-42.05718816337861,37.51661718900366),(-34.68711038438499,40.628200954802736),(-33.90921444293523,38.7856815100543),(-35.75173388768364,38.00778556860452),(-35.362785916958735,37.086525846230344),(-31.677747027461923,38.64231772912991),(-30.899851086012156,36.7997982843815),(-31.821110808386365,36.4108503136566),(-31.43216283766148,35.48959059128242),(-33.274682282409884,34.71169464983262),(-32.88573431168499,33.79043492745841),(-31.043214866936594,34.56833086890819),(-30.65426689621171,33.64707114653399),(-29.733007173837493,34.036019117258896),(-29.34405920311262,33.11475939488467),(-31.186578647861026,32.33686345343491),(-30.79763067713613,31.415603731060706),(-28.955111232387733,32.19349967251048),(-28.566163261662844,31.272239950136274),(-31.329942428785444,30.1053960379
61597),(-30.940994458060565,29.184136315587402),(-30.019734735686363,29.573084286312287),(-29.24183879423656,27.73056484156389),(-31.084358238984997,26.95266890011412),(-30.695410268260105,26.031409177739903),(-28.85289082351174,26.80930511918972),(-28.074994882061933,24.96678567444129),(-27.153735159687795,25.35573364516616),(-29.09847501331218,29.962032257037183),(-28.177215290937994,30.350980227762065),(-27.788267320213105,29.42972050538787),(-26.867007597838874,29.81866847611274),(-28.03385151001354,32.582447643235355),(-27.112591787639342,32.971395613960254),(-24.389955992565167,26.52257755734081),(-23.46869627019095,26.91152552806569),(-23.857644240915903,27.83278525043988),(-22.936384518541637,28.22173322116478),(-22.158488577091916,26.37921377641633),(-28.607306633711318,23.656577981342195),(-28.21835866298636,22.735318258967986),(-25.454579495863822,23.902162171142653),(-16.119828198466706,1.791928834161716),(-15.19856847609255,2.180876804886566),(-14.420672534642883,0.33835736013809026),(-13.499412812268504,0.7273053308630466),(-14.277308753718303,2.5698247756114654),(-13.356049031344055,2.9587727463363684),(-22.690800328741215,25.06900608331726),(-21.769540606367023,25.457954054042155),(-20.99164466491725,23.61543460929374),(-20.07038494254305,24.004382580018614),(-20.848280883992782,25.84690202476706),(-6.108125326005531,32.07006955636505),(-6.886021267455273,33.9125890011135),(-21.626176825442563,27.68942146951544),(-22.015124796167438,28.610681191889654),(-18.33008590667061,30.166473074789167),(-18.719033877395496,31.08773279716339),(-24.246592211640706,28.754044972814125),(-24.63554018236561,29.675304695188323),(-20.950501292868793,31.23109657808784),(-21.72839723431854,33.07361602283625),(-25.41343612381538,31.517824139936707),(-26.191332065265133,33.360343584685126),(-25.270072342890934,33.74929155540999),(-25.659020313615812,34.67055127778419),(-28.422799480738423,33.50370736560957),(-28.8117474514633,34.42496708798378)],[(-27.771470721193953,-5.366684866522082),(-27.175196946980105,-4.56390370942834),(-26.372415789886354,-5.160177483642189),(-38.29789127416335,-21.21580062551707),(-36.69232895997574,-22.408348173944837),(-39.0774240568311,-25.61947280231984),(-43.09132984229987,-22.638103931250594),(-43.6876036165138,-23.44088508834428),(-41.27926014523254,-25.229706410985862),(-42.47180769366026,-26.835268725173332),(-41.669026536566406,-27.43154249938725),(-40.47647898813871,-25.82598018519974),(-39.67369783104494,-26.422253959413567),(-40.26997160525875,-27.225035116507303),(-39.46719044816497,-27.821308890721202),(-35.88954780288201,-23.004621948158647),(-35.0867666457882,-23.600895722372556),(-23.161291161511354,-7.545272580497544),(-21.555728847323856,-8.737820128925245),(-20.959455073110004,-7.93503897183145),(-22.565017387297516,-6.742491423403784),(-19.58364851622831,-2.7285856379350357),(-20.386429673322077,-2.132311863721204),(-23.367798544391263,-6.1462176491899285),(-24.973360858578765,-4.95367010076225),(-21.991991987509536,-0.93976431529354),(-22.79477314460329,-0.34349054107968247),(-25.77614201567251,-4.357396326548434),(-26.57892317276626,-3.761122552334598),(-25.982649398552407,-2.9583413952408373),(-26.78543055564616,-2.36206762102699),(-24.99660923300462,0.04627585025423819),(-25.799390390098374,0.6425496244680939),(-27.5882117127399,-1.765793846813152),(-28.39099286983365,-1.169520072599303),(-28.987266644047494,-1.9723012296930542),(-30.592828958235007,-0.7797536812653636),(-26.418912538738066,4.839714418390868),(-27.22169369583178,5.435988192604713
),(-31.39561011532875,-0.18347990705152029),(-32.198391272422455,0.4127938671623559),(-28.024474852925543,6.0322619668185755),(-28.827256010019234,6.628535741032415),(-29.423529784233107,5.82575458393868),(-30.226310941326865,6.422028358152515),(-29.033763392899164,8.02759067234),(-26.625419921617944,6.238769349698453),(-26.029146147404088,7.041550506792195),(-26.83192730449791,7.637824281006067),(-25.63937975607017,9.243386595193547),(-26.44216091316392,9.839660369407376),(-27.634708461591625,8.234098055219894),(-28.43748961868525,8.830371829433789),(-26.64866829604389,11.238715300714976),(-27.45144945313757,11.834989074928824),(-31.029092098420595,7.018302132366371),(-32.63465441260817,8.210849680794025),(-28.460737993111188,13.83031778045028),(-29.263519150204868,14.42659155466411),(-30.45606669863263,12.821029240476609),(-32.06162901282012,14.013576788904285),(-32.65790278703394,13.210795631810608),(-31.05234047284648,12.018248083382911),(-32.24488802127419,10.412685769195395),(-35.45601264964919,12.797780866050772),(-33.0709175527938,16.008905494425765),(-34.676479866981175,17.20145304285352),(-37.061574963836605,13.990328414478505),(-37.86435612093037,14.586602188692364),(-38.46062989514427,13.783821031598581),(-32.841161795488034,9.609904612101637),(-33.43743556970185,8.807123455007897),(-35.04299788388926,9.99967100343565),(-35.639271658103176,9.196889846341875),(-30.019803558446945,5.022973426844922),(-30.616077332660847,4.220192269751175),(-37.03832658941073,8.990382463461998),(-37.63460036362464,8.187601306368236),(-36.83181920653099,7.59132753215431),(-38.02436675495862,5.985765217966858),(-42.03827254042737,8.967134089036056),(-42.634546314641206,8.164352931942323),(-41.02898400045372,6.971805383514647),(-42.221531548881416,5.36624306932713),(-47.84099964853766,9.540159488824001),(-48.43727342275152,8.737378331730278),(-42.81780532309524,4.563461912233382),(-43.41407909730913,3.7606807551396173),(-41.80851678312159,2.568133206711872),(-39.423421686266124,5.779257835086936),(-38.620640529172455,5.182984060873142),(-39.81318807760015,3.577421746685582),(-39.01040692050641,2.981147972471714),(-36.02903804943719,6.995053757940488),(-31.212351106874692,3.4174111126574207),(-31.808624881088534,2.6146299555636823),(-35.8225306665573,5.595998826632911),(-36.41880444077117,4.7932176695391675),(-32.404898655302404,1.8118487984699163),(-33.001172429516224,1.0090676413761859),(-33.80395358660998,1.6053414155900283),(-34.400227360823855,0.802560258496257),(-29.583540418261343,-2.775082386786786),(-30.179814192475185,-3.577863543880526),(-29.37703303538143,-4.17413731809439),(-30.56958058380914,-5.77969963228188),(-36.99182984055912,-1.0095094385711478),(-37.588103614772955,-1.812290595664865),(-31.16585435802299,-6.582480789375623),(-31.762128132236843,-7.385261946469354),(-37.38159623189308,-3.2113455269724698),(-37.97787000610691,-4.014126684066204),(-32.35840190645068,-8.188043103563114),(-32.954675680664536,-8.990824260656849),(-38.57414378032075,-4.816907841159947),(-39.17041755453461,-5.619688998253702),(-38.367636397440855,-6.215962772467556),(-43.13782659115161,-12.638212029217549),(-42.33504543405788,-13.234485803431369),(-37.5648552403471,-6.812236546681382),(-36.76207408325336,-7.408510320895228),(-42.12853805117801,-14.633540734738897),(-66.21197276399042,3.2546724916762706),(-69.1933416350597,-0.7592332937924118),(-45.109906922247205,-18.647446520207673),(-45.70618069646106,-19.450227677301424),(-43.297837225179784,-21.239048999942995),(-34.35373061197211,-9.197331643536794),(-
33.55094945487837,-9.793605417750598),(-34.14722322909223,-10.596386574844338),(-33.344442071998465,-11.192660349058208),(-28.574251878287697,-4.770411092308235)],[(66.60322623182422,37.83233353631165),(66.86955455022202,36.86845116443247),(62.050142690826235,35.5368095724435),(62.31647100922399,34.572927200564365),(67.1358828686198,35.904568792553334),(67.40221118701763,34.940686420674155),(68.36609355889674,35.20701473907194),(68.89875019569241,33.27924999531365),(66.97098545193403,32.74659335851803),(67.23731377033185,31.78271098663886),(69.16507851409017,32.31536762343446),(69.69773515088579,30.38760287967618),(67.76997040712747,29.854946242880562),(68.03629872552533,28.89106387100143),(69.96406346928363,29.423720507797),(70.23039178768136,28.45983813591784),(71.19427415956063,28.726166454315663),(70.92794584116282,29.690048826194847),(71.89182821304193,29.956377144592583),(72.42448484983761,28.02861240083436),(67.60507299044173,26.69697080884527),(67.87140130883952,25.73308843696612),(72.6908131682354,27.064730028955168),(72.95714148663326,26.10084765707596),(67.17384725535818,24.50287774668909),(67.44017557375598,23.53899537480997),(73.22346980503109,25.136965285196865),(73.75612644182655,23.209200541438452),(66.04506746679321,21.07857399425607),(66.31139578519104,20.114691622376846),(74.02245476022445,22.245318169559347),(74.55511139702018,20.317553425801016),(64.91628767822829,17.65427024182299),(63.051989449443816,24.40144684497709),(66.90751893696033,25.466760118568324),(66.37486230016475,27.394524862326627),(65.41097992828563,27.1281965439288),(64.07933833629664,31.947608403324573),(66.00710308005493,32.48026504012025),(65.7407747616571,33.44414741199942),(63.813010017898755,32.911490775203816),(63.54668169950101,33.87537314708295),(62.58279932762181,33.60904482868517),(64.44709755640639,26.861868225531005),(62.51933281264812,26.329211588735383),(62.253004494250334,27.29309396061457),(60.32523975049196,26.760437323818962),(62.98852293447001,17.12161360502734),(62.02464056259083,16.85528528662948),(61.49198392579535,18.783050030387837),(60.5281015539161,18.516721711990076),(61.06075819071167,16.588956968231763),(60.096875818832764,16.32262864983388),(34.26302893424553,109.81921872211258),(35.22691130612524,110.08554704051045),(37.35753785330739,102.37448806547728),(38.321420225186074,102.64081638387485),(36.19079367800411,110.35187535890825),(38.118558421762145,110.88453199570381),(46.64106461049202,80.04029609557071),(50.49659409800853,81.10560936916178),(41.97408790927844,111.94984526929478),(48.72126451243287,113.81414349807942),(50.05290610442185,108.99473163868366),(51.01678847630103,109.26105995708157),(49.68514688431203,114.08047181647736),(51.612911628070144,114.61312845327299),(51.08025499127519,116.54089319703134),(59.7551963381871,118.93784806261161),(59.48886801978925,119.90173043449072),(67.19992699482307,122.03235698167308),(66.93359867642512,122.99623935355213),(62.114186817029264,121.66459776156334),(60.24988858824444,128.41177436471747),(59.286006216365,128.1454460463197),(61.15030444514988,121.3982694431654),(59.222539701391824,120.86561280637002),(58.95621138299349,121.82949517824906),(50.28127003608134,119.43254031266873),(50.01494171768351,120.39642268454783),(37.48447088325406,116.93415454537633),(33.22321778888931,132.35627249544302),(29.367688301372745,131.29095922185172),(33.62894139573811,115.86884127178551),(32.66505902385826,115.60251295338733),(32.39873070546105,116.56639532526683),(19.86825987103122,113.10412718609487),(47.566404984403626,12.860360510
66236),(33.108169406216255,8.865435734695122),(31.2438711774316,15.612612337849285),(30.279988805552385,15.34628401945147),(32.14428703433696,8.59910741629757),(28.28875754682047,7.533794142706228),(29.886727457207193,1.750499911431234),(76.15308130740695,14.534259194526118),(78.81636449138486,4.895435475734402),(71.1053055163516,2.7648089285518935),(71.37163383474953,1.8009265566728558),(79.08269280978259,3.931553103855272),(79.34902112818075,2.967670731976213),(83.20455061569726,4.032984005567471),(75.74735770055861,31.021690418183844),(78.63900481619608,31.820675373377227),(77.57369154260483,35.67620486089393),(69.86263256757152,33.54557831371141),(69.32997593077593,35.47334305746974),(70.29385830265511,35.73967137586751),(70.02752998425731,36.70355374774671),(73.88305947177395,37.76886702133788),(73.61673115337614,38.732749393217155),(71.68896640961782,38.200092756421505),(71.42263809122005,39.16397512830065),(78.16981469437415,41.028273357085205),(77.90348637597637,41.99215572896446),(71.15630977282223,40.1278575001798),(70.88998145442444,41.091739872058994),(69.92609908254529,40.82541155366117),(70.72508403773865,37.93376443802369),(69.7612016658595,37.66743611962587),(69.49487334746173,38.63131849150502),(68.53099097558253,38.364990173107245),(67.46567770199141,42.220519660623886),(82.88779565205793,46.481772754988825),(82.35513901526232,48.40953749874709),(81.39125664338313,48.14320918034928),(81.12492832498529,49.107091552228496),(83.05269306874374,49.63974818902393),(89.44457271029106,26.506571263924435),(90.40845508217015,26.77289958232209),(91.74009667415915,21.953487722926226),(92.70397904603817,22.219816041323845),(91.37233745404927,27.03922790071985),(92.33621982592857,27.30555621911771),(91.53723487073505,30.197203334755223),(145.5146476959681,45.111589165032285),(141.78605123839876,58.60594237134066),(87.80863841316581,43.6915565410633),(85.94434018438113,50.438733144217494),(91.72763441565608,52.036703054604324),(93.32560432604294,46.25340882332941),(94.28948669792207,46.51973714172729),(92.69151678753533,52.30303137300201),(97.51092864693115,53.63467296499103),(98.30991360212457,50.74302584935354),(99.27379597400373,51.00935416775104),(98.4748110188102,53.90100128338901),(100.40257576256856,54.43365792018443),(99.07093417057962,59.25306977958029),(79.79328673299639,53.926503411624225),(79.26063009620081,55.85426815538253),(78.29674772432149,55.58793983698478),(80.42737427150402,47.87688086195143),(79.46349189962483,47.61055254355367),(62.418479522165164,109.29902434382004),(54.707420547131996,107.16839779663758),(71.75243292459157,45.47992599637123),(66.9330210651958,44.14828440438225),(66.66669274679789,45.1121667762614),(65.70281037491883,44.84583845786359),(67.56710860370339,38.098661854709455)]]
rand = (-85,161)
content = ""
starttime = datetime.datetime.now()
print "Path 31 of 109"
path = []
start = (29.678455265696826,-38.64042004355886)
goal = (-0.9114987590597821,-59.597094142558504)
print " Node 1 and 2 of 2"
path += rrtpath(obstacleList,start,goal,rand)
pathStr = str(path)[1:-1] + ";"
pathStr = pathStr.replace("[", "(")
pathStr = pathStr.replace("]", ")")
f = open('smo2sol-25-path-31.txt', 'a+')
f.write(pathStr)
f.close()
| [
"[email protected]"
] | |
a46a09b36dea4eddb1483fcdee6e292962b2ab51 | f47d17b53977cf745d453b654529e8cd6be7890f | /3level_N20_ainbin1.py | 120aacee0e37205a96e1666348518b2b537c19d0 | [] | no_license | rareearthquantum/model_upconversion_peter | b4cce7556a167ba0e9813625dc924d3542d33cd1 | dcf08000ec21770659318409a686bb2b88a7a1be | refs/heads/master | 2020-04-28T19:54:34.795590 | 2019-06-14T09:43:28 | 2019-06-14T09:43:28 | 175,526,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | from Frequency_response_3level import *
# explicit imports for names used below (math, pi, np); the star import above
# may already re-export them, but this keeps the script self-contained
import math
from math import pi
import numpy as np
p = {}
p['deltamu'] = 0.
p['deltao'] = 0.
p['d13'] = 2e-32*math.sqrt(1/3)
p['d23'] = 2e-32*math.sqrt(2/3)
p['gamma13'] = p['d13']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma23'] = p['d23']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma2d'] = 1e6
p['gamma3d'] = 1e6
p['nbath'] = 20
p['gammamu'] = 1/(p['nbath']+1) * 1e3
p['go'] = 51.9 #optical coupling
p['No'] = 1.28e15 # number of atoms in the optical mode
p['deltac']=0 #detuning for
p['kappaoi']=2*pi*7.95e6 # intrinsic loss for optical resonator
p['kappaoc']=2*pi*1.7e6 # coupling loss for optical resonator
#p['df']=0.1e6 # how small descretisation step to take when integrating over the
# inhomogeneous lines
p['mean_delam']=0
p['sd_delam']=2*pi*25e6/2.355 #microwave inhomogeneous broadening
#2.355is to turn FWHM into standard deviation
p['mean_delao']=0
p['sd_delao']=2*pi*170e6/2.355 #optical inhomogeneous broadening
p['kappami'] = 650e3*2*pi # intrinsic loss for microwave cavity
p['kappamc'] = 70e3*2*pi # coupling loss for optical cavity
# this is for one of the two output ports
p['Nm'] = 2.22e16 #toal number of atoms
p['gm'] = 1.04 #coupling between atoms and microwave field
p['gammaoc']=2*pi*1.7e6
p['gammaoi']=2*pi*7.95e6
p['gammamc']=2*pi*70e3
p['gammami']=2*pi*650e3
muBohr=927.4009994e-26; # Bohr magneton in J/T in J* T^-1
p['mu12'] = 4.3803*muBohr # transition dipole moment for microwave cavity (J T^-1)
p['Lsample']=12e-3 # the length of the sample, in m
p['dsample']=5e-3 # the diameter of the sample, in m
p['fillfactor']=0.8 #microwave filling factor
p['freqmu'] = 5.186e9
p['freq_pump'] = 195113.36e9 #pump frequency
p['freqo']=p['freqmu']+p['freq_pump']
p['Lcavity_vac'] = 49.5e-3 # length of the vacuum part of the optical
# Fabry Perot (m)
p['Wcavity'] = 0.6e-3# width of optical resonator beam in sample (m)
p['nYSO'] = 1.76
p['Omega']=-492090.88755145477
delovals=np.linspace(-20e5,20e5,31)
delmvals=np.linspace(-1e6,1e6,31)
binvals=[600000]
ainvals=[600000]
aoutvals,boutvals,effic_a,effic_b=find_outputs(ainvals,binvals,delovals,delmvals,p)
np.savez('output_N20_ainbin1',aoutvals=aoutvals,boutvals=boutvals,effic_a=effic_a,effic_b=effic_b,ainvals=ainvals,binvals=binvals,delovals=delovals,delmvals=delmvals,p=p)
| [
"[email protected]"
] | |
6e94b8ef6dd9af3e5218e7cac10b5f3da2521727 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_270/ch54_2020_03_27_00_44_11_634478.py | 4a4969942833af9e67d2166564562fd7a64f393d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | def calcula_fibonnacci(n):
    lista = []
    a, b = 1, 1
    while len(lista) < n:
        lista.append(a)
        a, b = b, a + b
    return lista
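# e.g. calcula_fibonnacci(6) -> [1, 1, 2, 3, 5, 8]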
| [
"[email protected]"
] | |
0e6b139dec6db4c8aa222b7937adfc0f12e6045a | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/heatmap/_y0.py | 8bae24f08506a20ba2d7ca6fb4ba46ef4651f570 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 441 | py | import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name='y0', parent_name='heatmap', **kwargs):
super(Y0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc+clearAxisTypes',
implied_edits={'ytype': 'scaled'},
role='info',
**kwargs
)
| [
"[email protected]"
] | |
44c1925930e893f90665e267105f0de38e06806c | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /게임 개발_Python/CodingTest.py | 44a46c74629a33f66008719905f685de00396184 | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py | n = 4
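# "Game development" grid simulation: starting from (pos_y, pos_x) facing
# north, keep turning left and stepping onto unvisited land (0 = land,
# 1 = sea; bool_map marks visited cells). After four blocked turns, step
# backwards, and stop when the cell behind is sea. `score` counts the
# visited cells.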
m = 4
pos_x = 1
pos_y = 1
dir = 0 # 0: north, 1: east, 2: south, 3: west
game_map = [[1,1,1,1], [1,0,0,1], [1,1,0,1], [1,1,1,1]]
bool_map = [[0,0,0,0], [0,0,0,0], [0,0,0,0], [0,0,0,0]]
bool_map[pos_y][pos_x] = 1
count = 0
score = 1
while True:
dir -= 1
if dir < 0 : dir = 3
bResult = False
if dir == 0 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 and bool_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 1 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 and bool_map[pos_y][pos_x + 1] == 0 :
pos_x += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 2 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 and bool_map[pos_y + 1][pos_x] == 0 :
pos_y += 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
elif dir == 3 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 and bool_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1;
bool_map[pos_y][pos_x] = 1
bResult = True
score += 1
if bResult :
count = 0
else :
count += 1
if count == 4 :
if dir == 0 :
if pos_y + 1 < n :
if game_map[pos_y + 1][pos_x] == 0 :
pos_y += 1
count = 0
else :
break
else :
break
elif dir == 1 :
if pos_x - 1 >= 0 :
if game_map[pos_y][pos_x - 1] == 0 :
pos_x -= 1
count = 0
else :
break
else :
break
elif dir == 2 :
if pos_y - 1 >= 0 :
if game_map[pos_y - 1][pos_x] == 0 :
pos_y -= 1
count = 0
else :
break
else :
break
elif dir == 3 :
if pos_x + 1 < m :
if game_map[pos_y][pos_x + 1] == 0 :
pos_x += 1
count = 0
else :
break
else :
break
print(score) | [
"[email protected]"
] | |
d8a5803e900c1a81f57eb6e8232a6067e465a51c | 3c300c79359f1c989df4403835abbc5513364fee | /bitshares_tradehistory_analyzer/parser.py | 56c9520cd316cf14dfea532af60b1ebf20c94920 | [
"MIT"
] | permissive | ds-voting/bitshares-tradehistory-analyzer | 73ef81a1748fabef055f512b46366dc848c09a15 | 1dfd293dd6b4d692a078c403b79355fef0165799 | refs/heads/master | 2020-07-23T15:06:04.733405 | 2019-07-19T13:51:33 | 2019-07-19T13:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | import copy
import logging
from decimal import Decimal
from bitshares.account import Account
from bitshares.amount import Amount
from bitshares.asset import Asset
from .consts import LINE_DICT_TEMPLATE
log = logging.getLogger(__name__)
class Parser:
""" Entries parser
:param BitShares bitshares_instance:
:param Account account:
"""
def __init__(self, bitshares_instance, account):
self.bitshares = bitshares_instance
self.account = Account(account, bitshares_instance=self.bitshares)
def parse_transfer_entry(self, entry):
""" Parse single transfer entry into a dict object suitable for writing line
:param dict entry: elastic wrapper entry
:return: dict object suitable for writing line
"""
op_id = entry['account_history']['operation_id']
op_date = entry['block_data']['block_time']
op = entry['operation_history']['op_object']
data = copy.deepcopy(LINE_DICT_TEMPLATE)
amount = Amount(op['amount_'], bitshares_instance=self.bitshares)
from_account = Account(op['from'], bitshares_instance=self.bitshares)
to_account = Account(op['to'], bitshares_instance=self.bitshares)
fee = Amount(op['fee'], bitshares_instance=self.bitshares)
log.info('Transfer: {} -> {}, {}'.format(from_account.name, to_account.name, amount))
if from_account.name == self.account.name:
data['kind'] = 'Withdrawal'
data['sell_cur'] = amount.symbol
data['sell_amount'] = amount.amount
data['fee_cur'] = fee.symbol
data['fee_amount'] = fee.amount
else:
data['kind'] = 'Deposit'
data['buy_cur'] = amount.symbol
data['buy_amount'] = amount.amount
data['comment'] = op_id
data['date'] = op_date
return data
def parse_trade_entry(self, entry):
""" Parse single trade entry (fill order) into a dict object suitable for writing line
:param dict entry: elastic wrapper entry
:return: dict object suitable for writing line
"""
op_id = entry['account_history']['operation_id']
op_date = entry['block_data']['block_time']
op = entry['operation_history']['op_object']
data = copy.deepcopy(LINE_DICT_TEMPLATE)
sell_asset = Asset(op['pays']['asset_id'], bitshares_instance=self.bitshares)
sell_amount = Decimal(op['pays']['amount']).scaleb(-sell_asset['precision'])
buy_asset = Asset(op['receives']['asset_id'], bitshares_instance=self.bitshares)
buy_amount = Decimal(op['receives']['amount']).scaleb(-buy_asset['precision'])
fee_asset = Asset(op['fee']['asset_id'], bitshares_instance=self.bitshares)
fee_amount = Decimal(op['fee']['amount']).scaleb(-fee_asset['precision'])
# Subtract fee from buy_amount
        # For ccgains, any fees for the transaction should already have been subtracted from *amount*, but included
# in *cost*.
if fee_asset.symbol == buy_asset.symbol:
buy_amount -= fee_amount
data['kind'] = 'Trade'
data['sell_cur'] = sell_asset.symbol
data['sell_amount'] = sell_amount
data['buy_cur'] = buy_asset.symbol
data['buy_amount'] = buy_amount
data['fee_cur'] = fee_asset.symbol
data['fee_amount'] = fee_amount
data['comment'] = op_id
data['order_id'] = op['order_id']
data['prec'] = max(sell_asset['precision'], buy_asset['precision'])
# Prevent division by zero
price = Decimal('0')
price_inverted = Decimal('0')
if sell_amount and buy_amount:
price = buy_amount / sell_amount
price_inverted = sell_amount / buy_amount
data['price'] = price
data['price_inverted'] = price_inverted
data['date'] = entry['block_data']['block_time']
return data
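
# Usage sketch (node URL and account name are placeholders):
#   from bitshares import BitShares
#   parser = Parser(BitShares('wss://node.example'), 'my-account')
#   row = parser.parse_trade_entry(entry)  # `entry` comes from the ES wrapper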
| [
"[email protected]"
] | |
6db846cc3de7d7f5c3535eafad242cb11e1da445 | 9dee94907e6456a4af9855d358693923c17b4e0d | /0111_Minimum_Depth_of_Binary_Tree.py | 711a407cde57b1a9863453b7f34b3ebbcf63c43b | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root: TreeNode) -> int:
# BFS find the first leaf
if not root:
return 0
queue = [root]
depth = 1
while len(queue) > 0:
new_queue = []
for i in range(len(queue)):
q = queue[i]
if not q.left and not q.right:
return depth
if q.left:
new_queue.append(q.left)
if q.right:
new_queue.append(q.right)
queue = new_queue
depth += 1
return 0 | [
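
# Example: for the tree [3, 9, 20, None, None, 15, 7] the minimum depth is 2
# (root 3 down to leaf 9).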
"[email protected]"
] | |
1b304b18d44960ff768c90217ce7ba455dec8c93 | 3378d73f5e7c67ddcf0179e3574357e3354c7c11 | /stripe/db/api.py | 11d21c1e2826ff99a1d951f04beb5a8753b50b8e | [
"Apache-2.0"
] | permissive | babarnazmi/stripe | e8cece6f4697d05c4262b25f40e7056bb61349e5 | f98454e7260b5140aaec35d932a78b3ada73e7a4 | refs/heads/master | 2021-01-15T12:41:17.140601 | 2013-10-30T04:25:50 | 2013-10-30T04:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,694 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLAlchemy storage backend."""
from sqlalchemy.orm import exc
from stripe.common import exception
from stripe.db import models
from stripe.openstack.common.db import api
from stripe.openstack.common.db.sqlalchemy import session as db_session
from stripe.openstack.common import log as logging
LOG = logging.getLogger(__name__)
get_session = db_session.get_session
def get_instance():
"""Return a DB API instance."""
backend_mapping = {'sqlalchemy': 'stripe.db.api'}
return api.DBAPI(backend_mapping=backend_mapping)
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
class Connection(object):
"""SqlAlchemy connection."""
def __init__(self):
pass
def create_agent(self, values):
"""Create a new agent."""
res = self._create_model(model=models.Agent(), values=values)
return res
def create_queue(self, values):
"""Create a new queue."""
res = self._create_model(model=models.Queue(), values=values)
return res
def create_queue_member(self, agent_id, queue_id):
"""Create a new queue member."""
values = {
'agent_id': agent_id,
'queue_id': queue_id,
}
res = self._create_model(model=models.QueueMember(), values=values)
return res
def delete_agent(self, agent_id):
"""Delete an agent."""
res = self._delete_model(model=models.Agent, id=agent_id)
if res != 1:
raise exception.AgentNotFound(agent_id=agent_id)
def delete_queue(self, queue_id):
"""Delete a queue."""
res = self._delete_model(model=models.Queue, id=queue_id)
if res != 1:
raise exception.QueueNotFound(queue_id=queue_id)
def delete_queue_member(self, agent_id, queue_id):
"""Delete a queue member."""
res = self._delete_model(
model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
)
if res != 1:
raise exception.QueueMemberNotFound(
agent_id=agent_id
)
def get_agent(self, agent_id):
"""Retrieve information about the given agent."""
try:
res = self._get_model(model=models.Agent, id=agent_id)
except exc.NoResultFound:
raise exception.AgentNotFound(agent_id=agent_id)
return res
def get_queue(self, queue_id):
"""Retrieve information about the given queue."""
try:
res = self._get_model(model=models.Queue, id=queue_id)
except exc.NoResultFound:
raise exception.QueueNotFound(queue_id=queue_id)
return res
def get_queue_member(self, agent_id, queue_id):
"""Retrieve information about the given queue member."""
try:
res = self._get_model(
model=models.QueueMember, agent_id=agent_id, queue_id=queue_id
)
except exc.NoResultFound:
raise exception.QueueMemberNotFound(
agent_id=agent_id
)
return res
def get_user(self, user_id):
"""Retrieve information about the given user."""
try:
res = self._get_model(model=models.User, id=user_id)
except exc.NoResultFound:
raise exception.UserNotFound(user_id=user_id)
return res
def list_agents(self):
"""Retrieve a list of agents."""
res = self._list_model(model=models.Agent)
return res
def list_queues(self):
"""Retrieve a list of queues."""
res = self._list_model(model=models.Queue)
return res
def list_queue_members(self):
"""Retrieve a list of queue members."""
res = self._list_model(model=models.QueueMember)
return res
def list_users(self):
"""Retrieve a list of users."""
res = self._list_model(model=models.User)
return res
def _create_model(self, model, values):
"""Create a new model."""
model.update(values)
model.save()
return model
def _delete_model(self, model, **kwargs):
session = get_session()
with session.begin():
query = model_query(
model, session=session
).filter_by(**kwargs)
count = query.delete()
return count
def _get_model(self, model, **kwargs):
"""Retrieve information about the given model."""
query = model_query(model).filter_by(**kwargs)
res = query.one()
return res
def _list_model(self, model):
"""Retrieve a list of the given model."""
query = model_query(model)
return [m for m in query.all()]
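
# Typical call path (sketch; the dict keys below are illustrative, not the
# actual Agent schema):
#   conn = get_backend()
#   agent = conn.create_agent({'name': 'agent-1'})
#   conn.get_agent(agent.id)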
| [
"[email protected]"
] | |
c0d8734c640e57bc7339310e1f014f3f748709bb | 8b95a7225a67b6e8ad30b8ab0ef66076858a29e5 | /app/db.py | 87ae41110e3fe4c87ec667bc808b744a168090c4 | [] | no_license | tehhuu/auto_key | e413669b61b7f3f5832b66e753b86c68d16daa1a | 95866259de5781cdde1f010d286c7e42ba99d5ff | refs/heads/master | 2021-04-16T02:05:03.564332 | 2020-06-12T02:42:14 | 2020-06-12T02:42:14 | 252,633,541 | 0 | 0 | null | 2020-04-03T04:38:09 | 2020-04-03T04:38:09 | null | UTF-8 | Python | false | false | 1,446 | py | from sqlalchemy import create_engine, Column, String, Integer, DATETIME
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
import hashlib
engine = create_engine('sqlite:///shimerundesu.db', connect_args={"check_same_thread": False}) # allow access from threads other than the creating one
Base = declarative_base()
# hash the given string
def hash(password): return str(hashlib.sha256(password.strip().encode("utf-8")).digest())
# table structure
class User(Base):
__tablename__ = 'users'
name = Column(String, primary_key=True, unique=True)
password = Column(String)
email = Column(String)
def __repr__(self):
return "User<{}, {}, {}>".format(self.name, self.password)
Base.metadata.create_all(engine)
SessionMaker = sessionmaker(bind=engine)
session = SessionMaker()
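# Example lookup using the session above (illustrative; assumes the seed
# users created in the __main__ block below already exist):
#   user = session.query(User).filter_by(name="AAA").first()
#   print(user)  # -> User<AAA, <hashed password>>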
if __name__ == "__main__":
    # database seeding: run this file directly to create the database
user1 = User(name="AAA", password=hash("AAA"), email="[email protected]")
user2 = User(name="BBB", password=hash("BBB"), email="[email protected]")
user3 = User(name="CCC", password=hash("CCC"), email="[email protected]")
user4 = User(name="DDD", password=hash("DDD"), email="[email protected]")
session.add(user1)
session.add(user2)
session.add(user3)
session.add(user4)
session.commit() | [
"[email protected]"
] | |
a0abbc1ed0bab74222442b06db0a1214f2cf0b8a | a44d853d6a7354129d7fdfcf0f43e4f9a9106015 | /tests/mesh_utils_test.py | 8e2b29f7f37cc3baabd584c9ba35ddee05fc4abe | [
"Apache-2.0"
] | permissive | matthewfeickert/jax | 4f6b9ba2a96e1521f776886a08be38dd229f1402 | b0d96bd42440231cc7e98c61f52106f46578fca4 | refs/heads/main | 2021-12-10T06:03:36.919415 | 2021-12-09T06:04:13 | 2021-12-09T06:04:46 | 436,520,694 | 0 | 0 | Apache-2.0 | 2021-12-09T07:23:30 | 2021-12-09T07:23:29 | null | UTF-8 | Python | false | false | 6,407 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mesh utils."""
import dataclasses
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from jax import test_util
from jax.experimental import mesh_utils
@dataclasses.dataclass
class MockTpuDevice:
"""Mock TPU device for testing."""
platform: str
device_kind: str
process_index: int
coords: Sequence[int]
core_on_chip: int
def mock_devices(x, y, z, dev_kind, two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8, 4x4x4."""
devices = []
process_index = 0
device_id = 0
for k in range(z):
for j in range(0, y, 2):
for i in range(0, x, 2):
# Local 2x2 subgrid of chips, with 2 cores per chip.
host_devices = [
MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i, j, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i, j + 1, k), 1),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 0),
MockTpuDevice('tpu', dev_kind, process_index, (i + 1, j + 1, k), 1),
]
if two_cores_per_chip:
# Only include core_on_chip = 0.
host_devices = host_devices[::2]
devices.extend(host_devices)
device_id += len(host_devices)
process_index += 1
return devices
def mock_8x8_devices():
"""Hard-coded reproduction of jax.devices() output on 8x8."""
return mock_devices(8, 8, 1, 'TPU v3', False)
def mock_2x2x1_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 2x2x1."""
return mock_devices(2, 2, 1, 'TPU v4', two_cores_per_chip)
def mock_2x2x4_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 2x2x4."""
return mock_devices(2, 2, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x4_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x4x4."""
return mock_devices(4, 4, 4, 'TPU v4', two_cores_per_chip)
def mock_4x4x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x4x4."""
return mock_devices(4, 4, 8, 'TPU v4', two_cores_per_chip)
def mock_8x8x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8x8."""
return mock_devices(8, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x8_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x8x8."""
return mock_devices(4, 8, 8, 'TPU v4', two_cores_per_chip)
def mock_4x8x16_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 4x8x16."""
return mock_devices(4, 8, 16, 'TPU v4', two_cores_per_chip)
def mock_8x8x16_devices(two_cores_per_chip):
"""Hard-coded reproduction of jax.devices() output on 8x8x16."""
return mock_devices(8, 8, 16, 'TPU v4', two_cores_per_chip)
class PartitioningTest(test_util.JaxTestCase):
@parameterized.named_parameters(
('2x2x1_t', mock_2x2x1_devices, True, (2, 2, 1, 1)),
('2x2x1_f', mock_2x2x1_devices, False, (2, 2, 1, 2)),
('8x8x16_t', mock_8x8x16_devices, True, (8, 8, 16, 1)),
('8x8x16_f', mock_8x8x16_devices, False, (8, 8, 16, 2)),
)
def test_bounds_from_last_device(self, devices, two_cores_per_chip,
expected_bounds):
self.assertEqual(
mesh_utils._bounds_from_last_device(devices(two_cores_per_chip)[-1]),
expected_bounds)
@parameterized.named_parameters(
('4x4x4', mock_4x4x4_devices, (4, 4, 4)),
('4x4x8', mock_4x4x8_devices, (4, 4, 8)),
('8x8x8', mock_8x8x8_devices, (8, 8, 8)),
('8x8x16', mock_8x8x16_devices, (8, 8, 16)),
)
def test_jax_devices_order_normalized(self, devices, expected_shape):
jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
jax_devices = devices(True)
normalized = mesh_utils._jax_devices_order_normalized(
jax_local_devices_from_process_0, jax_devices)
self.assertEqual(normalized.shape, expected_shape)
x, y, z = expected_shape
# major_to_minor: x, y, z
for i in range(x):
for j in range(y):
for k in range(z):
self.assertEqual(normalized[i, j, k].coords, (i, j, k))
@parameterized.named_parameters(
('2x2x1', mock_2x2x1_devices, [1, 1, 4], ((), (2,), (0, 1))),
('2x2x4', mock_2x2x4_devices, [1, 4, 4], ((), (2,), (0, 1))),
('4x4x4', mock_4x4x4_devices, [1, 16, 4], ((), (1, 2), (0,))),
('4x4x8a', mock_4x4x8_devices, [1, 16, 8], ((), (0, 1), (2,))),
('4x4x8b', mock_4x4x8_devices, [1, 8, 16], ((), (2,), (0, 1))),
('4x4x8c', mock_4x4x8_devices, [16, 8, 1], ((0, 1), (2,), ())),
('4x8x8', mock_4x8x8_devices, [1, 32, 8], ((), (0, 2), (1,))),
('8x8x8', mock_8x8x8_devices, [1, 64, 8], ((), (1, 2), (0,))),
('8x8x16', mock_8x8x16_devices, [1, 64, 16], ((), (0, 1), (2,))),
)
def test_create_device_mesh_for_tpu_v4(self, devices, mesh_shape,
expected_assignment):
jax_local_devices_from_process_0 = mock_2x2x1_devices(True)
jax_devices = devices(True)
physical_mesh = mesh_utils._jax_devices_order_normalized(
jax_local_devices_from_process_0, jax_devices)
_, assignment = mesh_utils._create_device_mesh_for_tpu_v4(
physical_mesh, mesh_shape)
self.assertEqual(assignment, expected_assignment)
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
ad6bf91b33b968d54e7db7520ad4160735b51f89 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/cloud/rackspace/rax_mon_notification.py | 6aee351b964b059b494cccdaa5d0ebe4607d31ee | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 5,165 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
    rax_mon_notification:
credentials: ~/.rax_pub
label: omg
      notification_type: email
details:
address: [email protected]
register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notifications():
if n.label == label:
existing.append(n)
if existing:
notification = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing notifications are labelled %s.' %
(len(existing), label))
if notification:
should_delete = (notification_type != notification.type)
should_update = (details != notification.details)
if should_update and not should_delete:
                notification.update(details=details)
changed = True
if should_delete:
notification.delete()
else:
should_create = True
if should_create:
notification = cm.create_notification(notification_type,
label=label, details=details)
changed = True
else:
for n in existing:
n.delete()
changed = True
if notification:
notification_dict = {
"id": notification.id,
"type": notification.type,
"label": notification.label,
"details": notification.details
}
module.exit_json(changed=changed, notification=notification_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
details=dict(required=True, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
notification_type = module.params.get('notification_type')
details = module.params.get('details')
setup_rax_module(module, pyrax)
notification(module, state, label, notification_type, details)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3623809ed7baff2ef3553ae5ea56de4d7103565c | 930309163b930559929323647b8d82238724f392 | /abc104_b.py | 71226076067ba3980f151a868e680909d3029fb5 | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from collections import Counter
s = input()
head = s[0]
mid = dict(Counter(s[2:-1]))
if head == 'A' and mid.get('C') == 1:
    i = s.index('C', 2)            # position of the single mid-section 'C'
    rest = s[1:i] + s[i + 1:]      # everything except the leading 'A' and that 'C'
    print('AC' if rest.islower() else 'WA')
else:
    print('WA')
| [
"[email protected]"
] | |
772a6b05963c1796c9a2f54b96ab884eee44995f | 067020d4bd39b6a2df300492c09b6cc65915ab71 | /engineerx/posts/modules/initialize.py | a4d80863fe566411bd4139a90152dae2e145ce37 | [] | no_license | HsnVahedi/engineerx-backend | 2e6d43079d94311f60089d052c278e2cbbfec76b | 018257fc53e2588aec2dd159922275d544147e18 | refs/heads/main | 2023-04-30T22:21:25.873313 | 2021-05-15T22:00:37 | 2021-05-15T22:00:37 | 336,623,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from posts.models import PostsPage
from home.models import HomePage
def create_posts_page(owner):
if PostsPage.objects.exists():
return
posts_page = PostsPage(title='Posts', owner=owner)
home_page = HomePage.objects.first()
home_page.add_child(instance=posts_page)
posts_page = PostsPage.objects.get(slug=posts_page.slug)
posts_page.save()
posts_page.save_revision().publish()
| [
"[email protected]"
] | |
d3366b8875c54405497810ad860a6ad92779b450 | 2265c393b8396292b79fdbcdd08727be24c2337a | /tbonlineproject/relatedcontent/models.py | 2795965bfa7595e7eab4cec3e5338a95be54a301 | [
"MIT"
] | permissive | nathangeffen/tbonline-2 | 4275b2f970170f01f62e01ade008ab5cd1aee0d5 | 0d5869197e66a0057fa07cb99f21dde7f5b47c30 | refs/heads/master | 2023-01-07T08:43:35.261568 | 2019-03-31T15:54:16 | 2019-03-31T15:54:16 | 30,840,752 | 0 | 0 | MIT | 2022-12-26T20:18:09 | 2015-02-15T20:24:49 | Python | UTF-8 | Python | false | false | 3,347 | py | from django.db import models
# Create your models here.
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from enhancedtext.fields import EnhancedTextField
TYPES_OF_RELATED_CONTENT = (
('00', _('Related articles')),
('05', _('Further Reading')),
('10', _('See also')),
('15', _('Source')),
('20', _('Reference'))
)
class Webpage(models.Model):
"""Represents manually maintained links to external web pages for display,
say, on the front page of a website.
"""
title = models.CharField(max_length=200)
url = models.CharField(max_length=200,
verbose_name=_('URL'))
byline = models.CharField(blank=True, max_length=200,
help_text=_('The institution or organisation '
'that produces this website. There is no '
'problem with leaving this blank.'))
date = models.DateField(blank=True, null=True,
help_text=_('Sometimes it is useful to include the '
'date a blog was written. But mostly this '
'field will be left blank.'))
html_A_tag_options = models.CharField(max_length=200, blank=True,
help_text=_('You can put link, title and other '
'HTML A tag attributes here. '
'Leave blank if you are unsure.'))
description = EnhancedTextField(blank=True, default="\W")
date_last_edited = models.DateTimeField(auto_now=True, editable=False)
def __unicode__(self):
return self.title
class Meta:
ordering = ['date_last_edited',]
verbose_name = _('webpage')
verbose_name_plural = _('webpages')
class RelatedContent(models.Model):
'''Model for representing additional reading links that can be attached
to articles.
'''
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
webpage = models.ForeignKey(Webpage,
verbose_name=_('link'))
category = models.CharField(max_length=2,
choices=TYPES_OF_RELATED_CONTENT,
default='05')
position = models.PositiveIntegerField(default=0, blank=True, null=True)
@staticmethod
def get_related_content(model_instance=None, content_type=None, object_id=None):
'''Returns all instances on this model which point to either the given model_instance
or the model instance specified by content_type and object_id.
Either pass model_instance or content_type and object_id. Don't pass both.
'''
if model_instance:
content_type = ContentType.objects.get_for_model(model_instance)
object_id = model_instance.pk
return RelatedContent.objects.filter(content_type=content_type, object_id=object_id)
def __unicode__(self):
return unicode(self.content_type) + u' - ' + unicode(self.webpage)
class Meta:
verbose_name = _("related content")
verbose_name_plural = _("related content")
ordering = ('content_type', 'object_id', 'category', 'position',)
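
# Usage sketch (variable names illustrative):
#   links = RelatedContent.get_related_content(model_instance=article)
#   further_reading = links.filter(category='05')  # '05' = Further Reading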
| [
"[email protected]"
] | |
404d772e9f913c90fd54e1ed82b4691f76b47fc4 | 66213c48da0b752dc6c350789935fe2b2b9ef5ca | /abc/115/d_.py | cb8036d381327e5ffd4f0470a37d3047600699e7 | [] | no_license | taketakeyyy/atcoder | 28c58ae52606ba85852687f9e726581ab2539b91 | a57067be27b27db3fee008cbcfe639f5309103cc | refs/heads/master | 2023-09-04T16:53:55.172945 | 2023-09-04T07:25:59 | 2023-09-04T07:25:59 | 123,848,306 | 0 | 0 | null | 2019-04-21T07:39:45 | 2018-03-05T01:37:20 | Python | UTF-8 | Python | false | false | 1,047 | py | # -*- coding:utf-8 -*-
import sys
def solve():
N, X = list(map(int, sys.stdin.readline().split()))
As = [1] # レベルiバーガーの厚さ(層の総数)(必ず奇数)
Ps = [1] # レベルiバーガーのパティの総数
for i in range(N):
As.append(As[i]*2 + 3) # レベルが1上がると、総数は2倍+3になる
Ps.append(Ps[i]*2 + 1) # レベルが1上がると、パティの数は2倍+1になる
# dp[i][x] := レベルiバーガーの下からx層に含まれているパティの総数
dp = [[0]*(X+1) for _ in range(2)]
dp[0][0] = 0
for i in range(1, X+1):
dp[0][i] = 1
# 漸化式を解く
for i in range(1, 2):
median = (As[i]+1)//2
for x in range(X+1):
if x < median:
dp[i&1][x] = dp[i-1][x-1]
elif x == median:
dp[i][x] = Ps[i-1] + 1
else:
dp[i][x] = Ps[i-1] + 1 + dp[i-1][x-median]
print(dp[N][X])
if __name__ == "__main__":
solve() | [
"[email protected]"
] | |
10e3e838a5a6c0e937e453765f9a61bb9f30cbaa | 572107468a93152774849373d09cb1ecb3b60998 | /members/migrations/0001_initial.py | 237afb6a3316ceec9e1999400ad26e9db96f2a10 | [] | no_license | parkhongbeen/instagram-restudy | 794d9962d37aec56961cee4c145de6dc291d3d40 | 5587d23163e6242e5deb5dbe1051caae179c6eb4 | refs/heads/master | 2020-12-09T18:12:10.783801 | 2020-01-12T11:16:01 | 2020-01-12T11:16:01 | 233,380,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | # Generated by Django 2.2.9 on 2020-01-10 05:13
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
0ea487eefddf2b691bbd4615be6c28583189c22e | 02c394db353d996038c9bedbeaf91bb080c12ca2 | /dsm/epaxos/replica/config.py | fbfd36f11f0765a18dcf07d8a0b82a49e91101b1 | [
"MIT"
] | permissive | Limber0117/python-epaxos | 0633752cffaca65c0d8b9c3aecf9c8bc6ca70f3e | e68bab50e7df32770103196c91d8708863691579 | refs/heads/master | 2021-08-23T22:31:47.283682 | 2017-12-06T22:16:21 | 2017-12-06T22:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | from collections import defaultdict
from typing import Any, List
class ReplicaState:
def __init__(
self,
# channel: Any,
epoch: int,
replica_id: int,
quorum_fast: List[int],
quorum_full: List[int],
live: bool = True,
timeout: int = 3,
jiffies: int = 33,
timeout_range: int = 3,
checkpoint_each: int = 10,
):
# self.channel = channel
self.epoch = epoch
self.replica_id = replica_id
self.quorum_fast = quorum_fast
self.quorum_full = quorum_full
self.live = live
self.timeout = timeout
self.ticks = 0
self.jiffies = jiffies
self.seconds_per_tick = 1. / self.jiffies
self.packet_counts = defaultdict(int)
self.timeout_range = timeout_range
self.total_sleep = 0
self.total_exec = 0
self.total_timeouts = 0
self.total_recv = 0
self.checkpoint_each = checkpoint_each
def tick(self):
self.ticks += 1 | [
"[email protected]"
] | |
ee99160a507f18d502ef1b8e1695b0e8369b54d8 | 8049ba531ea34f07b065a11dd1c9a5d68a00580f | /app/models.py | bac68d9dcf6103505c200b55cdaa3262065c452d | [] | no_license | aoisoratoumi/django-booking | 94b29020c2390bd51d0d1a8451e3be08a9062793 | a178c5f2d05bffe629fc828e7dc307f517718f37 | refs/heads/master | 2022-09-13T22:43:26.308133 | 2020-05-29T23:57:22 | 2020-05-29T23:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | from django.db import models
from django.utils import timezone
from accounts.models import CustomUser
class Store(models.Model):
name = models.CharField('店舗', max_length=100)
address = models.CharField('住所', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
description = models.TextField('説明', default="", blank=True)
image = models.ImageField(upload_to='images', verbose_name='イメージ画像', null=True, blank=True)
def __str__(self):
return self.name
class Staff(models.Model):
user = models.OneToOneField(CustomUser, verbose_name='スタッフ', on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='店舗', on_delete=models.CASCADE)
def __str__(self):
return f'{self.store}:{self.user}'
class Booking(models.Model):
staff = models.ForeignKey(Staff, verbose_name='スタッフ', on_delete=models.CASCADE)
first_name = models.CharField('姓', max_length=100, null=True, blank=True)
last_name = models.CharField('名', max_length=100, null=True, blank=True)
tel = models.CharField('電話番号', max_length=100, null=True, blank=True)
remarks = models.TextField('備考', default="", blank=True)
start = models.DateTimeField('開始時間', default=timezone.now)
end = models.DateTimeField('終了時間', default=timezone.now)
def __str__(self):
start = timezone.localtime(self.start).strftime('%Y/%m/%d %H:%M')
end = timezone.localtime(self.end).strftime('%Y/%m/%d %H:%M')
return f'{self.first_name}{self.last_name} {start} ~ {end} {self.staff}'
| [
"[email protected]"
] | |
67625ed8122fc11c906ad83907a8303cc83d77b9 | fb28906c1f0347ffe50193f6c2bad2d4b490fa9c | /budger/directory/migrations/0018_ksoemployee_is_developer.py | 035b40d574b286ea6259fb47f17f3cee1ebb2261 | [] | no_license | pavkozlov/budger-server | 20c695309c34a0451d25b83ab8583b14f0d21c0c | 7a98c1789414c83625bda1e5b29cbe5587c3cd6a | refs/heads/master | 2020-12-17T06:35:10.550905 | 2020-01-13T13:27:42 | 2020-01-13T13:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2.6 on 2019-12-04 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('directory', '0017_auto_20191203_1640'),
]
operations = [
migrations.AddField(
model_name='ksoemployee',
name='is_developer',
field=models.BooleanField(db_index=True, default=False),
),
]
| [
"[email protected]"
] | |
732ccf2811be54afbd199a94e72658e129c6f81b | 8e09c9562173cb40fe26912fcdb1d4c6c08897d7 | /tfx/components/evaluator/component_test.py | 52d28474308225045d848dd0c656642a98ec0934 | [
"Apache-2.0"
] | permissive | robertlugg/tfx | 6a0050f6f1876ba5d53e45fd0d80acac2441187d | 49778c502bb6668ed8230877407fe40ae3a99a06 | refs/heads/master | 2020-07-27T17:00:47.355938 | 2019-09-16T23:00:02 | 2019-09-16T23:00:32 | 209,164,014 | 0 | 0 | Apache-2.0 | 2019-09-17T21:58:47 | 2019-09-17T21:58:46 | null | UTF-8 | Python | false | false | 1,928 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.evaluator.component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.components.evaluator import component
from tfx.proto import evaluator_pb2
from tfx.types import channel_utils
from tfx.types import standard_artifacts
class ComponentTest(tf.test.TestCase):
def testConstruct(self):
examples = standard_artifacts.Examples()
model_exports = standard_artifacts.Model()
evaluator = component.Evaluator(
examples=channel_utils.as_channel([examples]),
model_exports=channel_utils.as_channel([model_exports]))
self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)
def testConstructWithSliceSpec(self):
examples = standard_artifacts.Examples()
model_exports = standard_artifacts.Model()
evaluator = component.Evaluator(
examples=channel_utils.as_channel([examples]),
model_exports=channel_utils.as_channel([model_exports]),
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_hour'])
]))
self.assertEqual('ModelEvalPath', evaluator.outputs.output.type_name)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
f55510e0cc367aad9ebfda9b2a6faa0435ae1473 | 2119953dd04916fa2adf3f42a487f3f9754d1f66 | /modules/sandbox/docker/geo-web-viz/app.py | 9034f8727870c8bdee5a64203363aecd3f7ec266 | [
"MIT"
] | permissive | sarahwertz/sepal | 91d12e3317cd07ad4c99469d5b6211d74013b330 | efbbc33ac99db332fc13f9dfd4c777a8d2c1b41e | refs/heads/master | 2020-06-11T07:42:08.835556 | 2019-05-27T14:21:28 | 2019-05-27T14:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,038 | py | import json
import logging
import traceback
import sys
from flask import Flask, Blueprint, request, Response
import config
import layers
import raster
import render
from config import to_file
app = Flask(__name__)
http = Blueprint(__name__, __name__)
session_state = {'layer_by_id': {}, 'index_by_id': {}, 'renderers': {}}
@http.errorhandler(Exception)
def handle_invalid_usage(error):
print(error)
print_stacktrace()
return "Internal Error", 500
@http.route('/layers', methods=['GET'])
def list_layers():
return json_response(layers.list_layers(state()))
@http.route('/layers/order', methods=['POST'])
def order_layers():
layers.reorder(json.loads(request.values['order']), state())
return json_response({'status': 'OK'})
@http.route('/raster/info', methods=['GET'])
def raster_info():
raster_file = to_file(request.values['path'])
return json_response(
{
'bandCount': raster.band_count(raster_file),
'nodata': raster.read_nodata(raster_file)
}
)
@http.route('/raster/band/<band_index>', methods=['GET'])
def band_info(band_index):
nodata = request.values.get('nodata', None)
if nodata:
nodata = float(nodata)
return json_response(
raster.band_info(
raster_file=to_file(request.values['path']),
band_index=int(band_index),
nodata=nodata)
)
@http.route('/raster/save', methods=['POST'])
def save_raster():
layer = json.loads(request.values['layer'])
bounds = layers.save_raster(layer, state())
return json_response({'bounds': bounds})
@http.route('/shape/save', methods=['POST'])
def save_shape():
layer = json.loads(request.values['layer'])
bounds = layers.save_shape(layer, state())
return json_response({'bounds': bounds})
@http.route('/layers/<layer_id>', methods=['DELETE'])
def remove_raster(layer_id):
layers.remove_layer(layer_id, state())
return json_response({'status': 'OK'})
@http.route('/layers/features/<lat>/<lng>')
def attributes(lat, lng):
return json_response(layers.features(float(lat), float(lng), state()))
@http.route('/layer/<layer_id>/<z>/<x>/<y>.<fmt>')
def render_tile(layer_id, z, x, y, fmt):
return Response(
render.render_tile(layer_id, int(z), int(x), int(y), str(fmt), renderers()),
mimetype=('image/%s' % fmt)
)
def state():
return session_state
def renderers():
return state().get('renderers', {})
def json_response(data):
return Response(json.dumps(data), mimetype='application/json')
def print_stacktrace():
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.register_blueprint(http)
app.secret_key = config.session_key
app.run(
host='0.0.0.0',
port=config.server_port,
threaded=True,
debug=config.debug_mode
)
| [
"[email protected]"
] | |
efc55a073b926991fd43116f9fdd132aabaee02c | 55a4573cdeb116b20a625a398af04337f180d598 | /instrument/ifmessage.py | a4c5e4261d01cb8b46c95bd26d5bfe772ae5403e | [
"Unlicense"
] | permissive | NOAA-PMEL/omega-trh-daq | f506e4c968b7942dccb6cf012c377c3719a04143 | 98a18c62130af36d43c2882659e65321c3a98529 | refs/heads/master | 2020-04-02T11:10:18.632072 | 2019-07-23T15:33:39 | 2019-07-23T15:33:39 | 154,374,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 9 09:33:42 2018
@author: derek
"""
class InterfaceMessage():
imFirstIndex = 0
imLastIndex = 1
imAllIndex = 2
# interface STATES
imWaitingToConnect = 'WaitingToConnect'
imConnected = 'Connected'
imDisconnected = 'Disconnected'
imStopped = 'Stopped'
def __init__(self):
self.input_ready = False
self.input = []
self.output_ready = False
self.output = []
self.connection_status = False
self.state = self.imStopped
def add_input(self, msg):
self.input.append(msg)
self.input_ready = True
def has_input(self):
if (len(self.input) > 0):
return True
return False
def get_input(self, index=None, clear_buffer=False):
msg = []
if (index is None or index == InterfaceMessage.imFirstIndex):
msg.append(self.input.pop(0))
elif (index == InterfaceMessage.imLastIndex):
msg.append(self.input.pop())
elif (index == InterfaceMessage.imAllIndex):
clear_buffer = True
msg = self.input
else:
# throw exception?
pass
if (clear_buffer):
self.input = []
return msg
def add_output(self, msg):
self.output.append(msg)
self.output_ready = True
def has_output(self):
if (len(self.output) > 0):
return True
return False
def get_output(self, index=None, clear_buffer=True):
msg = []
if (index is None or index == InterfaceMessage.imFirstIndex):
msg.append(self.output.pop(0))
elif (index == InterfaceMessage.imLastIndex):
msg.append(self.output.pop())
elif (index == InterfaceMessage.imAllIndex):
clear_buffer = True
msg = self.output
else:
# throw exception?
pass
if (clear_buffer):
self.output = []
# print(self.output)
return msg
| [
"[email protected]"
] | |
14867c67fd1d822563fe8ecb1841dce728a316df | 1c801375ead766790f5c097081a1bbbc6a593a9e | /baseSpider/算法/随机获取1000此列表元素并统计次数.py | f9d47c387d183b937269c6fbd47b14e83dfe9a35 | [] | no_license | llf-1996/python3Spider | 5803d1f42b660c7c2643bbc31f17126ac06e7ceb | 4621db8c7383940f8e60754d6640406101141095 | refs/heads/master | 2023-06-01T04:31:27.555140 | 2020-12-13T09:38:19 | 2020-12-13T09:38:19 | 156,145,515 | 2 | 3 | null | 2023-05-23T00:12:59 | 2018-11-05T01:48:46 | Python | UTF-8 | Python | false | false | 527 | py | '''
Randomly pick a string from a list of strings 1000 times and count how many
times each string comes up.
'''
__author__ = 'llf'
import random
from collections import Counter
c = Counter()
ll = ['a', 'b']
for i in range(1000):
a = random.choice(ll)
c[a] = c[a] + 1
print('result:', type(c), dir(c), c)
'''
<class 'collections.Counter'>
[
'clear', 'copy', 'elements', 'fromkeys', 'get', 'items',
'keys', 'most_common', 'pop', 'popitem', 'setdefault',
'subtract', 'update', 'values'
]
'''
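# The tallies themselves: most_common() (listed in dir() above) returns them
# sorted descending; exact numbers vary per run, e.g. [('a', 513), ('b', 487)].
print(c.most_common())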
| [
"[email protected]"
] | |
3d9db26167c279a19af9f8aece67edc185736ec1 | 495531870c08ea3495bb45393b05f907366f052e | /x7-src/dashboard/steer/steer/dashboards/engine/images_and_snapshots/images/urls.py | c5aa8ebb24e073dc5629b46e39aea61201b738c5 | [
"Apache-2.0"
] | permissive | wendy-king/x7_venv | 5fcb326cf3ecaa26d3b839af743b027d23af29e0 | d8266c1dc474935c54126ce36d1a6410a7e452f5 | refs/heads/master | 2021-01-01T06:33:24.605851 | 2012-01-19T15:54:44 | 2012-01-19T15:54:44 | 3,209,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
VIEWS_MOD = 'steer.dashboards.engine.images_and_snapshots.images.views'
urlpatterns = patterns(VIEWS_MOD,
url(r'^$', 'index', name='index'),
url(r'^(?P<image_id>[^/]+)/launch/$', 'launch', name='launch'),
url(r'^(?P<image_id>[^/]+)/update/$', 'update', name='update'))
| [
"[email protected]"
] | |
2351627cba429794c787f1b8b52c0bf5472cd577 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/mchjos007/question2.py | 95a03b09fd80edb8ce36c1da69e53ec942c2d03e | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | filein = open (input("Enter the input filename:\n"), "r")
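# Re-wraps the words of the input file to the requested line width and writes
# the result to the output file; blank lines are kept as paragraph breaks.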
lines = filein.readlines()
filein.close()
fileOut = open(input("Enter the output filename:\n"),"w")
width = eval(input("Enter the line width:\n"))
finalFormattedString=""
linecount= 0
currentlineinprogress = ""
for currentline in lines:
wordcount=0
linecount += 1
currentlinearray = currentline.split(" ")
    if currentline != "\n":
for word in currentlinearray:
wordcount+=1
if linecount == len(lines) and wordcount == len(currentlinearray):
if len(currentlineinprogress) + len(word) >= width:
finalFormattedString += currentlineinprogress +"\n" + word
currentlineinprogress = ""
else:
finalFormattedString += currentlineinprogress +" " + word
else:
if word[-1] == "\n":
word = word[:-1]
if len(currentlineinprogress) + len(word) >= width:
finalFormattedString += currentlineinprogress +"\n"
currentlineinprogress = ""
if currentlineinprogress != "":
currentlineinprogress+= " "
currentlineinprogress += word
else:
finalFormattedString += currentlineinprogress + "\n\n"
currentlineinprogress = ""
print(finalFormattedString, file = fileOut)
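# Note (added): the standard library's textwrap.fill(text, width) implements
# a similar greedy line-filling strategy to the loop above.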
fileOut.close() | [
"[email protected]"
] | |
e25b350871e12d31f6b6bc62c04e5aba3c26130e | 5db3009eb36afe7110ed5402be3a9e570c58c540 | /my_plugins/YouCompleteMe/third_party/ycmd/third_party/jedi_deps/jedi/test/completion/docstring.py | 2b9f3481cf5fd27532a2eb46fe7d83f487fbd3c2 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"MIT"
] | permissive | imfangli/vimrc | ced2c6caece1cf19421c6ea7deb017bec4ca3a27 | d2d14e7d083d70cc8627ddccb5b99c53c3c38be3 | refs/heads/master | 2022-02-01T00:34:31.855421 | 2022-01-22T15:57:28 | 2022-01-22T15:57:28 | 211,766,038 | 2 | 0 | MIT | 2019-09-30T03:15:03 | 2019-09-30T03:15:02 | null | UTF-8 | Python | false | false | 3,723 | py | """ Test docstrings in functions and classes, which are used to infer types """
# -----------------
# sphinx style
# -----------------
def sphinxy(a, b, c, d, x):
""" asdfasdf
:param a: blablabla
:type a: str
:type b: (str, int)
:type c: random.Random
:type d: :class:`random.Random`
:param str x: blablabla
:rtype: dict
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? ['seed']
c.seed
#? ['seed']
d.seed
#? ['lower']
x.lower
#? dict()
sphinxy()
# wrong declarations
def sphinxy2(a, b, x, y, z):
"""
:param a: Forgot type declaration
:type a:
:param b: Just something
:type b: ``
:param x: Just something without type
:param y: A function
:type y: def l(): pass
:param z: A keyword
:type z: return
:rtype:
"""
#?
a
#?
b
#?
x
#?
y
#?
z
#?
sphinxy2()
def sphinxy_param_type_wrapped(a):
"""
:param str a:
Some description wrapped onto the next line with no space after the
colon.
"""
#? str()
a
# local classes -> github #370
class ProgramNode():
pass
def local_classes(node, node2):
"""
:type node: ProgramNode
... and the class definition after this func definition:
:type node2: ProgramNode2
"""
#? ProgramNode()
node
#? ProgramNode2()
node2
class ProgramNode2():
pass
def list_with_non_imports(lst):
"""
Should be able to work with tuples and lists and still import stuff.
:type lst: (random.Random, [collections.defaultdict, ...])
"""
#? ['seed']
lst[0].seed
import collections as col
# use some weird index
#? col.defaultdict()
lst[1][10]
def two_dots(a):
"""
:type a: json.decoder.JSONDecoder
"""
#? ['raw_decode']
a.raw_decode
# sphinx returns
def return_module_object():
"""
:rtype: :class:`random.Random`
"""
#? ['seed']
return_module_object().seed
# -----------------
# epydoc style
# -----------------
def epydoc(a, b):
""" asdfasdf
@type a: str
@param a: blablabla
@type b: (str, int)
@param b: blablah
@rtype: list
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? list()
epydoc()
# Returns with param type only
def rparam(a,b):
"""
@type a: str
"""
return a
#? str()
rparam()
# Composite types
def composite():
"""
@rtype: (str, int, dict)
"""
x, y, z = composite()
#? str()
x
#? int()
y
#? dict()
z
# Both docstring and calculated return type
def both():
"""
@rtype: str
"""
return 23
#? str() int()
both()
class Test(object):
def __init__(self):
self.teststr = ""
"""
# jedi issue #210
"""
def test(self):
#? ['teststr']
self.teststr
# -----------------
# statement docstrings
# -----------------
d = ''
""" bsdf """
#? str()
d.upper()
# -----------------
# class docstrings
# -----------------
class InInit():
def __init__(self, foo):
"""
:type foo: str
"""
#? str()
foo
class InClass():
"""
:type foo: str
"""
def __init__(self, foo):
#? str()
foo
class InBoth():
"""
:type foo: str
"""
def __init__(self, foo):
"""
:type foo: int
"""
#? str() int()
foo
def __init__(foo):
"""
:type foo: str
"""
#? str()
foo
# -----------------
# Renamed imports (#507)
# -----------------
import datetime
from datetime import datetime as datetime_imported
def import_issues(foo):
"""
@type foo: datetime_imported
"""
#? datetime.datetime()
foo
| [
"[email protected]"
] | |
b763c6f7ccf02fc091dbceba1f1aa1bff14ba011 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /8qD23E6XRMaWhyJ5z_9.py | 4d2e12540b5fba5e913c81d0957e9d467412bb06 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py |
def happiness_number(s):
happy = 0
sad = 0
happy += s.count(":)")
happy += s.count("(:")
sad -= s.count(":(")
sad -= s.count("):")
return happy + sad
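# Example (added for illustration): two happy faces ('(:' and ':)') minus one
# sad face (':(').
assert happiness_number('(: :) :(') == 1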
| [
"[email protected]"
] | |
94074b5e17d457e7a9d022d4332e0c95e6d45fa4 | c471e8d4d5cf59a68ccfbe79037cb256505b5502 | /venv/lib/python3.8/site-packages/hass_nabucasa/__init__.py | fb1c329404b66c3ff972eece6c2260f24dbd7c43 | [
"Apache-2.0"
] | permissive | vsevolodpohvalenko/home-assistant | b7fc37537929cc2c9989df357a8b76eb4de849e3 | 4ae19b7d5d843c65ba700922c1814755257eb4e0 | refs/heads/master | 2023-07-16T08:05:07.126996 | 2021-08-30T11:52:35 | 2021-08-30T11:52:35 | 401,318,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,947 | py | """Component to integrate the Home Assistant cloud."""
import asyncio
from datetime import datetime, timedelta
import json
import logging
from pathlib import Path
from typing import Awaitable, Callable, Coroutine, List
import aiohttp
import async_timeout
from atomicwrites import atomic_write
from jose import jwt
from .auth import CognitoAuth
from .client import CloudClient
from .cloudhooks import Cloudhooks
from .const import CONFIG_DIR, MODE_DEV, SERVERS, STATE_CONNECTED
from .google_report_state import GoogleReportState
from .iot import CloudIoT
from .remote import RemoteUI
from .utils import UTC, gather_callbacks, parse_date, utcnow
from .voice import Voice
_LOGGER = logging.getLogger(__name__)
class Cloud:
"""Store the configuration of the cloud connection."""
def __init__(
self,
client: CloudClient,
mode: str,
cognito_client_id=None,
user_pool_id=None,
region=None,
relayer=None,
google_actions_report_state_url=None,
subscription_info_url=None,
cloudhook_create_url=None,
remote_api_url=None,
alexa_access_token_url=None,
account_link_url=None,
voice_api_url=None,
acme_directory_server=None,
thingtalk_url=None,
):
"""Create an instance of Cloud."""
self._on_start: List[Callable[[], Awaitable[None]]] = []
self._on_stop: List[Callable[[], Awaitable[None]]] = []
self.mode = mode
self.client = client
self.id_token = None
self.access_token = None
self.refresh_token = None
self.iot = CloudIoT(self)
self.google_report_state = GoogleReportState(self)
self.cloudhooks = Cloudhooks(self)
self.remote = RemoteUI(self)
self.auth = CognitoAuth(self)
self.voice = Voice(self)
# Set reference
self.client.cloud = self
if mode == MODE_DEV:
self.cognito_client_id = cognito_client_id
self.user_pool_id = user_pool_id
self.region = region
self.relayer = relayer
self.google_actions_report_state_url = google_actions_report_state_url
self.subscription_info_url = subscription_info_url
self.cloudhook_create_url = cloudhook_create_url
self.remote_api_url = remote_api_url
self.alexa_access_token_url = alexa_access_token_url
self.acme_directory_server = acme_directory_server
self.account_link_url = account_link_url
self.voice_api_url = voice_api_url
self.thingtalk_url = thingtalk_url
return
info = SERVERS[mode]
self.cognito_client_id = info["cognito_client_id"]
self.user_pool_id = info["user_pool_id"]
self.region = info["region"]
self.relayer = info["relayer"]
self.google_actions_report_state_url = info["google_actions_report_state_url"]
self.subscription_info_url = info["subscription_info_url"]
self.cloudhook_create_url = info["cloudhook_create_url"]
self.remote_api_url = info["remote_api_url"]
self.alexa_access_token_url = info["alexa_access_token_url"]
self.account_link_url = info["account_link_url"]
self.voice_api_url = info["voice_api_url"]
self.acme_directory_server = info["acme_directory_server"]
self.thingtalk_url = info["thingtalk_url"]
@property
def is_logged_in(self) -> bool:
"""Get if cloud is logged in."""
return self.id_token is not None
@property
def is_connected(self) -> bool:
"""Return True if we are connected."""
return self.iot.state == STATE_CONNECTED
@property
def websession(self) -> aiohttp.ClientSession:
"""Return websession for connections."""
return self.client.websession
@property
def subscription_expired(self) -> bool:
"""Return a boolean if the subscription has expired."""
return utcnow() > self.expiration_date + timedelta(days=7)
@property
def expiration_date(self) -> datetime:
"""Return the subscription expiration as a UTC datetime object."""
return datetime.combine(
parse_date(self.claims["custom:sub-exp"]), datetime.min.time()
).replace(tzinfo=UTC)
@property
def username(self) -> str:
"""Return the subscription username."""
return self.claims["cognito:username"]
@property
def claims(self):
"""Return the claims from the id token."""
return self._decode_claims(self.id_token)
@property
def user_info_path(self) -> Path:
"""Get path to the stored auth."""
return self.path("{}_auth.json".format(self.mode))
def register_on_start(self, on_start_cb: Callable[[], Awaitable[None]]):
"""Register an async on_start callback."""
self._on_start.append(on_start_cb)
def register_on_stop(self, on_stop_cb: Callable[[], Awaitable[None]]):
"""Register an async on_stop callback."""
self._on_stop.append(on_stop_cb)
def path(self, *parts) -> Path:
"""Get config path inside cloud dir.
Async friendly.
"""
return Path(self.client.base_path, CONFIG_DIR, *parts)
def run_task(self, coro: Coroutine) -> Coroutine:
"""Schedule a task.
Return a coroutine.
"""
return self.client.loop.create_task(coro)
def run_executor(self, callback: Callable, *args) -> asyncio.Future:
"""Run function inside executore.
Return a awaitable object.
"""
return self.client.loop.run_in_executor(None, callback, *args)
async def fetch_subscription_info(self):
"""Fetch subscription info."""
await self.auth.async_check_token()
return await self.websession.get(
self.subscription_info_url, headers={"authorization": self.id_token}
)
async def login(self, email: str, password: str) -> None:
"""Log a user in."""
async with async_timeout.timeout(30):
await self.auth.async_login(email, password)
self.run_task(self.start())
async def logout(self) -> None:
"""Close connection and remove all credentials."""
await self.stop()
self.id_token = None
self.access_token = None
self.refresh_token = None
# Cleanup auth data
if self.user_info_path.exists():
await self.run_executor(self.user_info_path.unlink)
await self.client.cleanups()
def write_user_info(self) -> None:
"""Write user info to a file."""
base_path = self.path()
if not base_path.exists():
base_path.mkdir()
with atomic_write(self.user_info_path, overwrite=True) as fp:
fp.write(
json.dumps(
{
"id_token": self.id_token,
"access_token": self.access_token,
"refresh_token": self.refresh_token,
},
indent=4,
)
)
self.user_info_path.chmod(0o600)
async def start(self):
"""Start the cloud component."""
def load_config():
"""Load config."""
# Ensure config dir exists
base_path = self.path()
if not base_path.exists():
base_path.mkdir()
if not self.user_info_path.exists():
return None
try:
return json.loads(self.user_info_path.read_text())
except (ValueError, OSError) as err:
path = self.user_info_path.relative_to(self.client.base_path)
self.client.user_message(
"load_auth_data",
"Home Assistant Cloud error",
f"Unable to load authentication from {path}. [Please login again](/config/cloud)",
)
_LOGGER.warning(
"Error loading cloud authentication info from %s: %s", path, err
)
return None
if not self.is_logged_in:
info = await self.run_executor(load_config)
if info is None:
# No previous token data
return
self.id_token = info["id_token"]
self.access_token = info["access_token"]
self.refresh_token = info["refresh_token"]
await self.client.logged_in()
await gather_callbacks(_LOGGER, "on_start", self._on_start)
async def stop(self):
"""Stop the cloud component."""
if not self.is_logged_in:
return
await gather_callbacks(_LOGGER, "on_stop", self._on_stop)
@staticmethod
def _decode_claims(token):
"""Decode the claims in a token."""
return jwt.get_unverified_claims(token)
| [
"[email protected]"
] | |
ad7b0b5fccd2951a4d8e3d28056322b5a64c1f14 | f9646f1a269b0108b174b68172424f19ea563da5 | /lande/utilities/shell.py | b81a52fb5394cf59fcfe24f8cce4cf478e85e955 | [] | no_license | zimmerst/PhD-python | 07a4ef2dd66e2bc9ac08861a04acbf934cb0ae49 | 21d24c0ae70925201b05f73c8044cc39639f8859 | refs/heads/master | 2020-12-26T04:56:27.165230 | 2014-01-27T00:55:17 | 2014-01-27T00:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py |
def format_command(*args, **kwargs):
r""" Create a string suitable for running a shell program command where
*args are the positional arguments for the command and
**kwargs are the keyword arguments for the script
For example:
>>> print format_command('ls','-al', '--author')
ls \
-al \
--author
>>> print format_command('gtlike', evfile='ft1.fits')
gtlike \
evfile=ft1.fits
If you need parameters with dashes, you can pass in a dictionary:
>>> print format_command('du', '-h', {'--max-depth':3})
du \
-h \
--max-depth=3
This function is not (yet) very robust, but does what I need.
"""
line_break = ' \\' # slash
tab=' '
sep = '\n'.join([line_break,tab])
args=list(args)
for i,v in enumerate(args):
if isinstance(v,dict):
kwargs.update(args.pop(i))
if args < 1: raise Exception("Command name must be passed into script")
return sep.join(map(str,args) + ['%s=%s' % (a,b) for a,b in kwargs.items()])
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"lande@37a9682d-6443-41a2-8582-b44379b6e86f"
] | lande@37a9682d-6443-41a2-8582-b44379b6e86f |
579528bb6dac8b7a786b56c7fa8aebcbc771d0bc | dd15b5ed1050bdd6de3d9a0ee0c448d2ccba09e0 | /assets/python/mm_surface.py | f39a152bc61b12bf8992ad5c81b8cbbfa09dac2c | [] | no_license | rblack42/nffs-2021-symposium | 7f5c581fb46c23dd6896a37e0ac429b22d9de823 | 496696a43958fdf6ad5870b730675ed0b097e8cc | refs/heads/master | 2023-02-24T02:16:01.579345 | 2021-01-27T21:47:15 | 2021-01-27T21:47:15 | 329,161,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import math
class Surface(object):
def __init__(self,
span, # with dihedral
chord, # root chord
camber, # root camber
tip_radius, # outer LE tip radius
center_span, # center section span (<= span)
tip_elevation # dihedral tip elevation
):
self.root_chord = chord
self.root_camber = camber * chord / 100
self.center_span = center_span
self.tip_radius = tip_radius
self.span = span
self.tip_elevation = tip_elevation
def radius(self, c, t):
return c**2/(8*t) + t/2
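    # Derivation note (added): for a circular segment with chord c and sagitta
    # (camber height) t, the chord/sagitta relation (c/2)**2 = t*(2*R - t)
    # rearranges to R = c**2/(8*t) + t/2, which is what radius() returns.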
def arc_height(self, x_percent, chord, camber):
xr = x_percent * chord
rad = self.radius(chord, camber)
cx = chord/2
cy = -(rad-camber)
fact = math.sqrt(rad**2 - (xr - cx)**2)
xh = cy + fact
print(xr,xh, rad, camber, cx,cy,rad,fact)
return xh
def get_chord(self, y):
r = self.tip_radius
c = self.root_chord
yt = y - (self.span/2 - r)
print("->",y,r, yt)
if yt < 0:
return c
f = r**2 - yt**2
print("F:",f)
return c - r + math.sqrt(f)
def gen_mesh(self,nx, ny):
dx = 1.0/nx
dy = 1.0/ny
print(dx,dy)
for y in range(ny+1):
yr = y * dy * self.span/2 # 0-span
ch = self.get_chord(yr)
x0 = self.root_chord - ch;
for x in range(nx+1):
xr = x0 + x * dx * ch
print("(%3.2f,%3.2f)" % (xr,yr), end="")
print()
def run(self):
tip_span = (self.span - self.center_span)/2
self.dihedral_angle = \
math.atan2(self.tip_elevation, tip_span)
print (self.dihedral_angle * 180/math.pi)
self.gen_mesh(nx=5,ny=50)
if __name__ == "__main__":
    s = Surface(18, 5, 6, 2, 10, 1.75)
s.run()
| [
"[email protected]"
] | |
e54990b791469e8f9788843e62d9cbd5ba1586b7 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Py Box/Games/Connect4.py | f10c212bbe6d558099c3a54e5c383f2009f477de | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1559de38727aac969fef6c397825e86b6e68b16dacaafbe4b2f54499954aaaa9
size 5271
| [
"[email protected]"
] | |
fa751f128d9ce6cc8de27b5d0d8262f701ca0df7 | 1dc727f5b326dd984962efa4d982ed9fe036c8fc | /cmsplugin_hanz_card/cms_plugins.py | 9342ebd45a4d621b861df6bbe5db794242c93700 | [] | no_license | hanztura/iamhanz | 2a7380dfe5aa9f05d72fdc1d77d77c950692d30c | 1aeee4c3404ed5048a48187e8b75f0e958c042ba | refs/heads/master | 2021-08-30T22:51:53.916315 | 2017-12-19T18:13:44 | 2017-12-19T18:13:44 | 113,453,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_filer_image.cms_plugins import FilerImagePlugin
from .models import Card
from .forms import CardForm
@plugin_pool.register_plugin
class CardPlugin(FilerImagePlugin):
cache = False
form = CardForm
model = Card
name = _("Card Plugin")
render_template = "plugins/card_plugin.html"
fieldsets = (
(_('Card'), {
'fields': [
'style',
'card_title',
'caption_text',
'image',
'image_url',
'alt_text',
]
}),
(_('Image resizing options'), {
'fields': (
'use_original_image',
('width', 'height',),
('crop', 'upscale',),
'thumbnail_option',
'use_autoscale',
)
}),
(None, {
'fields': ('alignment',)
}),
(_('More'), {
'classes': ('collapse',),
'fields': (
'free_link',
'page_link',
'file_link',
('original_link', 'target_blank',),
'link_attributes',
'description',
),
}),
) | [
"[email protected]"
] | |
0af95378e0e392f99cf06587dc97eef7e8859d13 | ef2ea1152afc07e1341abdc99b037f2c803a0a68 | /test_cnn.py | 6de00fafda4942ffd6bbc0f62aafb20aaa792164 | [
"Apache-2.0"
] | permissive | Diriba-Getch/CNN-Multi-Label-Text-Classificati2on | 484a82ed66e7266fb565ebe834e2c7842d1d2f91 | 0792c0f244b8190e097da42e8719c8bb03573e14 | refs/heads/master | 2023-05-14T16:22:32.973452 | 2021-05-27T14:47:21 | 2021-05-27T14:47:21 | 362,522,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,167 | py | # -*- coding:utf-8 -*-
import os
import time
import numpy as np
import tensorflow as tf
import data_helpers
# Parameters
# ==================================================
logger = data_helpers.logger_fn('tflog', 'test-{}.log'.format(time.asctime()))
MODEL = input("☛ Please input the model file you want to test, it should be like(1490175368): ")
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input('✘ The format of your input is illegal, it should be like(1490175368), please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
CLASS_BIND = input("☛ Use Class Bind or Not?(Y/N) \n")
while not (CLASS_BIND.isalpha() and CLASS_BIND.upper() in ['Y', 'N']):
CLASS_BIND = input('✘ The format of your input is illegal, please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
CLASS_BIND = CLASS_BIND.upper()
TRAININGSET_DIR = 'Train.json'
VALIDATIONSET_DIR = 'Validation.json'
TESTSET_DIR = 'Test.json'
MODEL_DIR = 'runs/' + MODEL + '/checkpoints/'
SAVE_FILE = 'predictions.txt'
# Data loading params
tf.flags.DEFINE_string("training_data_file", TRAININGSET_DIR, "Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file", VALIDATIONSET_DIR, "Data source for the validation data")
tf.flags.DEFINE_string("test_data_file", TESTSET_DIR, "Data source for the test data")
tf.flags.DEFINE_string("checkpoint_dir", MODEL_DIR, "Checkpoint directory from training run")
tf.flags.DEFINE_string("use_classbind_or_not", CLASS_BIND, "Use the class bind info or not.")
# Model Hyperparameters
tf.flags.DEFINE_integer("pad_seq_len", 150, "Recommand padding Sequence length of data (depends on the data)")
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("embedding_type", 1, "The embedding type (default: 1)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_classes", 367, "Number of labels (depends on the task)")
tf.flags.DEFINE_integer("top_num", 2, "Number of top K prediction classess (default: 3)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth", True, "Allow gpu options growth")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
dilim = '-' * 100
logger.info('\n'.join([dilim, *['{:>50}|{:<50}'.format(attr.upper(), value)
for attr, value in sorted(FLAGS.__flags.items())], dilim]))
def test_cnn():
"""Test CNN model."""
# Load data
logger.info("✔ Loading data...")
    logger.info('Recommended padding sequence length is: {}'.format(FLAGS.pad_seq_len))
logger.info('✔︎ Test data processing...')
test_data = data_helpers.load_data_and_labels(FLAGS.test_data_file, FLAGS.num_classes, FLAGS.embedding_dim)
logger.info('✔︎ Test data padding...')
x_test, y_test = data_helpers.pad_data(test_data, FLAGS.pad_seq_len)
y_test_bind = test_data.labels_bind
# Build vocabulary
VOCAB_SIZE = data_helpers.load_vocab_size(FLAGS.embedding_dim)
pretrained_word2vec_matrix = data_helpers.load_word2vec_matrix(VOCAB_SIZE, FLAGS.embedding_dim)
# Load cnn model
logger.info("✔ Loading model...")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# pre-trained_word2vec
pretrained_embedding = graph.get_operation_by_name("embedding/W").outputs[0]
# Tensors we want to evaluate
logits = graph.get_operation_by_name("output/logits").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(zip(x_test, y_test, y_test_bind)),
FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predicitons = []
eval_loss, eval_rec, eval_acc, eval_counter = 0.0, 0.0, 0.0, 0
for batch_test in batches:
x_batch_test, y_batch_test, y_batch_test_bind = zip(*batch_test)
feed_dict = {
input_x: x_batch_test,
dropout_keep_prob: 1.0
}
batch_logits = sess.run(logits, feed_dict)
if FLAGS.use_classbind_or_not == 'Y':
predicted_labels = data_helpers.get_label_using_logits_and_classbind(
batch_logits, y_batch_test_bind, top_number=FLAGS.top_num)
if FLAGS.use_classbind_or_not == 'N':
predicted_labels = data_helpers.get_label_using_logits(batch_logits, top_number=FLAGS.top_num)
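                # Illustrative note (added): a top-k selection of the kind
                # data_helpers.get_label_using_logits presumably performs can be
                # written as np.argsort(batch_logits, axis=-1)[:, -FLAGS.top_num:].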
all_predicitons = np.append(all_predicitons, predicted_labels)
cur_rec, cur_acc = 0.0, 0.0
for index, predicted_label in enumerate(predicted_labels):
rec_inc, acc_inc = data_helpers.cal_rec_and_acc(predicted_label, y_batch_test[index])
cur_rec, cur_acc = cur_rec + rec_inc, cur_acc + acc_inc
cur_rec = cur_rec / len(y_batch_test)
cur_acc = cur_acc / len(y_batch_test)
eval_rec, eval_acc, eval_counter = eval_rec + cur_rec, eval_acc + cur_acc, eval_counter + 1
logger.info("✔︎ validation batch {} finished.".format(eval_counter))
eval_rec = float(eval_rec / eval_counter)
eval_acc = float(eval_acc / eval_counter)
logger.info("☛ Recall {:g}, Accuracy {:g}".format(eval_rec, eval_acc))
np.savetxt(SAVE_FILE, list(zip(all_predicitons)), fmt='%s')
logger.info("✔ Done.")
if __name__ == '__main__':
test_cnn()
| [
"[email protected]"
] | |
20bb0ef25901482c47de8542f21e7e78fb02f09f | 614cad3588af9c0e51e0bb98963075e3195e92f5 | /models/vote_net/backbone_module.py | 674167186b7eb5fdbacd2d4702c1c38abea4bcc9 | [] | no_license | dragonlong/haoi-pose | 2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58 | 43388efd911feecde588b27a753de353b8e28265 | refs/heads/master | 2023-07-01T14:18:29.029484 | 2021-08-10T10:57:42 | 2021-08-10T10:57:42 | 294,602,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,862 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, '../models/pointnet2'))
from pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
class Pointnet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, input_feature_dim=0):
super().__init__()
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
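    # Illustrative note (added): for an input point cloud of shape
    # (B, N, 3 + C), _break_up_pc returns xyz of shape (B, N, 3) and features
    # of shape (B, C, N) (channels-first), or features=None when C == 0.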
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
            Point cloud to run predictions on
            Each point in the point-cloud MUST
            be formatted as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX-inds: int64 Tensor of shape (B,K) values in [0,N-1]
"""
if not end_points: end_points = {}
batch_size = pointcloud.shape[0]
xyz, features = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz, features)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 2 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
end_points['fp2_features'] = features
end_points['fp2_xyz'] = end_points['sa2_xyz']
num_seed = end_points['fp2_xyz'].shape[1]
end_points['fp2_inds'] = end_points['sa1_inds'][:,0:num_seed] # indices among the entire input point clouds
return end_points
if __name__=='__main__':
backbone_net = Pointnet2Backbone(input_feature_dim=0).cuda()
print(backbone_net)
backbone_net.eval()
out = backbone_net(torch.rand(16,20000,3).cuda())
for key in sorted(out.keys()):
print(key, '\t', out[key].shape)
| [
"[email protected]"
] | |
d0c04d8d6b0caebcc5131c6d7c9185c6da08fb8a | b7ebcfa8429948745dbd9fb11f6d13c6905e9aa1 | /lib/panda/_obj.py | fd7b9191f276e967f2b4dc2a6fbb176e63be53ec | [] | no_license | SiewYan/PandaTree | c00c83a92044b59d460dd2d9a4319eef9f777045 | 5d2da2dc5d419c498a3a14870197aad360d6b071 | refs/heads/master | 2020-12-30T12:35:36.718617 | 2018-02-01T16:25:54 | 2018-02-01T16:25:54 | 91,395,990 | 0 | 1 | null | 2017-05-16T00:16:27 | 2017-05-16T00:16:27 | null | UTF-8 | Python | false | false | 2,279 | py | from base import Definition
from oneliner import Include
from constexpr import Constant, Enum
from refbranch import RefBranch
from refvbranch import RefVectorBranch
from generic import GenericBranch
from objbranch import ObjBranch
from branch import Branch
from reference import Reference
from function import Function
from obj import Object
def __init__(self, name, source):
"""
Constructor called either by PhysicsObject or Tree.
Parse the source text block and collect all information on this object.
"""
self.name = name
self.includes = []
self.constants = []
self.enums = []
self.objbranches = []
self.branches = []
self.references = []
self.functions = []
while True:
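        # Each line is tried against every definition type below in order; a
        # Definition.NoMatch simply falls through to the next candidate, and a
        # line matching none of them terminates the block.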
line = source.readline()
line = line.strip()
if line == '':
break
try:
self.includes.append(Include(line))
continue
except Definition.NoMatch:
pass
try:
self.enums.append(Enum(line, source))
continue
except Definition.NoMatch:
pass
try:
self.constants.append(Constant(line, source))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(RefBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(RefVectorBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.objbranches.append(ObjBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(Branch(line))
continue
except Definition.NoMatch:
pass
try:
self.branches.append(GenericBranch(line))
continue
except Definition.NoMatch:
pass
try:
self.references.append(Reference(line))
continue
except Definition.NoMatch:
pass
try:
self.functions.append(Function(line, source))
continue
except Definition.NoMatch:
pass
break
Object.__init__ = __init__
| [
"[email protected]"
] | |
31d9a115cbd2a43f5ea11e98d4b3a4cde1224566 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500_13TeV-madgraph_cff.py | 55b8ce6b6aafef4854d46112ee4cdb202e8e7861 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/b9fddd83b7d8e490347744408902940547e8135f/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/narrow/v1/Radion_ZZ_ZlepZhad_narrow_M2500_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"[email protected]"
] | |
d3fc31344ad05d1cccd859ad51a3d6332059f748 | 8b7559f7b69173109d7b6e89ab912dbb8b675c3f | /main/tests/test_models.py | 104c45dcd9bc75e5d3b2024147d13fa149a12099 | [] | no_license | GoodnessEzeokafor/django-bookstore | 7859b74ad0bddd32415b6bd917d37c008ba38a73 | dc47e7fe201cf2a62a93c30730fa1e72a6707f93 | refs/heads/master | 2023-02-14T14:13:58.941227 | 2021-01-08T10:14:29 | 2021-01-08T10:14:29 | 327,135,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from decimal import Decimal
from django.test import TestCase
from main import models
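# The test below relies on a custom manager on Product exposing .active().
# A minimal sketch of such a manager (illustrative -- the real definition
# lives in main/models.py) could be:
#   class ActiveManager(models.Manager):
#       def active(self):
#           return self.filter(active=True)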
class TestModel(TestCase):
def test_active_manager_works(self):
models.Product.objects.create(
name="The cathedral and the bazaar",
price=Decimal("10.0")
)
models.Product.objects.create(
name="Pride and Prejudice",
price=Decimal("2.00")
)
models.Product.objects.create(
name="A Tale of Two Cities",
price = Decimal("2.00"),
active=False
)
self.assertEqual(len(models.Product.objects.active()), 2) | [
"[email protected]"
] | |
65da8f31eec34e35df36db0edc77988d9760b5bb | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_plat_product_get_list_request.py | f8948ba21e7402d29e9aed6e09e6cc8e9cb8dcca | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 738 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.platproduct.model.plat_product_get_list_request import PlatProductGetListRequest
class TestPlatProductGetListRequest(unittest.TestCase):
"""PlatProductGetListRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPlatProductGetListRequest(self):
"""Test PlatProductGetListRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = PlatProductGetListRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ab3cd9228918e57b3233b0500f80929d0d936bb2 | 85a247759d026d03eb8b625667a5aa99bdace5b0 | /deep_learning_nano_degree/4_recurrent_neural_networks/language_translation/language_translation.py | 2e5ce1af4051fd693596d7f5a0a4da351d800331 | [] | no_license | mkao006/dl_udacity | 000d325bdeb507f2b57a25e592c34ec72287cb06 | a3f795b3c66c16946b0cdab0255cc1d79fc0f82f | refs/heads/master | 2022-12-15T06:50:49.403783 | 2018-01-30T23:45:52 | 2018-01-30T23:45:52 | 93,637,489 | 0 | 0 | null | 2022-12-07T23:59:22 | 2017-06-07T13:19:11 | Jupyter Notebook | UTF-8 | Python | false | false | 18,903 | py | import time
import numpy as np
import tensorflow as tf
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
def inspect_data(source_text, target_text, view_sentence_range=(0, 10)):
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(
len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(
np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[
view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[
view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int,
target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
source_id_text = [
[source_vocab_to_int[word] for word in sentence.split()]
for sentence in source_text.split('\n')]
target_id_text = [
[target_vocab_to_int[word] for word in sentence.split()] +
[target_vocab_to_int['<EOS>']]
for sentence in target_text.split('\n')]
return source_id_text, target_id_text
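# Illustrative example (added, hypothetical vocabularies): with
# source_vocab_to_int = {'new': 7, 'jersey': 8}, the source line "new jersey"
# maps to [7, 8]; every target sentence additionally gains a trailing
# target_vocab_to_int['<EOS>'] id.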
def model_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
"""
input = tf.placeholder(tf.int32, shape=[None, None], name='input')
targets = tf.placeholder(tf.int32, shape=[None, None], name='target')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return input, targets, learning_rate, keep_prob
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
go_id = target_vocab_to_int['<GO>']
truncated_data = tf.strided_slice(
input_=target_data,
begin=[0, 0],
end=[batch_size, -1],
strides=[1, 1])
start_signal = tf.fill(dims=[batch_size, 1], value=go_id)
processed_decoding_input = tf.concat(
[start_signal, truncated_data], axis=1)
return processed_decoding_input
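# Illustrative example (added): with batch_size=1 and target ids
# [[5, 6, 7, eos]], the decoder input becomes [[go, 5, 6, 7]] -- the last
# token is sliced off and the '<GO>' id is prepended.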
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
"""
cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
cell_with_dropout = tf.contrib.rnn.DropoutWrapper(
cell=cell, output_keep_prob=keep_prob)
encoder = tf.contrib.rnn.MultiRNNCell(
cells=[cell_with_dropout] * num_layers)
_, encoder_state = tf.nn.dynamic_rnn(cell=encoder,
inputs=rnn_inputs,
dtype=tf.float32)
return encoder_state
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
sequence_length, decoding_scope, output_fn,
keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
"""
train_decoder_function = tf.contrib.seq2seq.simple_decoder_fn_train(
encoder_state=encoder_state)
train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell,
decoder_fn=train_decoder_function,
inputs=dec_embed_input,
sequence_length=sequence_length,
scope=decoding_scope)
logit = output_fn(train_pred)
return logit
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope,
output_fn, keep_prob):
# NOTE (Michael): Need to double check where the 'keep_prob' goes.
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
"""
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn=output_fn,
encoder_state=encoder_state,
embeddings=dec_embeddings,
start_of_sequence_id=start_of_sequence_id,
end_of_sequence_id=end_of_sequence_id,
maximum_length=maximum_length - 1,
num_decoder_symbols=vocab_size)
infer_logit, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell,
decoder_fn=infer_decoder_fn,
scope=decoding_scope)
return infer_logit
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int,
keep_prob):
"""
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
"""
cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
cell_with_dropout = tf.contrib.rnn.DropoutWrapper(
cell=cell, output_keep_prob=keep_prob)
decoder = tf.contrib.rnn.MultiRNNCell(
cells=[cell_with_dropout] * num_layers)
with tf.variable_scope('decoding_scope') as decoding_scope:
# NOTE (Michael): Need to double check the activation function
output_fn = (lambda x: tf.contrib.layers.fully_connected(
inputs=x,
num_outputs=vocab_size,
activation_fn=None,
scope=decoding_scope))
train_logit = decoding_layer_train(
encoder_state=encoder_state,
dec_cell=decoder,
dec_embed_input=dec_embed_input,
sequence_length=sequence_length,
decoding_scope=decoding_scope,
output_fn=output_fn,
keep_prob=keep_prob)
with tf.variable_scope('decoding_scope', reuse=True) as decoding_scope:
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
infer_logit = decoding_layer_infer(
encoder_state=encoder_state,
dec_cell=decoder,
dec_embeddings=dec_embeddings,
start_of_sequence_id=start_of_sequence_id,
end_of_sequence_id=end_of_sequence_id,
maximum_length=sequence_length - 1,
vocab_size=vocab_size,
decoding_scope=decoding_scope,
output_fn=output_fn,
keep_prob=keep_prob)
return train_logit, infer_logit
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers,
target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
"""
# Encode the source and output the state
embedded_input = tf.contrib.layers.embed_sequence(
ids=input_data,
vocab_size=source_vocab_size,
embed_dim=enc_embedding_size)
encoder_state = encoding_layer(rnn_inputs=embedded_input,
rnn_size=rnn_size,
num_layers=num_layers,
keep_prob=keep_prob)
decoder_input = process_decoding_input(
target_data=target_data,
target_vocab_to_int=target_vocab_to_int,
batch_size=batch_size)
# Take in the state and processed target input and output the
# training and inference logits
decoder_embeddings_weights = tf.Variable(
tf.random_uniform([target_vocab_size, dec_embedding_size]))
decoder_embed_input = tf.nn.embedding_lookup(
params=decoder_embeddings_weights,
ids=decoder_input)
train_logit, infer_logit = decoding_layer(
dec_embed_input=decoder_embed_input,
dec_embeddings=decoder_embeddings_weights,
encoder_state=encoder_state,
vocab_size=target_vocab_size,
sequence_length=sequence_length,
rnn_size=rnn_size,
num_layers=num_layers,
target_vocab_to_int=target_vocab_to_int,
keep_prob=keep_prob)
return train_logit, infer_logit
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0, 0), (0, max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0, 0), (0, max_seq - logits.shape[1]), (0, 0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
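# Example (added): if target has shape (B, 10) and logits (B, 12, V), target
# is zero-padded to length 12 above before comparing against argmax(logits, 2).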
def train_model(source,
target,
epochs=10,
batch_size=128,
rnn_size=256,
num_layers=3,
encoding_embedding_size=256,
decoding_embedding_size=256,
learning_rate=0.001,
keep_probability=0.5,
save_path='checkpoints/dev'):
''' Wrapper to train the model
'''
train_source = source[batch_size:]
train_target = target[batch_size:]
valid_source = helper.pad_sentence_batch(source[:batch_size])
valid_target = helper.pad_sentence_batch(target[:batch_size])
(source_int_text, target_int_text), (source_vocab_to_int,
target_vocab_to_int), _ = (
helper.load_preprocess())
max_source_sentence_length = max([len(sentence)
for sentence in source_int_text])
    # Build the graph
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(
max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
# According to the original paper, reversing the input actually
# improves the model.
train_logits, inference_logits = seq2seq_model(
input_data=tf.reverse(input_data, [-1]),
target_data=targets,
keep_prob=keep_prob,
batch_size=batch_size,
sequence_length=sequence_length,
source_vocab_size=len(source_vocab_to_int),
target_vocab_size=len(target_vocab_to_int),
            enc_embedding_size=encoding_embedding_size,
            dec_embedding_size=decoding_embedding_size,
rnn_size=rnn_size,
num_layers=num_layers,
target_vocab_to_int=target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# Train the model
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(
np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i,
len(source_int_text) // batch_size,
train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
# Save parameters for checkpoint
helper.save_params(save_path)
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
sentence_ind = [vocab_to_int.get(word, vocab_to_int['<UNK>'])
for word in sentence.lower().split()]
return sentence_ind
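# Illustrative example (added, hypothetical vocabulary):
# sentence_to_seq('he saw a truck', {'he': 3, 'saw': 4, 'a': 5, '<UNK>': 2})
# returns [3, 4, 5, 2] -- unknown words fall back to the '<UNK>' id.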
def translate(sentence):
(_,
(source_vocab_to_int, target_vocab_to_int),
(source_int_to_vocab, target_int_to_vocab)) = helper.load_preprocess()
load_path = helper.load_params()
    translate_sentence_ind = sentence_to_seq(
        sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(
logits, {input_data: [translate_sentence_ind], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format(
[i for i in translate_sentence_ind]))
print(' English Words: {}'.format(
[source_int_to_vocab[i] for i in translate_sentence_ind]))
print('\nPrediction')
print(' Word Ids: {}'.format(
[i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format(
[target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
# Inspect data
inspect_data(source_text, target_text, (0, 5))
# Unit tests
tests.test_text_to_ids(text_to_ids)
tests.test_model_inputs(model_inputs)
tests.test_process_decoding_input(process_decoding_input)
tests.test_encoding_layer(encoding_layer)
tests.test_decoding_layer_train(decoding_layer_train)
tests.test_decoding_layer_infer(decoding_layer_infer)
tests.test_decoding_layer(decoding_layer)
tests.test_seq2seq_model(seq2seq_model)
tests.test_sentence_to_seq(sentence_to_seq)
# Preprocess all the data and save it
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
# Load the preprocessed data
((source_int_text, target_int_text),
(source_vocab_to_int, target_vocab_to_int),
_) = helper.load_preprocess()
# Train the model
train_model(source_int_text,
target_int_text,
epochs=10,
batch_size=256,
rnn_size=256,
num_layers=3,
encoding_embedding_size=256,
decoding_embedding_size=256,
learning_rate=0.001,
keep_probability=0.5,
save_path='checkpoints/dev')
# Translate
translate_sentence = 'he saw a old yellow truck .'
translate(translate_sentence)
| [
"[email protected]"
] | |
456a38ad9b87e1b826c521e146df928c90163e88 | 0fbd56d4a2ee512cb47f557bea310618249a3d2e | /official/vision/beta/modeling/layers/roi_sampler.py | 46b4c349839f207291fc2ca42a601d9eaabce92c | [
"Apache-2.0"
] | permissive | joppemassant/models | 9968f74f5c48096f3b2a65e6864f84c0181465bb | b2a6712cbe6eb9a8639f01906e187fa265f3f48e | refs/heads/master | 2022-12-10T01:29:31.653430 | 2020-09-11T11:26:59 | 2020-09-11T11:26:59 | 294,675,920 | 1 | 1 | Apache-2.0 | 2020-09-11T11:21:51 | 2020-09-11T11:21:51 | null | UTF-8 | Python | false | false | 5,978 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ROI sampler."""
# Import libraries
import tensorflow as tf
from official.vision.beta.modeling.layers import box_matcher
from official.vision.beta.modeling.layers import box_sampler
from official.vision.beta.ops import box_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class ROISampler(tf.keras.layers.Layer):
"""Sample ROIs and assign targets to the sampled ROIs."""
def __init__(self,
mix_gt_boxes=True,
num_sampled_rois=512,
foreground_fraction=0.25,
foreground_iou_threshold=0.5,
background_iou_high_threshold=0.5,
background_iou_low_threshold=0,
**kwargs):
"""Initializes a ROI sampler.
Args:
mix_gt_boxes: bool, whether to mix the groundtruth boxes with proposed
ROIs.
num_sampled_rois: int, the number of sampled ROIs per image.
foreground_fraction: float in [0, 1], what percentage of proposed ROIs
should be sampled from the foreground boxes.
      foreground_iou_threshold: float, represents the IoU threshold for a box
        to be considered as positive (if >= `foreground_iou_threshold`).
      background_iou_high_threshold: float, represents the IoU threshold for a
        box to be considered as negative (if overlap in
        [`background_iou_low_threshold`, `background_iou_high_threshold`]).
      background_iou_low_threshold: float, represents the IoU threshold for a
        box to be considered as negative (if overlap in
        [`background_iou_low_threshold`, `background_iou_high_threshold`]).
**kwargs: other key word arguments passed to Layer.
"""
self._config_dict = {
'mix_gt_boxes': mix_gt_boxes,
'num_sampled_rois': num_sampled_rois,
'foreground_fraction': foreground_fraction,
'foreground_iou_threshold': foreground_iou_threshold,
'background_iou_high_threshold': background_iou_high_threshold,
'background_iou_low_threshold': background_iou_low_threshold,
}
self._matcher = box_matcher.BoxMatcher(
foreground_iou_threshold,
background_iou_high_threshold,
background_iou_low_threshold)
self._sampler = box_sampler.BoxSampler(
num_sampled_rois, foreground_fraction)
super(ROISampler, self).__init__(**kwargs)
def call(self, boxes, gt_boxes, gt_classes):
"""Assigns the proposals with groundtruth classes and performs subsmpling.
Given `proposed_boxes`, `gt_boxes`, and `gt_classes`, the function uses the
following algorithm to generate the final `num_samples_per_image` RoIs.
1. Calculates the IoU between each proposal box and each gt_boxes.
2. Assigns each proposed box with a groundtruth class and box by choosing
the largest IoU overlap.
3. Samples `num_samples_per_image` boxes from all proposed boxes, and
returns box_targets, class_targets, and RoIs.
Args:
boxes: a tensor of shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment. The last dimension is the
box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax]
format.
gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4].
The coordinates of gt_boxes are in the pixel coordinates of the scaled
image. This tensor might have padding of values -1 indicating the
invalid box coordinates.
gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with values of -1 indicating the invalid
classes.
Returns:
sampled_rois: a tensor of shape of [batch_size, K, 4], representing the
coordinates of the sampled RoIs, where K is the number of the sampled
RoIs, i.e. K = num_samples_per_image.
sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the
        box coordinates of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_classes: a tensor of shape of [batch_size, K], storing the
classes of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_indices: a tensor of shape of [batch_size, K], storing the
        indices of the sampled groundtruth boxes in the original `gt_boxes`
tensor, i.e.
gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
"""
if self._config_dict['mix_gt_boxes']:
gt_boxes = tf.cast(gt_boxes, dtype=boxes.dtype)
boxes = tf.concat([boxes, gt_boxes], axis=1)
(matched_gt_boxes, matched_gt_classes, matched_gt_indices,
positive_matches, negative_matches, ignored_matches) = (
self._matcher(boxes, gt_boxes, gt_classes))
sampled_indices = self._sampler(
positive_matches, negative_matches, ignored_matches)
sampled_rois, sampled_gt_boxes, sampled_gt_classes, sampled_gt_indices = (
box_ops.gather_instances(
sampled_indices,
boxes,
matched_gt_boxes,
matched_gt_classes,
matched_gt_indices))
return (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
sampled_gt_indices)
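  # Shape walkthrough (added, illustrative): with batch_size=2, N=1000
  # proposals, MAX_NUM_INSTANCES=100 and num_sampled_rois=512, call() returns
  # tensors of shapes [2, 512, 4], [2, 512, 4], [2, 512] and [2, 512].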
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
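# --- Editor's usage sketch (not part of the original file). Shapes follow the
# docstrings above; the tf import and dummy tensors are assumptions, so treat
# this as an untested illustration rather than the canonical API:
# import tensorflow as tf
# sampler = ROISampler(
#     mix_gt_boxes=True,
#     num_sampled_rois=512,
#     foreground_fraction=0.25,
#     foreground_iou_threshold=0.5,
#     background_iou_high_threshold=0.5,
#     background_iou_low_threshold=0.0)
# proposals = tf.random.uniform([2, 1000, 4])     # [batch_size, N, 4]
# gt_boxes = tf.random.uniform([2, 100, 4])       # padded with -1 in practice
# gt_classes = tf.ones([2, 100], dtype=tf.int32)  # padded with -1 in practice
# rois, roi_boxes, roi_classes, roi_indices = sampler(
#     proposals, gt_boxes, gt_classes)            # rois: [2, 512, 4]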
| [
"[email protected]"
] | |
1dba441eba9e895c8b00e03309a0bcd68e736e31 | d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3 | /chromium/mojo/public/tools/bindings/pylib/mojom/generate/test_support.py | eb394619d2bf4855522d7157dff0d13e87c59850 | [
"BSD-3-Clause"
] | permissive | Csineneo/Vivaldi | 4eaad20fc0ff306ca60b400cd5fad930a9082087 | d92465f71fb8e4345e27bd889532339204b26f1e | refs/heads/master | 2022-11-23T17:11:50.714160 | 2019-05-25T11:45:11 | 2019-05-25T11:45:11 | 144,489,531 | 5 | 4 | BSD-3-Clause | 2022-11-04T05:55:33 | 2018-08-12T18:04:37 | null | UTF-8 | Python | false | false | 6,092 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import traceback
import module as mojom
# Support for writing mojom test cases.
# RunTest(fn) will execute fn, catching any exceptions. fn should return
# the number of errors that are encountered.
#
# EXPECT_EQ(a, b) and EXPECT_TRUE(b) will print error information if the
# expectations are not true and return a non zero value. This allows test cases
# to be written like this
#
# def Foo():
# errors = 0
# errors += EXPECT_EQ('test', test())
# ...
# return errors
#
# RunTest(Foo)
def FieldsAreEqual(field1, field2):
if field1 == field2:
return True
return field1.name == field2.name and \
KindsAreEqual(field1.kind, field2.kind) and \
field1.ordinal == field2.ordinal and \
field1.default == field2.default
def KindsAreEqual(kind1, kind2):
if kind1 == kind2:
return True
if kind1.__class__ != kind2.__class__ or kind1.spec != kind2.spec:
return False
if kind1.__class__ == mojom.Kind:
return kind1.spec == kind2.spec
if kind1.__class__ == mojom.Struct:
if kind1.name != kind2.name or \
kind1.spec != kind2.spec or \
len(kind1.fields) != len(kind2.fields):
return False
for i in range(len(kind1.fields)):
if not FieldsAreEqual(kind1.fields[i], kind2.fields[i]):
return False
return True
if kind1.__class__ == mojom.Array:
return KindsAreEqual(kind1.kind, kind2.kind)
print 'Unknown Kind class: ', kind1.__class__.__name__
return False
def ParametersAreEqual(parameter1, parameter2):
if parameter1 == parameter2:
return True
return parameter1.name == parameter2.name and \
parameter1.ordinal == parameter2.ordinal and \
parameter1.default == parameter2.default and \
KindsAreEqual(parameter1.kind, parameter2.kind)
def MethodsAreEqual(method1, method2):
if method1 == method2:
return True
if method1.name != method2.name or \
method1.ordinal != method2.ordinal or \
len(method1.parameters) != len(method2.parameters):
return False
for i in range(len(method1.parameters)):
if not ParametersAreEqual(method1.parameters[i], method2.parameters[i]):
return False
return True
def InterfacesAreEqual(interface1, interface2):
if interface1 == interface2:
return True
if interface1.name != interface2.name or \
len(interface1.methods) != len(interface2.methods):
return False
for i in range(len(interface1.methods)):
if not MethodsAreEqual(interface1.methods[i], interface2.methods[i]):
return False
return True
def ModulesAreEqual(module1, module2):
if module1 == module2:
return True
if module1.name != module2.name or \
module1.namespace != module2.namespace or \
len(module1.structs) != len(module2.structs) or \
len(module1.interfaces) != len(module2.interfaces):
return False
for i in range(len(module1.structs)):
if not KindsAreEqual(module1.structs[i], module2.structs[i]):
return False
for i in range(len(module1.interfaces)):
if not InterfacesAreEqual(module1.interfaces[i], module2.interfaces[i]):
return False
return True
# Builds and returns a Module suitable for testing.
def BuildTestModule():
module = mojom.Module('test', 'testspace')
struct = module.AddStruct('teststruct')
struct.AddField('testfield1', mojom.INT32)
struct.AddField('testfield2', mojom.Array(mojom.INT32), 42)
interface = module.AddInterface('Server')
method = interface.AddMethod('Foo', 42)
method.AddParameter('foo', mojom.INT32)
method.AddParameter('bar', mojom.Array(struct))
return module
# Tests if |module| is as built by BuildTestModule(). Returns the number of
# errors
def TestTestModule(module):
errors = 0
errors += EXPECT_EQ('test', module.name)
errors += EXPECT_EQ('testspace', module.namespace)
errors += EXPECT_EQ(1, len(module.structs))
errors += EXPECT_EQ('teststruct', module.structs[0].name)
errors += EXPECT_EQ(2, len(module.structs[0].fields))
errors += EXPECT_EQ('testfield1', module.structs[0].fields[0].name)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[0].kind)
errors += EXPECT_EQ('testfield2', module.structs[0].fields[1].name)
errors += EXPECT_EQ(mojom.Array, module.structs[0].fields[1].kind.__class__)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[1].kind.kind)
errors += EXPECT_EQ(1, len(module.interfaces))
errors += EXPECT_EQ('Server', module.interfaces[0].name)
errors += EXPECT_EQ(1, len(module.interfaces[0].methods))
errors += EXPECT_EQ('Foo', module.interfaces[0].methods[0].name)
errors += EXPECT_EQ(2, len(module.interfaces[0].methods[0].parameters))
errors += EXPECT_EQ('foo', module.interfaces[0].methods[0].parameters[0].name)
errors += EXPECT_EQ(mojom.INT32,
module.interfaces[0].methods[0].parameters[0].kind)
errors += EXPECT_EQ('bar', module.interfaces[0].methods[0].parameters[1].name)
errors += EXPECT_EQ(
mojom.Array,
module.interfaces[0].methods[0].parameters[1].kind.__class__)
errors += EXPECT_EQ(
module.structs[0],
module.interfaces[0].methods[0].parameters[1].kind.kind)
return errors
def PrintFailure(string):
stack = traceback.extract_stack()
frame = stack[len(stack)-3]
sys.stderr.write("ERROR at %s:%d, %s\n" % (frame[0], frame[1], string))
print "Traceback:"
for line in traceback.format_list(stack[:len(stack)-2]):
sys.stderr.write(line)
def EXPECT_EQ(a, b):
if a != b:
PrintFailure("%s != %s" % (a, b))
return 1
return 0
def EXPECT_TRUE(a):
if not a:
PrintFailure('Expecting True')
return 1
return 0
def RunTest(fn):
sys.stdout.write('Running %s...' % fn.__name__)
try:
errors = fn()
except:
traceback.print_exc(file=sys.stderr)  # first positional arg is 'limit', not a stream
errors = 1
if errors == 0:
sys.stdout.write('OK\n')
elif errors == 1:
sys.stdout.write('1 ERROR\n')
else:
sys.stdout.write('%d ERRORS\n' % errors)
return errors
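# Editor's sketch (not in the original Chromium file): the helpers above are
# typically driven from a small __main__ block like this; the wrapper name is
# hypothetical:
# def TestBuildTestModule():
#   return TestTestModule(BuildTestModule())
#
# if __name__ == '__main__':
#   RunTest(TestBuildTestModule)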
| [
"[email protected]"
] | |
d8f8388ccf0bde786d3c4b612af5b9f908999b36 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/errors/types/keyword_plan_idea_error.py | 7317f5950bfe3158aa862fb8e1d10e4d1711708a | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.errors',
marshal='google.ads.googleads.v5',
manifest={
'KeywordPlanIdeaErrorEnum',
},
)
class KeywordPlanIdeaErrorEnum(proto.Message):
r"""Container for enum describing possible errors from
KeywordPlanIdeaService.
"""
class KeywordPlanIdeaError(proto.Enum):
r"""Enum describing possible errors from KeywordPlanIdeaService."""
UNSPECIFIED = 0
UNKNOWN = 1
URL_CRAWL_ERROR = 2
INVALID_VALUE = 3
__all__ = tuple(sorted(__protobuf__.manifest))
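# Editor's note (not part of the generated file): proto-plus enums behave like
# standard Python IntEnums, so client code can do, for example:
# err = KeywordPlanIdeaErrorEnum.KeywordPlanIdeaError.URL_CRAWL_ERROR
# err.name   # 'URL_CRAWL_ERROR'
# err.value  # 2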
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3123b2e166e0c4d6732e9496e51de271ea7f14b1 | 512f48fdcfa78e322526cf47163110009b84bf73 | /rapid7vmconsole/models/privileges.py | 62da3458bc049d084294218fcc09b1e20475b6aa | [
"MIT"
] | permissive | confluentinc/vm-console-client-python | 9a0f540c0113acf68ee9dc914715bc255e4d99f4 | ccbd944a0e0333c73e098b769fe4c82755d29874 | refs/heads/master | 2023-07-18T10:33:58.909287 | 2021-09-02T20:52:20 | 2021-09-02T20:52:20 | 402,559,283 | 0 | 0 | MIT | 2021-09-02T20:49:56 | 2021-09-02T20:49:56 | null | UTF-8 | Python | false | false | 4,944 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Privileges(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[str]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""Privileges - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this Privileges. # noqa: E501
:return: The links of this Privileges. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Privileges.
:param links: The links of this Privileges. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this Privileges. # noqa: E501
:return: The resources of this Privileges. # noqa: E501
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this Privileges.
:param resources: The resources of this Privileges. # noqa: E501
:type: list[str]
"""
allowed_values = ["all-permissions", "create-reports", "configure-global-settings", "manage-sites", "manage-tags", "manage-static-asset-groups", "manage-dynamic-asset-groups", "manage-scan-templates", "manage-report-templates", "manage-scan-engines", "submit-vulnerability-exceptions", "approve-vulnerability-exceptions", "delete-vulnerability-exceptions", "manage-vuln-investigations", "view-vuln-investigations", "create-tickets", "close-tickets", "assign-ticket-assignee", "manage-site-access", "manage-asset-group-access", "manage-report-access", "use-restricted-report-sections", "manage-policies", "view-asset-group-asset-data", "manage-asset-group-assets", "view-site-asset-data", "specify-site-metadata", "purge-site-asset-data", "specify-scan-targets", "assign-scan-engine", "assign-scan-template", "manage-site-credentials", "manage-scan-alerts", "schedule-automatic-scans", "start-unscheduled-scans"] # noqa: E501
if not set(resources).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `resources` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(resources) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Privileges, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Privileges):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
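# Editor's sketch (not in the generated client): constructing the model and
# exercising the allowed-values check in the `resources` setter.
# p = Privileges(resources=["create-reports", "manage-sites"])
# p.to_dict()  # plain dict with 'links' and 'resources' keys
# Privileges(resources=["no-such-privilege"])  # raises ValueError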
| [
"[email protected]"
] | |
8c462a9504616211d1a864ac6f1a00d0a2cba936 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pypos/pypos-000/pypos.py | 1d36f8dacb776baa51da76f15440425f8f40a5f8 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,615 | py | #!/usr/bin/env python
import os, sys, getopt, signal
import gobject, gtk, pango
# ------------------------------------------------------------------------
# This is an open-source sticker program, written in Python.
GAP = 4 # Gap in pixels
TABSTOP = 4
FGCOLOR = "#000000"
BGCOLOR = "#ffff88"
version = 1.0
verbose = False
# Where things are stored (backups, orgs, macros)
config_dir = os.path.expanduser("~/.pypos")
def OnExit(win):
gtk.main_quit()
def help():
print
print "Pypos version: ", version
print
print "Usage: " + os.path.basename(sys.argv[0]) + " [options] [[filename] ... [filenameN]]"
print
print "Options:"
print
print " -d level - Debug level 1-10. (Limited implementation)"
print " -v - Verbose (to stdout and log)"
print " -c - Dump Config"
print " -h - Help"
print
def area_motion(self, area, event):
print "window motion event", event.state, event.x, event.y
if event.state & gtk.gdk.BUTTON1_MASK:
print "drag"
# Start of program:
if __name__ == '__main__':
try:
if not os.path.isdir(config_dir):
os.mkdir(config_dir)
except: pass
# Let the user know it needs fixin'
if not os.path.isdir(config_dir):
print "Cannot access config dir:", config_dir
sys.exit(1)
opts = []; args = []
try:
opts, args = getopt.getopt(sys.argv[1:], "hv")
except getopt.GetoptError, err:
print "Invalid option(s) on command line:", err
sys.exit(1)
#print "opts", opts, "args", args
for aa in opts:
if aa[0] == "-d":
try:
pgdebug = int(aa[1])
except:
pgdebug = 0
if aa[0] == "-h": help(); exit(1)
if aa[0] == "-v": verbose = True
#if aa[0] == "-x": clear_config = True
#if aa[0] == "-c": show_config = True
#if aa[0] == "-t": show_timing = True
if verbose:
print "PyPos running on", "'" + os.name + "'", \
"GTK", gtk.gtk_version, "PyGtk", gtk.pygtk_version
www = gtk.gdk.screen_width(); hhh = gtk.gdk.screen_height();
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
#window.set_decorated(False)
window.set_position(gtk.WIN_POS_CENTER)
window.set_default_size(3*www/4, 3*hhh/4)
window.set_flags(gtk.CAN_FOCUS | gtk.SENSITIVE)
window.connect("destroy", OnExit)
window.show_all()
gtk.main()
| [
"[email protected]"
] | |
e9e3cda5266717a5660707d3e5cbb04a54cdf11c | 34a7e30c3ceafb06c9a21c59c88c3ea5a6e91388 | /python/datagen/addPriority.py | dbbff881d7bd7fd56ded7dd0a280ef0ad32f27fd | [] | no_license | DinoBektesevic/DinoBektesevic.github.io | 91643f54411d214e7552e9ef2e1e0fbece5fb841 | be8cc8b3b2b58cbc1517593377228ff541fd515c | refs/heads/main | 2023-05-29T22:39:23.801299 | 2021-06-10T02:55:12 | 2021-06-10T02:55:12 | 364,038,461 | 0 | 0 | null | 2021-05-10T20:30:01 | 2021-05-03T19:27:07 | HTML | UTF-8 | Python | false | false | 1,301 | py | import glob
import pandas as pd
import numpy
import seaborn as sns
import matplotlib.pyplot as plt
# filename = "mjd-59662-sdss-simple-expanded.csv"
allfiles = glob.glob("testDir/mjd*-simple-expanded.csv")
for filename in allfiles:
df = pd.read_csv(filename, index_col=0)
newfilename = filename[:-len(".csv")] + "-priority.csv"  # str.strip() removes a character set, not a suffix
priority = numpy.array([-1]*len(df))
completion = numpy.array([-1]*len(df))
df["priority"] = priority
df["completion"] = completion
fields = list(set(df[df.objType=="sdss field"]["fieldID"]))
fieldPriority = numpy.random.choice([0,1,2,3,4,5], size=len(fields))
fieldCompletion = numpy.random.uniform(high=100, size=len(fields))
for field, priority, completion in zip(fields, fieldPriority, fieldCompletion):
# check if it's scheduled
sched = list(df[df.fieldID==field]["scheduled"])
if True in sched:
# give all scheduled plates high priority
priority = 0
df["priority"].loc[df["fieldID"]==field] = priority
df["completion"].loc[df["fieldID"]==field] = completion
df = df.reset_index()  # reset_index() returns a new frame; the bare call was a no-op
df.to_csv(newfilename)
# sns.scatterplot(x="fieldID", y="completion", data=df[df.objType=="sdss field"])
# plt.show()
# import pdb; pdb.set_trace()
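# Editor's sketch (assumption, not in the original script): the per-field loop
# above could be replaced by a vectorized mapping, roughly:
# prio_map = dict(zip(fields, fieldPriority))
# comp_map = dict(zip(fields, fieldCompletion))
# mask = df["fieldID"].isin(fields)
# df.loc[mask, "priority"] = df.loc[mask, "fieldID"].map(prio_map)
# df.loc[mask, "completion"] = df.loc[mask, "fieldID"].map(comp_map)
# (the scheduled-field override would still need its own masked assignment)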
# print(fields) | [
"[email protected]"
] | |
b992279df4179e343cd86a13c730cb7d56b36b83 | 96909e3b2eb787afa739f3020a9292afae61b0b5 | /web/__init__.py | f2ab81fbc93b8f2f236e99d276ed434f89b742c1 | [] | no_license | fengges/se | 09bd6306f67d78fe0f51286ab41f629237fcf4d6 | 51e199a7fc5f7666063a556f41669a6a8b4fe37d | refs/heads/master | 2020-03-27T04:29:32.207191 | 2018-08-24T05:47:40 | 2018-08-24T05:47:40 | 145,944,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py |
# author :feng
# time :2018/1/25
# function : application initialization
# register blueprints
import os
from main import Query
from flask import Flask,json,request
app = Flask(__name__)
subject = [{"code": '01', "k": 46}, {"code": '02', "k": 98}, {"code": '03', "k": 98},
{"code": '04', "k": 88}, {"code": '05', "k": 98}, {"code": '06', "k": 28},
{"code": '07', "k": 54}, {"code": '0701', "k": 64}, {"code": '0702', "k": 30},
{"code": '0703', "k": 52}, {"code": '0705', "k": 16}, {"code": '0706', "k": 12},
{"code": '0707', "k": 14}, {"code": '0709', "k": 98}, {"code": '0710', "k": 98},
{"code": '0712', "k": 10}, {"code": '08', "k": 50}, {"code": '0801', "k": 26},
{"code": '0802', "k": 98}, {"code": '0803', "k": 14}, {"code": '0804', "k": 12},
{"code": '0805', "k": 98}, {"code": '0806', "k": 12}, {"code": '0807', "k": 38},
{"code": '0808', "k": 98}, {"code": '0809', "k": 52}, {"code": '0810', "k": 98},
{"code": '0811', "k": 22}, {"code": '0812', "k": 72}, {"code": '0813', "k": 30},
{"code": '0814', "k": 68}, {"code": '0815', "k": 14}, {"code": '0816', "k": 14},
{"code": '0817', "k": 98}, {"code": '0818', "k": 14}, {"code": '0819', "k": 18},
{"code": '0820', "k": 18}, {"code": '0821', "k": 18}, {"code": '0823', "k": 24},
{"code": '0824', "k": 14}, {"code": '0825', "k": 26}, {"code": '0826', "k": 10},
{"code": '0827', "k": 12}, {"code": '0828', "k": 36}, {"code": '0829', "k": 14},
{"code": '0830', "k": 82}, {"code": '0831', "k": 16}, {"code": '0832', "k": 28},
{"code": '09', "k": 74}, {"code": '10', "k": 98}, {"code": '11', "k": 14},
{"code": '12', "k": 98}]
a = Query(subject)
@app.route('/search',methods=['GET','POST'])
def index6():
t = request.data
if len(t)==0:
t=request.values['data']
data = json.loads(t)
text=data['keyword']
if "filer" not in data:
filer={}
else:
filer = data['filer']
if "school" in filer and "all" in filer["school"]:
del filer["school"]
if "code" in filer and "all" in filer["code"]:
del filer["code"]
r=a.do_query(text,filer)
s=json.jsonify(r)
return s
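# Editor's sketch (hypothetical client, not part of this module): exercising
# the endpoint above, assuming the app is served on localhost:5000.
# import requests
# resp = requests.post("http://localhost:5000/search",
#                      json={"keyword": "machine learning",
#                            "filer": {"school": ["all"], "code": ["0812"]}})
# print(resp.json())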
| [
"[email protected]"
] | |
b055d8ae6cafcbe25b727929949414109497dfbf | fe91e0f7f74c3156a5c194713a69d9846b9e26a2 | /flask_app/blueprints/api/blueprint.py | 9493ed788ee23a4f569ba5bbd705e244e4682ac4 | [
"BSD-3-Clause"
] | permissive | getslash/backslash | cbf963006e3de565a1512f79c6c9ab84e705c67e | 67554c039f8ac6a648deb191cc7fb69480f28253 | refs/heads/develop | 2023-01-10T22:26:11.666887 | 2022-06-17T05:06:00 | 2022-06-17T05:06:00 | 23,376,788 | 17 | 15 | NOASSERTION | 2022-12-27T16:17:59 | 2014-08-27T04:30:58 | Python | UTF-8 | Python | false | false | 1,268 | py | import functools
from flask import Blueprint
from flask_simple_api import SimpleAPI
from ... import activity
from ...utils.api_utils import (auto_render, requires_login,
requires_login_or_runtoken)
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = SimpleAPI(blueprint)
_api_info = {'endpoints': {}}
def API(func=None, require_real_login=False, generates_activity=True, require_login=True, version=1):
if func is None:
return functools.partial(API, require_real_login=require_real_login, generates_activity=generates_activity, require_login=require_login, version=version)
returned = auto_render(func)
endpoint_info = _api_info['endpoints'][func.__name__] = {}
endpoint_info['login_required'] = require_login
endpoint_info['version'] = version
if generates_activity:
returned = activity.updates_last_active(returned)
if require_login:
if require_real_login:
returned = requires_login(returned)
else:
returned = requires_login_or_runtoken(returned)
return api.include(returned)
@blueprint.route('/', methods=['OPTIONS'], strict_slashes=False)
def get_api_info():
from flask import jsonify
return jsonify(_api_info)
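# Editor's sketch (hypothetical endpoint, not part of Backslash): how the API
# decorator above is applied in practice; the function name and argument are
# assumptions.
# @API(require_real_login=True, version=2)
# def archive_session(session_id: int):
#     ...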
| [
"[email protected]"
] |