| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
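The schema describes one record per source file: repository metadata plus the file's full text in `content`. As a hedged illustration (the dataset's hub identifier is not stated here, so the name below is a placeholder), rows with this schema could be streamed and inspected with the Hugging Face `datasets` library:

```python
from itertools import islice

from datasets import load_dataset

# "org/source-code-dataset" is a placeholder id; substitute the real dataset name.
ds = load_dataset("org/source-code-dataset", split="train", streaming=True)

for row in islice(ds, 3):
    # Metadata and the full file text travel together in each row.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the file
```

Streaming avoids materializing the `content` column, which runs up to 10.2M bytes per file. The sample rows below carry these fields in the column order above, separated by `|`.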
fdd57a47de5f55ea470232da56fddf9705ce85ae
|
d442044fca0cb5c2102845c93194246273b3778b
|
/event_participant_labels/event_participant.py
|
fa3118b33c26f27f76ace611464af7b6d91e8720
|
[] |
no_license
|
vertelab/odoo-event-extra
|
f9d0ee7ac5140ee2cbcb15fbb7af61d1772aaa2e
|
63dd0b981b23941bae18d2d968b34a7977bc7699
|
refs/heads/master
| 2022-07-09T00:26:50.416313 | 2020-07-01T15:28:58 | 2020-07-01T15:28:58 | 47,540,528 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,562 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2004-2016 Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
import logging
_logger = logging.getLogger(__name__)
class event_participant(models.Model):
_inherit = 'event.participant'
parent_name = fields.Char(related="partner_id.parent_id.name")
participant_name = fields.Char(related="partner_id.name")
event_name = fields.Char(related="event_id.name")
#~ event_type = fields.Char(related="event_id.event_type.name")
course_leader = fields.Char(related="event_id.course_leader.name")
#~ event_date = fields.Datetime(related="event_id.date_start")
|
[
"[email protected]"
] | |
2c2813d3ae5577ac8185b8e3feff5f018541b05e
|
35b59ea3b2800b170f82ccec229c88e102e1f4bd
|
/calculator/codes/solution/pythonprogs/calc_mul.py
|
30e609967c7df02a1c063aeb0c570826c1787858
|
[] |
no_license
|
gadepall/LA1400
|
efcc47c708efee9bebe8db415cd80997db64e834
|
bbdccf26b2f1c6a40325c139e8e406f923eb023f
|
refs/heads/master
| 2020-03-24T16:32:50.508068 | 2018-07-30T05:14:02 | 2018-07-30T05:14:02 | 142,827,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,687 |
py
|
#-*-coding: utf-8-*-
#Don't remove the above line
#This program uses a C routine for multiplication
#in the calculator. Other arithmetic operations are not included.
from Tkinter import *
from ctypes import *
import math
class calc:
def getandreplace(self):
"""replace x with * and ÷ with /"""
self.expression = self.e.get()
self.newtext=self.expression.replace(self.newdiv,'/')
self.newtext=self.newtext.replace('x','*')
def equals(self):
"""when the equal button is pressed"""
self.getandreplace()
try:
for i in self.newtext:
if(i=='*'):
multi=CDLL('./mul.so')
y=self.newtext.split('*')
a=c_float(float(y[0]))
b=c_float(float(y[1]))
mul=multi.mul
mul.restype=c_float
self.value=mul(a,b)
except (SyntaxError, NameError):
self.e.delete(0,END)
self.e.insert(0,'Invalid Input!')
else:
self.e.delete(0,END)
self.e.insert(0,self.value)
def clearall(self):
"""when clear button is pressed,clears the text input area"""
self.e.delete(0,END)
def clear1(self):
self.txt=self.e.get()[:-1]
self.e.delete(0,END)
self.e.insert(0,self.txt)
def action(self,argi):
"""pressed button's value is inserted into the end of the text area"""
self.e.insert(END,argi)
def __init__(self,master):
"""Constructor method"""
master.title('Calculator')
master.geometry()
self.e = Entry(master)
self.e.grid(row=0,column=0,columnspan=6,pady=3)
self.e.focus_set() #Sets focus on the input text area
self.div='÷'
self.newdiv=self.div.decode('utf-8')
#Generating Buttons
Button(master,text="=",width=10,command=lambda:self.equals()).grid(row=4, column=4,columnspan=2)
Button(master,text='AC',width=3,command=lambda:self.clearall()).grid(row=1, column=4)
Button(master,text='C',width=3,command=lambda:self.clear1()).grid(row=1, column=5)
Button(master,text="+",width=3,command=lambda:self.action('+')).grid(row=4, column=3)
Button(master,text="x",width=3,command=lambda:self.action('x')).grid(row=2, column=3)
Button(master,text="-",width=3,command=lambda:self.action('-')).grid(row=3, column=3)
Button(master,text="÷",width=3,command=lambda:self.action(self.newdiv)).grid(row=1, column=3)
Button(master,text="%",width=3,command=lambda:self.action('%')).grid(row=4, column=2)
Button(master,text="7",width=3,command=lambda:self.action('7')).grid(row=1, column=0)
Button(master,text="8",width=3,command=lambda:self.action(8)).grid(row=1, column=1)
Button(master,text="9",width=3,command=lambda:self.action(9)).grid(row=1, column=2)
Button(master,text="4",width=3,command=lambda:self.action(4)).grid(row=2, column=0)
Button(master,text="5",width=3,command=lambda:self.action(5)).grid(row=2, column=1)
Button(master,text="6",width=3,command=lambda:self.action(6)).grid(row=2, column=2)
Button(master,text="1",width=3,command=lambda:self.action(1)).grid(row=3, column=0)
Button(master,text="2",width=3,command=lambda:self.action(2)).grid(row=3, column=1)
Button(master,text="3",width=3,command=lambda:self.action(3)).grid(row=3, column=2)
Button(master,text="0",width=3,command=lambda:self.action(0)).grid(row=4, column=0)
Button(master,text=".",width=3,command=lambda:self.action('.')).grid(row=4, column=1)
Button(master,text="(",width=3,command=lambda:self.action('(')).grid(row=2, column=4)
Button(master,text=")",width=3,command=lambda:self.action(')')).grid(row=2, column=5)
Button(master,text="√",width=3,command=lambda:self.squareroot()).grid(row=3, column=4)
Button(master,text="x²",width=3,command=lambda:self.square()).grid(row=3, column=5)
#Main
root = Tk()
obj=calc(root) #object instantiated
root.mainloop()
|
[
"[email protected]"
] | |
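The calculator above delegates multiplication to a C routine loaded with ctypes from `./mul.so`; the C source is not part of this row. A minimal sketch of the assumed binding, with the presumed C file and compile command shown only as comments:

```python
# Assumed C side (not included in the row):
#
#   /* mul.c */
#   float mul(float a, float b) { return a * b; }
#
#   gcc -shared -fPIC -o mul.so mul.c
#
from ctypes import CDLL, c_float

multi = CDLL('./mul.so')                  # load the shared object from the working directory
multi.mul.restype = c_float               # the C function returns a float
multi.mul.argtypes = [c_float, c_float]   # and takes two floats

print(multi.mul(c_float(6.0), c_float(7.0)))  # 42.0
```

This mirrors the pattern in `equals()` above, where `restype` is set before the call so ctypes converts the return value correctly.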
484d13dc81d16a486d91bb29c9c89c4680416a38
|
f8933a29319d9062b3f0070d133b9e533efbbc50
|
/trilpy/ldpnr.py
|
75544f25ee6722c4f87c617177f690c39f7b0b18
|
[] |
no_license
|
zimeon/trilpy
|
13c999eb164c0a935abfdd4c94a7ab842c53cf67
|
825bd803ed5e5d7b6c906067a4c406a4db18c9c6
|
refs/heads/master
| 2021-01-16T23:24:39.374726 | 2018-07-24T03:28:09 | 2018-07-24T03:28:09 | 95,748,971 | 1 | 2 | null | 2018-02-07T20:39:00 | 2017-06-29T07:07:14 |
Python
|
UTF-8
|
Python
| false | false | 771 |
py
|
"""An LDPNR - LDP Non-RDF Source."""
from .ldpr import LDPR
from .namespace import LDP
class LDPNR(LDPR):
"""LDPNR - A binary object.
An LDPR whose state is not represented in RDF. For example,
these can be binary or text documents that do not have useful
RDF representations.
See <https://www.w3.org/TR/ldp/#ldpnr>.
"""
type_label = 'LDPNR'
def __init__(self, uri=None, content=None, content_type=None, describedby=None):
"""Initialize LDPNR."""
super(LDPNR, self).__init__(uri)
self.content = content
self.content_type = content_type
self.describedby = describedby
@property
def rdf_types(self):
"""List of RDF types for this resource."""
return([LDP.NonRDFSource])
|
[
"[email protected]"
] | |
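The LDPNR class above simply records binary content, its media type, and an optional description resource. A hedged usage sketch, assuming the LDPR base class needs only the URI (as the `super().__init__(uri)` call suggests):

```python
from trilpy.ldpnr import LDPNR

# Example values; any URI and byte string would do.
resource = LDPNR(uri='http://localhost:9999/obj1',
                 content=b'hello world',
                 content_type='text/plain')

print(resource.type_label)   # 'LDPNR'
print(resource.rdf_types)    # [LDP.NonRDFSource]
```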
b4c12bac0b2b9b434698891190b175383559a6af
|
7742d4ac18867efa9cb976cea831288f9b4887f9
|
/tensorflow/contrib/estimator/python/estimator/multi_head_test.py
|
16177aebd53cbff5c8fd727477ac5d18c9f8bce5
|
[
"Apache-2.0"
] |
permissive
|
liulimin90/tensorflow
|
cf83a69088329863e5da81a46f550578287ba8a5
|
daf17e8c041ad9441da57e15e791bfac7553d727
|
refs/heads/master
| 2021-08-07T21:18:04.576892 | 2017-11-08T22:19:12 | 2017-11-08T22:19:12 | 110,049,267 | 1 | 0 | null | 2017-11-09T01:05:43 | 2017-11-09T01:05:43 | null |
UTF-8
|
Python
| false | false | 16,240 |
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.contrib.estimator.python.estimator import multi_head as multi_head_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
tol=1e-6):
"""Assert summary the specified simple values.
Args:
test_case: test case.
expected_summaries: Dict of expected tags and simple values.
summary_str: Serialized `summary_pb2.Summary`.
tol: Tolerance for relative and absolute.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
test_case.assertAllClose(expected_summaries, {
v.tag: v.simple_value for v in summary.value
}, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
class MultiHeadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_no_heads(self):
with self.assertRaisesRegexp(
ValueError, r'Must specify heads\. Given: \[\]'):
multi_head_lib.multi_head(heads=[])
def test_head_name_missing(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3)
with self.assertRaisesRegexp(
ValueError, r'All given heads must have name specified\.'):
multi_head_lib.multi_head([head1, head2])
def test_head_weights_wrong_size(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
with self.assertRaisesRegexp(
ValueError,
r'heads and head_weights must have the same size\. '
r'Given len\(heads\): 2. Given len\(head_weights\): 1\.'):
multi_head_lib.multi_head([head1, head2], head_weights=[1.])
def test_name(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
self.assertEqual('head1_head2', multi_head.name)
def test_predict_two_heads(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = {
'head1': np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32),
'head2': np.array([[2., -2., 2.], [-3., 2., -2.]], dtype=np.float32)
}
expected_probabilities = {
'head1': _sigmoid(logits['head1']),
'head2': _sigmoid(logits['head2']),
}
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'head1', 'classification/head1', 'predict/head1',
'head2', 'classification/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
logits['head1'],
predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
logits['head2'],
predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_probabilities['head1'],
predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head2'],
predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs['head1'].scores))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(spec.export_outputs['head2'].scores))
def test_eval_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# Average over classes, weighted sum over batch and heads.
expected_loss_head1 = 17.5
expected_loss_head2 = 30.0
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN + '/head1': expected_loss_head1 / 2,
keys.LOSS_MEAN + '/head2': expected_loss_head2 / 2,
# auc and auc_pr cannot be reliably calculated for only 4-6 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC + '/head1': 0.1667,
keys.AUC + '/head2': 0.3333,
keys.AUC_PR + '/head1': 0.6667,
keys.AUC_PR + '/head2': 0.5000,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol,
atol=tol)
def test_train_create_loss_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
loss = multi_head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
tol = 1e-3
with self.test_session():
# Unreduced loss of the head is [[(10 + 10) / 2], (15 + 0) / 2]
# (averaged over classes, sum-reduced over examples).
self.assertAllClose(17.5, loss.eval(), rtol=tol, atol=tol)
def test_train_create_loss_two_heads_with_weights(self):
# Use different example weighting for each head weighting.
weights1 = np.array([[1.], [2.]], dtype=np.float32)
weights2 = np.array([[2.], [3.]])
head1 = head_lib.multi_label_head(n_classes=2, name='head1',
weight_column='weights1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2',
weight_column='weights2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
weighted_sum_loss, example_weight_sum, _ = multi_head.create_loss(
features={
'x': np.array(((42,),), dtype=np.int32),
'weights1': weights1,
'weights2': weights2
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-3
with self.test_session():
# loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# = [10, 7.5]
# weighted_sum_loss = 1 * 10 + 2 * 7.5 = 25
# loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
# = [20, 10]
# weighted_sum_loss = 2 * 20 + 3 * 10 = 70
# head-weighted merge = 1 * 25 + 2 * 70 = 165
self.assertAllClose(165, weighted_sum_loss.eval(), rtol=tol, atol=tol)
# example_weight_sum = 1 * (1 + 2) + 2 * (2 + 3) = 13
self.assertAllClose(13., example_weight_sum.eval(), rtol=tol, atol=tol)
def test_train_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over weights.
expected_loss = 17.5
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS + '/head1': expected_loss,
# Average loss over examples.
metric_keys.MetricKeys.LOSS_MEAN + '/head1': expected_loss / 2,
}, summary_str, tol)
def test_train_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# Average over classes, weighted sum over batch and heads.
expected_loss_head1 = 17.5
expected_loss_head2 = 30.0
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS + '/head1': expected_loss_head1,
metric_keys.MetricKeys.LOSS + '/head2': expected_loss_head2,
# Average loss over examples.
metric_keys.MetricKeys.LOSS_MEAN + '/head1': expected_loss_head1 / 2,
metric_keys.MetricKeys.LOSS_MEAN + '/head2': expected_loss_head2 / 2,
}, summary_str, tol)
if __name__ == '__main__':
test.main()
|
[
"[email protected]"
] | |
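The multi-head test above derives all of its expected losses from the large-logit approximation spelled out in its comments (loss ≈ labels·(logits<0)·(−logits) + (1−labels)·(logits>0)·logits, averaged over classes and summed over examples). A small standalone numpy check, not part of the dataset row, reproducing that arithmetic:

```python
import numpy as np

def approx_sigmoid_xent(labels, logits):
    # Large-logit approximation quoted in the test's comments.
    return labels * (logits < 0) * (-logits) + (1 - labels) * (logits > 0) * logits

logits1 = np.array([[-10., 10.], [-15., 10.]])
labels1 = np.array([[1., 0.], [1., 1.]])
logits2 = np.array([[20., -20., 20.], [-30., 20., -20.]])
labels2 = np.array([[0., 1., 0.], [1., 1., 0.]])

loss1 = approx_sigmoid_xent(labels1, logits1).mean(axis=1).sum()  # 17.5
loss2 = approx_sigmoid_xent(labels2, logits2).mean(axis=1).sum()  # 30.0
print(loss1, loss2, 1. * loss1 + 2. * loss2)                      # 17.5 30.0 77.5
```

The printed values match `expected_loss_head1`, `expected_loss_head2`, and the head-weighted total of 77.5 asserted in `test_eval_two_heads_with_weights`.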
8968aa043a78587dbecbfc6589d1382d05f98626
|
814992618962991b1b6dd6f1cdf2853687cbfcd0
|
/quantarhei/qm/propagators/svpropagator.py
|
e235da8d523f349c30ad53ed9290882148e6a24d
|
[
"MIT"
] |
permissive
|
MichalPt/quantarhei
|
a5db7916405236dc78778e4ef378141a19a28ff2
|
536d4f39bb7f7d6893664520351d93eac2bc90f1
|
refs/heads/master
| 2022-12-15T09:36:53.108896 | 2022-07-28T09:44:12 | 2022-07-28T09:44:12 | 226,359,238 | 1 | 0 |
MIT
| 2019-12-06T15:37:24 | 2019-12-06T15:37:23 | null |
UTF-8
|
Python
| false | false | 2,624 |
py
|
# -*- coding: utf-8 -*-
"""
StateVector propagator
"""
import numpy
from .statevectorevolution import StateVectorEvolution
from ..hilbertspace.evolutionoperator import EvolutionOperator
from ... import REAL
class StateVectorPropagator:
def __init__(self, timeaxis, ham):
self.timeaxis = timeaxis
self.ham = ham
self.Odt = self.timeaxis.data[1]-self.timeaxis.data[0]
self.dt = self.Odt
self.Nref = 1
self.Nt = self.timeaxis.length
N = self.ham.data.shape[0]
self.N = N
self.data = numpy.zeros((self.Nt,N),dtype=numpy.complex64)
def setDtRefinement(self, Nref):
"""
The TimeAxis object specifies at what times the propagation
should be stored. We can tell the propagator to use finer
time step for the calculation by setting the refinement. The
refinement is an integer by which the TimeAxis time step should
be divided to get the finer time step. In the code below, we
have dt = 10 in the TimeAxis, but we want to calculate with
dt = 1
>>> HH = numpy.array([[0.0, 0.0],[0.0,1.0]])
>>> times = numpy.linspace(0,1000,10)
>>> pr = StateVectorPropagator(times, HH)
>>> pr.setDtRefinement(10)
"""
self.Nref = Nref
self.dt = self.Odt/self.Nref
def propagate(self, psii):
return self._propagate_short_exp(psii,L=4)
def get_evolution_operator(self):
eop = 0.0
return EvolutionOperator(self.timeaxis, data=eop)
def _propagate_short_exp(self, psii, L=4):
"""
Short exp integration
"""
pr = StateVectorEvolution(self.timeaxis, psii)
psi1 = psii.data
psi2 = psii.data
#
# RWA is applied here
#
if self.ham.has_rwa:
HH = self.ham.get_RWA_data()
else:
HH = self.ham.data
indx = 1
for ii in range(1,self.Nt):
for jj in range(0,self.Nref):
for ll in range(1,L+1):
pref = (self.dt/ll)
psi1 = -1j*pref*numpy.dot(HH,psi1)
psi2 = psi2 + psi1
psi1 = psi2
pr.data[indx,:] = psi2
indx += 1
if self.ham.has_rwa:
pr.is_in_rwa = True
return pr
|
[
"[email protected]"
] | |
f7c48c6551b9cc700832df0bd94985a171d283a6
|
8c643886e810f09a2c596066911300ceec64605b
|
/exercises/chapter03/data/simple2_solution.py
|
ad2ae388f43b368e5a606e792400b47e8732cef7
|
[
"MIT"
] |
permissive
|
matt-gardner/test-allennlp-course
|
fea62c18c983e5a76927a2e8b97ba30081f6838a
|
15b18144c30cfcdbe9acf5ad9bb30e24f6522d11
|
refs/heads/master
| 2023-01-28T05:10:03.787036 | 2019-06-13T20:16:12 | 2019-06-13T20:16:12 | 191,823,476 | 6 | 1 |
MIT
| 2023-01-11T20:28:39 | 2019-06-13T19:44:21 |
CSS
|
UTF-8
|
Python
| false | false | 1,214 |
py
|
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.tokenizers import WordTokenizer, CharacterTokenizer
from allennlp.data import Vocabulary
# Splits text into characters (instead of words or wordpieces).
tokenizer = CharacterTokenizer()
# Represents each token (which is a _character_) with a single id from a vocabulary.
token_indexer = SingleIdTokenIndexer(namespace='character_vocab')
vocab = Vocabulary()
vocab.add_tokens_to_namespace(['T', 'h', 'i', 's', ' ', 'o', 'm', 'e', 't', 'x', '.'],
namespace='character_vocab')
text = "This is some text."
tokens = tokenizer.tokenize(text)
print(tokens)
text_field = TextField(tokens, {'token_characters': token_indexer})
# In order to convert the token strings into integer ids, we need to tell the
# TextField what Vocabulary to use.
text_field.index(vocab)
# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print(tensor_dict)
|
[
"[email protected]"
] | |
4e618625235505db5cacd9c840d94bebff946ac7
|
75d8667735782cd1d0eb4877e52c89da5cd92dde
|
/nova/scheduler/driver.py
|
da8568c970018cf13ea07a0bcacab608bf2c9ac6
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/nova-token
|
ffecfd3ec561936b7d9d7e691bc57383cde05436
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
refs/heads/master
| 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 |
Apache-2.0
| 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null |
UTF-8
|
Python
| false | false | 4,264 |
py
|
begin_unit
comment|'# Copyright (c) 2010 OpenStack Foundation'
nl|'\n'
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nScheduler base class that all Schedulers should inherit from\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'abc'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
name|'from'
name|'stevedore'
name|'import'
name|'driver'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'servicegroup'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
op|'@'
name|'six'
op|'.'
name|'add_metaclass'
op|'('
name|'abc'
op|'.'
name|'ABCMeta'
op|')'
newline|'\n'
DECL|class|Scheduler
name|'class'
name|'Scheduler'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""The base class that all Scheduler classes should inherit from."""'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'host_manager'
op|'='
name|'driver'
op|'.'
name|'DriverManager'
op|'('
nl|'\n'
string|'"nova.scheduler.host_manager"'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'scheduler_host_manager'
op|','
nl|'\n'
name|'invoke_on_load'
op|'='
name|'True'
op|')'
op|'.'
name|'driver'
newline|'\n'
name|'self'
op|'.'
name|'servicegroup_api'
op|'='
name|'servicegroup'
op|'.'
name|'API'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|run_periodic_tasks
dedent|''
name|'def'
name|'run_periodic_tasks'
op|'('
name|'self'
op|','
name|'context'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Manager calls this so drivers can perform periodic tasks."""'
newline|'\n'
name|'pass'
newline|'\n'
nl|'\n'
DECL|member|hosts_up
dedent|''
name|'def'
name|'hosts_up'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'topic'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the list of hosts that have a running service for topic."""'
newline|'\n'
nl|'\n'
name|'services'
op|'='
name|'objects'
op|'.'
name|'ServiceList'
op|'.'
name|'get_by_topic'
op|'('
name|'context'
op|','
name|'topic'
op|')'
newline|'\n'
name|'return'
op|'['
name|'service'
op|'.'
name|'host'
nl|'\n'
name|'for'
name|'service'
name|'in'
name|'services'
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'servicegroup_api'
op|'.'
name|'service_is_up'
op|'('
name|'service'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'abc'
op|'.'
name|'abstractmethod'
newline|'\n'
DECL|member|select_destinations
name|'def'
name|'select_destinations'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'spec_obj'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Must override select_destinations method.\n\n :return: A list of dicts with \'host\', \'nodename\' and \'limits\' as keys\n that satisfies the request_spec and filter_properties.\n """'
newline|'\n'
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
|
[
"[email protected]"
] | |
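The nova-token row above stores the file as a labelled token stream (`comment|…`, `name|…`, `op|…`, `newline|'\n'`) rather than plain source. The row does not say how that stream was produced; a hedged sketch of a similar transformation using Python's standard `tokenize` module:

```python
import io
import tokenize

src = "# All Rights Reserved.\nimport abc\n"

# Emit one "kind|repr" line per token, loosely mirroring the row's format.
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(f"{tokenize.tok_name[tok.type].lower()}|{tok.string!r}")
```

This prints lines such as `comment|'# All Rights Reserved.'`, `nl|'\n'`, `name|'import'`, `name|'abc'`, `newline|'\n'`, and `endmarker|''`, close to the row's format; the `DECL|…` lines in the row are extra annotations that `tokenize` itself does not produce.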
3746bcad6abb98b4692490cc69798029096ca777
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve/client/script/ui/shared/neocom/attributes.py
|
8ba6cb12ed61f8ac0ac1c5e816617ba2ad9a4bc9
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 22,035 |
py
|
#Embedded file name: eve/client/script/ui/shared/neocom\attributes.py
"""
This file contains the UI needed to view and respec your attributes
independently from the character sheet
"""
import blue
import uiprimitives
import uicontrols
import util
import form
from carbonui.primitives.container import Container
from carbonui.primitives.layoutGrid import LayoutGrid
from carbonui.primitives.line import Line
from eve.client.script.ui.control.buttons import Button
from eve.client.script.ui.control.eveLabel import EveLabelMedium
import uthread
import uicls
import carbonui.const as uiconst
import localization
class AttributeRespecWindow(uicontrols.Window):
__guid__ = 'form.attributeRespecWindow'
__notifyevents__ = ['OnSessionChanged']
default_windowID = 'attributerespecification'
default_iconNum = 'res:/ui/Texture/WindowIcons/attributes.png'
def ApplyAttributes(self, attributes):
uicontrols.Window.ApplyAttributes(self, attributes)
self.readOnly = attributes.readOnly
self.MakeUnResizeable()
self.SetCaption(localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/NavScroll/Attributes'))
self.SetWndIcon(self.iconNum)
self.godma = sm.StartService('godma')
self.skillHandler = self.godma.GetSkillHandler()
uicontrols.WndCaptionLabel(text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/NeuralRemapping'), subcaption=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/NeuralRemappingTagline'), parent=self.sr.topParent, align=uiconst.RELATIVE)
self.attributes = [const.attributePerception,
const.attributeMemory,
const.attributeWillpower,
const.attributeIntelligence,
const.attributeCharisma]
self.implantTypes = [19540,
19551,
19553,
19554,
19555]
self.attributeIcons = ['ui_22_32_5',
'ui_22_32_4',
'ui_22_32_2',
'ui_22_32_3',
'ui_22_32_1']
self.attributeLabels = [localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributePerception'),
localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributeMemory'),
localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributeWillpower'),
localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributeIntelligence'),
localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributeCharisma')]
self.currentAttributes = {}
self.implantModifier = {}
self.unspentPts = 0
self.ConstructLayout()
self.Load()
def OnSessionChanged(self, isRemote, session, change):
self.skillHandler = self.godma.GetSkillHandler()
def Load(self, *args):
"""
Performs the initialization of the window's data by loading
the current session's character's base attributes, clamping them to
conform to basic business rules (for the player's convenience),
and setting up the UI's initial display data.
"""
if not eve.session.charid or self.destroyed:
return
dogmaLM = self.godma.GetDogmaLM()
attrDict = dogmaLM.GetCharacterBaseAttributes()
unspentPts = const.respecTotalRespecPoints
for x in xrange(0, 5):
attr = self.attributes[x]
if attr in attrDict:
attrValue = attrDict[attr]
if attrValue > const.respecMaximumAttributeValue:
attrValue = const.respecMaximumAttributeValue
if attrValue < const.respecMinimumAttributeValue:
attrValue = const.respecMinimumAttributeValue
self.currentAttributes[attr] = attrValue
self.respecBar[x].SetValue(attrValue - const.respecMinimumAttributeValue)
unspentPts -= attrValue
modifiers = self.skillHandler.GetCharacterAttributeModifiers(attr)
implantBonus = 0
for itemID, typeID, operation, value in modifiers:
categoryID = cfg.invtypes.Get(typeID).categoryID
if categoryID == const.categoryImplant:
implantBonus += value
totalAttributesText = localization.formatters.FormatNumeric(int(self.currentAttributes[attr]) + implantBonus, decimalPlaces=0)
self.totalLabels[x].text = totalAttributesText
self.implantModifier[x] = implantBonus
label, icon = self.implantLabels[x]
if implantBonus == 0:
icon.SetAlpha(0.5)
label.text = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/ImplantBonusZero')
label.SetAlpha(0.5)
else:
label.text = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/ImplantBonus', implantBonus=int(implantBonus))
if not self.readOnly:
self.unspentPts = unspentPts
self.sr.unassignedBar.SetValue(unspentPts)
unspentPtsText = localization.formatters.FormatNumeric(self.unspentPts, decimalPlaces=0)
self.availableLabel.text = unspentPtsText
if self.unspentPts <= 0:
self.sr.saveWarningText.state = uiconst.UI_HIDDEN
else:
self.sr.saveWarningText.state = uiconst.UI_DISABLED
def ConstructLayout(self):
"""
This method lays out the elements that comprise the respecification window,
assigning them initial display values that make sense in the case that
Load() later fails.
Performs no data initialization.
"""
self.implantLabels = []
self.respecBar = []
self.totalLabels = []
iconsize = 32
buttonSize = 24
boxWidth = 6
boxHeight = 12
boxMargin = 1
boxSpacing = 1
numBoxes = const.respecMaximumAttributeValue - const.respecMinimumAttributeValue
barWidth = numBoxes * boxSpacing + 2 * boxMargin + numBoxes * boxWidth - 1
barHeight = boxHeight + 2 * boxMargin
backgroundColor = (0.0, 0.0, 0.0, 0.0)
colorDict = {uicls.ClickableBoxBar.COLOR_UNSELECTED: (0.2, 0.2, 0.2, 1.0),
uicls.ClickableBoxBar.COLOR_SELECTED: (0.2, 0.8, 0.2, 1.0)}
headerText = EveLabelMedium(parent=self.sr.main, text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterRespecMessage'), state=uiconst.UI_NORMAL, align=uiconst.TOTOP, padding=8)
self.headerText = headerText
if self.readOnly:
columns = 7
else:
columns = 9
mainGrid = LayoutGrid(parent=self.sr.main, columns=columns, cellPadding=4, left=6, top=6, OnGridSizeChanged=self.OnMainGridSizeChanged)
self.mainGrid = mainGrid
for labelPath, colSpan in (('UI/CharacterSheet/CharacterSheetWindow/NavScroll/Attributes', 2),
('UI/CharacterSheet/CharacterSheetWindow/Attributes/BaseStatPoints', 1),
('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterImplants', 2),
('UI/CharacterSheet/CharacterSheetWindow/Attributes/RemappableStat', 1 if self.readOnly else 3),
('UI/CharacterSheet/CharacterSheetWindow/Attributes/StatTotal', 1)):
label = EveLabelMedium(text=localization.GetByLabel(labelPath), align=uiconst.CENTER)
mainGrid.AddCell(cellObject=label, colSpan=colSpan, cellPadding=(10, 2, 10, 2))
line = Line(align=uiconst.TOTOP)
mainGrid.AddCell(cellObject=line, colSpan=mainGrid.columns)
for x in xrange(5):
uicontrols.Icon(parent=mainGrid, width=iconsize, height=iconsize, size=iconsize, icon=self.attributeIcons[x], align=uiconst.TOPLEFT)
EveLabelMedium(text=self.attributeLabels[x], parent=mainGrid, state=uiconst.UI_DISABLED, align=uiconst.CENTERLEFT)
minText = localization.formatters.FormatNumeric(const.respecMinimumAttributeValue, decimalPlaces=0)
EveLabelMedium(text=minText, parent=mainGrid, state=uiconst.UI_DISABLED, align=uiconst.CENTER, bold=True)
icon = uicontrols.Icon(parent=mainGrid, width=32, height=32, size=32, icon=util.IconFile(cfg.invtypes.Get(self.implantTypes[x]).iconID), align=uiconst.TOPLEFT, ignoreSize=True)
implantLabel = EveLabelMedium(text='0', parent=mainGrid, align=uiconst.CENTERLEFT)
self.implantLabels.append((implantLabel, icon))
if not self.readOnly:
minusText = localization.GetByLabel('UI/Common/Buttons/Minus')
Button(parent=mainGrid, label=minusText, fixedwidth=buttonSize, func=self.DecreaseAttribute, args=(x,), align=uiconst.CENTERRIGHT)
bar = Container(parent=mainGrid, align=uiconst.CENTER, width=barWidth, height=barHeight, state=uiconst.UI_PICKCHILDREN)
bar = uicls.ClickableBoxBar(parent=bar, numBoxes=numBoxes, boxWidth=boxWidth, boxHeight=boxHeight, boxMargin=boxMargin, boxSpacing=boxSpacing, backgroundColor=backgroundColor, colorDict=colorDict)
bar.OnValueChanged = self.OnMemberBoxClick
bar.OnAttemptBoxClicked = self.ValidateBoxClick
self.respecBar.append(bar)
if not self.readOnly:
plusText = localization.GetByLabel('UI/Common/Buttons/Plus')
Button(parent=mainGrid, label=plusText, fixedwidth=buttonSize, func=self.IncreaseAttribute, args=(x,), align=uiconst.CENTERLEFT)
totalLabel = EveLabelMedium(text='0', parent=mainGrid, left=8, align=uiconst.CENTERRIGHT, bold=True)
self.totalLabels.append(totalLabel)
if not self.readOnly:
line = Line(align=uiconst.TOTOP)
mainGrid.AddCell(cellObject=line, colSpan=mainGrid.columns)
textObj = EveLabelMedium(text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/UnassignedAttributePoints'))
mainGrid.AddCell(cellObject=textObj, colSpan=6)
numBoxes = const.respecTotalRespecPoints - const.respecMinimumAttributeValue * 5
barWidth = numBoxes * boxSpacing + 2 * boxMargin + numBoxes * boxWidth - 1
unassignedBarParent = Container(align=uiconst.TOPLEFT, width=barWidth, height=barHeight, state=uiconst.UI_PICKCHILDREN)
mainGrid.AddCell(cellObject=unassignedBarParent, colSpan=2)
self.sr.unassignedBar = uicls.ClickableBoxBar(parent=unassignedBarParent, numBoxes=numBoxes, boxWidth=boxWidth, boxHeight=boxHeight, boxMargin=boxMargin, boxSpacing=boxSpacing, backgroundColor=backgroundColor, colorDict=colorDict, readonly=True, hintFormat='UI/CharacterSheet/CharacterSheetWindow/Attributes/UnassignedPointsHint')
self.availableLabel = EveLabelMedium(parent=mainGrid, align=uiconst.CENTERRIGHT, left=8)
mainGrid.FillRow()
self.sr.saveWarningText = EveLabelMedium(text=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CannotSaveUnassignedPoints'), color=(1.0, 0.0, 0.0, 0.9))
mainGrid.AddCell(cellObject=self.sr.saveWarningText, colSpan=mainGrid.columns)
if not self.readOnly:
uicontrols.ButtonGroup(btns=[[localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/SaveStatChanges'),
self.SaveChanges,
(),
None], [localization.GetByLabel('UI/Common/Buttons/Cancel'),
self.CloseByUser,
(),
None]], parent=self.sr.main, idx=0)
def OnMainGridSizeChanged(self, width, height, *args, **kwds):
self.mainGrid.top = self.headerText.height + self.headerText.padTop + self.headerText.padBottom
self.SetMinSize([width + 12, self.mainGrid.top + height + 110], refresh=1)
def SaveChanges(self, *args):
"""
This method is called when the user clicks the "Save Changes" button in the UI.
It performs basic sanity checks (primarily on the ranges of the user's desired
attribute values) and then forwards the request to the server.
It also forces the UI to no-op if the user didn't change anything, so the user
doesn't waste a respec by doing nothing.
"""
totalAttrs = 0
newAttributes = {}
for x in xrange(0, 5):
newAttributes[self.attributes[x]] = const.respecMinimumAttributeValue + self.respecBar[x].GetValue()
for attrValue in newAttributes.itervalues():
if attrValue < const.respecMinimumAttributeValue:
raise UserError('RespecAttributesTooLow')
elif attrValue > const.respecMaximumAttributeValue:
raise UserError('RespecAttributesTooHigh')
totalAttrs += attrValue
if totalAttrs != const.respecTotalRespecPoints or self.sr.unassignedBar.GetValue() > 0:
self.sr.saveWarningText.state = uiconst.UI_DISABLED
raise UserError('RespecAttributesMisallocated')
allSame = True
for attr in self.attributes:
if int(self.currentAttributes[attr]) != int(newAttributes[attr]):
allSame = False
break
if not allSame:
respecInfo = sm.GetService('skills').GetRespecInfo()
freeRespecs = respecInfo['freeRespecs']
if respecInfo['nextTimedRespec'] is None or respecInfo['nextTimedRespec'] <= blue.os.GetWallclockTime():
if eve.Message('ConfirmRespec2', {'months': int(const.respecTimeInterval / const.MONTH30)}, uiconst.YESNO) != uiconst.ID_YES:
return
elif freeRespecs > 0:
if eve.Message('ConfirmRespecFree', {'freerespecs': int(respecInfo['freeRespecs']) - 1}, uiconst.YESNO) != uiconst.ID_YES:
return
else:
raise UserError('RespecTooSoon', {'nextTime': respecInfo['nextTimedRespec']})
self.skillHandler.RespecCharacter(newAttributes[const.attributeCharisma], newAttributes[const.attributeIntelligence], newAttributes[const.attributeMemory], newAttributes[const.attributePerception], newAttributes[const.attributeWillpower])
self.CloseByUser()
def IncreaseAttribute(self, attribute, *args):
"""
This method is called when the user clicks on one of the "+" buttons to increase
an attribute. It ensures that the user has a point to spend and won't exceed the
maximum permissible value, then modifies the remaining points and updates the UI.
The ID of the attribute to increase is passed in as an argument.
"""
if self.respecBar[attribute].GetValue() >= const.respecMaximumAttributeValue - const.respecMinimumAttributeValue:
return
if self.unspentPts <= 0:
raise UserError('RespecCannotIncrementNotEnoughPoints')
if not self.respecBar[attribute].Increment():
raise UserError('RespecAttributesTooHigh')
def DecreaseAttribute(self, attribute, *args):
"""
This method is called when the user clicks on one of the "-" buttons to decrease
an attribute. It ensures that the user has a point to spend and won't fall below the
minimum permissible value, then modifies the remaining points and updates the UI.
The ID of the attribute to decrease is passed in as an argument.
"""
if self.respecBar[attribute].GetValue() <= 0:
return
if not self.respecBar[attribute].Decrement():
raise UserError('RespecAttributesTooLow')
def ValidateBoxClick(self, oldValue, newValue):
"""
This is an override of a method used in the clickableboxbar.
A CBB calls this method before attempting to change a value,
allowing us to perform business logic related to changing
the value of the bar.
Here, we use it for validation.
"""
if self.readOnly:
return False
if oldValue >= newValue:
return True
if self.unspentPts < newValue - oldValue:
return False
return True
def OnMemberBoxClick(self, oldValue, newValue):
"""
This is an override of a method on a clickableboxbar.
It is called whenever the value of a clickableboxbar changes.
"""
if oldValue is None or oldValue == newValue:
return
if self.readOnly:
return
self.unspentPts -= newValue - oldValue
self.sr.unassignedBar.SetValue(self.unspentPts)
unspentPtsText = localization.formatters.FormatNumeric(self.unspentPts, decimalPlaces=0)
self.availableLabel.text = unspentPtsText
for x in xrange(0, 5):
totalPts = const.respecMinimumAttributeValue + self.respecBar[x].GetValue() + self.implantModifier[x]
totalPtsText = localization.formatters.FormatNumeric(int(totalPts), decimalPlaces=0)
self.totalLabels[x].text = totalPtsText
if self.unspentPts <= 0:
self.sr.saveWarningText.state = uiconst.UI_HIDDEN
class AttributeRespecEntry(uicontrols.SE_BaseClassCore):
__guid__ = 'listentry.AttributeRespec'
default_showHilite = False
ENTRYHEIGHT = 44
def Startup(self, *args):
"""
This method lays out the elements used to display a character's next respec time.
"""
self.OnSelectCallback = None
self.sr.label = uicontrols.EveLabelSmall(text=localization.GetByLabel('UI/Neocom/NextDNAModification'), parent=self, left=8, top=4, maxLines=1)
self.sr.respecTime = uicontrols.EveLabelMedium(text='', parent=self, left=8, top=18, maxLines=1)
self.sr.numberOfRemaps = uicontrols.EveLabelMedium(text='', parent=self, left=8, top=38, maxLines=1, state=uiconst.UI_HIDDEN)
self.sr.respecButton = uicontrols.Button(parent=self, label=localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/RemapStatsNow'), align=uiconst.TOPRIGHT, pos=(2, 16, 0, 0), func=self.OpenRespecWindow, args=(False,))
self.hint = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterSheetHint')
def Load(self, node):
"""
This method loads in the data needed to display a character's next respec time
and sets the UI's display data as a result of checking the loaded data.
It also launches the UI refresh thread in the case that the character cannot
respec yet.
Node must contain:
nextTimedRespec - A bluetime indicating when the next respec is available.
freeRespecs - An integer indicating how many free/bonus respecs the character has.
"""
self.sr.node = node
freeRespecs = node.Get('freeRespecs', 0)
nextRespecTime = node.Get('nextTimedRespec', None)
canRemap = False
if nextRespecTime is None or nextRespecTime <= blue.os.GetWallclockTime():
self.sr.respecTime.text = localization.GetByLabel('UI/Generic/Now')
canRemap = True
else:
self.sr.respecTime.text = util.FmtDate(node.nextTimedRespec)
self.refreshThread = uthread.new(self.RefreshThread)
if freeRespecs > 0:
canRemap = True
lbl = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/BonusRemapsAvailable', remapsAvailable=freeRespecs)
self.sr.numberOfRemaps.text = lbl
if nextRespecTime is not None and nextRespecTime > blue.os.GetWallclockTime():
self.hint = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterSheetHintFree')
else:
self.hint = localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/CharacterSheetHintFreeTimed')
self.sr.numberOfRemaps.state = uiconst.UI_DISABLED
if not canRemap:
self.sr.respecButton.SetLabel(localization.GetByLabel('UI/CharacterSheet/CharacterSheetWindow/Attributes/AttributesOverview'))
self.sr.respecButton.args = (True,)
def OpenRespecWindow(self, readOnly, *args):
"""
This method is called when a user clicks the "Respec Now" (currently "Modify DNA") button.
It maximizes/foregrounds the attribute respec window if it's already open; otherwise,
it creates and displays the window.
"""
wnd = form.attributeRespecWindow.GetIfOpen()
if wnd is not None and not wnd.destroyed:
wnd.Maximize()
else:
form.attributeRespecWindow.Open(readOnly=readOnly)
def RefreshThread(self):
"""
This thread is launched when the UI opens and the character cannot yet respec.
It loops, sleeping for a second before checking if the respec time has passed.
If it has passed, it displays the Respec button, updates the UI text to "now"
and halts.
If the respec time is still in the future, it continues sleeping.
"""
if not self or self.destroyed:
return
sleepMsec = max(self.sr.node.nextTimedRespec - blue.os.GetWallclockTime(), 0) / 10000L
sleepMsec = min(sleepMsec, 60000)
while sleepMsec > 0:
blue.pyos.synchro.SleepWallclock(sleepMsec)
if not self or self.destroyed:
return
sleepMsec = max(self.sr.node.nextTimedRespec - blue.os.GetWallclockTime(), 0) / 10000L
sleepMsec = min(sleepMsec, 60000)
if not self or self.destroyed:
return
self.sr.respecButton.state = uiconst.UI_NORMAL
self.sr.respecTime.text = localization.GetByLabel('UI/Generic/Now')
|
[
"[email protected]"
] | |
3aa77a9c0f183d217f06a630f4191d7fb841a73d
|
ca3ff0bc4f7e9e8fcf677afa3a1a18dd2129a5d4
|
/daily_problems/problem_0_to_100/problem_86.py
|
42defc172f8b8a0ea3af081435c7fda161acd383
|
[
"MIT"
] |
permissive
|
rrwt/daily-coding-challenge
|
d9b23a82a1a3c4824b8f1aeacf6584afc5189ce7
|
4dcd59eaff021be0b9b1aba1dda73248c81454b7
|
refs/heads/master
| 2022-05-29T04:32:44.406196 | 2022-05-25T01:12:01 | 2022-05-25T01:12:01 | 181,972,357 | 1 | 0 |
MIT
| 2021-04-20T19:58:43 | 2019-04-17T21:41:25 |
Python
|
UTF-8
|
Python
| false | false | 1,125 |
py
|
"""
Given a string of parentheses, write a function to compute the minimum number of parentheses
to be removed to make the string valid (i.e. each open parenthesis is eventually closed).
For example,
given the string "()())()", you should return 1.
Given the string ")(", you should return 2, since we must remove all of them.
"""
def count_remove_parenthesis(text: str) -> int:
count_removal = 0
stack = []
for char in text:
if char == "(":
stack.append(char)
elif char == ")":
if not stack or stack[-1] == ")":
count_removal += 1
else:
stack.pop()
else:
raise AssertionError(f"{char} is unacceptable as a parenthesis")
return count_removal + len(stack)
if __name__ == "__main__":
assert count_remove_parenthesis("()())()") == 1
assert count_remove_parenthesis(")(") == 2
assert count_remove_parenthesis("") == 0
assert count_remove_parenthesis("((()))") == 0
assert count_remove_parenthesis("()(") == 1
assert count_remove_parenthesis("((()())())()()()(())") == 0
|
[
"[email protected]"
] | |
42ea4f4a66ce8ed2ede113d9666e461fe30a5244
|
af43615e07f2bfaa908d6d96b4c90f98ce3ad47b
|
/rdr_service/lib_fhir/fhirclient_1_0_6/models/deviceusestatement.py
|
642e4f165a8b132be219bba1db9b4de5dc7ab412
|
[
"BSD-3-Clause"
] |
permissive
|
all-of-us/raw-data-repository
|
11aa25385543f5f8ef706663b79ce181775c1c9a
|
461ae46aeda21d54de8a91aa5ef677676d5db541
|
refs/heads/devel
| 2023-09-01T06:47:25.710651 | 2023-09-01T01:18:56 | 2023-09-01T01:18:56 | 66,000,771 | 46 | 22 |
BSD-3-Clause
| 2023-09-14T21:06:38 | 2016-08-18T13:47:08 |
Python
|
UTF-8
|
Python
| false | false | 3,846 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/DeviceUseStatement) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class DeviceUseStatement(domainresource.DomainResource):
""" None.
A record of a device being used by a patient where the record is the result
of a report from the patient or another clinician.
"""
resource_name = "DeviceUseStatement"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.bodySiteCodeableConcept = None
""" Target body site.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.bodySiteReference = None
""" Target body site.
Type `FHIRReference` referencing `BodySite` (represented as `dict` in JSON). """
self.device = None
""" None.
Type `FHIRReference` referencing `Device` (represented as `dict` in JSON). """
self.identifier = None
""" None.
List of `Identifier` items (represented as `dict` in JSON). """
self.indication = None
""" None.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.notes = None
""" None.
List of `str` items. """
self.recordedOn = None
""" None.
Type `FHIRDate` (represented as `str` in JSON). """
self.subject = None
""" None.
Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """
self.timingDateTime = None
""" None.
Type `FHIRDate` (represented as `str` in JSON). """
self.timingPeriod = None
""" None.
Type `Period` (represented as `dict` in JSON). """
self.timingTiming = None
""" None.
Type `Timing` (represented as `dict` in JSON). """
self.whenUsed = None
""" None.
Type `Period` (represented as `dict` in JSON). """
super(DeviceUseStatement, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceUseStatement, self).elementProperties()
js.extend([
("bodySiteCodeableConcept", "bodySiteCodeableConcept", codeableconcept.CodeableConcept, False, "bodySite", False),
("bodySiteReference", "bodySiteReference", fhirreference.FHIRReference, False, "bodySite", False),
("device", "device", fhirreference.FHIRReference, False, None, True),
("identifier", "identifier", identifier.Identifier, True, None, False),
("indication", "indication", codeableconcept.CodeableConcept, True, None, False),
("notes", "notes", str, True, None, False),
("recordedOn", "recordedOn", fhirdate.FHIRDate, False, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
("timingDateTime", "timingDateTime", fhirdate.FHIRDate, False, "timing", False),
("timingPeriod", "timingPeriod", period.Period, False, "timing", False),
("timingTiming", "timingTiming", timing.Timing, False, "timing", False),
("whenUsed", "whenUsed", period.Period, False, None, False),
])
return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
from . import timing
|
[
"[email protected]"
] | |
316f82491c8f735a152f72a405f04e15d34d706e
|
0cf054b1740339b22d3211695e44e11e68c81328
|
/suggestion/analysis_util.py
|
d03d3e3988a20faeabe5cc43261d257a8d83659e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
kcarnold/sentiment-slant-gi18
|
076aaf557c8e33c84349f78a883c0fa3210e9ada
|
6028b42627e3eec14a1f27986f8925d8b1e6ad9c
|
refs/heads/master
| 2022-07-01T10:20:50.314847 | 2017-09-13T23:03:14 | 2017-09-13T23:03:14 | 125,105,730 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,623 |
py
|
import os
try:
import ujson as json
except ImportError:
import json
import re
import numpy as np
from suggestion.util import mem
from suggestion.paths import paths
import subprocess
#
# Data for decoding surveys.
#
skip_col_re = re.compile(
r'Great.job|Q_\w+|nextURL|clientId|Timing.*|Browser.*|Location.*|Recipient.*|Response.+|ExternalDataReference|Finished|Status|IPAddress|StartDate|EndDate|Welcome.+|Display Order|Demographic Questions|Closing Survey.+|revisionDesc|prewrite')
prefix_subs = {
"How much do you agree with the following statements about the suggestions that the system gave?-They ": "suggs-",
"How much do you agree with the following statements?-The suggestions ": "suggs-",
"How much do you agree with the following statements about the words or phrases that the keyboard...-They ": "suggs-",
"Now think about the brainstorming you did before the final writing. How much do you agree with th...-": "brainstorm-",
"Think about when you were typing out your ${e://Field/revisionDesc}. How much do you agree with t...-": "final-",
"How Accurately Can You Describe Yourself? Describe yourself as you generally are now, not as you...-": "pers-",
"Describe yourself as you generally are now, not as you wish to be in the future. Describe yoursel...-": "pers-",
}
decode_scales = {
"Strongly disagree": 1,
"Disagree": 2,
"Somewhat disagree": 3,
"Neither agree nor disagree": 4,
"Somewhat agree": 5,
"Agree": 6,
"Strongly agree": 7,
"Very Inaccurate": 1,
"Moderately Inaccurate": 2,
"Neither Accurate Nor Inaccurate": 3,
"Moderately Accurate": 4,
"Very Accurate": 5}
def get_rev(logpath):
with open(logpath) as logfile:
for line in logfile:
line = json.loads(line)
if 'rev' in line:
return line['rev']
def checkout_old_code(git_rev):
import shutil
by_rev = paths.parent / 'old-code'
rev_root = by_rev / git_rev
if not os.path.isdir(rev_root):
print("Checking out repository at", git_rev)
subprocess.check_call(['git', 'clone', '..', git_rev], cwd=by_rev)
subprocess.check_call(['git', 'checkout', git_rev], cwd=rev_root)
print("Installing npm packages")
subprocess.check_call(['yarn'], cwd=os.path.join(rev_root, 'frontend'))
@mem.cache
def get_log_analysis_raw(logpath, logfile_size, git_rev=None, analysis_files=None):
# Ignore analysis_files; just use them to know when to invalidate the cache.
checkout_old_code(git_rev)
analyzer_path = os.path.join(paths.parent, 'frontend', 'analysis')
with open(logpath) as logfile:
result = subprocess.check_output([analyzer_path], stdin=logfile)
assert len(result) > 0
return result
def get_log_analysis(participant, git_rev=None):
analysis_files = {
name: open(paths.parent / 'frontend' / name).read()
for name in ['analyze.js', 'analysis', 'src/Analyzer.js']
}
logpath = paths.parent / 'logs' / (participant+'.jsonl')
if git_rev is None:
git_rev = get_rev(logpath)
logfile_size = os.path.getsize(logpath)
result = get_log_analysis_raw(logpath, logfile_size, git_rev=git_rev, analysis_files=analysis_files)
analyzed = json.loads(result)
analyzed['git_rev'] = git_rev
return analyzed
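# Usage sketch (illustrative; '<participant_id>' stands for an actual log file
# name under logs/). The call reads the git revision recorded in the log, checks
# out the frontend at that revision, runs the bundled analyzer over the log, and
# caches the parsed JSON result.
#
#     analysis = get_log_analysis('<participant_id>')
#     print(analysis['git_rev'])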
def classify_annotated_event(evt):
typ = evt['type']
if typ in {'externalAction', 'next', 'resized', 'tapText'}:
return None
text = evt['curText']
null_word = len(text) == 0 or text[-1] == ' '
text = text.strip()
bos = len(text) == 0 or text[-1] in '.?!'
if typ == 'tapKey':
return 'tapKey'
if typ == 'tapBackspace':
return 'tapBackspace'
if typ == 'tapSuggestion':
if bos:
sugg_mode = 'bos'
elif null_word:
sugg_mode = 'full'
else:
sugg_mode = 'part'
return 'tapSugg_' + sugg_mode
assert False, typ
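# Example (illustrative only): a suggestion tapped right after a sentence-ending
# period is classified as a beginning-of-sentence suggestion.
#
#     evt = {'type': 'tapSuggestion', 'curText': 'The food was great. '}
#     classify_annotated_event(evt)  # -> 'tapSugg_bos'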
def get_content_stats_single_suggestion(sugg, word_freq_analyzer):
from suggestion import suggestion_generator
sugg = sugg.copy()
meta = sugg.pop('flags')
if not meta['domain'].startswith('yelp'):
return
if sugg['cur_word']:
# Skip partial words.
return
model = suggestion_generator.Model.get_or_load_model(meta['domain'])
try:
toks = suggestion_generator.tokenize_sofar(sugg['sofar'])
except:
# Tokenization failed.
return
# Optimization: trim context to the n-gram level, plus some padding.
toks = toks[-10:]
state = model.get_state(toks)[0]
clf_startstate = suggestion_generator.sentiment_classifier.get_state(toks)
res = []
for sugg_slot, rec in enumerate(sugg['recs']['predictions']):
phrase = rec['words']
if phrase:
sentiment_posteriors = suggestion_generator.sentiment_classifier.classify_seq_by_tok(clf_startstate, phrase)
sentiment = np.mean(sentiment_posteriors, axis=0) @ suggestion_generator.sentiment_classifier.sentiment_weights
else:
sentiment = None
analyzer_indices = [word_freq_analyzer.word2idx.get(tok) for tok in phrase]
res.append(dict(
request_id=sugg['request_id'],
sugg_slot=sugg_slot,
sugg_contextual_llk=model.score_seq(state, phrase)[0],
sugg_unigram_llk=np.nanmean(np.array([word_freq_analyzer.log_freqs[idx] if idx is not None else np.nan for idx in analyzer_indices])),
sugg_sentiment=sentiment))
return res
|
[
"[email protected]"
] | |
bd3081c830696819735002e529dbbb9483b40d02
|
7d8cafdc9e2a00979381f93149ba247b400badf9
|
/RequestDemo/script/add_house_develop.py
|
bb2ea12dfe5ca4bfef7b578ecc96101b3055b62a
|
[] |
no_license
|
zhonglinglong/isz_request
|
1987d0897aac390c77a018a08bd0a65647eaf4c4
|
8eccabfd8bcefdb58f79db0cd9cb5f3f9d227b3a
|
refs/heads/master
| 2021-05-12T06:20:22.312090 | 2018-01-18T11:59:28 | 2018-01-18T11:59:28 | 117,217,090 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,002 |
py
|
# -*- coding:utf8 -*-
from time import sleep
import pymysql
import json
from requests import Session
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sqlConn = pymysql.connect(host='192.168.0.208', user='zhonglinglong', password='zll.123',db='isz_erp_npd',charset='utf8',port=3306)
sqlCursor = sqlConn.cursor()
def searchSQL(sql,type='tuple'):
sqlCursor.execute(sql)
if type == 'list':
value = map(lambda x:x[0].encode('utf-8'),sqlCursor.fetchall())
return value
if type == 'dict':
value = {}
data = sqlCursor.fetchall()
for i in data:
try:
value[i[0].encode('utf-8')] = i[1].encode('utf-8')
except:
value[i[0].encode('utf-8')] = str(i[1])
return value
if type == 'tuple':
value = sqlCursor.fetchall()
return value
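# Usage sketch (illustrative only; assumes the MySQL connection above is reachable).
# 'tuple' (the default) returns raw rows, 'list' returns only the first column,
# and 'dict' maps the first column to the second.
#
#     rows = searchSQL("SELECT residential_id, residential_name FROM residential LIMIT 5")
#     names = searchSQL("SELECT residential_name FROM residential LIMIT 5", type='list')
#     id_by_name = searchSQL("SELECT residential_name, residential_id FROM residential", type='dict')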
class RequestsApi:
def __init__(self):
self.S = Session()
def SendPost(self, url, header,datas):
data = {
"authTag": "4028803E60E8CE080160F3A2C1ED0058",
"auth_code": "EA4A263F29FF586C9FC6D16A20E8C86ABD708EEE6CFAE74074727F766533FA33F12EFB25CF3B273F912CB1A096C9B9A7DBF53B27F7A7302F22E6CBBCE51E4311A8B6A1CBC7BCC2F28C611200F7166DC8EDB90287CF05CF045DCC9BE29AA5EE74F77544AB717D8433F12C269751243CE0E8F78955AB8BC8028490D40323F337D0929C8150AB3511DB1F34234446D3D8BAC71000B3BB723D189F74424EDF0E0A773D922AE7991CE199D672EB0DE067940BD69450A2D5CE334F1C234B9F868429E26F056702028732D5DD89063E463D17B0ABAD033DCF3599F1A4C370C8D8D3ECEAFB2C71054650AB26ECBE35DD2E15A30D5A78E8DC8E84B8AF5BF2B63A5A8FDCD8F4B3774B036758BB8A0DE2303BEBCA5876D1373405DA084E9A2997D113F80D2974D0567F512732FD11011F163553A206E2C6E08C9DE2CD86D80E7B2ACD3C76A7",
"user_phone": "18279881085",
"user_pwd": "isz123456",
"verificationCode": "0451"
}
self.r = self.S.post(url='http://isz.ishangzu.com/isz_base/LoginController/login.action', data=json.dumps(data), headers={"Content-Type": "application/json"})
self.r=self.S.post(url=url,headers=eval(header),data=json.dumps(eval(datas)))
return self.r.json()
def add_develop_house(ApartmentData,RoomNumberData):
"新增楼盘"
try:
sql = "SELECT sd.parent_id from sys_department sd INNER JOIN sys_user sur on sur.dep_id = sd.dep_id INNER JOIN sys_position spt on spt.position_id = sur.position_id " \
"where sd.dep_district = '330100' and sd.dep_id <> '00000000000000000000000000000000' and (spt.position_name like '资产管家%' or spt.position_name like '综合管家%') " \
"ORDER BY RAND() LIMIT 1"
dutyDepID = searchSQL(sql)[0][0]
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/residential/residentialList.jsp"}'
url = 'http://isz.ishangzu.com/isz_house/ResidentialController/saveResidential.action'
data = '''{"residential_name": "%s",
"residential_jianpin": "zllgj",
"residential_jianpin": "csgj",
"city_code": "330100",
"area_code": "330102",
"taBusinessCircleString": "5",
"address": "工具创建楼盘",
"gd_lng": "120.149395",
"gd_lat": "30.298125",
"property_type": "ordinary",
"taDepartString": "%s",
"build_date": "1988",
"totle_buildings": "2",
"total_unit_count": "200",
"total_house_count": "1000",
"build_area": "100.00",
"property_company": "杭州科技有限公司",
"property_fee": "20",
"plot_ratio": "30.00",
"green_rate": "100.00",
"parking_amount": "200",
"other_info": "楼盘亮点",
"bus_stations": "公交站",
"metro_stations": "地铁站","byname": "fh"}''' % (ApartmentData,dutyDepID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print ("新增楼盘名称为:'"+"%s"+"'成功!") % ApartmentData
else:
print result
return
except BaseException as e:
print e
return
    # Add a building
try:
sql = "select residential_id from residential WHERE residential_name='%s' and deleted=0" % ApartmentData
residentialID = searchSQL(sql)[0][0]
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/residential/residentialList.jsp"}'
url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingNew.action'
data = '''{
"property_name": "%s",
"building_name": "1幢",
"no_building": "无",
"gd_lng": "120.152476",
"gd_lat": "30.287232",
"housing_type": "ordinary",
"ground_floors": "20",
"underground_floors": "2",
"ladder_count": "10",
"house_count": "200",
"residential_id": "%s",
"have_elevator": "Y"}''' % (ApartmentData, residentialID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print "新增栋成功!"
else:
print result
return
except BaseException as e:
print e
return
    # Add a unit
try:
sql = "SELECT building_id from residential_building where residential_id ='%s'and deleted=0 " % residentialID
buildingID = searchSQL(sql)[0][0]
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/residential/residentialList.jsp"}'
url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingUnit.action'
data = '''{"property_name": "%s","unit_name": "A","no_unit": "无","building_id": "%s"}''' % (ApartmentData + '1幢', buildingID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print "新增单元成功!"
else:
print result
return
except BaseException as e:
print e
return
    # Add a floor
try:
sql = "SELECT unit_id from residential_building_unit where building_id='%s' " % buildingID
unitID = searchSQL(sql)[0][0]
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/residential/residentialList.jsp"}'
url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingFloor.action'
data = '{"property_name":"%s","floor_name":"1","building_id":"%s","unit_id":"%s"}' % (ApartmentData + '1幢A', buildingID, unitID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print "新增楼层成功!"
else:
print result
return
except BaseException as e:
print e
return
    # Add room numbers
try:
sql = "SELECT floor_id from residential_building_floor where unit_id='%s' " % unitID
floorID = searchSQL(sql)[0][0]
housenumber = 100
for i in range(int(RoomNumberData)):
housenumber = housenumber + 1
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/residential/residentialList.jsp"}'
url = 'http://isz.ishangzu.com/isz_house/ResidentialBuildingController/saveResidentialBuildingHouseNo.action'
data = '''{
"property_name": "%s",
"house_no": "%s",
"rooms": "1",
"livings": "1",
"bathrooms": "1",
"kitchens": "1",
"balconys": "1",
"build_area": "100.00",
"orientation": "NORTH",
"building_id": "%s",
"unit_id": "%s",
"floor_id": "%s"}''' % (ApartmentData + '1幢A1层', housenumber, buildingID, unitID, floorID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print ("新增"+"%s"+"房间成功!") % housenumber
else:
print result
return
except BaseException as e:
print e
return
    # Add house listings
try:
housenoID = []
sql = "SELECT house_no_id from residential_building_house_no where floor_id='%s' ORDER BY create_time " % floorID
housenoID.append(searchSQL(sql))
house_no = 100
residential_name = ApartmentData + '(fh)'
for i in range(int(RoomNumberData)):
house_no_search = housenoID[0][i][0]
house_no = house_no + 1
residential_address = '杭州市 上城区 四季青 ' + ApartmentData
url = "http://isz.ishangzu.com/isz_house/HouseController/saveHouseDevelop.action"
header = '{ "Content-Type": "application/json", "Referer": "http://isz.ishangzu.com/isz_house/jsp/house/develop/houseDevelopinfoAdd.jsp"}'
data = """{
"residential_name_search": "%s",
"building_name_search": "%s",
"unit_search": "%s",
"house_no_search": "%s",
"residential_name": "%s",
"building_name": "1幢",
"unit": "A",
"house_no": "%s",
"residential_address": "%s",
"city_code": "330100",
"area_code": "330102",
"business_circle_id": "35",
"contact": "钟晓晓",
"did": "8A215243584E2141015867FD6E1F5E9D",
"uid": "4028803E5B196FD1015B1E5CF23C0294",
"house_status": "WAITING_RENT",
"category": "NOLIMIT",
"source": "INTRODUCE",
"rental_price": "4567.00",
"rooms": "1",
"livings": "1",
"kitchens": "1",
"bathrooms": "1",
"balconys": "1",
"build_area": "100",
"orientation": "NORTH",
"property_type": "MULTI_LIFE",
"property_use": "HOUSE",
"remark": "测试工具新增房源",
"look_type": "DIRECTION",
"residential_id": "%s",
"building_id": "%s",
"unit_id": "%s",
"house_no_id": "%s",
"business_circle_name": "四季青",
"contact_tel": "18233669988",
"floor": "1",
"floor_id": "%s"}""" % (residentialID, buildingID, unitID, house_no_search, residential_name, house_no, residential_address,
residentialID, buildingID, unitID, house_no_search, floorID)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print ("新增楼盘名称:'"+"%s'下%s"+"房源成功!") % (residential_address,house_no)
else:
print result
return
except BaseException as e:
print e
return
    # Audit (approve) the house listings
try:
        sql = 'SELECT house_develop_id from house_develop where residential_name = "%s" and deleted=0 ORDER BY create_time ' % residential_name
        sqltime = 'SELECT update_time from house_develop where residential_name = "%s" and deleted=0 ORDER BY create_time ' % residential_name
housedevelopid = []
update_times = []
housedevelopid.append(searchSQL(sql))
update_times.append(searchSQL(sqltime))
house_no = 100
for i in range(int(RoomNumberData)):
update_time = update_times[0][i][0]
house_develop_id = housedevelopid[0][i][0]
house_no = house_no + 1
house_no_search = housenoID[0][i][0]
url = "http://isz.ishangzu.com/isz_house/HouseController/auditHouseDevelop.action"
header = '{ "Content-Type": "application/json", "Referer":"http://isz.ishangzu.com/isz_house/jsp/house/develop/houseDevelopList.jsp?from=waitAudit"}'
data = '''{
"residential_name_search": "%s",
"building_name_search": "%s",
"unit_search": "%s",
"house_no_search": "%s",
"residential_name": "%s",
"building_name": "1幢",
"floor": "1",
"house_no_suffix": "xxx",
"residential_address": "杭州市 上城区 四季青 工具创建楼盘",
"residential_department_did": "%s",
"house_status": "WAITING_RENT",
"category": "NOLIMIT",
"rental_price": "4567.00",
"build_area": "100.00",
"rooms": "1",
"livings": "1",
"kitchens": "1",
"bathrooms": "1",
"balconys": "1",
"orientation": "NORTH",
"source": "INTRODUCE",
"property_use": "HOUSE",
"property_type": "MULTI_LIFE",
"look_type": "DIRECTION",
"remark": "测试工具新增房源",
"houseRent": {
"house_status": "WAITING_RENT",
"category": "NOLIMIT",
"source": "INTRODUCE",
"look_type": "DIRECTION",
"rental_price": "4567.00",
"remark": "测试工具新增房源"
},
"audit_status": "PASS",
"building_id": "%s",
"residential_id": "%s",
"unit_id": "%s",
"unit": "A",
"floor_id":"%s",
"house_no": "%s",
"house_no_id": "%s",
"area_code": "330102",
"city_code": "330100",
"house_develop_id": "%s",
"update_time": "%s",
"audit_content": "同意"}''' % (residentialID, buildingID, unitID, house_no_search, residential_name,dutyDepID, buildingID, residentialID, unitID,floorID, house_no, house_no_search, house_develop_id, update_time)
result = RequestsApi().SendPost(url, header, data)
if result["code"] == 0:
print ("审核杭州市 上城区 四季青 工具创建楼盘下:"+"%s"+"号房间房源成功!") % house_no
else:
print result
return
except BaseException as e:
print e
return
print '添加开发房源OK'
add_develop_house("新增开发房源脚本重构",3)
|
[
"[email protected]"
] | |
045342ac2e42a05f9cab53a4746fa576a9d0dea4
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res_bw/scripts/common/lib/xml/dom/minidom.py
|
b74bd0880ba9e6ca29ea81fe6f521e6852347bd7
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 60,353 |
py
|
# 2015.11.18 12:06:40 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/xml/dom/minidom.py
"""Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE, xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX
def __nonzero__(self):
return True
def toxml(self, encoding = None):
return self.toprettyxml('', '', encoding)
def toprettyxml(self, indent = '\t', newl = '\n', encoding = None):
writer = _get_StringIO()
if encoding is not None:
import codecs
writer = codecs.lookup(encoding)[3](writer)
if self.nodeType == Node.DOCUMENT_NODE:
self.writexml(writer, '', indent, newl, encoding)
else:
self.writexml(writer, '', indent, newl)
return writer.getvalue()
def hasChildNodes(self):
if self.childNodes:
return True
else:
return False
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
return newChild
else:
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr('%s cannot be child of %s' % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index - 1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
return node
else:
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr('%s cannot be child of %s' % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
else:
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr('%s cannot be child of %s' % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if newChild.nodeType in _nodeTypes_with_children or oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
return None
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return
return
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, '_user_data'):
for key, (data, handler) in self._user_data.items():
if handler is not None:
handler.handle(operation, key, data, src, dst)
return
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
return
defproperty(Node, 'firstChild', doc='First child node, or None.')
defproperty(Node, 'lastChild', doc='Last child node, or None.')
defproperty(Node, 'localName', doc='Namespace-local name of this node.')
def _append_child(self, node):
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.__dict__['previousSibling'] = last
last.__dict__['nextSibling'] = node
childNodes.append(node)
node.__dict__['parentNode'] = self
def _in_document(node):
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"""Writes datachars to writer."""
if data:
        data = data.replace('&', '&').replace('<', '<').replace('"', '"').replace('>', '>')
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and (name == '*' or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if (localName == '*' or node.localName == localName) and (nsURI == '*' or node.namespaceURI == nsURI):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = '#document-fragment'
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
nodeType = Node.ATTRIBUTE_NODE
attributes = None
ownerElement = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI = EMPTY_NAMESPACE, localName = None, prefix = None):
d = self.__dict__
d['nodeName'] = d['name'] = qName
d['namespaceURI'] = namespaceURI
d['prefix'] = prefix
d['childNodes'] = NodeList()
self.childNodes.append(Text())
def _get_localName(self):
return self.nodeName.split(':', 1)[-1]
def _get_specified(self):
return self.specified
def __setattr__(self, name, value):
d = self.__dict__
if name in ('value', 'nodeValue'):
d['value'] = d['nodeValue'] = value
d2 = self.childNodes[0].__dict__
d2['data'] = d2['nodeValue'] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
elif name in ('name', 'nodeName'):
d['name'] = d['nodeName'] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
else:
d[name] = value
return
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == 'xmlns':
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr("illegal use of 'xmlns' prefix for the wrong namespace")
d = self.__dict__
d['prefix'] = prefix
if prefix is None:
newName = self.localName
else:
newName = '%s:%s' % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
d['nodeName'] = d['name'] = newName
return
def _set_value(self, value):
d = self.__dict__
d['value'] = d['nodeValue'] = value
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.childNodes[0].data = value
def unlink(self):
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[self.namespaceURI, self.localName]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
return
def _get_isId(self):
if self._is_id:
return True
else:
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
return info.isId(self.nodeName)
return
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
else:
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
return info.getAttributeType(self.nodeName)
return
defproperty(Attr, 'isId', doc='True if this attribute is an ID.')
defproperty(Attr, 'localName', doc='Namespace-local name of this attribute.')
defproperty(Attr, 'schemaType', doc='Schema type for this attribute.')
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[self._attrs.keys()[index]]
except IndexError:
return None
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def has_key(self, key):
if isinstance(key, StringTypes):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value = None):
return self._attrs.get(name, value)
__len__ = _get_length
__hash__ = None
def __cmp__(self, other):
if self._attrs is getattr(other, '_attrs', None):
return 0
else:
return cmp(id(self), id(other))
return
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
def __setitem__(self, attname, value):
if isinstance(value, StringTypes):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError, 'value must be a string or Attr object'
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[namespaceURI, localName]
except KeyError:
return None
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[n.namespaceURI, n.localName]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
return
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[n.namespaceURI, n.localName]
del self._attrs[n.nodeName]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
return
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr('%s cannot be child of %s' % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[node.namespaceURI, node.localName] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return (self._attrs, self._attrsNS, self._ownerElement)
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, 'length', doc='Number of nodes in the NamedNodeMap.')
AttributeList = NamedNodeMap
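# Illustrative example of the transient NamedNodeMap view described above
# (assumes `doc` is a Document created via the DOMImplementation defined below):
#
#     elem = doc.createElement("item")
#     elem.setAttribute("id", "a1")
#     attrs = elem.attributes        # NamedNodeMap over the element's attributes
#     print attrs.length             # -> 1
#     print attrs["id"].value        # -> a1
#     attrs["id"] = "a2"             # mutates the underlying element
#     print elem.getAttribute("id")  # -> a2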
class TypeInfo(object):
__slots__ = ('namespace', 'name')
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return '<TypeInfo %r (from %r)>' % (self.name, self.namespace)
else:
return '<TypeInfo %r>' % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI = EMPTY_NAMESPACE, prefix = None, localName = None):
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
return self.tagName.split(':', 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
for attr in self._attrs.values():
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
return
def getAttribute(self, attname):
try:
return self._attrs[attname].value
except KeyError:
return ''
def getAttributeNS(self, namespaceURI, localName):
try:
return self._attrsNS[namespaceURI, localName].value
except KeyError:
return ''
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
d = attr.__dict__
d['value'] = d['nodeValue'] = value
d['ownerDocument'] = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
d = attr.__dict__
d['value'] = d['nodeValue'] = value
if attr.isId:
_clear_id_cache(self)
return
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
d = attr.__dict__
d['prefix'] = prefix
d['nodeName'] = qualifiedName
d['value'] = d['nodeValue'] = value
d['ownerDocument'] = self.ownerDocument
self.setAttributeNode(attr)
else:
d = attr.__dict__
if value != attr.value:
d['value'] = d['nodeValue'] = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
d['prefix'] = prefix
d['nodeName'] = qualifiedName
return
def getAttributeNode(self, attrname):
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr('attribute node already owned')
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
return old1
elif old2 is not attr:
return old2
else:
return
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
try:
attr = self._attrsNS[namespaceURI, localName]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
node.ownerDocument = self.ownerDocument
return
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(self, namespaceURI, localName, NodeList())
def __repr__(self):
return '<DOM Element: %s at %#x>' % (self.tagName, id(self))
def writexml(self, writer, indent = '', addindent = '', newl = ''):
writer.write(indent + '<' + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(' %s="' % a_name)
_write_data(writer, attrs[a_name].value)
writer.write('"')
if self.childNodes:
writer.write('>')
if len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE:
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write('</%s>%s' % (self.tagName, newl))
else:
writer.write('/>%s' % newl)
def _get_attributes(self):
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr.__dict__['_is_id'] = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
return
defproperty(Element, 'attributes', doc='NamedNodeMap of attributes on the element.')
defproperty(Element, 'localName', doc='Namespace-local name of this element.')
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._attrs[attr.name] = attr
element._attrsNS[attr.namespaceURI, attr.localName] = attr
attr.__dict__['ownerElement'] = element
class Childless():
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(self.nodeName + ' nodes cannot have children')
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(self.nodeName + ' nodes do not have children')
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(self.nodeName + ' nodes do not have children')
def normalize(self):
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(self.nodeName + ' nodes do not have children')
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
def __init__(self, target, data):
self.target = self.nodeName = target
self.data = self.nodeValue = data
def _get_data(self):
return self.data
def _set_data(self, value):
d = self.__dict__
d['data'] = d['nodeValue'] = value
def _get_target(self):
return self.target
def _set_target(self, value):
d = self.__dict__
d['target'] = d['nodeName'] = value
def __setattr__(self, name, value):
if name == 'data' or name == 'nodeValue':
self.__dict__['data'] = self.__dict__['nodeValue'] = value
elif name == 'target' or name == 'nodeName':
self.__dict__['target'] = self.__dict__['nodeName'] = value
else:
self.__dict__[name] = value
def writexml(self, writer, indent = '', addindent = '', newl = ''):
writer.write('%s<?%s %s?>%s' % (indent,
self.target,
self.data,
newl))
class CharacterData(Childless, Node):
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self.__dict__['data']
def _set_data(self, data):
d = self.__dict__
d['data'] = d['nodeValue'] = data
_get_nodeValue = _get_data
_set_nodeValue = _set_data
def __setattr__(self, name, value):
if name == 'data' or name == 'nodeValue':
self.__dict__['data'] = self.__dict__['nodeValue'] = value
else:
self.__dict__[name] = value
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = '...'
else:
dotdotdot = ''
return '<DOM %s node "%r%s">' % (self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr('offset cannot be negative')
if offset >= len(self.data):
raise xml.dom.IndexSizeErr('offset cannot be beyond end of data')
if count < 0:
raise xml.dom.IndexSizeErr('count cannot be negative')
return self.data[offset:offset + count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr('offset cannot be negative')
if offset >= len(self.data):
raise xml.dom.IndexSizeErr('offset cannot be beyond end of data')
if arg:
self.data = '%s%s%s' % (self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr('offset cannot be negative')
if offset >= len(self.data):
raise xml.dom.IndexSizeErr('offset cannot be beyond end of data')
if count < 0:
raise xml.dom.IndexSizeErr('count cannot be negative')
if count:
self.data = self.data[:offset] + self.data[offset + count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr('offset cannot be negative')
if offset >= len(self.data):
raise xml.dom.IndexSizeErr('offset cannot be beyond end of data')
if count < 0:
raise xml.dom.IndexSizeErr('count cannot be negative')
if count:
self.data = '%s%s%s' % (self.data[:offset], arg, self.data[offset + count:])
defproperty(CharacterData, 'length', doc='Length of the string data.')
class Text(CharacterData):
nodeType = Node.TEXT_NODE
nodeName = '#text'
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr('illegal offset value')
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent = '', addindent = '', newl = ''):
_write_data(writer, '%s%s%s' % (indent, self.data, newl))
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
d = self.__dict__
d['data'] = content
d['nodeValue'] = content
return self
else:
return
return
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
else:
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
return info.isElementContent()
return
defproperty(Text, 'isWhitespaceInElementContent', doc='True iff this text node contains only whitespace and is in element content.')
defproperty(Text, 'wholeText', doc='The text of all logically-adjacent text nodes.')
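# Illustrative example of splitText/wholeText (assumes `doc` is a Document):
#
#     parent = doc.createElement("p")
#     t = doc.createTextNode("hello world")
#     parent.appendChild(t)
#     tail = t.splitText(5)          # t.data == "hello", tail.data == " world"
#     print t.wholeText              # -> "hello world" (joins adjacent text nodes)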
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return
class Comment(Childless, CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = '#comment'
def __init__(self, data):
self.data = self.nodeValue = data
def writexml(self, writer, indent = '', addindent = '', newl = ''):
if '--' in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write('%s<!--%s-->%s' % (indent, self.data, newl))
class CDATASection(Text):
nodeType = Node.CDATA_SECTION_NODE
nodeName = '#cdata-section'
def writexml(self, writer, indent = '', addindent = '', newl = ''):
if self.data.find(']]>') >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write('<![CDATA[%s]]>' % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = ('_seq',)
def __init__(self, seq = ()):
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError, name_or_tuple
return node
def item(self, index):
if index < 0:
return None
else:
try:
return self._seq[index]
except IndexError:
return None
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr('NamedNodeMap instance is read-only')
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr('NamedNodeMap instance is read-only')
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr('NamedNodeMap instance is read-only')
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr('NamedNodeMap instance is read-only')
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, 'length', doc='Number of entries in the NamedNodeMap.')
class Identified():
"""Mix-in class that supports the publicId and systemId attributes."""
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
e._call_user_data_handler(operation, n, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return
return
def writexml(self, writer, indent = '', addindent = '', newl = ''):
writer.write('<!DOCTYPE ')
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'" % (newl,
self.publicId,
newl,
self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(' [')
writer.write(self.internalSubset)
writer.write(']')
writer.write('>' + newl)
return
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr('cannot append children to an entity node')
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr('cannot insert children below an entity node')
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr('cannot remove children from an entity node')
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr('cannot replace children of an entity node')
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [('core', '1.0'),
('core', '2.0'),
('core', None),
('xml', '1.0'),
('xml', '2.0'),
('xml', None),
('ls-load', '3.0'),
('ls-load', None)]
def hasFeature(self, feature, version):
if version == '':
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr('doctype object owned by another DOM tree')
doc = self._create_document()
add_root_element = not (namespaceURI is None and qualifiedName is None and doctype is None)
if not qualifiedName and add_root_element:
raise xml.dom.InvalidCharacterErr('Element with no name')
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == 'xml' and namespaceURI != 'http://www.w3.org/XML/1998/namespace':
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr('illegal use of prefix without namespaces')
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return
return
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
using information available to it.
"""
__slots__ = ('tagName',)
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack = None
return
class Document(Node, DocumentLS):
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.DOCUMENT_TYPE_NODE)
nodeType = Node.DOCUMENT_NODE
nodeName = '#document'
nodeValue = None
attributes = None
doctype = None
parentNode = None
previousSibling = nextSibling = None
implementation = DOMImplementation()
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.childNodes = NodeList()
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
return
def _get_elem_info(self, element):
if element.namespaceURI:
key = (element.namespaceURI, element.localName)
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr('%s cannot be child of %s' % (repr(node), repr(self)))
if node.parentNode is not None:
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE and self._get_documentElement():
raise xml.dom.HierarchyRequestErr('two document elements disallowed')
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
return
def cloneNode(self, deep):
if not deep:
return
else:
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
if not childclone.ownerDocument.isSameNode(clone):
raise AssertionError
clone.childNodes.append(childclone)
                if childclone.nodeType == Node.DOCUMENT_NODE:
                    if clone.documentElement is not None:
                        raise AssertionError
                elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
                    if clone.doctype is not None:
                        raise AssertionError
                    clone.doctype = childclone
                childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED, self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, StringTypes):
raise TypeError, 'node contents must be a string'
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, StringTypes):
raise TypeError, 'node contents must be a string'
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ''
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ''
return a
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
elif not (self._elem_info or self._magic_id_count):
return
else:
stack = self._id_search_stack
if stack is None:
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
return
result = None
while stack:
node = stack.pop()
stack.extend([ child for child in node.childNodes if child.nodeType in _nodeTypes_with_children ])
info = self._get_elem_info(node)
if info:
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr('cannot import document nodes')
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr('cannot import document type nodes')
return _clone_node(node, deep, self)
def writexml(self, writer, indent = '', addindent = '', newl = '', encoding = None):
if encoding is None:
writer.write('<?xml version="1.0" ?>' + newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
return
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr('cannot rename nodes from other documents;\nexpected %s,\nfound %s' % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr('renameNode() only applies to element and attribute nodes')
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if prefix == 'xmlns' and namespaceURI != xml.dom.XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr("illegal use of 'xmlns' prefix")
else:
if name == 'xmlns' and namespaceURI != xml.dom.XMLNS_NAMESPACE and n.nodeType == Node.ATTRIBUTE_NODE:
raise xml.dom.NamespaceErr("illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
d = n.__dict__
d['prefix'] = prefix
d['localName'] = localName
d['namespaceURI'] = namespaceURI
d['nodeName'] = name
if n.nodeType == Node.ELEMENT_NODE:
d['tagName'] = name
else:
d['name'] = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
return n
defproperty(Document, 'documentElement', doc='Top-level element of this document.')
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI, node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target, node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI, node.nodeName)
clone.specified = True
clone.value = node.value
    elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
        assert node.ownerDocument is not newOwnerDocument
        operation = xml.dom.UserDataHandler.NODE_IMPORTED
        clone = newOwnerDocument.implementation.createDocumentType(node.name, node.publicId, node.systemId)
        clone.ownerDocument = newOwnerDocument
        if deep:
            clone.entities._seq = []
            clone.notations._seq = []
            for n in node.notations._seq:
                notation = Notation(n.nodeName, n.publicId, n.systemId)
                notation.ownerDocument = newOwnerDocument
                clone.notations._seq.append(notation)
                if hasattr(n, '_call_user_data_handler'):
                    n._call_user_data_handler(operation, n, notation)
            for e in node.entities._seq:
                entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName)
                entity.actualEncoding = e.actualEncoding
                entity.encoding = e.encoding
                entity.version = e.version
                entity.ownerDocument = newOwnerDocument
                clone.entities._seq.append(entity)
                if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, n, entity)
else:
raise xml.dom.NotSupportedErr('Cannot clone node %s' % repr(node))
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
return None
def _get_StringIO():
from StringIO import StringIO
return StringIO()
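# Helper shared by parse() and parseString(): pull the first event to obtain
# the document node, expand it into a fully built DOM tree, then clear the
# event stream so parser resources are released.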
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser = None, bufsize = None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,), {'parser': parser,
'bufsize': bufsize})
return
def parseString(string, parser = None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,), {'parser': parser})
return
def getDOMImplementation(features = None):
if features:
if isinstance(features, StringTypes):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\xml\dom\minidom.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:06:42 Central Europe (Standard Time)
|
[
"[email protected]"
] | |
81aa4a71b40e0f4171ac987b67ac21022703be28
|
200b310a18514177117cda1d1faed81dbfaa3a3e
|
/devel/.private/bwi_moveit_utils/lib/python2.7/dist-packages/bwi_moveit_utils/srv/_MoveitJointPose.py
|
0bd46260d0c17d725b0ae73147cbfad24f3bb357
|
[] |
no_license
|
YoheiHayamizu/rl_ws
|
c63aedd2dc539bd56398dd19eafe9932bc598040
|
7fdde2f72a3b9cbef585e218d568e8c44c2e374e
|
refs/heads/main
| 2023-08-16T09:29:33.289334 | 2021-10-23T20:59:08 | 2021-10-23T20:59:08 | 420,521,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 70,169 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bwi_moveit_utils/MoveitJointPoseRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import moveit_msgs.msg
import geometry_msgs.msg
import shape_msgs.msg
import object_recognition_msgs.msg
import std_msgs.msg
class MoveitJointPoseRequest(genpy.Message):
_md5sum = "75324f926a5e25d5030db04af6e4578c"
_type = "bwi_moveit_utils/MoveitJointPoseRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64[] target
moveit_msgs/CollisionObject[] collision_objects
moveit_msgs/Constraints constraints
================================================================================
MSG: moveit_msgs/CollisionObject
# a header, used for interpreting the poses
Header header
# the id of the object (name used in MoveIt)
string id
# The object type in a database of known objects
object_recognition_msgs/ObjectType type
# the the collision geometries associated with the object;
# their poses are with respect to the specified header
# solid geometric primitives
shape_msgs/SolidPrimitive[] primitives
geometry_msgs/Pose[] primitive_poses
# meshes
shape_msgs/Mesh[] meshes
geometry_msgs/Pose[] mesh_poses
# bounding planes (equation is specified, but the plane can be oriented using an additional pose)
shape_msgs/Plane[] planes
geometry_msgs/Pose[] plane_poses
# Adds the object to the planning scene. If the object previously existed, it is replaced.
byte ADD=0
# Removes the object from the environment entirely (everything that matches the specified id)
byte REMOVE=1
# Append to an object that already exists in the planning scene. If the does not exist, it is added.
byte APPEND=2
# If an object already exists in the scene, new poses can be sent (the geometry arrays must be left empty)
# if solely moving the object is desired
byte MOVE=3
# Operation to be performed
byte operation
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: object_recognition_msgs/ObjectType
################################################## OBJECT ID #########################################################
# Contains information about the type of a found object. Those two sets of parameters together uniquely define an
# object
# The key of the found object: the unique identifier in the given db
string key
# The db parameters stored as a JSON/compressed YAML string. An object id does not make sense without the corresponding
# database. E.g., in object_recognition, it can look like: "{'type':'CouchDB', 'root':'http://localhost'}"
# There is no conventional format for those parameters and it's nice to keep that flexibility.
# The object_recognition_core as a generic DB type that can read those fields
# Current examples:
# For CouchDB:
# type: 'CouchDB'
# root: 'http://localhost:5984'
# collection: 'object_recognition'
# For SQL household database:
# type: 'SqlHousehold'
# host: 'wgs36'
# port: 5432
# user: 'willow'
# password: 'willow'
# name: 'household_objects'
# module: 'tabletop'
string db
================================================================================
MSG: shape_msgs/SolidPrimitive
# Define box, sphere, cylinder, cone
# All shapes are defined to have their bounding boxes centered around 0,0,0.
uint8 BOX=1
uint8 SPHERE=2
uint8 CYLINDER=3
uint8 CONE=4
# The type of the shape
uint8 type
# The dimensions of the shape
float64[] dimensions
# The meaning of the shape dimensions: each constant defines the index in the 'dimensions' array
# For the BOX type, the X, Y, and Z dimensions are the length of the corresponding
# sides of the box.
uint8 BOX_X=0
uint8 BOX_Y=1
uint8 BOX_Z=2
# For the SPHERE type, only one component is used, and it gives the radius of
# the sphere.
uint8 SPHERE_RADIUS=0
# For the CYLINDER and CONE types, the center line is oriented along
# the Z axis. Therefore the CYLINDER_HEIGHT (CONE_HEIGHT) component
# of dimensions gives the height of the cylinder (cone). The
# CYLINDER_RADIUS (CONE_RADIUS) component of dimensions gives the
# radius of the base of the cylinder (cone). Cone and cylinder
# primitives are defined to be circular. The tip of the cone is
# pointing up, along +Z axis.
uint8 CYLINDER_HEIGHT=0
uint8 CYLINDER_RADIUS=1
uint8 CONE_HEIGHT=0
uint8 CONE_RADIUS=1
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: shape_msgs/Mesh
# Definition of a mesh
# list of triangles; the index values refer to positions in vertices[]
MeshTriangle[] triangles
# the actual vertices that make up the mesh
geometry_msgs/Point[] vertices
================================================================================
MSG: shape_msgs/MeshTriangle
# Definition of a triangle's vertices
uint32[3] vertex_indices
================================================================================
MSG: shape_msgs/Plane
# Representation of a plane, using the plane equation ax + by + cz + d = 0
# a := coef[0]
# b := coef[1]
# c := coef[2]
# d := coef[3]
float64[4] coef
================================================================================
MSG: moveit_msgs/Constraints
# This message contains a list of motion planning constraints.
# All constraints must be satisfied for a goal to be considered valid
string name
JointConstraint[] joint_constraints
PositionConstraint[] position_constraints
OrientationConstraint[] orientation_constraints
VisibilityConstraint[] visibility_constraints
================================================================================
MSG: moveit_msgs/JointConstraint
# Constrain the position of a joint to be within a certain bound
string joint_name
# the bound to be achieved is [position - tolerance_below, position + tolerance_above]
float64 position
float64 tolerance_above
float64 tolerance_below
# A weighting factor for this constraint (denotes relative importance to other constraints. Closer to zero means less important)
float64 weight
================================================================================
MSG: moveit_msgs/PositionConstraint
# This message contains the definition of a position constraint.
Header header
# The robot link this constraint refers to
string link_name
# The offset (in the link frame) for the target point on the link we are planning for
geometry_msgs/Vector3 target_point_offset
# The volume this constraint refers to
BoundingVolume constraint_region
# A weighting factor for this constraint (denotes relative importance to other constraints. Closer to zero means less important)
float64 weight
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: moveit_msgs/BoundingVolume
# Define a volume in 3D
# A set of solid geometric primitives that make up the volume to define (as a union)
shape_msgs/SolidPrimitive[] primitives
# The poses at which the primitives are located
geometry_msgs/Pose[] primitive_poses
# In addition to primitives, meshes can be specified to add to the bounding volume (again, as union)
shape_msgs/Mesh[] meshes
# The poses at which the meshes are located
geometry_msgs/Pose[] mesh_poses
================================================================================
MSG: moveit_msgs/OrientationConstraint
# This message contains the definition of an orientation constraint.
Header header
# The desired orientation of the robot link specified as a quaternion
geometry_msgs/Quaternion orientation
# The robot link this constraint refers to
string link_name
# optional axis-angle error tolerances specified
float64 absolute_x_axis_tolerance
float64 absolute_y_axis_tolerance
float64 absolute_z_axis_tolerance
# A weighting factor for this constraint (denotes relative importance to other constraints. Closer to zero means less important)
float64 weight
================================================================================
MSG: moveit_msgs/VisibilityConstraint
# The constraint is useful to maintain visibility to a disc (the target) in a particular frame.
# This disc forms the base of a visibiliy cone whose tip is at the origin of the sensor.
# Maintaining visibility is done by ensuring the robot does not obstruct the visibility cone.
# Note:
# This constraint does NOT enforce minimum or maximum distances between the sensor
# and the target, nor does it enforce the target to be in the field of view of
# the sensor. A PositionConstraint can (and probably should) be used for such purposes.
# The radius of the disc that should be maintained visible
float64 target_radius
# The pose of the disc; as the robot moves, the pose of the disc may change as well
# This can be in the frame of a particular robot link, for example
geometry_msgs/PoseStamped target_pose
# From the sensor origin towards the target, the disc forms a visibility cone
# This cone is approximated using many sides. For example, when using 4 sides,
# that in fact makes the visibility region be a pyramid.
# This value should always be 3 or more.
int32 cone_sides
# The pose in which visibility is to be maintained.
# The frame id should represent the robot link to which the sensor is attached.
# It is assumed the sensor can look directly at the target, in any direction.
# This assumption is usually not true, but additional PositionConstraints
# can resolve this issue.
geometry_msgs/PoseStamped sensor_pose
# Even though the disc is maintained visible, the visibility cone can be very small
# because of the orientation of the disc with respect to the sensor. It is possible
# for example to view the disk almost from a side, in which case the visibility cone
# can end up having close to 0 volume. The view angle is defined to be the angle between
# the normal to the visibility disc and the direction vector from the sensor origin.
# The value below represents the minimum desired view angle. For a perfect view,
# this value will be 0 (the two vectors are exact opposites). For a completely obstructed view
# this value will be Pi/2 (the vectors are perpendicular). This value defined below
# is the maximum view angle to be maintained. This should be a value in the open interval
# (0, Pi/2). If 0 is set, the view angle is NOT enforced.
float64 max_view_angle
# This angle is used similarly to max_view_angle but limits the maximum angle
# between the sensor origin direction vector and the axis that connects the
# sensor origin to the target frame origin. The value is again in the range (0, Pi/2)
# and is NOT enforced if set to 0.
float64 max_range_angle
# The axis that is assumed to indicate the direction of view for the sensor
# X = 2, Y = 1, Z = 0
uint8 SENSOR_Z=0
uint8 SENSOR_Y=1
uint8 SENSOR_X=2
uint8 sensor_view_direction
# A weighting factor for this constraint (denotes relative importance to other constraints. Closer to zero means less important)
float64 weight
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
"""
__slots__ = ['target','collision_objects','constraints']
_slot_types = ['float64[]','moveit_msgs/CollisionObject[]','moveit_msgs/Constraints']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
target,collision_objects,constraints
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MoveitJointPoseRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.target is None:
self.target = []
if self.collision_objects is None:
self.collision_objects = []
if self.constraints is None:
self.constraints = moveit_msgs.msg.Constraints()
else:
self.target = []
self.collision_objects = []
self.constraints = moveit_msgs.msg.Constraints()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.target)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.target))
length = len(self.collision_objects)
buff.write(_struct_I.pack(length))
for val1 in self.collision_objects:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v3 = val1.type
_x = _v3.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v3.db
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(val1.primitives)
buff.write(_struct_I.pack(length))
for val2 in val1.primitives:
buff.write(_get_struct_B().pack(val2.type))
length = len(val2.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val2.dimensions))
length = len(val1.primitive_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.primitive_poses:
_v4 = val2.position
_x = _v4
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v5 = val2.orientation
_x = _v5
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.meshes)
buff.write(_struct_I.pack(length))
for val2 in val1.meshes:
length = len(val2.triangles)
buff.write(_struct_I.pack(length))
for val3 in val2.triangles:
buff.write(_get_struct_3I().pack(*val3.vertex_indices))
length = len(val2.vertices)
buff.write(_struct_I.pack(length))
for val3 in val2.vertices:
_x = val3
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
length = len(val1.mesh_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.mesh_poses:
_v6 = val2.position
_x = _v6
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v7 = val2.orientation
_x = _v7
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.planes)
buff.write(_struct_I.pack(length))
for val2 in val1.planes:
buff.write(_get_struct_4d().pack(*val2.coef))
length = len(val1.plane_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.plane_poses:
_v8 = val2.position
_x = _v8
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v9 = val2.orientation
_x = _v9
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_b().pack(val1.operation))
_x = self.constraints.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.constraints.joint_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.joint_constraints:
_x = val1.joint_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight))
length = len(self.constraints.position_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.position_constraints:
_v10 = val1.header
buff.write(_get_struct_I().pack(_v10.seq))
_v11 = _v10.stamp
_x = _v11
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v10.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v12 = val1.target_point_offset
_x = _v12
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v13 = val1.constraint_region
length = len(_v13.primitives)
buff.write(_struct_I.pack(length))
for val3 in _v13.primitives:
buff.write(_get_struct_B().pack(val3.type))
length = len(val3.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val3.dimensions))
length = len(_v13.primitive_poses)
buff.write(_struct_I.pack(length))
for val3 in _v13.primitive_poses:
_v14 = val3.position
_x = _v14
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v15 = val3.orientation
_x = _v15
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(_v13.meshes)
buff.write(_struct_I.pack(length))
for val3 in _v13.meshes:
length = len(val3.triangles)
buff.write(_struct_I.pack(length))
for val4 in val3.triangles:
buff.write(_get_struct_3I().pack(*val4.vertex_indices))
length = len(val3.vertices)
buff.write(_struct_I.pack(length))
for val4 in val3.vertices:
_x = val4
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
length = len(_v13.mesh_poses)
buff.write(_struct_I.pack(length))
for val3 in _v13.mesh_poses:
_v16 = val3.position
_x = _v16
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v17 = val3.orientation
_x = _v17
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_d().pack(val1.weight))
length = len(self.constraints.orientation_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.orientation_constraints:
_v18 = val1.header
buff.write(_get_struct_I().pack(_v18.seq))
_v19 = _v18.stamp
_x = _v19
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v18.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v20 = val1.orientation
_x = _v20
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight))
length = len(self.constraints.visibility_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.visibility_constraints:
buff.write(_get_struct_d().pack(val1.target_radius))
_v21 = val1.target_pose
_v22 = _v21.header
buff.write(_get_struct_I().pack(_v22.seq))
_v23 = _v22.stamp
_x = _v23
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v22.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v24 = _v21.pose
_v25 = _v24.position
_x = _v25
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v26 = _v24.orientation
_x = _v26
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_i().pack(val1.cone_sides))
_v27 = val1.sensor_pose
_v28 = _v27.header
buff.write(_get_struct_I().pack(_v28.seq))
_v29 = _v28.stamp
_x = _v29
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v28.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v30 = _v27.pose
_v31 = _v30.position
_x = _v31
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v32 = _v30.orientation
_x = _v32
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1
buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.collision_objects is None:
self.collision_objects = None
if self.constraints is None:
self.constraints = moveit_msgs.msg.Constraints()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.target = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.collision_objects = []
for i in range(0, length):
val1 = moveit_msgs.msg.CollisionObject()
_v33 = val1.header
start = end
end += 4
(_v33.seq,) = _get_struct_I().unpack(str[start:end])
_v34 = _v33.stamp
_x = _v34
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v33.frame_id = str[start:end].decode('utf-8')
else:
_v33.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.id = str[start:end].decode('utf-8')
else:
val1.id = str[start:end]
_v35 = val1.type
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v35.key = str[start:end].decode('utf-8')
else:
_v35.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v35.db = str[start:end].decode('utf-8')
else:
_v35.db = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.primitives = []
for i in range(0, length):
val2 = shape_msgs.msg.SolidPrimitive()
start = end
end += 1
(val2.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val2.dimensions = struct.unpack(pattern, str[start:end])
val1.primitives.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.primitive_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v36 = val2.position
_x = _v36
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v37 = val2.orientation
_x = _v37
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.primitive_poses.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.meshes = []
for i in range(0, length):
val2 = shape_msgs.msg.Mesh()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.triangles = []
for i in range(0, length):
val3 = shape_msgs.msg.MeshTriangle()
start = end
end += 12
val3.vertex_indices = _get_struct_3I().unpack(str[start:end])
val2.triangles.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.vertices = []
for i in range(0, length):
val3 = geometry_msgs.msg.Point()
_x = val3
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
val2.vertices.append(val3)
val1.meshes.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.mesh_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v38 = val2.position
_x = _v38
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v39 = val2.orientation
_x = _v39
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.mesh_poses.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.planes = []
for i in range(0, length):
val2 = shape_msgs.msg.Plane()
start = end
end += 32
val2.coef = _get_struct_4d().unpack(str[start:end])
val1.planes.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.plane_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v40 = val2.position
_x = _v40
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v41 = val2.orientation
_x = _v41
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.plane_poses.append(val2)
start = end
end += 1
(val1.operation,) = _get_struct_b().unpack(str[start:end])
self.collision_objects.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.constraints.name = str[start:end].decode('utf-8')
else:
self.constraints.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.joint_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.JointConstraint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.joint_name = str[start:end].decode('utf-8')
else:
val1.joint_name = str[start:end]
_x = val1
start = end
end += 32
(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end])
self.constraints.joint_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.position_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.PositionConstraint()
_v42 = val1.header
start = end
end += 4
(_v42.seq,) = _get_struct_I().unpack(str[start:end])
_v43 = _v42.stamp
_x = _v43
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v42.frame_id = str[start:end].decode('utf-8')
else:
_v42.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.link_name = str[start:end].decode('utf-8')
else:
val1.link_name = str[start:end]
_v44 = val1.target_point_offset
_x = _v44
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v45 = val1.constraint_region
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v45.primitives = []
for i in range(0, length):
val3 = shape_msgs.msg.SolidPrimitive()
start = end
end += 1
(val3.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val3.dimensions = struct.unpack(pattern, str[start:end])
_v45.primitives.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v45.primitive_poses = []
for i in range(0, length):
val3 = geometry_msgs.msg.Pose()
_v46 = val3.position
_x = _v46
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v47 = val3.orientation
_x = _v47
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_v45.primitive_poses.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v45.meshes = []
for i in range(0, length):
val3 = shape_msgs.msg.Mesh()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val3.triangles = []
for i in range(0, length):
val4 = shape_msgs.msg.MeshTriangle()
start = end
end += 12
val4.vertex_indices = _get_struct_3I().unpack(str[start:end])
val3.triangles.append(val4)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val3.vertices = []
for i in range(0, length):
val4 = geometry_msgs.msg.Point()
_x = val4
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
val3.vertices.append(val4)
_v45.meshes.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v45.mesh_poses = []
for i in range(0, length):
val3 = geometry_msgs.msg.Pose()
_v48 = val3.position
_x = _v48
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v49 = val3.orientation
_x = _v49
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_v45.mesh_poses.append(val3)
start = end
end += 8
(val1.weight,) = _get_struct_d().unpack(str[start:end])
self.constraints.position_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.orientation_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.OrientationConstraint()
_v50 = val1.header
start = end
end += 4
(_v50.seq,) = _get_struct_I().unpack(str[start:end])
_v51 = _v50.stamp
_x = _v51
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v50.frame_id = str[start:end].decode('utf-8')
else:
_v50.frame_id = str[start:end]
_v52 = val1.orientation
_x = _v52
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.link_name = str[start:end].decode('utf-8')
else:
val1.link_name = str[start:end]
_x = val1
start = end
end += 32
(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end])
self.constraints.orientation_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.visibility_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.VisibilityConstraint()
start = end
end += 8
(val1.target_radius,) = _get_struct_d().unpack(str[start:end])
_v53 = val1.target_pose
_v54 = _v53.header
start = end
end += 4
(_v54.seq,) = _get_struct_I().unpack(str[start:end])
_v55 = _v54.stamp
_x = _v55
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v54.frame_id = str[start:end].decode('utf-8')
else:
_v54.frame_id = str[start:end]
_v56 = _v53.pose
_v57 = _v56.position
_x = _v57
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v58 = _v56.orientation
_x = _v58
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 4
(val1.cone_sides,) = _get_struct_i().unpack(str[start:end])
_v59 = val1.sensor_pose
_v60 = _v59.header
start = end
end += 4
(_v60.seq,) = _get_struct_I().unpack(str[start:end])
_v61 = _v60.stamp
_x = _v61
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v60.frame_id = str[start:end].decode('utf-8')
else:
_v60.frame_id = str[start:end]
_v62 = _v59.pose
_v63 = _v62.position
_x = _v63
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v64 = _v62.orientation
_x = _v64
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_x = val1
start = end
end += 25
(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end])
self.constraints.visibility_constraints.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.target)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.target.tostring())
length = len(self.collision_objects)
buff.write(_struct_I.pack(length))
for val1 in self.collision_objects:
_v65 = val1.header
buff.write(_get_struct_I().pack(_v65.seq))
_v66 = _v65.stamp
_x = _v66
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v65.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v67 = val1.type
_x = _v67.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v67.db
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(val1.primitives)
buff.write(_struct_I.pack(length))
for val2 in val1.primitives:
buff.write(_get_struct_B().pack(val2.type))
length = len(val2.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val2.dimensions.tostring())
length = len(val1.primitive_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.primitive_poses:
_v68 = val2.position
_x = _v68
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v69 = val2.orientation
_x = _v69
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.meshes)
buff.write(_struct_I.pack(length))
for val2 in val1.meshes:
length = len(val2.triangles)
buff.write(_struct_I.pack(length))
for val3 in val2.triangles:
buff.write(val3.vertex_indices.tostring())
length = len(val2.vertices)
buff.write(_struct_I.pack(length))
for val3 in val2.vertices:
_x = val3
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
length = len(val1.mesh_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.mesh_poses:
_v70 = val2.position
_x = _v70
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v71 = val2.orientation
_x = _v71
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(val1.planes)
buff.write(_struct_I.pack(length))
for val2 in val1.planes:
buff.write(val2.coef.tostring())
length = len(val1.plane_poses)
buff.write(_struct_I.pack(length))
for val2 in val1.plane_poses:
_v72 = val2.position
_x = _v72
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v73 = val2.orientation
_x = _v73
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_b().pack(val1.operation))
_x = self.constraints.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.constraints.joint_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.joint_constraints:
_x = val1.joint_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight))
length = len(self.constraints.position_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.position_constraints:
_v74 = val1.header
buff.write(_get_struct_I().pack(_v74.seq))
_v75 = _v74.stamp
_x = _v75
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v74.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v76 = val1.target_point_offset
_x = _v76
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v77 = val1.constraint_region
length = len(_v77.primitives)
buff.write(_struct_I.pack(length))
for val3 in _v77.primitives:
buff.write(_get_struct_B().pack(val3.type))
length = len(val3.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val3.dimensions.tostring())
length = len(_v77.primitive_poses)
buff.write(_struct_I.pack(length))
for val3 in _v77.primitive_poses:
_v78 = val3.position
_x = _v78
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v79 = val3.orientation
_x = _v79
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
length = len(_v77.meshes)
buff.write(_struct_I.pack(length))
for val3 in _v77.meshes:
length = len(val3.triangles)
buff.write(_struct_I.pack(length))
for val4 in val3.triangles:
buff.write(val4.vertex_indices.tostring())
length = len(val3.vertices)
buff.write(_struct_I.pack(length))
for val4 in val3.vertices:
_x = val4
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
length = len(_v77.mesh_poses)
buff.write(_struct_I.pack(length))
for val3 in _v77.mesh_poses:
_v80 = val3.position
_x = _v80
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v81 = val3.orientation
_x = _v81
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_d().pack(val1.weight))
length = len(self.constraints.orientation_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.orientation_constraints:
_v82 = val1.header
buff.write(_get_struct_I().pack(_v82.seq))
_v83 = _v82.stamp
_x = _v83
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v82.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v84 = val1.orientation
_x = _v84
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight))
length = len(self.constraints.visibility_constraints)
buff.write(_struct_I.pack(length))
for val1 in self.constraints.visibility_constraints:
buff.write(_get_struct_d().pack(val1.target_radius))
_v85 = val1.target_pose
_v86 = _v85.header
buff.write(_get_struct_I().pack(_v86.seq))
_v87 = _v86.stamp
_x = _v87
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v86.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v88 = _v85.pose
_v89 = _v88.position
_x = _v89
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v90 = _v88.orientation
_x = _v90
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
buff.write(_get_struct_i().pack(val1.cone_sides))
_v91 = val1.sensor_pose
_v92 = _v91.header
buff.write(_get_struct_I().pack(_v92.seq))
_v93 = _v92.stamp
_x = _v93
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v92.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v94 = _v91.pose
_v95 = _v94.position
_x = _v95
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v96 = _v94.orientation
_x = _v96
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
_x = val1
buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.collision_objects is None:
self.collision_objects = None
if self.constraints is None:
self.constraints = moveit_msgs.msg.Constraints()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.collision_objects = []
for i in range(0, length):
val1 = moveit_msgs.msg.CollisionObject()
_v97 = val1.header
start = end
end += 4
(_v97.seq,) = _get_struct_I().unpack(str[start:end])
_v98 = _v97.stamp
_x = _v98
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v97.frame_id = str[start:end].decode('utf-8')
else:
_v97.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.id = str[start:end].decode('utf-8')
else:
val1.id = str[start:end]
_v99 = val1.type
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v99.key = str[start:end].decode('utf-8')
else:
_v99.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v99.db = str[start:end].decode('utf-8')
else:
_v99.db = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.primitives = []
for i in range(0, length):
val2 = shape_msgs.msg.SolidPrimitive()
start = end
end += 1
(val2.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val2.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
val1.primitives.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.primitive_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v100 = val2.position
_x = _v100
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v101 = val2.orientation
_x = _v101
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.primitive_poses.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.meshes = []
for i in range(0, length):
val2 = shape_msgs.msg.Mesh()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.triangles = []
for i in range(0, length):
val3 = shape_msgs.msg.MeshTriangle()
start = end
end += 12
val3.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)
val2.triangles.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.vertices = []
for i in range(0, length):
val3 = geometry_msgs.msg.Point()
_x = val3
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
val2.vertices.append(val3)
val1.meshes.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.mesh_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v102 = val2.position
_x = _v102
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v103 = val2.orientation
_x = _v103
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.mesh_poses.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.planes = []
for i in range(0, length):
val2 = shape_msgs.msg.Plane()
start = end
end += 32
val2.coef = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4)
val1.planes.append(val2)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.plane_poses = []
for i in range(0, length):
val2 = geometry_msgs.msg.Pose()
_v104 = val2.position
_x = _v104
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v105 = val2.orientation
_x = _v105
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
val1.plane_poses.append(val2)
start = end
end += 1
(val1.operation,) = _get_struct_b().unpack(str[start:end])
self.collision_objects.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.constraints.name = str[start:end].decode('utf-8')
else:
self.constraints.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.joint_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.JointConstraint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.joint_name = str[start:end].decode('utf-8')
else:
val1.joint_name = str[start:end]
_x = val1
start = end
end += 32
(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end])
self.constraints.joint_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.position_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.PositionConstraint()
_v106 = val1.header
start = end
end += 4
(_v106.seq,) = _get_struct_I().unpack(str[start:end])
_v107 = _v106.stamp
_x = _v107
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v106.frame_id = str[start:end].decode('utf-8')
else:
_v106.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.link_name = str[start:end].decode('utf-8')
else:
val1.link_name = str[start:end]
_v108 = val1.target_point_offset
_x = _v108
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v109 = val1.constraint_region
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v109.primitives = []
for i in range(0, length):
val3 = shape_msgs.msg.SolidPrimitive()
start = end
end += 1
(val3.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val3.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
_v109.primitives.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v109.primitive_poses = []
for i in range(0, length):
val3 = geometry_msgs.msg.Pose()
_v110 = val3.position
_x = _v110
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v111 = val3.orientation
_x = _v111
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_v109.primitive_poses.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v109.meshes = []
for i in range(0, length):
val3 = shape_msgs.msg.Mesh()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val3.triangles = []
for i in range(0, length):
val4 = shape_msgs.msg.MeshTriangle()
start = end
end += 12
val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)
val3.triangles.append(val4)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val3.vertices = []
for i in range(0, length):
val4 = geometry_msgs.msg.Point()
_x = val4
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
val3.vertices.append(val4)
_v109.meshes.append(val3)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v109.mesh_poses = []
for i in range(0, length):
val3 = geometry_msgs.msg.Pose()
_v112 = val3.position
_x = _v112
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v113 = val3.orientation
_x = _v113
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_v109.mesh_poses.append(val3)
start = end
end += 8
(val1.weight,) = _get_struct_d().unpack(str[start:end])
self.constraints.position_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.orientation_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.OrientationConstraint()
_v114 = val1.header
start = end
end += 4
(_v114.seq,) = _get_struct_I().unpack(str[start:end])
_v115 = _v114.stamp
_x = _v115
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v114.frame_id = str[start:end].decode('utf-8')
else:
_v114.frame_id = str[start:end]
_v116 = val1.orientation
_x = _v116
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.link_name = str[start:end].decode('utf-8')
else:
val1.link_name = str[start:end]
_x = val1
start = end
end += 32
(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end])
self.constraints.orientation_constraints.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.constraints.visibility_constraints = []
for i in range(0, length):
val1 = moveit_msgs.msg.VisibilityConstraint()
start = end
end += 8
(val1.target_radius,) = _get_struct_d().unpack(str[start:end])
_v117 = val1.target_pose
_v118 = _v117.header
start = end
end += 4
(_v118.seq,) = _get_struct_I().unpack(str[start:end])
_v119 = _v118.stamp
_x = _v119
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v118.frame_id = str[start:end].decode('utf-8')
else:
_v118.frame_id = str[start:end]
_v120 = _v117.pose
_v121 = _v120.position
_x = _v121
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v122 = _v120.orientation
_x = _v122
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 4
(val1.cone_sides,) = _get_struct_i().unpack(str[start:end])
_v123 = val1.sensor_pose
_v124 = _v123.header
start = end
end += 4
(_v124.seq,) = _get_struct_I().unpack(str[start:end])
_v125 = _v124.stamp
_x = _v125
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v124.frame_id = str[start:end].decode('utf-8')
else:
_v124.frame_id = str[start:end]
_v126 = _v123.pose
_v127 = _v126.position
_x = _v127
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v128 = _v126.orientation
_x = _v128
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
_x = val1
start = end
end += 25
(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end])
self.constraints.visibility_constraints.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_b = None
def _get_struct_b():
global _struct_b
if _struct_b is None:
_struct_b = struct.Struct("<b")
return _struct_b
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_2dBd = None
def _get_struct_2dBd():
global _struct_2dBd
if _struct_2dBd is None:
_struct_2dBd = struct.Struct("<2dBd")
return _struct_2dBd
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bwi_moveit_utils/MoveitJointPoseResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MoveitJointPoseResponse(genpy.Message):
_md5sum = "9e58faa17a4877a1efc0e251b503c53b"
_type = "bwi_moveit_utils/MoveitJointPoseResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool completed
"""
__slots__ = ['completed']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
completed
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MoveitJointPoseResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.completed is None:
self.completed = False
else:
self.completed = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.completed))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.completed,) = _get_struct_B().unpack(str[start:end])
self.completed = bool(self.completed)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.completed))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.completed,) = _get_struct_B().unpack(str[start:end])
self.completed = bool(self.completed)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class MoveitJointPose(object):
_type = 'bwi_moveit_utils/MoveitJointPose'
_md5sum = '63f4641487b3d9ad59149152e0942f80'
_request_class = MoveitJointPoseRequest
_response_class = MoveitJointPoseResponse
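# ---------------------------------------------------------------------------
# Illustrative usage sketch (hedged, not part of the generated file): a client
# node might call this service roughly as below with rospy. The service name
# "/moveit_joint_pose" and the request field are assumptions; check how
# bwi_moveit_utils actually advertises the service and what
# MoveitJointPoseRequest contains before relying on them.
#
#   import rospy
#   from bwi_moveit_utils.srv import MoveitJointPose
#
#   rospy.wait_for_service('/moveit_joint_pose')               # assumed name
#   call_joint_pose = rospy.ServiceProxy('/moveit_joint_pose', MoveitJointPose)
#   response = call_joint_pose(target_joints)                  # assumed field
#   rospy.loginfo("completed: %s", response.completed)
# ---------------------------------------------------------------------------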
|
[
"[email protected]"
] | |
3d7128ad8c2784853ee5a33f3ae54cdc77d44044
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02778/s805881510.py
|
4e1cfd4d744d78cdc97b17f641d6bcb6444548dc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 121 |
py
|
S = input()
chars = list(S)  # avoid shadowing the built-in `list`
a = len(chars)
answer = []
s = 0
while s < a:
    answer.append('x')
    s = s + 1
y = ''.join(answer)
print(y)
|
[
"[email protected]"
] | |
04aa06f4bac7b7bbb2b7030e09e1f27f1ed8fde4
|
6206ad73052b5ff1b6690c225f000f9c31aa4ff7
|
/Code/Reshape the Matrix.py
|
360911bc17af54a593ab369ee1c177df5d68cbb7
|
[] |
no_license
|
mws19901118/Leetcode
|
7f9e3694cb8f0937d82b6e1e12127ce5073f4df0
|
752ac00bea40be1e3794d80aa7b2be58c0a548f6
|
refs/heads/master
| 2023-09-01T10:35:52.389899 | 2023-09-01T03:37:22 | 2023-09-01T03:37:22 | 21,467,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,005 |
py
|
class Solution:
def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:
m, n = len(mat), len(mat[0]) #Get dimensions.
if m * n != r * c: #If total number of elements do not match, cannot reshape so return mat.
return mat
reshape = [[0 for _ in range(c)] for _ in range(r)] #Initialize the reshape matrix.
row, col = 0, 0 #Initialize the pointer to current row and column of pointer traversing reshape matrix.
for i in range(m): #Traverse mat.
for j in range(n):
reshape[row][col] = mat[i][j] #Set the current element of reshape to current element of mat.
row += (col + 1) // c #Update row.
col = (col + 1) % c #Update col.
return reshape
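# Illustrative usage sketch (not part of the original submission). LeetCode
# injects the List annotation; run locally it would need the typing import.
#
#   from typing import List
#   s = Solution()
#   print(s.matrixReshape([[1, 2], [3, 4]], 1, 4))  # -> [[1, 2, 3, 4]]
#   print(s.matrixReshape([[1, 2], [3, 4]], 2, 4))  # 2*2 != 2*4, input returned unchanged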
|
[
"[email protected]"
] | |
e38ae86f9b1b30b7cde74169a92408df5d3a5d51
|
8ce2ef401bfa8a7edc075f30671ceb7e12001566
|
/tensorflow/contrib/distribute/python/estimator_training_test.py
|
018512ae5a22eaa7fb78a8c4e5918fec22eb8178
|
[
"Apache-2.0"
] |
permissive
|
TomZRoid/tensorflow
|
e8167a31dcd707279365c8ee5ec283c00edaafba
|
89390faf68c153ef8bea0e20ba128c0d54cee0e0
|
refs/heads/master
| 2020-03-30T22:38:50.662448 | 2018-11-08T06:25:34 | 2018-11-08T06:25:34 | 151,673,686 | 2 | 0 |
Apache-2.0
| 2018-10-05T05:15:45 | 2018-10-05T05:15:44 | null |
UTF-8
|
Python
| false | false | 23,226 |
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that show Distribute Coordinator works with Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import glob
import json
import os
import sys
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import estimator_training as dc_training
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.eager import context
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.estimator import training as estimator_training
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
BATCH_SIZE = 10
LABEL_DIMENSION = 2
DATA = np.linspace(
0., 2., BATCH_SIZE * LABEL_DIMENSION, dtype=np.float32).reshape(
BATCH_SIZE, LABEL_DIMENSION)
EVAL_NAME = "foo"
EXPORTER_NAME = "saved_model_exporter"
MAX_STEPS = 10
CHIEF = dc._TaskType.CHIEF
EVALUATOR = dc._TaskType.EVALUATOR
WORKER = dc._TaskType.WORKER
PS = dc._TaskType.PS
original_run_std_server = dc._run_std_server
class MockOsEnv(dict):
def __init__(self, *args):
self._thread_local = threading.local()
super(MockOsEnv, self).__init__(*args)
def get(self, key, default):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.get(self._thread_local.dict, key, default)
else:
return dict.get(self, key, default)
def __getitem__(self, key):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.__getitem__(self._thread_local.dict, key)
else:
return dict.__getitem__(self, key)
def __setitem__(self, key, val):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.__setitem__(self._thread_local.dict, key, val)
else:
return dict.__setitem__(self, key, val)
class DistributeCoordinatorIntegrationTest(test.TestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2, has_eval=True)
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._mock_os_env = MockOsEnv()
self._mock_context = test.mock.patch.object(os, "environ",
self._mock_os_env)
super(DistributeCoordinatorIntegrationTest, self).setUp()
self._mock_context.__enter__()
def tearDown(self):
self._mock_context.__exit__(None, None, None)
super(DistributeCoordinatorIntegrationTest, self).tearDown()
def dataset_input_fn(self, x, y, batch_size, shuffle):
def input_fn():
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
if shuffle:
dataset = dataset.shuffle(batch_size)
dataset = dataset.repeat(100).batch(batch_size)
return dataset
return input_fn
def _get_exporter(self, name, fc):
feature_spec = feature_column.make_parse_example_spec(fc)
serving_input_receiver_fn = (
export_lib.build_parsing_serving_input_receiver_fn(feature_spec))
return exporter_lib.LatestExporter(
name, serving_input_receiver_fn=serving_input_receiver_fn)
def _extract_loss_and_global_step(self, event_folder):
"""Returns the loss and global step in last event."""
event_paths = glob.glob(os.path.join(event_folder, "events*"))
self.assertGreater(len(event_paths), 0,
msg="Event file not found in dir %s" % event_folder)
loss = None
global_step_count = None
for e in summary_iterator.summary_iterator(event_paths[-1]):
current_loss = None
for v in e.summary.value:
if v.tag == "loss":
current_loss = v.simple_value
# If loss is not found, global step is meaningless.
if current_loss is None:
continue
current_global_step = e.step
if global_step_count is None or current_global_step > global_step_count:
global_step_count = current_global_step
loss = current_loss
return (loss, global_step_count)
def _get_estimator(self,
train_distribute,
eval_distribute,
remote_cluster=None):
input_dimension = LABEL_DIMENSION
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
return dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=LABEL_DIMENSION,
model_dir=self._model_dir,
dnn_optimizer=adagrad.AdagradOptimizer(0.001),
linear_optimizer=adagrad.AdagradOptimizer(0.001),
config=run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=train_distribute,
eval_distribute=eval_distribute,
remote_cluster=remote_cluster)))
def _complete_flow(self,
train_distribute,
eval_distribute,
remote_cluster=None,
use_train_and_evaluate=True):
estimator = self._get_estimator(train_distribute, eval_distribute,
remote_cluster)
input_dimension = LABEL_DIMENSION
train_input_fn = self.dataset_input_fn(
x={"x": DATA},
y=DATA,
batch_size=BATCH_SIZE // len(train_distribute.worker_devices),
shuffle=True)
if eval_distribute:
eval_batch_size = BATCH_SIZE // len(eval_distribute.worker_devices)
else:
eval_batch_size = BATCH_SIZE
eval_input_fn = self.dataset_input_fn(
x={"x": DATA}, y=DATA, batch_size=eval_batch_size, shuffle=False)
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
feature_columns = linear_feature_columns + dnn_feature_columns
eval_spec = estimator_training.EvalSpec(
name=EVAL_NAME,
input_fn=eval_input_fn,
steps=None,
exporters=self._get_exporter(EXPORTER_NAME, feature_columns),
start_delay_secs=0,
throttle_secs=1)
if use_train_and_evaluate:
estimator_training.train_and_evaluate(
estimator,
estimator_training.TrainSpec(train_input_fn, max_steps=MAX_STEPS),
eval_spec)
else:
estimator.train(train_input_fn, max_steps=MAX_STEPS)
latest_ckpt_path = estimator.latest_checkpoint()
metrics = estimator.evaluate(eval_input_fn,
checkpoint_path=latest_ckpt_path,
name=EVAL_NAME)
# Export the eval result to files.
eval_result = estimator_training._EvalResult(
status=estimator_training._EvalStatus.EVALUATED,
metrics=metrics,
checkpoint_path=latest_ckpt_path)
evaluator = estimator_training._TrainingExecutor._Evaluator(estimator,
eval_spec,
None)
evaluator._export_eval_result(eval_result, True)
return estimator
def _inspect_train_and_eval_events(self, estimator):
# Make sure nothing is stuck in limbo.
writer_cache.FileWriterCache.clear()
# Examine the training events. Use a range to check global step to avoid
# flakyness due to global step race condition.
training_loss, _ = self._extract_loss_and_global_step(self._model_dir)
self.assertIsNotNone(training_loss)
# Examine the eval events. The global step should be accurate.
eval_dir = os.path.join(self._model_dir, "eval_" + EVAL_NAME)
eval_loss, eval_global_step = self._extract_loss_and_global_step(
event_folder=eval_dir)
self.assertIsNotNone(eval_loss)
self.assertGreaterEqual(eval_global_step, MAX_STEPS)
# Examine the export folder.
export_dir = os.path.join(
os.path.join(self._model_dir, "export"), EXPORTER_NAME)
self.assertTrue(gfile.Exists(export_dir))
# Examine the ckpt for predict.
def predict_input_fn():
return dataset_ops.Dataset.from_tensor_slices({
"x": DATA
}).batch(BATCH_SIZE)
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in estimator.predict(predict_input_fn)
])
self.assertAllEqual((BATCH_SIZE, LABEL_DIMENSION), predicted_proba.shape)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
collective_all_reduce_strategy.CollectiveAllReduceStrategy,
mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy,
],
required_gpus=[0, 1]))
def test_complete_flow_standalone_client(self, train_distribute_cls,
eval_distribute_cls):
try:
train_distribute = train_distribute_cls(num_gpus=context.num_gpus())
except TypeError:
train_distribute = train_distribute_cls(num_gpus_per_worker=2)
if eval_distribute_cls:
eval_distribute = eval_distribute_cls(
num_gpus_per_worker=context.num_gpus())
else:
eval_distribute = None
cluster_spec = copy.deepcopy(self._cluster_spec)
if (train_distribute_cls !=
parameter_server_strategy.ParameterServerStrategy):
cluster_spec.pop("ps", None)
estimator = self._complete_flow(train_distribute, eval_distribute,
cluster_spec)
self._inspect_train_and_eval_events(estimator)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
mirrored_strategy.MirroredStrategy,
],
eval_distribute_cls=[
None,
mirrored_strategy.MirroredStrategy,
],
required_gpus=[0, 1]))
def test_estimator_standalone_client(self, train_distribute_cls,
eval_distribute_cls):
train_distribute = train_distribute_cls(
num_gpus_per_worker=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls(
num_gpus_per_worker=context.num_gpus())
else:
eval_distribute = None
# We use the whole cluster for evaluation.
cluster = copy.deepcopy(self._cluster_spec)
cluster.pop("evaluator", None)
estimator = self._complete_flow(
train_distribute, eval_distribute, remote_cluster=cluster,
use_train_and_evaluate=False)
self._inspect_train_and_eval_events(estimator)
def _mock_run_std_server(self, *args, **kwargs):
ret = original_run_std_server(*args, **kwargs)
# Wait for all std servers to be brought up in order to reduce the chance of
# remote sessions taking local ports that have been assigned to std servers.
self._barrier.wait()
return ret
def _task_thread(self, train_distribute, eval_distribute, tf_config):
os.environ["TF_CONFIG"] = json.dumps(tf_config)
with test.mock.patch.object(dc, "_run_std_server",
self._mock_run_std_server):
self._complete_flow(train_distribute, eval_distribute)
def _run_task_in_thread(self, cluster_spec, task_type, task_id,
train_distribute, eval_distribute):
if task_type:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
else:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
t = threading.Thread(
target=self._task_thread,
args=(train_distribute, eval_distribute, tf_config))
t.start()
return t
def _run_multiple_tasks_in_threads(self, cluster_spec, train_distribute,
eval_distribute):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_task_in_thread(cluster_spec, task_type, task_id,
train_distribute, eval_distribute)
threads[task_type].append(t)
return threads
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
collective_all_reduce_strategy.CollectiveAllReduceStrategy,
parameter_server_strategy.ParameterServerStrategy,
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy,
],
required_gpus=[0, 1]))
def test_complete_flow_indepedent_worker_between_graph(
self, train_distribute_cls, eval_distribute_cls):
train_distribute = train_distribute_cls(
num_gpus_per_worker=context.num_gpus())
if (context.num_gpus() < 2 and eval_distribute_cls ==
collective_all_reduce_strategy.CollectiveAllReduceStrategy):
self.skipTest("`CollectiveAllReduceStrategy` needs at least two towers.")
if eval_distribute_cls:
eval_distribute = eval_distribute_cls(
num_gpus_per_worker=context.num_gpus())
else:
eval_distribute = None
if (train_distribute_cls == parameter_server_strategy
.ParameterServerStrategy):
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=3, num_ps=2, has_eval=True)
# 3 workers, 2 ps and 1 evaluator.
self._barrier = dc._Barrier(6)
else:
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=3, num_ps=0, has_eval=True)
# 3 workers and 1 evaluator.
self._barrier = dc._Barrier(4)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
for task_type, ts in threads.items():
if task_type == PS:
continue
for t in ts:
t.join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[mirrored_strategy.MirroredStrategy],
eval_distribute_cls=[None, mirrored_strategy.MirroredStrategy],
required_gpus=[0, 1]))
def test_complete_flow_indepedent_worker_in_graph(self, train_distribute_cls,
eval_distribute_cls):
train_distribute = train_distribute_cls(
num_gpus_per_worker=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls(
num_gpus_per_worker=context.num_gpus())
else:
eval_distribute = None
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=3, num_ps=0, has_eval=True)
# 3 workers and 1 evaluator.
self._barrier = dc._Barrier(4)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
threads[WORKER][0].join()
threads[EVALUATOR][0].join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
TF_CONFIG_WITH_CHIEF = {
"cluster": {
"chief": ["fake_chief"],
},
"task": {
"type": "chief",
"index": 0
}
}
TF_CONFIG_WITH_MASTER = {
"cluster": {
"master": ["fake_master"],
},
"task": {
"type": "master",
"index": 0
}
}
TF_CONFIG_WITHOUT_TASK = {"cluster": {"chief": ["fake_worker"]}}
class RunConfigTest(test.TestCase):
def test_previously_unexpected_cluster_spec(self):
with test.mock.patch.dict(
"os.environ", {"TF_CONFIG": json.dumps(TF_CONFIG_WITHOUT_TASK)}):
run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
def test_should_run_distribute_coordinator(self):
"""Tests that should_run_distribute_coordinator return a correct value."""
# We don't use distribute coordinator for local training.
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config_with_train_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
config_with_eval_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertTrue(
dc_training.should_run_distribute_coordinator(
config_with_train_distribute))
self.assertFalse(
dc_training.should_run_distribute_coordinator(
config_with_eval_distribute))
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertFalse(dc_training.should_run_distribute_coordinator(config))
def test_init_run_config_duplicate_distribute(self):
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy()))
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy()))
def test_init_run_config_none_distribute_coordinator_mode(self):
# We don't use distribute coordinator for local training.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
dc_training.init_run_config(config, {})
self.assertIsNone(config._distribute_coordinator_mode)
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertIsNone(config._distribute_coordinator_mode)
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig()
self.assertFalse(hasattr(config, "_distribute_coordinator_mode"))
def test_init_run_config_independent_worker(self):
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator with INDEPENDENT_WORKER mode.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.INDEPENDENT_WORKER)
def test_init_run_config_standalone_client(self):
# When `train_distribute` is specified, TF_CONFIG is detected and
# `experimental.remote_cluster` is set use distribute coordinator with
# STANDALONE_CLIENT mode.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
remote_cluster={"chief": ["fake_worker"]}))
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.STANDALONE_CLIENT)
if __name__ == "__main__":
with test.mock.patch.object(sys, "exit", os._exit):
test.main()
|
[
"[email protected]"
] | |
778b02ce1304dfc64f45b5c82dc3ad7d820143a9
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_job_operation_results_operations.py
|
669fb507126bbbd85b124e09a62c97d867745408
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 |
MIT
| 2019-07-25T22:28:52 | 2019-04-19T20:59:15 |
Python
|
UTF-8
|
Python
| false | false | 4,956 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._job_operation_results_operations import build_get_request
from .._vendor import MixinABC
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobOperationResultsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
:attr:`job_operation_results` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get( # pylint: disable=inconsistent-return-statements
self,
vault_name: str,
resource_group_name: str,
job_name: str,
operation_id: str,
**kwargs: Any
) -> None:
"""Fetches the result of any operation.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param job_name: Job name whose operation result has to be fetched.
:type job_name: str
:param operation_id: OperationID which represents the operation whose result has to be fetched.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
job_name=job_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs/{jobName}/operationResults/{operationId}"} # type: ignore
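# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only, not generated by AutoRest): per the
# class docstring, this operation is reached through the async client's
# `job_operation_results` attribute. The client construction and the
# vault/resource group/job names and operation id below are placeholders:
#
#   client = RecoveryServicesBackupClient(credential, subscription_id)
#   await client.job_operation_results.get(
#       vault_name="myVault",
#       resource_group_name="myResourceGroup",
#       job_name="myJob",
#       operation_id="00000000-0000-0000-0000-000000000000",
#   )
# ---------------------------------------------------------------------------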
|
[
"[email protected]"
] | |
aa9712bce0f3ef3690bb9a42fab492d7fae1aaf2
|
5332fef91e044555e605bb37cbef7c4afeaaadb0
|
/hy-data-analysis-with-python-spring-2019-OLD/part01-e04_multiplication_table/src/multiplication_table.py
|
aa3628c78721753935ebdfc0a18b3e1524ce97a2
|
[] |
no_license
|
nopomi/hy-data-analysis-python-2019
|
f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8
|
464685cb377cfdeee890a008fbfbd9ed6e3bcfd0
|
refs/heads/master
| 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 191 |
py
|
#!/usr/bin/env python3
def main():
for i in range(1,11):
for j in range(1,11):
print(str(i*j)+'\t', end="")
print('')
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
24a21563e8b0f54b4f865519a0221f033b7f345c
|
2ab759b4796e36cee89df3718b7042eb527e4b26
|
/args.py
|
2ad011c5c0ff5f89e7e555c02e1b52b469c6fbe4
|
[
"Apache-2.0"
] |
permissive
|
zhangjunjieGit/bert-utils
|
b78d289effedfa64716219b771a370c410f3d066
|
7142632ea6b2e6656a2873a60971dbf7330f9550
|
refs/heads/master
| 2020-08-22T06:47:33.103393 | 2019-10-20T09:58:54 | 2019-10-20T09:59:05 | 216,340,794 | 1 | 0 |
Apache-2.0
| 2019-10-20T10:00:53 | 2019-10-20T10:00:53 | null |
UTF-8
|
Python
| false | false | 748 |
py
|
import os
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
file_path = os.path.dirname(__file__)
model_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12/')
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
output_dir = os.path.join(model_dir, '../tmp/result/')
vocab_file = os.path.join(model_dir, 'vocab.txt')
data_dir = os.path.join(model_dir, '../data/')
num_train_epochs = 10
batch_size = 128
learning_rate = 0.00005
# GPU memory usage fraction
gpu_memory_fraction = 0.8
# By default, take the output of the second-to-last layer as the sentence vector
layer_indexes = [-2]
# Maximum sequence length; for single short texts it is recommended to lower this value
max_seq_len = 5
# graph file name
graph_file = 'tmp/result/graph'
|
[
"[email protected]"
] | |
399eeab0b7aac90ad1fbae0cecd4ca49be31ec81
|
7437b9482592119dd6b4fc78706fed1c7c7df89f
|
/modules/neighbour_covariance_op.py
|
520f8756b84359cebb69699864b5c50b00d2df88
|
[
"BSD-3-Clause"
] |
permissive
|
cms-pepr/HGCalML
|
163c5f2b0ca1079003628b5a53a4aee2305fb3e9
|
d28477501d93992d25c22b7d5c355a3da3bffa5c
|
refs/heads/master
| 2023-08-31T03:33:31.294369 | 2023-05-10T15:51:59 | 2023-05-10T15:51:59 | 236,987,707 | 11 | 13 |
BSD-3-Clause
| 2023-08-08T12:43:50 | 2020-01-29T13:25:18 |
Python
|
UTF-8
|
Python
| false | false | 1,973 |
py
|
import tensorflow as tf
from tensorflow.python.framework import ops
from accknn_op import AccumulateKnn
def NeighbourCovariance(coordinates, distsq, features, n_idxs):
'''
expands to V x F x C**2, but not in the neighbour dimension
Feed features without activation!
'''
features = tf.nn.sigmoid(features) + 1e-3 #make sure they're in a good range
nF = features.shape[1]
nC = coordinates.shape[1]
nKf = tf.cast(distsq.shape[1],dtype='float32')
#calc mean of features over all neighbours (1/K factor too much)
sum_F = AccumulateKnn(distsq, features, n_idxs, mean_and_max=False)[0] * nKf
#not gonna work like this
#build feature-weighted coordinates: V x 1 x C * V x F x 1
FC = tf.expand_dims(coordinates,axis=1) * tf.expand_dims(features,axis=2)
#reshape to V x F*C
FC = tf.reshape(FC, [-1, nF*nC])
#sum over neighbours (factor 1/K too much)
sum_FC = AccumulateKnn(distsq, FC, n_idxs, mean_and_max=False)[0] * nKf
#reshape back to V x F x C
mean_C = tf.reshape(sum_FC, [-1, nF, nC])
mean_C = tf.math.divide_no_nan(mean_C, tf.expand_dims(sum_F, axis=2)+1e-3)
#now we have centred coordinates: V x F x C
centered_C = tf.expand_dims(coordinates,axis=1) - mean_C
#build covariance input: V x F x C x 1 * V x F x 1 x C
cov = tf.expand_dims(centered_C, axis=3) * tf.expand_dims(centered_C, axis=2)
# reshape to something useful
cov = tf.reshape(cov, [-1, nF,nC**2])
cov *= tf.expand_dims(features, axis=2) #add feature weights
cov = tf.reshape(cov, [-1, nF*nC**2])
#sum over neighbours
cov = AccumulateKnn(distsq, cov, n_idxs, mean_and_max=False)[0] * nKf
#reshape back
cov = tf.reshape(cov, [-1, nF, nC**2])
cov = tf.math.divide_no_nan(cov, tf.expand_dims(sum_F, axis=2)+1e-3)
cov = tf.reshape(cov, [-1, nF, nC**2])#just for keras
return cov, mean_C
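# Illustrative shape sketch (an assumption, not part of the original module):
# with V vertices, K neighbours, C coordinate dimensions and F features, the
# tensors would look roughly like
#
#   coordinates: (V, C)   distsq: (V, K)   features: (V, F)   n_idxs: (V, K)
#   cov, mean_C = NeighbourCovariance(coordinates, distsq, features, n_idxs)
#   # cov has shape (V, F, C**2) and mean_C has shape (V, F, C), matching the
#   # "expands to V x F x C**2" note in the docstring.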
|
[
"[email protected]"
] | |
d7552e3b106bb7b596621fa2810ed985b0175dd9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03013/s787334180.py
|
28e033a57e8473172944172d8c1031ab7c555e8e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 324 |
py
|
N, M = map(int, input().split())
if N == 1:
print(1)
else:
s = [-1] * N
s[0] = 1
s[1] = 1
for m in range(0, M):
i = int(input())
s[i] = 0
for n in range(2, N):
if s[n] == 0:
continue
s[n] = s[n - 1] + s[n - 2]
print((s[N-1] + s[N-2]) % 1000000007)
|
[
"[email protected]"
] | |
ab08ca6833912ff2321dd2234faded534afab1ab
|
c13ccf912360f02010f3185dc29f3e72205984dd
|
/Hauz/migrations/0015_auto_20180119_0824.py
|
a94915c8159be33de7285c6226946eca01f3f871
|
[
"MIT"
] |
permissive
|
VirginiaNdungu1/HauzBox
|
e3dd238b8ef302e69dd4cefa2036a50500b6f3bc
|
c586d221a903f2be681b895eec20dd01664ce141
|
refs/heads/master
| 2021-05-15T05:41:37.189912 | 2018-01-31T14:45:43 | 2018-01-31T14:46:18 | 116,761,502 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 519 |
py
|
# Generated by Django 2.0.1 on 2018-01-19 05:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Hauz', '0014_auto_20180119_0821'),
]
operations = [
migrations.AlterField(
model_name='house',
name='tenant_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='house_tenant', to='Hauz.Tenant'),
),
]
|
[
"[email protected]"
] | |
79dcbfe3d5544e48a61ca3c29afc26988d642d42
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/chocolatey/chocolatey/plugins/modules/win_chocolatey_source.py
|
cae7637ea3d821e51ef6c10dfcf1d8769d184a97
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 |
MIT
| 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null |
UTF-8
|
Python
| false | false | 3,734 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2020, Chocolatey Software
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey_source
version_added: '2.7'
short_description: Manages Chocolatey sources
description:
- Used to managed Chocolatey sources configured on the client.
- Requires Chocolatey to be already installed on the remote host.
options:
admin_only:
description:
- Makes the source visible to Administrators only.
- Requires Chocolatey >= 0.10.8.
- When creating a new source, this defaults to C(no).
type: bool
allow_self_service:
description:
- Allow the source to be used with self-service
- Requires Chocolatey >= 0.10.4.
- When creating a new source, this defaults to C(no).
type: bool
bypass_proxy:
description:
- Bypass the proxy when using this source.
- Requires Chocolatey >= 0.10.4.
- When creating a new source, this defaults to C(no).
type: bool
certificate:
description:
- The path to a .pfx file to use for X509 authenticated feeds.
- Requires Chocolatey >= 0.9.10.
type: str
certificate_password:
description:
- The password for I(certificate) if required.
- Requires Chocolatey >= 0.9.10.
name:
description:
- The name of the source to configure.
required: yes
priority:
description:
- The priority order of this source compared to other sources, lower is
better.
- All priorities above C(0) will be evaluated first, then zero-based values
will be evaluated in config file order.
- Requires Chocolatey >= 0.9.9.9.
- When creating a new source, this defaults to C(0).
type: int
source:
description:
- The file/folder/url of the source.
- Required when I(state) is C(present) or C(disabled) and the source does
not already exist.
source_username:
description:
- The username used to access I(source).
source_password:
description:
- The password for I(source_username).
- Required if I(source_username) is set.
state:
description:
- When C(absent), will remove the source.
- When C(disabled), will ensure the source exists but is disabled.
- When C(present), will ensure the source exists and is enabled.
choices:
- absent
- disabled
- present
default: present
update_password:
description:
- When C(always), the module will always set the password and report a
change if I(certificate_password) or I(source_password) is set.
- When C(on_create), the module will only set the password if the source
is being created.
choices:
- always
- on_create
default: always
seealso:
- module: win_chocolatey
- module: win_chocolatey_config
- module: win_chocolatey_facts
- module: win_chocolatey_feature
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Remove the default public source
win_chocolatey_source:
name: chocolatey
state: absent
- name: Add new internal source
win_chocolatey_source:
name: internal repo
state: present
source: http://chocolatey-server/chocolatey
- name: Create HTTP source with credentials
win_chocolatey_source:
name: internal repo
state: present
source: https://chocolatey-server/chocolatey
source_username: username
source_password: password
- name: Disable Chocolatey source
win_chocolatey_source:
name: chocolatey
state: disabled
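# Illustrative only, not one of the module's original examples: the documented
# priority option can be combined with a source definition, e.g.
- name: Add internal source evaluated before other sources
  win_chocolatey_source:
    name: internal repo
    state: present
    source: http://chocolatey-server/chocolatey
    priority: 1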
'''
RETURN = r'''
'''
|
[
"[email protected]"
] | |
a9daa25aa586b9c519f86b630fdd29e6a115d159
|
c81ea73e93df307d35191ab184a85d6c67c57112
|
/dockers/rotnet/prepare_nets.py
|
63510dd367617d93fe4ec8309755cc686e82c50c
|
[] |
no_license
|
BlenderCN-Org/diplomka
|
8d0503fc5902dfede8317aed84f5a17f691f687f
|
575fe3f2436b9c511496c1dc019d9cc3423ba5f0
|
refs/heads/master
| 2020-05-22T15:42:00.143738 | 2019-05-07T07:37:46 | 2019-05-07T07:37:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 567 |
py
|
import os
def replace(file, what, for_what):
# Read in the file
with open(file, 'r') as f:
filedata = f.read()
# Replace the target string
filedata = filedata.replace(what, for_what)
#print(filedata)
# Write the file out again
with open(file, 'w') as f:
f.write(filedata)
def set_num_cats(file, num_cats, views):
replace(file, '$NUMCATS', str(num_cats+1))
replace(file, "$INNER", str((num_cats+1) * views))
def set_batch_size(file, batch_size):
replace(file, "$BATCHSIZE", str(batch_size))
|
[
"[email protected]"
] | |
cc9e9573db428adfe20c7a930d036f9c4a3eb3ba
|
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
|
/examples/pytorch_lightning_distributed/dcl.py
|
ecf6e7bcd014282101b6c04ec204b23362d2d964
|
[
"MIT"
] |
permissive
|
lightly-ai/lightly
|
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
|
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
|
refs/heads/master
| 2023-08-17T11:08:00.135920 | 2023-08-16T12:43:02 | 2023-08-16T12:43:02 | 303,705,119 | 2,473 | 229 |
MIT
| 2023-09-14T14:47:16 | 2020-10-13T13:02:56 |
Python
|
UTF-8
|
Python
| false | false | 2,288 |
py
|
# Note: The model and training settings do not follow the reference settings
# from the paper. The settings are chosen such that the example can easily be
# run on a small dataset with a single GPU.
import pytorch_lightning as pl
import torch
import torchvision
from torch import nn
from lightly.loss import DCLLoss
from lightly.models.modules import SimCLRProjectionHead
from lightly.transforms.simclr_transform import SimCLRTransform
class DCL(pl.LightningModule):
def __init__(self):
super().__init__()
resnet = torchvision.models.resnet18()
self.backbone = nn.Sequential(*list(resnet.children())[:-1])
self.projection_head = SimCLRProjectionHead(512, 2048, 2048)
# enable gather_distributed to gather features from all gpus
# before calculating the loss
self.criterion = DCLLoss(gather_distributed=True)
# or use the weighted DCLW loss:
# self.criterion = DCLWLoss(gather_distributed=True)
def forward(self, x):
x = self.backbone(x).flatten(start_dim=1)
z = self.projection_head(x)
return z
def training_step(self, batch, batch_index):
(x0, x1) = batch[0]
z0 = self.forward(x0)
z1 = self.forward(x1)
loss = self.criterion(z0, z1)
return loss
def configure_optimizers(self):
optim = torch.optim.SGD(self.parameters(), lr=0.06)
return optim
model = DCL()
transform = SimCLRTransform(input_size=32)
dataset = torchvision.datasets.CIFAR10(
"datasets/cifar10", download=True, transform=transform
)
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder", transform=transform)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=256,
shuffle=True,
drop_last=True,
num_workers=8,
)
# Train with DDP and use Synchronized Batch Norm for a more accurate batch norm
# calculation. Distributed sampling is also enabled with replace_sampler_ddp=True.
trainer = pl.Trainer(
max_epochs=10,
devices="auto",
accelerator="gpu",
strategy="ddp",
sync_batchnorm=True,
use_distributed_sampler=True, # or replace_sampler_ddp=True for PyTorch Lightning <2.0
)
trainer.fit(model=model, train_dataloaders=dataloader)
|
[
"[email protected]"
] | |
63e8a987a551c5b04f8b8a223840ca4ec9d6208c
|
1fc7fc8cc0ad49133ba9a4dae910fd7d6e9b242c
|
/pyqtgraph/graphicsItems/HistogramLUTItem.py
|
89c45568d5230a161ace8f4ef141337ea99ff687
|
[
"MIT"
] |
permissive
|
Yingzhang1122/DiffractionLimitedAnalysis
|
2a67ac2ac87e9fdaf9262a565cc717899e439561
|
6ea260b738a624962a329dcb7ae19ee048515edf
|
refs/heads/main
| 2023-06-03T16:12:15.684375 | 2021-05-26T18:47:40 | 2021-05-26T18:47:40 | 368,825,659 | 0 | 0 |
MIT
| 2021-05-19T10:11:17 | 2021-05-19T10:11:17 | null |
UTF-8
|
Python
| false | false | 13,765 |
py
|
# -*- coding: utf-8 -*-
"""
GraphicsWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.
"""
from ..Qt import QtGui, QtCore
from .. import functions as fn
from .GraphicsWidget import GraphicsWidget
from .ViewBox import *
from .GradientEditorItem import *
from .LinearRegionItem import *
from .PlotDataItem import *
from .AxisItem import *
from .GridItem import *
from ..Point import Point
from .. import functions as fn
import numpy as np
from .. import debug as debug
import weakref
__all__ = ['HistogramLUTItem']
class HistogramLUTItem(GraphicsWidget):
"""
This is a graphicsWidget which provides controls for adjusting the display of an image.
Includes:
- Image histogram
- Movable region over histogram to select black/white levels
- Gradient editor to define color lookup table for single-channel images
================ ===========================================================
image (:class:`~pyqtgraph.ImageItem` or ``None``) If *image* is
provided, then the control will be automatically linked to
the image and changes to the control will be immediately
reflected in the image's appearance.
fillHistogram (bool) By default, the histogram is rendered with a fill.
For performance, set ``fillHistogram=False``
rgbHistogram (bool) Sets whether the histogram is computed once over all
channels of the image, or once per channel.
levelMode 'mono' or 'rgba'. If 'mono', then only a single set of
black/white level lines is drawn, and the levels apply to
all channels in the image. If 'rgba', then one set of
levels is drawn for each channel.
================ ===========================================================
"""
sigLookupTableChanged = QtCore.Signal(object)
sigLevelsChanged = QtCore.Signal(object)
sigLevelChangeFinished = QtCore.Signal(object)
def __init__(self, image=None, fillHistogram=True, rgbHistogram=False, levelMode='mono'):
GraphicsWidget.__init__(self)
self.lut = None
self.imageItem = lambda: None # fake a dead weakref
self.levelMode = levelMode
self.rgbHistogram = rgbHistogram
self.layout = QtGui.QGraphicsGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(1,1,1,1)
self.layout.setSpacing(0)
self.vb = ViewBox(parent=self)
self.vb.setMaximumWidth(152)
self.vb.setMinimumWidth(45)
self.vb.setMouseEnabled(x=False, y=True)
self.gradient = GradientEditorItem()
self.gradient.setOrientation('right')
self.gradient.loadPreset('grey')
self.regions = [
LinearRegionItem([0, 1], 'horizontal', swapMode='block'),
LinearRegionItem([0, 1], 'horizontal', swapMode='block', pen='r',
brush=fn.mkBrush((255, 50, 50, 50)), span=(0., 1/3.)),
LinearRegionItem([0, 1], 'horizontal', swapMode='block', pen='g',
brush=fn.mkBrush((50, 255, 50, 50)), span=(1/3., 2/3.)),
LinearRegionItem([0, 1], 'horizontal', swapMode='block', pen='b',
brush=fn.mkBrush((50, 50, 255, 80)), span=(2/3., 1.)),
LinearRegionItem([0, 1], 'horizontal', swapMode='block', pen='w',
brush=fn.mkBrush((255, 255, 255, 50)), span=(2/3., 1.))]
for region in self.regions:
region.setZValue(1000)
self.vb.addItem(region)
region.lines[0].addMarker('<|', 0.5)
region.lines[1].addMarker('|>', 0.5)
region.sigRegionChanged.connect(self.regionChanging)
region.sigRegionChangeFinished.connect(self.regionChanged)
self.region = self.regions[0] # for backward compatibility.
self.axis = AxisItem('left', linkView=self.vb, maxTickLength=-10, parent=self)
self.layout.addItem(self.axis, 0, 0)
self.layout.addItem(self.vb, 0, 1)
self.layout.addItem(self.gradient, 0, 2)
self.range = None
self.gradient.setFlag(self.gradient.ItemStacksBehindParent)
self.vb.setFlag(self.gradient.ItemStacksBehindParent)
self.gradient.sigGradientChanged.connect(self.gradientChanged)
self.vb.sigRangeChanged.connect(self.viewRangeChanged)
add = QtGui.QPainter.CompositionMode_Plus
self.plots = [
PlotCurveItem(pen=(200, 200, 200, 100)), # mono
PlotCurveItem(pen=(255, 0, 0, 100), compositionMode=add), # r
PlotCurveItem(pen=(0, 255, 0, 100), compositionMode=add), # g
PlotCurveItem(pen=(0, 0, 255, 100), compositionMode=add), # b
PlotCurveItem(pen=(200, 200, 200, 100), compositionMode=add), # a
]
self.plot = self.plots[0] # for backward compatibility.
for plot in self.plots:
plot.setRotation(90)
self.vb.addItem(plot)
self.fillHistogram(fillHistogram)
self._showRegions()
self.vb.addItem(self.plot)
self.autoHistogramRange()
if image is not None:
self.setImageItem(image)
def fillHistogram(self, fill=True, level=0.0, color=(100, 100, 200)):
colors = [color, (255, 0, 0, 50), (0, 255, 0, 50), (0, 0, 255, 50), (255, 255, 255, 50)]
for i,plot in enumerate(self.plots):
if fill:
plot.setFillLevel(level)
plot.setBrush(colors[i])
else:
plot.setFillLevel(None)
def paint(self, p, *args):
if self.levelMode != 'mono':
return
pen = self.region.lines[0].pen
rgn = self.getLevels()
p1 = self.vb.mapFromViewToItem(self, Point(self.vb.viewRect().center().x(), rgn[0]))
p2 = self.vb.mapFromViewToItem(self, Point(self.vb.viewRect().center().x(), rgn[1]))
gradRect = self.gradient.mapRectToParent(self.gradient.gradRect.rect())
p.setRenderHint(QtGui.QPainter.Antialiasing)
for pen in [fn.mkPen((0, 0, 0, 100), width=3), pen]:
p.setPen(pen)
p.drawLine(p1 + Point(0, 5), gradRect.bottomLeft())
p.drawLine(p2 - Point(0, 5), gradRect.topLeft())
p.drawLine(gradRect.topLeft(), gradRect.topRight())
p.drawLine(gradRect.bottomLeft(), gradRect.bottomRight())
def setHistogramRange(self, mn, mx, padding=0.1):
"""Set the Y range on the histogram plot. This disables auto-scaling."""
self.vb.enableAutoRange(self.vb.YAxis, False)
self.vb.setYRange(mn, mx, padding)
def autoHistogramRange(self):
"""Enable auto-scaling on the histogram plot."""
self.vb.enableAutoRange(self.vb.XYAxes)
def setImageItem(self, img):
"""Set an ImageItem to have its levels and LUT automatically controlled
by this HistogramLUTItem.
"""
self.imageItem = weakref.ref(img)
img.sigImageChanged.connect(self.imageChanged)
self._setImageLookupTable()
self.regionChanged()
self.imageChanged(autoLevel=True)
def viewRangeChanged(self):
self.update()
def gradientChanged(self):
if self.imageItem() is not None:
self._setImageLookupTable()
self.lut = None
self.sigLookupTableChanged.emit(self)
def _setImageLookupTable(self):
if self.gradient.isLookupTrivial():
self.imageItem().setLookupTable(None) #lambda x: x.astype(np.uint8))
else:
self.imageItem().setLookupTable(self.getLookupTable) ## send function pointer, not the result
def getLookupTable(self, img=None, n=None, alpha=None):
"""Return a lookup table from the color gradient defined by this
HistogramLUTItem.
"""
if self.levelMode != 'mono':
return None
if n is None:
if img.dtype == np.uint8:
n = 256
else:
n = 512
if self.lut is None:
self.lut = self.gradient.getLookupTable(n, alpha=alpha)
return self.lut
def regionChanged(self):
if self.imageItem() is not None:
self.imageItem().setLevels(self.getLevels())
self.sigLevelChangeFinished.emit(self)
def regionChanging(self):
if self.imageItem() is not None:
self.imageItem().setLevels(self.getLevels())
self.update()
self.sigLevelsChanged.emit(self)
def imageChanged(self, autoLevel=False, autoRange=False):
if self.imageItem() is None:
return
if self.levelMode == 'mono':
for plt in self.plots[1:]:
plt.setVisible(False)
self.plots[0].setVisible(True)
# plot one histogram for all image data
profiler = debug.Profiler()
h = self.imageItem().getHistogram()
profiler('get histogram')
if h[0] is None:
return
self.plot.setData(*h)
profiler('set plot')
if autoLevel:
mn = h[0][0]
mx = h[0][-1]
self.region.setRegion([mn, mx])
profiler('set region')
else:
mn, mx = self.imageItem().levels
self.region.setRegion([mn, mx])
else:
# plot one histogram for each channel
self.plots[0].setVisible(False)
ch = self.imageItem().getHistogram(perChannel=True)
if ch[0] is None:
return
for i in range(1, 5):
if len(ch) >= i:
h = ch[i-1]
self.plots[i].setVisible(True)
self.plots[i].setData(*h)
if autoLevel:
mn = h[0][0]
mx = h[0][-1]
self.region[i].setRegion([mn, mx])
else:
# hide channels not present in image data
self.plots[i].setVisible(False)
# make sure we are displaying the correct number of channels
self._showRegions()
def getLevels(self):
"""Return the min and max levels.
For rgba mode, this returns a list of the levels for each channel.
"""
if self.levelMode == 'mono':
return self.region.getRegion()
else:
nch = self.imageItem().channels()
if nch is None:
nch = 3
return [r.getRegion() for r in self.regions[1:nch+1]]
def setLevels(self, min=None, max=None, rgba=None):
"""Set the min/max (bright and dark) levels.
Arguments may be *min* and *max* for single-channel data, or
*rgba* = [(rmin, rmax), ...] for multi-channel data.
"""
if self.levelMode == 'mono':
if min is None:
min, max = rgba[0]
assert None not in (min, max)
self.region.setRegion((min, max))
else:
if rgba is None:
raise TypeError("Must specify rgba argument when levelMode != 'mono'.")
for i, levels in enumerate(rgba):
self.regions[i+1].setRegion(levels)
def setLevelMode(self, mode):
""" Set the method of controlling the image levels offered to the user.
Options are 'mono' or 'rgba'.
"""
assert mode in ('mono', 'rgba')
if mode == self.levelMode:
return
oldLevels = self.getLevels()
self.levelMode = mode
self._showRegions()
# do our best to preserve old levels
if mode == 'mono':
levels = np.array(oldLevels).mean(axis=0)
self.setLevels(*levels)
else:
levels = [oldLevels] * 4
self.setLevels(rgba=levels)
# force this because calling self.setLevels might not set the imageItem
# levels if there was no change to the region item
self.imageItem().setLevels(self.getLevels())
self.imageChanged()
self.update()
def _showRegions(self):
for i in range(len(self.regions)):
self.regions[i].setVisible(False)
if self.levelMode == 'rgba':
imax = 4
if self.imageItem() is not None:
# Only show rgb channels if connected image lacks alpha.
nch = self.imageItem().channels()
if nch is None:
nch = 3
xdif = 1.0 / nch
for i in range(1, nch+1):
self.regions[i].setVisible(True)
self.regions[i].setSpan((i-1) * xdif, i * xdif)
self.gradient.hide()
elif self.levelMode == 'mono':
self.regions[0].setVisible(True)
self.gradient.show()
else:
raise ValueError("Unknown level mode %r" % self.levelMode)
def saveState(self):
return {
'gradient': self.gradient.saveState(),
'levels': self.getLevels(),
'mode': self.levelMode,
}
def restoreState(self, state):
if 'mode' in state:
self.setLevelMode(state['mode'])
self.gradient.restoreState(state['gradient'])
self.setLevels(*state['levels'])
|
[
"[email protected]"
] | |
0ccaa945ee61fe6e02bfa5331ec1bacd14f0ee07
|
a560269290749e10466b1a29584f06a2b8385a47
|
/Notebooks/py/sonukumarsaw/fork-of-titanic-survivors-dataset/fork-of-titanic-survivors-dataset.py
|
e15a01032f6d55138434ecd5ca6cd6a3f757da58
|
[] |
no_license
|
nischalshrestha/automatic_wat_discovery
|
c71befad1aa358ae876d5494a67b0f4aa1266f23
|
982e700d8e4698a501afffd6c3a2f35346c34f95
|
refs/heads/master
| 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,475 |
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[ ]:
train = pd.read_csv('../input/train.csv')
gender_sub = pd.read_csv('../input/gender_submission.csv')
test = pd.read_csv('../input/test.csv')
# **Training Dataset**
# In[ ]:
#showing the sample of train data
train.head()
# In[ ]:
# describe the train dataset
train.describe()
# In[ ]:
#checking for null values in data
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# We can see in the heat map that the Age and Cabin columns have a lot of null data.
# We can either drop those columns or fill the null values with the average age.
# We can't fill the Cabin values because there isn't any relation between Cabin and the other columns, so we will drop it from the table. A quick numeric check of the null counts is sketched below.
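# As an illustrative check (added here, not part of the original notebook), the same
# information can be read off numerically from the 'train' DataFrame loaded above.
# In[ ]:
print(train.isnull().sum().sort_values(ascending=False).head())  # Cabin and Age dominate the null counts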
# In[ ]:
# Count of passengers who survived and those who didn't
sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train,palette='RdBu_r')
# In[ ]:
# Those who survived (male /female)
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
# In[ ]:
# survived on basis of class
sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
# In[ ]:
# this column has too many null values to be useful
train=train.drop('Cabin',axis=1)
# In[ ]:
train.head()
# In[ ]:
sns.countplot(x='SibSp',data=train)
# The graph below shows the relation between passenger age and passenger class.
# In[ ]:
# Average age by passenger class
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
# The graph above shows that passengers in class 1 have an average age of about 37; similarly, class 2 averages 29 years and class 3 averages 24 years.
# In[ ]:
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
train['Age']=train[['Age','Pclass']].apply(impute_age,axis=1)
# In[ ]:
train.head()
# We just filled all the null Age values with the average age for each passenger class.
# In[ ]:
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
# In[ ]:
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
# In[ ]:
train = pd.concat([train,sex,embark],axis=1)
# A regression model can't work with string features, so we converted the strings to binary indicator columns; a small illustration follows.
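# A minimal sketch (illustration only, not part of the original notebook) of what
# pd.get_dummies with drop_first=True does to a binary column such as Sex:
# In[ ]:
demo = pd.DataFrame({'Sex': ['male', 'female', 'male']})
print(pd.get_dummies(demo['Sex'], drop_first=True))  # one 'male' column holding 1, 0, 1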
# In[ ]:
train.head()
# In[ ]:
plt.figure(figsize=(16, 10))
# this heatmap shows that there are no null values left in the dataset
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Now the training set is ready to be fed to the algorithm
#
# **Testing dataset**
#
# In[ ]:
sns.heatmap(test.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# The testing set also has null values.
# We will repeat the same preprocessing on the testing set that we did on the training set.
# In[ ]:
# dropping the Cabin column
test = test.drop('Cabin',axis=1)
# here axis=1 specifies that we operate on columns; if it were 0, we would operate on rows.
# In[ ]:
test.head()
# * Now we have to convert the Sex and Embarked columns from strings to binary indicators.
# * Fill the missing ages with average values
# In[ ]:
sex = pd.get_dummies(test['Sex'],drop_first=True)
embark = pd.get_dummies(test['Embarked'],drop_first=True)
test.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
test= pd.concat([test,sex,embark],axis=1)
# In[ ]:
test.head()
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Age',data=test,palette='winter')
# We can see that there is a slight difference in average age between the training and testing datasets. We will now impute age on the basis of this new graph.
# In[ ]:
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 42
elif Pclass == 2:
return 28
else:
return 24
else:
return Age
test['Age']=test[['Age','Pclass']].apply(impute_age,axis=1)
# In[ ]:
test.head()
# In[ ]:
plt.figure(figsize=(16, 10))
sns.heatmap(test.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# The Fare column has a null value. It is better to fill it with an average value rather than dropping the row.
#
# For this we have to check whether there is any relation between Fare and Pclass.
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot(x='Pclass',y='Fare',data=test,palette='winter')
plt.ylim(0,100)
# In[ ]:
def impute_fare(cols):
Fare = cols[0]
Pclass = cols[1]
if pd.isnull(Fare):
if Pclass == 1:
return 60
elif Pclass == 2:
return 16
else:
return 10
else:
return Fare
test['Fare']=test[['Fare','Pclass']].apply(impute_fare,axis=1)
# Now our test set is also ready for the algorithm.
# **MACHINE LEARNING**
# In[ ]:
X_train=train.drop('Survived',axis=1)
X_train.head()
# In[ ]:
y_train=train['Survived']
y_train.head()
# In[ ]:
y_test=gender_sub['Survived']
# In[ ]:
from sklearn.linear_model import LogisticRegression
# In[ ]:
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
# In[ ]:
X_test=test
# In[ ]:
predictions = logmodel.predict(X_test)
# In[ ]:
from sklearn.metrics import classification_report
# In[ ]:
print(classification_report(y_test,predictions))
# In[ ]:
from sklearn.metrics import jaccard_similarity_score
jaccard_similarity_score(y_test, predictions)
# In[ ]:
passid=np.array(list(range(892,1310)))
df = pd.DataFrame({'PassengerId':passid,'Survived':predictions})
df.to_csv('submission.csv',index=False)
# In[ ]:
|
[
"[email protected]"
] | |
bc7efdd131ecce9958da72f2a08feb76ecb8da2f
|
b24c7086c8174023177f67a88980cb2b4a92522d
|
/src/robots/naoqi/res.py
|
22647370c74cb65b99651d261179ecfc88cc6c6b
|
[
"ISC"
] |
permissive
|
chili-epfl/pyrobots-nao
|
f67da21112dcb3cb33b5c5336a4d0c1abb090673
|
981addf10beda75466dc3e0a7a4be223b39c260c
|
refs/heads/master
| 2021-01-20T11:09:57.440700 | 2015-02-28T11:42:54 | 2015-02-28T11:42:54 | 29,869,379 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
from robots.resources import Resource, CompoundResource
# hardware resource that need to be shared
LEYE = Resource("left eye")
REYE = Resource("right eye")
EYES = CompoundResource(LEYE, REYE, name = "eyes")
AUDIO = Resource("audio")
HEAD = Resource("head")
|
[
"[email protected]"
] | |
2aefd765c2ca7565c4eead4054bcd72a6c70737b
|
72316a1d1a2e0358486d50aeecbac8219ccdf092
|
/ietf/group/migrations/0006_auto__chg_field_groupmilestonehistory_time.py
|
bd66b5d0e282adc4bb69431ab002dd658d62d069
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
algby/ietfdb
|
363541941bd6e806bed70891bed4c7f47c9f0539
|
9ff37e43abbecac873c0362b088a6d9c16f6eed2
|
refs/heads/master
| 2021-01-16T18:57:50.100055 | 2014-09-29T21:16:55 | 2014-09-29T21:16:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,502 |
py
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'GroupMilestoneHistory.time'
db.alter_column('group_groupmilestonehistory', 'time', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'GroupMilestoneHistory.time'
db.alter_column('group_groupmilestonehistory', 'time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.document': {
'Meta': {'object_name': 'Document'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocumentAuthor']", 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.documentauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocumentAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'doc.state': {
'Meta': {'ordering': "['type', 'order']", 'object_name': 'State'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'previous_states'", 'blank': 'True', 'to': "orm['doc.State']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.statetype': {
'Meta': {'object_name': 'StateType'},
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
},
'group.changestategroupevent': {
'Meta': {'ordering': "['-time', 'id']", 'object_name': 'ChangeStateGroupEvent', '_ormbases': ['group.GroupEvent']},
'groupevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['group.GroupEvent']", 'unique': 'True', 'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']"})
},
'group.group': {
'Meta': {'object_name': 'Group'},
'acronym': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False', 'blank': 'True'})
},
'group.groupevent': {
'Meta': {'ordering': "['-time', 'id']", 'object_name': 'GroupEvent'},
'by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'desc': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'group.grouphistory': {
'Meta': {'object_name': 'GroupHistory'},
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history_set'", 'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False', 'blank': 'True'})
},
'group.groupmilestone': {
'Meta': {'ordering': "['due', 'id']", 'object_name': 'GroupMilestone'},
'desc': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'docs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.Document']", 'symmetrical': 'False', 'blank': 'True'}),
'due': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resolved': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupMilestoneStateName']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'group.groupmilestonehistory': {
'Meta': {'ordering': "['due', 'id']", 'object_name': 'GroupMilestoneHistory'},
'desc': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'docs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.Document']", 'symmetrical': 'False', 'blank': 'True'}),
'due': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history_set'", 'to': "orm['group.GroupMilestone']"}),
'resolved': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupMilestoneStateName']"}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'group.groupstatetransitions': {
'Meta': {'object_name': 'GroupStateTransitions'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'previous_groupstatetransitions_states'", 'symmetrical': 'False', 'to': "orm['doc.State']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.State']"})
},
'group.groupurl': {
'Meta': {'object_name': 'GroupURL'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'group.milestonegroupevent': {
'Meta': {'ordering': "['-time', 'id']", 'object_name': 'MilestoneGroupEvent', '_ormbases': ['group.GroupEvent']},
'groupevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['group.GroupEvent']", 'unique': 'True', 'primary_key': 'True'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.GroupMilestone']"})
},
'group.role': {
'Meta': {'object_name': 'Role'},
'email': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.RoleName']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"})
},
'group.rolehistory': {
'Meta': {'object_name': 'RoleHistory'},
'email': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.GroupHistory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.RoleName']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"})
},
'name.doctagname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTagName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupmilestonestatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupMilestoneStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupstatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.grouptypename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.intendedstdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'IntendedStdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.rolename': {
'Meta': {'ordering': "['order']", 'object_name': 'RoleName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.stdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'StdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.streamname': {
'Meta': {'ordering': "['order']", 'object_name': 'StreamName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'person.email': {
'Meta': {'object_name': 'Email'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['group']
|
[
"[email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0"
] |
[email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0
|
f2056e40108d671f9b0355deab9f4e12a257eebc
|
6564b596ec27e67ee1b48377da1e7cee59cdcfe9
|
/shenfun/optimization/__init__.py
|
d1a6e8057a3b55601f99b532c29a35d270f271d9
|
[
"BSD-2-Clause"
] |
permissive
|
GeraintPratten/shenfun
|
077b13d904fd6bf6880c412f74300d78494bee11
|
d92eb058c9969175da19b23926fb80148cf92ace
|
refs/heads/master
| 2023-07-04T13:46:27.969149 | 2021-08-10T11:48:32 | 2021-08-10T11:48:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,095 |
py
|
"""Module for optimized functions
Some methods performed in Python may be slowing down solvers. In this optimization
module we place optimized functions that are to be used instead of default
Python methods. Some methods are implemented solely in Cython and only called
from within the regular Python modules.
"""
import os
import importlib
from functools import wraps
from . import cython
try:
from . import numba
except ModuleNotFoundError:
numba = None
def optimizer(func):
"""Decorator used to wrap calls to optimized versions of functions."""
mod = os.environ.get('SHENFUN_OPTIMIZATION', 'cython')
if mod.lower() not in ('cython', 'numba'):
# Use python function
#print(func.__name__ + ' not optimized')
return func
mod = importlib.import_module('shenfun.optimization.'+mod.lower())
fun = getattr(mod, func.__name__, func)
#if fun is func:
# print(fun.__name__ + ' not optimized')
@wraps(func)
def wrapped_function(*args, **kwargs):
u0 = fun(*args, **kwargs)
return u0
return wrapped_function
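# A hypothetical usage sketch (added for illustration, not part of the original module):
# a function decorated with @optimizer is looked up by name in shenfun.optimization.cython
# or .numba, depending on the SHENFUN_OPTIMIZATION environment variable, and silently
# falls back to the plain Python body below when no optimized version of that name exists.
@optimizer
def scale_inplace(u, factor):
    # pure Python fallback; an optimized backend may expose a faster 'scale_inplace'
    u *= factor
    return u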
|
[
"[email protected]"
] | |
b2c253c3b2b826b2d4c91a55667970a087f6c604
|
1786dad5941d4b50561e04104d11d1412433d1f3
|
/core/admin.py
|
10d5f471b467447c2538cf468cbb6db779b18fd1
|
[] |
no_license
|
daryabsb/imdb
|
5dc4704cb589d97815d98cfa97866a50055ab690
|
649f9dcc673a3b56c28329af15d1d1bae5f3c370
|
refs/heads/master
| 2022-12-25T06:59:26.738297 | 2020-09-19T13:55:05 | 2020-09-19T13:55:05 | 290,747,050 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
from django.contrib import admin
from .models import Movie, MovieLink
# Register your models here.
admin.site.register(Movie)
admin.site.register(MovieLink)
|
[
"[email protected]"
] | |
22c3b4bba12d8a2089c01805105ab6950a3845eb
|
23db23583a49dd42002f3815bcfb26249096cb99
|
/input/channelConfig_dielectron_Legacy2018_EBEE.py
|
b14170031da10b7294d93389ddde0313092551aa
|
[] |
no_license
|
JanFSchulte/BiasTests
|
34560adb1bc747aa3b594dd2c1014dee6093a25e
|
ff9ad12a2391fe0f409bac2945d21576f4a0cb3d
|
refs/heads/master
| 2021-04-22T01:00:35.124191 | 2020-03-24T23:23:07 | 2020-03-24T23:23:07 | 249,836,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,672 |
py
|
import ROOT,sys
ROOT.gROOT.SetBatch(True)
ROOT.gErrorIgnoreLevel = 1
from ROOT import *
from math import sqrt
from resolution_cfg_2018 import DCB_para
nBkg = -1
dataFile = "input/eventList_ele_2018_BE.txt"
def addBkgUncertPrior(ws,label,channel,uncert):
beta_bkg = RooRealVar('beta_%s_%s'%(label,channel),'beta_%s_%s'%(label,channel),0,-5,5)
getattr(ws,'import')(beta_bkg,ROOT.RooCmdArg())
uncert = 1. + uncert
bkg_kappa = RooRealVar('%s_%s_kappa'%(label,channel),'%s_%s_kappa'%(label,channel),uncert)
bkg_kappa.setConstant()
getattr(ws,'import')(bkg_kappa,ROOT.RooCmdArg())
ws.factory("PowFunc::%s_%s_nuis(%s_%s_kappa, beta_%s_%s)"%(label,channel,label,channel,label,channel))
ws.factory("prod::%s_%s_forUse(%s_%s, %s_%s_nuis)"%(label,channel,label,channel,label,channel))
def provideSignalScaling(mass,spin2=False):
nz = 3401386
nsig_scale = 1./0.031377
eff = signalEff(mass,spin2)
result = (nsig_scale*nz*eff)
return result
def signalEff(mass,spin2=False):
eff_a = 0.01461
eff_b = 479.6
eff_c = 635.3
eff_d = -1.071e+05
eff_e = 8.345e+04
eff_f = 1.302e+07
eff_g = 2.337e+07
if spin2:
eff_a = 0.06212
eff_b = -7.192
eff_c = 56.72
eff_d = -43.69
eff_e = 822.9
eff_f = 3.579e08
eff_g = 3.048e09
return (eff_a+eff_b/(mass+eff_c)+eff_d/(mass*mass+eff_e))+eff_f/(mass**3+eff_g)
def provideUncertainties(mass):
result = {}
result["sigEff"] = [1.08] # must be list in case the uncertainty is asymmetric
result["massScale"] = 0.01
result ["bkgUncert"] = 1.4
result ["res"] = 0.0
result ["reco"] = [0.0]
result["bkgParams"] = {"bkg_a":0.00313740766932294824, "bkg_b":0.01135596583199909373, "bkg_c":0.54125714622824727673, "bkg_d":0.00000000000000000000, "bkg_e":0.00194931370350556223, "bkg_b2":0.01948124695032613443, "bkg_c2":0.28782235398250377578, "bkg_d2":0.41138999196844272532, "bkg_thr":0.01928153410885654132}
return result
def provideCorrelations():
result = {}
''' Combine correlates uncertainties that have the same name. So we have to adjust the names to achieve what we want.
1) put the full channel name. That will make it uncorrelated with all other channels
2) keep the channel name but remove the last bit: this will correlate between the two subcategories within a year
3) just keep the dimuon or dielectron name, so we correlate between the years
4) to correlate some specific combination of uncertainties, come up with a name and add it to all relevant channel configs
'''
#result['sigEff'] = 'dielectron'
#result['massScale'] = 'dielectron'
#result['bkgUncert'] = 'dielectron_Legacy2018_EBEE'
#result['res'] = 'dielectron'
#result['bkgParams'] = 'dielectron_Legacy2018_EBEE'
result['sigEff'] = 'dielectron'
result['massScale'] = 'dielectron_Legacy2018_EBEE'
result['bkgUncert'] = 'dielectron_Legacy2018_EBEE'
result['res'] = 'dielectron_Legacy2018_EBEE'
result['reco'] = 'dielectron_Legacy2018_EBEE'
result['bkgParams'] = 'dielectron_Legacy2018_EBEE'
return result
def getResolution(mass):
CBObject = DCB_para("dcb")
CBObject.get_value(mass,False)
result = {}
result["res"] = CBObject.sigma
result["scale"] = CBObject.mean
result["nR"] = CBObject.PowerR
result["nL"] = CBObject.PowerL
result["alphaL"] = CBObject.CutL
result["alphaR"] = CBObject.CutR
if result["nR"] < 0:
result["nR"] = 0.
return result
def loadBackgroundShape(ws,useShapeUncert=False):
bkg_a = RooRealVar('bkg_a_dielectron_Legacy2018_EBEE','bkg_a_dielectron_Legacy2018_EBEE',11.76585112)
bkg_b = RooRealVar('bkg_b_dielectron_Legacy2018_EBEE','bkg_b_dielectron_Legacy2018_EBEE',-0.003566666494)
bkg_c = RooRealVar('bkg_c_dielectron_Legacy2018_EBEE','bkg_c_dielectron_Legacy2018_EBEE',-2.513733207e-07)
bkg_d = RooRealVar('bkg_d_dielectron_Legacy2018_EBEE','bkg_d_dielectron_Legacy2018_EBEE',0.0)
bkg_e = RooRealVar('bkg_e_dielectron_Legacy2018_EBEE','bkg_e_dielectron_Legacy2018_EBEE',-2.860692377)
bkg_b2 = RooRealVar('bkg_b2_dielectron_Legacy2018_EBEE','bkg_b2_dielectron_Legacy2018_EBEE',-0.00159101029)
bkg_c2 = RooRealVar('bkg_c2_dielectron_Legacy2018_EBEE','bkg_c2_dielectron_Legacy2018_EBEE',-2.610407295e-08)
bkg_d2 = RooRealVar('bkg_d2_dielectron_Legacy2018_EBEE','bkg_d2_dielectron_Legacy2018_EBEE',2.822681727e-12)
bkg_thr = RooRealVar('bkg_thr_dielectron_Legacy2018_EBEE','bkg_thr_dielectron_Legacy2018_EBEE',537.7173207)
bkg_a.setConstant()
bkg_b.setConstant()
bkg_c.setConstant()
bkg_d.setConstant()
bkg_e.setConstant()
getattr(ws,'import')(bkg_a,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_b,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_c,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_d,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_e,ROOT.RooCmdArg())
bkg_b2.setConstant()
bkg_c2.setConstant()
bkg_d2.setConstant()
bkg_thr.setConstant()
getattr(ws,'import')(bkg_b2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_c2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_d2,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_thr,ROOT.RooCmdArg())
# background systematics
bkg_syst_a = RooRealVar('bkg_syst_a_dielectron_Legacy2018_EBEE','bkg_syst_a_dielectron_Legacy2018_EBEE',1.0)
bkg_syst_b = RooRealVar('bkg_syst_b_dielectron_Legacy2018_EBEE','bkg_syst_b_dielectron_Legacy2018_EBEE',0.0)
bkg_syst_a.setConstant()
bkg_syst_b.setConstant()
getattr(ws,'import')(bkg_syst_a,ROOT.RooCmdArg())
getattr(ws,'import')(bkg_syst_b,ROOT.RooCmdArg())
# background shape
if useShapeUncert:
bkgParamsUncert = provideUncertainties(1000)["bkgParams"]
for uncert in bkgParamsUncert:
addBkgUncertPrior(ws,uncert,"dielectron_Legacy2018_EBEE",bkgParamsUncert[uncert] )
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_dielectron_Legacy2018_EBEE(mass_dielectron_Legacy2018_EBEE, bkg_a_dielectron_Legacy2018_EBEE_forUse, bkg_b_dielectron_Legacy2018_EBEE_forUse, bkg_c_dielectron_Legacy2018_EBEE_forUse,bkg_d_dielectron_Legacy2018_EBEE_forUse,bkg_e_dielectron_Legacy2018_EBEE_forUse, bkg_b2_dielectron_Legacy2018_EBEE_forUse, bkg_c2_dielectron_Legacy2018_EBEE_forUse,bkg_d2_dielectron_Legacy2018_EBEE_forUse,bkg_thr_dielectron_Legacy2018_EBEE_forUse,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_fullRange(massFullRange, bkg_a_dielectron_Legacy2018_EBEE_forUse, bkg_b_dielectron_Legacy2018_EBEE_forUse, bkg_c_dielectron_Legacy2018_EBEE_forUse,bkg_d_dielectron_Legacy2018_EBEE_forUse,bkg_e_dielectron_Legacy2018_EBEE_forUse, bkg_b2_dielectron_Legacy2018_EBEE_forUse, bkg_c2_dielectron_Legacy2018_EBEE_forUse,bkg_d2_dielectron_Legacy2018_EBEE_forUse,bkg_thr_dielectron_Legacy2018_EBEE_forUse,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
else:
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_dielectron_Legacy2018_EBEE(mass_dielectron_Legacy2018_EBEE, bkg_a_dielectron_Legacy2018_EBEE, bkg_b_dielectron_Legacy2018_EBEE, bkg_c_dielectron_Legacy2018_EBEE,bkg_d_dielectron_Legacy2018_EBEE,bkg_e_dielectron_Legacy2018_EBEE, bkg_b2_dielectron_Legacy2018_EBEE, bkg_c2_dielectron_Legacy2018_EBEE,bkg_d2_dielectron_Legacy2018_EBEE,bkg_thr_dielectron_Legacy2018_EBEE,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
ws.factory("ZPrimeEleBkgPdf5::bkgpdf_fullRange(massFullRange, bkg_a_dielectron_Legacy2018_EBEE, bkg_b_dielectron_Legacy2018_EBEE, bkg_c_dielectron_Legacy2018_EBEE,bkg_d_dielectron_Legacy2018_EBEE,bkg_e_dielectron_Legacy2018_EBEE, bkg_b2_dielectron_Legacy2018_EBEE, bkg_c2_dielectron_Legacy2018_EBEE,bkg_d2_dielectron_Legacy2018_EBEE,bkg_thr_dielectron_Legacy2018_EBEE,bkg_syst_a_dielectron_Legacy2018_EBEE,bkg_syst_b_dielectron_Legacy2018_EBEE)")
return ws
|
[
"[email protected]"
] | |
372479be2663f3a0b763022ee4cd64facba7add6
|
5e0755091efd2d4ed61bead8aa38b45bab5a8b07
|
/python/anyascii/_data/_034.py
|
0c20f1bba793e6f964ca6785a7c6290389254b52
|
[
"ISC"
] |
permissive
|
casept/anyascii
|
c27261d87257c17c47fe0e9fc77438437de94c1c
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
refs/heads/master
| 2022-12-05T07:13:53.075144 | 2020-08-07T07:55:50 | 2020-08-07T07:55:50 | 285,904,577 | 0 | 0 |
ISC
| 2020-08-07T19:20:00 | 2020-08-07T19:19:59 | null |
UTF-8
|
Python
| false | false | 875 |
py
|
b='Qiu Tian Hei Kua Wu Yin Yi Xie Chou Nuo Dan Dai Xu Xing Xiong Liu Lin Xiang Yong Xin Zhen Dai Wu Pan Ru Ma Qian Yi Yin Nei Cheng Feng Taai Zhuo Fang Ao Wu Zuo Zhou Dong Su Yi Qiong Kuang Lei Nao Zhu Shu Xu Shen Jie Die Nuo Su Yi Long Ying Beng Lan Miao Yi Li Ji Yu Luo Chai Nom Hun Xu Hui Rao Zhou Caam Han Xi Tai Yao Hui Jun Ma Lue Tang Yao Zhao Zhai Yu Zhuo Er Ran Qi Chi Wu Han Tang Se Si Qiong Lei Sa Kui Pu Ta Shu Yang Ou Tai Mian Yin Diao Yu Mie Jun Niao Xie You Che Feng Lei Li Sin Luo Sek Ji Kwaan Jip Quan Cai Liang Gu Mao Gung Gua Sui Din Wang Mao Man Quan Shi Li Wang Kou Du Zhen Ting Bing Huo Dong Gong Cheng Qin Jiong Lu Xing Nan Xie Mie Bi Jie Su Hung Gong Gung You Xing Qia Pi Dian Fu Luo Qia Qia Tang Bai Gan Ci Xuan Lang Fu She Diu Li Hua Tou Pian Di Ruan E Qie Yi Zhuo Rui Jian Gong Chi Chong Xi'
|
[
"[email protected]"
] | |
33a273361603bc9162e66b2ca04a3dc441178775
|
8ef477149fdd8cd8c0ad88f160e2b8a445550a1e
|
/base_ecommerce_v13/models/stock.py
|
86a7f7c483c94f799c99fe122bff69e115aeeda4
|
[] |
no_license
|
cokotracy/ebay_connector_v13
|
97f3e23ba951f14457514b71b408b389a7c16dc7
|
3c08603875d464e5bee818091fa704f5f7192499
|
refs/heads/master
| 2022-07-19T21:15:23.065880 | 2020-05-20T15:20:23 | 2020-05-20T15:20:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
from odoo import api, fields, models, _
import odoo.netsvc
class stock_picking(models.Model):
_inherit = "stock.picking"
shop_id = fields.Many2one('sale.shop', string='Shop')
|
[
"https://[email protected]"
] |
https://[email protected]
|
4889cedcd8425ab04971c8f8ff25cf9734da01c9
|
e5e8553fe434f399d24c1a8f981d5d258574e4af
|
/universal_landmark_detection/model/networks/globalNet.py
|
0b2bab7140532e0dbb277efecde6822b98a6b8f8
|
[
"MIT"
] |
permissive
|
egozoro/YOLO_Universal_Anatomical_Landmark_Detection
|
4dee00cf4ee140fa373b2dafeea8c95b69ff66c7
|
465a3d6afcdb23fdec609efe336beebdc9ed61f4
|
refs/heads/main
| 2023-08-24T02:04:40.543561 | 2021-10-07T13:58:16 | 2021-10-07T13:58:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,472 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class myConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1):
super(myConv2d, self).__init__()
padding = (kernel_size-1)//2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, padding=padding)
def forward(self, x):
return self.conv(x)
class dilatedConv(nn.Module):
''' stride == 1 '''
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
super(dilatedConv, self).__init__()
# f = (kernel_size-1) * d +1
# new_width = (width - f + 2 * padding)/stride + 1
padding = (kernel_size-1) * dilation // 2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size, dilation=dilation, padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class GlobalNet(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=0.25, kernel_size=3, dilations=None):
super(GlobalNet, self).__init__()
self.scale_factor = scale_factor
if not isinstance(in_channels, list):
in_channels = [in_channels]
if not isinstance(out_channels, list):
out_channels = [out_channels]
mid_channels = 128
if dilations is None:
dilations = [1, 2, 5]
for i, n_chan in enumerate(in_channels):
setattr(self, 'in{i}'.format(i=i),
myConv2d(n_chan, mid_channels, 3))
for i, n_chan in enumerate(out_channels):
setattr(self, 'out{i}'.format(i=i),
myConv2d(mid_channels, n_chan, 1))
convs = [dilatedConv(mid_channels, mid_channels,
kernel_size, dilation) for dilation in dilations]
convs = nn.Sequential(*convs)
setattr(self, 'convs{}'.format(i), convs)
def forward(self, x, task_idx=0):
size = x.size()[2:]
sf = self.scale_factor
x = F.interpolate(x, scale_factor=sf)
x = getattr(self, 'in{}'.format(task_idx))(x)
x = getattr(self, 'convs{}'.format(task_idx))(x)
x = getattr(self, 'out{}'.format(task_idx))(x)
x = F.interpolate(x, size=size)
return {'output': torch.sigmoid(x)}
|
[
"[email protected]"
] | |
847b6985e225a008a43c6455650f7843ffa2f1d6
|
99697559d046cdd04dd9068bd518e4da4177aaa2
|
/Finish/M019_Remove_Nth_Node_From_End_of_List.py
|
d1a7c1a10eb90195f94c135192f33c5f6de79380
|
[] |
no_license
|
Azurisky/Leetcode
|
3e3621ef15f2774cfdfac8c3018e2e4701760c3b
|
8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04
|
refs/heads/master
| 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 668 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
first = head
ans = second = ListNode(0)
second.next = head
for i in range(n):
first = first.next
while first:
first = first.next
second = second.next
second.next = second.next.next
return ans.next
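# A small driver sketch (added for illustration, not part of the original solution); it
# defines the ListNode class that is only commented out above, builds 1->2->3->4->5,
# removes the 2nd node from the end, and prints 1 2 3 5.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
def _build(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head
node = Solution().removeNthFromEnd(_build([1, 2, 3, 4, 5]), 2)
while node:
    print(node.val)
    node = node.next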
|
[
"[email protected]"
] | |
f8b639c29269c2aae5cac88c606ca55cac9e1ec6
|
1d871064c463d4e55c6eec8e479dd3a594e07593
|
/tests/sequence_classification_tests/dataset_test.py
|
d892a00ba89dddefe0212dea0065e6d00cbed7d5
|
[
"Apache-2.0"
] |
permissive
|
little-bigtiger/transformers-keras
|
76ad5f67e71a1286971735208a13b2b235afc281
|
d8712a21e0a34a3f26d1e48459d7505c96931a5d
|
refs/heads/master
| 2023-08-17T04:19:05.080257 | 2021-09-29T03:08:42 | 2021-09-29T03:08:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,295 |
py
|
import unittest
from transformers_keras.sequence_classification.dataset import SequenceClassificationDataset
class DatasetTest(unittest.TestCase):
"""Dataset test."""
def test_sequence_classification_dataset_examples(self):
print()
print("====from_jsonl_files")
d = SequenceClassificationDataset.from_jsonl_files(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt", batch_size=2
)
print(next(iter(d)))
print("====jsonl_to_examples")
examples = SequenceClassificationDataset.jsonl_to_examples(
"testdata/sequence_classify.jsonl", vocab_file="testdata/vocab.bert.txt"
)
for i in range(2):
print(examples[i])
print("====from_examples")
d = SequenceClassificationDataset.from_examples(examples, batch_size=2)
print(next(iter(d)))
print("====examples_to_tfrecord")
SequenceClassificationDataset.examples_to_tfrecord(examples, output_files=["testdata/sequence_classify.tfrecord"])
print("====from_tfrecord_files")
d = SequenceClassificationDataset.from_tfrecord_files("testdata/sequence_classify.tfrecord", batch_size=2)
print(next(iter(d)))
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
1c29f61d1b995d9e89dc5f3a52a350d1a6308b32
|
3ab7695bfc34355ba579bc43b9fea396933514dc
|
/dbaas_cloudstack/util/models.py
|
79546d1939ff00c64b02437d9629be07d551d26d
|
[] |
no_license
|
globocom/dbaas-cloudstack
|
d59ee6147235c5933eb5fa36a3047c61a9de9e5a
|
8445dde83c231a6af932ef179821c3e0b62485ff
|
refs/heads/master
| 2023-01-05T00:18:36.304237 | 2018-04-18T20:51:30 | 2018-04-18T20:51:30 | 18,342,699 | 8 | 1 | null | 2022-12-26T19:43:38 | 2014-04-01T20:02:04 |
Python
|
UTF-8
|
Python
| false | false | 769 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
class BaseModel(models.Model):
"""Base model class"""
created_at = models.DateTimeField(verbose_name=_("created_at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated_at"), auto_now=True)
class Meta:
abstract = True
def __unicode__(self):
if hasattr(self, 'name'):
result = "%s" % self.name
if hasattr(self, 'region'):
result = result + " - %s" % self.region
return result
elif hasattr(self, '__unicode__'):
result = self.__unicode__()
return result
|
[
"[email protected]"
] | |
113a7bac74c95751ac2210ca63365a1b2e1fd96e
|
42b61bf376b172a36759e6c3264562e585630d47
|
/ascent/wsgi.py
|
e0b9ba7dc44cd2c055733c9d3a2cd7ec618e6449
|
[] |
no_license
|
destinymalone/ascent
|
9ad4d9e1a0db017c6ff8d4820fb92e46b0f282e6
|
526227115ce6703f66f4c39a7bb2e12153427757
|
refs/heads/master
| 2020-09-23T05:03:44.071148 | 2020-01-27T14:54:59 | 2020-01-27T14:54:59 | 225,411,341 | 1 | 0 | null | 2019-12-05T18:50:43 | 2019-12-02T15:44:27 | null |
UTF-8
|
Python
| false | false | 389 |
py
|
"""
WSGI config for ascent project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ascent.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
13284b6e642981ad9bab7c9f4b5bd1a8b32ea645
|
88553181929aa251bc5ae02d81f17970249bca40
|
/django/angular/aenv/Scripts/painter.py
|
f7b0e2584de0e394bb34f5a8be649eb0ce7ac5de
|
[] |
no_license
|
reddymadhira111/Python
|
2d9c3e5dba8238df6be9a67d422468ac1ca16d35
|
0f1a0c87748e67a879cd8c31eda7b65c69c5d648
|
refs/heads/master
| 2022-11-05T07:23:30.211706 | 2017-06-13T19:11:25 | 2017-06-13T19:11:25 | 94,243,400 | 0 | 1 | null | 2022-10-29T10:56:29 | 2017-06-13T18:13:59 |
Python
|
UTF-8
|
Python
| false | false | 2,205 |
py
|
#!c:\users\reddy\desktop\python\django\angular\aenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
def __init__(self, master, image):
tkinter.Canvas.__init__(self, master,
width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=tkinter.NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
|
[
"reddymadhira111gmail.com"
] |
reddymadhira111gmail.com
|
62af4afbd3bef2ec5d05634ac926d9d41c739692
|
4a9dada02c749e9e5277fe1e35357d7b2b28ad5c
|
/顾天媛2018010980/操作系统实验/作业3.py
|
7e4ba1e0148c936d956319f37d9a32e6f1f3efc9
|
[] |
no_license
|
wanghan79/2020_Option_System
|
631cc80f52829390a128a86677de527472470348
|
f37b870614edf7d85320da197d932df2f25a5720
|
refs/heads/master
| 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 |
Python
|
UTF-8
|
Python
| false | false | 1,037 |
py
|
# !/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: multiprocessing
Created: 26/6/2020
"""
# Assignment 3: create multiple processes in Python; hint: use the built-in multiprocessing package
from multiprocessing import Pool
import time, os
from random import random
def task(task_name):
print('开始做任务啦!', task_name)
start = time.time()
time.sleep(random() * 2)
end = time.time()
print('完成任务:{}! 耗时:{} ,进程ID:{}'.format(task_name, (end - start), os.getpid()))
# container for results collected by the callback
container = []
def callback_func(n):
container.append(n)
if __name__ == '__main__':
pool = Pool(5)
tasks = ['听音乐', '看电影', '读书', '看报', '玩游戏', '打篮球', '弹钢琴']
for t in tasks:
pool.apply_async(task, args=(t,))
pool.close() # close the pool so that no new tasks can be submitted
pool.join() # wait for all worker processes in the pool to finish
print('------' * 10)
for c in container:
print(c)
print('over!!!!!!!!!!')
|
[
"[email protected]"
] | |
b7e58fa1fcf498998c1ed4a723ca290fc280f15c
|
23514a0e2baf6da053690dd511f1eef75a573e6b
|
/log-mining/com/haodou/log-mining/log/searchKeyword45.py
|
57442a3fa8cb219cd0a04d9be9ebe10fd6f97859
|
[] |
no_license
|
rainly/scripts-1
|
b5f31880a1a917df23e4c110bb7661685851eff5
|
3ef01a58162b94fb36cdd38581c899d8a118eda0
|
refs/heads/master
| 2020-07-12T01:41:31.491344 | 2019-08-27T08:50:10 | 2019-08-27T08:50:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,296 |
py
|
#encoding=utf-8
#
# customized specifically for version 4.5
import sys
import operator
sys.path.append("./")
sys.path.append("../")
sys.path.append("../abtest")
import column
import column2
from clickCount import *
from cardClick import *
CardFix="##Card##_"
def getRidPos(ret):
rids={}
if "rids" in ret:
size=FirstPageNum
if len(ret["rids"]) < FirstPageNum:
size=len(ret["rids"])
for i in range(size):
rids[ret["rids"][i]]=i
return rids
THead=".html?id="
THeadLen=len(THead)
inn=0
def searchFollowMapper(f):
lastIP=""
rids={}
oc=0
otherRids={}
otherAids={}
otherTids={}
searchRet={}
click=None
keywordClick={}
lastK=""
hc=0
keyword=""
cardBank=CardBank()
for line in f:
if True:
cols=line.strip().split("\t")
if len(cols) < 3:
continue
ip=cols[0]
if lastIP == "":
lastIP=ip
if lastIP != ip:
lastIP=ip
keyword=""
rids={}
uc={}
otherRids={}
otherAids={}
otherTids={}
searchRet={}
click=None
lastSearchRet={}
lastClick=None
cardBank.reset()
if line.find("m.haodou.com") > 0 and line.find("GET") > 0:
cols=cols[2].strip().split("\01")
if len(cols) < 5:
continue
p=cols[4].find(THead)
if p > 0:
end=cols[4].find("&",p+THeadLen)
if end < 0:
end=len(cols[4])
id=cols[4][p+THeadLen:end]
if id !="":
if "topicId" in searchRet and searchRet["topicId"] == id:
click.addTopicHit(id)
oc+=1
if "ttitle" in searchRet:
title=searchRet["ttitle"]
click.addTopicTitleHit(title,keyword)
elif id in otherTids:
oc+=1
click.getMs().tb+=1
else:
if cols[4].find("uuid") > 0:
sys.stderr.write(line)
mtid=id
continue
if len(cols) < column.APP_LOG_COLUMNS+1:
continue
version=cols[column.VERSION_CID+1]
v=column.intVersion(version)
if v < 400:
continue
u=column.uuidFirst(cols[1:]) # get the uuid
if u == None or u.find("{") >= 0:
u=""
para=cols[column.PARA_ID+1] # get the request parameters
method=cols[column.METHOD_CID+1] # get the requested API method
hasSearch=False
if method == 'search.getsearchindex': # search entry point
keyword=V45Fix+column.getValue(para,"keyword") # the search keyword
hasSearch=True
searchRet=column2.FuncMap[method](cols[-1]) # get the recipe list returned by the search
rids=getRidPos(searchRet)
if keyword not in keywordClick:
keywordClick[keyword]=Click()
click=keywordClick[keyword]
click.addSearchRet(searchRet,keyword)
cardBank.addSearch(keyword[len(V45Fix):],searchRet)
otherRids={}
otherAids={}
otherTids={}
if "rids" in searchRet:
for i in range(FirstPageNum,len(searchRet["rids"]),1):
otherRids[searchRet["rids"][i]]=i
elif method == "search.getlist" and (v < 450 or v >= 480):
offset=column.getValue(para,"offset")
scene=column.getValue(para,"scene")
if scene != "k1":
continue
#keyword=column.getValue(para,"keyword") # the search keyword
searchRet=column2.FuncMap[method](cols[-1]) #
if offset != "0":
kw=column.getValue(para,"keyword")
if kw != keyword:
continue
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
otherRids[searchRet["rids"][i]]=i
continue
keyword=column.getValue(para,"keyword")
hasSearch=True
rids=getRidPos(searchRet)
otherRids={}
#otherAids={}
#otherTids={}
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
if i >= FirstPageNum:
otherRids[searchRet["rids"][i]]=i
continue
rids[searchRet["rids"][i]]=i
if keyword not in keywordClick:
keywordClick[keyword]=Click()
click=keywordClick[keyword]
click.addSearchRet(searchRet,keyword)
cardBank.addSearch(keyword,searchRet,False)
if hasSearch:
if lastK != "":
keywordClick[lastK].addHitCount(hc)
if hc + oc > 0:
keywordClick[lastK].addHasHit()
hc=0
oc=0
lastK=keyword
if click == None:
continue
if method == "info.getinfo":
rid=column.getValue(para,"rid") # get the clicked recipe id
pos=-1
if rid in rids:
click.addRecipeHit(rid,rids[rid])
kw=keyword
if v >= 450:
kw=keyword[len(V45Fix):]
cardBank.addHit(kw,rid,rids[rid],(v >= 450))
hc+=1
pos=rids[rid]
elif rid in otherRids:
oc+=1
click.getMs().rb+=1
if pos >= 0:
ret=column2.FuncMap[method](cols[-1])
if ret == None:
pass
#sys.stderr.write(cols[-1])
elif "title" in ret:
title=ret["title"]
click.addTitleHit(title,pos,keyword)
elif method == "info.getfoodinfo":
fid=column.getValue(para,"foodid")
if fid != "" and "food" in searchRet and fid == searchRet["food"]:
click.addFoodHit(fid)
oc+=1
elif v >= 450:
if method == "info.getalbuminfo":
id=column.getValue(para,"aid") # get the clicked album id
if id !="":
if "aid" in searchRet and searchRet["aid"]==id :
click.addAlbumHit(id)
oc+=1
if "atitle" in searchRet:
title=searchRet["atitle"]
click.addAlbumTitleHit(title,keyword)
elif id in otherAids:
oc+=1
click.getMs().ab+=1
elif method == "search.gettags":
kw=V45Fix+column.getValue(para,"keyword") # the search keyword
if kw != keyword:
continue
tags=column2.FuncMap[method](cols[-1])
for tagid in tags:
if tagid not in click.getMs().rTagShow:
click.getMs().rTagShow[tagid]=1
else:
click.getMs().rTagShow[tagid]+=1
elif method == "search.getlist":
offset=column.getValue(para,"offset")
scene=column.getValue(para,"scene")
if scene != "k1":
continue
kw=V45Fix+column.getValue(para,"keyword") # the search keyword
if kw != keyword:
continue
searchRet=column2.FuncMap[method](cols[-1]) #
if "rids" in searchRet:
for i in range(len(searchRet["rids"])):
otherRids[searchRet["rids"][i]]=i
if offset != "0":
continue
tagid=column.getValue(para,"tagid")
if tagid == "" or tagid == "null":
click.getMs().r+=1
else:
if tagid not in click.getMs().rt:
click.getMs().rt[tagid]=1
else:
click.getMs().rt[tagid]+=1
click.getMs().rtn=1
elif method == "search.getalbumlist":
offset=column.getValue(para,"offset")
kw=V45Fix+column.getValue(para,"keyword") # the search keyword
if kw != keyword:
continue
ret=column2.getList(cols[-1],"AlbumId")
for aid in ret:
otherAids[aid]=1
if offset == "0":
click.getMs().a+=1
elif method == "search.gettopiclist":
offset=column.getValue(para,"offset")
kw=V45Fix+column.getValue(para,"keyword") # the search keyword
if kw != keyword:
continue
ret=column2.getList(cols[-1],"TopicId","int")
for tid in ret:
#sys.stderr.write("tid:"+tid+"\n")
otherTids[tid]=1
if offset == "0":
click.getMs().t+=1
if lastK != "":
keywordClick[lastK].addHitCount(hc)
if hc + oc > 0:
keywordClick[lastK].addHasHit()
ck45=Click()
ck44=Click()
for kw in keywordClick:
if kw.startswith(V45Fix):
ck45.merge(keywordClick[kw])
else:
ck44.merge(keywordClick[kw])
print kw+"\t"+str(keywordClick[kw])
print "ck45_##total##"+"\t"+str(ck45)
print "ck44_##total##"+"\t"+str(ck44)
for card in cardBank.bank:
print CardFix+str(cardBank.bank[card])
def searchFollowReducer(f):
lastK=""
ck=Click()
for line in sys.stdin:
cols=line.split("\t")
kw=cols[0]
# empty search keyword
if kw.strip() == "":
kw="[VOID]"
if lastK == "":
lastK=kw
if kw.startswith(CardFix):
cc=CardClick(kw)
if lastK != kw:
if lastK.startswith(CardFix):
print cc
else:
print lastK+"\t"+str(ck)
if kw.startswith(CardFix):
cc=CardClick(kw)
else:
ck=Click()
lastK=kw
try:
if line.startswith(CardFix):
tcc=readCardClick(cols)
cc.merge(tcc)
else:
(kw,tck)=readClick(cols)
ck.merge(tck)
except:
sys.stderr.write(line)
if lastK != "":
if lastK.startswith(CardFix):
print cc
else:
print lastK+"\t"+str(ck)
if __name__=="__main__":
if sys.argv[1] == "map":
searchFollowMapper(sys.stdin)
elif sys.argv[1] == "reduce":
searchFollowReducer(sys.stdin)
|
[
"[email protected]"
] | |
d8c7a38a8aeec36a0e7f7cb277b9cbcea34d348d
|
1fbb308b15a83fd53d7d1eeee1ad103d3ba36ac4
|
/venv/bin/pip
|
87b74d7a8f139ff92ee41d635bc4ef98e35fbf19
|
[] |
no_license
|
jcohen66/twit
|
a2b585f523a7168c1438c4eea6cf872d7dc3a985
|
3d164947a63d3102b2b34c86199b26b2f17cc4db
|
refs/heads/master
| 2023-03-25T01:29:25.814931 | 2021-03-16T00:33:07 | 2021-03-16T00:33:07 | 347,211,087 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 257 |
#!/Users/jcohen66/PycharmProjects/twit/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
4266bb3abd19427b2cd8572b67e7b4af4d73d196
|
54708c482b13760fc4a747274b62849b5e8b8277
|
/leetcode_python/Hash_table/contains-duplicate-ii.py
|
cc82bea65b88eb03d3c4b67b958a77ccc25b4d1e
|
[] |
no_license
|
DataEngDev/CS_basics
|
6520818caa61609eae9b026fb5b25ef9e4ea6961
|
05e8f5a4e39d448eb333c813093fc7c1df4fc05e
|
refs/heads/master
| 2023-03-02T18:58:42.469872 | 2021-02-14T02:23:27 | 2021-02-14T02:23:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,021 |
py
|
"""
Given an array of integers and an integer k, find out whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k.
Example 1:
Input: nums = [1,2,3,1], k = 3
Output: true
Example 2:
Input: nums = [1,0,1,1], k = 1
Output: true
Example 3:
Input: nums = [1,2,3,1,2,3], k = 2
Output: false
"""
# Time: O(n)
# Space: O(n)
#
# Given an array of integers and an integer k, return true if
# and only if there are two distinct indices i and j in the array
# such that nums[i] = nums[j] and the difference between i and j is at most k.
#
# V0
# V1
# https://blog.csdn.net/coder_orz/article/details/51674266
# IDEA : HASH TABLE
class Solution(object):
def containsNearbyDuplicate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
num_map = {}
for i in range(len(nums)):
if nums[i] in num_map and i - num_map[nums[i]] <= k:
return True
else:
num_map[nums[i]] = i
return False
# V1'
# https://blog.csdn.net/coder_orz/article/details/51674266
# IDEA : SET
# IDEA : SET OPERATION
# In [12]: window = set([1,3,4,])
# ...:
# In [13]: window
# Out[13]: {1, 3, 4}
# In [14]: window.discard(1)
# In [15]: window
# Out[15]: {3, 4}
# In [16]: window.discard(3)
# In [17]: window
# Out[17]: {4}
class Solution(object):
def containsNearbyDuplicate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
window = set([])
for i in range(len(nums)):
if i > k:
window.discard(nums[i-k-1])
if nums[i] in window:
return True
else:
window.add(nums[i])
return False
# V1'
# https://www.jiuzhang.com/solution/contains-duplicate-ii/#tag-highlight-lang-python
class Solution:
"""
@param nums: the given array
@param k: the given number
@return: whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k
"""
def containsNearbyDuplicate(self, nums, k):
# Write your code here
dic = {}
for index, value in enumerate(nums):
if value in dic and index - dic[value] <= k:
return True
dic[value] = index
return False
# V2
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {boolean}
def containsNearbyDuplicate(self, nums, k):
lookup = {}
for i, num in enumerate(nums):
if num not in lookup:
lookup[num] = i
else:
# If the value occurred before, check the index difference.
if i - lookup[num] <= k:
return True
# Update the index of the value.
lookup[num] = i
return False
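# Illustrative driver code (hypothetical values, exercising the last Solution class defined above);
# expected output: True for [1,2,3,1] with k=3 (duplicate 1s at indices 0 and 3), then False for
# [1,2,3,1,2,3] with k=2 (closest duplicates are 3 indices apart).
s = Solution()
print(s.containsNearbyDuplicate([1, 2, 3, 1], 3))
print(s.containsNearbyDuplicate([1, 2, 3, 1, 2, 3], 2))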
|
[
"[email protected]"
] | |
af950af06cd4d8cb80ff701f37666dc4d78deba6
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Matlibplots/samples2/axes_demo.py
|
4d3fa3d8cc09e86facd7b5e96a3059c143a7f1e4
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 771 |
py
|
#!/usr/bin/env python
from pylab import *
# create some data to use for the plot
dt = 0.001
t = arange(0.0, 10.0, dt)
r = exp(-t[:1000] / 0.05) # impulse response
x = randn(len(t))
s = convolve(x, r)[:len(x)] * dt # colored noise
# the main axes is subplot(111) by default
plot(t, s)
axis([0, 1, 1.1 * amin(s), 2 * amax(s) ])
xlabel('time (s)')
ylabel('current (nA)')
title('Gaussian colored noise')
# this is an inset axes over the main axes
a = axes([.65, .6, .2, .2], axisbg='y')
n, bins, patches = hist(s, 400, normed=1)
title('Probability')
setp(a, xticks=[], yticks=[])
# this is another inset axes over the main axes
a = axes([0.2, 0.6, .2, .2], axisbg='y')
plot(t[:len(r)], r)
title('Impulse response')
setp(a, xlim=(0, .2), xticks=[], yticks=[])
show()
|
[
"[email protected]"
] | |
027f90da6ae7a9f981c03a08fedff984b0d56959
|
c309e7d19af94ebcb537f1e8655c0122dbe0cb13
|
/Chapter03/01-chapter-content/read_video_file_all_properties.py
|
db4569016c150e9cb77c318253011c5af90209bc
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-OpenCV-4-with-Python
|
0fb82c88cb7205c7050c8db9f95a6deb3b1b3333
|
4194aea6f925a4b39114aaff8463be4d18e73aba
|
refs/heads/master
| 2023-03-07T04:51:16.071143 | 2023-02-13T10:17:48 | 2023-02-13T10:17:48 | 151,057,527 | 375 | 226 |
MIT
| 2022-08-27T13:32:19 | 2018-10-01T08:27:29 |
Python
|
UTF-8
|
Python
| false | false | 3,750 |
py
|
"""
Example to introduce how to read a video file and get all properties
"""
# Import the required packages:
import cv2
import argparse
def decode_fourcc(fourcc):
"""Decodes the fourcc value to get the four chars identifying it"""
# Convert to int:
fourcc_int = int(fourcc)
# We print the int value of fourcc
print("int value of fourcc: '{}'".format(fourcc_int))
# We can also perform this in one line:
# return "".join([chr((fourcc_int >> 8 * i) & 0xFF) for i in range(4)])
fourcc_decode = ""
for i in range(4):
int_value = fourcc_int >> 8 * i & 0xFF
print("int_value: '{}'".format(int_value))
fourcc_decode += chr(int_value)
return fourcc_decode
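# Worked example (descriptive comment, values chosen for illustration): the FOURCC 'MJPG' packs the
# byte values 77 ('M'), 74 ('J'), 80 ('P'), 71 ('G') little-endian into the integer 1196444237,
# so decode_fourcc(1196444237.0) returns 'MJPG'.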
# We first create the ArgumentParser object
# The created object 'parser' will have the necessary information
# to parse the command-line arguments into data types.
parser = argparse.ArgumentParser()
# We add 'video_path' argument using add_argument() including a help.
parser.add_argument("video_path", help="path to the video file")
args = parser.parse_args()
# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
capture = cv2.VideoCapture(args.video_path)
# Get and print these values:
print("CV_CAP_PROP_FRAME_WIDTH: '{}'".format(capture.get(cv2.CAP_PROP_FRAME_WIDTH)))
print("CV_CAP_PROP_FRAME_HEIGHT : '{}'".format(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print("CAP_PROP_FPS : '{}'".format(capture.get(cv2.CAP_PROP_FPS)))
print("CAP_PROP_POS_MSEC : '{}'".format(capture.get(cv2.CAP_PROP_POS_MSEC)))
print("CAP_PROP_POS_FRAMES : '{}'".format(capture.get(cv2.CAP_PROP_POS_FRAMES)))
print("CAP_PROP_FOURCC : '{}'".format(decode_fourcc(capture.get(cv2.CAP_PROP_FOURCC))))
print("CAP_PROP_FRAME_COUNT : '{}'".format(capture.get(cv2.CAP_PROP_FRAME_COUNT)))
print("CAP_PROP_MODE : '{}'".format(capture.get(cv2.CAP_PROP_MODE)))
print("CAP_PROP_BRIGHTNESS : '{}'".format(capture.get(cv2.CAP_PROP_BRIGHTNESS)))
print("CAP_PROP_CONTRAST : '{}'".format(capture.get(cv2.CAP_PROP_CONTRAST)))
print("CAP_PROP_SATURATION : '{}'".format(capture.get(cv2.CAP_PROP_SATURATION)))
print("CAP_PROP_HUE : '{}'".format(capture.get(cv2.CAP_PROP_HUE)))
print("CAP_PROP_GAIN : '{}'".format(capture.get(cv2.CAP_PROP_GAIN)))
print("CAP_PROP_EXPOSURE : '{}'".format(capture.get(cv2.CAP_PROP_EXPOSURE)))
print("CAP_PROP_CONVERT_RGB : '{}'".format(capture.get(cv2.CAP_PROP_CONVERT_RGB)))
print("CAP_PROP_RECTIFICATION : '{}'".format(capture.get(cv2.CAP_PROP_RECTIFICATION)))
print("CAP_PROP_ISO_SPEED : '{}'".format(capture.get(cv2.CAP_PROP_ISO_SPEED)))
print("CAP_PROP_BUFFERSIZE : '{}'".format(capture.get(cv2.CAP_PROP_BUFFERSIZE)))
# Check if camera opened successfully
if capture.isOpened() is False:
print("Error opening video stream or file")
# Read until video is completed
while capture.isOpened():
# Capture frame-by-frame
ret, frame = capture.read()
if ret is True:
# Print current frame number per iteration
print("CAP_PROP_POS_FRAMES : '{}'".format(capture.get(cv2.CAP_PROP_POS_FRAMES)))
# Get the timestamp of the current frame in milliseconds
print("CAP_PROP_POS_MSEC : '{}'".format(capture.get(cv2.CAP_PROP_POS_MSEC)))
# Display the resulting frame
cv2.imshow('Original frame', frame)
# Convert the frame to grayscale:
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the grayscale frame
cv2.imshow('Grayscale frame', gray_frame)
# Press q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# Release everything:
capture.release()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
418631b09fa5046da457ec6b6d12fde3127fe3a9
|
fab14fae2b494068aa793901d76464afb965df7e
|
/benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_13.py
|
e93f6811be066b40a1149e24f28d064d54e66d26
|
[
"MIT"
] |
permissive
|
teodorov/F3
|
673f6f9ccc25acdfdecbfc180f439253474ba250
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
refs/heads/master
| 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,279 |
py
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
return frozenset(res)
|
[
"[email protected]"
] | |
13cd64bc158351be30552a0174bbd7805c1ee073
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/ksskou001/question1.py
|
cbf7c2c649b94f8451d86b9956d6a62f42752649
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,146 |
py
|
'''This program uses a recursive function to show whether or not a string
is palindromic
By Hermann KOUASSI: KSSKOU001
On 3 May 2014'''
def pal(the_str):
'''check if a given string is palindromic'''
#in case empty string is the input
if len(the_str)==0:
print('Palindrome!')
#stop condition
elif len(the_str) == 2 or len(the_str)==3:
#when first character == last character
if the_str[0]==the_str[-1]:
print('Palindrome!')
#stop checking
else: print('Not a palindrome!')
#when more than 3 characters in string
else:
# call function to carry on checking if same first and last character
if the_str[0]==the_str[-1]:
#new string leaves out the first and last characters
pal(the_str[1:len(the_str)-1])
else:
#otherwise stop checking
print('Not a palindrome!')
def main():
'''main function'''
#get the string
the_str = input('Enter a string:\n')
#call palindromic function
pal(the_str)
if __name__=="__main__":
main()
|
[
"[email protected]"
] | |
856eaeeccbd6c812aee69d362e74757971e1e1b9
|
391937be6d8c8bcf5c3fe9fae8790024fc1e1727
|
/Starctf_2019/girlfriend/changeld.py
|
03c035cf38d567f31740c7a68a2268662cc4b46a
|
[] |
no_license
|
n132/Watermalon
|
a9d93d3f0d598de7f8ed9bbe13ed02af364f770f
|
79b4479458ae0884d9cdd52d317674298d601d0a
|
refs/heads/master
| 2022-05-13T21:04:08.108705 | 2022-04-08T23:29:48 | 2022-04-08T23:29:48 | 154,925,917 | 6 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,358 |
py
|
import os
from pwn import *
def change_ld(binary, ld):
"""
Force to use assigned new ld.so by changing the binary
"""
if not os.access(ld, os.R_OK):
log.failure("Invalid path {} to ld".format(ld))
return None
if not isinstance(binary, ELF):
if not os.access(binary, os.R_OK):
log.failure("Invalid path {} to binary".format(binary))
return None
binary = ELF(binary)
for segment in binary.segments:
if segment.header['p_type'] == 'PT_INTERP':
size = segment.header['p_memsz']
addr = segment.header['p_paddr']
data = segment.data()
if size <= len(ld):
log.failure("Failed to change PT_INTERP from {} to {}".format(data, ld))
return None
binary.write(addr, ld.ljust(size, '\0'))
if not os.access('/tmp/pwn', os.F_OK): os.mkdir('/tmp/pwn')
path = '/tmp/pwn/{}_debug'.format(os.path.basename(binary.path))
if os.access(path, os.F_OK):
os.remove(path)
info("Removing exist file {}".format(path))
binary.save(path)
os.chmod(path, 0b111000000) #rwx------
success("PT_INTERP has changed from {} to {}. Using temp file {}".format(data, ld, path))
return ELF(path)
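# Illustrative usage (hypothetical binary and loader paths for this challenge):
# elf = change_ld('./girlfriend', './ld-2.29.so')
# io = elf.process(env={'LD_PRELOAD': './libc-2.29.so'})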
|
[
"[email protected]"
] | |
db424bfcd1d912fa13fa0d54efe254c2af125fb6
|
93ccc1138fd28385e7cebf69fda8327cbf974d13
|
/test/generate_runtime.py
|
59710979953e46f6194872112714237b001bfb78
|
[] |
no_license
|
hbcbh1999/Phy-Net
|
09613fbd146b6c70e4b52e7e00837ae92c9e6ef8
|
06ba854b3f281027546b8acfdec29fdbe6eb6649
|
refs/heads/master
| 2021-01-19T22:53:19.430191 | 2017-04-07T19:10:32 | 2017-04-07T19:10:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,327 |
py
|
import math
import numpy as np
import tensorflow as tf
import sys
sys.path.append('../')
from model.ring_net import *
from model.loss import *
from utils.experiment_manager import make_checkpoint_path
from systems.fluid_createTFRecords import generate_feed_dict
import random
import time
from tqdm import *
FLAGS = tf.app.flags.FLAGS
# get restore dir
RESTORE_DIR = make_checkpoint_path(FLAGS.base_dir, FLAGS)
# shape of test simulation
shape = FLAGS.test_dimensions.split('x')
shape = map(int, shape)
def evaluate():
""" Eval the system"""
with tf.Graph().as_default():
# make inputs
state, boundary = inputs(empty=True)
state = state[0:1,0]
boundary = boundary[0:1,0]
# unwrap
y_1, small_boundary_mul, small_boundary_add, x_2, y_2 = continual_unroll_template(state, boundary)
# make variable to iterate
compressed_shape = [x / pow(2,FLAGS.nr_downsamples) for x in shape]
print(compressed_shape)
compressed_state_1 = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
small_boundary_mul_var = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
small_boundary_add_var = tf.Variable(np.zeros([1] + compressed_shape + [FLAGS.filter_size_compression], dtype=np.float32), trainable=False)
# make steps to init
assign_compressed_state_step = tf.group(compressed_state_1.assign(y_1))
assign_boundary_mul_step = tf.group(small_boundary_mul_var.assign(small_boundary_mul))
assign_boundary_add_step = tf.group(small_boundary_add_var.assign(small_boundary_add))
# computation!
compressed_state_1_boundary = (small_boundary_mul_var * compressed_state_1) + small_boundary_add_var
compressed_state_2 = compress_template(compressed_state_1_boundary)
run_step = tf.group(compressed_state_1.assign(compressed_state_2))
state_out = decoding_template(compressed_state_2)
# restore network
init = tf.global_variables_initializer()
#variables_to_restore = tf.trainable_variables()
#saver = tf.train.Saver(variables_to_restore)
sess = tf.Session()
sess.run(init)
#ckpt = tf.train.get_checkpoint_state(RESTORE_DIR)
#if ckpt and ckpt.model_checkpoint_path:
# print("restoring file from " + ckpt.model_checkpoint_path)
# saver.restore(sess, ckpt.model_checkpoint_path)
#else:
# print("no chekcpoint file found from " + RESTORE_DIR + ", this is an error")
# exit()
# make fake zero frame to test on
state_feed_dict = np.zeros([1]+shape+[FLAGS.lattice_size])
boundary_feed_dict = np.zeros([1]+shape+[1])
feed_dict = {state:state_feed_dict, boundary:boundary_feed_dict}
assign_compressed_state_step.run(session=sess, feed_dict=feed_dict)
assign_boundary_mul_step.run(session=sess, feed_dict=feed_dict)
assign_boundary_add_step.run(session=sess, feed_dict=feed_dict)
run_step.run(session=sess)
# open file to log results
with open("figs/" + "runtime_log.txt", "a") as myfile:
# run no state_out
t = time.time()
run_length = 1000
for step in tqdm(xrange(run_length)):
run_step.run(session=sess)
elapsed = time.time() - t
print("time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
myfile.write("no decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
# run with state out
t = time.time()
run_length = 1000
for step in tqdm(xrange(run_length)):
run_step.run(session=sess)
state_out.eval(session=sess)
elapsed = time.time() - t
print("with decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape))
myfile.write("with decompression time per " + str(run_length) + " step is " + str(elapsed) + " with shape " + str(shape) + " and compression shape " + str(compressed_shape) + "\n")
def main(argv=None): # pylint: disable=unused-argument
evaluate()
if __name__ == '__main__':
tf.app.run()
|
[
"[email protected]"
] | |
a127a74237962f2453d6466dd7901670b2f745be
|
c67b74a8de4d60f2aba025dfba8351996cdaa46c
|
/tkinter/other/autologging/grid_layout/target_ui.py
|
f97b6f974a38b238aa9d4c893c0ccf4e81479fff
|
[] |
no_license
|
texttest/storytext-selftest
|
a30a58a0ab75fd26e60056222150cf6ae773470e
|
438977bf044c1ebc98089b667f0ae3d835bc7f37
|
refs/heads/master
| 2020-04-26T18:16:20.242220 | 2015-02-17T15:53:27 | 2015-02-17T15:53:27 | 173,739,750 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 475 |
py
|
# File: hello2.py
try:
from tkinter import *
except ImportError:
from Tkinter import *
class App:
def __init__(self, master):
Label(master, text="Top Left").grid(row=0, column=0)
Label(master, text="Top Right").grid(row=0, column=1, rowspan=3)
Label(master, text="Bottom Left").grid(row=1, column=0)
Button(master, text="QUIT", fg="red", command=master.quit).grid(row=2, column=0)
root = Tk()
app = App(root)
root.mainloop()
|
[
"[email protected]"
] | |
fb98535b70b7993474b77e91b3feff3d46b9b3de
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/7BYXC8befjYqzhMsc_11.py
|
66660ac86c91acd077e3841dd8119a65d3ac8e96
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,737 |
py
|
"""
Kathleen owns a beautiful rug store. She likes to group the rugs into 4
mutually exclusive categories.
* imperfect
* horizontally symmetric
* vertically symmetric
* perfect
An **imperfect** rug is one that is **neither horizontally nor vertically
symmetric**. Here is an example of an **imperfect** rug:
[
["a", "a", "a", "a"],
["a", "a", "a", "a"],
["a", "a", "b", "b"]
]
The following is an **horizontally symmetric** rug. You could "fold" the rug
across a hypothetical x-axis, and both sides would be identical. A
horizontally symmetric rug is **not** vertically symmetric (otherwise this rug
would be classified as **perfect** ).
[
["c", "a", "a", "a"],
["b", "b", "b", "b"],
["c", "a", "a", "a"]
]
The following is a **vertically symmetric** rug. You could "fold" the rug
across a hypothetical y-axis, and both sides would be identical. A vertically
symmetric is **not** horizontally symmetric (otherwise this rug would be
classified as **perfect** ).
[
["a", "b", "a"],
["b", "b", "b"],
["a", "b", "a"],
["a", "b", "a"]
]
Finally, a **perfect** rug is one that is **both vertically and horizontally
symmetric**. That is, folded either length-wise or width-wise will yield two
identical pieces.
[
["a", "b", "b", "a"],
["b", "b", "b", "b"],
["a", "b", "b", "a"]
]
Given a rug of `m x n` dimension, determine whether it is **imperfect,
horizontally symmetric, vertically symmetric or perfect**. Rugs are
represented using a two-dimensional list.
### Examples
classify_rug([
["a", "a"],
["a", "a"]
]) ➞ "perfect"
classify_rug([
["a", "a", "b"],
["a", "a", "a"],
["b", "a", "a"]
]) ➞ "imperfect"
classify_rug([
["b", "a"],
["b", "a"]
]) ➞ "horizontally symmetric"
classify_rug([
["a", "a"],
["b", "b"]
]) ➞ "vertically symmetric"
### Notes
You can consider a `1 x n` rug as being trivially **horizontally symmetric** ,
an `n x 1` rug as being trivially **vertically symmetric** , and a `1 x 1` rug
as being trivially **perfect**.
"""
def classify_rug(pattern):
p = pattern
c = [1,1]
for i in range(len(p)//2):
if not p[i]==p[-i-1]:
c[0]=0
break
for e in p:
if c[1]==0:
break
for j in range(len(e)//2):
if not e[j]==e[-j-1]:
c[1]=0
break
if sum(c)==0:
return "imperfect"
elif sum(c)==1:
if c[0]==1:
return "horizontally symmetric"
else:
return "vertically symmetric"
else:
return "perfect"
|
[
"[email protected]"
] | |
8f17fad612ed72e9207bd0bc6151fd98258a7479
|
c047518e0bc0be1d1a46b734fbf53610cb8a407f
|
/URI/1564.py
|
3cf5007b193896b8264fe8720ab63225560887f8
|
[] |
no_license
|
fernandozanutto/competitive_programming
|
c3e006544ddba1702a37eeb437cb015713e8c2d1
|
cf721a7bcce6c5d5fc9f739ad729079c939fc421
|
refs/heads/master
| 2020-06-19T06:57:32.288602 | 2020-04-04T14:58:45 | 2020-04-04T14:58:45 | 196,607,123 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
while True:
try:
x = int(input())
if x > 0:
print('vai ter duas!')
else:
print('vai ter copa!')
except EOFError:
break
|
[
"[email protected]"
] | |
6bafb7696525ab7f42a5402714c53c58186334ea
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/machinelearning/v20160501preview/get_web_service.py
|
0310879ee63fd0a9c81caf2427c8125f783e25fc
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 3,930 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWebServiceResult',
'AwaitableGetWebServiceResult',
'get_web_service',
]
@pulumi.output_type
class GetWebServiceResult:
"""
Instance of an Azure ML web service resource.
"""
def __init__(__self__, location=None, name=None, properties=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.WebServicePropertiesForGraphResponse':
"""
Contains the property payload that describes the web service.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebServiceResult(GetWebServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebServiceResult(
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_web_service(resource_group_name: Optional[str] = None,
web_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebServiceResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: Name of the resource group in which the web service is located.
:param str web_service_name: The name of the web service.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['webServiceName'] = web_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearning/v20160501preview:getWebService', __args__, opts=opts, typ=GetWebServiceResult).value
return AwaitableGetWebServiceResult(
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
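# Illustrative usage inside a Pulumi program (resource and output names are hypothetical):
# web_service = get_web_service(resource_group_name="my-rg", web_service_name="my-webservice")
# pulumi.export("webServiceLocation", web_service.location)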
|
[
"[email protected]"
] | |
9821c77472261b94840d40045f8030e18e1d5e13
|
927748a4de2b1388d83e554eb76deaa61c1ef167
|
/namer/admin.py
|
b47d64e5ce90b02485e6bdd8686abc92b7c2286c
|
[
"Apache-2.0"
] |
permissive
|
grahamgilbert/macnamer
|
f2486758ac25ce8fb93a9cabaa8d56600f7f5d71
|
809345a5c82d890ece2ee6a26e797f540561f49c
|
refs/heads/master
| 2021-06-06T20:50:29.651924 | 2014-11-01T12:06:29 | 2014-11-01T12:06:29 | 6,016,337 | 23 | 6 |
Apache-2.0
| 2019-03-01T01:36:12 | 2012-09-30T06:08:49 |
Python
|
UTF-8
|
Python
| false | false | 158 |
py
|
from django.contrib import admin
from namer.models import *
admin.site.register(Computer)
admin.site.register(ComputerGroup)
admin.site.register(Network)
|
[
"[email protected]"
] | |
cffd172dc336a08ee43dd702322e72e587f814a8
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/csrgxtu/maxent/src/basketball/CVKNN.py
|
5cd2ae78495e3c298550c337b20cafe503e6473c
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,299 |
py
|
#!/usr/bin/env python
# coding = utf-8
#
# Author: Archer Reilly
# Date: 23/DEC/2014
# File: CVKNN.py
# Desc: KNN -- K Nearest Neighbours, use KNN classifier
#
# Produced By CSRGXTU
import cv2
import numpy as np
from Utility import loadMatrixFromFile, loadSeasons, loadTeamIds
# buildTrainingSets
# build training sets from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTrainingSets(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if (float(row[1]) - float(row[2])) < 0:
leaguerank = 0
else:
leaguerank = 1
res.append([row[0], leaguerank])
return np.array(res).astype(np.float32)
# buildTrainingLabels
# build training labels from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTrainingLabels(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if row[3] == 'W':
WIN = 1
else:
WIN = 0
res.append([[WIN]])
return np.array(res).astype(np.float32)
# buildTestingSets
# build testing sets from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTestingSets(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if (float(row[1]) - float(row[2])) < 0:
leaguerank = 0
else:
leaguerank = 1
res.append([row[0], leaguerank])
return np.array(res).astype(np.float32)
# buildTestingLabels
# build testing labels from raw data file
#
# @param inputFile
# @return numpy.ndarray
def buildTestingLabels(inputFile):
res = []
mat = loadMatrixFromFile(inputFile)
for row in mat:
if row[3] == 'W':
WIN = 1
else:
WIN = 0
res.append([[WIN]])
return np.array(res).astype(np.float32)
# teamMain
# train and test for team
def teamMain():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
teamIds = loadTeamIds(DIR + 'teamidshortname.csv')
teamNames = [x[1] for x in loadMatrixFromFile(DIR + 'teamidshortname.csv')]
countTotal = 0
total = 0
for team in teamIds:
trainData = buildTrainingSets(DIR + team + '-train.csv')
trainLabels = buildTrainingLabels(DIR + team + '-train.csv')
testData = buildTestingSets(DIR + team + '-test.csv')
testLabels = buildTestingLabels(DIR + team + '-test.csv')
total = total + len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
# Accuracy
count = 0
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 11)
if results[0][0] == testLabels[i][0]:
count = count + 1
countTotal = countTotal + count
print 'INFO: Accuracy(', teamNames[teamIds.index(team)], ')', count/float(len(testLabels))
print 'INFO: Total Accuracy: ', countTotal/float(total)
# seasonMain
# train and test for seasons
def seasonMain():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
seasons = loadSeasons(DIR + 'seasons-18-Nov-2014.txt')
countTotal = 0
total = 0
for season in seasons:
trainData = buildTrainingSets(DIR + season + '-train.csv')
testData = buildTestingSets(DIR + season + '-test.csv')
trainLabels = buildTestingLabels(DIR + season + '-train.csv')
testLabels = buildTestingLabels(DIR + season + '-test.csv')
total = total + len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
# Accuracy
count = 0
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 11)
if results[0][0] == testLabels[i][0]:
count = count + 1
countTotal = countTotal + count
print 'INFO: Accuracy(', season, ')', count/float(len(testLabels))
print 'INFO: Total Accuracy: ', countTotal/float(total)
# main
# train and test for all
def main():
DIR = '/home/archer/Documents/maxent/data/basketball/leaguerank/'
seasons = loadSeasons(DIR + 'seasons-18-Nov-2014.txt')
total = 0
count = 0
trainData = []
trainLabels = []
testData = []
testLabels = []
for season in seasons:
tmpTrainData = buildTrainingSets(DIR + season + '-train.csv').tolist()
tmpTrainLabels = buildTestingLabels(DIR + season + '-train.csv').tolist()
tmpTestData = buildTestingSets(DIR + season + '-test.csv').tolist()
tmpTestLabels = buildTestingLabels(DIR + season + '-test.csv').tolist()
trainData.extend(tmpTrainData)
trainLabels.extend(tmpTrainLabels)
testData.extend(tmpTestData)
testLabels.extend(tmpTestLabels)
trainData = np.array(trainData).astype(np.float32)
trainLabels = np.array(trainLabels).astype(np.float32)
testData = np.array(testData).astype(np.float32)
testLabels = np.array(testLabels).astype(np.float32)
total = len(testLabels)
knn = cv2.KNearest()
knn.train(trainData, trainLabels)
for i in range(len(testLabels)):
ret, results, neighbours, dist = knn.find_nearest(np.array([testData[i]]), 21)
if results[0][0] == testLabels[i][0]:
count = count + 1
print 'INFO: Total Accuracy: ', count/float(total)
if __name__ == '__main__':
print "+++++++++++++++++Main+++++++++++++++++++++++++"
main()
print "+++++++++++++++++teamMain+++++++++++++++++++++++++"
teamMain()
print "+++++++++++++++++seasonMain+++++++++++++++++++++++++"
seasonMain()
|
[
"[email protected]"
] | |
1c7d4811f1c3dfd5bc63c22ca4fa582315a02824
|
0809673304fe85a163898983c2cb4a0238b2456e
|
/tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/setuptools/command/test.py
|
39746a02bf60246a3d7f1feb3e2f9243977c6177
|
[
"Apache-2.0"
] |
permissive
|
jasonwee/asus-rt-n14uhp-mrtg
|
244092292c94ff3382f88f6a385dae2aa6e4b1e1
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
refs/heads/master
| 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 |
Apache-2.0
| 2022-11-27T04:03:06 | 2014-10-22T15:42:28 |
Python
|
UTF-8
|
Python
| false | false | 7,134 |
py
|
import sys
import contextlib
from distutils.errors import DistutilsOptionError
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
|
[
"[email protected]"
] | |
ee5cb4dba4207a57b701f57cb2ad43198828c213
|
096fde1f6e629de8b921de621c1e2eaed3d149e7
|
/src/android/toga_android/widgets/label.py
|
66e971a274fe62d5a88ba01b124bb779f3afec2b
|
[
"BSD-3-Clause"
] |
permissive
|
zeerorg/toga
|
78079ccde9fc33970f03dd0718cb191d037d9db8
|
be8182e6131938982dc757f847f938349009de7e
|
refs/heads/master
| 2021-01-23T03:16:17.750455 | 2017-03-19T06:44:37 | 2017-03-19T06:44:37 | 86,062,968 | 0 | 0 | null | 2017-03-24T11:54:09 | 2017-03-24T11:54:09 | null |
UTF-8
|
Python
| false | false | 1,250 |
py
|
# from ..app import MobileApp
# from .base import Widget
#
# from toga.constants import *
#
#
# class Label(Widget):
# def __init__(self, text=None, alignment=LEFT_ALIGNED):
# super(Label, self).__init__()
#
# self.startup()
#
# self.alignment = alignment
# self.text = text
#
# def startup(self):
# print ("startup label")
# self._impl = TextView(MobileApp._impl)
#
# @property
# def alignment(self):
# return self._alignment
#
# @alignment.setter
# def alignment(self, value):
# self._alignment = value
# self._impl.setGravity({
# LEFT_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.LEFT,
# RIGHT_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.RIGHT,
# CENTER_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# JUSTIFIED_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# NATURAL_ALIGNED: Gravity.CENTER_VERTICAL | Gravity.CENTER_HORIZONTAL,
# }[value])
#
# @property
# def text(self):
# return self._text
#
# @text.setter
# def text(self, value):
# self._text = value
# self._impl.setHint(self._text)
|
[
"[email protected]"
] | |
c5ee9c15ca65ada5c73aba41feef5b69a74b50d4
|
9805edf2b923c74cf72a3cfb4c2c712255256f15
|
/python/120_triangle.py
|
719010ee065e6c6d8cc995ea1c85fc78889cc7a4
|
[
"MIT"
] |
permissive
|
jixinfeng/leetcode-soln
|
5b28e49c2879cdff41c608fc03628498939b0e99
|
24cf8d5f1831e838ea99f50ce4d8f048bd46c136
|
refs/heads/master
| 2022-10-12T17:02:53.329565 | 2022-10-06T03:21:56 | 2022-10-06T03:21:56 | 69,371,757 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,283 |
py
|
"""
Given a triangle, find the minimum path sum from top to bottom. Each step you
may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is
the total number of rows in the triangle.
Implementation note: in oldMinSum = newMinSum[:], the [:] cannot be omitted; it copies the
previous row so the in-place updates do not corrupt it.
"""
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if triangle is None or triangle == []:
return 0
height = len(triangle)
newMinSum = [0] * height
for row in triangle:
oldMinSum = newMinSum[:] #deep copy
for i in range(len(row)):
if i == 0:
newMinSum[i] = oldMinSum[i] + row[i]
elif i == len(row) - 1:
newMinSum[i] = oldMinSum[i - 1] + row[i]
else:
newMinSum[i] = min(oldMinSum[i], oldMinSum[i - 1]) + row[i]
return min(newMinSum)
a = Solution()
print(a.minimumTotal([[-1],[2,3],[1,-1,-3]]))
|
[
"[email protected]"
] | |
3605b8c88df214a553152d099f2136282236394f
|
bd01fbbc28b98814c0deb428b412aeec456a3712
|
/make_train_target.py
|
360bc19136e82616fd92eb8929c7c0ac37f2beb0
|
[] |
no_license
|
thusodangersimon/sanral_hack
|
d693a8fedf6de4f10dde5efcdb1bc5e0a95b0b9d
|
69c71fc80935ee42488226962b29d643d0a6e75a
|
refs/heads/master
| 2020-08-11T18:09:23.997842 | 2019-10-12T14:31:24 | 2019-10-12T14:31:24 | 214,606,084 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,579 |
py
|
"""
This file creates training data set.
"""
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class MakeTarget(BaseEstimator, TransformerMixin):
def __init__(self, start, end, time_col, segment_col, agg_col):
self.start = start
self.end = end
self.time_col = time_col
self.segment_col = segment_col
self.agg_col = agg_col
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
train_df = X.set_index(self.time_col)
train_df = train_df.groupby([pd.Grouper(freq='H'), self.segment_col])[self.agg_col].count()
# get daterange
date_range = pd.date_range(self.start, self.end, freq='H')
# fill in data
out_df = train_df.reset_index().groupby(self.segment_col).apply(self._reindex, date_range=date_range)
return out_df
def _reindex(self, df, date_range):
df = df.set_index(self.time_col)
out_df = df.reindex(date_range, fill_value=0)
out_df = out_df[[self.agg_col]]
return out_df
if __name__ == '__main__':
train_path = 'data/train.csv'
train_df = pd.read_csv(train_path)
# make col datetime
train_df['Occurrence Local Date Time'] = pd.to_datetime(train_df['Occurrence Local Date Time'])
# init transformer
make_target = MakeTarget('2016-01-01', '2019-01-01', 'Occurrence Local Date Time', 'road_segment_id', 'EventId')
target = make_target.fit_transform(train_df)
print('found events =', target.EventId.sum())
target.to_csv('train_target.csv')
|
[
"[email protected]"
] | |
213eae2e246ee6a732fe0b4e2584202dfc940337
|
b6a97526938a923f442d54b3c02e82051c0df1ff
|
/tests/functions_tests/test_concat.py
|
32c9def4fe7e3bdeef21c63267e77160d6a381ac
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
amoliu/chainer
|
96992ff235dec616e39edb0a840d621fbea4ce12
|
df4362bc8a2a7bef6513ac788f373f0b5028e03b
|
refs/heads/master
| 2021-01-22T16:38:16.727714 | 2015-06-18T08:31:13 | 2015-06-18T08:31:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,853 |
py
|
from unittest import TestCase
import numpy
from chainer import cuda, Variable
from chainer.cuda import to_gpu, GPUArray
from chainer.gradient_check import assert_allclose
from chainer.functions import concat
cuda.init()
class Concat(TestCase):
def setUp(self):
self.y0 = numpy.arange(42, dtype=numpy.float32).reshape(2, 7, 3)
self.xs0 = [self.y0[:, :2], self.y0[:, 2:5], self.y0[:, 5:]]
self.y1 = numpy.arange(21, dtype=numpy.float32).reshape(7, 3)
self.xs1 = [self.y1[:2], self.y1[2:5], self.y1[5:]]
def check_forward(self, xs_data, y_data, axis):
xs = tuple(Variable(x_data) for x_data in xs_data)
y = concat(xs, axis=axis)
assert_allclose(y_data, y.data, atol=0, rtol=0)
def test_forward_cpu_0(self):
self.check_forward(self.xs0, self.y0, axis=1)
def test_forward_cpu_1(self):
self.check_forward(self.xs1, self.y1, axis=0)
def test_forward_gpu_0(self):
self.check_forward(
[to_gpu(x.copy()) for x in self.xs0], to_gpu(self.y0), axis=1)
def test_forward_gpu_1(self):
self.check_forward(
[to_gpu(x.copy()) for x in self.xs1], to_gpu(self.y1), axis=0)
def check_backward(self, xs_data, axis):
xs = tuple(Variable(x_data) for x_data in xs_data)
y = concat(xs, axis=axis)
y.grad = y.data
y.backward()
for x in xs:
assert_allclose(x.data, x.grad, atol=0, rtol=0)
def test_backward_cpu_0(self):
self.check_backward(self.xs0, axis=1)
def test_backward_cpu_1(self):
self.check_backward(self.xs1, axis=0)
def test_backward_gpu_0(self):
self.check_backward([to_gpu(x.copy()) for x in self.xs0], axis=1)
def test_backward_gpu_1(self):
self.check_backward([to_gpu(x.copy()) for x in self.xs1], axis=0)
|
[
"[email protected]"
] | |
efd3b2095fe805965d530e8825bb361b4af2b186
|
dfab6798ece135946aebb08f93f162c37dd51791
|
/core/luban/cli/db/help.py
|
efa834580fb7667beb71a842f62daa146103d562
|
[] |
no_license
|
yxqd/luban
|
405f5f7dcf09015d214079fe7e23d644332be069
|
00f699d15c572c8bf160516d582fa37f84ac2023
|
refs/heads/master
| 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 713 |
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import os
def run(*args, **kwds):
from . import public_commands
print( 'luban db -- db management commands')
print( 'http://lubanui.org')
print()
print('Commands:')
for cmd in public_commands:
print(' luban db %s' % cmd)
continue
return
def parse_cmdline():
return [], {}
# End of file
|
[
"[email protected]"
] | |
bc78baed56d1faf996c6ec825a93f0f64c9c943f
|
8e18c91dae4787b53e1ff6b35dc04fa38aa374d3
|
/Pautas Interrogaciones/Examen/Pregunta 2/P2a.py
|
ccb0956454e0a77b4a1a6193255f4836793b511d
|
[] |
no_license
|
GbPoblete/syllabus
|
4a0cb0a2d92fea04b7891c84efef678e4596fa08
|
ca0e8c44d7c13c98ce4e4b99b4559daf99804cab
|
refs/heads/master
| 2020-05-16T00:49:10.663749 | 2015-12-01T23:22:14 | 2015-12-01T23:22:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,391 |
py
|
__author__ = 'figarrido'
def _promedio(datos):
return sum(datos) / len(datos)
def _varianza(datos):
prom = _promedio(datos)
suma = 0
for i in datos:
suma += (i - prom)**2
return suma / len(datos)
class Estrella:
def __init__(self, clase, RA, DEC, id, observaciones=[]):
self.clase = clase
self.RA = RA
self.DEC = DEC
self.id = id
self.observaciones = observaciones
def get_brillos(self):
return [i.brillo for i in self.observaciones]
@property
def promedio(self):
brillos = self.get_brillos()
return _promedio(brillos)
@property
def varianza(self):
brillos = self.get_brillos()
return _varianza(brillos)
def agregar_observacion(self, observacion):
self.observaciones.append(observacion)
class Observacion(object):
def __init__(self, brillo, tiempo, error):
self.brillo = brillo
self.tiempo = tiempo
self.error = error
class Field:
def __init__(self, estrellas=[]):
self.estrellas = estrellas
def agregar_estrella(self, estrella):
        self.estrellas.append(estrella)
class Cielo:
def __init__(self, fields=[]):
self.fields = fields
def agregar_field(self, field):
self.fields.append(field)
|
[
"[email protected]"
] | |
9d6b2500746c6557a739357ecf53aed73bbf15d8
|
55a849e02a9a3819c72d67e0ef52cee2b5223db2
|
/ftrace/parsers/sched_task_usage_ratio.py
|
e6f8c72c3122c2121fd387d2e76aaa87edd6c514
|
[
"Apache-2.0"
] |
permissive
|
Gracker/SystraceAnalysis
|
08055bceea6018f6e1dd425d1976c1893bd9f945
|
b27217e33a260614a9fe9ff3f8c3e470efdbd9a3
|
refs/heads/master
| 2023-02-18T19:34:26.071277 | 2021-01-21T01:51:37 | 2021-01-21T01:57:40 | 248,390,276 | 4 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,032 |
py
|
#!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <[email protected]>
import re
from ftrace.common import ParserError
from .register import register_parser
try:
from ftrace.third_party.cnamedtuple import namedtuple
except ImportError:
from collections import namedtuple
TRACEPOINT = 'sched_task_usage_ratio'
__all__ = [TRACEPOINT]
SchedTaskUsageRatioBase = namedtuple(TRACEPOINT,
[
'comm',
'pid',
'ratio'
]
)
class SchedTaskUsageRatio(SchedTaskUsageRatioBase):
"""
Tracked task cpu usage ratio [0..1023].
"""
__slots__ = ()
def __new__(cls, comm, pid, ratio):
pid = int(pid)
        ratio = float(ratio)/1023.0
return super(cls, SchedTaskUsageRatio).__new__(
cls,
comm=comm,
pid=pid,
ratio=ratio,
)
sched_task_usage_ratio_pattern = re.compile(
r"""comm=(?P<comm>.*)\s+
pid=(?P<pid>\d+)\s+
ratio=(?P<ratio>\d+)\s+
""",
re.X|re.M
)
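# A payload line this pattern should accept might look like (hypothetical values):
#   "comm=foo pid=1234 ratio=512 "
# i.e. whitespace-separated key=value pairs, with the trailing whitespace
# consumed by the final \s+ group of the pattern above.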
@register_parser
def sched_task_usage_ratio(payload):
"""Parser for `sched_task_usage_ratio` tracepoint"""
try:
match = re.match(sched_task_usage_ratio_pattern, payload)
if match:
match_group_dict = match.groupdict()
return SchedTaskUsageRatio(**match_group_dict)
except Exception, e:
raise ParserError(e.message)
|
[
"[email protected]"
] | |
d2791fd72bd25721bcc5818685206a2359cb9cb0
|
3f100a1002a1f8ed453c8b81a9b403444d77b4c6
|
/assignment_2/assignment_2_final.py
|
d1cdbd0f6ced27c57032a5e8f8d15b90444bac53
|
[] |
no_license
|
Kimuda/Phillip_Python
|
c19c85a43c5a13760239e4e94c08436c99787ebf
|
59d56a0d45839656eb15dbe288bdb0d18cb7df2b
|
refs/heads/master
| 2016-09-09T22:19:02.347744 | 2015-05-01T10:56:49 | 2015-05-01T10:56:49 | 32,330,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,362 |
py
|
uniprot_text="""Entry Gene names Cross-reference (PDB) Length
O95813 CER1 DAND4 267
Q8N907 DAND5 CER2 CKTSF1B3 GREM3 SP1 189
O60565 GREM1 CKTSF1B1 DAND2 DRM PIG2 184
P41271 NBL1 DAN DAND1 4X1J; 181
Q96S42 NODAL 4N1D; 347
Q15465 SHH 3HO5;3M1N;3MXW; 462"""
uniprot_text_list=uniprot_text.split("\n")
protein_list=uniprot_text_list[1:]
#print(uniprot_text_list,protein_list)
d=[]
newlist=[]
for i in protein_list:
d=i.split("\t")
newlist+=[d]
#print(newlist)
list3=[]
counter=0
counter2=len(newlist)-1
while counter<len(newlist):
newlist2={}
newlist2["Entry"]=newlist[counter2][0]
newlist2["Gene_name"]=newlist[counter2][1]
newlist2["Cross_ref_pdb"]=newlist[counter2][2]
newlist2["Length"]=newlist[counter2][3]
list3+=[newlist2]
counter=counter+1
counter2=counter2-1
#print(list3)
#----Question 1. Create a function that returns the protein ID of the shortest protein----
def ID_of_shortest_protein():
list_sorted_by_length = sorted(list3, key=lambda k: k['Length'])
protein_with_shortest_length=list_sorted_by_length[0]
print("The protein ID of the shortest protein is-",protein_with_shortest_length["Entry"])
ID_of_shortest_protein()
#----Question 2. Create a function that receives a gene name and returns the protein ID.----
def genesearch():
query=(input("Enter a gene name to retrieve the protein ID or a blank line to exit: ")).upper()
listofgenes=""
while query!="":
for gene in list3:
listofgenes=gene["Gene_name"]
if query in listofgenes:
print("Protein ID for",query,"is",gene["Entry"])
query=(input("To exit enter a blank line or Enter a gene name to continue: ")).upper()
genesearch()
#GLITCHES; even a single letter in query, returns a result, and when no match is found the user is not informed.
#----Question 3. Create a function that receives protein ID and returns the PDB IDs. If the protein doesn’t have PDBs reported, the function should return False.
def pdbqueryusingproteinID():
query=(input("Enter a protein ID to retrieve the protein PDB IDs or a blank line to exit: ")).upper()
listofproteinIDs=""
while query!="":
for proteinID in list3:
listofproteinIDs=proteinID["Entry"]
if query in listofproteinIDs:
if proteinID["Cross_ref_pdb"]!="":
commaseperatedpdbs=proteinID["Cross_ref_pdb"].split(";")
for i in commaseperatedpdbs:
if i!="":
print(i+(","),end="")
print()
else:
print("False")
query=(input("To exit enter a blank line or Enter a protein ID to continue: ")).upper()
pdbqueryusingproteinID()
#GLITCHES; even a single letter can return a result, and when no match is found the user is not informed.
#----Question 4. Create a function that prints the proteins IDs and the number of reported genes. The list should be sorted by the number of genes.
def proteinIDsnumberofgenes():
print("Protein ID\tNumber of genes")
list2=[]
for item in list3:
list2=list2 + [[item["Entry"]]+item["Gene_name"].split()]
newdictionary={t[0]:t[1:] for t in list2}
    #print(newdictionary)
numberofgeneslist=[]
for key in newdictionary:
numberofgeneslist+=[[key]+[len(newdictionary[key])]]
sortednumberofgeneslist=sorted(numberofgeneslist, key=lambda k: k[1])
#print(key,len(newdictionary[key]))
#print(numberofgeneslist)
for item in sortednumberofgeneslist:
print(item[0],"\t\t",item[1])
#print(sortednumberofgeneslist)
proteinIDsnumberofgenes()
#----Question 5. Create a function that prints a list of pairs of all the reported combinations of genes and PDBs
def gene_names_paired_with_pdbs():
print("Gene_name\tCross_ref_pdb")
for item in list3:
for item2 in item["Cross_ref_pdb"].split(";"):
if item2!="":
for item3 in item["Gene_name"].split():
print(item3,'\t\t',item2)
gene_names_paired_with_pdbs()
#GlITCHES; the tabulation, sometimes makes the results look misaligned when gene_names of varying word length are used (i tried it with a different batch of uniprot data)
|
[
"[email protected]"
] | |
0fa399848f39ede5587c2770483c24ccf4e954b1
|
dd14fd0545093bc7fd3ff8b93e32180cec00e24d
|
/data_utils/load_dataset.py
|
fb31455fa3afab4b18465f204cfa5061c220c449
|
[] |
no_license
|
bigdatasciencegroup/PyTorch-GAN-Shop
|
ce38abf86185c6894ee46166bd41419260b026e5
|
95d4c3f8d836255cea162b8af38f810b5c638f2a
|
refs/heads/master
| 2022-11-08T20:59:29.839442 | 2020-06-19T05:32:56 | 2020-06-19T05:32:56 | 273,416,695 | 2 | 0 | null | 2020-06-19T06:03:16 | 2020-06-19T06:03:16 | null |
UTF-8
|
Python
| false | false | 4,837 |
py
|
from torch.utils.data import Dataset
import os
import h5py as h5
import numpy as np
from scipy import io
import torch
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10, STL10
from torchvision.datasets import ImageFolder
class LoadDataset(Dataset):
def __init__(self, dataset_name, data_path, train, download, resize_size, hdf5_path=None, consistency_reg=False, make_positive_aug=False):
super(LoadDataset, self).__init__()
self.dataset_name = dataset_name
self.data_path = data_path
self.train = train
self.download = download
self.resize_size = resize_size
self.hdf5_path = hdf5_path
self.consistency_reg = consistency_reg
self.make_positive_aug = make_positive_aug
self.load_dataset()
def load_dataset(self):
if self.dataset_name == 'cifar10':
if self.hdf5_path is not None:
print('Loading %s into memory...' % self.hdf5_path)
with h5.File(self.hdf5_path, 'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
else:
self.data = CIFAR10(root=os.path.join('data', self.dataset_name),
train=self.train,
download=self.download)
elif self.dataset_name == 'imagenet':
if self.hdf5_path is not None:
print('Loading %s into memory...' % self.hdf5_path)
with h5.File(self.hdf5_path, 'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
else:
mode = 'train' if self.train == True else 'val'
root = os.path.join('data','ILSVRC2012', mode)
self.data = ImageFolder(root=root)
elif self.dataset_name == "tiny_imagenet":
if self.hdf5_path is not None:
print('Loading %s into memory...' % self.hdf5_path)
with h5.File(self.hdf5_path, 'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
else:
mode = 'train' if self.train == True else 'val'
root = os.path.join('data','TINY_ILSVRC2012', mode)
self.data = ImageFolder(root=root)
else:
raise NotImplementedError
def __len__(self):
if self.hdf5_path is not None:
num_dataset = self.data.shape[0]
else:
num_dataset = len(self.data)
return num_dataset
@staticmethod
def _decompose_index(index):
index = index % 18
flip_index = index // 9
index = index % 9
tx_index = index // 3
index = index % 3
ty_index = index
return flip_index, tx_index, ty_index
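    # Worked example: index 7 -> 7 % 18 == 7, so flip_index == 0 (7 // 9),
    # tx_index == 2 (7 // 3 after taking 7 % 9) and ty_index == 1 (7 % 3);
    # i.e. one of the 2 * 3 * 3 == 18 flip/translation combinations used by
    # the consistency / positive-augmentation branch below.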
def __getitem__(self, index):
if self.hdf5_path is not None:
img = np.asarray((self.data[index]-127.5)/127.5, np.float32)
label = int(self.labels[index])
elif self.hdf5_path is None and self.dataset_name == 'imagenet':
img, label = self.data[index]
size = (min(img.size), min(img.size))
i = (0 if size[0] == img.size[0]
else (img.size[0] - size[0]) // 2)
j = (0 if size[1] == img.size[1]
else (img.size[1] - size[1]) // 2)
img = img.crop((i, j, i + size[0], j + size[1]))
img = np.asarray(img.resize((self.resize_size, self.resize_size)), np.float32)
img = np.transpose((img-127.5)/127.5, (2,0,1))
else:
img, label = self.data[index]
img = np.asarray(img, np.float32)
img = np.transpose((img-127.5)/127.5, (2,0,1))
if self.consistency_reg or self.make_positive_aug:
flip_index, tx_index, ty_index = self._decompose_index(index)
img_aug = np.copy(img)
c,h,w = img_aug.shape
if flip_index == 0:
img_aug = img_aug[:,:,::-1]
pad_h = int(h//8)
pad_w = int(w//8)
img_aug = np.pad(img_aug, [(0, 0), (pad_h, pad_h), (pad_w, pad_w)], mode='reflect')
if ty_index == 0:
i = 0
elif ty_index == 1:
i = pad_h
else:
i = 2*pad_h
if tx_index == 0:
j = 0
elif tx_index == 1:
j = pad_w
else:
j = 2*pad_w
img_aug = img_aug[:, i:i+h, j:j+w]
img = torch.from_numpy(img)
img_aug = torch.from_numpy(img_aug)
return img, label, img_aug
img = torch.from_numpy(img)
return img, label
|
[
"[email protected]"
] | |
07c6474da4c44a115dfccccca315bbc59945c0d8
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-integrations/connectors/source-convex/source_convex/source.py
|
ecf094ff9c01ce502d526d5681fd2131c8a7bb17
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 |
MIT
| 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null |
UTF-8
|
Python
| false | false | 8,739 |
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from datetime import datetime
from json import JSONDecodeError
from typing import Any, Dict, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, TypedDict
import requests
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import IncrementalMixin, Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.requests_native_auth.token import TokenAuthenticator
ConvexConfig = TypedDict(
"ConvexConfig",
{
"deployment_url": str,
"access_key": str,
},
)
ConvexState = TypedDict(
"ConvexState",
{
"snapshot_cursor": Optional[str],
"snapshot_has_more": bool,
"delta_cursor": Optional[int],
},
)
CONVEX_CLIENT_VERSION = "0.2.0"
# Source
class SourceConvex(AbstractSource):
def _json_schemas(self, config: ConvexConfig) -> requests.Response:
deployment_url = config["deployment_url"]
access_key = config["access_key"]
url = f"{deployment_url}/api/json_schemas?deltaSchema=true&format=convex_json"
headers = {
"Authorization": f"Convex {access_key}",
"Convex-Client": f"airbyte-export-{CONVEX_CLIENT_VERSION}",
}
return requests.get(url, headers=headers)
def check_connection(self, logger: Any, config: ConvexConfig) -> Tuple[bool, Any]:
"""
Connection check to validate that the user-provided config can be used to connect to the underlying API
:param config: the user-input config object conforming to the connector's spec.yaml
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
resp = self._json_schemas(config)
if resp.status_code == 200:
return True, None
else:
return False, format_http_error("Connection to Convex via json_schemas endpoint failed", resp)
def streams(self, config: ConvexConfig) -> List[Stream]:
"""
:param config: A Mapping of the user input configuration as defined in the connector spec.
"""
resp = self._json_schemas(config)
if resp.status_code != 200:
raise Exception(format_http_error("Failed request to json_schemas", resp))
json_schemas = resp.json()
table_names = list(json_schemas.keys())
return [
ConvexStream(
config["deployment_url"],
config["access_key"],
table_name,
json_schemas[table_name],
)
for table_name in table_names
]
class ConvexStream(HttpStream, IncrementalMixin):
def __init__(self, deployment_url: str, access_key: str, table_name: str, json_schema: Mapping[str, Any]):
self.deployment_url = deployment_url
self.table_name = table_name
if json_schema:
json_schema["additionalProperties"] = True
json_schema["properties"]["_ab_cdc_lsn"] = {"type": "number"}
json_schema["properties"]["_ab_cdc_updated_at"] = {"type": "string"}
json_schema["properties"]["_ab_cdc_deleted_at"] = {"anyOf": [{"type": "string"}, {"type": "null"}]}
else:
json_schema = {}
self.json_schema = json_schema
self._snapshot_cursor_value: Optional[str] = None
self._snapshot_has_more = True
self._delta_cursor_value: Optional[int] = None
self._delta_has_more = True
super().__init__(TokenAuthenticator(access_key, "Convex"))
@property
def name(self) -> str:
return self.table_name
@property
def url_base(self) -> str:
return self.deployment_url
def get_json_schema(self) -> Mapping[str, Any]:
return self.json_schema
primary_key = "_id"
cursor_field = "_ts"
# Checkpoint stream reads after this many records. This prevents re-reading of data if the stream fails for any reason.
state_checkpoint_interval = 128
@property
def state(self) -> ConvexState:
return {
"snapshot_cursor": self._snapshot_cursor_value,
"snapshot_has_more": self._snapshot_has_more,
"delta_cursor": self._delta_cursor_value,
}
@state.setter
def state(self, value: ConvexState) -> None:
self._snapshot_cursor_value = value["snapshot_cursor"]
self._snapshot_has_more = value["snapshot_has_more"]
self._delta_cursor_value = value["delta_cursor"]
def next_page_token(self, response: requests.Response) -> Optional[ConvexState]:
if response.status_code != 200:
raise Exception(format_http_error("Failed request", response))
resp_json = response.json()
if self._snapshot_has_more:
self._snapshot_cursor_value = resp_json["cursor"]
self._snapshot_has_more = resp_json["hasMore"]
self._delta_cursor_value = resp_json["snapshot"]
else:
self._delta_cursor_value = resp_json["cursor"]
self._delta_has_more = resp_json["hasMore"]
return self.state if self._delta_has_more else None
def path(
self,
stream_state: Optional[ConvexState] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[ConvexState] = None,
) -> str:
# https://docs.convex.dev/http-api/#sync
if self._snapshot_has_more:
return "/api/list_snapshot"
else:
return "/api/document_deltas"
def parse_response(
self,
response: requests.Response,
stream_state: ConvexState,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[ConvexState] = None,
) -> Iterable[Any]:
if response.status_code != 200:
raise Exception(format_http_error("Failed request", response))
resp_json = response.json()
return list(resp_json["values"])
def request_params(
self,
stream_state: ConvexState,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[ConvexState] = None,
) -> MutableMapping[str, Any]:
params: Dict[str, Any] = {"tableName": self.table_name, "format": "convex_json"}
if self._snapshot_has_more:
if self._snapshot_cursor_value:
params["cursor"] = self._snapshot_cursor_value
if self._delta_cursor_value:
params["snapshot"] = self._delta_cursor_value
else:
if self._delta_cursor_value:
params["cursor"] = self._delta_cursor_value
return params
def request_headers(
self,
stream_state: ConvexState,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[ConvexState] = None,
) -> Dict[str, str]:
"""
Custom headers for each HTTP request, not including Authorization.
"""
return {
"Convex-Client": f"airbyte-export-{CONVEX_CLIENT_VERSION}",
}
def get_updated_state(self, current_stream_state: ConvexState, latest_record: Mapping[str, Any]) -> ConvexState:
"""
This (deprecated) method is still used by AbstractSource to update state between calls to `read_records`.
"""
return self.state
def read_records(self, sync_mode: SyncMode, *args: Any, **kwargs: Any) -> Iterator[Any]:
self._delta_has_more = sync_mode == SyncMode.incremental
for record in super().read_records(sync_mode, *args, **kwargs):
ts_ns = record["_ts"]
ts_seconds = ts_ns / 1e9 # convert from nanoseconds.
# equivalent of java's `new Timestamp(transactionMillis).toInstant().toString()`
ts_datetime = datetime.utcfromtimestamp(ts_seconds)
ts = ts_datetime.isoformat()
# DebeziumEventUtils.CDC_LSN
record["_ab_cdc_lsn"] = ts_ns
            # DebeziumEventUtils.CDC_UPDATED_AT
record["_ab_cdc_updated_at"] = ts
record["_deleted"] = "_deleted" in record and record["_deleted"]
# DebeziumEventUtils.CDC_DELETED_AT
record["_ab_cdc_deleted_at"] = ts if record["_deleted"] else None
yield record
def format_http_error(context: str, resp: requests.Response) -> str:
try:
err = resp.json()
return f"{context}: {resp.status_code}: {err['code']}: {err['message']}"
except (JSONDecodeError, KeyError):
return f"{context}: {resp.text}"
|
[
"[email protected]"
] | |
e0e2a417665d20c8069db0592caec6107e06bf18
|
328afd873e3e4fe213c0fb4ce6621cb1a450f33d
|
/W3School/conditional_statement_loops/s.py
|
46f9fed08f807249747610045fd405d9e9df72d3
|
[] |
no_license
|
TorpidCoder/Python
|
810371d1bf33c137c025344b8d736044bea0e9f5
|
9c46e1de1a2926e872eee570e6d49f07dd533956
|
refs/heads/master
| 2021-07-04T08:21:43.950665 | 2020-08-19T18:14:09 | 2020-08-19T18:14:09 | 148,430,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
row=15
col=18
result_str=""
for i in range(1,row+1):
if((i<=3)or(i>=7 and i<=9)or(i>=13 and i<=15)):
for j in range(1,col):
result_str=result_str+"o"
result_str=result_str+"\n"
elif(i>=4 and i<=6):
for j in range(1,5):
result_str=result_str+"o"
result_str=result_str+"\n"
else:
for j in range(1,14):
result_str=result_str+" "
for j in range(1,5):
result_str=result_str+"o"
result_str=result_str+"\n"
print(result_str);
|
[
"[email protected]"
] | |
81e30fa5ec864972411e2d70f75db454d407a91c
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/LeetCode_30days_challenge/2020/November/Longest Mountain in Array.py
|
027f86eb75dd45b219ebf43ad294d9e1baa32db8
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,061 |
py
|
class Solution:
def longestMountain(self, A):
if len(A) < 3:
return 0
peak = False
max_length = curr_length = 1
for i in range(1, len(A)):
if A[i] > A[i - 1]:
if peak:
max_length = max(max_length, curr_length)
curr_length = 2
peak = False
else:
curr_length += 1
elif A[i] < A[i - 1]:
if curr_length == 1:
continue
if not peak:
peak = True
curr_length += 1
else:
if curr_length == 1:
continue
if not peak:
curr_length = 1
else:
max_length = max(curr_length, max_length)
curr_length = 1
if peak:
max_length = max(max_length, curr_length)
if max_length < 3:
return 0
else:
return max_length
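# Worked example: for A = [2, 1, 4, 7, 3, 2, 5] the longest mountain is
# [1, 4, 7, 3, 2], so Solution().longestMountain([2, 1, 4, 7, 3, 2, 5]) == 5.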
|
[
"[email protected]"
] | |
010db5b2f3c269b146f0cb527d435487f4a08c5e
|
c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6
|
/keras/keras35_4_load_model.py
|
b8b3c7da89651bd7a87c1f5e27aeb77c05f64640
|
[] |
no_license
|
sswwd95/Study
|
caf45bc3c8c4301260aaac6608042e53e60210b6
|
3c189090c76a68fb827cf8d6807ee1a5195d2b8b
|
refs/heads/master
| 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,019 |
py
|
import numpy as np
a = np.array(range(1,11))
size = 5
def split_x(seq, size):
aaa = []
for i in range(len(seq) - size +1):
subset = seq[i : (i+size)]
aaa.append(subset)
print(type(aaa))
return np.array(aaa)
dataset = split_x(a, size)
print("=======================")
print(dataset)
x = dataset[:,:4]
y = dataset[:,4]
print(x.shape)
print(y.shape)
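# With a = [1, 2, ..., 10] and size = 5, split_x returns 6 sliding windows
# [1,2,3,4,5], [2,3,4,5,6], ..., [6,7,8,9,10]; x takes the first four values
# of each window (shape (6, 4)) and y the last one, i.e. [5, 6, ..., 10].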
x = x.reshape(x.shape[0],x.shape[1],1)
from tensorflow.keras.models import load_model
model = load_model("../data/h5/save_keras35.h5")
'''
############### Test #####################
from tensorflow.keras.layers import Dense
model.add(Dense(5)) # summary name : dense
model.add(Dense(1)) # summary name : dense_1
# reason for the error : duplicate layer names
###########################################
'''
from tensorflow.keras.layers import Dense
model.add(Dense(10, name = 'kingkeras1'))
model.add(Dense(1, name = 'kingkeras2'))
# layers can be attached above and below the loaded model
# dense_3 (Dense) (None, 110) 12210
# _________________________________________________________________
# dense_4 (Dense) (None, 1) 111
# _________________________________________________________________
# kingkeras1 (Dense) (None, 10) 20
# _________________________________________________________________
# kingkeras2 (Dense) (None, 1) 11
# =================================================================
model.summary()
# 3. Compile, train
model.compile(loss = 'mse', optimizer='adam')
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss',patience=20, mode='min')
model.fit(x, y, batch_size=1, callbacks=[early_stopping],epochs=1000)
#4. Evaluate, predict
loss = model.evaluate(x,y, batch_size=1)
print('loss : ',loss)
x_pred = np.array([7,8,9,10])
x_pred = x_pred.reshape(1,4,1)
result = model.predict(x_pred)
print('result : ',result)
# loss : 0.0750507190823555
# result : [[10.734548]]
|
[
"[email protected]"
] | |
e3d34ccc3b810f8a408faaffe681a75d70af1e98
|
213b8cab639c7d45cbf6a4fd46eb23e379d9d374
|
/python/curses_examples/tutorial/05_center_text.py
|
a255e9f843510cbe683534ba35f7dd302d8b021c
|
[] |
no_license
|
DevDungeon/Cookbook
|
f85b04b690ea0a202ddfaeda6460b6ba5797cb70
|
a49a1c77f2b89dc303fa9f2563bb3c19777e4c6c
|
refs/heads/master
| 2023-05-12T06:58:50.606019 | 2022-03-30T04:48:16 | 2022-03-30T04:48:16 | 34,371,982 | 307 | 94 | null | 2023-05-03T22:53:45 | 2015-04-22T06:02:53 |
HTML
|
UTF-8
|
Python
| false | false | 651 |
py
|
# Draw text to center of screen
import curses
import time
screen = curses.initscr()
num_rows, num_cols = screen.getmaxyx()
def print_center(message):
# Calculate center row
middle_row = int(num_rows / 2)
# Calculate center column, and then adjust starting position based
# on the length of the message
half_length_of_message = int(len(message) / 2)
middle_column = int(num_cols / 2)
x_position = middle_column - half_length_of_message
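    # e.g. on an 80x24 terminal (hypothetical size) middle_row == 12,
    # middle_column == 40, and the 22-character message used below starts
    # at column 40 - 11 == 29; actual values come from getmaxyx() above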
# Draw the text
screen.addstr(middle_row, x_position, message)
screen.refresh()
print_center("Hello from the center!")
# Wait and cleanup
time.sleep(3)
curses.endwin()
|
[
"[email protected]"
] | |
1e4ec58320505450428e6b302e9b75e278535852
|
cbd2f187fb60939c49a00f154570f53d4bb19910
|
/rxpy/src/rxpy/engine/parallel/beam/engine.py
|
3fab110bdf23a65bf2b9be38f20a95c3820dac6c
|
[] |
no_license
|
andrewcooke/rxpy
|
3c4443f3ccba479d936f0e49d7d009a64dfc89b3
|
e7f330dc8c5fa49392a1a018ceda6312270e9a93
|
refs/heads/master
| 2021-01-10T13:46:37.129155 | 2011-06-03T23:29:58 | 2011-06-03T23:29:58 | 52,740,676 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,500 |
py
|
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is RXPY (http://www.acooke.org/rxpy)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2010
# Andrew Cooke ([email protected]). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
from rxpy.engine.parallel.base import ParallelEngine
from rxpy.engine.parallel.beam.support import States
class BeamEngine(ParallelEngine):
'''
Restrict the total number of states under consideration, doubling on
failure until we either match, or fail with no discards.
'''
def __init__(self, parser_state, graph, hash_state=False,
beam_start=1, beam_scale=2):
super(BeamEngine, self).__init__(parser_state, graph,
hash_state=hash_state)
self.__beam_start = beam_start
self.__beam_scale = beam_scale
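        # e.g. with beam_start=1 and beam_scale=2 (the defaults) the beam
        # width is grown 1, 2, 4, 8, ... on each retry, matching the
        # doubling behaviour described in the class docstring (sketch; the
        # actual growth is implemented by States.grow(), called below)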
def _new_states(self, initial):
return States(initial, self._hash_state,
beam_start=self.__beam_start, beam_scale=self.__beam_scale)
def _outer_loop(self, states, search, new_state):
initial_offset = self._offset
growing = True
while not states.final_state and growing:
super(BeamEngine, self)._outer_loop(states, search, new_state)
if not states.final_state and states.overflowed:
growing = True
states.grow()
self._set_offset(initial_offset)
else:
growing = False
class HashingBeamEngine(BeamEngine):
def __init__(self, parser_state, graph, hash_state=True,
beam_start=1, beam_scale=2):
super(HashingBeamEngine, self).__init__(parser_state, graph,
hash_state=hash_state,
beam_start=beam_start, beam_scale=beam_scale)
|
[
"[email protected]"
] | |
5370e7752ece0589186787cb62da98ecb25774b8
|
a43b180c8911736bb158a98bb49200001c25ef88
|
/test/dynamo/test_unspec.py
|
392dbc899743b7e4c1d142a49711b309ec223fbe
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
unlimblue/pytorch
|
bafbe98eba7412456c956d99f5c61b64478a8492
|
4433f508e7b1d916820da7f12ce265a868e17cca
|
refs/heads/main
| 2023-06-17T16:01:53.631576 | 2023-06-14T03:19:12 | 2023-06-14T03:19:12 | 193,814,069 | 0 | 2 |
NOASSERTION
| 2019-06-30T15:29:08 | 2019-06-26T02:20:36 |
C++
|
UTF-8
|
Python
| false | false | 10,462 |
py
|
# Owner(s): ["module: dynamo"]
import math
import random
import unittest
import numpy as np
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.nn.functional as F
from torch._dynamo.comptime import comptime
from torch._dynamo.testing import same
# The intention of this test file is you should put test cases specifically
# for assume_static_by_default=False, aka you want to YOLO make everything as
# dynamic as possible. If you want to test the more normal situation where
# you assume static by default, put it in a regular test file and
# test_dynamic_shapes will cover both the YOLO and non-YOLO cases.
@torch._dynamo.config.patch(dynamic_shapes=True, assume_static_by_default=False)
class UnspecTests(torch._dynamo.test_case.TestCase):
def test_numpy_correctness(self):
def fn(x, y, z):
xy = [x + y, y, False]
np_x = x.numpy()
np_y = y.numpy()
return {
"x": x,
"z": z,
"a": np_y.sum(),
"b": xy,
"c": np_y[0][0] / 68,
"d": np_x.sum(),
"e": np_x + np_y,
}, x + np_y.sum() + z
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64)
y = torch.ones([2, 2], dtype=torch.int64)
z = np.int64(12)
res1 = fn(x, y, z)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res2 = opt_fn(x, y, z)
self.assertTrue(same(res1, res2))
def test_no_recompilations(self):
# no recompilations if passing on different numpy int values
def fn(x, y):
return {"a": x + 1, "b": y / 2}
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
for i in range(10):
opt_fn(x, np.int64(i))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
def test_builtin_max_min(self):
# test unspecialized primitive max/min
def fn(x, y, z):
return z + 1, max(x, y), min(x - 4, y)
x = np.int64(12)
y = 10
z = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64)
res1 = fn(x, y, z)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res2 = opt_fn(x, y, z)
self.assertTrue(same(res1, res2, relax_numpy_equality=True))
def test_feed_random_values_into_graph_only(self):
def fn(shape):
torch.manual_seed(123)
x = torch.randn(shape, device="cpu") * random.randint(30, 100)
return x
shape = [2, 3]
random.seed(1)
res1 = fn(shape)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
random.seed(1)
res2 = opt_fn(shape)
self.assertTrue(same(res1, res2))
def test_random_values_with_graph_break(self):
def fn(x):
r1 = random.random()
y = x + random.uniform(10, 20)
y.sum().item()
r2 = random.randint(2, 18) # no graph output in this frame
y.sum().item()
return y + r1, r2
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
random.seed(1)
res1 = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
random.seed(1)
res2 = opt_fn(x)
self.assertTrue(same(res1, res2))
# Really annoying intersection of specialization and RandomValueSource
# If we get a RandomValueSource with a single element tensor, we should return a ConstantVariable like other
    # unspects... but if we do, we break the bytecode assumptions and guards will not work as we will be referring
# to a name from a source that is not there. If we call .item() and take the wrapped_value out, where we do
# wrapped_value = wrapped_value.item() where we send unspec down to wrap_fx_proxy, this test passes and then
# some models fail on missing codegen.tx.output.random_values_var. If we let the tensor value go into wrap as
# it is, this test fails.
# The real solution here is to rewrite RandomValueSource and all the codegen it does from the ground up.
def test_multiple_consecutive_random_calls_before_graph(self):
def fn(x):
dim1 = random.randrange(start=0, stop=5)
dim2 = random.randrange(start=0, stop=5)
dim3 = random.randrange(start=0, stop=5)
y = torch.rand(dim1, dim2, dim3)
return x + 2, y
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
random.seed(1)
res1 = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
random.seed(1)
res2 = opt_fn(x)
self.assertTrue(same(res1, res2))
def test_compiled_random_calls_are_random(self):
# For compiled functions with random calls,
# it should return different values for every iteration.
# https://github.com/pytorch/pytorch/issues/95425
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
return (x + 1) * random.uniform(0, 1)
res = []
for _ in range(5):
res.append(fn(torch.ones(2)))
for i in range(1, 5):
self.assertFalse(same(res[i - 1], res[i]))
def test_random_call_with_while_loop(self):
def fn(x):
dim1 = random.randrange(start=0, stop=3)
dim2 = dim1
while dim1 == dim2:
dim2 = random.randrange(start=0, stop=3)
return x * 2
x = torch.randn(4)
random.seed(1)
res1 = fn(x)
opt_fn = torch._dynamo.optimize("eager")(fn)
random.seed(1)
res2 = opt_fn(x)
self.assertTrue(same(res1, res2))
@unittest.expectedFailure # https://github.com/pytorch/pytorch/issues/103545
def test_builtin_getitem(self):
# builtin getitem args[0] is python list and args[1] is unspec
def fn(x, idx):
return (torch.zeros(idx), x[idx], x[idx:])
x = list(range(50))
ref = fn(x, 48) # 48 is unspecialized
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res = opt_fn(x, 48)
self.assertTrue(same(ref, res))
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_builtin_functions_on_cuda(self):
def fn(x, scaler):
m = torch.nn.ReLU()
y = m(x) * scaler
return y
x = torch.randn([3, 6], device="cuda")
scaler = 0.23 # 0.23 is unspecialized
ref = fn(x, scaler)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res = opt_fn(x, scaler)
self.assertTrue(same(ref, res))
self.assertEqual(ref.device, res.device)
def test_unspec_float_precision(self):
def fn(image, scale_factor):
image = torch.nn.functional.interpolate(
image[None],
size=None,
scale_factor=scale_factor,
mode="bilinear",
recompute_scale_factor=True,
align_corners=False,
)[0]
return image.shape
x = torch.rand([3, 427, 640])
scale_factor = 1.873536229133606
ref = fn(x, scale_factor)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize(cnts)(fn)
res = opt_fn(x, scale_factor)
self.assertTrue(same(ref, res))
def test_specializing_numpy_float_in_control_flow(self):
# np.float is unspecialized by default,
# but it should be specialized when used in control flow.
def fn(x, y):
if y > 1.0:
return x + 1
else:
return x - 1
x = torch.rand(4)
opt_fn = torch._dynamo.optimize("eager", nopython=True)(fn)
for t in [np.float16, np.float32, np.float64]:
y = t(1.23)
ref = fn(x, y)
res = opt_fn(x, y)
self.assertTrue(same(ref, res))
def test_shape_graph_break(self):
from torch._dynamo.comptime import comptime
def fn(x):
x_shape = x.size()
comptime.graph_break()
return x + torch.randn(x_shape)
x = torch.randn(20)
opt_fn = torch._dynamo.optimize("eager")(fn)
opt_fn(x)
def test_isinstance_symint(self):
def fn(x):
assert isinstance(x.size(0), int)
return x * 2
x = torch.randn(20)
opt_fn = torch._dynamo.optimize("eager")(fn)
opt_fn(x)
y = torch.randn(30)
torch._dynamo.mark_dynamic(y, 0)
opt_fn(y)
def test_mark_01_dynamic(self):
def fn(x):
return x * 2
x = torch.randn(1)
torch._dynamo.mark_dynamic(x, 0)
opt_fn = torch._dynamo.optimize("eager")(fn)
# This will fail to compile a generic kernel, but we should not
# complain about it (mark dynamic will try its best but 0/1
# specialization is allowed)
opt_fn(x)
@unittest.expectedFailure
def test_conv1d_symint_padding(self):
kernel = torch.randn(1, 1, 4)
def func(x):
padding = math.ceil((kernel.shape[-1] + x.shape[-1] % 2) / 2) - 1
out = F.conv1d(x, kernel, padding=padding, stride=2)
return out
# TODO: NameError: name 's1' is not defined when dynamic=True
opt_func = torch.compile(func)
x = torch.randn(1, 1, 175)
opt_func(x) # passes
x = torch.randn(1, 1, 249)
opt_func(x) # crashes
@torch._dynamo.config.patch("assume_static_by_default", True)
def test_propagate_dynamic_dim(self):
x = torch.randn(20)
torch._dynamo.mark_dynamic(x, 0)
@torch.compile()
def fn(x):
y = x * 2
comptime.graph_break()
z = y * 2
return z
z = fn(x)
self.assertEqual(z._dynamo_weak_dynamic_indices, {0})
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
[
"[email protected]"
] | |
d6d449b80e62d56b88fbfb2e7e3bc89530bb83f6
|
1aa5216e8ed1fc53999637a46c6af0716a8a8cdf
|
/disk_snapshot_service/business_logic/locker_manager.py
|
3668ebcb7834e64b93ca17fbb9226de6467e3525
|
[] |
no_license
|
ShawnYi5/DiskInProgress
|
c3a47fd5c52b1efeeaeee5b0de56626077a947a4
|
b13d0cdcd0ab08b6dd5b106cda739d7c8ac9e41a
|
refs/heads/master
| 2020-05-17T06:19:40.579905 | 2019-08-22T03:29:31 | 2019-08-22T03:29:31 | 183,555,060 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 549 |
py
|
import threading
class LockWithTrace(object):
def __init__(self):
self._locker = threading.Lock()
self._current_trace = None
def acquire(self, trace):
self._locker.acquire()
self._current_trace = trace
return self
def release(self):
self._current_trace = None
self._locker.release()
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
@property
def current_trace(self):
return self._current_trace
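# Minimal usage sketch (illustrative, not part of the original module):
#
#   locker = LockWithTrace()
#   with locker.acquire('trace-id'):   # blocks until the lock is free
#       ...                            # critical section
#   # released automatically on exit; current_trace is reset to None
#
# While the lock is held, locker.current_trace reports which trace owns it.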
|
[
"[email protected]"
] | |
576462d84b48c93ce54baefe84067f27897cbe9b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_neutral.py
|
49c9babdf333c189682df5fb0fa4e46a59f17ec3
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
#calss header
class _NEUTRAL():
def __init__(self,):
self.name = "NEUTRAL"
self.definitions = [u'not saying or doing anything that would encourage or help any of the groups involved in an argument or war: ', u'A neutral ground or field is a sports stadium that does not belong to either of the two teams taking part in a competition or game: ', u'having features or characteristics that are not easily noticed: ', u'A neutral chemical substance is neither an acid nor an alkali: ', u'A neutral object in physics has no electrical charge: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
7847b19733a6494cd75d660ff936f2f6c1b72c56
|
17658f39e410b6a179f4640e49ff86d86a6541ba
|
/taxonomic-annotation/reconcile.py
|
f76cf0b4962f05e96ca6ebeb9d0de3c6da90d91f
|
[
"MIT"
] |
permissive
|
luispedro/Coelho2021_GMGCv1
|
f91f82d3c78cf0773b69ce39cd0143d872933716
|
caa9b5e156f5f74e147fde371e36ea259fee1407
|
refs/heads/main
| 2023-04-16T05:49:27.375227 | 2022-02-22T14:38:21 | 2022-02-22T14:38:21 | 368,166,901 | 14 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,654 |
py
|
import pandas as pd
from taxonomic import ncbi
n = ncbi.NCBI()
taxonomic = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.taxonomic.map', index_col=0, engine='c')
species = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.species.match.map', header=None, usecols=[1,2], index_col=0, squeeze=True, names=['gene', 'TaxID'], engine='c')
superkingdom = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.kingdom.annotation', header=None, names=['gene', 'superkingdom'], index_col=0, squeeze=True, engine='c')
taxid = taxonomic['NCBI TaxID'].to_dict()
d_superkingdom = superkingdom.to_dict()
d_species = species.to_dict()
d_predicted_taxid = taxonomic['NCBI TaxID'].to_dict()
taxid = taxonomic['NCBI TaxID'][taxonomic.Rank == 'species'].to_dict()
gs = {}
for g,t in taxid.items():
gs[g] = n.ancestors.get(str(t), '1')
if len(gs) % 10_000_000 == 0:
print(len(gs) // 1_000_000)
no_match = {'None', 'no_match'}
prok = {'Bacteria', 'Archaea'}
final = d_species.copy()
for g,sk in d_superkingdom.items():
if sk in no_match:
continue
if g in d_species:
continue
elif sk not in prok:
final[g] = sk
elif g in gs:
final[g] = gs[g]
else:
final[g] = d_predicted_taxid.get(g, 1)
for g,p in d_predicted_taxid.items():
if g not in final:
final[g] = 1
final = pd.Series(final)
finalstr = final.map(str)
finalnames = finalstr.map(n.names)
finalranks = finalstr.map(n.ranks)
finalframe = pd.DataFrame({'taxid' : finalstr, 'rank' : finalranks, 'name': finalnames})
finalframe.to_csv('taxonomic.final.tsv', sep='\t')
|
[
"[email protected]"
] | |
a873f56004bb76bdee75e786e67d6e1897053095
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/semantic_segmentation/FastSCNN/segmentron/models/unet.py
|
f6425505373e615f46bcd51c550b3f3410fe3164
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 4,555 |
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .segbase import SegBaseModel
from .model_zoo import MODEL_REGISTRY
from ..modules import _FCNHead
from ..config import cfg
__all__ = ['UNet']
@MODEL_REGISTRY.register()
class UNet(SegBaseModel):
def __init__(self):
super(UNet, self).__init__(need_backbone=False)
self.inc = DoubleConv(3, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
self.down4 = Down(512, 512)
self.head = _UNetHead(self.nclass)
self.__setattr__('decoder', ['head', 'auxlayer'] if self.aux else ['head'])
def forward(self, x):
size = x.size()[2:]
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
outputs = list()
x = self.head(x1, x2, x3, x4, x5)
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
return tuple(outputs)
class _UNetHead(nn.Module):
def __init__(self, nclass, norm_layer=nn.BatchNorm2d):
super(_UNetHead, self).__init__()
bilinear = True
self.up1 = Up(1024, 256, bilinear)
self.up2 = Up(512, 128, bilinear)
self.up3 = Up(256, 64, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = OutConv(64, nclass)
def forward(self, x1, x2, x3, x4, x5):
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
|
[
"[email protected]"
] | |
c59757992997a99fd2580f5c29f1c4843bfa9307
|
e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484
|
/day30 课堂笔记以及代码/day30/验证客户端合法性/server.py
|
2e9c5a0e7e6055064d96994f662687f7d8d4dcdc
|
[] |
no_license
|
tianshang486/Pythonlaonanhai
|
100df2cc437aad1ee1baf45bdfc4500b1302092b
|
2a5b46986f5ca684b2ae350596e293db54e1e2f4
|
refs/heads/master
| 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 692 |
py
|
import os
import socket
import hashlib
secret_key = b'alex_sb'
sk = socket.socket()
sk.bind(('127.0.0.1',9001))
sk.listen()
conn,addr = sk.accept()
# Create a random byte string
rand = os.urandom(32)
# Send the random string to the client
conn.send(rand)
# Digest the sent string together with the secret key
sha = hashlib.sha1(secret_key)
sha.update(rand)
res = sha.hexdigest()
# Wait to receive the client's digest result
res_client = conn.recv(1024).decode('utf-8')
# Compare the two digests
if res_client == res:
    print('This is a legitimate client')
    # If they match, report that the client is legitimate
    # and continue with normal operation
    conn.send(b'hello')
else:
    conn.close()
    # If they do not match, close the connection immediately
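# A matching client (hypothetical sketch, not part of this file) mirrors the
# handshake: it needs the same secret_key plus the socket/hashlib imports,
# receives the 32 random bytes, digests them and returns the hex digest:
#
#   sk = socket.socket()
#   sk.connect(('127.0.0.1', 9001))
#   rand = sk.recv(32)
#   sha = hashlib.sha1(secret_key)
#   sha.update(rand)
#   sk.send(sha.hexdigest().encode('utf-8'))
#   print(sk.recv(1024))  # b'hello' if accepted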
|
[
"[email protected]"
] | |
346794fd0cf489c5c4b7c4d74e700415d735faa8
|
4d43aabef4cd709bf27dbc644d12a6f32eddddf4
|
/tensorflow/contrib/layers/python/ops/loss_ops_test.py
|
1453af533180c2616ce322f43139aabc19d50b93
|
[
"Apache-2.0"
] |
permissive
|
jmhodges/tensorflow
|
560165f6804d97f12edc6adb395bfcaf81091e0b
|
672b969b8c033548df4cbec89b87694b6024bf12
|
refs/heads/master
| 2022-10-13T09:53:57.176987 | 2016-03-24T16:27:10 | 2016-03-24T16:27:10 | 54,669,784 | 1 | 0 |
Apache-2.0
| 2022-09-23T22:51:01 | 2016-03-24T19:48:15 |
C++
|
UTF-8
|
Python
| false | false | 12,024 |
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.layers.python.ops.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.framework import tensor_util
pi = 3.14
indiana_pi = 3.2 # https://en.wikipedia.org/wiki/Indiana_Pi_Bill
class ReduceBatchSumTest(tf.test.TestCase):
def testDimensionNone(self):
with self.test_session():
input_array = np.array([
[1.0, 2.0],
[-1.0, -2.0]
], dtype=np.float32)
placeholder_vec = tf.placeholder(tf.float32, name="placeholder_vec")
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(placeholder_vec)
self.assertEqual(actual_result.get_shape().as_list(), [None])
self.assertAllClose(expected_result, actual_result.eval(feed_dict={
placeholder_vec: input_array
}))
def testDimension0(self):
with self.test_session():
input_vec = tf.constant(2.0)
with self.assertRaises(ValueError):
tf.contrib.layers.reduce_batch_sum(input_vec)
def testDimension1(self):
with self.test_session():
input_vec = tf.constant([1.0, 2.0])
expected_result = np.array([1.0, 2.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testDimension2(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testReturnShape(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertShapeEqual(expected_result, actual_result)
def testDimensionN(self):
with self.test_session():
input_vec = tf.constant([
[
[1.0, 2.0],
[3.0, 4.0]
],
[
[5.0, 6.0],
[7.0, 8.0]
]
])
expected_result = np.array([10.0, 26.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
class AbsoluteLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
predicted = tf.constant([1.1, -0.2, 3.3, 1.6], shape=[2, 2],
name="predicted")
expected_loss = np.array([0.1, 0.2, 0.3, 0.4]).reshape(2, 2)
return target, predicted, expected_loss
def testAbsoluteLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.absolute_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testAbsoluteLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.absolute_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.absolute_loss(incompatible_shape, target)
class SquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
predicted = tf.constant([1.1, -0.2, 3.3, 1.6], shape=[2, 2],
name="predicted")
expected_loss = np.array([0.005, 0.02, 0.045, 0.08]).reshape(2, 2)
return target, predicted, expected_loss
def testSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.squared_loss(incompatible_shape, target)
class SumSquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([[0.0, 1.0],
[3.0, 2.0]],
shape=[2, 2],
name="target")
predicted = tf.constant([[3.0, -2.0],
[1.0, 2.0]],
shape=[2, 2],
name="predicted")
expected_loss = np.array([9.0, 2.0])
return target, predicted, expected_loss
def testSumSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.sum_squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testSumSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.sum_squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.sum_squared_loss(incompatible_shape, target)
class ScalarAbsoluteLossTest(tf.test.TestCase):
def testScalarAbsoluteLoss(self):
with self.test_session():
actual = tf.constant([pi], name="pi")
actual_placeholder = tf.placeholder(tf.float32)
label = tf.constant([indiana_pi], name="lbl")
label_placeholder = tf.placeholder(tf.float32, name="lbl_ph")
expected_loss = abs(indiana_pi - pi)
# Both shapes are set.
both_shapes_loss = tf.contrib.layers.scalar_absolute_loss(actual, label)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
both_shapes_loss.eval(), expected_loss, decimal=6)
# No shape for 'actual' - check that the loss layer can be created.
no_actual_shape_loss = tf.contrib.layers.scalar_absolute_loss(
actual_placeholder, label)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_actual_shape_loss.eval({actual_placeholder: [pi]}),
expected_loss, decimal=6)
# No shape for 'label' - check that the loss layer can be created.
no_label_shape_loss = tf.contrib.layers.scalar_absolute_loss(
actual, label_placeholder)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_label_shape_loss.eval({label_placeholder: [indiana_pi]}),
expected_loss, decimal=6)
# No shapes.
no_shape_loss = tf.contrib.layers.scalar_absolute_loss(
actual_placeholder, label_placeholder)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_shape_loss.eval({label_placeholder: [indiana_pi],
actual_placeholder: [pi]}),
expected_loss, decimal=6)
# Evaluate the previous one again, but this time with different
# (matching) shapes. This should still work.
np.testing.assert_almost_equal(
no_shape_loss.eval({label_placeholder: [indiana_pi, indiana_pi],
actual_placeholder: [pi, pi]}),
expected_loss, decimal=6)
class ScalarSquaredLossTest(tf.test.TestCase):
def testScalarSquaredLoss(self):
with self.test_session():
actual = tf.constant([pi], name="pi")
actual_placeholder = tf.placeholder(tf.float32)
label = tf.constant([indiana_pi], name="lbl")
label_placeholder = tf.placeholder(tf.float32, name="lbl_ph")
expected_loss = (indiana_pi - pi) * (indiana_pi - pi) / 2
# Both shapes are set.
both_shapes_loss = tf.contrib.layers.scalar_squared_loss(actual, label)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
both_shapes_loss.eval(), expected_loss, decimal=6)
# No shape for 'actual' - check that the loss layer can be created.
no_actual_shape_loss = tf.contrib.layers.scalar_squared_loss(
actual_placeholder, label)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_actual_shape_loss.eval({actual_placeholder: [pi]}),
expected_loss, decimal=6)
# No shape for 'label' - check that the loss layer can be created.
no_label_shape_loss = tf.contrib.layers.scalar_squared_loss(
actual, label_placeholder)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_label_shape_loss.eval({label_placeholder: [indiana_pi]}),
expected_loss,
decimal=6)
# No shapes.
no_shape_loss = tf.contrib.layers.scalar_squared_loss(
actual_placeholder, label_placeholder)
tf.initialize_all_variables().run()
np.testing.assert_almost_equal(
no_shape_loss.eval({label_placeholder: [indiana_pi],
actual_placeholder: [pi]}),
expected_loss, decimal=6)
# Evaluate the previous one again, but this time with different
# (matching) shapes. This should still work.
np.testing.assert_almost_equal(
no_shape_loss.eval({label_placeholder: [indiana_pi, indiana_pi],
actual_placeholder: [pi, pi]}),
expected_loss, decimal=6)
class ScalarLogisticLossTest(tf.test.TestCase):
def _expected_loss(self, logit, target):
sigmoid = 1.0 / (1.0 + np.exp(-logit))
logistic_loss = (target * -np.log(sigmoid)) - (
(1.0 - target) * np.log(1.0 - sigmoid))
batch_losses = np.sum(logistic_loss, 1)
return np.sum(batch_losses) / len(batch_losses)
def test_scalar_logistic_loss(self):
logit = np.array([[9.45, -42], [4.2, 1], [-0.6, 20]])
target = np.array([[0.8, 0.9], [0.45, 0.99999], [0.1, 0.0006]])
with self.test_session():
result = tf.contrib.layers.scalar_logistic_loss(
tf.constant(logit), tf.constant(target))
self.assertAllClose(self._expected_loss(logit, target), result.eval())
if __name__ == "__main__":
tf.test.main()
|
[
"[email protected]"
] | |
72ddea90b5285225d914e9a05f92d1fcd6f57ce3
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/explaining_risk_increase/input_fn.py
|
96a243efb7905e56aa629b71a68dcb4a3c29b3c0
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 |
Apache-2.0
| 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null |
UTF-8
|
Python
| false | false | 13,661 |
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input function to observation sequence model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import data as contrib_data
CONTEXT_KEY_PREFIX = 'c-'
SEQUENCE_KEY_PREFIX = 's-'
def _example_index_to_sparse_index(example_indices, batch_size):
"""Creates a 2D sparse index tensor from a list of 1D example indices.
For example, this would do the transformation:
[0, 0, 0, 1, 3, 3] -> [[0,0], [0,1], [0,2], [1,0], [3,0], [3,1]]
The second column of the output tensor is the running count of the occurrences
of that example index.
Args:
example_indices: A sorted 1D Tensor with example indices.
batch_size: The batch_size. Could be larger than max(example_indices) if the
last examples of the batch do not have the feature present.
Returns:
The sparse index tensor.
    The maximum length of a row in this tensor.
"""
binned_counts = tf.bincount(example_indices, minlength=batch_size)
max_len = tf.to_int64(tf.reduce_max(binned_counts))
return tf.where(tf.sequence_mask(binned_counts)), max_len
def _remove_empty_timesteps(sp_tensor):
"""Creates a 3D SparseTensor skipping empty time steps.
Args:
sp_tensor: A SparseTensor with at least 2 dimensions (subsequent ones will
be ignored and simply flattened into the 2nd dimension).
Returns:
A 3D SparseTensor with index 0 for dimension 3 and a series from [0,..k]
for dimension 1 for each batch entry.
"""
batch_size = tf.to_int32(sp_tensor.dense_shape[0])
indices, max_len = _example_index_to_sparse_index(
tf.to_int32(sp_tensor.indices[:, 0]), batch_size)
indices = tf.concat([indices, tf.zeros_like(indices[:, 0:1])], axis=1)
return tf.SparseTensor(
indices=indices,
values=sp_tensor.values,
dense_shape=[batch_size, max_len, 1])
def _extend_with_dummy(extend_with, to_extend, dummy_value='n/a'):
"""Extends one SparseTensor with dummy_values at positions of other."""
dense_shape = tf.to_int64(
tf.concat([[tf.shape(extend_with)[0]],
[tf.maximum(tf.shape(extend_with)[1],
tf.shape(to_extend)[1])],
[tf.maximum(tf.shape(extend_with)[2],
tf.shape(to_extend)[2])]],
axis=0))
additional_indices = tf.sets.set_difference(
tf.SparseTensor(
indices=extend_with.indices,
values=tf.zeros_like(extend_with.values, dtype=tf.int32),
dense_shape=dense_shape),
tf.SparseTensor(
indices=to_extend.indices,
values=tf.zeros([tf.shape(to_extend.indices)[0]], dtype=tf.int32),
dense_shape=dense_shape)).indices
# Supply defaults for all other indices.
default = tf.tile(
tf.constant([dummy_value]), multiples=[tf.shape(additional_indices)[0]])
string_value = (
tf.as_string(to_extend.values)
if to_extend.values.dtype != tf.string else to_extend.values)
return tf.sparse_reorder(
tf.SparseTensor(
indices=tf.concat([to_extend.indices, additional_indices], axis=0),
values=tf.concat([string_value, default], axis=0),
dense_shape=dense_shape))
def _sparse_intersect_indices(sp_tensor, required_sp_tensor):
"""Filters timestamps in sp_tensor to those present in required_sp_tensor."""
# We extend both sp_tensor and required_sp_tensor with each others indices
# so that they have the same indices.
# E.g. their dense representation of one batch entry could be:
# [dummy, dummy, 1 ]
dummy_value = 'n/a'
dummy_required_sp_tensor = _extend_with_dummy(
sp_tensor, required_sp_tensor, dummy_value)
dummy_sp_tensor = _extend_with_dummy(required_sp_tensor, sp_tensor,
dummy_value)
  # We get rid of dummy values both for indices in the required_sp_tensor and
# the sp_tensor.
# First get rid of indices with dummy values in dummy_required_sp_tensor.
in_required = tf.sparse_retain(
dummy_sp_tensor,
tf.logical_not(tf.equal(dummy_required_sp_tensor.values, dummy_value)))
# Remove empty timesteps so that the timesteps align with the original
# required_sp_tensor.
# Then remove the indices with dummy values.
in_required = tf.sparse_retain(
_remove_empty_timesteps(in_required),
tf.logical_not(tf.equal(in_required.values, dummy_value)))
if sp_tensor.values.dtype != tf.string:
in_required = tf.SparseTensor(
indices=in_required.indices, dense_shape=in_required.dense_shape,
values=tf.strings.to_number(
in_required.values, out_type=sp_tensor.values.dtype))
return in_required
def _dense_intersect_indices(tensor, required_sp_tensor):
required_2d_indices = required_sp_tensor.indices[:, 0:2]
values = tf.gather_nd(tensor, required_2d_indices)
indices, max_len = _example_index_to_sparse_index(
tf.to_int32(required_sp_tensor.indices[:, 0]),
tf.to_int32(required_sp_tensor.dense_shape[0]))
return tf.expand_dims(
tf.sparse_to_dense(
indices, tf.stack([required_sp_tensor.dense_shape[0], max_len]),
values),
axis=2)
def _intersect_indices(sequence_feature, required_sp_tensor):
if isinstance(sequence_feature, tf.SparseTensor):
return _sparse_intersect_indices(sequence_feature, required_sp_tensor)
else:
return _dense_intersect_indices(sequence_feature, required_sp_tensor)
def _make_parsing_fn(mode, label_name, sequence_features,
dense_sequence_feature):
"""Creates an input function to an estimator.
Args:
mode: The execution mode, as defined in tf.estimator.ModeKeys.
label_name: Name of the label present as context feature in the
SequenceExamples.
sequence_features: List of sequence features (strings) that are valid keys
in the tf.SequenceExample.
dense_sequence_feature: Name of the float sequence feature.
Returns:
Two dictionaries with the parsing config for the context features and
sequence features.
"""
sequence_features_config = dict()
for feature in sequence_features:
dtype = tf.string
if feature == dense_sequence_feature:
dtype = tf.float32
sequence_features_config[feature] = tf.io.VarLenFeature(dtype)
sequence_features_config['eventId'] = tf.io.FixedLenSequenceFeature(
[], tf.int64, allow_missing=False)
context_features_config = dict()
context_features_config['timestamp'] = tf.io.FixedLenFeature(
[], tf.int64, default_value=-1)
context_features_config['sequenceLength'] = tf.io.FixedLenFeature(
[], tf.int64, default_value=-1)
if mode != tf_estimator.ModeKeys.PREDICT:
context_features_config[label_name] = tf.io.VarLenFeature(tf.string)
def _parse_fn(serialized_examples):
"""Parses tf.(Sparse)Tensors from the serialized tf.SequenceExamples.
Requires TF versions >= 1.12 but is faster than _parse_fn_old.
Args:
serialized_examples: A batch of serialized tf.SequenceExamples.
Returns:
A dictionary from name to (Sparse)Tensors of the context and sequence
features.
"""
context, sequence, _ = tf.io.parse_sequence_example(
serialized_examples,
context_features=context_features_config,
sequence_features=sequence_features_config,
name='parse_sequence_example')
feature_map = dict()
for k, v in context.items():
feature_map[CONTEXT_KEY_PREFIX + k] = v
for k, v in sequence.items():
feature_map[SEQUENCE_KEY_PREFIX + k] = v
return feature_map
return _parse_fn
def _make_feature_engineering_fn(required_sp_tensor_name, label_name):
"""Creates an input function to an estimator.
Args:
required_sp_tensor_name: Name of the SparseTensor that is required. Other
sequence features will be reduced to times at which this SparseTensor is
also present.
label_name: Name of label.
Returns:
Two dictionaries with the parsing config for the context features and
sequence features.
"""
def _process(examples):
"""Supplies input to our model.
This function supplies input to our model after parsing.
Args:
examples: The dictionary from key to (Sparse)Tensors with context and
sequence features.
Returns:
A tuple consisting of 1) a dictionary of tensors whose keys are
the feature names, and 2) a tensor of target labels if the mode
is not INFER (and None, otherwise).
"""
# Combine into a single dictionary.
feature_map = {}
# Flatten sparse tensor to compute event age. This dense tensor also
# contains padded values. These will not be used when gathering elements
# from the dense tensor since each sparse feature won't have a value
# defined for the padding.
padded_event_age = (
# Broadcast current time along sequence dimension.
tf.expand_dims(examples.pop(CONTEXT_KEY_PREFIX + 'timestamp'), 1)
# Subtract time of events.
- examples.pop(SEQUENCE_KEY_PREFIX + 'eventId'))
examples[SEQUENCE_KEY_PREFIX + 'deltaTime'] = padded_event_age
if CONTEXT_KEY_PREFIX + label_name in examples:
label = examples.pop(CONTEXT_KEY_PREFIX + label_name)
label = tf.sparse.to_dense(tf.SparseTensor(
indices=label.indices, dense_shape=[label.dense_shape[0], 1],
values=tf.ones_like(label.values, dtype=tf.float32)))
feature_map[CONTEXT_KEY_PREFIX + label_name] = label
for k, v in examples.items():
if k.startswith(CONTEXT_KEY_PREFIX):
feature_map[k] = v
else:
feature_map[k] = _intersect_indices(
v, examples[SEQUENCE_KEY_PREFIX + required_sp_tensor_name])
sequence_length = tf.reduce_sum(
_intersect_indices(
tf.ones_like(examples[SEQUENCE_KEY_PREFIX + 'deltaTime']),
examples[SEQUENCE_KEY_PREFIX + required_sp_tensor_name]),
axis=1)
feature_map[CONTEXT_KEY_PREFIX + 'sequenceLength'] = sequence_length
return feature_map
return _process
def get_input_fn(mode,
input_files,
label_name,
sequence_features,
dense_sequence_feature,
required_sequence_feature,
batch_size,
shuffle=True):
"""Creates an input function to an estimator.
Args:
mode: The execution mode, as defined in tf.estimator.ModeKeys.
input_files: List of input files in TFRecord format containing
tf.SequenceExamples.
label_name: Name of the label present as context feature in the
SequenceExamples.
sequence_features: List of sequence features (strings) that are valid keys
in the tf.SequenceExample.
dense_sequence_feature: Name of float sequence feature.
required_sequence_feature: Name of SparseTensor sequence feature that
determines which events will be kept.
batch_size: The size of the batch when reading in data.
shuffle: Whether to shuffle the examples.
Returns:
A function that returns a dictionary of features and the target labels.
"""
def input_fn():
"""Supplies input to our model.
This function supplies input to our model, where this input is a
function of the mode. For example, we supply different data if
we're performing training versus evaluation.
Returns:
A tuple consisting of 1) a dictionary of tensors whose keys are
the feature names, and 2) a tensor of target labels if the mode
is not INFER (and None, otherwise).
"""
is_training = mode == tf_estimator.ModeKeys.TRAIN
num_epochs = None if is_training else 1
with tf.name_scope('read_batch'):
file_names = input_files
files = tf.data.Dataset.list_files(file_names)
if shuffle:
files = files.shuffle(buffer_size=len(file_names))
dataset = (
files.apply(
contrib_data.parallel_interleave(
tf.data.TFRecordDataset, cycle_length=10)).repeat(num_epochs))
if shuffle:
dataset = dataset.shuffle(buffer_size=100)
parse_fn = _make_parsing_fn(mode, label_name, sequence_features,
dense_sequence_feature)
feature_engineering_fn = _make_feature_engineering_fn(
required_sequence_feature, label_name)
feature_map = (
dataset.batch(batch_size)
# Parallelize the input processing and put it behind a
# queue to increase performance by removing it from the
# critical path of per-step-computation.
.map(parse_fn, num_parallel_calls=8)
.map(feature_engineering_fn, num_parallel_calls=8)
.prefetch(buffer_size=1).make_one_shot_iterator().get_next())
label = None
if mode != tf_estimator.ModeKeys.PREDICT:
label = feature_map.pop(CONTEXT_KEY_PREFIX + label_name)
return feature_map, {label_name: label}
return input_fn
|
[
"[email protected]"
] | |
0007bb4b25661e5bdbf01bf24eb7cc44d2721b2d
|
b11899d2edfa17f88da4f45cc828f092125091a0
|
/udacity/wsgi.py
|
79b9e9c0657245bbc91b43c1df6900d525977d0f
|
[] |
no_license
|
chemalle/udacity
|
15726c03a108dc0e68952027e63b5689870cc5b0
|
69ee8e5acda4776df1f46c922b30ec799f5589af
|
refs/heads/master
| 2020-03-09T14:11:32.381362 | 2018-04-09T20:17:43 | 2018-04-09T20:17:43 | 128,828,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 392 |
py
|
"""
WSGI config for udacity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "udacity.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
84cd1164a4f1b0551e720d90ed604c3b8a83c45b
|
d83fde3c891f44014f5339572dc72ebf62c38663
|
/_bin/google-cloud-sdk/.install/.backup/lib/surface/access_context_manager/levels/__init__.py
|
fd037667cf0efbce2318d315932364a60be5025f
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
gyaresu/dotfiles
|
047cc3ca70f4b405ba272856c69ee491a79d2ebe
|
e5e533b3a081b42e9492b228f308f6833b670cfe
|
refs/heads/master
| 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 |
Python
|
UTF-8
|
Python
| false | false | 1,021 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for the Access Context Manager levels CLI."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class AccessContextManager(base.Group):
"""Manage Access Context Manager levels.
An access level is a classification of requests based on raw attributes of
that request (e.g. IP address, device identity, time of day, etc.).
"""
|
[
"[email protected]"
] | |
71b112d6742fd0c9701974a2bddaacf7c0f5fc91
|
dde1cf596cf5969812ecda999828baa9c73e788d
|
/isi_sdk_8_1_1/api/sync_policies_api.py
|
b89f81af89e7ae2f788de9d37722a0e1d4e5d839
|
[] |
no_license
|
dctalbot/isilon_sdk_python3.7
|
bea22c91096d80952c932d6bf406b433af7f8e21
|
4d9936cf4b9e6acbc76548167b955a7ba8e9418d
|
refs/heads/master
| 2020-04-25T20:56:45.523351 | 2019-02-28T19:32:11 | 2019-02-28T19:32:11 | 173,065,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,319 |
py
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_1_1.api_client import ApiClient
class SyncPoliciesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_policy_reset_item(self, policy_reset_item, policy, **kwargs): # noqa: E501
"""create_policy_reset_item # noqa: E501
Reset a SyncIQ policy incremental state and force a full sync/copy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_policy_reset_item(policy_reset_item, policy, async=True)
>>> result = thread.get()
:param async bool
:param Empty policy_reset_item: (required)
:param str policy: (required)
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_policy_reset_item_with_http_info(policy_reset_item, policy, **kwargs) # noqa: E501
else:
(data) = self.create_policy_reset_item_with_http_info(policy_reset_item, policy, **kwargs) # noqa: E501
return data
def create_policy_reset_item_with_http_info(self, policy_reset_item, policy, **kwargs): # noqa: E501
"""create_policy_reset_item # noqa: E501
Reset a SyncIQ policy incremental state and force a full sync/copy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_policy_reset_item_with_http_info(policy_reset_item, policy, async=True)
>>> result = thread.get()
:param async bool
:param Empty policy_reset_item: (required)
:param str policy: (required)
:return: CreateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_reset_item', 'policy'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_policy_reset_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_reset_item' is set
if ('policy_reset_item' not in params or
params['policy_reset_item'] is None):
raise ValueError("Missing the required parameter `policy_reset_item` when calling `create_policy_reset_item`") # noqa: E501
# verify the required parameter 'policy' is set
if ('policy' not in params or
params['policy'] is None):
raise ValueError("Missing the required parameter `policy` when calling `create_policy_reset_item`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy' in params:
path_params['Policy'] = params['policy'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'policy_reset_item' in params:
body_params = params['policy_reset_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/sync/policies/{Policy}/reset', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"[email protected]"
] | |
c3d3ce522f487468d1cd7a2495dd1144c24a1b38
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_6658.py
|
d053f34bf35961831817d9b5d3b432ed001ec245
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
# Paramiko SFTP - Avoid having to specify full local filename?
os.path.join(os.getcwd(), remote_filename)
|
[
"[email protected]"
] | |
a720749f0cff19ebb480f74da0728b668a147c1e
|
276e15e7426b97ae550e45f2f7ffb6c3acc3ce07
|
/company/meituan/1.py
|
2f23806abc88bfd559908d4e1caca1b0600cb164
|
[] |
no_license
|
JDer-liuodngkai/LeetCode
|
1115b946a1495622b1a5905257a8c92523022b8b
|
4ca0ec2ab9510b12b7e8c65af52dee719f099ea6
|
refs/heads/master
| 2023-03-25T03:43:16.630977 | 2020-11-13T00:44:01 | 2020-11-13T00:44:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
"""
每天最多 n 个,各有1个正整数重量
已做好 m 个
买 最重 最轻 a,b; 保证 a/b 大小关系
剩余 n - m 在烤
"""
# 不保证 a/b 大小关系
# 1 ≤ n,m,a,b ≤ 1000 , m≤n , 蛋糕重量不会超过1000
def cake():
vmax, vmin = max(arr), min(arr)
tmax, tmin = max(a, b), min(a, b)
# 已做出的蛋糕 不满足要求
if vmax > tmax or vmin < tmin:
return 'NO'
# 仍在区间内
remain = n - m
if remain == 0:
if vmax == tmax and vmin == tmin: # 比如两个都相等
return 'YES'
else:
return 'NO'
elif remain == 1:
if vmax == tmax or vmin == tmin: # 只要有1个已经相等 即可
return 'YES'
else:
return 'NO'
else: # 仍在区间内,并且还有两个能做
return 'YES'
while True:
n, m, a, b = list(map(int, input().split()))
arr = list(map(int, input().split()))
print(cake())
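# Illustrative sample run added for clarity (not part of the original solution):
#   stdin line 1: "4 2 5 3"  -> n=4 cakes in total, m=2 already baked, bounds a=5, b=3
#   stdin line 2: "4 3"      -> weights of the two baked cakes
#   output      : YES        -> 4 and 3 lie within [3, 5] and the two remaining
#                               cakes can still hit the max (5) and min (3) exactly.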
|
[
"[email protected]"
] | |
1c15f3b8d623086176e436fd2f27d05307235689
|
caed98915a93639e0a56b8296c16e96c7d9a15ab
|
/Walmart Labs/Container With Most Water.py
|
fa9f0a5f15d485592776bc34920e9a6c3966b44c
|
[] |
no_license
|
PiyushChandra17/365-Days-Of-LeetCode
|
0647787ec7e8f1baf10b6bfc687bba06f635838c
|
7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5
|
refs/heads/main
| 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 318 |
py
|
class Solution:
def maxArea(self, height: List[int]) -> int:
i,j = 0,len(height)-1
water = 0
while i < j:
water = max(water,(j-i)*min(height[i],height[j]))
if height[i] < height[j]:
i += 1
else:
j -= 1
return water
|
[
"[email protected]"
] | |
9c85607ad9ff5b44cb32636cec3ac25cc7456f43
|
fb28906c1f0347ffe50193f6c2bad2d4b490fa9c
|
/budger/schedules/migrations/0033_add_group.py
|
7da1b8b63df95fef6af8c788e35aaecf915488d1
|
[] |
no_license
|
pavkozlov/budger-server
|
20c695309c34a0451d25b83ab8583b14f0d21c0c
|
7a98c1789414c83625bda1e5b29cbe5587c3cd6a
|
refs/heads/master
| 2020-12-17T06:35:10.550905 | 2020-01-13T13:27:42 | 2020-01-13T13:27:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,128 |
py
|
# Generated by Django 2.2.8 on 2019-12-12 06:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedules', '0032_auto_20191211_1652'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ['exec_from'], 'permissions': [('manage_event', 'Создание и редактирование черновиков.')]},
),
migrations.AddField(
model_name='event',
name='group',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Контрольные мероприятия. Последующий контроль за исполнением бюджета Московской области'), (2, 'Последующий контроль за исполнением бюджета Территориального фонда обязательного медицинского страхования Московской области'), (3, 'Последующий контроль за исполнением бюджетов муниципальных образований, в бюджетах которых доля дотаций из других бюджетов бюджетной системы Российской Федерации и (или) налоговых доходов по дополнительным нормативам отчислений в размере, не превышающем расчетного объема дотации на выравнивание бюджетной обеспеченности (части расчетного объема дотации), замененной дополнительными нормативами отчислений, в течение двух из трех последних отчетных финансовых лет превышала 50 процентов объема собственных доходов местных бюджетов, а также в муниципальных образованиях, которые не имеют годовой отчетности об исполнении местного бюджета за один год и более из трех последних отчетных финансовых лет'), (4, 'Тематические контрольные мероприятия'), (5, 'Экспертно-аналитические мероприятия. Последующий контроль за исполнением бюджета Московской области'), (6, 'Оперативный контроль за исполнением бюджета Московской области'), (7, 'Оперативный контроль за исполнением бюджета Территориального фонда обязательного медицинского страхования Московской области'), (8, 'Тематические экспертно-аналитические мероприятия')], null=True),
),
]
|
[
"[email protected]"
] | |
7221f4e4fe00f96cc27131f8659619b01f6e8124
|
84bdc0fd6aaaac7c519866fef855be8eae88a80f
|
/0x07-python-classes/3-square.py
|
1c50d5d34600f02b131a03adfde770787a230345
|
[] |
no_license
|
KatyaKalache/holbertonschool-higher_level_programming
|
b74ca3e3c32ded6f54a40748775d0d4475e32409
|
e746a41ccb3f268c9d6d4578b80a0b9e7cf7d067
|
refs/heads/master
| 2021-01-20T02:29:04.972257 | 2017-10-11T06:24:17 | 2017-10-11T06:24:17 | 89,413,672 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
#!/usr/bin/python3
class Square:
def __init__(self, size=0):
if not isinstance(size, int):
raise TypeError("size must be an integer")
if (size < 0):
raise ValueError("size must be >= 0")
self.__size = size
def area(self):
return self.__size ** 2
|
[
"[email protected]"
] | |
f3ae81f967741b44a1451c0540bfd316e877f45c
|
9804b20e9bbd2b4ac405700b920b93fb0b4394c0
|
/server/plugins/admin.py
|
2d1770caa66eb0d80b9d35ee8f10c5ece83289ef
|
[] |
no_license
|
jonathanverner/brython-misc
|
ba5c53989159fdf1684cc956e51e6a22728ca75b
|
b9d6d243000aa0c4dc587fbcd0e6cf3e7359a9fe
|
refs/heads/master
| 2020-04-06T07:02:30.362571 | 2016-08-20T11:28:22 | 2016-08-20T11:28:22 | 58,376,490 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 263 |
py
|
from ..lib.tornado import RPCService, export
from tornado.gen import coroutine
class AdminService(RPCService):
SERVICE_NAME='admin'
@coroutine
@export
def persists_storage(self):
yield self._api.store.persist()
services = [AdminService]
|
[
"[email protected]"
] | |
1e3830f608817ee5f8ae218d7fe951a627fe886e
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq2360.py
|
e567cdcc03d3850100d2ad44ef059ff6d601d9e8
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,005 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=36
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=33
c.append(cirq.Z.on(input_qubit[3])) # number=34
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=22
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=23
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.Y.on(input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=27
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.X.on(input_qubit[0])) # number=31
c.append(cirq.X.on(input_qubit[0])) # number=32
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2360.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
5bdfd26ec6382992c81f1083de5d813e77df1989
|
41710e9133d660739f8f9f17040a2a8a6082e9fb
|
/python/aa_modules/argsparse/mutually_exclusive1.py
|
922f775acf7b00de572185f03d88335e0519a460
|
[] |
no_license
|
hanjiangxue007/Programming
|
591678150e2e300051fdeaf09124d3893076d3a9
|
7a545ef2300b004497f30d27d1f2aaa032e26af5
|
refs/heads/master
| 2020-06-29T18:50:27.776557 | 2016-10-27T18:31:39 | 2016-10-27T18:31:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 838 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct-15-2016 Sat
# Last update :
#
# Ref: https://docs.python.org/3.5/howto/argparse.html
#
# Imports
import argparse
parser = argparse.ArgumentParser(description="calculate X to the power of Y")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("x", type=int, help="the base")
parser.add_argument("y", type=int, help="the exponent")
args = parser.parse_args()
answer = args.x**args.y
if args.quiet:
print(answer)
elif args.verbose:
print("{} to the power {} equals {}".format(args.x, args.y, answer))
else:
print("{}^{} == {}".format(args.x, args.y, answer))
|
[
"[email protected]"
] | |
92aea1fc2467f5278d784d1a69ffb5aecdbbc629
|
1858001ecc913ad5270f12b2e69e46eead3c7970
|
/awx/main/tests/functional/models/test_job_options.py
|
c601413a5d8f46e7ff479e88b5d1b8d2466d3b78
|
[
"Apache-2.0"
] |
permissive
|
mabashian/awx
|
e3274046cabe3e539bf842f1f8ee2409efb28677
|
904cb4af34141ca0cfec6f597eb8016e575a670e
|
refs/heads/devel
| 2023-09-04T04:30:31.256861 | 2017-09-22T17:39:24 | 2017-09-22T17:39:24 | 104,504,771 | 0 | 1 |
Apache-2.0
| 2021-03-22T20:41:15 | 2017-09-22T18:12:24 |
Python
|
UTF-8
|
Python
| false | false | 1,245 |
py
|
import pytest
from django.core.exceptions import ValidationError
from awx.main.models import Credential
@pytest.mark.django_db
def test_clean_credential_with_ssh_type(credentialtype_ssh, job_template):
credential = Credential(
name='My Credential',
credential_type=credentialtype_ssh
)
credential.save()
job_template.credential = credential
job_template.full_clean()
@pytest.mark.django_db
def test_clean_credential_with_invalid_type_xfail(credentialtype_aws, job_template):
credential = Credential(
name='My Credential',
credential_type=credentialtype_aws
)
credential.save()
with pytest.raises(ValidationError):
job_template.credential = credential
job_template.full_clean()
@pytest.mark.django_db
def test_clean_credential_with_custom_types(credentialtype_aws, credentialtype_net, job_template):
aws = Credential(
name='AWS Credential',
credential_type=credentialtype_aws
)
aws.save()
net = Credential(
name='Net Credential',
credential_type=credentialtype_net
)
net.save()
job_template.extra_credentials.add(aws)
job_template.extra_credentials.add(net)
job_template.full_clean()
|
[
"[email protected]"
] | |
7963ef741b939e0ecb1eb8a00e0e97309e07b69b
|
8a378ddae37f834a1c00ba6e63d3ff8f9cabbffb
|
/tailpos_sync/tailpos_sync/report/product_bundle__register_report/other_methods.py
|
44e1da2be0547326648c3e5adb24f39630bb769d
|
[
"MIT"
] |
permissive
|
aakvatech/tailpos-sync
|
2985d998d0e7ee394c68da578e19819f00cc4acd
|
7f199459769395d8d21e8effad1af39b1512c205
|
refs/heads/master
| 2020-09-08T19:16:57.633724 | 2020-04-22T05:56:49 | 2020-04-22T05:56:49 | 221,221,323 | 2 | 0 |
NOASSERTION
| 2020-04-22T05:56:50 | 2019-11-12T13:13:32 | null |
UTF-8
|
Python
| false | false | 5,896 |
py
|
import frappe
def get_columns(columns):
columns.append({"fieldname": "invoice_date", "label": "Invoice Date", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "invoice_number", "label": "Invoice Number", "fieldtype": "Link", "options": "Sales Invoice", "width": 150, })
columns.append({"fieldname": "receipt_no", "label": "Receipt Number", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "receipt_date", "label": "Receipt Date", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "cost_center", "label": "Cost Center", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "pos_profile", "label": "POS Profile", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "store_id", "label": "Store ID", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "item_code", "label": "Item Code", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "item_name", "label": "Item Name", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "uom", "label": "UOM", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "qty", "label": "QTY", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "rate", "label": "Rate", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "amount", "label": "Amount", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "packed_items", "label": "Packed Items", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_item_code", "label": "Item Code", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_item_name", "label": "Item Name", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_qty", "label": "Qty", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "packed_uom", "label": "UOM", "fieldtype": "Data", "width": 150})
columns.append({"fieldname": "packed_valuation_rate", "label":"Valuation Rate", "fieldtype": "Float", "width": 150, "precision" : 2})
columns.append({"fieldname": "warehouse", "label":"Warehouse", "fieldtype": "Data", "width": 150})
def get_invoices(filters, data):
from_date = filters.get("from_date")
to_date = filters.get("to_date")
print(from_date)
print(to_date)
query = """
SELECT
SI.name as name,
SI.posting_date as posting_date,
R.receiptnumber as receiptnumber,
R.date as date,
R.deviceid as deviceid
FROM `tabSales Invoice` AS SI
INNER JOIN `tabReceipts` AS R ON SI.name = R.reference_invoice
WHERE SI.posting_date BETWEEN '{0}' AND '{1}' ORDER BY SI.name
""".format(from_date,to_date)
invoices = frappe.db.sql(query, as_dict=1)
modify_records(invoices, data)
return invoices
def modify_records(invoices, data):
for idx, value in enumerate(invoices):
total = {
"qty": "Total",
"rate": 0,
"amount": 0,
"packed_valuation_rate": 0,
}
sales_invoice_item = frappe.db.sql(""" SELECT * FROM `tabSales Invoice Item` WHERE parent=%s """, value.name, as_dict=True)
device = frappe.db.sql(""" SELECT * FROM `tabDevice` WHERE name=%s""",value.deviceid, as_dict=True)
obj = {
"invoice_date": value.posting_date,
"invoice_number": value.name,
"receipt_no": value.receiptnumber,
"receipt_date": value.date,
"packed_items": "",
}
if len(device) > 0:
pos_profile = frappe.db.sql(""" SELECT * FROM `tabPOS Profile` WHERE name=%s""", device[0].pos_profile, as_dict=True)
if len(pos_profile) > 0:
obj['cost_center'] = pos_profile[0].cost_center
obj['pos_profile'] = device[0].pos_profile
obj['store_id'] = device[0].name
for idxx, i in enumerate(sales_invoice_item):
if idxx == 0:
obj['item_code'] = i.item_code
obj['item_name'] = i.item_name
obj['qty'] = i.qty
obj['rate'] = i.rate
obj['amount'] = i.amount
else:
obj = {
"item_code": i.item_code,
"item_name": i.item_name,
"qty": i.qty,
"rate": i.rate,
"amount": i.amount,
}
total["rate"] += i.rate
total["amount"] += i.amount
packed_items = frappe.db.sql(""" SELECT * FROM `tabPacked Item` WHERE parent_item=%s and parent=%s """, (i.item_code, value.name) , as_dict=True)
for idxxx,ii in enumerate(packed_items):
if idxxx == 0:
obj['packed_item_code'] = ii.item_code
obj['packed_item_name'] = ii.item_name
obj['packed_qty'] = ii.qty
obj['packed_uom'] = ii.uom
obj['warehouse'] = ii.warehouse
else:
obj = {
"packed_item_code": ii.item_code,
"packed_item_name": ii.item_name,
"packed_qty": ii.qty,
"packed_uom": ii.uom,
"warehouse": ii.warehouse,
}
valuation_rate = frappe.db.sql(""" SELECT * FROM tabItem WHERE name=%s""", ii.item_code, as_dict=True)
if len(valuation_rate) > 0:
obj['packed_valuation_rate'] = valuation_rate[0].valuation_rate
total["packed_valuation_rate"] += valuation_rate[0].valuation_rate
data.append(obj)
data.append(obj)
data.append(total)
|
[
"[email protected]"
] | |
ada51a31e0faed934a41299946478c420f2573b6
|
4d51aa09c7b65d2c54212a3004794a50f36a7a89
|
/leetcode/DP/213.HouseRobber/213HouserRoober.py
|
87c13157a361c4607ac3426c9c4425ec78b843c3
|
[] |
no_license
|
letterbeezps/leetcode-algorithm
|
30bd7335e96fdcca3c3ec1269c1c6fa78afd2f3b
|
b6211b1bfd699e45164a8cb5a8fbf2b4ec9756f9
|
refs/heads/master
| 2021-07-06T18:29:18.666607 | 2020-08-06T17:52:50 | 2020-08-06T17:52:50 | 158,728,987 | 10 | 2 | null | 2020-08-06T17:50:17 | 2018-11-22T16:56:13 |
JavaScript
|
UTF-8
|
Python
| false | false | 674 |
py
|
class Solution:
def rob(self, nums: List[int]) -> int:
if not nums:
return 0
if len(nums) <= 3:
return max(nums)
if len(nums) == 4:
return max(nums[0]+nums[2], nums[1]+nums[3])
n = len(nums)
f = [0] * n
num1 = nums[:-1]
f[1] = num1[0]
for i in range(2, n):
f[i] = max(f[i-2]+num1[i-1], f[i-1])
temp1 = f[n-1]
f = [0] * n
num2 = nums[1:]
f[1] = num2[0]
for i in range(2, n):
f[i] = max(f[i-2]+num2[i-1], f[i-1])
temp2 = f[n-1]
return max(temp1, temp2)
|
[
"[email protected]"
] | |
5d758d4d95cc1b6405db90c73d4118106a45b53a
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/aobongdacom.py
|
73b1780602e6b41472488d0a26444813f9b8abeb
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 927 |
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='product-title']",
'price' : "//div[@class='price-box']/h4[@class='product-price']",
'category' : "//ol[@class='breadcrumb pull-left']/li/a",
'description' : "//div[@class='art-content']",
'images' : "//div[@class='main-slider']//li/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'aobongda.com'
allowed_domains = ['aobongda.com']
start_urls = ['http://aobongda.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-id\d+-c\d+\.htm$']), 'parse_item'),
Rule(LinkExtractor(deny=['/[a-zA-Z0-9-]+-id\d+-c\d+\.htm$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
[
"[email protected]"
] | |
4a1e2aa3c3a7ae5b3de5e9f843d7313f73b1a3d0
|
9b3f578e63a7e17e2b1bab5f38aa8625b8a80251
|
/descarteslabs/workflows/types/array/array_.py
|
30e0fa674c82ddb4c8c20b53cc3d60ea823a17f7
|
[
"Apache-2.0"
] |
permissive
|
carderne/descarteslabs-python
|
e6f7000f08cd1569e0ddd0f7fb8e53abb6765183
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
refs/heads/master
| 2022-12-09T23:19:02.361226 | 2020-08-13T11:52:30 | 2020-08-13T11:52:30 | 287,264,851 | 0 | 0 |
NOASSERTION
| 2020-08-13T11:46:58 | 2020-08-13T11:46:57 | null |
UTF-8
|
Python
| false | false | 3,060 |
py
|
import numpy as np
from descarteslabs.common.graft import client
from ...cereal import serializable
from ..core import ProxyTypeError
from ..containers import List
from ..primitives import Int, Float, Bool
from .base_array import BaseArray
DTYPE_KIND_TO_WF = {"b": Bool, "i": Int, "f": Float}
WF_TO_DTYPE_KIND = dict(zip(DTYPE_KIND_TO_WF.values(), DTYPE_KIND_TO_WF.keys()))
@serializable()
class Array(BaseArray):
"""
Proxy Array representing a multidimensional, homogenous array of fixed-size items.
Can be instantiated from a NumPy ndarray (via `from_numpy`), or a Python iterable.
Currently, Arrays can only be constructed from small local arrays (< 10MB).
Array follows the same syntax as NumPy arrays. It supports vectorized operations, broadcasting,
and multidimensional indexing. There are some limitations including slicing with lists/arrays in multiple
axes (``x[[1, 2, 3], [3, 2, 1]]``) and slicing with a multidimensional list/array of integers.
Note
----
Array is an experimental API. It may be changed in the future, will not necessarily be
backwards compatible, and may have unexpected bugs. Please contact us with any feedback!
Examples
--------
>>> import descarteslabs.workflows as wf
>>> # Create a 1-dimensional Array of Ints
>>> arr = wf.Array([1, 2, 3, 4, 5])
>>> arr
<descarteslabs.workflows.types.array.array_.Array object at 0x...>
>>> arr.compute(geoctx) # doctest: +SKIP
array([1, 2, 3, 4, 5])
>>> import numpy as np
>>> import descarteslabs.workflows as wf
>>> ndarray = np.ones((3, 10, 10))
>>> # Create an Array from the 3-dimensional numpy array
>>> arr = wf.Array(ndarray)
>>> arr
<descarteslabs.workflows.types.array.array_.Array object at 0x...>
"""
def __init__(self, arr):
if isinstance(arr, np.generic):
arr = arr.tolist()
if isinstance(arr, (int, float, bool)):
self._literal_value = arr
self.graft = client.apply_graft("wf.array.create", arr)
elif isinstance(arr, (Int, Float, Bool, List)):
self.graft = client.apply_graft("wf.array.create", arr)
else:
if not isinstance(arr, np.ndarray):
try:
arr = np.asarray(arr)
except Exception:
raise ValueError("Cannot construct Array from {!r}".format(arr))
if arr.dtype.kind not in ("b", "i", "f"):
raise TypeError("Invalid dtype {} for an Array".format(arr.dtype))
self._literal_value = arr
arr_list = arr.tolist()
self.graft = client.apply_graft("wf.array.create", arr_list)
@classmethod
def _promote(cls, obj):
if isinstance(obj, cls):
return obj
try:
return obj.cast(cls)
except Exception:
try:
return Array(obj)
except Exception as e:
raise ProxyTypeError("Cannot promote {} to Array: {}".format(obj, e))
|
[
"[email protected]"
] | |
395840ba68ac389c3468cbd55fcc66294d3322da
|
c2f42e145c03feb891d83ea294cdda9f37cfc717
|
/src/modelling/capacity_planning/erlang/erlangb.py
|
7695f16ce57e10ce6be5b40622e5f76eb9e2d211
|
[] |
no_license
|
FelixKleineBoesing/queuingSystem
|
5b38c123f206d9c71014064404b2f50f0f4491a5
|
09ff583831aa7f8b604f01dc97cf0284ed342f77
|
refs/heads/master
| 2023-04-12T00:00:20.309232 | 2021-04-25T11:55:04 | 2021-04-25T11:55:04 | 361,413,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 835 |
py
|
import numpy as np
from src.modelling.capacity_planning.erlang.erlang_base import ErlangBase
from src.modelling.helpers import power_faculty
class ErlangB(ErlangBase):
def get_probability(self, number_agents: int, lambda_: float, mu: float):
"""
calculates the probability that there are c number people in the system
:param number_agents: number of available agents
:param lambda_: average arrival time in times per second
:param mu: average supply time in times per second
:return: probability of a blocked queue
"""
workload = lambda_ / mu
sum = 0.0
for i in range(number_agents + 1):
sum += power_faculty(workload, i)
if np.isnan(sum):
break
return power_faculty(workload, number_agents) / sum
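# A minimal usage sketch added for illustration (not part of the original module).
# It assumes power_faculty(x, n) computes x**n / n! and that ErlangB needs no
# constructor arguments, so the call below evaluates the classic Erlang B blocking
# formula B(c, a) = (a**c / c!) / sum_k (a**k / k!) with offered load a = lambda_ / mu.
if __name__ == "__main__":
    erlang_b = ErlangB()
    # e.g. 0.5 arrivals per second and a service rate of 0.1 per agent -> load of 5 Erlang
    blocking_probability = erlang_b.get_probability(number_agents=7, lambda_=0.5, mu=0.1)
    print("Erlang B blocking probability:", blocking_probability)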
|
[
"[email protected]"
] | |
9899e3436a3df3f0622c8329670adccfbfa8ae22
|
f8961fc56e95ea75f2edbb08fae5ad1af102f6f0
|
/chapter_12/when_to_use_threading.py
|
08613b1bc4647ea050623289f895564f0a8b60d5
|
[] |
no_license
|
larago/python_cookbook
|
eb2cea7425033802775a168d63199eb8e43e2d50
|
34abdb173c78d9eea046707f88a4bd976dfa26e9
|
refs/heads/master
| 2021-01-09T20:12:50.605774 | 2016-07-13T14:06:22 | 2016-07-13T14:06:22 | 63,230,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 581 |
py
|
# The GIL means only one Python thread runs bytecode at a time, so threads are a
# poor fit for CPU-bound work; multiple processes/CPUs handle that better.
# Threads are appropriate for potentially blocking multi-tasking,
# such as I/O or waiting for results from a database.
import time
from threading import Thread
class CountdownThread(Thread):
    def __init__(self, n):
        super(CountdownThread, self).__init__()
        self.n = n
def run(self):
while self.n > 0:
print('T-minus', self.n)
self.n -= 1
time.sleep(5)
c = CountdownThread(5)
c.start()
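# A minimal sketch added for illustration (not in the original file): the
# I/O-bound case described in the comments at the top. Each thread blocks on a
# slow call (simulated here with time.sleep), so three of them finish in roughly
# the time of one call, because blocked threads release the GIL.
def fake_io_task(task_id, delay=1):
    time.sleep(delay)  # stands in for a socket, database or file wait
    print('task', task_id, 'done')
io_threads = [Thread(target=fake_io_task, args=(i,)) for i in range(3)]
for t in io_threads:
    t.start()
for t in io_threads:
    t.join()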
|
[
"[email protected]"
] | |
a8b67575d0a9a5cbdc8f1fe45b98a99aa43eb729
|
1978a9455159b7c2f3286e0ad602652bc5277ffa
|
/exercises/05_basic_scripts/task_5_1c.py
|
4613a5511bcd55aadf4fb864116bb31116c9e7d4
|
[] |
no_license
|
fortredux/py_net_eng
|
338fd7a80debbeda55b5915dbfba4f5577279ef0
|
61cf0b2a355d519c58bc9f2b59d7e5d224922890
|
refs/heads/master
| 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,620 |
py
|
# -*- coding: utf-8 -*-
'''
Task 5.1c
Rework the script from task 5.1b so that, when a parameter that does not exist
in the device dictionary is requested, the message 'No such parameter' is shown.
> Try typing a wrong parameter name or a non-existent parameter
to see what the result is. Then do the task.
If an existing parameter is selected,
print the information about that parameter of the specified device.
Example script run:
$ python task_5_1c.py
Enter device name: r1
Enter parameter name (ios, model, vendor, location, ip): ips
No such parameter
Restriction: the london_co dictionary must not be modified.
All tasks must be done using only the topics covered so far.
That is, this task can be solved without using an if statement.
'''
london_co = {
'r1': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '4451',
'ios': '15.4',
'ip': '10.255.0.1'
},
'r2': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '4451',
'ios': '15.4',
'ip': '10.255.0.2'
},
'sw1': {
'location': '21 New Globe Walk',
'vendor': 'Cisco',
'model': '3850',
'ios': '3.6.XE',
'ip': '10.255.0.101',
'vlans': '10,20,30',
'routing': True
}
}
device = input('Enter device name: ')
'''
vocab = london_co[device].keys()
vocab = str(vocab)
vocab = vocab[11:-2]
'''
vocab = str(london_co[device].keys())  # Shorter version
vocab = vocab[11:-2]
vocab = vocab.replace("'", "")
paste = 'Enter parameter name (' + vocab + '):'
param = input(paste)
variable0 = london_co[device]
'''
variable1 = variable0.get(param, 'No such parameter')  # The less cumbersome option
# variable1 = variable0[param]  # This is how it was before
print(variable1)
'''
print(variable0.get(param, 'No such parameter'))  # The more involved option
|
[
"[email protected]"
] |