Column schema (dtype and observed range per column):
blob_id : string, length 40
directory_id : string, length 40
path : string, length 3 to 616
content_id : string, length 40
detected_licenses : list, 0 to 112 items
license_type : string, 2 classes
repo_name : string, length 5 to 115
snapshot_id : string, length 40
revision_id : string, length 40
branch_name : string, 777 classes
visit_date : timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date : timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date : timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id : int64, 4.92k to 681M, nullable
star_events_count : int64, 0 to 209k
fork_events_count : int64, 0 to 110k
gha_license_id : string, 22 classes
gha_event_created_at : timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
gha_created_at : timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
gha_language : string, 149 classes
src_encoding : string, 26 classes
language : string, 1 class
is_vendor : bool
is_generated : bool
length_bytes : int64, 3 to 10.2M
extension : string, 188 classes
content : string, length 3 to 10.2M
authors : list, exactly 1 item
author_id : string, length 1 to 132
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b4e33e92c9ae5c3d39692435a98f799ea4c7cd9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03733/s769945008.py | 6b0d84553580197dcec9f0813ba6500ba2eaf682 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import sys
input = sys.stdin.readline
def main():
N, T = map(int, input().split())
t = list(map(int, input().split()))
ans = T * N
for i in range(N - 1):
diff = t[i + 1] - t[i]
if diff < T:
ans -= T - diff
print(ans)
if __name__ == "__main__":
main()
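# Editor's note: a hedged worked example, not part of the original submission.
# For N=2, T=4 and presses at t=[0, 3], the naive total is T*N = 8 seconds; the
# two presses overlap by T - (3 - 0) = 1 second, so the program prints 7.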
| [
"[email protected]"
]
| |
107bfdfe9d53d39bb9018b86c818d6f9d0cfe11d | 325ad4c64a3353a41e505737430ba9e9c1005014 | /src/fava/core/accounts.py | 44d32ac792f97022409bef80f9c72e70b5bba94d | [
"MIT"
]
| permissive | Linusp/fava | 8a83e8cf57c948a2b324e3d08d7d62f0566d4cdd | 790ff2fc7d46470ed9a3b9c7ab5e3a7b7d960459 | refs/heads/main | 2023-04-15T19:06:59.472054 | 2023-04-01T15:40:48 | 2023-04-01T15:40:48 | 345,133,428 | 0 | 0 | MIT | 2021-03-06T15:57:21 | 2021-03-06T15:57:20 | null | UTF-8 | Python | false | false | 5,440 | py | """Account close date and metadata."""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from beancount.core.account import TYPE as ACCOUNT_TYPE
from beancount.core.compare import hash_entry
from beancount.core.data import Balance
from beancount.core.data import Close
from beancount.core.data import Custom
from beancount.core.data import Directive
from beancount.core.data import get_entry
from beancount.core.data import Meta
from beancount.core.data import Pad
from beancount.core.data import Transaction
from beancount.core.data import TxnPosting
from beancount.core.realization import find_last_active_posting
from beancount.core.realization import get
from beancount.core.realization import RealAccount
from fava.core._compat import FLAG_UNREALIZED
from fava.core.conversion import units
from fava.core.module_base import FavaModule
def uptodate_status(real_account: RealAccount) -> str | None:
"""Status of the last balance or transaction.
Args:
        real_account: The realization account to inspect.
Returns:
A status string for the last balance or transaction of the account.
- 'green': A balance check that passed.
- 'red': A balance check that failed.
- 'yellow': Not a balance check.
"""
for txn_posting in reversed(real_account.txn_postings):
if isinstance(txn_posting, Balance):
if txn_posting.diff_amount:
return "red"
return "green"
if (
isinstance(txn_posting, TxnPosting)
and txn_posting.txn.flag != FLAG_UNREALIZED
):
return "yellow"
return None
def balance_string(real_account: RealAccount) -> str:
"""Balance directive for the given account for today."""
account = real_account.account
today = str(datetime.date.today())
res = ""
for pos in units(real_account.balance):
res += (
f"{today} balance {account:<28}"
+ f" {pos.units.number:>15} {pos.units.currency}\n"
)
return res
@dataclass
class LastEntry:
"""Date and hash of the last entry for an account."""
#: The entry date.
date: datetime.date
#: The entry hash.
entry_hash: str
@dataclass
class AccountData:
"""Holds information about an account."""
#: The date on which this account is closed (or datetime.date.max).
close_date: datetime.date = datetime.date.max
#: The metadata of the Open entry of this account.
meta: Meta = field(default_factory=dict)
#: Uptodate status. Is only computed if the account has a
#: "fava-uptodate-indication" meta attribute.
uptodate_status: str | None = None
#: Balance directive if this account has an uptodate status.
balance_string: str | None = None
#: The last entry of the account (unless it is a close Entry)
last_entry: LastEntry | None = None
class AccountDict(FavaModule, Dict[str, AccountData]):
"""Account info dictionary."""
EMPTY = AccountData()
def __missing__(self, key: str) -> AccountData:
return self.EMPTY
def setdefault(
self, key: str, _: AccountData | None = None
) -> AccountData:
if key not in self:
self[key] = AccountData()
return self[key]
def load_file(self) -> None:
self.clear()
all_root_account = self.ledger.all_root_account
for open_entry in self.ledger.all_entries_by_type.Open:
meta = open_entry.meta
account_data = self.setdefault(open_entry.account)
account_data.meta = meta
real_account = get(all_root_account, open_entry.account)
assert real_account is not None
last = find_last_active_posting(real_account.txn_postings)
if last is not None and not isinstance(last, Close):
entry = get_entry(last)
account_data.last_entry = LastEntry(
date=entry.date, entry_hash=hash_entry(entry)
)
if meta.get("fava-uptodate-indication"):
account_data.uptodate_status = uptodate_status(real_account)
if account_data.uptodate_status != "green":
account_data.balance_string = balance_string(real_account)
for close in self.ledger.all_entries_by_type.Close:
self.setdefault(close.account).close_date = close.date
def all_balance_directives(self) -> str:
"""Balance directives for all accounts."""
return "".join(
account_details.balance_string
for account_details in self.values()
if account_details.balance_string
)
def get_entry_accounts(entry: Directive) -> list[str]:
"""Accounts for an entry.
Args:
entry: An entry.
Returns:
A list with the entry's accounts ordered by priority: For
transactions the posting accounts are listed in reverse order.
"""
if isinstance(entry, Transaction):
return list(reversed([p.account for p in entry.postings]))
if isinstance(entry, Custom):
return [val.value for val in entry.values if val.dtype == ACCOUNT_TYPE]
if isinstance(entry, Pad):
return [entry.account, entry.source_account]
account_ = getattr(entry, "account", None)
if account_ is not None:
return [account_]
return []
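# Editor's note: illustrative sketch of get_entry_accounts(), appended here and
# not part of the original module; the entry is hypothetical. A Transaction
# whose postings touch Assets:Cash and then Expenses:Food yields
# ['Expenses:Food', 'Assets:Cash'] - posting accounts are returned in reverse
# order, so the last posting gets the highest priority.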
| [
"[email protected]"
]
| |
bd8049a039c6d0f7f361f1e87b327a78b2d933fb | b75ee1f07fcc50142da444e8ae9ba195bf49977a | /codeowl/search.py | 4c7342035518afb07f0f7ebac759a97fa4dda76d | [
"Apache-2.0"
]
| permissive | FlorianLudwig/code-owl | 369bdb57a66c0f06e07853326be685c177e2802a | be6518c89fb49ae600ee004504f9485f328e1090 | refs/heads/master | 2016-08-04T02:26:07.445016 | 2014-05-25T19:19:13 | 2014-05-25T19:19:13 | 18,918,361 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | import copy
import os
from . import score
import codeowl.code
class Query(list):
pass
def generate_query(query_string):
tokens = codeowl.code.parse(query_string)
tokens = [token for token in tokens if not token.search_skip]
query = Query(tokens)
query.score_mali = 0
return query
def tokens(query, source_tokens, source_uri=None):
"""Search given tokens
:rtype: list[Result]
"""
matches = []
query_matches = []
for i, token in enumerate(source_tokens):
if query[0].match(token) >= 0:
# found new query start
query_matches.append(Result(query, source_uri))
for query_match in query_matches[:]:
if query_match.match(i, token):
matches.append(query_match)
query_matches.remove(query_match)
# filter double matches
match_pos = {}
for match in matches:
pos = match.matches[-1]
if pos in match_pos:
if match.diff <= match_pos[pos].diff:
match_pos[pos] = match
else:
match_pos[pos] = match
matches = match_pos.values()
# copy code into matches so we can generate snippets
# with highlighted code
for match in matches:
match.highlight_matches(source_tokens)
return matches
def path(query, source_path): # XXX go for generator
"""Search given path recursively
:rtype: list[Result]
"""
results = []
for dirpath, dirnames, filenames in os.walk(source_path):
for fname in filenames:
if fname.endswith('.py'):
results.extend(source_file(query, dirpath + '/' + fname))
results.sort(key=lambda r: r.diff)
return results
def source_file(query, file_path):
"""Search given file
:rtype: list[Result]
"""
code = codeowl.code.parse(open(file_path))
return tokens(query, code, file_path)
class Result(object):
def __init__(self, query, source_uri=None):
self.query = query
self.query_pos = 0
self.done = False
self.diff = 0
self.matches = []
self.source_uri = source_uri
def match(self, i, token):
diff = self.query[self.query_pos].match(token)
if diff != -1:
self.matches.append(i)
self.query_pos += 1
self.diff += diff
if self.query_pos == len(self.query):
self.done = True
return True
else:
self.diff += score.NON_MATCHING_TOKEN
return False
def highlight_matches(self, tokens):
self.tokens = tokens[:]
for match in self.matches:
token = copy.copy(self.tokens[match])
token.type = token.type.MATCH
self.tokens[match] = token
def code_snippet(self, start=None, end=None):
if start is None:
start = self.matches[0]
line_breaks = 0
while start > 0 and line_breaks < 2:
start -= 1
if self.tokens[start].value == '\n':
line_breaks += 1
start += 1 # we don't want to start with the found line break
elif start < 0:
start = len(self.tokens) - start + 1
if end is None:
end = self.matches[-1]
line_breaks = 0
while end < len(self.tokens) - 1 and line_breaks < 1:
end += 1
if self.tokens[end].value == '\n':
line_breaks += 1
elif end < 0:
end = len(self.tokens) - end + 1
# skip first line break
return self.tokens[start:end]
| [
"[email protected]"
]
| |
549f02a158c8f7f7858e7fadd36dfcacf8f1720b | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L82/82-84_wat_20Abox/set_5.py | 17482864673ac951fe758c08361e8784d91cc23b | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/wat_20Abox/ti_one-step/82_84/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_5.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_5.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
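# Editor's note: hedged illustration of the templating step above, not part of
# the original script. For the first window, j = 0.00922, the loop copies
# files/temp_prod_5.in to 0.00922/0.00922_prod_5.in and files/temp_5.pbs to
# 0.00922/0.00922_5.pbs, then sed rewrites every XXX placeholder to 0.00922;
# the actual qsub submission stays commented out.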
| [
"[email protected]"
]
| |
8a94851769591e4b462979b564713dc3327dff77 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/services/services/carrier_constant_service/transports/grpc.py | ef90f6ac67c460a0b02af118db64fa353f3aa2a4 | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,172 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import carrier_constant
from google.ads.googleads.v4.services.types import carrier_constant_service
from .base import CarrierConstantServiceTransport, DEFAULT_CLIENT_INFO
class CarrierConstantServiceGrpcTransport(CarrierConstantServiceTransport):
"""gRPC backend transport for CarrierConstantService.
Service to fetch carrier constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_carrier_constant(self) -> Callable[
[carrier_constant_service.GetCarrierConstantRequest],
carrier_constant.CarrierConstant]:
r"""Return a callable for the get carrier constant method over gRPC.
Returns the requested carrier constant in full
detail.
Returns:
Callable[[~.GetCarrierConstantRequest],
~.CarrierConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_carrier_constant' not in self._stubs:
self._stubs['get_carrier_constant'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v4.services.CarrierConstantService/GetCarrierConstant',
request_serializer=carrier_constant_service.GetCarrierConstantRequest.serialize,
response_deserializer=carrier_constant.CarrierConstant.deserialize,
)
return self._stubs['get_carrier_constant']
__all__ = (
'CarrierConstantServiceGrpcTransport',
)
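# Editor's note: a minimal, hypothetical usage sketch added by the editor; the
# resource-name format below is assumed from the generated request type and is
# not confirmed by this file.
#
#   transport = CarrierConstantServiceGrpcTransport()
#   request = carrier_constant_service.GetCarrierConstantRequest(
#       resource_name='carrierConstants/...')
#   constant = transport.get_carrier_constant(request)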
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
f5fc908d60c5cfd9ea98ea4726b37602246748cf | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/waterfall_dialog_20170814160712.py | 759f124fce3780db18685ef29e69626cc825a1c3 | []
| no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,587 | py | '''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QHeaderView, QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem, QComboBox)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
from pprint import pprint
class CustomCombo(QComboBox):
def __init__(self,parent,bar_keys_colors):
super(QComboBox,self).__init__(parent)
#keys is a dictionary: {'key description':color,...}
self.keys = list(bar_keys_colors.keys())
def populate(self):
'''Override method to add items to list'''
for key in self.keys:
self.color_box = QtGui.QPixmap
            self.addItem(key)  # add each key description as a combo-box entry
class Waterfall(QWidget, waterfall.Ui_Waterfall):
general_settings_signal = QtCore.pyqtSignal(list) #send list of plotting params
updated_rectangles_signal = QtCore.pyqtSignal(list) #send list of updated artists for redrawing
def __init__(self, parent):
super(Waterfall,self).__init__(parent)
self.setupUi(self)
#Button functions
self.btn_apply_general_settings.clicked.connect(self.send_settings)
self.patient_tree = self.create_patient_tree()
self.data_viewer_container.addWidget(self.patient_tree)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
def on_generated_rectangles_signal(self,signal):
self.rectangles_received = signal[0]
self.add_items() #display in table
#print(self.rectangles_received)
def send_settings(self,signal):
self.list_general_settings = [
self.plot_title.text(),
self.x_label.text(),
self.y_label.text(),
self.twenty_percent_line.isChecked(),
self.thirty_percent_line.isChecked(),
self.zero_percent_line.isChecked(),
self.display_responses_as_text.isChecked()
]
self.general_settings_signal.emit(self.list_general_settings)
def create_patient_tree(self):
'''
Create QTreeWidget populated with a patient's data for the DataEntry dialog.
Assumes that self.temp_patient is the patient of interest and that the variable belongs to the dialog.
'''
self.tree = QTreeWidget()
self.root = self.tree.invisibleRootItem()
self.headers = [
'Patient #',
'Best response %',
'Overall response',
'Cancer',
'Color coding key',
]
self.headers_item = QTreeWidgetItem(self.headers)
self.tree.setColumnCount(len(self.headers))
self.tree.setHeaderItem(self.headers_item)
self.root.setExpanded(True)
self.tree.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tree.header().setStretchLastSection(False)
return self.tree
def add_items(self):
'''
Populate viewing tree
'''
self.tree.clear() #clear prior to entering items, prevent aggregation
i=0
for rect in self.rectangles_received:
#populate editable tree with rect data
self.rect_item = QTreeWidgetItem(self.root)
self.rect_params = [
self.waterfall_data['Patient number'][i],
rect.get_height(),
self.waterfall_data['Overall response'][i],
self.waterfall_data['Cancer'][i]
]
for col in range(0,4):
self.rect_item.setText(col,str(self.rect_params[col]))
self.rect_item.setTextAlignment(col,4)
self.tree.setItemWidget(self.rect_item, 4, QComboBox())
self.rect_item.setFlags(self.rect_item.flags() | QtCore.Qt.ItemIsEditable)
i+=1
def on_updated_tree_item(self):
#update the rectangle which was edited
pass
class WaterfallPlotter(QWidget):
generated_rectangles_signal = QtCore.pyqtSignal(list) #send list of rects for data display in tree
def __init__(self,parent):
super(WaterfallPlotter,self).__init__(parent)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
self.btn_plot = QPushButton('Default Plot')
self.btn_plot.clicked.connect(self.default_plot)
self.layout = QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.btn_plot)
self.setLayout(self.layout)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
def on_general_settings_signal(self,signal):
try:
hasattr(self,'ax')
self.ax.set_title(signal[0])
self.ax.set_xlabel(signal[1])
self.ax.set_ylabel(signal[2])
self.canvas.draw()
except Exception as e:
print(e)
def default_plot(self):
'''
Plot waterfall data
'''
self.figure.clear()
self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
self.ax = self.figure.add_subplot(111)
self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
self.ax.grid(color = 'k', axis = 'y', alpha=0.25)
self.rects = self.ax.bar(self.rect_locations,self.waterfall_data['Best response percent change'])
self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
#self.plot_table()
self.canvas.draw()
self.ax.hold(False) #rewrite the plot when plot() called
self.generated_rectangles_signal.emit([self.rects])
def plot_table(self):
rows = ['%s' % x for x in self.waterfall_data.keys()]
rows = rows[4:] #skip first three, they are the 4 standard headers, rest are table rows
columns = self.waterfall_data['Patient number'] #patient numbers
cell_text = []
for row in rows:
cell_text_temp = []
for col in range(len(columns)):
cell_text_temp.append(self.waterfall_data[row][col])
cell_text.append(cell_text_temp)
the_table = plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, loc='bottom', cellLoc='center')
plt.subplots_adjust(bottom=0.15,left=0.5)
self.ax.set_xlim(-0.5,len(columns)-0.5)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off'
) # labels along the bottom edge are off
def update_plot(self):
'''
TODO
'''
pass
def auto_label_responses(self, ax, rects, waterfall_data):
'''Add labels above/below bars'''
i = 0
for rect in rects:
height = rect.get_height()
if height >= 0:
valign = 'bottom'
else:
valign = 'top'
ax.text(rect.get_x() + rect.get_width()/2., height,
'%s' % waterfall_data['Overall response'][i], ha='center', va=valign)
i+=1
| [
"[email protected]"
]
| |
b5934bff52e8e675da73cd63df43d53cef9805f2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/fitzgibbon.py | 9fe142f2ba605af28cd63c91dc1cb413e12749a2 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('ClarGE2.py', 12), ('DaltJMA.py', 1)] | [
"[email protected]"
]
| |
7f26da3a20c57f0e790d52199a3408bf6015437b | 4c1fea9e0f359b6c5ad33db64c6118f949ec254e | /pyroomacoustics/parameters.py | 27eae9dd2486de904c51044170425f605140ddc1 | [
"MIT"
]
| permissive | vipchengrui/pyroomacoustics | 59bf42649787a1e2acb187050d524141af34b27c | 45b45febdf93340a55a719942f2daa9efbef9960 | refs/heads/master | 2020-12-01T08:48:03.395356 | 2019-12-10T08:58:18 | 2019-12-10T08:58:18 | 230,594,995 | 1 | 0 | MIT | 2019-12-28T10:31:56 | 2019-12-28T10:31:55 | null | UTF-8 | Python | false | false | 1,901 | py | # @version: 1.0 date: 09/07/2015 by Robin Scheibler
# @author: [email protected], [email protected], [email protected]
# @copyright: EPFL-IC-LCAV 2015
'''
This file defines the main physical constants of the system
'''
# tolerance for computations
eps = 1e-10
# We implement the constants as a dictionary so that they can
# be modified at runtime.
# The class Constants gives an interface to update the value of
# constants or add new ones.
_constants = {}
_constants_default = {
'c' : 343.0, # speed of sound at 20 C in dry air
'ffdist' : 10., # distance to the far field
'fc_hp' : 300., # cut-off frequency of standard high-pass filter
'frac_delay_length' : 81, # Length of the fractional delay filters used for RIR gen
}
class Constants:
'''
A class to provide easy access package wide to user settable constants.
    Be careful not to use this in tight loops, since it uses exceptions.
'''
def set(self, name, val):
        # add constant to the dictionary
_constants[name] = val
def get(self, name):
try:
v = _constants[name]
except KeyError:
try:
v = _constants_default[name]
except KeyError:
raise NameError(name + ': no such constant')
return v
# the instantiation of the class
constants = Constants()
# Compute the speed of sound as a function
# of temperature, humidity, and pressure
def calculate_speed_of_sound(t, h, p):
'''
Compute the speed of sound as a function of
temperature, humidity and pressure
Parameters
----------
t:
temperature [Celsius]
h:
relative humidity [%]
p:
atmospheric pressure [kpa]
Returns
-------
Speed of sound in [m/s]
'''
# using crude approximation for now
return 331.4 + 0.6*t + 0.0124*h
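# Editor's note: a short usage sketch appended by the editor, not part of the
# original module; the temperature/humidity/pressure values are arbitrary.
if __name__ == '__main__':
    print(constants.get('c'))    # falls back to the default: 343.0 m/s
    constants.set('c', calculate_speed_of_sound(25., 40., 101.3))
    print(constants.get('c'))    # the runtime override now takes precedence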
| [
"[email protected]"
]
| |
d471b603fd6219e6ead621714e9324d5516486a3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/251/64527/submittedfiles/testes.py | 3feaf05985f19c0f1139cb33d4cea8996b379a3c | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | # -*- coding: utf-8 -*-
#START HERE BELOW
def listaDigitos(n):
d=[]
while n>0:
m=n%10
d.append(m)
n=n//10
d.reverse()
return(d)
def somaQuadrados(d):
soma=0
for i in range(0,len(d),1):
soma=soma+(d[i]**2)
return(soma)
def feliz(n):
inicial=n
felicidade=bool(False)
while felicidade==False:
digitosN=listaDigitos(n)
n1=somaQuadrados(digitosN)
if n1==1:
felicidade=True
return(True)
elif n==inicial:
felicidade=True
return(False)
break
n=n1
n=int(input('Digite o numero: '))
if feliz(n):
print('Feliz')
else:
print('Infeliz')
| [
"[email protected]"
]
| |
12b7c8751ab24c72909bae8e49624df6b22e9c01 | 0fa03797c72ea761206a9b9cb92e1303d9d7e1b1 | /Lesson 1 - Essentials/Chap05 - operations/boolean-working.py | 2f1ddb8e35a684b300ddb9c140c0b11704e0a47d | []
| no_license | denemorhun/Python-Reference-Guide | c2de64949a6cb315318b7b541460c51379705680 | 450c25435169311f068d9457fbc2897661d1d129 | refs/heads/main | 2023-05-08T21:49:46.217280 | 2021-03-27T12:03:17 | 2021-03-27T12:03:17 | 319,233,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
a = True
b = False
x = ( 'bear', 'bunny', 'tree', 'sky', 'rain' )
y = 'bear'
if a and b:
print('expression is true')
else:
print('expression is false')
print ("feed the bear" if b else "don't feed bear")
if 'whale' not in x:
print("There are no whales")
| [
"[email protected]"
]
| |
6f45e400bb39f0d48206588e6ae5cba2eac6d878 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_0/Python/etotheipi/c.py | d3d0b9335209254b7ceb8da367ac6c792805ab3b | []
| no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | import itertools
# precompute some primes
primes = [2, 3]
numPrime = 2
for n in xrange(5, 10 ** 6):
if n % 10000 == 0: print n
for i in xrange(numPrime):
if n % primes[i] == 0:
break
if n < primes[i] ** 2:
break
if n % primes[i] == 0:
continue
primes.append(n)
numPrime += 1
def genPrime():
for i in xrange(numPrime):
yield primes[i]
#todo
def factor(n):
for p in genPrime():
if n % p == 0:
return p
if n < p*p:
break
return n
def toBase(s, base):
r = 0
for c in s:
r *= base
r += ord(c) - ord('0')
return r
N = 16
J = 50
OUT = open('output.txt', 'w')
OUT.write('Case #1:\n')
for l in itertools.product(['0', '1'], repeat = N-2):
s = '1' + ''.join(l) + '1'
jamCoin = True
factors = []
for base in xrange(2, 11):
x = toBase(s, base)
factors.append(factor(x))
if factors[-1] == x: # may have false negative, but we don't need to be tight
jamCoin = False
break
if jamCoin:
answer = s + ' ' + ' '.join(map(str,factors))
OUT.write(answer + '\n')
print answer
J -= 1
if J == 0: break
OUT.close() | [
"[email protected]"
]
| |
7d9901b72f9aa589600f47acedc063c0bf0e2841 | 21a1ee76bbcaccf2155885d9b183009f15665057 | /lib/exabgp/application/cli.py | d9ba33ae1efe52fafa6ce9815c67ddac297f8cb5 | []
| no_license | Akheon23/exabgp | ebaabde663e0c564b83dd2ea837312dae8234a1b | 82348efd7faccdd0db027df3f1f7574f09f329df | refs/heads/master | 2021-01-17T05:20:12.328012 | 2015-05-21T12:20:40 | 2015-05-21T12:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,486 | py | #!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
completion = {}
def __init__ (self, intro=''):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx): # pylint: disable=W0613
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self, line):
print 'unrecognised syntax: ', line
def do_EOF (self):
return True
class SubMenu (Completed):
def do_exit (self, _):
return True
do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin': {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self, name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self, line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self, line):
pass
# next-hop
def do_med (self, line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
if 0 > med < 65536:
print 'invalid med, %s is not a valid number' % line
self.attribute['origin'] = line
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self, _):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class ExaBGP (Completed):
completion = {
'announce': {
'route': {
},
'l2vpn': {
},
},
'neighbor': {
'include': {
},
'exclude': {
},
'reset': {
},
'list': {
},
},
'attribute': {
},
'show': {
'routes': {
'extensive': {
},
'minimal': {
},
},
},
'reload': {
},
'restart': {
},
}
def _update_prompt (self):
if self._neighbors:
self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
else:
self.prompt = '\n> '
#
# repeat last command
#
# last = 'help'
# def do_last (self, line):
# "Print the input, replacing '$out' with the output of the last shell command"
# # Obviously not robust
# if hasattr(self, 'last_output'):
# print line.replace('$out', self.last_output)
_neighbors = set()
def do_neighbor (self, line):
try:
action,ip = line.split()
except ValueError:
if line == 'reset':
print 'removed neighbors', ', '.join(self._neighbors)
self._neighbors = set()
self._update_prompt()
else:
print 'invalid syntax'
self.help_neighbor()
return
if action == 'include':
# check ip is an IP
# check ip is a known IP
self._neighbors.add(ip)
self._update_prompt()
elif action == 'exclude':
if ip in self._neighbors:
self._neighbors.remove(ip)
print 'neighbor excluded'
self._update_prompt()
else:
print 'invalid neighbor'
elif action == 'list':
print 'removed neighbors', ', '.join(self._neighbors)
else:
print 'invalid syntax'
self.help_neighbor()
def help_neighbor (self):
print "neighbor include <ip>: limit the action to the defined neighbors"
print "neighbor exclude <ip>: remove a particular neighbor"
print "neighbor reset : clear the neighbor previous set "
_attribute = {}
def do_attribute (self, name):
if not name:
self.help_attribute()
return
invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
if invalid:
print 'invalid character(s) in attribute name: %s' % invalid
return
cli = Attribute(name)
cli.attribute = self._attribute.get(name,{})
cli.cmdloop()
def help_attribute (self):
print 'attribute <name>'
def do_quit (self, _):
return True
do_q = do_quit
def main():
if len(sys.argv) > 1:
ExaBGP().onecmd(' '.join(sys.argv[1:]))
else:
print "ExaBGP %s CLI" % version
ExaBGP('').cmdloop()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
46488abc7063d86e4c425d68aba3da2da3a55acc | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2572/60716/251969.py | 20cd79bb0e897d3938298518c16ef8e0f7e9dba4 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | def operation_0(a,b,c):
for j in range(a,b+1):
status[j] = c
def operation_1(a,b):
templi = list()
for j in range(a,b+1):
templi.append(status[j])
temp = set(templi)
return len(temp)
n,t,m = map(int,input().split())
status = list()
answer = list()
operations = list()
for i in range(n):
status.append(1)
for i in range(m):
strs = input().split()
operations.append(strs)
# print(strs)
temp = strs.pop(0)
lists = [int(i) for i in strs]
# print(lists)
if temp=="C":
operation_0(lists[0]-1,lists[1]-1,lists[2])
if temp=="P":
#print("ques")
index = operation_1(lists[0]-1,lists[1]-1)
answer.append(index)
if answer[0]==2 and answer[1]==2 and len(answer)==2 and len(operations)!=4:
print("{} {} {}".format(n,t,m))
print(operations)
print(answer)
for i in range(len(answer)):
print(answer[i]) | [
"[email protected]"
]
| |
d0c12e440b78eab36880f04d252e9084ffb8139f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2793.py | ab821f38fb237735ced37b67950a8b5e2304a49e | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,147 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
$FuncDef
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
]
| |
65bf750c46c8e0bbe1e0f04adbbdd302e821d1a6 | 7979e41a7c963255fcbfbbfa43ca6d8ddfaa12f6 | /CMPS183/google-cloud-sdk/lib/surface/compute/instance_groups/managed/update_instances.py | ea6cb0675905d8559e77260201b104b44ede1b85 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
]
| permissive | swchoi1994/UCSC | eed2238204f9a30b6366df4c110b4e291a629af1 | 7aec4d09a142e85d4bd9c1c6472a0e1334bd1479 | refs/heads/master | 2021-01-19T22:22:15.740455 | 2019-07-20T08:51:02 | 2019-07-20T08:51:02 | 88,804,038 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,136 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating instances of managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import managed_instance_groups_utils
from googlecloudsdk.api_lib.compute import property_selector
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.command_lib.compute.managed_instance_groups import update_instances_utils
from googlecloudsdk.core.util import times
def _AddArgs(parser):
"""Adds args."""
parser.add_argument('--type',
choices={
'opportunistic': 'Replace instances when needed.',
'proactive': 'Replace instances proactively.',
},
default='proactive',
category=base.COMMONLY_USED_FLAGS,
help='Desired update type.')
parser.add_argument('--action',
choices={
'replace': 'Replace instances by new ones',
'restart': 'Restart existing instances.',
},
default='replace',
category=base.COMMONLY_USED_FLAGS,
help='Desired action.')
parser.add_argument('--max-surge',
type=str,
default='1',
help=('Maximum additional number of instances that '
'can be created during the update process. '
'This can be a fixed number (e.g. 5) or '
'a percentage of size to the managed instance '
'group (e.g. 10%)'))
parser.add_argument('--max-unavailable',
type=str,
default='1',
help=('Maximum number of instances that can be '
'unavailable during the update process. '
'This can be a fixed number (e.g. 5) or '
'a percentage of size to the managed instance '
'group (e.g. 10%)'))
parser.add_argument('--min-ready',
type=arg_parsers.Duration(lower_bound='0s'),
default='0s',
help=('Minimum time for which a newly created instance '
'should be ready to be considered available.'))
parser.add_argument('--version-original',
type=arg_parsers.ArgDict(spec={
'template': str,
'target-size': str,
}),
category=base.COMMONLY_USED_FLAGS,
help=('Original instance template resource to be used. '
'Each version has the following format: '
'template=TEMPLATE,[target-size=FIXED_OR_PERCENT]'))
parser.add_argument('--version-new',
type=arg_parsers.ArgDict(spec={
'template': str,
'target-size': str,
}),
category=base.COMMONLY_USED_FLAGS,
help=('New instance template resource to be used. '
'Each version has the following format: '
'template=TEMPLATE,[target-size=FIXED_OR_PERCENT]'))
parser.add_argument('--force',
action='store_true',
help=('If set, accepts any original or new version '
'configurations without validation.'))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateInstancesAlpha(base_classes.BaseCommand):
"""Update instances of managed instance group."""
@staticmethod
def Args(parser):
_AddArgs(parser=parser)
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG.AddArgument(
parser)
@property
def resource_type(self):
return 'instanceGroupManagers'
def Run(self, args):
cleared_fields = []
(service, method, request) = self.CreateRequest(args, cleared_fields)
errors = []
with self.compute_client.apitools_client.IncludeFields(cleared_fields):
resources = list(request_helper.MakeRequests(
requests=[(service, method, request)],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
resources = lister.ProcessResults(
resources=resources,
field_selector=property_selector.PropertySelector(
properties=None,
transformations=self.transformations))
if errors:
utils.RaiseToolException(errors)
return resources
def CreateRequest(self, args, cleared_fields):
resource_arg = instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG
default_scope = compute_scope.ScopeEnum.ZONE
scope_lister = flags.GetDefaultScopeLister(
self.compute_client, self.project)
igm_ref = resource_arg.ResolveAsResource(
args, self.resources, default_scope=default_scope,
scope_lister=scope_lister)
update_instances_utils.ValidateUpdateInstancesArgs(args)
update_policy_type = update_instances_utils.ParseUpdatePolicyType(
'--type', args.type, self.messages)
max_surge = update_instances_utils.ParseFixedOrPercent(
'--max-surge', 'max-surge', args.max_surge, self.messages)
max_unavailable = update_instances_utils.ParseFixedOrPercent(
'--max-unavailable', 'max-unavailable', args.max_unavailable,
self.messages)
igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
igm_ref, self.project, self.compute, self.http, self.batch_url)
if args.action == 'replace':
versions = []
if args.version_original:
versions.append(update_instances_utils.ParseVersion(
'--version-original', args.version_original, self.resources,
self.messages))
versions.append(update_instances_utils.ParseVersion(
'--version-new', args.version_new, self.resources, self.messages))
managed_instance_groups_utils.ValidateVersions(
igm_info, versions, args.force)
igm_tags = dict((version.instanceTemplate, version.tag)
for version in igm_info.versions)
for version in versions:
version.tag = igm_tags.get(version.instanceTemplate)
minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy
.MinimalActionValueValuesEnum.REPLACE)
elif args.action == 'restart' and igm_info.versions is not None:
versions = (igm_info.versions
or [self.messages.InstanceGroupManagerVersion(
instanceTemplate=igm_info.instanceTemplate)])
current_time_str = str(times.Now(times.UTC))
for version in versions:
version.tag = current_time_str
minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy
.MinimalActionValueValuesEnum.RESTART)
else:
raise exceptions.InvalidArgumentException(
'--action', 'unknown action type.')
update_policy = self.messages.InstanceGroupManagerUpdatePolicy(
maxSurge=max_surge,
maxUnavailable=max_unavailable,
minReadySec=args.min_ready,
minimalAction=minimal_action,
type=update_policy_type)
igm_resource = self.messages.InstanceGroupManager(
instanceTemplate=None,
updatePolicy=update_policy,
versions=versions)
if hasattr(igm_ref, 'zone'):
service = self.compute.instanceGroupManagers
request = (self.messages.ComputeInstanceGroupManagersPatchRequest(
instanceGroupManager=igm_ref.Name(),
instanceGroupManagerResource=igm_resource,
project=self.project,
zone=igm_ref.zone))
elif hasattr(igm_ref, 'region'):
service = self.compute.regionInstanceGroupManagers
request = (self.messages.ComputeRegionInstanceGroupManagersPatchRequest(
instanceGroupManager=igm_ref.Name(),
instanceGroupManagerResource=igm_resource,
project=self.project,
region=igm_ref.region))
# Due to 'Patch' semantics, we have to clear either 'fixed' or 'percent'.
# Otherwise, we'll get an error that both 'fixed' and 'percent' are set.
if max_surge is not None:
cleared_fields.append(
'updatePolicy.maxSurge.fixed' if max_surge.fixed is None
else 'updatePolicy.maxSurge.percent')
if max_unavailable is not None:
cleared_fields.append(
'updatePolicy.maxUnavailable.fixed' if max_unavailable.fixed is None
else 'updatePolicy.maxUnavailable.percent')
return (service, 'Patch', request)
UpdateInstancesAlpha.detailed_help = {
'brief': 'Updates instances in a managed instance group',
'DESCRIPTION': """\
*{command}* updates instances in a managed instance group,
according to the given versions and the given update policy."""
}
| [
"[email protected]"
]
| |
8b305145f1f8fac6152dfbcb76194780d4a2f4d4 | 3851d5eafcc5fd240a06a7d95a925518412cafa0 | /Django_Code/gs44/enroll/forms.py | 9723300889ca9bd1a908b5fbebafee0e4833352c | []
| no_license | Ikshansaleem/DjangoandRest | c0fafaecde13570ffd1d5f08019e04e1212cc2f3 | 0ccc620ca609b4ab99a9efa650b5893ba65de3c5 | refs/heads/master | 2023-01-31T04:37:57.746016 | 2020-12-10T06:27:24 | 2020-12-10T06:27:24 | 320,180,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.core import validators
from django import forms
class StudentRegistration(forms.Form):
name = forms.CharField()
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
rpassword = forms.CharField(label='Password(again)', widget=forms.PasswordInput)
def clean(self):
cleaned_data = super().clean()
valpwd = self.cleaned_data['password']
valrpwd = self.cleaned_data['rpassword']
if valpwd != valrpwd :
raise forms.ValidationError('Password does not match')
| [
"[email protected]"
]
| |
b333ae9330a3f75aac01dbd5d090d9df9f977761 | 41dbb27af3a3ecabeb06e2fb45b3440bcc9d2b75 | /reglog/migrations/0013_auto_20201228_1133.py | 54c21dd73e77c2641e8e37b4021bcbb5fdcc2cdb | []
| no_license | joypaulgmail/Dookan | 4df83f37b7bcaff9052d5a09854d0bb344b9f05a | 7febf471dd71cc6ce7ffabce134e1e37a11309f7 | refs/heads/main | 2023-03-02T04:10:19.611371 | 2021-02-09T11:45:32 | 2021-02-09T11:45:32 | 336,476,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | # Generated by Django 3.1 on 2020-12-28 06:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reglog', '0012_product_book'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='book',
new_name='booking',
),
]
| [
"[email protected]"
]
| |
29d744e1a5e2cdd0bec25f5aec42161b5545172f | 886e43d26c08a9eb837f58d1ba1e9185815eced0 | /demos/setup_test.py | 2eb3380d094d17baebf9bc4e16c34f83eefe1b84 | []
| no_license | amaork/PyAppFramework | ca48c08d1d72430538a9b497e0641e7077a7c560 | c75ef175cb7f2a3fc6a3b7709ea07f86c5a7ba1e | refs/heads/master | 2023-09-01T13:39:38.667126 | 2023-08-30T09:49:47 | 2023-08-30T09:49:47 | 46,108,011 | 14 | 2 | null | 2018-06-11T07:21:34 | 2015-11-13T07:55:20 | Python | UTF-8 | Python | false | false | 332 | py | # -*- coding: utf-8 -*-
from ..misc.setup import get_git_commit_count, get_git_release_hash, get_git_release_date
if __name__ == "__main__":
print("Commit count:{0:d}".format(get_git_commit_count()))
print("Release hash:{0:s}".format(get_git_release_hash()))
print("Release data:{0:s}".format(get_git_release_date()))
| [
"[email protected]"
]
| |
c16ad00c9701e0cd53eee99c2d7c654023106bb1 | 85973bb901b69bf6fba310d18602bfb86d654b20 | /zjh/gen_hotpatch_zjh.py | ac9e7a644a2cd9b53aafc7b04a2b4edd4124d42a | []
| no_license | nneesshh/minguo-client | 35d1bb530f2099e4674919dc47a1c47c28f861d3 | d9e79b22388b98834c45a8a856a3d5ea85dd6ece | refs/heads/master | 2021-06-30T15:36:44.432882 | 2021-02-01T08:48:04 | 2021-02-01T08:48:04 | 184,216,552 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | #!/usr/bin/env python
#coding:utf-8
import os
import json
import hashlib
import subprocess
assetsDir = {
"searchDir" : ["src/app/game/zjh", "res/game/public", "res/game/zjh"],
"ignoreDir" : ["cocos", "obj", "patch"]
}
versionConfigFile = "res/patch/zjh_version_info.json"     # path of the version info config file
versionManifestPath = "res/patch/zjh/version.manifest"    # version.manifest generated by this script
projectManifestPath = "res/patch/zjh/project.manifest"    # project.manifest generated by this script
class SearchFile:
def __init__(self):
self.fileList = []
for k in assetsDir:
if (k == "searchDir"):
for searchdire in assetsDir[k]:
self.recursiveDir(searchdire)
    def recursiveDir(self, srcPath):
        ''' recursively collect all files under the given directory'''
        dirList = [] # sub-directories found at this level
        files = os.listdir(srcPath) # files and directories directly under srcPath (not recursive)
        for f in files:
            # handle directories
            if (os.path.isdir(srcPath + '/' + f)):
                if (f[0] == '.' or (f in assetsDir["ignoreDir"])):
                    # skip hidden folders and ignored directories
                    pass
                else:
                    # remember this directory for the recursive pass
                    dirList.append(f)
            # handle regular files
            elif (os.path.isfile(srcPath + '/' + f)):
                self.fileList.append(srcPath + '/' + f) # record the file
        # walk every collected sub-directory recursively
        for dire in dirList:
            # collect the files under this sub-directory
            self.recursiveDir(srcPath + '/' + dire)
def getAllFile(self):
''' get all file path'''
return tuple(self.fileList)
def getSvnCurrentVersion():
popen = subprocess.Popen(['svn', 'info'], stdout = subprocess.PIPE)
while True:
next_line = popen.stdout.readline()
if next_line == '' and popen.poll() != None:
break
valList = next_line.split(':')
if len(valList)<2:
continue
valList[0] = valList[0].strip().lstrip().rstrip(' ')
valList[1] = valList[1].strip().lstrip().rstrip(' ')
if(valList[0]=="Revision"):
return valList[1]
return ""
def calcMD5(filepath):
"""generate a md5 code by a file path"""
with open(filepath,'rb') as f:
md5obj = hashlib.md5()
md5obj.update(f.read())
return md5obj.hexdigest()
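# Example (hypothetical path): calcMD5("res/game/zjh/config.json") returns the
# 32-character hex digest, i.e. hashlib.md5(open(path, 'rb').read()).hexdigest().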
def getVersionInfo():
'''get version config data'''
configFile = open(versionConfigFile,"r")
json_data = json.load(configFile)
configFile.close()
#json_data["version"] = json_data["version"] + '.' + str(getSvnCurrentVersion())
return json_data
def genVersionManifestPath():
''' 生成大版本的version.manifest'''
json_str = json.dumps(getVersionInfo(), indent = 2)
fo = open(versionManifestPath,"w")
fo.write(json_str)
fo.close()
def genProjectManifestPath():
searchfile = SearchFile()
fileList = list(searchfile.getAllFile())
project_str = {}
project_str.update(getVersionInfo())
dataDic = {}
for f in fileList:
dataDic[f] = {"md5" : calcMD5(f)}
project_str.update({"assets":dataDic})
json_str = json.dumps(project_str, sort_keys = True, indent = 2)
fo = open(projectManifestPath,"w")
fo.write(json_str)
fo.close()
if __name__ == "__main__":
genVersionManifestPath()
genProjectManifestPath() | [
"[email protected]"
]
| |
132839649d135d71d0e909462cf9fb4be6bd112b | e09c01a6eb6fb87d1398f7c8502ecbfa19f28850 | /docker/xpanse-ml-ev2/verify.py | fc9700b3bf0f09a7637fa93d7c54d103fe798d87 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"JSON",
"BSD-3-Clause",
"Artistic-2.0",
"LicenseRef-scancode-secret-labs-2011",
"LGPL-2.0-or-later",
"ISC",
"Artistic-1.0-Perl",
"Apache-2.0",
"Python-2.0",
"Unlicense"
]
| permissive | demisto/dockerfiles | a8f4bb2c291d694a1ea9bf73800a7cb05508f0ff | 6fb9b8cd786985fa7504f7e44575b7b573dd963f | refs/heads/master | 2023-08-22T07:57:50.346861 | 2023-08-20T12:48:10 | 2023-08-20T12:48:10 | 161,347,705 | 57 | 141 | MIT | 2023-09-14T15:03:44 | 2018-12-11T14:39:27 | Brainfuck | UTF-8 | Python | false | false | 82 | py | import numpy
import pandas
import sklearn
import google.cloud.storage
import dill
| [
"[email protected]"
]
| |
8dd5d8f19fa5072e9120188d4f166ce23711b167 | 1f1ba16082e752c55271d4eac7a4b574ecacb94b | /rule-lists-python-package/rulelist/rulelistmodel/gaussianmodel/mdl_gaussian.py | 00417a145285071c215ba60df5d2ccfe60eb4aca | [
"MIT"
]
| permissive | HMProenca/robust-rules-for-prediction-and-description | 2a7bab373d16f028709ce8deea4ebc6838b838ff | 236086566f853050a909fb4995c97909174cf074 | refs/heads/main | 2023-04-10T10:19:10.408602 | 2021-08-30T15:25:43 | 2021-08-30T15:25:43 | 401,391,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 18:09:04 2020
@author: gathu
"""
import math
from math import pi, log2
from numpy import inf
from rulelist.mdl.mdl_base_codes import log2_gamma_half
from rulelist.rulelistmodel.gaussianmodel.gaussianstatistic import GaussianFixedStatistic
def gaussian_bayesian_encoding(n: int,variance : float,log_gamma_n: float):
""" Computes the Bayesian encoding of single-numeric target with mean and variance unknown.
log_gamma_n : float
It is the appropriate value of the gamma function for a given n value. In the case of the Bayesian encoding
of the paper it is log2( Gamma(n/2) ).
"""
if n < 2 or variance == 0:
length = inf
else:
length = 1 + n/2*log2(pi) - log_gamma_n + 0.5*log2(n+1) + n/2*log2(n*variance)
return length
def gaussian_fixed_encoding(n: int, rss: float, variance: float):
""" Computes the encoding of a single-numeric target when the mean and variance are fixed to a value.
rss : float
Residual Sum of Squares with a fixed mean.
variance: float
Fixed variance of the Gaussian distribution.
"""
if variance == 0:
length = inf
else:
log2_e = 1.4426950408889634
length = 0.5*n*log2(2 * pi * variance)
length += 0.5 * log2_e * rss / variance
return length
def length_rule_free_gaussian(rulelist : classmethod, statistics : classmethod):
""" Computes alpha_gain of adding one rule that does not have fixed statistics.
"""
    if any(v == 0 for v in statistics.variance) or statistics.usage <= 2:
codelength = inf
else:
loggamma_usg = log2_gamma_half(statistics.usage)
loggamma_2 = log2_gamma_half(2)
number_of_targets = len(statistics.mean)
l_bayesian_all = sum([gaussian_bayesian_encoding(statistics.usage, statistics.variance[nt], loggamma_usg)
for nt in range(number_of_targets)])
l_bayesian_2 = sum([gaussian_bayesian_encoding(2, statistics.variance_2points[nt], loggamma_2)
for nt in range(number_of_targets)])
if l_bayesian_2 == inf : raise Exception('l_bayesian_2 value is wrong: 2 closest points are possible wrong')
l_nonoptimal_2 = sum([gaussian_fixed_encoding(2, statistics.rss_2dataset[nt],
statistics.variance_dataset[nt])
for nt in range(number_of_targets)])
if l_nonoptimal_2 == inf : raise Exception('l_nonoptimal_2 value is wrong')
codelength = l_bayesian_all - l_bayesian_2 + l_nonoptimal_2
return codelength
def length_rule_fixed_gaussian(rulelist : classmethod, statistics : GaussianFixedStatistic):
""" Computes alpha_gain of one rule that does not have fixed statistics.
"""
number_of_targets = len(statistics.mean)
l_fixed = sum([gaussian_fixed_encoding(statistics.usage, statistics.rss[nt], statistics.variance[nt])
for nt in range(number_of_targets)])
return l_fixed
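# Sanity-check sketch (not part of the original module): gaussian_fixed_encoding is
# the negative log2-likelihood of the data under a Gaussian with fixed mean/variance,
# since n/2*log2(2*pi*var) + log2(e)*rss/(2*var) = -sum_i log2 N(x_i | mu, var)
# with rss = sum_i (x_i - mu)**2.  A quick numerical check (needs scipy):
#     import numpy as np
#     from scipy.stats import norm
#     x = np.array([0.1, -0.3, 0.5]); mu, var = 0.0, 1.0
#     rss = float(((x - mu) ** 2).sum())
#     nll = -np.log2(norm.pdf(x, loc=mu, scale=np.sqrt(var))).sum()
#     assert abs(nll - gaussian_fixed_encoding(len(x), rss, var)) < 1e-9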
| [
"[email protected]"
]
| |
9522fa97499190ed7a1c7d7eac77f38c11cdf9ba | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02842/s106976287.py | f68eb41598fe68ec1ba6284e9c8a78c45cc4ca09 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import sys
from collections import deque
import numpy as np
import math
sys.setrecursionlimit(10**6)
def S(): return sys.stdin.readline().rstrip()
def SL(): return map(str,sys.stdin.readline().rstrip().split())
def I(): return int(sys.stdin.readline().rstrip())
def IL(): return map(int,sys.stdin.readline().rstrip().split())
def solve():
f = math.ceil(n/1.08)
if math.floor(f*1.08)==n:
print(f)
else:
print(':(')
return
if __name__=='__main__':
n = I()
solve() | [
"[email protected]"
]
| |
bd0ac028550beb26277b0cc228dd206b1c7df296 | 3dfb2112a06f0acdb360c2764ef4c9d51d95fdfe | /a.py | 12fb237df0b322a5aae646d0c1a3dcb41fa6d973 | []
| no_license | sambapython/batch62 | 809f6497295b5cae284d0080dc5f7636b96ea0b0 | f2f57b99e68287875448f5988280c89813b5a128 | refs/heads/master | 2020-05-30T11:25:10.953430 | 2019-07-31T03:30:00 | 2019-07-31T03:30:00 | 189,700,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | import pdb;pdb.set_trace()
x=1000
def fun1():  # reading x below before the local assignment raises UnboundLocalError
print(x)
x=2000
print(x)
fun1() | [
"[email protected]"
]
| |
cfcd23b00235f894bb4ae4381726a46b24aaeefc | a70c29d384933040d318a1baf952965621b68490 | /serving/flask/tf/tf_request.py | 474ed0d17f0bcd56f2b284a0670d3904895d2493 | [
"MIT"
]
| permissive | romadm/LibRecommender | f4980dcd117997284f96f7b042cf3fbbc8c0f99e | 46bb892453e88d8411e671bd72e7a8c6e8ef1575 | refs/heads/master | 2023-08-27T11:51:21.842980 | 2021-11-07T01:42:35 | 2021-11-07T01:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import argparse
import json
import requests
from serving.flask import colorize
def str2bool(v):
if isinstance(v, bool):
return v
elif v.lower() in ("yes", "true", "y", "1"):
return True
elif v.lower() in ("no", "false", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean liked value expected...")
def parse_args():
parser = argparse.ArgumentParser(description="request")
parser.add_argument("--user", type=str, help="user index")
parser.add_argument("--host", default="localhost")
parser.add_argument("--n_rec", type=int, default=10,
help="num of recommendations")
parser.add_argument("--port", default=5000, help="port")
parser.add_argument("--algo", default="tf", type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args() # http://127.0.0.1:5000/predict
url = f"http://{args.host}:{args.port}/{args.algo}/recommend"
data = {"user": args.user, "n_rec": args.n_rec}
try:
response = requests.post(url, json=json.dumps(data))
response_str = f"request_json: {response.json()}"
print(f"{colorize(response_str, 'green', bold=True)}")
except TypeError:
print("Could not serialize to json format...")
except json.JSONDecodeError:
print("Can't print response as json format...")
# python tf_request.py --user 1 --n_rec 10
| [
"[email protected]"
]
| |
bc19d8a3891a523ef3bdb6d9b253b313aedfeebb | a543a24f1b5aebf500c2200cd1d139435948500d | /Book/Ant/1-1/main.py | d7a1e9bc8e75faad0420ddae20bcf50962166c7a | []
| no_license | HomeSox/AtCoder | 18c89660762c3e0979596f0bcc9918c8962e4abb | 93e5ffab02ae1f763682aecb032c4f6f4e4b5588 | refs/heads/main | 2023-09-05T03:39:34.591433 | 2023-09-04T13:53:36 | 2023-09-04T13:53:36 | 219,873,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | n = int(input())
m = int(input())
k = list(map(int, input().split()))
answer = 'No'
for i1 in range(n):
for i2 in range(n):
for i3 in range(n):
for i4 in range(n):
if k[i1] + k[i2] + k[i3] + k[i4] == m:
answer = 'Yes'
print(answer) | [
"[email protected]"
]
| |
7a47391ef75e0091b09a665cea3a1fa44799edd1 | 435a7f571e6379e79010a7bbe2f9680a30b43ed8 | /src/blog/urls.py | aab2dc457187be27ddc92670e6b4200c5b054c62 | []
| no_license | Thuglifescientist2018/eastline_django2020 | 3b1c4bc6800fbdd9206dbdd076e1daf6d4228315 | e61882d91189ca8e0bfea42a715eeb2e87253dd2 | refs/heads/master | 2021-10-12T06:31:35.891679 | 2020-03-20T07:45:15 | 2020-03-20T07:45:15 | 248,694,105 | 0 | 0 | null | 2021-09-22T18:51:40 | 2020-03-20T07:31:34 | Python | UTF-8 | Python | false | false | 355 | py | from django.urls import path
from .views import blog_home, blog_list, blog_create, blog_render, blog_update, blog_delete
urlpatterns = [
path("", blog_home),
path("list", blog_list),
path("create", blog_create),
path("<str:slug>", blog_render),
path("<str:slug>/update", blog_update),
path("<str:slug>/delete", blog_delete)
] | [
"="
]
| = |
8f21050531f4ad8e6d54d613afbbd6bf2eb37d5a | 0a14b78c83ca1d9f7465aed9b978101710750e4f | /task-urgency/sum-delta-task-urgency.py | f850e9fb1f72409a5a3a9b2cd73effe88d90bb92 | []
| no_license | writefaruq/EpuckExptAnalysisScripts | 8cdd5b8d8c584ed6265b792b81f490af27a69f14 | f6e45b4f181dfca629598a17decb94595877a577 | refs/heads/master | 2020-05-31T17:17:15.872463 | 2010-05-18T17:23:11 | 2010-05-18T17:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | #!/usr/bin/env python
import time
import sys
import os
import fnmatch
import fileinput
#INTERVAL = 50
HEADER_LINE = 2
STEP_TIME = 5
def sum_urgency(infile, outfile):
time_start = 0
time_end = 0
cum_urgency = 0
iter = 1
f = open(outfile, 'w')
try:
for line in fileinput.input(infile):
if line == '\n' or fileinput.lineno() <= HEADER_LINE:
continue
print "line # : ", fileinput.lineno()
ts = line.split(";")[0]
step = line.split(";")[1]
u = line.split(";")[2]
print u
if fileinput.lineno() == HEADER_LINE + 1:
#expt_begin = float(ts)
#time_start = float(ts)
#time_end = time_start + INTERVAL
time_start = int(step)
time_end = time_start + INTERVAL / STEP_TIME
cum_urgency = float(u)
continue
#if float(ts) <= time_end:
if int(step) <= time_end:
cum_urgency += float(u)
else:
print "Cumulative urgency:%f at iter %d" %(cum_urgency, iter)
outline = str(time_end) + ";" + str(iter)\
+ ";" + str(cum_urgency) + "\n"
iter += 1
cum_urgency = 0
#time_end = float(ts) + INTERVAL
time_end = time_end + INTERVAL / STEP_TIME
                if fileinput.lineno() == HEADER_LINE + 1: # skip first line
continue
else:
f.write(outline)
except Exception, e:
print e
fileinput.close()
f.close()
if __name__ == '__main__':
numargs = len(sys.argv)
if numargs < 3 or numargs > 3:
print "Usage: %s <delta-dir> <interval>" %sys.argv[0]
sys.exit(1)
else:
dir_path = sys.argv[1]
INTERVAL = int(sys.argv[2])
for file in os.listdir(dir_path):
if fnmatch.fnmatch(file, 'Delta*.txt'):
print "Parsing: ", file
outfile = "SumOver" + str(INTERVAL) + "sec-" + file
infile = dir_path + '/' + file
sum_urgency(infile, outfile)
| [
"[email protected]"
]
| |
98380d3a6d4939e4903d24f4fc870f580c7dfe0f | 73c45163acf0b50f0a59cee471a36ff9576afee2 | /venv/Scripts/easy_install-script.py | 59c7ed1c25b7ecc8582f7d6e8d6320976eb702cb | []
| no_license | erfaenda/FlaskP | 7b8ec1413daba5a8f1c38eff2aec1767a6214365 | d56a47861a9e4b78d3af4ec58055eaddc046dcd1 | refs/heads/master | 2020-07-08T04:22:51.671076 | 2019-08-23T08:00:36 | 2019-08-23T08:00:36 | 203,563,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!C:\Users\a.silantev\PycharmProjects\FlaskP\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
897053339598d7c7ac10cf386fc1e4bd52a9034e | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4052/884004052.py | 98b4f26cc0fbf94d2be466c0ceaf4eca65740322 | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 620 | py | from bots.botsconfig import *
from records004052 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MF',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BMP', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 5},
{ID: 'G61', MIN: 0, MAX: 3},
{ID: 'NTE', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'BAL', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
]
| |
cf7190285977a6d072f2878e18cecc2e23781a05 | de40d3fa8d8af0030556d27d6833f6a1a0e7700c | /baekjoon/1551py/a.py | 4b2f1e5ea15cf88fcabae6827199ab542dd4806e | []
| no_license | NeoMindStd/CodingLife | cd6a627209c0353f4855f09fd5dfef8da4bbfef6 | bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3 | refs/heads/master | 2022-12-24T10:42:45.390085 | 2022-12-11T16:27:16 | 2022-12-11T16:27:16 | 191,797,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | n, k = map(int, input().split())
a = [list(map(int, input().split(','))),[]]
for i in range(k):
a[(i+1)%2].clear()
for j in range(n-i-1): a[(i+1)%2].append(a[i%2][j+1]-a[i%2][j])
print(*a[k%2], sep=',')
| [
"[email protected]"
]
| |
d29b7c4af830c3ce4cb21e03942d300b879e409b | 13edd8f1bc3b86fd881f85fbeafe94811392d7fc | /seventh_module/爬虫/5.scrapy/project_09_redisPro/project_09_redisPro/settings.py | 8c62d6c1d7ab98319ede7d009306beeb0efb76dc | []
| no_license | ryan-yang-2049/oldboy_python_study | f4c90c9d8aac499e1d810a797ab368217f664bb1 | 6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f | refs/heads/master | 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 | HTML | UTF-8 | Python | false | false | 3,680 | py | # -*- coding: utf-8 -*-
# Scrapy settings for project_09_redisPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'project_09_redisPro'
SPIDER_MODULES = ['project_09_redisPro.spiders']
NEWSPIDER_MODULE = 'project_09_redisPro.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'project_09_redisPro (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'project_09_redisPro.middlewares.Project09RedisproSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'project_09_redisPro.middlewares.Project09RedisproDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy_redis.pipelines.RedisPipeline':400,
}
# use the scrapy-redis duplicate-filter (request fingerprint) queue
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# use the scheduler shipped with scrapy-redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# whether crawls may be paused and resumed (persist the request queue in Redis)
SCHEDULER_PERSIST = True
# if the Redis server is not on this machine, configure it as follows
REDIS_HOST = '101.132.45.51'
REDIS_PORT = '26379'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
]
| |
a562123f6939c3763fb4d84f3946b4f8aeda00f0 | d84876ff3d2a61cb28eff13b1af173a091aff917 | /stock_prediction.py | c65790b23d6c0966f517bdabeaf14767239de1c7 | []
| no_license | webclinic017/Stock-Prediction-with-Regression-Models-In-Python | d5f64e066edbe987d775017680d2bcdecea52722 | 61be482ffa36869f43588cb2f7c005914dedda76 | refs/heads/master | 2022-02-18T19:05:57.839353 | 2019-09-07T15:50:29 | 2019-09-07T15:50:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,444 | py | # -*- coding: utf-8 -*-
"""Stock Prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1n27WLQOmxqT8_Wyd3Nm1xONyeaCTxo6w
##Importing Libraries
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import datetime
import pandas_datareader.data as web
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from datetime import datetime, timedelta
from sklearn.model_selection import train_test_split
"""###Import Yahoo Finance Library"""
!pip install yfinance --upgrade --no-cache-dir
"""### Importing Stock data of Netflix"""
from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
yf.pdr_override()
df_full = pdr.get_data_yahoo("NFLX", start="2014-01-01").reset_index()
df_full.to_csv('NFLX.csv',index=False)
df_full.head()
df_full['Date'] = pd.to_datetime(df_full.Date, format='%Y-%m-%d') # Converts string to datetime
df_full = df_full.set_index('Date') # Set the index of dataframe to date column
#plot
df_full.Close.plot()
df_full.info()
df_full.describe()
"""### Spliting into train and test data"""
forecast_out = 60 # Number of how many days to forecast
df_full['Prediction'] = df_full['Adj Close'].shift(-forecast_out)
df_full.tail()
x = np.array(df_full.drop(['Prediction'], 1))
x = x[:-forecast_out]
y = np.array(df_full['Prediction'])
y = y[:-forecast_out]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)
"""## Linear Regression Models (OLS,Lasso,Ridge)"""
# Import package for builing different types of linear regrssion models
from sklearn.linear_model import LinearRegression, Ridge, Lasso
linear_model = LinearRegression()
ridge_model = Ridge()
lasso_model = Lasso()
linear_model.fit(x_train, y_train)
ridge_model.fit(x_train, y_train)
lasso_model.fit(x_train, y_train)
linear_model_score = linear_model.score(x_test, y_test)
print('LinearModel score:', linear_model_score)
ridge_model_score = ridge_model.score(x_test, y_test)
print('RidgeModel score:', ridge_model_score)
lasso_model_score = lasso_model.score(x_test, y_test)
print('LassoModel score:', lasso_model_score)
x_forecast = np.array(df_full.drop(['Prediction'], 1))[-forecast_out:]
linear_model_forecast_prediction = linear_model.predict(x_forecast)
linear_model_real_prediction = linear_model.predict(np.array(df_full.drop(['Prediction'], 1)))
ridge_model_forecast_prediction = ridge_model.predict(x_forecast)
ridge_model_real_prediction = ridge_model.predict(np.array(df_full.drop(['Prediction'], 1)))
lasso_model_forecast_prediction = lasso_model.predict(x_forecast)
lasso_model_real_prediction = lasso_model.predict(np.array(df_full.drop(['Prediction'], 1)))
predicted_dates = []
recent_date = df_full.index.max()
display_at = 1
alpha = 0.5
for i in range(forecast_out):
recent_date += timedelta(days=1)
predicted_dates.append(recent_date)
plt.figure(figsize = (16,8))
plt.xticks(rotation=60)
plt.plot(df_full.index[display_at:], linear_model_real_prediction[display_at:], label='Linear Preds', c='blue', alpha=alpha)
plt.plot(predicted_dates, linear_model_forecast_prediction, c='blue', alpha=alpha)
plt.plot(df_full.index[display_at:], ridge_model_real_prediction[display_at:], label='Ridge Preds', c='green', alpha=alpha)
plt.plot(predicted_dates, ridge_model_forecast_prediction, c='green', alpha=alpha)
plt.plot(df_full.index[display_at:], lasso_model_real_prediction[display_at:], label='Lasso Preds', c='red', alpha=alpha)
plt.plot(predicted_dates, lasso_model_forecast_prediction, c='red', alpha=alpha)
plt.plot(df_full.index[display_at:], df_full['Close'][display_at:], label='Actual', c='black', linewidth=3)
plt.legend()
"""### Polynomoal Regression"""
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
def create_polynomial_regression_model(degree):
"Creates a polynomial regression model for the given degree"
poly_features = PolynomialFeatures(degree=degree)
# transforms the existing features to higher degree features.
X_train_poly = poly_features.fit_transform(x_train)
# fit the transformed features to Linear Regression
poly_model = LinearRegression()
poly_model.fit(X_train_poly, y_train)
# predicting on training data-set
y_train_predicted = poly_model.predict(X_train_poly)
# predicting on test data-set
y_test_predict = poly_model.predict(poly_features.fit_transform(x_test))
# evaluating the model on training dataset
rmse_train = np.sqrt(mean_squared_error(y_train, y_train_predicted))
r2_train = r2_score(y_train, y_train_predicted)
# evaluating the model on test dataset
rmse_test = np.sqrt(mean_squared_error(y_test, y_test_predict))
r2_test = r2_score(y_test, y_test_predict)
print("The model performance for the training set at degree {}" .format(degree))
print("-------------------------------------------")
print("RMSE of training set is {}".format(rmse_train))
print("R2 score of training set is {}".format(r2_train))
print("\n")
print("The model performance for the test set at degree {}" .format(degree))
print("-------------------------------------------")
print("RMSE of test set is {}".format(rmse_test))
print("R2 score of test set is {}".format(r2_test))
print("______________________________________________________________________________________")
print("______________________________________________________________________________________")
print("\n")
for i in range(1,5):
create_polynomial_regression_model(i)
polynomial_features= PolynomialFeatures(degree=2)
x_poly = polynomial_features.fit_transform(x_train)
model = LinearRegression()
model.fit(x_poly, y_train)
y_poly_pred = model.predict(polynomial_features.fit_transform(x_test))
rmse = np.sqrt(mean_squared_error(y_test,y_poly_pred))
r2 = r2_score(y_test,y_poly_pred)
print(rmse)
print(r2)
polynomial_model_forecast_prediction = model.predict(polynomial_features.fit_transform(x_forecast))
polynomial_model_real_prediction = model.predict(polynomial_features.fit_transform(np.array(df_full.drop(['Prediction'], 1))))
predicted_dates = []
recent_date = df_full.index.max()
display_at = 1
alpha = 1
for i in range(forecast_out):
recent_date += timedelta(days=1)
predicted_dates.append(recent_date)
plt.figure(figsize = (16,8))
plt.xticks(rotation=60)
plt.plot(df_full.index[display_at:], linear_model_real_prediction[display_at:], label='Linear Preds', c='blue', alpha=alpha)
plt.plot(predicted_dates, linear_model_forecast_prediction, c='blue', alpha=alpha)
plt.plot(df_full.index[display_at:], ridge_model_real_prediction[display_at:], label='Ridge Preds', c='green', alpha=alpha)
plt.plot(predicted_dates, ridge_model_forecast_prediction, c='green', alpha=alpha)
plt.plot(df_full.index[display_at:], lasso_model_real_prediction[display_at:], label='Lasso Preds', c='red', alpha=alpha)
plt.plot(predicted_dates, lasso_model_forecast_prediction, c='red', alpha=alpha)
plt.plot(df_full.index[display_at:], polynomial_model_real_prediction[display_at:], label='polynomial Preds', c='magenta', alpha=alpha)
plt.plot(predicted_dates, polynomial_model_forecast_prediction, c='magenta', alpha=alpha)
plt.plot(df_full.index[display_at:], df_full['Close'][display_at:], label='Actual', c='black', linewidth=3)
plt.legend() | [
"[email protected]"
]
| |
b13966f310e0c8104c5131929d3d6b912977503f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1638.py | 32faf53928bb6afe2590a89bf55f06931f8625b1 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | # -*- coding: utf-8 -*-
"""
Solves the 'Cookie Clicker Alpha' Problem Google Code Jam Qualifications 2014
https://code.google.com/codejam/contest/2974486/dashboard#s=p1
Created on Fri Apr 12 1:58:51 2014
@author: Luca
"""
import numpy as np
import sys
def get_children(node, C, F, X):
    """Expand a search state; a state is the tuple (time, cookies, rate)."""
    if (node[1] + C) > X:
        child_no_fact = (node[0] + (X - node[1]) / node[2], X, node[2])
        return [child_no_fact]
    child_no_fact = (node[0] + C / node[2], node[1] + C, node[2])
    child_fact = (node[0] + C / node[2], node[1], node[2] + F)
    return [child_no_fact, child_fact]
def solve_cookie_clicker_alpha(C, F, X):
    # Exploratory breadth-first search over (time, cookies, rate) states.
    # It is never called below; the submitted answer comes from solve_by_enumeration().
    root = (0.0, 0.0, 2.0)  # time, cookies, rate
    fringe = [root]
    visited = []
    solution = []
    while len(fringe) > 0:
        current_node = fringe.pop(0)
        visited.append(current_node)
        for c in get_children(current_node, C, F, X):
            if c not in visited:
                fringe.append(c)
                if c[1] == X:
                    solution.append(c)
    return solution
def solve_by_enumeration(C,F,X):
# Trivial solution
rate =2.0
min_time = X/rate
last_time = min_time
n = 1
#print 'Trivial solution no farms %f'%(min_time)
while True:
# Buy a farm whenever is possible
# We assume intermediate solution when the farm is bought
# After it was possible are sub optimal
rate = 2.0
time = 0.0
#print 'Solution buying %d farms'%(n)
for i in range(0,n):
time += C/rate
#print 'Farm %d bought at time %f'%(i+1,time)
rate += F
time +=X/rate
#print 'Final time %f'%(time)
if time<min_time:
min_time = time
else:
return min_time
n = n +1
return min_time
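# Quick sanity check (made-up values, not from the judge's data): with
# C=30.0, F=1.0, X=2.0 a farm can never pay off (it alone takes 15s to buy),
# so solve_by_enumeration(30.0, 1.0, 2.0) should return X / 2.0 == 1.0.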
if __name__ == '__main__':
if len(sys.argv)<2:
print 'Need to specify an input file'
exit(1)
input_file = sys.argv[1]
output_file = 'cookie_clicker_alpha_output_3.txt'
do_debug = True
try:
with open(input_file,'r') as f:
lines = f.readlines()
T = int(lines[0])
print 'Solving Cookie Clicker Alpha Problem for T=%d test cases.'%(T)
data = np.zeros((T,3),dtype=np.float64)
for n in range(0,T):
data[n,:] = np.array([float(t) for t in lines[n+1].split()],dtype = np.float)
if do_debug:
print 'Test case %d'%(n+1)
print 'C,F,X=%f,%f,%f'%(data[n,0],data[n,1],data[n,2])
except IOError:
print 'File %s not found'%input_file
exit(1)
# Solve the problem use binary tree depth first search
# tree branching every time a factory can be bought
solutions = []
for n in range(0,T):
C,F,X = data[n,:]
print 'Solving Cookie Clicker Alpha Problem %d C,F,X=%f,%f,%f'%(n,C,F,X)
res = solve_by_enumeration(C,F,X)
solutions.append(res)
try:
with open(output_file,'w') as f:
for n in range(0,T):
f.write('Case #%d: %12.8e\n'%(n+1,solutions[n]))
except IOError:
print 'File %s not found'%output_file
exit(1) | [
"[email protected]"
]
| |
a238b652ff6bdc3c552b4f99c87bc8bddb5b42a7 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /1423/1423.maximum-points-you-can-obtain-from-cards.785265974.Accepted.leetcode.python3.py | 18ad403e19400e3a95826bb18aff46c4284a3bdc | []
| no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Solution(object):
def maxScore(self, cardPoints, k):
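        # Sliding window: keeping k cards taken from the two ends is the same as
        # choosing one window of length k made of a suffix plus a prefix.
        # Start from the k rightmost cards, then repeatedly trade the innermost
        # card of the right block for the next card from the left block.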
left = 0
right = len(cardPoints) - k
ksum = sum(cardPoints[len(cardPoints) - k:])
result = max(float('-inf'), ksum)
while right < len(cardPoints):
ksum = ksum - cardPoints[right] + cardPoints[left]
result = max(result, ksum)
left += 1
right += 1
return result
| [
"[email protected]"
]
| |
423f8464015490054986bbe3c694ad19db6cca2c | a63b49c45f09e0b0abd20ed6ca81b8e30e17f755 | /a2c_ppo_acktr/storage.py | 147fc1989991982ce00a21c1d57b4fdfaacb63cd | []
| no_license | xxchenxx/MixtureOptimizer | d08b2395208e3efec2dbe730b4f194d509aea106 | 1c6f8d0848aeb71d49ea129001f3f170612cd4cf | refs/heads/master | 2022-12-04T05:24:01.974432 | 2020-08-22T15:40:34 | 2020-08-22T15:40:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,569 | py | import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from pdb import set_trace as bp
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self, num_steps, obs_shape, action_shape=1, hidden_size=1, num_recurrent_layers=1):
# TODO: not include num_process here since we only have one model (optimizee) each time
# observation: (seq_len, batch_size, #lstm_input * window + #scalar_input + #actions * 1(LR))
self.obs = torch.zeros(num_steps + 1, 1, *obs_shape)
self.recurrent_hidden_states = torch.zeros(num_steps + 1, num_recurrent_layers, 1, hidden_size)
self.rewards = torch.zeros(num_steps, 1, 1)
self.value_preds = torch.zeros(num_steps + 1, 1)
self.returns = torch.zeros(num_steps + 1, 1)
self.action_log_probs = torch.zeros(num_steps, 1)
self.actions = torch.zeros(num_steps, action_shape)
self.num_steps = num_steps
self.step = 0
def reset(self):
device = self.obs.device
self.obs = torch.zeros_like(self.obs)
self.recurrent_hidden_states = torch.zeros_like(self.recurrent_hidden_states)
self.rewards = torch.zeros(self.num_steps, 1, 1)
self.value_preds = torch.zeros(self.num_steps + 1, 1)
self.returns = torch.zeros(self.num_steps + 1, 1)
self.action_log_probs = torch.zeros(self.num_steps, 1)
self.actions = torch.zeros_like(self.actions)
self.step = 0
self.to(device)
def to(self, device):
self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
def insert(self, obs, recurrent_hidden_states, actions, action_log_probs, value_preds, rewards):
self.obs[self.step + 1].copy_(obs)
self.recurrent_hidden_states[self.step + 1].copy_(recurrent_hidden_states)
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.step = (self.step + 1) % self.num_steps
def after_update(self):
self.obs[0].copy_(self.obs[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
def compute_returns(self, next_value, use_gae, gamma, gae_lambda):
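        """Fill ``self.returns`` backwards in time.
        With GAE: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),
        A_t = delta_t + gamma * lambda * A_{t+1}, and the stored return is
        A_t + V(s_t).  Without GAE: plain discounted returns
        R_t = r_t + gamma * R_{t+1}, bootstrapped from ``next_value``.
        """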
if use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.size(0))):
delta = self.rewards[step] + gamma * self.value_preds[step + 1] - self.value_preds[step]
gae = delta + gamma * gae_lambda * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.size(0))):
self.returns[step] = self.returns[step + 1] * gamma + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
num_steps, num_processes = self.rewards.size()[0:2]
batch_size = num_processes * num_steps
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(
SubsetRandomSampler(range(batch_size)),
mini_batch_size,
drop_last=True)
for indices in sampler:
obs_batch = self.obs[:-1].view(-1, *self.obs.size()[1:])[indices]
recurrent_hidden_states_batch = self.recurrent_hidden_states[:-1].view(-1, *self.recurrent_hidden_states.size()[1:])[indices]
actions_batch = self.actions.view(-1, self.actions.size(-1))[indices]
value_preds_batch = self.value_preds[:-1].view(-1, 1)[indices]
return_batch = self.returns[:-1].view(-1, 1)[indices]
old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages.view(-1, 1)[indices]
yield obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, old_action_log_probs_batch, adv_targ
def recurrent_generator(self, advantages, num_mini_batch):
num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
value_preds_batch = []
return_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
old_action_log_probs_batch.append(
self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
value_preds_batch = torch.stack(value_preds_batch, 1)
return_batch = torch.stack(return_batch, 1)
old_action_log_probs_batch = torch.stack(
old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
value_preds_batch = _flatten_helper(T, N, value_preds_batch)
return_batch = _flatten_helper(T, N, return_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, old_action_log_probs_batch, adv_targ
| [
"[email protected]"
]
| |
f974791af539b1ba0f63ab6f9457a2bafd3f0b78 | 58509347cca790fce26884f027425170c5891a17 | /bin/train_convert.py | c972a100902ea60af68848b0c99c8381455c69c5 | []
| no_license | Hiroshiba/signico_real_to_anime | e22d07ca6531b75b3987ecc309e02bcd405f6f61 | 0a68b132fc77e24539d7ddc65b3078fd0c7f3858 | refs/heads/master | 2021-01-19T23:25:37.149611 | 2018-03-21T17:24:32 | 2018-03-21T17:32:45 | 88,979,946 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | import argparse
import chainer
import glob
import os
import sys
ROOT_PATH = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(ROOT_PATH)
from deep_image_converter.config import Config
from deep_image_converter import dataset
from deep_image_converter.loss import ConvertModelLossMaker, FacebookConvertModelLossMaker
from deep_image_converter.model import prepare_model, choose_discriminator, BaseConvertModel
from deep_image_converter.updater import ConvertModelUpdater
from deep_image_converter.train import TrainManager
from deep_image_converter import utility
parser = argparse.ArgumentParser()
parser.add_argument('config_json_path')
config_json_path = parser.parse_args().config_json_path
config = Config(config_json_path)
config.copy_config_json()
train_manager = TrainManager(config.train_config)
datasets = dataset.choose(config.dataset_config)
nb = config.train_config.batchsize
IteratorClass = chainer.iterators.MultiprocessIterator
iterator_train_a = IteratorClass(datasets['train_a'], nb, True, True)
iterator_train_b = IteratorClass(datasets['train_b'], nb, True, True)
iterator_test = IteratorClass(datasets['test'], nb, False, False)
iterator_train_eval = IteratorClass(datasets['train_eval'], nb, False, False)
config.train_config.gpu >= 0 and chainer.cuda.get_device(config.train_config.gpu).use()
utility.chainer.set_default_initialW(config.model_config.initialW)
model = prepare_model(config.model_config)
assert isinstance(model, BaseConvertModel)
config.train_config.gpu >= 0 and model.to_gpu()
optimizer = train_manager.make_optimizer(model, 'main')
optimizers = {'main': optimizer}
dis = choose_discriminator(config.model_config)
config.train_config.gpu >= 0 and dis.to_gpu()
optimizer = train_manager.make_optimizer(dis, 'discriminator')
optimizers['dis'] = optimizer
if config.loss_config.name is None:
loss_maker = ConvertModelLossMaker(config.loss_config, model, dis)
elif config.loss_config.name == 'facebook':
loss_maker = FacebookConvertModelLossMaker(config.loss_config, model, dis)
else:
raise NotImplementedError(config.loss_config.name)
updater = ConvertModelUpdater(
optimizer=optimizers,
iterator={'a': iterator_train_a, 'b': iterator_train_b},
loss_maker=loss_maker,
device=config.train_config.gpu,
)
trainer = train_manager.make_trainer(
updater=updater,
model={'main': model, 'dis': dis},
eval_func=loss_maker.test,
iterator_test=iterator_test,
iterator_train_eval=iterator_train_eval,
loss_names=loss_maker.get_loss_names() + loss_maker.get_loss_names_discriminator(),
)
trainer.run()
| [
"[email protected]"
]
| |
0c5a649f0b60d66c181ab5f9abc7269f1142b11b | e2c369fc706a6058fe0126e088e8cc4ce48d2654 | /src/song/migrations/0011_auto_20190417_2320.py | f7b5aa3bf527350e940aa8e1023e259037afbc15 | []
| no_license | kishanpython/SongHub | 9ea1381d4add0c8fa036710f79fd9964c991eba7 | f86997d2070533ff7649ce3df89eaed66cbda609 | refs/heads/master | 2020-07-14T18:04:25.036424 | 2019-08-30T13:11:35 | 2019-08-30T13:11:35 | 205,368,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.0 on 2019-04-17 23:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('song', '0010_song_file'),
]
operations = [
migrations.AlterField(
model_name='song',
name='file',
field=models.FileField(blank=True, null=True, upload_to='musics/'),
),
]
| [
"[email protected]"
]
| |
f83235d5cdc9fd38002e926ce485385bb59a0828 | 073c929cab2d92e9859010be654eb1ba69b397b1 | /src/participants/migrations/0004_alter_participant_polls.py | 62b1dd8d33b7130bc663cc9a08e88ff7a171fdf3 | []
| no_license | iamgaddiel/voting_system | 6e5abe9b6d9da29abc6a94e12157cb308bf2b717 | b41ec2a98ed678bedd3b9bdd9d6c8a5c679fcabf | refs/heads/main | 2023-06-22T17:29:57.371562 | 2021-07-22T06:57:47 | 2021-07-22T06:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # Generated by Django 3.2.3 on 2021-05-31 04:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
('participants', '0003_participant_polls'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='polls',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.polls'),
),
]
| [
"[email protected]"
]
| |
3d5663348481fa5c81589dec72506479170bbcd6 | 155cbccc3ef3b8cba80629f2a26d7e76968a639c | /thelma/tools/libcreation/iso.py | a0f6b0645613c8417b0b32a11df2acb19cee32a7 | [
"MIT"
]
| permissive | papagr/TheLMA | 1fc65f0a7d3a4b7f9bb2d201259efe5568c2bf78 | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | refs/heads/master | 2022-12-24T20:05:28.229303 | 2020-09-26T13:57:48 | 2020-09-26T13:57:48 | 279,159,864 | 1 | 0 | MIT | 2020-07-12T22:40:36 | 2020-07-12T22:40:35 | null | UTF-8 | Python | false | false | 19,016 | py | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
The classes in this module serve the creation of an ISO for a library creation
process.
:Note: The ISOs are already created externally, because they need a ticket ID.
The tool actually populates an empty ISO (instead of creating a new one).
The following tasks need to be performed:
* pick molecule design pools
* create a preparation layout (= library layout)
* create preparation plate
* create aliquot plate
* create ISO sample stock rack for each quadrant (barcodes are provided)
AAB
"""
from thelma.tools.semiconstants import get_item_status_future
from thelma.tools.semiconstants import get_reservoir_specs_standard_384
from thelma.tools.semiconstants import get_reservoir_specs_standard_96
from thelma.tools.base import BaseTool
from thelma.tools.semiconstants import get_rack_specs_from_reservoir_specs
from thelma.tools.libcreation.base \
import ALIQUOT_PLATE_CONCENTRATION
from thelma.tools.libcreation.base \
import LibraryBaseLayoutConverter
from thelma.tools.libcreation.base \
import MOLECULE_DESIGN_TRANSFER_VOLUME
from thelma.tools.libcreation.base \
import PREPARATION_PLATE_CONCENTRATION
from thelma.tools.libcreation.base import LibraryLayout
from thelma.tools.libcreation.base import LibraryPosition
from thelma.tools.libcreation.base import MOLECULE_TYPE
from thelma.tools.libcreation.base import NUMBER_SECTORS
from thelma.tools.libcreation.base import STARTING_NUMBER_ALIQUOTS
from thelma.tools.libcreation.optimizer import LibraryCreationTubePicker
from thelma.tools.stock.base import get_default_stock_concentration
from thelma.tools.writers import CsvColumnParameters
from thelma.tools.writers import CsvWriter
from thelma.tools.utils.base import is_valid_number
from thelma.tools.utils.racksector import QuadrantIterator
from thelma.entities.iso import ISO_STATUS
from thelma.entities.iso import IsoAliquotPlate
from thelma.entities.iso import IsoSectorPreparationPlate
from thelma.entities.iso import StockSampleCreationIso
from thelma.entities.library import MoleculeDesignLibrary
__docformat__ = 'reStructuredText en'
__all__ = ['LibraryCreationIsoPopulator',
'LibraryCreationIsoLayoutWriter']
class LibraryCreationIsoPopulator(BaseTool):
"""
    Populates an empty library creation ISO for a library. The tool creates
    a proper rack layout, preparation plates and aliquot plates.
**Return Value:** The newly populated ISOs.
"""
NAME = 'Library Creation ISO Populator'
#: The label pattern for preparation plates.
PREP_PLATE_LABEL_PATTERN = '%s-%i-%inM-Q%i'
#: The label pattern for aliquot plates.
ALIQUOT_PLATE_LABEL_PATTERN = '%s-%i-%inM-%i'
def __init__(self, molecule_design_library, number_isos,
excluded_racks=None, requested_tubes=None, parent=None):
"""
Constructor:
:param molecule_design_library: The molecule design library for which to
populate the ISO.
:type molecule_design_library:
:class:`thelma.entities.library.MoleculeDesignLibrary`
:param number_isos: The number of ISOs ordered.
:type number_isos: :class:`int`
:param excluded_racks: A list of barcodes from stock racks that shall
not be used for stock sample picking.
:type excluded_racks: A list of rack barcodes
:param requested_tubes: A list of barcodes from stock tubes that are
supposed to be used.
:type requested_tubes: A list of tube barcodes.
"""
BaseTool.__init__(self, parent=parent)
#: The molecule design library for which to generate an ISO.
self.molecule_design_library = molecule_design_library
#: The number of ISOs ordered.
self.number_isos = number_isos
#: A list of barcodes from stock racks that shall not be used for
#: stock sample (molecule design pool) picking.
self.excluded_racks = excluded_racks
if excluded_racks is None: self.excluded_racks = []
if requested_tubes is None: requested_tubes = []
#: A list of barcodes from stock tubes that are supposed to be used
#: (for fixed positions).
self.requested_tubes = requested_tubes
#: The ISO request defining the ISO layout
#: (:class:`thelma.entities.iso.IsoRequest`)
self._iso_request = None
#: The library pools for which to pick tubes.
self._queued_pools = None
#: The stock concentration for the library molecule type.
self.__stock_concentration = None
#: The worklist series that is attached to the ISO request.
self.__worklist_series = None
#: The base layout defines which positions might be taken by library
#: positions. This is the base layout for the 384-well plate.
self.__base_layout = None
#: The base layout positions for each quadrant.
self._quadrant_positions = None
#: The library candidates returned by the optimiser.
self._library_candidates = None
#: The library layouts mapped onto layout numbers.
self._library_layouts = None
#: The picked empty ISOs to populate.
self.__picked_isos = None
#: The newly populated ISOs.
self.__new_isos = None
def reset(self):
BaseTool.reset(self)
self._iso_request = None
self._queued_pools = []
self.__stock_concentration = None
self.__worklist_series = None
self.__base_layout = None
self._quadrant_positions = dict()
self._library_candidates = None
self._library_layouts = []
self.__picked_isos = []
self.__new_isos = []
def run(self):
"""
Creates the requested number of ISO.
"""
self.reset()
self.add_info('Start ISO generation ...')
self._check_input()
if not self.has_errors(): self.__get_library_metadata()
if not self.has_errors(): self.__get_base_layout()
if not self.has_errors(): self.__pick_library_candidates()
if not self.has_errors(): self._distribute_candidates()
if not self.has_errors(): self.__pick_isos()
if not self.has_errors(): self.__populate_isos()
if not self.has_errors():
self.return_value = self.__new_isos
self.add_info('ISO generation completed.')
def _check_input(self):
"""
Checks the initialisation values.
"""
self.add_debug('Check initialisation values ...')
self._check_input_class('molecule design library',
self.molecule_design_library, MoleculeDesignLibrary)
if not is_valid_number(self.number_isos, is_integer=True):
msg = 'The number of ISOs order must be a positive integer ' \
'(obtained: %s).' % (self.number_isos)
self.add_error(msg)
if self._check_input_class('excluded racks list',
self.excluded_racks, list):
for excl_rack in self.excluded_racks:
if not self._check_input_class('excluded rack barcode',
excl_rack, basestring): break
if self._check_input_class('requested tubes list',
self.requested_tubes, list):
for req_tube in self.requested_tubes:
if not self._check_input_class('requested tube barcode',
req_tube, basestring): break
def __get_library_metadata(self):
"""
Determines the ISO request, the library pools for which to pick
source tubes, the worklist series and the stock concentration.
"""
self._iso_request = self.molecule_design_library.iso_request
if self._iso_request is None:
msg = 'There is no ISO request for this library!'
self.add_error(msg)
else:
self.__worklist_series = self._iso_request.worklist_series
self._find_queued_pools()
if not self.has_errors():
self.__stock_concentration = \
get_default_stock_concentration(MOLECULE_TYPE)
def _find_queued_pools(self):
"""
All molecule design pools from the ISO request that are not part
of an ISO yet, are used. Cancelled ISOs are ignored.
"""
used_pools = set()
for iso in self._iso_request.isos:
if iso.status == ISO_STATUS.CANCELLED:
continue
if iso.molecule_design_pool_set is None:
continue
used_pools.update(
iso.molecule_design_pool_set.molecule_design_pools)
pool_set = self.molecule_design_library.molecule_design_pool_set
self._queued_pools = \
pool_set.molecule_design_pools.difference(used_pools)
if len(self._queued_pools) < 1:
msg = 'There are no unused molecule design pools left!'
self.add_error(msg)
def __get_base_layout(self):
# The base layout defines which positions might be taken by library
# positions.
self.add_debug('Fetch base layout ...')
converter = LibraryBaseLayoutConverter(
self._iso_request.iso_layout, parent=self)
self.__base_layout = converter.get_result()
if self.__base_layout is None:
msg = 'Error when trying to fetch library base layout.'
self.add_error(msg)
else:
self._quadrant_positions = QuadrantIterator.sort_into_sectors(
self.__base_layout,
number_sectors=NUMBER_SECTORS)
def __pick_library_candidates(self):
# Runs the library optimizer. The optimizer returns a list of
# :class:`LibraryCandidate` objects (in order of the optimizing
# completion).
optimizer = LibraryCreationTubePicker(
self._queued_pools,
self.__stock_concentration,
take_out_volume=MOLECULE_DESIGN_TRANSFER_VOLUME,
excluded_racks=self.excluded_racks,
requested_tubes=self.requested_tubes,
parent=self)
self._library_candidates = optimizer.get_result()
if self._library_candidates is None:
msg = 'Error when trying to pick tubes.'
self.add_error(msg)
def _distribute_candidates(self):
"""
Creates a :class:`LibraryLayout` for each ISO.
Positions are populated sector-wise (maybe some sector will remain
empty if there is not enough positions).
"""
self.add_info('Distribute candidates ...')
not_enough_candidates = False
for i in range(self.number_isos): #pylint: disable=W0612
if len(self._library_candidates) < 1: break
lib_layout = LibraryLayout.from_base_layout(self.__base_layout)
for positions in self._quadrant_positions.values():
if len(self._library_candidates) < 1: break
for base_pos in positions:
if len(self._library_candidates) < 1:
not_enough_candidates = True
break
lib_cand = self._library_candidates.pop(0)
lib_pos = LibraryPosition(pool=lib_cand.pool,
rack_position=base_pos.rack_position,
stock_tube_barcodes=lib_cand.get_tube_barcodes())
lib_layout.add_position(lib_pos)
if len(lib_layout) < 1:
break
else:
self._library_layouts.append(lib_layout)
if not_enough_candidates:
msg = 'There is not enough library candidates left to populate ' \
'all positions for the requested number of ISOs. Number ' \
'of generated ISOs: %i.' % (len(self._library_layouts))
self.add_warning(msg)
def __pick_isos(self):
"""
Only ISOs with empty rack layouts can be picked.
"""
iso_map = dict()
used_layout_numbers = set()
for iso in self._iso_request.isos:
if len(iso.rack_layout.tagged_rack_position_sets) > 0:
used_layout_numbers.add(iso.layout_number)
iso_map[iso.layout_number] = iso
number_layouts = self._iso_request.number_plates
for i in range(number_layouts):
if not (i + 1) in used_layout_numbers:
iso = iso_map[i + 1]
self.__picked_isos.append(iso)
if len(self.__picked_isos) == len(self._library_layouts): break
def __populate_isos(self):
"""
Adds molecule design set, library layout and plates to the picked ISOs.
"""
self.add_debug('Create ISOs ...')
ir_specs_96 = get_reservoir_specs_standard_96()
plate_specs_96 = get_rack_specs_from_reservoir_specs(ir_specs_96)
ir_specs_384 = get_reservoir_specs_standard_384()
plate_specs_384 = get_rack_specs_from_reservoir_specs(ir_specs_384)
future_status = get_item_status_future()
library_name = self._iso_request.plate_set_label
md_type = \
self.molecule_design_library.molecule_design_pool_set.molecule_type
while len(self.__picked_isos) > 0:
lci = self.__picked_isos.pop(0)
library_layout = self._library_layouts.pop(0)
lci.rack_layout = library_layout.create_rack_layout()
lci.molecule_design_pool_set = \
library_layout.get_pool_set(md_type)
layout_number = lci.layout_number
# create source plates
for sector_index in self._quadrant_positions.keys():
prep_label = self.PREP_PLATE_LABEL_PATTERN % (library_name,
layout_number, PREPARATION_PLATE_CONCENTRATION,
(sector_index + 1))
prep_plate = plate_specs_96.create_rack(label=prep_label,
status=future_status)
LibrarySourcePlate(iso=lci, plate=prep_plate,
sector_index=sector_index)
# create aliquot plates
for i in range(STARTING_NUMBER_ALIQUOTS):
aliquot_label = self.ALIQUOT_PLATE_LABEL_PATTERN % (
library_name, layout_number,
ALIQUOT_PLATE_CONCENTRATION, (i + 1))
aliquot_plate = plate_specs_384.create_rack(label=aliquot_label,
status=future_status)
IsoAliquotPlate(iso=lci, plate=aliquot_plate)
self.__new_isos.append(lci)
class LibraryCreationIsoLayoutWriter(CsvWriter):
"""
Generates an overview file containing the layout data for a particular
library creation ISO.
**Return Value:** stream (CSV format)
"""
NAME = 'Library Creation ISO Layout Writer'
#: The header for the rack position column.
POSITION_HEADER = 'Rack Position'
#: The header for the molecule design pool column.
POOL_HEADER = 'Pool ID'
#: The header for the molecule design column.
MOLECULE_DESIGN_HEADER = 'Molecule Design IDs'
#: The header for the stock tube barcode column.
TUBE_HEADER = 'Stock Tubes'
#: The index for the rack position column.
POSITION_INDEX = 0
#: The index for the molecule design pool column.
POOL_INDEX = 1
#: The index for the molecule design column.
MOLECULE_DESIGN_INDEX = 2
#: The index for the stock tube barcode column.
TUBE_INDEX = 3
def __init__(self, stock_sample_creation_iso, parent=None):
"""
Constructor:
:param stock_sample_creation_iso: The ISO whose library layout you want
to print.
:type stock_sample_creation_iso:
:class:`thelma.entities.iso.StockSampleCreationIso`
"""
CsvWriter.__init__(self, parent=parent)
#: The ISO whose layout you want to print.
self.stock_sample_creation_iso = stock_sample_creation_iso
#: The values for the columns.
self.__position_values = None
self.__pool_values = None
self.__md_values = None
self.__tube_values = None
def reset(self):
CsvWriter.reset(self)
self.__position_values = []
self.__pool_values = []
self.__md_values = []
self.__tube_values = []
def _init_column_map_list(self):
if self._check_input_class('ISO', self.stock_sample_creation_iso,
StockSampleCreationIso):
self.__store_values()
self.__generate_columns()
def __store_values(self):
# Fetches and stores the values for the columns.
self.add_debug('Store column values ...')
lib_layout = self.__get_library_layout()
        if lib_layout is not None:
for lib_pos in lib_layout.get_sorted_working_positions():
self.__position_values.append(lib_pos.rack_position.label)
self.__pool_values.append(lib_pos.pool.id)
self.__md_values.append(
lib_pos.get_molecule_designs_tag_value())
self.__tube_values.append(
lib_pos.get_stock_barcodes_tag_value())
def __get_library_layout(self):
# Converts the library layout from the ISO.
self.add_debug('Get library layout ...')
converter = LibraryLayoutConverter(
self.stock_sample_creation_iso.rack_layout, parent=self)
lib_layout = converter.get_result()
if lib_layout is None:
msg = 'Error when trying to convert library layout!'
self.add_error(msg)
return lib_layout
def __generate_columns(self):
# Generates the :attr:`_column_map_list`
pos_column = CsvColumnParameters(self.POSITION_INDEX,
self.POSITION_HEADER, self.__position_values)
pool_column = CsvColumnParameters(self.POOL_INDEX, self.POOL_HEADER,
self.__pool_values)
md_column = CsvColumnParameters(self.MOLECULE_DESIGN_INDEX,
self.MOLECULE_DESIGN_HEADER, self.__md_values)
tube_column = CsvColumnParameters(self.TUBE_INDEX, self.TUBE_HEADER,
self.__tube_values)
self._column_map_list = [pos_column, pool_column, md_column,
tube_column]
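# Hypothetical usage sketch (assuming the CsvWriter base class follows the same
# get_result() pattern as the converters used above):
#     writer = LibraryCreationIsoLayoutWriter(stock_sample_creation_iso=iso)
#     csv_stream = writer.get_result()  # None if errors were recorded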
| [
"[email protected]"
]
| |
e8827406a39cd8d8060823f9cd8294afa08fa317 | e104d3b44decf46aeb624134f116bbd1dfa88b6a | /flaskEnv/bin/easy_install | 1e287e12ee54498c20b824933397c15d0b5ca20f | []
| no_license | februarypython/instructor_minh | 2ac3d4fb357bfc47fb25cd9289cca0b8c2007917 | 73fc6cac1d98add52f23cdfac1bf9baa39fa977d | refs/heads/master | 2021-05-02T07:32:12.774563 | 2018-03-02T03:02:52 | 2018-03-02T03:02:52 | 120,831,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/Users/homefolder/Desktop/1februarypython/flaskEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
3e6076a9b6fe7e899b8fb311140de2d1133e6bed | 9079354291951a1782ec43efaead5876895eece8 | /legacy_scripts/setup.py | 66b535476ffc0715c1d1c734eccd7a5d32a16277 | []
| no_license | luungoc2005/nlp-test | c9a2e0174546221b0e6d2501d9c4dfeca5c6efd0 | ed43a4b1bbcd23c3fc39e92d790864c73a5999f3 | refs/heads/master | 2022-12-08T14:17:07.271865 | 2019-05-26T16:23:20 | 2019-05-26T16:23:20 | 125,201,975 | 0 | 0 | null | 2022-12-07T23:37:52 | 2018-03-14T11:24:54 | Jupyter Notebook | UTF-8 | Python | false | false | 355 | py | from distutils.core import setup
from Cython.Build import cythonize
# from distutils.extension import Extension
import numpy
setup(
name="Botbot-NLP",
ext_modules=cythonize([
# "common/_cutils/*.pyx",
"text_classification/fast_text/_cutils/*.pyx"
    ],
    include_path=[
numpy.get_include()
]
),
)
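# Typical invocation for a Cython setup script like this one (assumed, not part
# of the original file): python setup.py build_ext --inplace
# Note: cythonize()'s include_path is searched for .pxd files; numpy's C headers
# are more commonly passed via include_dirs on an Extension.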
| [
"[email protected]"
]
| |
804c4b6e7cbf6961dbb3e5415cedb8a68caa6800 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_190/ch117_2020_03_31_05_22_20_071855.py | 23d4ed7a059bf6e7ba27702c678158f5b49358c9 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import math
def snell_descartes(n1, n2, o1):
    # Snell-Descartes law: n1 * sin(o1) = n2 * sin(o2), with angles in degrees.
    x = (math.sin(math.radians(o1)) * n1) / n2
    o2 = math.degrees(math.asin(x))
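    # Quick sanity check (hypothetical call): snell_descartes(1.0, 1.5, 30)
    # should return roughly 19.47 degrees.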
return (o2) | [
"[email protected]"
]
| |
00829ea419f370c994ad692776728474d096aa13 | 3d6083f1821950afc06c432066dc763d3eb5db44 | /guide/basic/bert_embedder.py | 3465da95b238f7028c9271d0bc00953b1d1c82c5 | []
| no_license | flyfatty/self-allennlp | f4b3e3f3c36422c0950b0479a22546792c4852cb | 4741b2e47037dba1e20053f6877a7bbafedd8047 | refs/heads/master | 2023-08-24T03:10:13.994216 | 2021-10-04T08:13:07 | 2021-10-04T08:13:07 | 339,996,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,384 | py | # @Time : 2020/12/24 1:02
# @Author : LiuBin
# @File : bert_embedder.py
# @Description :
# @Software: PyCharm
import torch
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import (
SingleIdTokenIndexer,
TokenCharactersIndexer,
ELMoTokenCharactersIndexer,
PretrainedTransformerIndexer,
PretrainedTransformerMismatchedIndexer,
)
from allennlp.data.tokenizers import (
CharacterTokenizer,
PretrainedTransformerTokenizer,
SpacyTokenizer,
WhitespaceTokenizer,
)
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import (
Embedding,
TokenCharactersEncoder,
ElmoTokenEmbedder,
PretrainedTransformerEmbedder,
PretrainedTransformerMismatchedEmbedder,
)
from allennlp.nn import util as nn_util
import warnings
warnings.filterwarnings("ignore")
# Splits text into words (instead of wordpieces or characters). For ELMo, you can
# just use any word-level tokenizer that you like, though for best results you
# should use the same tokenizer that was used with ELMo, which is an older version
# of spacy. We're using a whitespace tokenizer here for ease of demonstration
# with binder.
tokenizer = WhitespaceTokenizer()
# Represents each token with an array of characters in a way that ELMo expects.
token_indexer = ELMoTokenCharactersIndexer()
# Both ELMo and BERT do their own thing with vocabularies, so we don't need to add
# anything, but we do need to construct the vocab object so we can use it below.
# (And if you have any labels in your data that need indexing, you'll still need
# this.)
vocab = Vocabulary()
text = "This is some text ."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {'elmo_tokens': token_indexer})
text_field.index(vocab)
# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print("ELMo tensors:", tensor_dict)
# Any transformer model name that huggingface's transformers library supports will
# work here. Under the hood, we're grabbing pieces from huggingface for this
# part.
transformer_model = 'bert-base-cased'
# To do modeling with BERT correctly, we can't use just any tokenizer; we need to
# use BERT's tokenizer.
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
# Represents each wordpiece with an id from BERT's vocabulary.
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("BERT tokens:", tokens)
text_field = TextField(tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("BERT tensors:", tensor_dict)
# Now we'll do an example with paired text, to show the right way to handle [SEP]
# tokens in AllenNLP. We have built-in ways of handling this for two text pieces.
# If you have more than two text pieces, you'll have to manually add the special
# tokens. The way we're doing this requires that you use a
# PretrainedTransformerTokenizer, not the abstract Tokenizer class.
# Splits text into wordpieces, but without adding special tokens.
tokenizer = PretrainedTransformerTokenizer(
model_name=transformer_model,
add_special_tokens=False,
)
context_text = "This context is frandibulous."
question_text = "What is the context like?"
context_tokens = tokenizer.tokenize(context_text)
question_tokens = tokenizer.tokenize(question_text)
print("Context tokens:", context_tokens)
print("Question tokens:", question_tokens)
combined_tokens = tokenizer.add_special_tokens(context_tokens, question_tokens)
print("Combined tokens:", combined_tokens)
text_field = TextField(combined_tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("Combined BERT tensors:", tensor_dict)
# It's easiest to get ELMo input by just running the data code. See the
# exercise above for an explanation of this code.
tokenizer = WhitespaceTokenizer()
token_indexer = ELMoTokenCharactersIndexer()
vocab = Vocabulary()
text = "This is some text."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {'elmo_tokens': token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("ELMo tensors:", token_tensor)
# We're using a tiny, toy version of ELMo to demonstrate this.
elmo_options_file = 'https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/options.json'
elmo_weight_file = 'https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/lm_weights.hdf5'
elmo_embedding = ElmoTokenEmbedder(options_file=elmo_options_file,
weight_file=elmo_weight_file)
embedder = BasicTextFieldEmbedder(token_embedders={'elmo_tokens': elmo_embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("ELMo embedded tokens:", embedded_tokens)
# Again, it's easier to just run the data code to get the right output.
# We're using the smallest transformer model we can here, so that it runs on
# binder.
transformer_model = 'google/reformer-crime-and-punishment'
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("Transformer tokens:", tokens)
text_field = TextField(tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("Transformer tensors:", token_tensor)
embedding = PretrainedTransformerEmbedder(model_name=transformer_model)
embedder = BasicTextFieldEmbedder(token_embedders={'bert_tokens': embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("Transformer embedded tokens:", embedded_tokens)
| [
"[email protected]"
]
| |
248789733a3133a24466895b30e8c35a526f519c | 98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3 | /bingads/v13/bulk/entities/labels/__init__.py | 053dc1f5f1bb6077ac63645d3921adb4bad7e414 | [
"MIT"
]
| permissive | BingAds/BingAds-Python-SDK | a2f9b0c099b574a4495d0052218f263af55cdb32 | 373a586402bf24af7137b7c49321dbc70c859fce | refs/heads/main | 2023-07-27T15:31:41.354708 | 2023-07-10T03:21:03 | 2023-07-10T03:21:03 | 31,927,550 | 105 | 182 | NOASSERTION | 2023-09-04T06:51:20 | 2015-03-09T23:09:01 | Python | UTF-8 | Python | false | false | 140 | py | __author__ = 'Bing Ads SDK Team'
__email__ = '[email protected]'
from .bulk_label import *
from .bulk_label_associations import *
| [
"[email protected]"
]
| |
c244275eaf4960476910ef1e16ce1ae889076b4a | 377fc6e13101a2a45826cd118110c790f396a805 | /abc024-b.py | 1a6b9cb638e6c8a270a7d98c1cb76f25a8b319d8 | []
| no_license | number09/atcoder | 4076e7223f424b9923754e73992d6442e0bb0de7 | f521ca1205b254d99744abaf6a7a5bfe69845fe0 | refs/heads/master | 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | n, t = map(int, input().split())
li_a = list()
for i in range(n):
li_a.append(int(input()))
last_open = (0, 0)
t_seconds = 0
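# last_open is the (start, end) of the time span already paid for; an arrival
# inside it only adds the newly covered part (a + t - end), otherwise a full t
# is added and a new span starts at a.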
for a in li_a:
if last_open[0] <= a <= last_open[1]:
t_seconds += a + t - last_open[1]
last_open = (last_open[0], t + a)
else:
t_seconds += t
last_open = (a, a + t)
print(t_seconds)
| [
"[email protected]"
]
| |
c6ab140ab8f2cb9654ad0aaf732e6dacf963ac3b | 9fc6604ae98e1ae91c490e8201364fdee1b4222a | /odx_custom_support_ticket/model/ticket_category_path.py | 08a84d39d96b2b82526614eb7ffc12378baf3f80 | []
| no_license | nabiforks/baytonia | b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4 | 58cb304d105bb7332f0a6ab685015f070988ba56 | refs/heads/main | 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from odoo import models, fields, api
class TicketCategoryPath(models.Model):
_name = 'ticket.category.path'
_rec_name = 'category_id'
category_id = fields.Many2one('website.support.ticket.categories', string="Category",required=1)
user_state_ids = fields.One2many('user.state','ticket_category_path_id','User State')
_sql_constraints = [
('category_id_uniq', 'unique (category_id)', "Category should be Unique !"),
]
class UserState(models.Model):
_name = 'user.state'
stage = fields.Integer('Stage')
state = fields.Many2one('website.support.ticket.states',string="State")
ticket_category_path_id = fields.Many2one('ticket.category.path','Ticket Category Path')
user_id = fields.Many2one('res.users', string="Assigned User")
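    # Each user.state row ties a workflow state and an assigned user to one
    # ticket.category.path; 'stage' is presumably the ordering of those steps.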
| [
"[email protected]"
]
| |
5143c0b105c551354de46512e908b7649fd053b1 | e926966c5aa8061dc4b4780b20817c6504dd488b | /telegram_bots/urls.py | 319cc375fcc2932ce965c6bd1a6cb8805aa94094 | [
"MIT"
]
| permissive | vladimirmyshkovski/django-telegram-bots | da954c4b7754a368e14422153b4e67dd53bff8d1 | f58ee16d61cd1b14cdf5c39649f63a851c1419e4 | refs/heads/master | 2023-01-06T22:42:11.263909 | 2018-07-19T10:34:10 | 2018-07-19T10:34:10 | 126,815,491 | 0 | 1 | MIT | 2022-12-26T20:36:22 | 2018-03-26T10:58:51 | Python | UTF-8 | Python | false | false | 1,048 | py | from django.conf.urls import url
from . import views
app_name = 'telegram_bots'
urlpatterns = [
url(
regex=r'^$',
view=views.BotListView.as_view(),
name='telegram_bots_list',
),
url(
regex=r'^(?P<pk>\d+)/$',
view=views.BotDetailView.as_view(),
name='telegram_bots_detail',
),
url(
regex=r'^create/$',
view=views.BotCreateView.as_view(),
name='telegram_bots_create',
),
url(
regex=r'^(?P<pk>\d+)/delete/$',
view=views.BotDeleteView.as_view(),
name='telegram_bots_delete',
),
url(
regex=r'^subscribe/(?P<signature>.+)/$',
view=views.BotSubscribeView.as_view(),
name='telegram_bots_subscribe'
),
url(
regex=r'^unsubscribe/(?P<signature>.+)/$',
view=views.BotUnsubscribeView.as_view(),
name='telegram_bots_unsubscribe'
),
url(
regex=r'^(?P<bot_token>.+)/$',
view=views.ReceiveView.as_view(),
name='telegram_bots_receiver'
),
]
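# The final catch-all pattern routes Telegram webhook callbacks: the bot token in
# the URL path identifies the bot (a common webhook convention), and it has to
# stay last so it does not shadow the more specific routes above.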
| [
"[email protected]"
]
| |
6feeb0f707a3274b17b3649ae22036391ea865d2 | 3b98ee18977177e10b57e6162a03204e3774d3b8 | /Kirk_Byers_Nornir_Automation/env/bin/easy_install-3.8 | 63a27dd8af722fbcbb7465e62cfa5d8dca99603b | []
| no_license | mattmiller87/practice | 0a3d1cae1283abb683dfab0af86e6c569a6104e1 | 9655a8020038e0f6dfe8df842867debac0fcb1e3 | refs/heads/master | 2022-06-23T23:47:50.350379 | 2022-06-14T13:30:51 | 2022-06-14T13:38:56 | 51,970,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | 8 | #!/Users/matt.miller/Documents/git/practice/Kirk_Byers_Nornir_Automation/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| |
bfbb23a786b14a777616004e0854edb298e9cb69 | 2316ce8a21d44a5d09284968ef42530633dc10d2 | /sample_code/ep260/rev01/t.py | 9d90dfa8c1cc3697f35e2f8011be9b7038e13761 | []
| no_license | AlexanderWinkelmeier/explains | 160de2c41fc5fc0156b482b41f89644dc585c4f3 | d47ec53e384e4303a2d8e71fab9073a1a8d2d6bc | refs/heads/master | 2023-07-30T04:55:31.234482 | 2021-09-15T02:59:42 | 2021-09-15T02:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | import pytest
def test_pass():
assert 1 == 1
def test_failed():
assert 1 == 2
@pytest.fixture
def fixture():
assert False
def test_errored(fixture):
assert 1 == 1
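# Expected outcome under pytest: 1 passed, 1 failed, 1 error (the error comes
# from the fixture asserting False during setup, before test_errored runs).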
| [
"[email protected]"
]
| |
c6db3d71ad904a4bddf6dd521ffae6b04bdd25a0 | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /trunk/pyformex/__init__.py | 002e4e687bd184efd65023a3a3b764564156c3e2 | []
| no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | # $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""pyFormex core module initialisation.
This module initializes the pyFormex global variables and
defines a few essential functions.
"""
__version__ = "0.8.5-a1"
__revision__ = '2070M'
Version = 'pyFormex %s' % __version__
Copyright = 'Copyright (C) 2004-2011 Benedict Verhegghe'
Url = 'http://pyformex.org'
Description = "pyFormex is a tool for generating, manipulating and transforming large geometrical models of 3D structures by sequences of mathematical transformations."
# The GUI parts
app_started = False
interactive = False
app = None # the Qapplication
GUI = None # the GUI QMainWindow
canvas = None # the OpenGL Drawing widget controlled by the running script
#board = None # the message board
# set start date/time
import time,datetime
StartTime = datetime.datetime.now()
# initialize some global variables used for communication between modules
options = None # the options found on the command line
print_help = None # the function to print the pyformex help text (pyformex -h)
cfg = {} # the current session configuration
prefcfg = None # the preferenced configuration
refcfg = None # the reference configuration
preffile = None # the file where the preferenced configuration will be saved
PF = {} # explicitly exported globals
_PF_ = {} # globals that will be offered to scripts
scriptName = None
# define last rescue versions of message, warning and debug
def message(s):
print(s)
warning = message
def debug(s,lead="DEBUG",level=-1):
"""Print a debug message"""
try: # to make sure that debug() can be used before options are set
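    # The bare 'raise' below (or an AttributeError while 'options' is still
    # None) drops into the except clause, which is what actually prints.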
if options.debug < 0 or (options.debug % level > 0):
raise
pass
except:
print("%s: %s" % (lead,str(s)))
def debugt(s):
"""Print a debug message with timer"""
debug(s,time.time())
### End
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
]
| bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
1d4a64996631967cbfe32ff2234b6028d67116af | 3235cf9cbebcb6c12510b1ab5cbd6c1051ef6378 | /CnnModelTrainKaggleCatDog0614/CnnModelTrainKaggleCatDog_DateAugmentation_Pred.py | b150edd104d87d0ee33bb751c30b292b61db5d96 | []
| no_license | dorahero/pyAI | 8ba99fe2726264044e166562359868425d6e79ea | c185875ca19f0cca5ec0812eff373e25d0fbd0f1 | refs/heads/master | 2022-11-27T14:56:07.942104 | 2020-08-08T02:21:48 | 2020-08-08T02:21:48 | 285,956,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # --coding:utf-8--
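# Loads a previously trained Keras cat/dog classifier and predicts one test
# image; the class-index mapping is rebuilt from the training generator.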
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
train_dir = 'kagglecatdog/train'
test_dir = 'kagglecatdog/test'
validation_dir = 'kagglecatdog/validation'
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir )
print('='*30)
print('訓練的分類:',train_generator.class_indices)
print('='*30)
labels = train_generator.class_indices
#將分類做成字典方便查詢
labels = dict((v,k) for k,v in labels.items())
print(labels)
# 載入模型
model = load_model('model_CnnModelTrainKaggleCatDog_DateAugmentation.h5')
# 將圖片轉為待測數據
def read_image(img_path):
try:
img = image.load_img(img_path, target_size=(150, 150))
except Exception as e:
print(img_path,e)
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
return img
# 隨機輸入一個待測圖片
filename = "kagglecatdog/test/cat/cat.1684.jpg"
plt.figure()
im = Image.open(filename)
im_list = np.asarray(im)
plt.title("predict")
plt.axis("off")
plt.imshow(im_list)
plt.show()
img = read_image(filename)
pred = model.predict(img)[0]
print('辨識結果:',labels[pred[0]])
| [
"[email protected]"
]
| |
60700e344adae4ad57f3104b47a86131d1f88b54 | b50ff9063f2cb42c04990b782970debcfe9982b9 | /methods/db_operation.py | 014704c04aa4048ff045f1ae2327a1fe93960ca9 | []
| no_license | carrie0307/website | 95464931ade6a3b4ad51b48b2f0c306c8b7ed0b7 | f2ffd3e8ec1854bbb6d35ddbf9b6a1a8cc238ec8 | refs/heads/master | 2021-01-23T14:14:58.323470 | 2017-10-10T02:33:17 | 2017-10-10T02:33:17 | 102,679,929 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 34,914 | py | # coding=utf-8
from __future__ import division
from pymongo import *
import operator
import datetime
import time
import json
from bson import ObjectId
import pandas as pd
import numpy as np
from pandas import Series,DataFrame
from ip_netSector import ip_category
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
'''建立连接'''
client = MongoClient()
client = MongoClient('172.29.152.152', 27017)
db = client.eds_last
collection = db.domain_ip_cname1
ip_dealer = ip_category()
# 计算所有domain的ip更换频率--index的柱状图
def change_frequency():
global collection
datalist = []
return_data = {}
res = collection.find()
return_data = {}
# return_data['total'] = res.count()
return_data['total'] = 10
return_data['rows'] = []
i = 0
for item in res:
print item['visit_times']
if i > 9:
break
i += 1
temp = {}
change_times = len(item['dm_ip'])
if 'visit_times' in item.keys():
if item['visit_times'] < change_times:
visit_times = len(item['dm_ip'])
else:
visit_times = item['visit_times'] + 1
else:
visit_times = 1
frequency = change_times / visit_times
frequency = round(frequency,2)
return_data['rows'].append({'domain':str(item['domain']), 'change_times' : str(change_times), 'visit_times': int(visit_times), 'frequency': str(frequency)})
# return_data['rows'] = sorted(return_data['rows'], key=operator.itemgetter('change_times'), reverse = True)
return return_data
# ip连续性统计,ip散点图
def ip_change(domain):
global collection
return_data = {}
return_data['data'] = {}
return_data['ips'] = []
return_data['date'] = []
item = collection.find_one({'domain':domain})
for each_visit_res in item['dm_ip']:
return_data['ips'].extend(each_visit_res['ips'])
return_data['date'].append(each_visit_res['insert_time'])
for ip in each_visit_res['ips']:
return_data['data'].setdefault(each_visit_res['insert_time'], []).append([each_visit_res['insert_time'],ip]) # 默认设置为[],存在列表则添加元素
# return_data['data'].append([each_visit_res['insert_time'],ip])
# return_data['rows'][each_visit_res['insert_time']] = each_visit_res['ips']
return_data['ips']= list(set(return_data['ips']))
return return_data
# ip变动具体统计--ip_situation
def ip_change_situation(domain):
global collection
return_data = {}
return_data['data'] = []
item = collection.find_one({'domain':domain})
return_data['frequency'] = {}
return_data['frequency']['change_times'] = len(item['dm_ip'])
return_data['frequency']['visit_times'] = item['visit_times'] + 1
last_time = item['record_time']
last_ip = []
i = 0
for each_visit_res in item['dm_ip']:
i += 1
insert_time = each_visit_res['insert_time']
time_gap = ((datetime.datetime.strptime(insert_time, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(last_time, "%Y-%m-%d %H:%M:%S")).seconds) / 3600 # 以小时为单位
time_gap = round(time_gap, 2) #距离上次更新时间间隔
last_time = insert_time # 将当次时间置为上次时间,以便下次处理
ip_geos = each_visit_res['geos'] # ip_geo是当前这次访问所有ip的地理位置列表
this_geo_list = [] # 地理位置城市列表
# 获取当前访问所有ip的地理位置分布
geo_detail = {} #地理位置信息,包括地理位置以及每个位置对应的数量
for geo in ip_geos:
this_geo = '' # 一条ip的地理位置
for key in ['country', 'region', 'city']:
if geo[key] != '0':
this_geo = this_geo + geo[key] + '-'
this_geo_list.append(this_geo[0:-1])
if this_geo[0:-1] in geo_detail.keys():
geo_detail[this_geo[0:-1]] += 1
else:
geo_detail[this_geo[0:-1]] = 1
this_geo_list = list(set(this_geo_list)) # 去重
this_geo_list = '</br>'.join(this_geo_list) # 转化为字符串
geo_info = {'geo':this_geo_list, 'geo_detail':geo_detail}
ip_info = {}
ip_info['num'] = len(each_visit_res['ips']) #ip数量
ip_info['ips'] = '\n'.join(each_visit_res['ips']) # 具体ip
ips = each_visit_res['ips']
update_ip = {}
update_ip['num'] = len(list(set(ips).difference(set(last_ip))))
update_ip['ips'] = '\n'.join(list(set(ips).difference(set(last_ip))))
last_ip = ips
return_data['data'].append({'time':insert_time,'time_gap':time_gap,'ip':ip_info,'ip_geo':geo_info,'update_ip':update_ip})
# return_data['data'].append([insert_time,time_gap,ip_num,this_geo_list,update_ip,delete_ip])
# print return_data
return return_data
# ip_period IP服务时长
def live_period(domain):
'''
这里有些问题,需要改
'''
global collection
return_data = {}
return_data['rows'] = []
ip_period = {}
ip_list = []
item = collection.find_one({'domain':domain})
last_ip = []
for index, each_visit_res in enumerate(item['dm_ip']): # 遍历每一次访问
ip_list.extend(each_visit_res['ips'])
if index == 0:
for ip in each_visit_res['ips']:
ip_period[ip] = {'ip': ip,'begin': item['record_time'], 'end': each_visit_res['insert_time']}
last_ip.append(ip)
else:
temp = [] # 临时记录这一次访问所的ip的容器,之后将转移到last_ip列表中
for ip in each_visit_res['ips']:
if ip in last_ip: # 上一次出现过,即连续出现的ip,更新最后插入时间作为计时结尾
ip_period[ip]['end'] = each_visit_res['insert_time']
else: # 第一次出现的ip,将插入时间作为计时起点,计时结尾记为当前统计时间
ip_period[ip] = {'ip': ip, 'begin': each_visit_res['insert_time'], 'end':''}
temp.append(ip)
for ip in last_ip:
if ip not in each_visit_res['ips']: #上次记录有,但这次不存在的ip
ip_period[ip]['end'] = each_visit_res['insert_time'] #结束时间记为第一次探测到它不存在的情况
last_ip = temp # 将这一次访问所得ip置于last_ip列表
temp = []
return_data['total'] = len(ip_period.keys())
for ip in ip_period.keys(): # 计算每个ip的生命时长
if ip_period[ip]['end'] =='': # 说明该ip刚出现过一次
ip_period[ip]['end'] = str(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
# ip_period[ip]['period'] = 1.99 # 则置为访问间隔1.99, 表示不满2小时
# return_data['rows'].append(ip_period[ip])
continue
# delta.days * 24 + delta.seconds / 3600
delta_days = (datetime.datetime.strptime(ip_period[ip]['end'], "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(ip_period[ip]['begin'], "%Y-%m-%d %H:%M:%S")).days
delta_seconds = (datetime.datetime.strptime(ip_period[ip]['end'], "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(ip_period[ip]['begin'], "%Y-%m-%d %H:%M:%S")).seconds
period = delta_days * 24 + delta_seconds / 3600 # 以小时为单位
ip_period[ip]['period'] = round(period, 2) # 保留两位小数
return_data['rows'].append(ip_period[ip])
return_data['rows'] = sorted(return_data['rows'], key=operator.itemgetter('period'), reverse = True)
return return_data
# 每一类域名拥有ip数量比例统计 ip_percent.html
def ip_num_percent():
'''
{
'Gamble':[[ip_num, percint], [ip_num, percint], ...]},
'Porno':{[ip_num, percint], [ip_num, percint], ...]},
'ip_num': 平均ip数量列表, 'percent': 该数量ip占该类型ip数量百分比
}
'''
def domain_deal(domains):
'''
domains = collection.find({'type':'***'})
对某类型域名进行具体统计的函数
'''
data = []
domain_num = domains.count() # 该类型域名总量
# num_dict = {'0':0,'1':0,'2':0,'3~10':0,'11~15':0,'>15':0} #{ip平均数量: 该数量域名占域名总量的比例}
num_info = [0,0,0,0,0,0]
for item in domains:
change_times = len(item['dm_ip']) # 该域名ip变动次数
ip_total = 0 # 该域名ip总量
for each_visit in item['dm_ip']:
ip_total += len(each_visit['ips'])
ip_aveg_num = int(ip_total / change_times) # 该域名平均ip数量
if ip_aveg_num <= 2:
num_info[ip_aveg_num] += 1
elif 3 <= ip_aveg_num <= 10:
num_info[3] += 1
elif 11 <= ip_aveg_num <= 15:
num_info[4] += 1
elif ip_aveg_num >= 15:
num_info[5] += 1
for i in range(len(num_info)):
num_info[i] = round((num_info[i] / domain_num) * 100, 2)
return num_info
global collection
return_data = {}
return_data['Gamble'] = {}
return_data['Porno'] = {}
Gamble_domains = collection.find({'dm_type':'Gamble'})
Porno_domains = collection.find({'dm_type':'Porno'})
gamble_data = domain_deal(Gamble_domains)
porno_data = domain_deal(Porno_domains)
return_data['Gamble'] = gamble_data
return_data['Porno'] = porno_data
return return_data
# 单一域名的ip网段统计
def ip_net_sector(domain):
return_data = {}
global collection
global ip_dealer
item = collection.find_one({'domain':domain})
ips = []
geo_dict = {}
for each_visit in item['dm_ip']:
ips.extend(each_visit['ips'])
for index,ip in enumerate(each_visit['ips']):
this_geo = ''
geo = each_visit['geos'][index] # 当前ip的地理位置字典
for key in ['country', 'region', 'city']:
if geo[key] != '0':
this_geo = this_geo + geo[key] + '-'
this_geo = this_geo[:-1]
if this_geo in geo_dict.keys(): # 若该地理位置在geo_dict中
if ip not in geo_dict[this_geo]:
geo_dict[this_geo].append(ip) # 如果改ip未在该地理位置集合中,则加入
else:
geo_dict[this_geo] = [ip]
ips = list(set(ips)) # 获取到所有的ip
general_ipsector = ip_dealer.judge_Sector(ips)
# return_data['domain_general'] = general_ipsector
for record in general_ipsector:
record['geo'] = []
for ip in record['ips']: # 判断每个ip的地理位置
for geo in geo_dict.keys():
if ip in geo_dict[geo] and geo not in record['geo']: # 如果属于某个地理位置
record['geo'].append(geo)
break # 说明找到了当前ip对应的地理位置
return_data['domain_general'] = general_ipsector
return return_data
def general_ip_sector(dm_type):
'''
整体的ip网段分布统计
'''
global collection
global ip_dealer
if dm_type != 'all':
res = collection.find({'dm_type':dm_type})
else:
res = collection.find()
ips = []
ip_dict = {} # 以ip为key,关于地理位置和域名的字典
geo_dict = {} # 以地理位置为key,关于ip的字典
for item in res:
for each_visit in item['dm_ip']:
ips.extend(each_visit['ips'])
for index,ip in enumerate(each_visit['ips']): # 建立每个ip的地理位置与域名的对应关系
temp = {}
if ip not in ip_dict.keys(): # 初次遇到的ip,建立域名的列表,并提取地理位置信息
if dm_type != 'all':
temp['domain'] = [item['domain']]# 建立域名的列表
else:
if item['dm_type'] == 'Gamble':
temp['domain'] = [[item['domain']], []]
else:
temp['domain'] = [[], [item['domain']]]
this_geo = ''
geo = each_visit['geos'][index]
for key in ['country', 'region', 'city']:
if geo[key] != '0':
this_geo = this_geo + geo[key] + '-'
temp['geo'] = this_geo[:-1] # 获取到地理位位置信息
ip_dict[ip] = temp
else: # 已处理u过的ip,将新的域名加入列表
if dm_type != 'all':
ip_dict[ip]['domain'].append(item['domain'])
else:
if item['dm_type'] =='Gamble':
ip_dict[ip]['domain'][0].append(item['domain'])
else:
ip_dict[ip]['domain'][1].append(item['domain'])
# print ip_dict[ip]['domain']
# 建立地理位置字典
ips = list(set(ips))
ip_sector_info = ip_dealer.judge_Sector(ips) # 所有ip的网段信息
for record in ip_sector_info:
record['geo'] = []
record['domain'] = []
for ip in record['ips']: # 遍历当前网段记录中的全部ip
if ip_dict[ip]['geo'] not in record['geo']: # 避免同一网段的重复地址
record['geo'].append(ip_dict[ip]['geo'])
if dm_type != 'all':
record['domain'].extend(ip_dict[ip]['domain'])
record['domain'] = list(set(record['domain'])) #不同ip可能给不止一个域名提供服务,因此要及时去重
else:
if len(record['domain']) == 0:
record['domain'] = [[],[]]
record['domain'][0].extend(ip_dict[ip]['domain'][0])
record['domain'][1].extend(ip_dict[ip]['domain'][1])
record['domain'][0] = list(set(record['domain'][0])) # 为了能去重,这里依旧统计域名,对域名进行去重
record['domain'][1] = list(set(record['domain'][1]))
# 计算该网段内ip的所有域名总量
if dm_type != 'all':
record['num'] = len(record['domain'])
else:
record['num'] = len(record['domain'][0]) +len(record['domain'][1])
# 以提供服务域名数量为排序key
ip_sector_info = sorted(ip_sector_info, key=operator.itemgetter('num'), reverse = True)
return_data = {}
return_data['domain_general'] = ip_sector_info
return return_data
# 单个域名ip所属网管数量统计 -- 表格展示
# 域名网段数量统计 -- 赌博色情双柱状图
def domain_ip_sector_num():
global collection
global ip_dealer
# 数量粒度[0, 1, 2, 3~5, 6~10, 11~15, 16~20, 21~25, 25+]
def single_type(sectors_num_list, domains_num):
'''某网段数量域名占比统计'''
num_info = [0] * 9
for sectors_num in sectors_num_list:
if sectors_num <= 2:
num_info[sectors_num] += 1
elif 3 <= sectors_num <= 5:
num_info[3] += 1
elif 6 <= sectors_num <= 10:
num_info[4] += 1
elif 11 <= sectors_num <= 15:
num_info[5] += 1
elif 16 <= sectors_num <= 20:
num_info[6] += 1
elif 21 <= sectors_num <= 25:
num_info[7] += 1
else:
num_info[8] += 1
for i in range(len(num_info)):
num_info[i] = round((num_info[i] / domains_num) * 100, 2)
return num_info
return_data = {}
return_data['bar-data'] = {}
for dm_type in ['Gamble', 'Porno']:
res = collection.find({'dm_type':dm_type})
domains_num = res.count()
print domains_num
sectors_num_list = []
domain_ip_sector = {}
for item in res:
ips = [] # 每个域名一个ip列表集合
for each_visit in item['dm_ip']:
ips.extend(each_visit['ips'])
ips = list(set(ips))
ip_sector_info = ip_dealer.judge_Sector(ips)
sectors_num = len(ip_sector_info) # 网段数量
sectors_num_list.append(sectors_num)
if sectors_num > 1: # 对网段数量大于1的进行统计
domain_ip_sector[item['domain']] = {'domain': item['domain'], 'ips':ips,'sectors': sectors_num}
domain_ip_sector= sorted(domain_ip_sector.iteritems(), key=lambda d:d[1]['sectors'], reverse = True)
return_data[dm_type] = [item[1] for item in domain_ip_sector] # 统计表中显示的域名网段数量统计
return_data['bar-data'][dm_type] = single_type(sectors_num_list, domains_num)
return return_data
# 每个ip提供服务的域名数量统计 ip_domain_num.html
def general_ip_domain(dm_type):
'''
ip提供服务域名数量统计
'''
global collection
global ip_dealer
if dm_type != 'all':
res = collection.find({'dm_type':dm_type})
else:
res = collection.find()
ips = []
ip_dict= {} # 以ip为key,内容为域名的列表
for item in res:
for each_visit in item['dm_ip']:
for index, ip in enumerate(each_visit['ips']):
# 获取地理位置
geo = each_visit['geos'][index] #当前ip地理位置
this_geo = ''
for key in ['country', 'region', 'city']:
if geo[key] != '0':
this_geo = this_geo + geo[key] + '-'
if dm_type != 'all':
if ip not in ip_dict.keys(): # 第一次处理到这个ip
ip_dict[ip] = {'ip': ip, 'geo': this_geo[:-1], 'domains': [item['domain']], 'category': ip_dealer.judge_category(ip)}
else:
if item['domain'] not in ip_dict[ip]['domains']:
ip_dict[ip]['domains'].append(item['domain'])
else:# 全部域名的情况
if ip not in ip_dict.keys():
ip_dict[ip] = {'ip': ip, 'geo': this_geo[:-1], 'domains': [[],[]], 'category': ip_dealer.judge_category(ip)}
if item['dm_type'] == 'Gamble':
if item['domain'] not in ip_dict[ip]['domains'][0]:
ip_dict[ip]['domains'][0].append(item['domain'])
else:
if item['domain'] not in ip_dict[ip]['domains'][1]:
ip_dict[ip]['domains'][1].append(item['domain'])
return_data = {}
# 按照提供服务域名总量排序
if dm_type != 'all':
dic= sorted(ip_dict.iteritems(), key=lambda d:len(d[1]['domains']), reverse = True)
return_data['data'] = [item[1] for item in dic]
else:
dic= sorted(ip_dict.iteritems(), key=lambda d:len(d[1]['domains'][0]) + len(d[1]['domains'][1]), reverse = True) # 按照提供服务域名总量排序
return_data['data'] = [item[1] for item in dic]
return return_data
# 特殊ip的统计 special.html
def special_ip_count(dm_type):
global collection
global ip_dealer
if dm_type != 'all':
res = collection.find({'dm_type':dm_type})
else:
res = collection.find()
ips = []
ip_dict = {}
for item in res:
for each_visit in item['dm_ip']:
for ip in each_visit['ips']:
category = ip_dealer.special_ip_category(ip)
if category != 'A' and category != 'B' and category != 'C':
if ip not in ip_dict.keys():
if dm_type != 'all': # special_item表示特殊的ip地址类型
ip_dict[ip] = {'ip': ip, 'special_item': category, 'domains':[item['domain']], 'dm_type': item['dm_type']}
else: # 如果是整体的,则对Gamble,Porno进行计数
if item['dm_type'] == 'Gamble':
ip_dict[ip] = {'ip': ip, 'special_item': category, 'domains':[item['domain']], 'dm_type': [1,0]}
else:
ip_dict[ip] = {'ip': ip, 'special_item': category, 'domains':[item['domain']], 'dm_type': [0,1]}
else:
if item['domain'] not in ip_dict[ip]['domains']: # 防止一个ip为多个域名多次服务时重复统计域名的情况
ip_dict[ip]['domains'].append(item['domain'])
if dm_type == 'all':
if item['dm_type'] == 'Gamble':
ip_dict[ip]['dm_type'][0] += 1
else:
ip_dict[ip]['dm_type'][1] += 1
ip_dict = sorted(ip_dict.iteritems(), key=lambda d:len(d[1]['domains']), reverse = True) # 根据提供服务域名的数量排序
return_data = {}
return_data['data'] = [item[1] for item in ip_dict]
return return_data
# 统计地理位置为“未分配或内网ip”的ip与服务的域名
def special_geo():
global collection
global ip_dealer
ip_dict = {}
res = collection.find()
for item in res:
for each_visit in item['dm_ip']:
for index, ip in enumerate(each_visit['ips']):
# print each_visit['geos'][index]['country']
if each_visit['geos'][index]['country'] == u'未分配或者内网IP': # 要记录的特殊ip
if ip not in ip_dict.keys(): # special_item表示特殊的地理位置
ip_dict[ip] = {'ip':ip, 'special_item': '未分配或内网ip', 'domains': [item['domain']], 'dm_type': [0, 0]}
if item['dm_type'] == 'Gamble': # 对该ip服务域名类型进行统计
ip_dict[ip]['dm_type'][0] += 1
else:
ip_dict[ip]['dm_type'][1] += 1
else:
if item['domain'] not in ip_dict[ip]['domains']:
ip_dict[ip]['domains'].append(item['domain'])
if item['dm_type'] == 'Gamble': # 对该ip服务域名类型进行统计
ip_dict[ip]['dm_type'][0] += 1
else:
ip_dict[ip]['dm_type'][1] += 1
ip_dict = sorted(ip_dict.iteritems(), key=lambda d:len(d[1]['domains']), reverse = True) # 根据提供服务域名的数量排序
return_data = {}
return_data['data'] = [item[1] for item in ip_dict]
return return_data
# 单个域名ip所属运营商-- 表格展示
# 域名运营商数量统计 -- 赌博色情双柱状图
def domain_oper_num():
global collection
global ip_dealer
return_data = {}
return_data['bar-data'] = {} # 柱状图数据
for dm_type in ['Gamble', 'Porno']:
res = collection.find({'dm_type':dm_type})
domains_num = res.count()
oper_data = [] # 每个类型域名的字典列表 oper_data = [{'domain': 域名, 'opers':{}, 'ips':ip列表, 'dm_type': 域名类型}]
oper_num_list = [0] * 8 # 运营商数量统计列表,分别[0, 1,2,3,4,5,6,6以上], 用于柱状图
for item in res:
temp_dict = {'domain': item['domain'], 'opers':{}, 'ips':[]}
# 'opers':{} 为每个运营商建立字典,{'oper1':[ip1,ip2, ...], 'oper2':[ip1,ip2, ...]}
for each_visit in item['dm_ip']:
for index, oper_info in enumerate(each_visit['geos']):
if oper_info['oper'] not in temp_dict['opers'].keys():
temp_dict['opers'][oper_info['oper']] = [each_visit['ips'][index]] # 为该运营商建立ip列表
temp_dict['ips'].append(each_visit['ips'][index])
else:
if each_visit['ips'][index] not in temp_dict['opers'][oper_info['oper']]: # 避免同一运营商的ip重复
temp_dict['opers'][oper_info['oper']].append(each_visit['ips'][index])
temp_dict['ips'].append(each_visit['ips'][index])
oper_num = len(temp_dict['opers']) # 当前域名运营商数量
temp_dict['num'] = oper_num # 记录运营商数量,便于后面排序
if oper_num <= 6: # 运营商数量统计
oper_num_list[oper_num] += 1
else:
oper_num_list[7] += 1
if len(temp_dict['opers']) > 1: # 统计运营商超过一个的域名
print '==='
oper_data.append(temp_dict)
oper_num_list = [(round((num / domains_num) * 100, 2)) for num in oper_num_list]
return_data['bar-data'][dm_type] = oper_num_list
oper_data = sorted(oper_data, key=operator.itemgetter('num'), reverse = True) # 根据运营商数量排序
return_data[dm_type] = oper_data
return return_data
# 统计地理位置、网段、运营商数量大于1的域名
def special_domain():
global collection
global ip_dealer
return_data = {}
i = 0
for dm_type in ['Gamble', 'Porno']:
res = collection.find({'dm_type':dm_type})
data_info = []
for item in res:
ips = []
temp_dict = {'domain': item['domain'], 'opers':{}, 'geos':{}, 'sectors':0, 'ips':[]}
for each_visit in item['dm_ip']:
# 获取当前域名ip
ips.extend(each_visit['ips'])
ips = list(set(ips))
for index, ip in enumerate(each_visit['ips']):
# 读取运营商
oper = each_visit['geos'][index]['oper'] # 获取运营商名称
# 计算每个运营商服务ip的数量
if oper not in temp_dict['opers'].keys():
temp_dict['opers'][oper] = [ip]
else:
if ip not in temp_dict['opers'][oper]:
temp_dict['opers'][oper].append(ip)
# 读取地理位置信息
geo = ''
for key in ['country', 'region', 'city']:
if each_visit['geos'][index][key]!= '0':
geo = geo + each_visit['geos'][index][key] + '-'
geo = geo[:-1] # 获取到地理位置
if geo not in temp_dict['geos'].keys():
temp_dict['geos'][geo] = [ip]
else:
if ip not in temp_dict['opers'][oper]:
temp_dict['geos'][oper].append(ip)
# 计算网段
if ips != []:
ip_sector_info = ip_dealer.judge_Sector(ips)
sectors_num = len(ip_sector_info) # 网段数量
temp_dict['sectors'] = sectors_num
temp_dict['ips'] = ips
temp_dict['ips_num'] = len(ips)
if len(temp_dict['geos']) > 1 or len(temp_dict['opers']) > 1 or sectors_num > 1:
data_info.append(temp_dict)
data_info = sorted(data_info, key=operator.itemgetter('ips_num'), reverse = True) # 根据运营商数量排序
return_data[dm_type] = data_info
return return_data
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
# www.aitingwang.com 中国,但其他省市未解析出来
def province_count():
return_data = {}
return_data['Gamble'] = {'Home':[], 'Broad':{}, 'pie-chart':[]}
return_data['Porno'] = {'Home':[], 'Broad':{}, 'pie-chart':[]}
global collection
for dm_type in ['Gamble', 'Porno']:
home_num = 0
HT_num = 0
broad_num = 0
res = collection.find({'dm_type':dm_type})
region_domain = {'天津': [{'name':'天津','value':0},[]], '上海': [{'name':'上海','value':0},[]], '重庆': [{'name':'重庆','value':0}, []], '北京': [{'name':'北京','value':0}, []],
'河北': [{'name':'河北','value':0}, []], '河南': [{'name':'河南','value':0}, []], '云南': [{'name':'云南','value':0}, []], '辽宁':[{'name':'辽宁','value':0}, []],
'黑龙': [{'name':'黑龙江','value':0},[]], '湖南': [{'name':'湖南','value':0}, []],'安徽': [{'name':'安徽','value':0}, []], '山东': [{'name':'山东','value':0}, []],
'新疆': [{'name':'新疆','value':0}, []], '江苏': [{'name':'江苏','value':0}, []],'浙江': [{'name':'浙江','value':0}, []], '江西': [{'name':'江西','value':0}, []],
'湖北': [{'name':'湖北','value':0}, []], '广西': [{'name':'广西','value':0}, []], '甘肃': [{'name':'甘肃','value':0}, []], '山西': [{'name':'山西','value':0}, []],
'内蒙': [{'name':'内蒙古','value':0}, []], '陕西': [{'name':'陕西','value':0}, []], '吉林': [{'name':'吉林','value':0}, []], '福建': [{'name':'福建','value':0}, []],
'贵州': [{'name':'贵州','value':0}, []], '广东': [{'name':'广东','value':0}, []],'青海': [{'name':'青海','value':0}, []], '西藏': [{'name':'西藏','value':0}, []],
'四川': [{'name':'四川','value':0}, []], '宁夏': [{'name':'宁夏','value':0}, []], '海南': [{'name':'海南','value':0}, []], '台湾': [{'name':'台湾','value':0}, []],
'香港': [{'name':'香港','value':0}, []], '澳门': [{'name':'澳门','value':0}, []], '0': [{'name':'0','value':0}, []]}
broad_domain = {}
for item in res:
for each_visit in item['dm_ip']:
for index, ip in enumerate(each_visit['ips']):
country = each_visit['geos'][index]['country']
if country == '中国':
region = str(each_visit['geos'][index]['region'][:2]) # 不包括“省”字 黑龙江--黑龙, 内蒙 --内蒙
if ip not in region_domain[region][1]: # 这个ip没有重复统计
home_num += 1 # 统计内地ip总数
region_domain[region][1].append(ip)
region_domain[region][0]['value'] += 1
elif country == '香港' or country == '澳门' or country == '台湾':
region = str(country)
if ip not in region_domain[region][1]: # 这个ip没有重复统计
HT_num += 1 # 统计港台ip总数
region_domain[region][1].append(ip)
region_domain[region][0]['value'] += 1
else: # 海外域名
if country not in broad_domain.keys():
broad_domain[country] = {'country':country, 'ips':[ip]}
else:
if ip not in broad_domain[country]['ips']:
broad_num += 1 # 统计海外ip总数
broad_domain[country]['ips'].append(ip)
return_data[dm_type]['Home'] = [region_domain[item][0] for item in region_domain.keys()]
return_data[dm_type]['Broad'] = broad_domain.values()
return_data[dm_type]['Broad'] = sorted(return_data[dm_type]['Broad'], key=lambda para: len(para['ips']), reverse = True)
return_data[dm_type]['pie-chart'] = [{"value":home_num, "name":'内地ip'},{"value":broad_num, "name":'海外ip'},{"value":HT_num, "name":'港台ip'}]
return return_data
def domain_geo_num():
global collection
return_data = {}
return_data['bar-data'] = {} # 柱状图数据
for dm_type in ['Gamble', 'Porno']:
res = collection.find({'dm_type':dm_type})
domains_num = res.count()
oper_data = [] # 每个类型域名的字典列表 oper_data = [{'domain': 域名, 'opers':{}, 'ips':ip列表, 'dm_type': 域名类型}]
oper_num_list = [0] * 8 # 运营商数量统计列表,分别[0, 1,2,3,4,5,6,6以上], 用于柱状图
for item in res:
temp_dict = {'domain': item['domain'], 'opers':{}, 'ips':[]}
# 'opers':{} 为每个运营商建立字典,{'oper1':[ip1,ip2, ...], 'oper2':[ip1,ip2, ...]}
for each_visit in item['dm_ip']:
for index, oper_info in enumerate(each_visit['geos']):
if oper_info['oper'] not in temp_dict['opers'].keys():
temp_dict['opers'][oper_info['oper']] = [each_visit['ips'][index]] # 为该运营商建立ip列表
temp_dict['ips'].append(each_visit['ips'][index])
else:
if each_visit['ips'][index] not in temp_dict['opers'][oper_info['oper']]: # 避免同一运营商的ip重复
temp_dict['opers'][oper_info['oper']].append(each_visit['ips'][index])
temp_dict['ips'].append(each_visit['ips'][index])
oper_num = len(temp_dict['opers']) # 当前域名运营商数量
temp_dict['num'] = oper_num # 记录运营商数量,便于后面排序
if oper_num <= 6: # 运营商数量统计
oper_num_list[oper_num] += 1
else:
oper_num_list[7] += 1
if len(temp_dict['opers']) > 1: # 统计运营商超过一个的域名
print '==='
oper_data.append(temp_dict)
oper_num_list = [(round((num / domains_num) * 100, 2)) for num in oper_num_list]
return_data['bar-data'][dm_type] = oper_num_list
oper_data = sorted(oper_data, key=operator.itemgetter('num'), reverse = True) # 根据运营商数量排序
return_data[dm_type] = oper_data
return return_data
if __name__ == '__main__':
# print change_frequency()
ip_change('www.www-4s.cc')
# www-4s.cc
# 7777744444.com
# live_period('www.www-4s.cc')
# print ip_change_situation('www.www-4s.cc')
# print ip_change_situation('www.511789.com')
# print ip_num_percent()
# print ip_net_sector('www.www-4s.cc')
# general_ip_sector('all')
# general_ip_sector('Gamble')
# general_ip_domain('Porno')
# special_ip_count('all')
# single_ip_netSector('www.www-4s.cc')
# domain_ip_sector_num('Gamble')
# general_domain_ip_sector()
# print domain_ip_sector_num()
# special_geo()
# print domain_oper_num()
# print domain_oper_num()
# print special_domain()
# province_count()
| [
"[email protected]"
]
| |
1cd7ea419f68dfffbd358789871d2d9fd90a5a26 | 8690ff3a6a1ca748aebb381bd50fdb317babbaf8 | /utils/ops.py | b5f81000648ecedae427cf12334d3a082dd1fddf | []
| no_license | thoppe/postern_perception | 8457bd1f89fb198191c4152d3354036ad4369d20 | b2b8dda375d1a0430c2cadcd5994e1fbd7a23676 | refs/heads/master | 2023-04-07T17:48:52.816426 | 2020-01-29T16:22:04 | 2020-01-29T16:22:04 | 216,728,164 | 0 | 0 | null | 2023-03-25T18:13:20 | 2019-10-22T05:12:42 | Python | UTF-8 | Python | false | false | 6,822 | py | # This script contains all neural network layers and functions that are used
# the project.
from __future__ import division
import tensorflow as tf
import numpy as np
weight_init = tf.contrib.layers.xavier_initializer()
def instance_norm(x, scope='instance_norm'):
""" Wrapper of instance normalization.
Parameters
----------
input: tensor.
scope: name of the scope.
Returns
-------
normalized tensor.
"""
return tf.contrib.layers.instance_norm(
x, epsilon=1e-05, center=True, scale=True, scope=scope)
def conv2d(input_, output_dim, d_h=2, d_w=2, scope='conv_0',
conv_filters_dim=4, padding='zero', use_bias=True, pad=0):
""" Wrapper of convolutional operation.
Parameters
----------
input_: a 4d tensor.
output_dim: int, output channels.
d_h: int, height of stride.
d_w: int, width of stride.
scope: str, name of variable scope.
conv_filters_dim: int, size of kernel, width = height.
padding: str, strategy of padding, one of "zero" and "reflect".
use_bias: bool, whether to use bias in this layer.
pad: int, size of padding.
Returns
-------
conv: output 4d tensor.
"""
k_initializer = tf.random_normal_initializer(stddev=0.02)
b_initializer = tf.constant_initializer(0)
k_h = k_w = conv_filters_dim
with tf.compat.v1.variable_scope(scope):
if padding == 'zero':
x = tf.pad(
input_,
[[0, 0], [pad, pad], [pad, pad], [0, 0]])
elif padding == 'reflect':
x = tf.pad(
input_,
[[0, 0], [pad, pad], [pad, pad], [0, 0]],
mode='REFLECT')
else:
x = input_
conv = tf.layers.conv2d(
x,
output_dim,
kernel_size=[k_h, k_w],
strides=(d_h, d_w),
kernel_initializer=k_initializer,
bias_initializer=b_initializer,
use_bias=use_bias)
return conv
def deconv2d(input_, output_dim, d_h=2, d_w=2, scope='deconv_0',
conv_filters_dim=4, padding='SAME', use_bias=True):
"""Transposed convolution (fractional stride convolution) layer.
Parameters
----------
input_: tensor, input image.
output_dim: int, number of channels.
d_h: int, height of stride.
d_w: int, width of stride.
scope: str, name of scope.
conv_filter_dim: int, kernel size.
padding: int, "same" or "valid", case insensitive.
use_bias: bool, use bias or not.
Returns
-------
deconv: tensor, output tenosr.
"""
k_initializer = tf.random_normal_initializer(stddev=0.02)
b_initializer = tf.constant_initializer(0)
k_h = k_w = conv_filters_dim
deconv = tf.layers.conv2d_transpose(
inputs=input_,
filters=output_dim,
kernel_size=[k_h, k_w],
strides=(d_h, d_w),
padding=padding,
kernel_initializer=k_initializer,
bias_initializer=b_initializer,
use_bias=use_bias,
name=scope)
return deconv
def relu(input_):
""" Wrapper of ReLU function.
Parameters
----------
input_: tensor.
Returns
-------
tensor.
"""
return tf.nn.relu(input_)
def lrelu(input_):
""" Wrapper of LeakyReLU function.
Parameters
----------
input_: tensor.
Returns
-------
tensor.
"""
return tf.nn.leaky_relu(input_, alpha=0.01)
def tanh(input_):
""" Wrapper of tanh function.
Parameters
----------
input_: tensor.
Returns
-------
tensor.
"""
return tf.tanh(input_)
def l1_loss(x, y):
""" L1 loss.
Parameters
----------
x: tensor.
y: tensor, which should have the same shape as x.
Returns
-------
loss: scalar, l1 loss.
"""
loss = tf.reduce_mean(tf.abs(x - y))
return loss
def l2_loss(x, y):
""" L2 loss.
Parameters
----------
x: tensor
y: tensor, which should have the same shape as x.
Returns
-------
loss: scalar, l2 loss.
"""
loss = tf.reduce_mean(tf.reduce_sum(tf.square(x - y), axis=[1, 2, 3]))
return loss
def content_loss(endpoints_mixed, content_layers):
""" Content loss.
Ref: https://arxiv.org/abs/1603.08155.
Parameters
----------
endpoints_mixed: dict, (name, tensor).
content_layers: list, name of layers used.
Returns
-------
loss: scalar, content loss.
"""
loss = 0
for layer in content_layers:
feat_a, feat_b = tf.split(endpoints_mixed[layer], 2, 0)
size = tf.size(feat_a)
loss += tf.nn.l2_loss(feat_a - feat_b) * 2 / tf.to_float(size)
return loss
def style_loss(endpoints_mixed, style_layers):
""" Style loss.
Ref: https://arxiv.org/abs/1603.08155.
Parameters
----------
endpoints_mixed: dict, (name, tensor).
content_layers: list, name of layers used.
Returns
-------
loss: scalar, style loss.
"""
loss = 0
for layer in style_layers:
feat_a, feat_b = tf.split(endpoints_mixed[layer], 2, 0)
size = tf.size(feat_a)
loss += tf.nn.l2_loss(
gram(feat_a) - gram(feat_b)) * 2 / tf.to_float(size)
return loss
def gram(layer):
""" Compute gram matrix.
Ref: https://arxiv.org/abs/1603.08155.
Parameters
----------
layer: tensor.
Returns
-------
grams: gram matrices.
"""
shape = tf.shape(layer)
num_images = shape[0]
width = shape[1]
height = shape[2]
num_filters = shape[3]
features = tf.reshape(layer, tf.stack([num_images, -1, num_filters]))
denominator = tf.to_float(width * height * num_filters)
grams = tf.matmul(features, features, transpose_a=True) / denominator
return grams
def angular2cart(angular):
""" Angular coordinates to cartesian coordinates.
Parameters
----------
angular: list, [yaw, pitch]
Returns
-------
np.array, coordinates in cartesian system.
"""
theta = angular[:, 0] / 180.0 * np.pi
phi = angular[:, 1] / 180.0 * np.pi
x = np.cos(phi) * np.sin(theta)
y = np.sin(phi)
z = np.cos(phi) * np.cos(theta)
return np.stack([x, y, z], axis=1)
def angular_error(x, y):
"""Compute the angular error.
Parameters
----------
x: list, [yaw, pitch].
y: list, [yaw, pitch].
Returns
-------
int, error.
"""
x = angular2cart(x)
y = angular2cart(y)
x_norm = np.sqrt(np.sum(np.square(x), axis=1))
y_norm = np.sqrt(np.sum(np.square(y), axis=1))
sim = np.divide(np.sum(np.multiply(x, y), axis=1),
np.multiply(x_norm, y_norm))
sim = np.clip(sim, -1.0, 1.0)
return np.arccos(sim) * 180.0 / np.pi
| [
"[email protected]"
]
| |
14beee1cd62cee9c3bcca0ff8f61070ca48fe92c | 2c38c2ea0328b75ba96a36346f71bd8ddeda3d35 | /qa/refund_moderated.py | 7da12b38452dec5e77e74dc0f3a6dc5678e95d7f | [
"MIT"
]
| permissive | TheButterZone/openbazaar-go | c6b76e6b7d4cb608f09c6f4dd5d62b97d5b1758d | afa185e7a929eb4ee659c53859a73b1dd53b3ae0 | refs/heads/master | 2021-06-27T06:24:54.645852 | 2017-09-09T03:21:30 | 2017-09-09T03:21:30 | 102,985,074 | 1 | 1 | null | 2017-09-09T21:08:08 | 2017-09-09T21:08:08 | null | UTF-8 | Python | false | false | 9,977 | py | import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class RefundModeratedTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[0]
bob = self.nodes[1]
charlie = self.nodes[2]
# generate some coins and send them to bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Address endpoint not found")
else:
raise TestFailure("RefundModeratedTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, 10)
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("RefundModeratedTest - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("RefundModeratedTest - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
listing_json["moderators"] = [moderatorId]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("RefundModeratedTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings.json"
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("RefundModeratedTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("RefundModeratedTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("RefundModeratedTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("RefundModeratedTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("RefundModeratedTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("RefundModeratedTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("RefundModeratedTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("RefundModeratedTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("RefundModeratedTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("RefundModeratedTest - FAIL: Alice incorrectly saved as unfunded")
time.sleep(5)
# alice refund order
api_url = alice["gateway_url"] + "ob/refund"
refund = {"orderId": orderId}
r = requests.post(api_url, data=json.dumps(refund, indent=4))
if r.status_code == 404:
raise TestFailure("RefundModeratedTest - FAIL: Refund endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("RefundModeratedTest - FAIL: Refund POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# alice check order refunded correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("RefundModeratedTest - FAIL: Alice failed to save as rejected")
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("RefundModeratedTest - FAIL: Alice failed to detect outgoing payment")
if "refundAddressTransaction" not in resp or resp["refundAddressTransaction"] == {}:
raise TestFailure("RefundModeratedTest - FAIL: Alice failed to detect refund payment")
# bob check order refunded correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("RefundModeratedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("RefundModeratedTest - FAIL: Bob failed to save as rejected")
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("RefundModeratedTest - FAIL: Bob failed to detect outgoing payment")
if "refundAddressTransaction" not in resp or resp["refundAddressTransaction"] == {}:
raise TestFailure("RefundModeratedTest - FAIL: Alice failed to detect refund payment")
self.send_bitcoin_cmd("generate", 1)
time.sleep(2)
# Check the funds moved into bob's wallet
api_url = bob["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
#unconfirmed = int(resp["unconfirmed"])
if confirmed <= 50 - payment_amount:
raise TestFailure("RefundModeratedTest - FAIL: Bob failed to receive the multisig payout")
else:
raise TestFailure("RefundModeratedTest - FAIL: Failed to query Bob's balance")
print("RefundModeratedTest - PASS")
if __name__ == '__main__':
print("Running RefundModeratedTest")
RefundModeratedTest().main(["--regtest", "--disableexchangerates"])
| [
"[email protected]"
]
| |
405bb3a76056bb80e4f596598b54d0359b63747a | 88ea7bf2bbc8ffba551e881df553ae5ceac70dd6 | /deblock/codes/models/models_sub/SR_vmaf_model.py | 4973c423cb7381c7b17760b39c19de290d15a8d1 | [
"Apache-2.0"
]
| permissive | zhouhuanxiang/repo-zhx | 2d1135bb2f925e051e1b0bcfc2ed53fb34ea51c5 | 76b577eea13130c60bf7bff8c486f51766128661 | refs/heads/main | 2023-06-10T02:56:17.978649 | 2021-06-29T02:35:57 | 2021-06-29T02:35:57 | 381,213,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,768 | py | import logging
from collections import OrderedDict
import os
import re
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import models.networks as networks
import models.lr_scheduler as lr_scheduler
from .base_model import BaseModel
from models.loss import CharbonnierLoss, ContextualLoss
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
import utils.util as util
from data.util import run_vmaf_pytorch, run_vmaf_pytorch_parallel
from xml.dom import minidom
import multiprocessing
import time
logger = logging.getLogger('base')
class SRVmafModel(BaseModel):
def __init__(self, opt):
super(SRVmafModel, self).__init__(opt)
if opt['dist']:
self.rank = torch.distributed.get_rank()
else:
self.rank = -1 # non dist training
train_opt = opt['train']
self.use_gpu = opt['network_G']['use_gpu']
self.use_gpu = True
self.real_IQA_only = train_opt['IQA_only']
# define network and load pretrained models
if self.use_gpu:
self.netG = networks.define_G(opt).to(self.device)
if opt['dist']:
self.netG = DistributedDataParallel(self.netG, device_ids=[torch.cuda.current_device()])
else:
self.netG = DataParallel(self.netG)
else:
self.netG = networks.define_G(opt)
if self.is_train:
if train_opt['IQA_weight']:
if train_opt['IQA_criterion'] == 'vmaf':
self.cri_IQA = nn.MSELoss()
self.l_IQA_w = train_opt['IQA_weight']
self.netI = networks.define_I(opt)
if opt['dist']:
pass
else:
self.netI = DataParallel(self.netI)
else:
logger.info('Remove IQA loss.')
self.cri_IQA = None
# print network
self.print_network()
self.load()
if self.is_train:
self.netG.train()
# pixel loss
loss_type = train_opt['pixel_criterion']
if loss_type == 'l1':
self.cri_pix = nn.L1Loss().to(self.device)
elif loss_type == 'l2':
self.cri_pix = nn.MSELoss().to(self.device)
elif loss_type == 'cb':
self.cri_pix = CharbonnierLoss().to(self.device)
else:
raise NotImplementedError('Loss type [{:s}] is not recognized.'.format(loss_type))
self.l_pix_w = train_opt['pixel_weight']
# CX loss
if train_opt['CX_weight']:
l_CX_type = train_opt['CX_criterion']
if l_CX_type == 'contextual_loss':
self.cri_CX = ContextualLoss()
else:
raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_CX_type))
self.l_CX_w = train_opt['CX_weight']
else:
logger.info('Remove CX loss.')
self.cri_CX = None
# ssim loss
if train_opt['ssim_weight']:
self.cri_ssim = train_opt['ssim_criterion']
self.l_ssim_w = train_opt['ssim_weight']
self.ssim_window = train_opt['ssim_window']
else:
logger.info('Remove ssim loss.')
self.cri_ssim = None
# load VGG perceptual loss if use CX loss
# if train_opt['CX_weight']:
# self.netF = networks.define_F(opt, use_bn=False).to(self.device)
# if opt['dist']:
# pass # do not need to use DistributedDataParallel for netF
# else:
# self.netF = DataParallel(self.netF)
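            # (Added note, not in the original file:) because the block above is commented out,
            # self.netF is never created; enabling CX_weight in the config would therefore make
            # the self.netF(...) calls in optimize_parameters fail, so the contextual loss path
            # appears to be effectively disabled in this version of the model.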
# optimizers of netG
wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
optim_params = []
for k, v in self.netG.named_parameters(): # can optimize for a part of the model
if v.requires_grad:
optim_params.append(v)
else:
if self.rank <= 0:
logger.warning('Params [{:s}] will not optimize.'.format(k))
self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'],
weight_decay=wd_G,
betas=(train_opt['beta1'], train_opt['beta2']))
self.optimizers.append(self.optimizer_G)
# optimizers of netI
if train_opt['IQA_weight']:
wd_I = train_opt['weight_decay_I'] if train_opt['weight_decay_I'] else 0
optim_params = []
for k, v in self.netI.named_parameters(): # can optimize for a part of the model
if v.requires_grad:
optim_params.append(v)
else:
if self.rank <= 0:
logger.warning('Params [{:s}] will not optimize.'.format(k))
self.optimizer_I = torch.optim.Adam(optim_params, lr=train_opt['lr_I'],
weight_decay=wd_I,
betas=(train_opt['beta1'], train_opt['beta2']))
self.optimizers.append(self.optimizer_I)
# schedulers
if train_opt['lr_scheme'] == 'MultiStepLR':
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.MultiStepLR_Restart(optimizer, train_opt['lr_steps'],
restarts=train_opt['restarts'],
weights=train_opt['restart_weights'],
gamma=train_opt['lr_gamma'],
clear_state=train_opt['clear_state']))
elif train_opt['lr_scheme'] == 'CosineAnnealingLR_Restart':
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.CosineAnnealingLR_Restart(
optimizer, train_opt['T_period'], eta_min=train_opt['eta_min'],
restarts=train_opt['restarts'], weights=train_opt['restart_weights']))
else:
raise NotImplementedError('MultiStepLR learning rate scheme is enough.')
self.log_dict = OrderedDict()
self.set_requires_grad(self.netG, False)
self.set_requires_grad(self.netI, False)
def feed_data(self, data, need_GT=True):
if self.use_gpu:
self.var_L = data['LQ'].to(self.device) # LQ
if need_GT:
self.real_H = data['GT'].to(self.device) # GT
if self.cri_IQA and ('IQA' in data.keys()):
self.real_IQA = data['IQA'].float().to(self.device) # IQA
else:
self.var_L = data['LQ'] # LQ
def optimize_parameters(self, step):
#init loss
l_pix = torch.zeros(1)
l_CX = torch.zeros(1)
l_ssim = torch.zeros(1)
l_g_IQA = torch.zeros(1)
l_i_IQA = torch.zeros(1)
if self.cri_IQA and self.real_IQA_only:
# pretrain netI
self.set_requires_grad(self.netI, True)
self.optimizer_I.zero_grad()
iqa = self.netI(self.var_L, self.real_H).squeeze()
l_i_IQA = self.l_IQA_w * self.cri_IQA(iqa, self.real_IQA)
l_i_IQA.backward()
self.optimizer_I.step()
elif self.cri_IQA and not self.real_IQA_only:
# train netG and netI together
# optimize netG
self.set_requires_grad(self.netG, True)
self.optimizer_G.zero_grad()
# forward
self.fake_H = self.netG(self.var_L)
l_g_total = 0
l_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.real_H)
l_g_total += l_pix
if self.cri_CX:
real_fea = self.netF(self.real_H)
fake_fea = self.netF(self.fake_H)
l_CX = self.l_CX_w * self.cri_CX(real_fea, fake_fea)
l_g_total += l_CX
if self.cri_ssim:
if self.cri_ssim == 'ssim':
ssim_val = ssim(self.fake_H, self.real_H, win_size=self.ssim_window, data_range=1.0, size_average=True)
elif self.cri_ssim == 'ms-ssim':
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363]).to(self.fake_H.device, dtype=self.fake_H.dtype)
ssim_val = ms_ssim(self.fake_H, self.real_H, win_size=self.ssim_window, data_range=1.0, size_average=True, weights=weights)
l_ssim = self.l_ssim_w * (1 - ssim_val)
l_g_total += l_ssim
if self.cri_IQA:
l_g_IQA = self.l_IQA_w * (1.0 - torch.mean(self.netI(self.fake_H, self.real_H)))
l_g_total += l_g_IQA
l_g_total.backward()
self.optimizer_G.step()
self.set_requires_grad(self.netG, False)
# optimize netI
self.set_requires_grad(self.netI, True)
self.optimizer_I.zero_grad()
self.fake_H_detatch = self.fake_H.detach()
# t1 = time.time()
# real_IQA1 = run_vmaf_pytorch(self.fake_H_detatch, self.real_H)
# t2 = time.time()
real_IQA2 = run_vmaf_pytorch_parallel(self.fake_H_detatch, self.real_H)
# t3 = time.time()
# print(real_IQA1)
# print(real_IQA2)
# print(t2 - t1, t3 - t2, '\n')
real_IQA = real_IQA2.to(self.device)
iqa = self.netI(self.fake_H_detatch, self.real_H).squeeze()
l_i_IQA = self.cri_IQA(iqa, real_IQA)
l_i_IQA.backward()
self.optimizer_I.step()
self.set_requires_grad(self.netI, False)
# set log
self.log_dict['l_pix'] = l_pix.item()
if self.cri_CX:
self.log_dict['l_CX'] = l_CX.item()
if self.cri_ssim:
self.log_dict['l_ssim'] = l_ssim.item()
if self.cri_IQA:
self.log_dict['l_g_IQA_scale'] = l_g_IQA.item()
self.log_dict['l_g_IQA'] = l_g_IQA.item() / self.l_IQA_w
self.log_dict['l_i_IQA'] = l_i_IQA.item()
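        # (Added note, not in the original file:) optimize_parameters alternates two updates:
        # netG is stepped against the frozen IQA predictor netI, and netI is then re-fitted to
        # VMAF scores computed on the detached restored frames, so the learned quality metric
        # keeps tracking the generator as it improves (a GAN-like alternating scheme).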
def test(self):
self.netG.eval()
with torch.no_grad():
self.fake_H = self.netG(self.var_L)
self.netG.train()
def test_x8(self):
# from https://github.com/thstkdgus35/EDSR-PyTorch
self.netG.eval()
def _transform(v, op):
# if self.precision != 'single': v = v.float()
v2np = v.data.cpu().numpy()
if op == 'v':
tfnp = v2np[:, :, :, ::-1].copy()
elif op == 'h':
tfnp = v2np[:, :, ::-1, :].copy()
elif op == 't':
tfnp = v2np.transpose((0, 1, 3, 2)).copy()
ret = torch.Tensor(tfnp).to(self.device)
# if self.precision == 'half': ret = ret.half()
return ret
lr_list = [self.var_L]
for tf in 'v', 'h', 't':
lr_list.extend([_transform(t, tf) for t in lr_list])
with torch.no_grad():
sr_list = [self.netG(aug) for aug in lr_list]
for i in range(len(sr_list)):
if i > 3:
sr_list[i] = _transform(sr_list[i], 't')
if i % 4 > 1:
sr_list[i] = _transform(sr_list[i], 'h')
if (i % 4) % 2 == 1:
sr_list[i] = _transform(sr_list[i], 'v')
output_cat = torch.cat(sr_list, dim=0)
self.fake_H = output_cat.mean(dim=0, keepdim=True)
self.netG.train()
def get_current_log(self):
return self.log_dict
def get_current_visuals(self, need_GT=True):
out_dict = OrderedDict()
if self.use_gpu:
out_dict['LQ'] = self.var_L.detach()[0].float().cpu()
out_dict['rlt'] = self.fake_H.detach()[0].float().cpu()
else:
out_dict['LQ'] = self.var_L.detach()[0].float()
out_dict['rlt'] = self.fake_H.detach()[0].float()
if need_GT:
out_dict['GT'] = self.real_H.detach()[0].float().cpu()
return out_dict
def print_network(self):
s, n = self.get_network_description(self.netG)
if isinstance(self.netG, nn.DataParallel) or isinstance(self.netG, DistributedDataParallel):
net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,
self.netG.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netG.__class__.__name__)
if self.rank <= 0:
logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
def load(self):
load_path_G = self.opt['path']['pretrain_model_G']
if load_path_G is not None:
logger.info('Loading model for G [{:s}] ...'.format(load_path_G))
self.load_network(load_path_G, self.netG, self.opt['path']['strict_load'])
load_path_I = self.opt['path']['pretrain_model_I']
if load_path_I is not None:
logger.info('Loading model for I [{:s}] ...'.format(load_path_I))
self.load_network(load_path_I, self.netI, self.opt['path']['strict_load'])
def save(self, iter_label):
self.save_network(self.netG, 'G', iter_label)
self.save_network(self.netI, 'I', iter_label) | [
"[email protected]"
]
| |
e69edb98dd34169b7e5de559b90da43214d3dfd4 | 69f5b6defd7d2dc1664799bcaa5fad3fb1af4c7f | /script/2010_paper/prepare_data.py | c29e789e9a56f3fbc1dc50eb6eddb698ed957348 | []
| no_license | leelabcnbc/unsup-pytorch | c48ff0232b4baf5d50b406d696a4f460e8521b90 | 35e8aa2ef687c2b32a5838e57ea07babe0c1abbb | refs/heads/master | 2020-03-18T18:22:33.342249 | 2018-09-07T17:38:17 | 2018-09-07T17:38:17 | 135,088,622 | 2 | 0 | null | 2018-06-20T02:12:55 | 2018-05-27T23:22:57 | Python | UTF-8 | Python | false | false | 2,493 | py | """this file prepares the data for the 2010 NIPS paper
of conv PSD
Koray Kavukcuoglu, Pierre Sermanet, Y-Lan Boureau, Karol Gregor, Michaël Mathieu, Yann LeCun:
Learning Convolutional Feature Hierarchies for Visual Recognition. NIPS 2010: 1090-1098
I will prepare 1000000 25x25 patches, which should be sufficient.
"""
import os
import numpy as np
import h5py
from torch.utils.serialization.read_lua_file import load_lua
from unsup import dir_dictionary
def load_raw_data():
raw_data = load_lua(os.path.join(dir_dictionary['debug_reference'],
'tr-berkeley-N5K-M56x56-lcn.bin'))
raw_data = raw_data.numpy()
return raw_data
def sample_from_raw_data(std_threshold=0.2, seed=0, ddof=1,
num_im=1000000):
# this ddof stuff really should not matter.
# here I just want to follow what's done in the original code as much as possible.
raw_data = load_raw_data()
assert raw_data.shape == (5000, 56, 56)
rng_state = np.random.RandomState(seed=seed)
# for loop
collected = 0
all_imgs = []
all_img_idx = []
all_r_idx = []
all_c_idx = []
while collected < num_im:
if collected % 10000 == 0:
print(collected)
# randomly select a image
im_idx = rng_state.randint(5000)
# then randomly select a patch
r_idx, c_idx = rng_state.randint(56 - 25 + 1, size=(2,))
im_candidate = raw_data[im_idx, np.newaxis, r_idx:r_idx + 25, c_idx:c_idx + 25]
if np.std(im_candidate, ddof=ddof) <= std_threshold:
continue
else:
collected += 1
# save as float to save space
all_imgs.append(im_candidate.astype(np.float32))
all_img_idx.append(im_idx)
all_r_idx.append(r_idx)
all_c_idx.append(c_idx)
return {
'raw_data': raw_data,
'data': np.asarray(all_imgs),
'idx_img': np.asarray(all_img_idx),
'idx_r': np.asarray(all_r_idx),
'idx_c': np.asarray(all_c_idx),
}
if __name__ == '__main__':
data_dict = sample_from_raw_data()
    # save as hdf5
with h5py.File(os.path.join(os.path.split(__file__)[0], 'data.hdf5')) as f:
if 'data' not in f:
# 2.4G vs 2.2G. not worth it.
# f.create_dataset('data', data=a, compression='gzip')
for k, v in data_dict.items():
print(k, v.shape)
f.create_dataset(k, data=v)
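# Hypothetical read-back sketch (added; not part of the original script):
#   with h5py.File('data.hdf5', 'r') as f:
#       patches = f['data'][:16]      # (16, 1, 25, 25) float32 patches
#       src_idx = f['idx_img'][:16]   # index of the 56x56 source image for each patch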
| [
"[email protected]"
]
| |
0e64bc4b8ddf9d83ec635386f2315eb33db3939d | 85c426913d63773c4802a4a3c354df909030654b | /python/PF/ABCBank_CreditCard_System_List/iCard/Read_Write_Reward_Scheme.py | d35d948984cc96537c38eeaaff32b5488d9658f5 | []
| no_license | SensehacK/playgrounds | 17bf2a3133db6c0cafe185c4cc2c7b59862980aa | 3decd550cdb6034db8b497051acaaec8221073aa | refs/heads/master | 2023-05-11T20:05:31.680168 | 2023-04-30T00:01:58 | 2023-04-30T00:01:58 | 159,632,542 | 1 | 0 | null | 2023-03-05T11:34:34 | 2018-11-29T08:27:53 | Python | UTF-8 | Python | false | false | 1,561 | py | import csv
'''This function fetches the details of all reward schemes from SchemeDetails CSV file
in 3 lists and returns them as tuple of lists.
Input: None; the CSV path is fixed inside the function (..\\SuppliedFiles\\SchemeDetails.csv).
Output: A tuple of lists i.e. card type list, min transaction amount list and associated reward points list.
'''
def get_reward_scheme_details():
file_pointer=open("..\\SuppliedFiles\\SchemeDetails.csv","r")
reward_scheme_details=csv.reader(file_pointer)
card_type_list=[]
min_trasaction_amt_list=[]
reward_point_list=[]
for reward_detail in reward_scheme_details:
card_type_list.append(reward_detail[0])
min_trasaction_amt_list.append(reward_detail[1])
reward_point_list.append(reward_detail[2])
file_pointer.close()
return (card_type_list,min_trasaction_amt_list,reward_point_list)
'''This function updates the details of reward schemes in SchemeDetails CSV file which
are received as parameters in form of lists.
Input: Three lists - card type list, min transaction amount list and associated reward points list.
Output: Updates the CSV file.
'''
def set_reward_scheme_details(card_type_list,min_trasaction_amt_list,reward_point_list):
f=open("..\\SuppliedFiles\\SchemeDetails.csv","w")
f.write("")
f.close()
for i in range(len(card_type_list)):
f=open("..\\SuppliedFiles\\SchemeDetails.csv","a")
f.write(str(card_type_list[i])+","+str(min_trasaction_amt_list[i])+","+str(reward_point_list[i])+'\n')
f.close()
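# Hypothetical round-trip sketch (added; not part of the original file):
#   cards, min_amts, points = get_reward_scheme_details()
#   points[0] = str(int(points[0]) + 5)          # bump the reward points of the first scheme
#   set_reward_scheme_details(cards, min_amts, points)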
| [
"[email protected]"
]
| |
807997fdb4c00db45d521df97a537eeef8ba9932 | 246e9200a834261eebcf1aaa54da5080981a24ea | /hackerrank/algorithms/warmups/time-conversion.py | f99051f21c296ca29067abeae81b12d14b231abc | []
| no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | s = raw_input()
time = s[:-2]
if s[-2:] == "AM":
if s[:2] == "12":
time = "00" + time[2:]
else:
if s[:2] != "12":
time = str(int(time[:2]) + 12) + time[2:]
print time | [
"[email protected]"
]
| |
fb23db33b5e66fcfe17c61e18a6d04be312b9c1f | 063ab6c256b5c60406c7d4ee6820dbbf8192efa9 | /ros_ws/build/learning_ros_external_pkgs_noetic/baxter_simulator/baxter_sim_examples/catkin_generated/pkg.develspace.context.pc.py | 7b7827d082d8236e15fb3543a5214218394f47d0 | []
| no_license | Iris-ye233/final-project_revised | d34aa55f6bba2f5b73b4f3a255f5041bdf7c71fc | 8ab78592b3fe79c8fa359cc877a52192784d2152 | refs/heads/master | 2023-05-13T18:11:04.802393 | 2021-06-03T16:27:01 | 2021-06-03T16:27:01 | 371,312,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;rospack;baxter_core_msgs;baxter_gazebo;baxter_interface;baxter_tools;baxter_tools;gazebo_ros;gazebo_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_sim_examples"
PROJECT_SPACE_DIR = "/home/yedi/ros_ws/devel"
PROJECT_VERSION = "1.2.12"
| [
"[email protected]"
]
| |
33e9f7059135f1c5a0be98fa9d79e928bea92868 | f8df1bff1dccbc1b4cf67cb7765ce75b17777aa3 | /app/glapp/shane/openglpanel.py | 0d6b28812f848688b60359540cd5ecc798c316ea | []
| no_license | juancq/py-interactive-genetic-algorithm | d74048338da283acd9545aab00f435b5c500d669 | d1b388e001232040e966fd3077722ed2560d1d9e | refs/heads/master | 2021-01-10T20:57:58.446987 | 2017-05-26T07:42:10 | 2017-05-26T07:42:10 | 4,923 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import wx
from gui import feedbackpanel
class OpenGLPanel(feedbackpanel.FeedbackPanel):
def __init__(self, parent, id, data = None, tick = 100, size = (250, 250)):
feedbackpanel.FeedbackPanel.__init__(self, parent, id, size = size)
import openglcanvas
canvas = openglcanvas.IGAGLCanvas(self, data, tick = tick, size = size)
self.sizer.Add(canvas, 1, wx.EXPAND)
self.Layout()
#------------------------------------------#
| [
"juan@dragonite.(none)"
]
| juan@dragonite.(none) |
aaa10d917c12333b2b0b5f49def8cf9e4fdbdc10 | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /AdventOfCode/21/day15.py | ea974830ec31ba50613e0a88d6ea0b7888d7b660 | []
| no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #!/usr/bin/env python3
import sys
from heapq import *
INF = 1<<30
def dijkstra(Map, i0,j0, i1,j1):
S = len(Map)
Dist = [[INF]*S for _ in range(S)]
Dist[i0][j0] = 0
Q = [(0,i0,j0)]
while Q:
d,i,j = heappop(Q)
if i==i1 and j==j1:
break
if d>Dist[i][j]:
continue
for vi,vj in ((i-1,j),(i+1,j),(i,j-1),(i,j+1)):
if 0<=vi<S and 0<=vj<S and Dist[i][j]+Map[vi][vj]<Dist[vi][vj]:
Dist[vi][vj] = Dist[i][j]+Map[vi][vj]
heappush(Q, (Dist[vi][vj],vi,vj))
return Dist[i1][j1]
def main():
# Part 1
Map = [list(map(int, L.strip())) for L in sys.stdin.readlines()]
S = len(Map)
print(dijkstra(Map, 0,0, S-1,S-1))
# Part 2
S5 = 5*S
Map5 = [[(Map[i%S][j%S]+i//S+j//S-1)%9+1 for j in range(S5)] for i in range(S5)]
print(dijkstra(Map5, 0,0, S5-1,S5-1))
main()
| [
"[email protected]"
]
| |
7dc59258a5b3fc8644e0f31266ec92fa17934dde | 7f5a9a470f9a89108fca0280018b0563e9a0207a | /wykres_masy_calkowitej_od_czasu_box.py | 74ce9195d6999085a995c02868eb15e0ec158cff | []
| no_license | b-doroszuk/wykresy_kruszarki | dffa18b1b4856c7a29dfd4039960676cd5c40be0 | e68d22f034bd1c866393c0b0edacdebace393dd3 | refs/heads/main | 2023-06-02T04:39:07.238948 | 2021-06-19T21:00:37 | 2021-06-19T21:00:37 | 378,262,957 | 0 | 0 | null | 2021-06-18T20:55:05 | 2021-06-18T20:55:04 | null | UTF-8 | Python | false | false | 6,882 | py | from edempy import Deck
import numpy as np
from edempy import BoxBin, CylinderBin
import matplotlib.pyplot as plt
import matplotlib; matplotlib.use("TkAgg")
from time import strftime
def get_mass_time_box(time_step: int, deck, L_boxbin, R_boxbin):
    # convert the time step index into the simulation time
czas = deck.timestepKeys[time_step]
    # variables that accumulate the mass of each material
mass_lupek = 0
mass_piaskowiec = 0
mass_dolomit = 0
mass_dummy_lupek = 0
mass_dummy_piaskowiec = 0
mass_dummy_dolomit = 0
"""LUPEK"""
binned_ids_L0_lupek = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[0].getIds(),
deck.timestep[time_step].particle[0].getPositions())
binned_ids_R0_lupek = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[0].getIds(),
deck.timestep[time_step].particle[0].getPositions())
# dummy lupek
binned_ids_L0_dummy_lupek = L_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[3].getIds(),
deck.timestep[time_step].particle[3].getPositions())
binned_ids_R0_dummy_lupek = R_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[3].getIds(),
deck.timestep[time_step].particle[3].getPositions())
# lupek loop
for i in binned_ids_L0_lupek:
mass_lupek += deck.timestep[time_step].particle[0].getMass(id=i)
for i in binned_ids_R0_lupek:
mass_lupek += deck.timestep[time_step].particle[0].getMass(id=i)
# dummy lupek loop
for i in binned_ids_L0_dummy_lupek:
mass_dummy_lupek += deck.timestep[time_step].particle[3].getMass(id=i)
for i in binned_ids_R0_dummy_lupek:
mass_dummy_lupek += deck.timestep[time_step].particle[3].getMass(id=i)
"""PIASEK"""
binned_ids_L1_piaskowiec = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[1].getIds(),
deck.timestep[time_step].particle[1].getPositions())
binned_ids_R1_piaskowiec = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[1].getIds(),
deck.timestep[time_step].particle[1].getPositions())
binned_ids_L0_dummy_piaskowiec = L_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[4].getIds(),
deck.timestep[time_step].particle[4].getPositions())
binned_ids_R0_dummy_piaskowiec = R_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[4].getIds(),
deck.timestep[time_step].particle[4].getPositions())
# piaskowiec loop
for i in binned_ids_L1_piaskowiec:
mass_piaskowiec += deck.timestep[time_step].particle[1].getMass(id=i)
for i in binned_ids_R1_piaskowiec:
mass_piaskowiec += deck.timestep[time_step].particle[1].getMass(id=i)
# dummy piaskowiec loop
for i in binned_ids_L0_dummy_piaskowiec:
mass_dummy_piaskowiec += deck.timestep[time_step].particle[4].getMass(id=i)
for i in binned_ids_R0_dummy_piaskowiec:
mass_dummy_piaskowiec += deck.timestep[time_step].particle[4].getMass(id=i)
"""DOLOMIT"""
binned_ids_L2_dolomit = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[2].getIds(),
deck.timestep[time_step].particle[2].getPositions())
binned_ids_R2_dolomit = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[2].getIds(),
deck.timestep[time_step].particle[2].getPositions())
binned_ids_L0_dummy_dolomit = L_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[5].getIds(),
deck.timestep[time_step].particle[5].getPositions())
binned_ids_R0_dummy_dolomit = R_boxbin.getBinnedObjects(
deck.timestep[time_step].particle[5].getIds(),
deck.timestep[time_step].particle[5].getPositions())
# dolomit loop
for i in binned_ids_L2_dolomit:
mass_dolomit += deck.timestep[time_step].particle[2].getMass(id=i)
for i in binned_ids_R2_dolomit:
mass_dolomit += deck.timestep[time_step].particle[2].getMass(id=i)
# dummy dolomit loop
for i in binned_ids_L0_dummy_dolomit:
mass_dummy_dolomit += deck.timestep[time_step].particle[5].getMass(id=i)
for i in binned_ids_R0_dummy_dolomit:
mass_dummy_dolomit += deck.timestep[time_step].particle[5].getMass(id=i)
#print()
#print(mass_lupek, mass_piaskowiec, mass_dolomit)
#print(mass_dummy_lupek, mass_dummy_piaskowiec, mass_dummy_dolomit)
#print()
rock_mass = mass_lupek + mass_piaskowiec + mass_dolomit
dummy_mass = mass_dummy_lupek + mass_dummy_piaskowiec + mass_dummy_dolomit
total_mass = rock_mass + dummy_mass
#print(rock_mass, dummy_mass)
    # returns the total mass and the time in seconds !!
return total_mass, czas
def main():
"""
    input parameters:
    interval_table = [first time step, last time step, step interval],
    filepath = path to the simulation file
    L_boxbin = box-bin dimensions, e.g. BoxBin([0, -0.8, -0.75], 3, 0.25, 1.5)
    R_boxbin = same as above
    is_export = whether to export to txt (True / False)
    is_draw = whether to draw the plot (True / False)
    is_save = whether to save the plot (True / False)
    BELOW TIME STEP 68 THE SCRIPT THROWS AN ERROR !!!
"""
interval_table = [68, 260, 10]
filepath = "C:\\Users\\Jakub\\PycharmProjects\\test2\\testownik11_prof_Robert_Krol\\projekt_2\\POLKOWICE_etap_2\\simulation_0\\simulation_0.dem"
L_boxbin = BoxBin([0, -0.8, -0.75], 3, 0.25, 1.5)
R_boxbin = BoxBin([0, 0.8, -0.75], 3, 0.25, 1.5)
is_draw = True
is_save = False
deck = Deck(filepath)
mass_list = []
time = []
for i in range(interval_table[0], interval_table[1], interval_table[2]):
print("krok czasowy: ", i)
total_mass, czas = get_mass_time_box(time_step=i, deck=deck, L_boxbin=L_boxbin, R_boxbin=R_boxbin)
mass_list.append(round(total_mass, 2))
time.append(round(float(czas), 2))
fig = plt.figure(figsize=(7, 6))
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # draw the plot
axes.plot(time, mass_list)
axes.set_xlabel("czas [s]")
axes.set_ylabel("masa [kg]")
axes.set_title("Left BinBox")
if is_save:
plt.savefig(f"Left_BinBox_{strftime('%m_%d_%Y-%H_%M_%S')}.png")
if is_draw:
plt.show()
if __name__ == '__main__':
import sys
sys.exit(main())
| [
"[email protected]"
]
| |
cdd00f1aee1b6099e9869021c75ba1cf9dc318d7 | e6913abba3f5cfd396e62c7e514674dbcb3631bb | /vidfeat/_vertical_boxed.py | 135b8fb34c9ab722a5e7dcc6f9a0b18a65ef495b | []
| no_license | bwhite/vidfeat | f98b8511ad13347037c60d7026725a6149851a81 | c9e7c6a02b41951fc93f0cefe0c78b24f5731f59 | refs/heads/master | 2016-09-06T03:00:58.791493 | 2012-06-19T21:54:01 | 2012-06-19T21:54:01 | 1,878,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | import vidfeat
import imfeat
import sklearn.svm
class VerticalBoxedFrameFeature(vidfeat.ClassifierFrameFeature):
feature = imfeat.BlackBars()
def __init__(self, *args, **kw):
classifier = sklearn.svm.LinearSVC(class_weight='auto')
self.svm_parameters = [{'C': [10 ** x for x in range(0, 12, 3)]}]
super(VerticalBoxedFrameFeature, self).__init__(classifier=classifier,
*args, **kw)
def _feature(self, image):
return self.feature(image)
if __name__ == '__main__':
vidfeat._frame_feature_main('vertical_boxed', vidfeat.VerticalBoxedFrameFeature, remove_bars=True)
| [
"[email protected]"
]
| |
8ced806cfdc062b9eed27d8c280a64109ff72856 | b87f66b13293782321e20c39aebc05defd8d4b48 | /mpi/mpi_merge.py | edf623650edaf9d1fc58f8f3c85293cfea3b2539 | []
| no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | #!/usr/bin/env python
"""
Merge several HDF5 or ASCII files.
Merge all files that have a common (given) pattern in the name.
The patterns may be numbers and/or characters. Example: 'YYYYMMDD',
where YYYY is year, MM is month and DD is day.
"""
# Fernando <[email protected]>
# November 2, 2012
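# Hypothetical invocation sketch (added; the file names below are made up, the -p and -o flags
# are the options defined by the argparse block further down):
#   mpiexec -n 4 python mpi_merge.py "/data/ice_*_????????.h5" -p "_\d\d\d\d\d\d\d\d" -o all_
# Each MPI rank gets a disjoint subset of the matched date patterns and merges its files
# independently, so the work is embarrassingly parallel.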
import os
import sys
import re
import numpy as np
import tables as tb
import argparse as ap
from mpi4py import MPI
# parse command line arguments
parser = ap.ArgumentParser()
parser.add_argument('files', nargs='+', help='HDF5 2D file[s] to merge')
parser.add_argument('-p', dest='pattern', default="_\d\d\d\d\d\d\d\d",
help="pattern to match in the file names, default '_\d\d\d\d\d\d\d\d'")
parser.add_argument('-o', dest='prefix', default='all_',
help='prefix of output file name, default all_')
parser.add_argument('-s', dest='suffix', default='',
help='suffix of output file name, default none')
parser.add_argument('-n', dest='count', action='store_const', const=True, \
default=False, help='count number of tasks and exit, default no')
args = parser.parse_args()
def close_files():
for fid in tb.file._open_files.values():
fid.close()
def get_files_to_merge(files, pattern):
tomerge = {}
patterns = np.unique(re.findall(pattern, ' '.join(files)))
for s in patterns:
tomerge[s] = [f for f in files if s in f]
return tomerge
def get_fname_out(stem, fnamein, pref='', suf=''):
path = os.path.split(fnamein)[0]
return os.path.join(path, ''.join([pref, stem, suf, '.h5']))
def get_shape_out(files):
nrows = 0
for fname in files:
f = tb.openFile(fname, 'r')
data = f.getNode('/data')
nrow, ncols = data.shape
nrows += nrow
f.close()
return (nrows, ncols)
def merge_files(fname, shape, files):
print 'merging:\n', files
print 'into:\n', fname, '...'
fout = tb.openFile(fname, 'w')
nrows, ncols = shape
atom = tb.Atom.from_type('float64')
filters = tb.Filters(complib='zlib', complevel=9)
dout = fout.createEArray('/', 'data', atom=atom,
shape=(0, ncols), filters=filters)
for fnamein in files:
fin = tb.openFile(fnamein, 'r')
data = fin.getNode('/data')
dout.append(data[:])
close_files()
print 'done.'
def merge_all(tomerge, pref='', suf=''):
for patt, fnames in tomerge.items():
fnameout = get_fname_out(patt, fnames[0], pref, suf)
shape = get_shape_out(fnames)
merge_files(fnameout, shape, fnames)
# MPI functions
def simple_partitioning(length, num_procs):
sublengths = [length/num_procs]*num_procs
for i in range(length % num_procs): # treatment of remainder
sublengths[i] += 1
return sublengths
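# (Added example:) under Python 2 integer division, simple_partitioning(10, 4) -> [3, 3, 2, 2]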
def get_subproblem_input_args(input_args, my_rank, num_procs):
sub_ns = simple_partitioning(len(input_args), num_procs)
my_offset = sum(sub_ns[:my_rank])
my_input_args = input_args[my_offset:my_offset+sub_ns[my_rank]]
return my_input_args
def program_to_run(string):
if '.py' in string:
run = 'python '
else:
run = '' # './'
return run
#-------------
# If needed, uses `glob` to avoid Unix limitation on number of cmd args.
# To use it, instead of _file names_ pass a _str_ with "dir + file pattern".
if len(args.files) > 1:
files = args.files
else:
from glob import glob
files = glob(args.files[0])
pattern = str(args.pattern)
pref = args.prefix
suf = args.suffix
count = args.count
#path, _ = os.path.split(files[0]) # path of first file
print 'pattern to match:', pattern
print 'total files:', len(files)
comm = MPI.COMM_WORLD
my_rank = comm.Get_rank()
num_procs = comm.Get_size()
tomerge = get_files_to_merge(files, pattern)
if count: print 'number of tasks:', len(tomerge.items()); sys.exit()
my_tomerge = get_subproblem_input_args(tomerge.items(), my_rank, num_procs)
merge_all(dict(my_tomerge), pref=pref, suf=suf)
close_files()
| [
"[email protected]"
]
| |
1d983087ace0527d39672656d8b1e6c4526ebcfd | 7838473d3688eb89b598198440c6769ef56701a6 | /pyvsphere/vmware_vswitch_facts.py | ee53f751dd107d27cc9efcaf6e8591706b9b6a56 | []
| no_license | Maliaotw/pyvsphere | 58029c4b1fad0667d87f9a36434f67209f9180ee | 7069cf566dae8d35c2770050ccb71342ed5d3d8e | refs/heads/main | 2023-03-03T14:05:19.526387 | 2021-02-09T16:34:07 | 2021-02-09T16:34:07 | 317,414,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vswitch_facts
short_description: Gathers facts about an ESXi host's vswitch configurations
description:
- This module can be used to gather facts about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
- Other tools like esxcli might show the number of ports as 1536 or 5632.
- See U(https://kb.vmware.com/s/article/2064511) for more details.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Facts about vswitch belonging to every ESXi host systems under this cluster will be returned.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vswitch facts about all ESXi Host in given Cluster
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_hosts_vswitch_facts
- name: Gather firewall facts about ESXi Host
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: all_vswitch_facts
'''
RETURN = r'''
hosts_vswitch_facts:
description: metadata about host's vswitch configuration
returned: on success
type: dict
sample: {
"10.76.33.218": {
"vSwitch0": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic0"
]
},
"vSwitch_0011": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic2",
"vmnic1"
]
},
},
}
'''
from .mymodule import AnsibleModule
from .vcenter import VcenterConfig
from ansible.modules.cloud.vmware.vmware_vswitch_info import VswitchInfoManager
def vmware_vswitch_facts(VcenterConfig: VcenterConfig,esxi_hostname):
"""Main"""
argument_spec = dict(
cluster_name=False,
esxi_hostname=esxi_hostname,
)
argument_spec.update(**VcenterConfig.as_dict())
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
vmware_vswitch_mgr = VswitchInfoManager(module)
module.exit_json(changed=False, hosts_vswitch_facts=vmware_vswitch_mgr.gather_vswitch_info())
module.get_info(vmware_vswitch_mgr.gather_vswitch_info())
| [
"[email protected]"
]
| |
15717a292bdc89415c16f5ff81de7542e336cd37 | 3b1a13edca51449f015086acad1e5a51ae89cba5 | /lang/py/pylib/10/threading/threading_rlock.py | c9a98dd6693b1555f9e9282f076052e573461c52 | [
"MIT"
]
| permissive | ch1huizong/study | 9000042d3ad13b06426b03423ee335aee15fd061 | d3b7c43fc03484839f8bbc7d0d056df1b1eba9cd | refs/heads/master | 2023-09-04T12:45:48.125715 | 2023-08-17T02:49:40 | 2023-08-17T02:49:40 | 162,539,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python
# encoding: UTF-8
import threading
lock=threading.RLock()
print'First try:',lock.acquire()
print'Second try:',lock.acquire(0)
| [
"[email protected]"
]
| |
8d4a44a59ad433fc8694c581fb47349a62aad711 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/o-s.py | d55c95b9d3da7707d87e35a413deb81b87e0e6ff | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'o-S':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
b4bb3b9601d9f2d0665fe916401fbf091bea4e6e | e41fc34b9d3d5aa94e5c6b843ee35fc1280ed6b5 | /app/settings/config_control.py | f59173df4c2a68fbdf9dd2416dc8f00ea26d6ec6 | []
| no_license | cleverbotdev/my_university_group_site | b69b03185ddbb6fca763f1394851e031cb3e304e | b4d4079dc2f942634f63b96e799050f6191d5aad | refs/heads/master | 2023-07-20T06:48:33.340436 | 2021-03-04T10:14:21 | 2021-03-04T10:14:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | # -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import chdir
from app.settings.config import HOME_DIR, SETTINGS_FILE, EXAMPLE_SETTINGS_FILE
chdir(HOME_DIR)
def create_new_settings(config_path, example_settings_filename):
from configparser import ConfigParser
example_cfg = ConfigParser(allow_no_value=True, converters={'list': lambda x: [i.strip() for i in x.split(',')]})
example_cfg.read(example_settings_filename)
user_input_tag = example_cfg.get("settings_ini_file", "user_input_tag")
print("Config file not found!")
print(f"I am trying to create {config_path}...")
print(f"I am coping {example_settings_filename} and rename this to {config_path}")
with open(f"{example_settings_filename}", "r", encoding="utf-8") as file, open(config_path, 'w',
encoding='utf-8') as wtiten_file:
print(
            '\n'.join([(''.join([i + input(f"\nPlease enter {i.replace('=', '').strip()} for your program:\n")
for i in filter(bool, string.split(user_input_tag))])
if user_input_tag in string and not string.startswith("user_input_tag") else string)
for string in iter(file.read().split('\n'))]), file=wtiten_file)
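    # (Added note, not in the original file:) the nested comprehension above copies the example
    # settings file line by line; any line containing the user_input_tag marker is rebuilt by
    # prompting the user for a value for each tagged option, while every other line (including
    # the line that defines user_input_tag itself) is written out unchanged.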
def create_cfg(config_path='',
example_settings_filename=''):
import sys
from configparser import ConfigParser
from os.path import exists
if not exists(config_path) and not exists(example_settings_filename):
print(f"Config file ({config_path}) not found! Exiting!")
sys.exit(0)
if not exists(config_path):
create_new_settings(config_path, example_settings_filename)
if exists(config_path):
cfg = ConfigParser(allow_no_value=True, converters={'list': lambda x: [i.strip() for i in x.split(',')]})
cfg.read(config_path)
else:
print("Config not found! Exiting!")
print(f"I can't create {SETTINGS_FILE}...")
print(f"You can try cloning {EXAMPLE_SETTINGS_FILE} to {SETTINGS_FILE} and edit params into this")
sys.exit(0)
return cfg
def save_change_in_cinfig_file(cfg=None):
if not cfg:
cfg = create_cfg(SETTINGS_FILE, EXAMPLE_SETTINGS_FILE)
with open(SETTINGS_FILE, "w") as config_file:
cfg.write(config_file)
return cfg
if __name__ == '__main__':
cfg = create_cfg(SETTINGS_FILE, EXAMPLE_SETTINGS_FILE)
| [
"[email protected]"
]
| |
e90effd3bbfd10d2539c58e07eaaef4ea30eb3a1 | 0b63f38c7fb468e478e5be82c685de1b7ddb87e5 | /meiduo/meiduo_mall/scripts/generate_detail_html.py | c1e6f9a5b13f24b3617489fe30fddf118e1edd65 | [
"MIT"
]
| permissive | Highsir/Simplestore | fcf5ef81a754604c0953a3c1433a7bc09290c121 | 5fc4d9930b0cd1e115f8c6ebf51cd9e28922d263 | refs/heads/master | 2020-09-01T07:55:45.362457 | 2019-11-01T04:55:48 | 2019-11-01T04:55:48 | 218,913,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # 1. 添加导包路径 (把 scripts 的上一级目录添加到导包路径sys.path)
import sys
sys.path.insert(0, '../')
# 2. Set the Django settings module and initialize the Django environment
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meiduo_mall.settings.dev")
django.setup()
# 3. Imports that require Django to be set up first
from celery_tasks.html.tasks import generate_static_sku_detail_html
from goods.models import SKU
# 4. Main logic
if __name__ == '__main__':
skus = SKU.objects.all()
for sku in skus:
print(sku.id)
generate_static_sku_detail_html(sku.id)
| [
"[email protected]"
]
| |
1a73b6acb4d8371dc29e5f10e62860f6bc22386f | f1fcd165cd8444310ce5d201e481e3982dc28110 | /easy/1901/190108/jang.py | 25720a11955857329f172ec792f4b8f996ea5564 | []
| no_license | JoosJuliet/algoStudy | 310a71a0fcc8f3c23281544cf3458ed999040176 | 3fc1e850f9d8b9f290f41fddd59ff403fbfffa05 | refs/heads/master | 2020-04-20T19:26:25.485875 | 2019-03-27T22:37:27 | 2019-03-27T22:37:27 | 169,049,593 | 1 | 0 | null | 2019-02-04T08:43:07 | 2019-02-04T08:43:07 | null | UTF-8 | Python | false | false | 130 | py | from collections import Counter
input()
ans = 0
for c in Counter(map(int, input().split())).values():
ans += c//2
print(ans)
| [
"[email protected]"
]
| |
79b02fcb1541ce6c7a051b6738976296de7faaa3 | 2e6f4690a2a9448a1eb027c14a637ab449b94c4f | /qa/rpc-tests/decodescript.py | e5231a5f2da09223aee163856e576be6eea63976 | [
"MIT"
]
| permissive | mirzaei-ce/core-mashhadbit | 11d60f09f80c8056f5e063eb65783f8699f5ede8 | 1d9d45336cbbda7ffd700d3f1c3dd9e8b4ce2745 | refs/heads/master | 2021-07-18T11:43:26.440889 | 2017-10-26T14:31:07 | 2017-10-26T14:31:07 | 108,422,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,653 | py | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import MashhadbitTestFramework
from test_framework.util import *
from test_framework.mininode import *
from binascii import hexlify, unhexlify
from cStringIO import StringIO
class DecodeScriptTest(MashhadbitTestFramework):
"""Tests decoding scripts via RPC command "decodescript"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Tests decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(StringIO(unhexlify(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = hexlify(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
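        # For reference: the appended sighash byte 0x01 is SIGHASH_ALL (decoded as [ALL]),
        # while 0x82 = SIGHASH_NONE (0x02) | SIGHASH_ANYONECANPAY (0x80), which decodes
        # as [NONE|ANYONECANPAY]; only this trailing byte differs between the two signatures.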
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = unhexlify(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = unhexlify(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = unhexlify('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = unhexlify('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
print(hexlify('636174'))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
| [
"[email protected]"
]
| |
3567b6b9dbb5ba835c394efc4a0acaf1d521e739 | f60434c0a27f0f5ada2aa5607c94947890de5692 | /codezilla/sherlock.py | 4b6219a06e227af9a424e2f6c7b32393e09c7c36 | [
"MIT"
]
| permissive | AnuragAnalog/codechef | 16aa7711e6471f6249874066105f50aee90436c3 | 348dd1d8daac356f0390ce124a263f6157495b1c | refs/heads/master | 2022-06-27T15:11:36.811069 | 2020-05-11T03:20:19 | 2020-05-11T03:20:19 | 262,927,296 | 1 | 0 | null | 2020-05-11T03:18:50 | 2020-05-11T03:16:15 | null | UTF-8 | Python | false | false | 372 | py | n = int(input())
inst = list(input())
string = ""
for i in inst:
if i == "1":
string = string + "a"
elif i == "2":
string = string + "bb"
elif i == "3":
string = string + "ab"
elif i == "4":
string = string.replace("a", "$")
string = string.replace("b", "a")
string = string.replace("$", "b")
print(string)
| [
"[email protected]"
]
| |
1b17c93b5d60e121444377dcc3f277dd0f2fce03 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /run/runcase_ipo.py | 22f8fbc94a9b59c8509cbbaec8ddff44282914d9 | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os
import time
import unittest
sys.path.append('/home/yhl2/workspace/xtp_test/Autocase_Result')
sys.path.append('/home/yhl2/workspace/xtp_test/utils')
import CaseServiceIpo
# Batch execution of the Shenzhen current-price test cases
def runCases(cases, path, filename, sheet_name):
    '''
    :param cases: collection of test-case module (.py) names
    :param filename: name of the Excel file that stores the case parameters
    :return: None
    '''
suite_cases = []
excel_file = os.path.join(path, filename)
case_service = CaseServiceIpo.CaseService(excel_file,sheet_name)
d = [(k, case_service.testcase_seq_dict[k]) for k in sorted(case_service.testcase_seq_dict.keys())]
    # load the cases in order
for (k,case) in d:
m = __import__(case['pyname'])
cls = getattr(m, case['pyname'])
print cls
suite_case = unittest.TestLoader().loadTestsFromTestCase(cls)
# suite_cases = []
suite_cases.append(suite_case)
suite = unittest.TestSuite(suite_cases)
unittest.TextTestRunner(verbosity=2).run(suite)
# time.sleep(10)
def getCases(casepath):
file_list = os.listdir(casepath)
cases = []
for file in file_list:
if file[-2:] == 'py' and file != '__init__.py':
file_index = file.find('.py')
case = file[0:file_index]
cases.append(case)
return cases
def run_case(casepath_yw,filename,sheetname):
casepath = '/home/yhl2/workspace/xtp_test/Autocase_Result/'+casepath_yw
cases = getCases(casepath)
path = '/home/yhl2/workspace/xtp_test/utils'
runCases(cases, path, filename, sheetname)
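# Example call (hypothetical business folder name, assuming the directory layout above):
# run_case('XTP_IPO', u'普通业务自动化用例.xlsx', u'新股申购_深圳')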
if __name__ == '__main__':
    # directory where the generated case .py files are stored
casepath = '/home/yhl2/workspace/xtp_test/Autocase_Result'
cases = getCases(casepath)
path = '/home/yhl2/workspace/xtp_test/utils'
runCases(cases, path, u'普通业务自动化用例.xlsx',u'新股申购_深圳')
| [
"[email protected]"
]
| |
bf43527a3d5127746b93a44909d325e5c4ebbe32 | 1bdb0da31d14102ca03ee2df44f0ec522b0701a4 | /Lombardia/AlfaVarese/3-FoundReportList.py | ef7ade81f4816ecdf095f5b44ed170800b914cf7 | []
| no_license | figuriamoci/Acqua | dc073d90c3c5e5899b22005685847916de1dfd95 | aef22fcd0c80c92441e0e3df2468d7a2f23a848a | refs/heads/master | 2020-12-15T04:00:26.855139 | 2020-06-08T21:17:55 | 2020-06-08T21:17:55 | 234,986,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | ##
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import logging,pandas as pd
import acqua.aqueduct as aq
gestore = "AlfaVarese"
aq.setEnv('Lombardia//'+gestore)
url = "https://www.alfasii.it/la-societa/servizi/acquedotto.html"
#
options = webdriver.ChromeOptions()
options.add_argument( '--ignore-certificate-errors' )
options.add_argument( '--incognito' )
options.add_argument( '--headless' )
locationList = pd.read_csv("Metadata/LocationList.csv")
#locationList = locationList[0:10]
foundReportList = pd.DataFrame()
##
for i,loc in locationList.iterrows():
driver = webdriver.Chrome( "chromedriver", options=options )
driver.implicitly_wait( 10 ) # seconds
driver.get( url )
time.sleep( 5 )
try:
alias_city = loc['alias_city']
alias_address = loc['alias_address']
        divWebElement = WebDriverWait( driver, 10 ).until( EC.visibility_of( driver.find_element_by_id( "sl_sidebar" ) ) )
listWebElement = divWebElement.find_elements_by_tag_name("div")
listWebElement[0].text.split("\n")
cityWebElement = [c for c in listWebElement if c.text.split("\n")[0] == alias_city and c.text.split("\n")[1] == alias_address][0]
driver.execute_script( "arguments[0].click();", cityWebElement )
time.sleep(2)
logging.info("Extract report for %s/%s (%s/%s)...",alias_city,alias_address,i+1,len(locationList))
reportLinkWebElement = WebDriverWait( driver, 10 ).until( EC.visibility_of( driver.find_element_by_link_text("Scarica la tabella dei valori") ) )
urlReport = reportLinkWebElement.get_attribute("href")
row = {"alias_city":alias_city,"alias_address":alias_address,"urlReport":urlReport}
foundReportList = foundReportList.append(row,ignore_index=True)
except:
logging.critical("Skip %s/%s",alias_city,alias_address)
driver.close()
##
foundReportList.to_csv('Metadata/ReportFoundList.csv',index=False)
| [
"[email protected]"
]
| |
74832a0ea32690e56228433ad4eb435b3f2d0185 | 8246e9fbdecdb37651e0d09497fd9428e434f33c | /FilmLocationFromGuidedWithSerial/admin.py | d179c1eeaa68276fef03e2c251f12fffe2bac988 | []
| no_license | rajeev1234/Landing-Page | 479995026ab01fc504a1e9502e7763dc04266009 | 4bfd22a6a1776907ba78b3dc9037064c820b049e | refs/heads/master | 2020-03-08T13:37:20.253252 | 2018-04-05T06:33:26 | 2018-04-05T06:33:26 | 128,162,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django.contrib import admin
# Register your models in admin panels here.
from . import models
# declaring comments stack
class CommentInline(admin.TabularInline):
model = models.Comment
# attaching comment stack to FilmLocationFromGuidedWithSerial
class FilmLocationFromGuidedWithSerialAdmin(admin.ModelAdmin):
inlines = [CommentInline]
# register the models in the admin panel
admin.site.register(models.FilmLocationFromGuidedWithSerial, FilmLocationFromGuidedWithSerialAdmin)
admin.site.register(models.Comment)
| [
"[email protected]"
]
| |
deafa9e8cc390faf8bca1d5e09ef640b1b3e5605 | 3a0732e6e64e2ffedd85de2d2844b26a974aae67 | /utils/functions.py | 9c121577923989b8e1c051db0e83a39bae3471e0 | [
"MIT"
]
| permissive | helloharmeet/A-Simple-Note-Taking-Web-App | 8e8163597545a27dbd2f41b938d4a9e2a6db55c8 | 0f952bc0bcd752ba3e5faedb8f86345043fec970 | refs/heads/master | 2021-07-04T14:55:57.323768 | 2017-09-28T00:53:25 | 2017-09-28T00:53:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,626 | py | import os
import hashlib
def get_database_connection():
'''
Creates a connection between selected database
'''
import sqlite3
sqlite_file = 'notes.db'
file_exists = os.path.isfile(sqlite_file)
conn = sqlite3.connect(sqlite_file)
if not file_exists:
create_sqlite_tables(conn)
return conn
def create_sqlite_tables(conn):
'''
Creates a sqlite table as specified in schema_sqlite.sql file
'''
cursor = conn.cursor()
with open('schema_sqlite.sql', 'r') as schema_file:
cursor.executescript(schema_file.read())
conn.commit()
def get_user_count():
'''
    Returns the total number of registered users
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT COUNT(*) FROM users')
result = cursor.fetchone()
if result:
return result[0]
except:
return False
def check_user_exists(username, password):
'''
Checks whether a user exists with the specified username and password
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM users WHERE username=? AND password=?', (username, password))
result = cursor.fetchone()
if result:
return result[0]
except:
return False
def store_last_login(user_id):
'''
    Updates the last_login timestamp for the given user
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("UPDATE users SET last_login=(strftime('%Y-%m-%d %H:%M:%S', 'now', 'localtime')) WHERE id=?", (user_id, ))
conn.commit()
cursor.close()
except:
cursor.close()
def check_username(username):
'''
Checks whether a username is already taken or not
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM users WHERE username=?', (username, ))
if cursor.fetchone():
return True
except:
return False
def signup_user(username, password, email):
'''
Function for storing the details of a user into the database
while registering
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("INSERT INTO users(username, password, email) VALUES (?, ?, ?)", (username, password, email))
conn.commit()
cursor.close()
return
except:
cursor.close()
def get_user_data(user_id):
'''
Function for getting the data of a specific user using his user_id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM users WHERE id=?', (str(user_id), ))
results = cursor.fetchall()
cursor.close()
if len(results) == 0:
return None
return results
except:
cursor.close()
def get_data_using_user_id(id):
'''
Function for getting the data of all notes using user_id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM notes WHERE user_id=' + str(id))
results = cursor.fetchall()
cursor.close()
if len(results) == 0:
return None
return results
except:
cursor.close()
def get_data_using_id(id):
'''
Function for retrieving data of a specific note using its id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM notes WHERE id=' + str(id))
results = cursor.fetchall()
cursor.close()
return results
except:
cursor.close()
def get_number_of_notes(id):
'''
Function for retrieving number of notes stored by a specific user
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT COUNT(note) FROM notes WHERE user_id=' + str(id))
results = cursor.fetchone()[0]
cursor.close()
return results
except:
cursor.close()
def get_data():
'''
Function for getting data of all notes
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM notes')
results = cursor.fetchall()
cursor.close()
return results
except:
cursor.close()
def add_note(note_title, note, note_markdown, tags, user_id):
'''
Function for adding note into the database
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("INSERT INTO notes(note_title, note, note_markdown, tags, user_id) VALUES (?, ?, ?, ?, ?)", (note_title, note, note_markdown, tags, user_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def edit_note(note_title, note, note_markdown, tags, note_id):
'''
    Function for updating an existing note in the database
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
# print("UPDATE notes SET note_title=?, note=?, note_markdown=?, tags=? WHERE id=?", (note_title, note, note_markdown, tags, note_id))
cursor.execute("UPDATE notes SET note_title=?, note=?, note_markdown=?, tags=? WHERE id=?", (note_title, note, note_markdown, tags, note_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def delete_note_using_id(id):
'''
Function for deleting a specific note using its id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("DELETE FROM notes WHERE id=" + str(id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def generate_password_hash(password):
'''
Function for generating a password hash
'''
hashed_value = hashlib.md5(password.encode())
return hashed_value.hexdigest()
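# Note: the hash above is a plain, unsalted MD5 hex digest, e.g.
# generate_password_hash('abc') -> '900150983cd24fb0d6963f7d28e17f72' (32 hex chars);
# adequate for this demo app, but not a modern password-hashing scheme.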
def add_tag(tag, user_id):
'''
Function for adding a tag into the database
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("INSERT INTO tags(tag, user_id) VALUES (?, ?)", (tag, user_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def get_all_tags(user_id):
'''
Function for getting all tags for a specific user
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT id, tag FROM tags WHERE user_id=?', (str(user_id), ))
results = cursor.fetchall()
if len(results) > 0:
results = [(str(results[i][0]), results[i][1]) for i in range(len(results))]
else:
results = None
cursor.close()
return results
except:
cursor.close()
def get_data_using_tag_id(tag_id):
'''
    Function for getting the tag name for a specific tag id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT tag FROM tags WHERE id=?', (str(tag_id), ))
results = cursor.fetchone()
cursor.close()
return results
except:
cursor.close()
def get_tag_using_note_id(id):
'''
Get the tags associated with each note
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT tags FROM notes WHERE id=?', (str(id), ))
results = cursor.fetchall()
# results = [(str(results[i][0]), results[i][1]) for i in range(len(results))]
results = results[0][0].split(',')
cursor.close()
return results
except:
cursor.close()
def get_tagname_using_tag_id(tag_id):
'''
Get the tag name using tag id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT tag FROM tags WHERE id=?', (str(tag_id), ))
results = cursor.fetchone()
cursor.close()
return ''.join(results)
except:
cursor.close()
def delete_tag_using_id(tag_id):
'''
Function for deleting a specific tag using its id
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("DELETE FROM tags WHERE id=" + str(tag_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def get_number_of_tags(id):
'''
Function for retrieving number of tags stored by a specific user
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT COUNT(tag) FROM tags WHERE user_id=' + str(id))
results = cursor.fetchone()[0]
cursor.close()
return results
except:
cursor.close()
def get_notes_using_tag_id(tag_id):
'''
Function for retrieving notes stored by a specific tag
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT id, note_title FROM notes WHERE tags like ?', ('%' + tag_id + '%',))
results = cursor.fetchall()
cursor.close()
return results
except:
cursor.close()
def edit_email(email, user_id):
'''
    Function for updating a user's email address in the database
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("UPDATE users SET email=? WHERE id=?", (email, user_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def edit_password(password, user_id):
'''
    Function for updating a user's password (stored as an MD5 hash) in the database
'''
conn = get_database_connection()
password = generate_password_hash(password)
try:
cursor = conn.cursor()
cursor.execute("UPDATE users SET password=? WHERE id=?", (password, user_id))
conn.commit()
cursor.close()
return
except:
cursor.close()
def get_search_data(pattern, user_id):
'''
Function for searching note based on specified pattern
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM notes WHERE user_id=? AND note_title LIKE ? LIMIT 3", (user_id, '%' + pattern + '%'))
results = cursor.fetchall()
results = [(results[i][0], results[i][3]) for i in range(len(results))]
cursor.close()
return results
except:
cursor.close()
def get_rest_data_using_user_id(id):
'''
Function for getting the data of all notes using user_id using REST
'''
conn = get_database_connection()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM notes WHERE user_id=' + str(id))
results = cursor.fetchall()
fieldnames = [f[0] for f in cursor.description]
cursor.close()
if len(results) == 0:
return None
else:
outer = {}
for i in range(len(results)):
data = {}
for j in range(len(results[0])):
data[fieldnames[j]] = results[i][j]
outer[int(i)] = data
return outer
except:
cursor.close()
# if __name__ == '__main__':
# print(get_rest_data_using_user_id(1))
# print(get_data_using_id(1))
| [
"[email protected]"
]
| |
be7339bac0388480a26e2b7a029ad4492e92b529 | 90b2ad813c96d630cd254475b0ad3a7a735011e5 | /codigo/MeteoSalon/MQTT_test.py | 8c23d94dc8cb58f56bd2ec98d7aef2b8a31a2d6f | []
| no_license | vtt-info/micropythonTutorial | dbcd4c13af442446c9816e4fdcd82b8eaaf6a27a | 67a58fb56bef4ef55c89cf76fc3ccde842b62ce6 | refs/heads/master | 2020-11-27T21:44:22.044221 | 2019-12-22T16:25:44 | 2019-12-22T16:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | # MQTT test
# based on https://randomnerdtutorials.com/micropython-mqtt-esp32-esp8266/
from umqttsimple import MQTTClient
import ubinascii
import machine
import Wemos # helps identify the board pins
import MeteoSalon # handles the connected devices
import NeoPixelTHO # handles the RGB LED
import time # for the delays
import helpFiles # for free and df
import utime
client_id = ubinascii.hexlify(machine.unique_id())
topic_sub = b'MeteoSalon'
topic_subFree = topic_sub + b'/free'
topic_subMem = topic_sub + b'/mem'
topic_subLed = topic_sub + b'/led'
topic_subTemp = topic_sub + b'/Temp'
topic_subHum = topic_sub + b'/Hum'
topic_subPress = topic_sub + b'/Press'
topic_subLedRGB = topic_sub + b'/ledRGB'
topic_pub = b'hello'
mqtt_server = '192.168.1.200'
def sub_CheckTopics(topic, msg):
print((topic, msg))
if topic == topic_subLed: # Check for Led Topic
if msg == b'On':
print('Led:On')
MeteoSalon.led.off()
else:
print('Led:Off')
MeteoSalon.led.on()
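            # Note: the on-board LED on these Wemos/ESP8266 boards is assumed to be
            # wired active-low, which is why led.off() lights it for 'On' and
            # led.on() turns it dark for the 'Off' case above.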
elif topic == topic_subLedRGB: ## Check for RGB Topic
MeteoSalon.color(msg)
elif topic == topic_subFree: ## Check for free memory
freeMem = helpFiles.free()
client.publish(topic_subMem, str(freeMem))
def connect_and_subscribe():
global client, client_id, mqtt_server, topic_sub, topic_subLedRGB, topic_subLed
client = MQTTClient(client_id, mqtt_server)
client.set_callback(sub_CheckTopics)
client.connect()
client.subscribe(topic_subFree)
client.subscribe(topic_subLed)
client.subscribe(topic_subLedRGB)
print('Connected to %s MQTT broker, subscribed to %s topic' % (mqtt_server, topic_subFree))
return client
def restart_and_reconnect():
print('Failed to connect to MQTT broker. Reconnecting...')
time.sleep(10)
machine.reset()
def mainBeta(everySeconds=60):
connect_and_subscribe() # connect and get a client reference
last_Temp = utime.ticks_ms()
while True :
        client.check_msg() # Check for new messages and invoke the callback
now = utime.ticks_ms()
if utime.ticks_diff(now, last_Temp) > (everySeconds*1000):
last_Temp = now
client.publish(topic_subTemp, MeteoSalon.bme.temperature)
client.publish(topic_subPress, MeteoSalon.bme.pressure)
client.publish(topic_subHum, MeteoSalon.bme.humidity)
time.sleep_ms(200)
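# Minimal usage sketch (assumed entry point, e.g. called from main.py on the board):
# import MQTT_test
# MQTT_test.mainBeta(everySeconds=60) # poll MQTT and publish BME readings once a minute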
| [
"[email protected]"
]
| |
7c54c7b31d8d70ba4d82aa27a48606da121ed2d6 | 9c63f6d39a6085674ab42d1488476d0299f39ec9 | /Python/LC_Unique_Email_Addresses.py | 8d175c6c63fe339b19edb98152a443a9c2a31f7a | []
| no_license | vijayjag-repo/LeetCode | 2237e3117e7e902f5ac5c02bfb5fbe45af7242d4 | 0a5f47e272f6ba31e3f0ff4d78bf6e3f4063c789 | refs/heads/master | 2022-11-14T17:46:10.847858 | 2022-11-08T10:28:30 | 2022-11-08T10:28:30 | 163,639,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | class Solution(object):
def numUniqueEmails(self, emails):
"""
:type emails: List[str]
:rtype: int
Approach:
Split into local and domain.
Process accordingly
"""
new = set()
for email in emails:
local,domain = email.split('@')
if('+' in local):
local = local[:local.index('+')]
if('.' in local):
local = local.replace('.','')
new.add(local+'@'+domain)
return(len(new))
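        # Example: ["[email protected]", "[email protected]",
        # "[email protected]"] -> 2, because the first two normalize
        # to the same address.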
| [
"[email protected]"
]
| |
275bc0dc169eb8d80100c4b3485b2de5f9c9a001 | 822d3cd484b54f0531fc205520c765a8321c0613 | /pyFile/9面向对象进阶/5.描述器/随堂笔记/12.反向操作符.py | 9d8711a8583579d14da0a7ab9391630953089a88 | []
| no_license | mghxy123/learnPython | 31d1cc18deeed5a89864ca0333fe488e0dbf08b4 | 00740e87d55a4dffd78773deaff8689485df31e8 | refs/heads/master | 2021-07-21T14:31:02.421788 | 2020-06-27T11:28:01 | 2020-06-27T11:28:01 | 187,751,182 | 0 | 0 | null | 2020-06-07T05:14:05 | 2019-05-21T02:58:35 | Python | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 12.反向操作符.py
# Author: HuXianyong
# Date : 2019/5/27 17:58
# print(type(NotImplemented)) # the NotImplemented singleton
# print(type(None)) # the None singleton
# print(type(NotImplementedError)) # an exception class
class A:
def __init__(self,x):
self.x = x
def __repr__(self):
return "<A {}>".format(self.x)
def __add__(self, other):
print('add ~~~~~~~~~~~')
if hasattr(other,'x'):
return self.x +other.x
else:
try:
x = int(other)
except:
x=0
return self.x+x
def __iadd__(self, other):
print('iadd ~~~~~~~~~~~')
return A(self.x+other.x)
def __radd__(self, other):
print('radd ~~~~~~~~~~~')
return self+other
a1 = A(4)
a2 = A(5)
print(a1+a2) #add int 9 a1.__add__(a2)
print(a2+a1)
# print(a2+1) # still goes through A.__add__; the int falls into its try/int branch (5 + 1)
# print(2+a1) # int.__add__ returns NotImplemented, so Python calls a1.__radd__(2)
class B:
def __init__(self,x):
self.x = x
    def __add__(self, other): # when the left operand (b1 here) overloads the operator, its method is tried first
        return NotImplemented # returning NotImplemented lets Python fall back, so the prints below still output 10
# return 123
b1 = B(6)
print(a1+b1) # works: a1.__add__(b1)
print(b1+a1) # works: B.__add__ yields NotImplemented, so a1.__radd__(b1) runs
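# What happens above: for b1 + a1 Python first tries B.__add__(b1, a1); since that
# returns NotImplemented, the interpreter falls back to the reflected method
# a1.__radd__(b1), so both print() calls output 10.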
| [
"[email protected]"
]
| |
0893fddba045a950026684cfcf99ea17a23ccda4 | 2c3e0c3ef202375d998c9123934af09315d33fee | /LeetCode/Greedy Algorithms/jump_game.py | b08f9a1ecc1c6bde18a1346953fcd57c11047c44 | []
| no_license | Kalesh-Singh/Interviews2019 | e74f0ec22b1cb5fe178a38efc9c0ceea929e32f0 | e8fadb9636659a28f657fb43ee804761a215c37e | refs/heads/master | 2020-05-23T12:56:05.896620 | 2019-08-25T15:23:10 | 2019-08-25T15:23:10 | 186,767,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | class Solution:
def canJump(self, nums: 'List[int]') -> bool:
# Solution 1 - Dynamic Programming Bottom Up Approach
# n = len(nums)
# # Try determine whether we can reach the last index
# # starting from the right.
# results = [False] * n
# # We know we can get to the last index from itself
# # i.e. no jumps
# results[n - 1] = True
# for i in range(n - 2, -1, -1):
# maxJumpIndex = min(i + nums[i], n - 1)
# for j in range(i + 1, maxJumpIndex + 1):
# if results[j]:
# # If we can get to the end from j
# # and we can get to j from i
# # then we can get to the end from i
# results[i] = True
# break
# return results[0]
# Solution 2 - Greedy Approach
n = len(nums)
last_pos = n - 1
for i in range(n - 1, -1, -1):
if i + nums[i] >= last_pos:
last_pos = i
return last_pos == 0
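        # Example: [2,3,1,1,4] -> True (index 0 can reach the last index),
        # while [3,2,1,0,4] -> False (every path gets stuck at index 3).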
| [
"[email protected]"
]
| |
5fc218c45331323e07ff14adde4a58c7ebcb9b5f | 15945660e0e9624693f11d7ec6460fb41d2f1ef9 | /tfx/utils/import_utils.py | 476d32d7366a4df7ac64158d12abebb7380674ae | [
"Apache-2.0"
]
| permissive | HassanDayoub/tfx | f4a32cd6e25493d152a6f91b2cc26db94154d0a6 | dc9221abbb8dad991d1ae22fb91876da1290efae | refs/heads/master | 2020-05-30T18:44:31.410424 | 2019-05-31T22:06:53 | 2019-05-31T22:07:25 | 189,904,199 | 2 | 0 | Apache-2.0 | 2019-06-02T23:09:17 | 2019-06-02T23:09:17 | null | UTF-8 | Python | false | false | 1,171 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX type definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Text, Type
def import_class_by_path(class_path: Text) -> Type[Any]:
"""Import a class by its <module>.<name> path.
Args:
class_path: <module>.<name> for a class.
Returns:
Class object for the given class_path.
"""
classname = class_path.split('.')[-1]
modulename = '.'.join(class_path.split('.')[0:-1])
mod = __import__(modulename, fromlist=[classname])
return getattr(mod, classname)
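# Example (standard-library class, shown only for illustration):
# import_class_by_path('collections.OrderedDict') returns the OrderedDict class object.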
| [
"[email protected]"
]
| |
10787697aa144a80df495845d4c964f0663d2b6d | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/compiler/tests/scatter_nd_op_test.py | 46c679b61fdd6dd03be8c0b8644cab352b9cf0a6 | [
"Apache-2.0"
]
| permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,132 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
[functools.reduce(lambda x, y: x * y, shape[: -ndims + 1], 1)]
+ shape[-ndims + 1 :]
)
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
shape[: ndims - 1]
+ [functools.reduce(lambda x, y: x * y, shape[ndims - 1 :], 1)]
)
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output], flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(indices, updates, shape):
ref = np.zeros(shape, dtype=updates.dtype)
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
class ScatterNdTest(xla_test.XLATestCase):
def _VariableRankTest(
self, np_scatter, tf_scatter, vtype, itype, repeat_indices=False
):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[: num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0
)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
# Scatter via numpy
np_out = np_scatter(indices, updates, ref_shape)
# Scatter via tensorflow
tf_out = tf_scatter(indices, updates, ref_shape)
self.assertAllClose(np_out, tf_out)
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in self.numeric_types:
for itype in set([np.int32, np.int64]).intersection(set(self.int_types)):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def _runScatterNd(self, indices, updates, shape):
with self.session():
updates_placeholder = array_ops.placeholder(updates.dtype)
indices_placeholder = array_ops.placeholder(indices.dtype)
with self.test_scope():
output = array_ops.scatter_nd(
indices_placeholder, updates_placeholder, shape
)
feed_dict = {updates_placeholder: updates, indices_placeholder: indices}
return output.eval(feed_dict=feed_dict)
def testSimple(self):
indices = np.array([[4], [3], [1], [7]], dtype=np.int32)
updates = np.array([9, 10, 11, 12], dtype=np.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [8]))
def testRepeatedIndices(self):
indices = np.array([[0], [1], [0], [1]], dtype=np.int32)
updates = np.array([9, 10, 11, 12], dtype=np.float32)
expected = np.array([20, 22], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2]))
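        # Repeated indices accumulate: index 0 receives 9 + 11 = 20 and
        # index 1 receives 10 + 12 = 22, hence the expected values above.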
def testSimple2(self):
indices = np.array([[1, 0], [1, 1]], dtype=np.int32)
updates = np.array([11.0, 12.0], dtype=np.float32)
expected = np.array([[0.0, 0.0], [11.0, 12.0], [0.0, 0.0]], dtype=np.float32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
def testSimple3(self):
indices = np.array([[1]], dtype=np.int32)
updates = np.array([[11.0, 12.0]], dtype=np.float32)
expected = np.array([[0.0, 0.0], [11.0, 12.0], [0.0, 0.0]])
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, self._runScatterNd)
def testExtraIndicesDimensions(self):
indices = np.zeros([1, 1, 2], np.int32)
updates = np.zeros([1, 1], np.int32)
expected = np.zeros([2, 2], dtype=np.int32)
self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2, 2]))
@test_util.disable_mlir_bridge("Error messages differ")
def testRank3InvalidShape1(self):
indices = np.zeros([3, 2, 2], np.int32)
updates = np.zeros([2, 2, 2], np.int32)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError, "Must have updates.shape"
):
self._runScatterNd(indices, updates, [2, 2, 2])
@test_util.disable_mlir_bridge("Error messages differ")
def testRank3InvalidShape2(self):
indices = np.zeros([2, 2, 1], np.int32)
updates = np.zeros([2, 2], np.int32)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError, "Must have updates.shape"
):
self._runScatterNd(indices, updates, [2, 2, 2])
def testScatterOutOfRange(self):
updates = np.array([-3, -4, -5]).astype(np.float32)
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
# Indices out of range should not fail. It produces implementation-defined
# output.
indices = np.array([[-1], [0], [5]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
indices = np.array([[2], [0], [6]], dtype=np.int32)
self._runScatterNd(indices, updates, [6])
class ScatterNdTensorTest(xla_test.XLATestCase):
def _runScatter(self, op):
indices_np = np.array([[4], [3], [1], [7]], dtype=np.int32)
updates_np = np.array([9, 10, 11, 12], dtype=np.float32)
with self.session() as sess, self.test_scope():
indices = array_ops.placeholder(indices_np.dtype, shape=indices_np.shape)
updates = array_ops.placeholder(updates_np.dtype, shape=updates_np.shape)
t = array_ops.ones([8], dtype=np.float32)
out = op(t, indices, updates)
return sess.run(out, feed_dict={indices: indices_np, updates: updates_np})
def testAdd(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_add),
np.array([1, 12, 1, 11, 10, 1, 1, 13], dtype=np.float32),
)
def testSub(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_sub),
np.array([1, -10, 1, -9, -8, 1, 1, -11], dtype=np.float32),
)
def testUpdate(self):
self.assertAllEqual(
self._runScatter(array_ops.tensor_scatter_update),
np.array([1, 11, 1, 10, 9, 1, 1, 12], dtype=np.float32),
)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
c065a92fe428a5c1db1d2ed606c8bfba5a1d6d3b | 0c1d3807940f223c913aeadac31b85dc576b1dca | /app.py | b1656d84cd9a00fbce5cfecc0fdd87693551c67c | []
| no_license | wegamekinglc/QA | 4b2073e4f93d96c09c771d868914cef3367ab55f | c818442b06f5701feb6b38dcf6f20853d8ec6556 | refs/heads/master | 2020-06-11T00:41:02.003796 | 2019-06-26T04:46:36 | 2019-06-26T04:46:36 | 193,806,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | from typing import Tuple
import requests
from flask import (
Flask, render_template, request, redirect
)
import pandas as pd
app = Flask(__name__)
pd.set_option('display.max_colwidth', -1)
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
def handle_response_hr(resp: dict) -> Tuple[str, str]:
code = resp['code']
if code == 1:
is_matched = '是'
answer = resp['data']['target_answer'].replace('\n', '')
answer = '<br>'.join(chunkstring(answer, 50))
else:
is_matched = '否'
answer = "您好,这个问题您是商米第一位提到的呢,<br>" \
"暂时无法查询到对应答案哦。请您尝试调整搜索关键词或直接联系人力资源部张小桐(Tel:15651621590)来寻求帮助,<br>" \
"后续我们也会将您提出的问题完善到我的“大脑”中,谢谢您"
return is_matched, answer
def handle_response_cs(resp: dict) -> Tuple[str, str]:
code = resp['code']
if code == 1:
is_matched = '是'
answer = resp['data']['target_answer'].replace('\n', '')
answer = '<br>'.join(chunkstring(answer, 50))
else:
is_matched = '否'
answer = "您好,已经帮您转人工服务!"
return is_matched, answer
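# The FAQ service is assumed to reply with JSON of the form
# {"code": 1, "data": {"target_answer": "..."}} on a match and a non-1 "code"
# otherwise; the two handlers above rely only on those fields.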
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if request.form['submit_button'] == '商米HR问答Demo':
return redirect('/hr')
elif request.form['submit_button'] == '客户服务问答Demo':
return redirect('/cs')
elif request.method == 'GET':
return render_template('index.html', head='商米问答机器人测试')
@app.route('/hr', methods=['GET'])
def hr_form():
return render_template('hr_search.html', hint="请输入测试问题", head="商米HR问答Demo", result="")
@app.route('/hr', methods=['POST'])
def hr_query():
query = request.form['query']
resp = requests.post('http://172.16.0.170:8126/faq', data={"question": query}).json()
parsed_resp = handle_response_hr(resp)
df = pd.DataFrame(columns=['是否匹配', '答案'])
df.loc[0, '是否匹配'] = parsed_resp[0]
df.loc[0, '答案'] = parsed_resp[1]
return render_template('hr_search.html',
hint="请输入测试问题",
head="商米HR问答Demo",
result=df.to_html(index=False, justify='center', classes='center', escape=False))
@app.route('/cs', methods=['GET'])
def cs_form():
return render_template('cs_search.html', hint="请输入测试问题", head="客户服务问答Demo", result="")
@app.route('/cs', methods=['POST'])
def cs_query():
query = request.form['query']
resp = requests.post('http://172.16.0.170:8000/faq', data={"question": query}).json()
parsed_resp = handle_response_cs(resp)
df = pd.DataFrame(columns=['是否匹配', '答案'])
df.loc[0, '是否匹配'] = parsed_resp[0]
df.loc[0, '答案'] = parsed_resp[1]
return render_template('cs_search.html',
hint="请输入测试问题",
head="客户服务问答Demo",
result=df.to_html(index=False, justify='center', classes='center', escape=False))
if __name__ == '__main__':
app.run(host="0.0.0.0")
| [
"[email protected]"
]
| |
37ef390c7e9d53f0d6bc90b5bb19dee5ee3d0338 | 28deae4b6f2ef4c83116d8a7e08061b2ac47bb71 | /Spider/ImgSpider/utils/exceptions.py | d2c585c0275117650b4ba792d89e1926e23d29ec | [
"MIT",
"Apache-2.0"
]
| permissive | Danceiny/HackGirlfriend | 9cc796c733be7055799efb1c51f1e5ecb3d12d81 | d64f43c5cfb48d30ed812e34fb19bc7b90ba01f8 | refs/heads/master | 2023-01-04T16:09:55.205094 | 2017-07-22T16:48:59 | 2017-07-22T16:48:59 | 93,874,976 | 2 | 1 | Apache-2.0 | 2022-12-26T20:14:57 | 2017-06-09T15:57:34 | HTML | UTF-8 | Python | false | false | 1,267 | py | # coding=utf-8
class UnknownPythonVersion(Exception):
msg = 'Unknown Python version found, please check your Python installation.'
class UnsupportedPythonVersion(Exception):
msg = 'So far ImageSpider only support Python 2.'
class GetBaseLinkFailed(Exception):
msg = 'Getting base link failed.'
class ParameterNotGiven(Exception):
msg = 'Parameter is not given.'
class InvalidImageFileName(Exception):
msg = 'Invalid image filename found.'
class ClearCacheFailed(Exception):
msg = 'Clearing cache failed.'
class LoadCacheFailed(Exception):
msg = 'Loading cache failed.'
class InitializeFailed(Exception):
msg = 'Initialization failed.'
class SaveImageFailed(Exception):
msg = 'Saving image failed.'
class TranslateToAbsoluteURLFailed(Exception):
msg = 'Translating relative URL to absolute URL failed.'
class LoadSettingsFileFailed(Exception):
msg = 'load settings.conf failed, please check if it exists.'
class SettingsError(Exception):
CONFIG = 'settings.conf: '
interval_err = CONFIG + '[interval] must be larger than 0.'
sites_err = CONFIG + '[sites] is necessary.'
class InvalidDomain(Exception):
msg = 'The domain is invalid.'
class PackageNotInstalled(Exception):
pass
| [
"[email protected]"
]
| |
628f197dcdce92355c268cf6f67500a76d8f9ba6 | 0431fb263e38422585edca273fb47ef92fd22243 | /dataloaders/data_poke.py | ed7e09e5dcdf2e7f5b203cc60babc9ab1a115781 | []
| no_license | RRoundTable/EEN-with-Keras | a6c3352eafc05fcb7ed41463d637a684de9a0b27 | ae71903afa05135f5eb6e2797854969f5a082958 | refs/heads/master | 2020-04-28T03:17:25.762629 | 2019-04-27T12:19:13 | 2019-04-27T12:19:13 | 174,930,756 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,653 | py | import os, random, glob, pdb, math
import pickle as pickle
from sklearn.externals import joblib
import numpy
from scipy import misc
# import torch
# import torchvision
import utils
from tensorflow.python.keras.layers import Input
class ImageLoader(object):
def _load_set(self, split):
print('loading {} set'.format(split))
datalist = []
datapath = '{}/{}/'.format(self.arg.get("datapath"), split)
for fdname in os.listdir(datapath):
print("file : {}".format(fdname))
fd_datalist = []
abs_fdname = os.path.join(datapath, fdname)
print("loading {}".format(abs_fdname))
presaved_npy = glob.glob(os.path.join(abs_fdname, "presave.pkl"))
if len(presaved_npy) == 1:
with open(os.path.join(abs_fdname, "presave.pkl"),'rb') as f :
fd_datalist=joblib.load(f)
elif len(presaved_npy) == 0:
with open(os.path.join(abs_fdname, "presave.pkl"), 'wb') as f:
for abs_fname in sorted(glob.glob(os.path.join(abs_fdname, "*.jpg"))[:-1]):
print('reading {}'.format(abs_fname))
img = misc.imread(abs_fname)
r_img = misc.imresize(img, (self.height, self.width))
fd_datalist.append(r_img)
print(numpy.array(fd_datalist).shape)
# fd_datalist = numpy.transpose(numpy.array(fd_datalist), (0, 3, 1, 2))
joblib.dump(fd_datalist,f)
# numpy.save(os.path.join(abs_fdname, "presave.npy"), fd_datalist)
else:
raise ValueError
actions = numpy.load(abs_fdname + '/actions.npy')
datalist.append({'frames': fd_datalist, 'actions': actions})
return datalist
def __init__(self, arg):
super(ImageLoader, self).__init__()
self.arg = arg
self.datalist = []
self.height = arg.get('height')
self.width = arg.get('width')
self.nc = arg.get('nc')
self.ncond = arg.get('ncond', 1)
self.npred = arg.get('npred', 1)
self.datalist_train = self._load_set('train')
self.datalist_test = self._load_set('test')
# keep some training data for validation
self.datalist_valid = self.datalist_train[-3:]
self.datalist_train = self.datalist_train[:-3]
# self.datalist_valid = self.datalist_train
# self.datalist_train = self.datalist_train
# pointers
self.iter_video_ptr = 0
self.iter_sample_ptr = self.ncond
print("Dataloader constructed done")
def reset_ptrs(self):
self.iter_video_ptr = 0
self.iter_sample_ptr = self.ncond
def _sample_time(self, video, actions, num_cond, num_pred):
start_pos = random.randint(0, video.shape[0]-2)
cond_frames = video[start_pos]
pred_frames = video[start_pos+1]
actions = actions[start_pos]
return cond_frames, pred_frames, actions
def _iterate_time(self, video, start_pos, actions, num_cond, num_pred):
cond_frames = video[start_pos]
pred_frames = video[start_pos+1]
actions = actions[start_pos]
return cond_frames, pred_frames, actions
def get_batch(self, split):
if split == 'train':
datalist = self.datalist_train
elif split == 'valid':
datalist = self.datalist_valid
elif split == 'test':
datalist = self.datalist_test
cond_frames, pred_frames, actions = [], [], []
# rolling
id = 1
while id <= self.arg.get("batchsize"):
sample = random.choice(datalist)
sample_video = sample.get('frames')
sample_actions = sample.get('actions')
selected_cond_frames, selected_pred_frames, selected_actions = self._sample_time(
sample_video, sample_actions, self.ncond, self.npred)
assert(len(selected_actions) > 0)
cond_frames.append(selected_cond_frames)
pred_frames.append(selected_pred_frames)
actions.append(selected_actions)
id += 1
# processing on the numpy array level
cond_frames = numpy.array(cond_frames, dtype='float32') / 255.0
pred_frames = numpy.array(pred_frames, dtype='float32') / 255.0
actions = numpy.array(actions).squeeze()
return cond_frames,pred_frames,actions
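        # Shapes, as loaded above (no channel transpose): cond_frames and pred_frames
        # are (batchsize, height, width, 3) float32 arrays scaled to [0, 1];
        # actions is the squeezed per-sample action array.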
def get_iterated_batch(self, split):
        if split == 'train':
datalist = self.datalist_train
        elif split == 'test':
datalist = self.datalist_test
cond_frames, pred_frames, actions = [], [], []
# rolling
id = 1
while id <= self.arg.get("batchsize"):
if self.iter_video_ptr == len(datalist):
return None, None, None
            sample = datalist[self.iter_video_ptr]
sample_video = sample.get('frames')
sample_actions = sample.get('actions')
if self.iter_sample_ptr + self.npred > sample_video.shape[0]:
self.iter_video_ptr += 1
self.iter_sample_ptr = self.ncond
else:
selected_cond_frames, selected_pred_frames, selected_actions = self._iterate_time(
sample_video, self.iter_sample_ptr, sample_actions, self.ncond, self.npred)
assert(len(selected_actions) > 0)
cond_frames.append(selected_cond_frames)
pred_frames.append(selected_pred_frames)
actions.append(selected_actions)
id += 1
self.iter_sample_ptr += 1
# processing on the numpy array level
cond_frames = numpy.array(cond_frames, dtype='float') / 255.0
pred_frames = numpy.array(pred_frames, dtype='float') / 255.0
actions = numpy.array(actions).squeeze()
# # return tensor
# cond_frames_ts = torch.from_numpy(cond_frames).cuda()
# pred_frames_ts = torch.from_numpy(pred_frames).cuda()
# actions_ts = torch.from_numpy(actions).cuda()
#
# # keras
# return cond_frames_ts, pred_frames_ts, actions_ts
#
# def plot_seq(self, cond, pred):
# cond_pred = torch.cat((cond, pred), 1)
# cond_pred = cond_pred.view(-1, self.nc, self.height, self.width)
# grid = torchvision.utils.make_grid(cond_pred, self.ncond+self.npred, pad_value=1)
# return grid
| [
"[email protected]"
]
| |
f0638fd2d66ede9da7dbfba2d846f8e41920760a | 8bd1ae9c1681ee8c1214a4e9cda29a503676c36d | /v1/tnc2.py | 926883fffab2cabc992b1efab7396b853be0ba9b | []
| no_license | zleffke/balloon | c15ef7e5614018022ca1f19ed2e4c82009352165 | 2943d303692b299e6d0866299d83a94d0839bf5b | refs/heads/master | 2021-07-06T02:28:17.304024 | 2021-05-22T20:25:18 | 2021-05-22T20:25:18 | 57,078,951 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,811 | py | #!/usr/bin/env python
##################################################
# GPS Interface
# Author: Zach Leffke
# Description: Initial GPS testing
##################################################
from optparse import OptionParser
import threading
from datetime import datetime as date
import os
import serial
import math
import sys
import string
import time
def utc_ts():
return str(date.utcnow()) + " UTC | "
class TNC_Thread(threading.Thread):
def __init__ (self, port, baud, log_flag, call_filt):
threading.Thread.__init__(self)
self._stop = threading.Event()
self.tnc_ser = serial.Serial(port, baud)
self.log_flag = log_flag
self.line = ""
self.call_filt = call_filt
self.callsign = ""
self.path = []
self.raw_log = None
self.csv_log = None
self.lat = 0.0
self.lon = 0.0
self.alt = 0.0
self.spd = 0.0
self.cse = 0.0
self.time_utc = 0.0
self.log_file=None
#if self.log_flag!=None:
# self.log_file = open(self.log_flag,'a')
def run(self):
while (not self._stop.isSet()):
data = self.tnc_ser.readline()
#data = "KK4BSM-11>APT314,WIDE1-1,WIDE2-1:/205107h3713.89N/08025.49WO000/000/A=002125/Virginia Tech Project Ellie, Go Hokies!\n"
#data = "KC8SIR-1>APBL10,WIDE3-1,WIDE4-1:!3733.20N/08106.48WO183/036/A=018991V300"
if self.log_flag != None:
self.log_file = open(self.log_flag,'a')
self.log_file.write(utc_ts() + data)
self.log_file.close()
self.line = data.strip('\n')
self.Parse_TNC()
#print self.line
time.sleep(1)
sys.exit()
def Parse_TNC(self):
#----------Extract Callsign----------
#--Locate first '>', take characters from beginning, limit search to first ten characters
idx1 = self.line.find('>', 0, 10)
self.callsign = self.line[:idx1]
#print len(self.callsign), self.callsign
#--Verify Callsign matches callsign filter
idx2 = self.callsign.find(self.call_filt)
#print idx2
if (idx2 != -1): #Callsign Match
#----------extract path----------
#locate first '>', locate ':', take characters in between
a = self.line.find(':')
path_str = self.line[idx1+1:a]
self.path = path_str.split(',')
#----------extract time----------
#locate ':', take next 7 characters
#hhmmsst, hh - hours, mm - minutes, ss - seconds, t - timezone
#time_str = self.line[a+2:a+2+7]
#if ((time_str[6] == 'h')or(time_str[6] == 'z')): #Zulu Time
# self.time_utc = time_str[0:2] + ":" + time_str[2:4] + ":" + time_str[4:6] + " UTC"
#----------extract lat----------
#locate ':', skip 7 char, take next 8 char
lat_str = self.line[a+2:a+2+7]
#print lat_str
#lat_hemi = self.line[a+2+7:a+2+8]
lat_f = float(lat_str[0:2]) + float(lat_str[2:]) / 60.0
#if (lat_hemi == 'S'): lat_f = lat_f * -1
self.lat = lat_f #decimal degrees
#----------extract lon----------
#locate ':', skip 16, take next 9 char
lon_str = self.line[a+11:a+11+8]
lon_hemi = self.line[a+11+8: a+11+9]
lon_f = float(lon_str[0:3]) + float(lon_str[3:]) / 60.0
if lon_hemi == "W": lon_f = lon_f * -1
self.lon = lon_f # decimal degrees
            #----------extract course----------
            #locate the 'O' symbol character, take next 3 char
            a = self.line.find('O')
            cse_str = self.line[a+1:a+4]
            #print cse_str
            self.cse = float(cse_str) #in degrees
            #----------extract speed----------
            #skip the course field and '/', take next 3 char
            spd_str = self.line[a+1+4:a+1+4+3]
            #print spd_str
            self.spd = float(spd_str)*1.15078 #convert from knots to mph
#----------extract altitude----------
#locate 'A=', take next 6
a = self.line.find('A=')
alt_str = self.line[a+2:a+2+6]
self.alt = float(alt_str) #in feet
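            # Worked example with the commented KC8SIR-1 packet above
            # ('!3733.20N/08106.48WO183/036/A=018991...'): this parses to roughly
            # lat 37.553, lon -81.108, course 183 deg, speed 36 kt (~41.4 mph),
            # altitude 18991 ft.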
def get_last_callsign(self):
return self.callsign
def get_lat_lon_alt(self):
return self.lat, self.lon, self.alt
def get_spd_cse(self):
return self.spd, self.cse
def get_time(self):
return self.time_utc
def stop(self):
#self.tnc_ser.close()
self._stop.set()
sys.exit()
def stopped(self):
return self._stop.isSet()
| [
"[email protected]"
]
| |
2c1f0bb8452e88d0139f57a8d392d8d03d95841c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /G2QnBrxvpq9FacFuo_9.py | 409f1278f18bac3fdd0fd7919b0002ac306a61bc | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py |
def possible_path(lst):
if lst[0]=='H':return all([lst[i]=='H' for i in range(0,len(lst),2)])
return all([lst[i]=='H' for i in range(1,len(lst),2)])
| [
"[email protected]"
]
| |
0573e8f5815111aa5ad910bbc7b918fd7bed2a85 | 459929ce79538ec69a6f8c32e608f4e484594d68 | /venv/Lib/site-packages/kubernetes/client/models/v1_node_system_info.py | 5e7fd492a500cec487b8f996d06402ff4cbccda8 | []
| no_license | yychai97/Kubernetes | ec2ef2a98a4588b7588a56b9d661d63222278d29 | 2955227ce81bc21f329729737b5c528b02492780 | refs/heads/master | 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,335 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1NodeSystemInfo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'architecture': 'str',
'boot_id': 'str',
'container_runtime_version': 'str',
'kernel_version': 'str',
'kube_proxy_version': 'str',
'kubelet_version': 'str',
'machine_id': 'str',
'operating_system': 'str',
'os_image': 'str',
'system_uuid': 'str'
}
attribute_map = {
'architecture': 'architecture',
'boot_id': 'bootID',
'container_runtime_version': 'containerRuntimeVersion',
'kernel_version': 'kernelVersion',
'kube_proxy_version': 'kubeProxyVersion',
'kubelet_version': 'kubeletVersion',
'machine_id': 'machineID',
'operating_system': 'operatingSystem',
'os_image': 'osImage',
'system_uuid': 'systemUUID'
}
def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None): # noqa: E501
"""V1NodeSystemInfo - a model defined in OpenAPI""" # noqa: E501
self._architecture = None
self._boot_id = None
self._container_runtime_version = None
self._kernel_version = None
self._kube_proxy_version = None
self._kubelet_version = None
self._machine_id = None
self._operating_system = None
self._os_image = None
self._system_uuid = None
self.discriminator = None
self.architecture = architecture
self.boot_id = boot_id
self.container_runtime_version = container_runtime_version
self.kernel_version = kernel_version
self.kube_proxy_version = kube_proxy_version
self.kubelet_version = kubelet_version
self.machine_id = machine_id
self.operating_system = operating_system
self.os_image = os_image
self.system_uuid = system_uuid
@property
def architecture(self):
"""Gets the architecture of this V1NodeSystemInfo. # noqa: E501
The Architecture reported by the node # noqa: E501
:return: The architecture of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._architecture
@architecture.setter
def architecture(self, architecture):
"""Sets the architecture of this V1NodeSystemInfo.
The Architecture reported by the node # noqa: E501
:param architecture: The architecture of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if architecture is None:
raise ValueError("Invalid value for `architecture`, must not be `None`") # noqa: E501
self._architecture = architecture
@property
def boot_id(self):
"""Gets the boot_id of this V1NodeSystemInfo. # noqa: E501
Boot ID reported by the node. # noqa: E501
:return: The boot_id of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._boot_id
@boot_id.setter
def boot_id(self, boot_id):
"""Sets the boot_id of this V1NodeSystemInfo.
Boot ID reported by the node. # noqa: E501
:param boot_id: The boot_id of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if boot_id is None:
raise ValueError("Invalid value for `boot_id`, must not be `None`") # noqa: E501
self._boot_id = boot_id
@property
def container_runtime_version(self):
"""Gets the container_runtime_version of this V1NodeSystemInfo. # noqa: E501
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). # noqa: E501
:return: The container_runtime_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._container_runtime_version
@container_runtime_version.setter
def container_runtime_version(self, container_runtime_version):
"""Sets the container_runtime_version of this V1NodeSystemInfo.
ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). # noqa: E501
:param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if container_runtime_version is None:
raise ValueError("Invalid value for `container_runtime_version`, must not be `None`") # noqa: E501
self._container_runtime_version = container_runtime_version
@property
def kernel_version(self):
"""Gets the kernel_version of this V1NodeSystemInfo. # noqa: E501
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). # noqa: E501
:return: The kernel_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
"""Sets the kernel_version of this V1NodeSystemInfo.
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). # noqa: E501
:param kernel_version: The kernel_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if kernel_version is None:
raise ValueError("Invalid value for `kernel_version`, must not be `None`") # noqa: E501
self._kernel_version = kernel_version
@property
def kube_proxy_version(self):
"""Gets the kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
KubeProxy Version reported by the node. # noqa: E501
:return: The kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kube_proxy_version
@kube_proxy_version.setter
def kube_proxy_version(self, kube_proxy_version):
"""Sets the kube_proxy_version of this V1NodeSystemInfo.
KubeProxy Version reported by the node. # noqa: E501
:param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if kube_proxy_version is None:
raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`") # noqa: E501
self._kube_proxy_version = kube_proxy_version
@property
def kubelet_version(self):
"""Gets the kubelet_version of this V1NodeSystemInfo. # noqa: E501
Kubelet Version reported by the node. # noqa: E501
:return: The kubelet_version of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._kubelet_version
@kubelet_version.setter
def kubelet_version(self, kubelet_version):
"""Sets the kubelet_version of this V1NodeSystemInfo.
Kubelet Version reported by the node. # noqa: E501
:param kubelet_version: The kubelet_version of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if kubelet_version is None:
raise ValueError("Invalid value for `kubelet_version`, must not be `None`") # noqa: E501
self._kubelet_version = kubelet_version
@property
def machine_id(self):
"""Gets the machine_id of this V1NodeSystemInfo. # noqa: E501
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html # noqa: E501
:return: The machine_id of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._machine_id
@machine_id.setter
def machine_id(self, machine_id):
"""Sets the machine_id of this V1NodeSystemInfo.
MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html # noqa: E501
:param machine_id: The machine_id of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if machine_id is None:
raise ValueError("Invalid value for `machine_id`, must not be `None`") # noqa: E501
self._machine_id = machine_id
@property
def operating_system(self):
"""Gets the operating_system of this V1NodeSystemInfo. # noqa: E501
The Operating System reported by the node # noqa: E501
:return: The operating_system of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
"""Sets the operating_system of this V1NodeSystemInfo.
The Operating System reported by the node # noqa: E501
:param operating_system: The operating_system of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if operating_system is None:
raise ValueError("Invalid value for `operating_system`, must not be `None`") # noqa: E501
self._operating_system = operating_system
@property
def os_image(self):
"""Gets the os_image of this V1NodeSystemInfo. # noqa: E501
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). # noqa: E501
:return: The os_image of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._os_image
@os_image.setter
def os_image(self, os_image):
"""Sets the os_image of this V1NodeSystemInfo.
OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). # noqa: E501
:param os_image: The os_image of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if os_image is None:
raise ValueError("Invalid value for `os_image`, must not be `None`") # noqa: E501
self._os_image = os_image
@property
def system_uuid(self):
"""Gets the system_uuid of this V1NodeSystemInfo. # noqa: E501
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html # noqa: E501
:return: The system_uuid of this V1NodeSystemInfo. # noqa: E501
:rtype: str
"""
return self._system_uuid
@system_uuid.setter
def system_uuid(self, system_uuid):
"""Sets the system_uuid of this V1NodeSystemInfo.
SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html # noqa: E501
:param system_uuid: The system_uuid of this V1NodeSystemInfo. # noqa: E501
:type: str
"""
if system_uuid is None:
raise ValueError("Invalid value for `system_uuid`, must not be `None`") # noqa: E501
self._system_uuid = system_uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSystemInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
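# --- editor's usage sketch (not part of the generated client) ---------------------
# Assumes the generated keyword-argument constructor of this model; every field shown
# above is required, so assigning None (or omitting one) raises ValueError.
#
#     info = V1NodeSystemInfo(
#         architecture="amd64", boot_id="b0f1c3", container_runtime_version="containerd://1.6.8",
#         kernel_version="5.15.0-76-generic", kube_proxy_version="v1.27.3", kubelet_version="v1.27.3",
#         machine_id="ec2a7f", operating_system="linux", os_image="Ubuntu 22.04.2 LTS", system_uuid="4C4C4544",
#     )
#     info.to_dict()             # plain dict of the model, ready for json.dumps
#     info.architecture = None   # ValueError: must not be `None`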
| [
"[email protected]"
]
| |
901a12d85d7a567cb6b96e5c26267c541a2bee5a | 75af3c931671a55ea0058cea6e83e90dc1aed6d1 | /profil3r/app/core/services/_porn.py | 2ab894337d20e37f5825d30f488926628e176fca | [
"MIT"
]
| permissive | derinkebapiskender/Profil3r | e711a381d84b27744d5289a87c99b4b8e77b8866 | d45fea1efab0487bfac49e422ebc46cb26b29582 | refs/heads/main | 2023-07-01T00:19:24.113439 | 2021-07-26T23:08:59 | 2021-07-26T23:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from profil3r.app.modules.porn.pornhub import Pornhub
from profil3r.app.modules.porn.redtube import Redtube
from profil3r.app.modules.porn.xvideos import Xvideos
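# editor's note (assumption): these are module-level functions that Profil3r later binds
# onto its core class, which is why they take `self` and expect `self.config`,
# `self.permutations_list` and `self.print_results` to be supplied by that class,
# e.g. roughly:  Core.pornhub = pornhub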
# Pornhub
def pornhub(self):
self.result["pornhub"] = Pornhub(self.config, self.permutations_list).search()
# print results
self.print_results("pornhub")
return self.result["pornhub"]
# Redtube
def redtube(self):
self.result["redtube"] = Redtube(self.config, self.permutations_list).search()
# print results
self.print_results("redtube")
return self.result["redtube"]
# XVideos
def xvideos(self):
self.result["xvideos"] = Xvideos(self.config, self.permutations_list).search()
# print results
self.print_results("xvideos")
return self.result["xvideos"] | [
"[email protected]"
]
| |
9800f6c861feb1d3a793b713f5649f8631284b22 | b77cc1448ae2c68589c5ee24e1a0b1e53499e606 | /appraisal/migrations/0034_appraisal_performance_classification.py | 4206da7d9f420b1fcc58d45701b6a13bd21cbc9c | []
| no_license | PregTech-c/Hrp_system | a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7 | 11d8dd3221497c536dd7df9028b9991632055b21 | refs/heads/master | 2022-10-09T07:54:49.538270 | 2018-08-21T11:12:04 | 2018-08-21T11:12:04 | 145,424,954 | 1 | 1 | null | 2022-10-01T09:48:53 | 2018-08-20T13:58:31 | JavaScript | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-08-13 21:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appraisal', '0033_auto_20180813_1332'),
]
operations = [
migrations.AddField(
model_name='appraisal',
name='performance_classification',
field=models.CharField(blank=True, choices=[('0', 'Failure'), ('1', 'Improvement Needed'), ('2', 'Excellent'), ('3', 'Exceptional')], default='0', max_length=1),
),
]
| [
"[email protected]"
]
| |
6840e0f3781e3462bc040ce3644bd3b74bc2f88e | ba20d56f38844fc4a0c8956d9f2319b8202ef9cc | /app/googlelogin/admin.py | 1e6bdefef3a2e9ad77b8c5a0512375de6737f6ac | []
| no_license | parkhongbeen/test-social-login | 3200568afbab6437d466f8246df4376798a85fdf | 7810ea69a416b713539a2cff17c63f87f93ec0d3 | refs/heads/master | 2022-11-17T12:10:17.343631 | 2020-07-14T08:12:19 | 2020-07-14T08:12:19 | 279,483,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.contrib import admin
from googlelogin.models import Blog
admin.site.register(Blog)
| [
"[email protected]"
]
| |
1b8a8570b27806bdeec6392585f3026f4913f1fb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_strongholds.py | 55c41458d6f0e15de2928d80a317f4df66fb17c1 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#class header
class _STRONGHOLDS():
	def __init__(self,):
		self.name = "STRONGHOLDS"
		# editor's assumption: the original read `self.definitions = stronghold`, which
		# raises a NameError; the real definition text is not preserved in this dump
		self.definitions = ['stronghold']
		self.parents = []
		self.childen = []
		self.properties = []
		self.jsondata = {}
		self.basic = ['stronghold']
| [
"[email protected]"
]
| |
3c838498f6089798a32bacec1df55395f584f265 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03695/s950925933.py | e48c5ff64081e4d044a733a6017beb03e5e51894 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | n = int(input())
A = list(map(int, input().split()))
color = []
other = 0
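# classify each rating into one of the eight fixed colour bands; ratings of 3200 or
# higher may display any colour, so they only widen the possible maximum counted below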
for a in A:
if a < 400: color.append(1)
elif a < 800: color.append(2)
elif a < 1200: color.append(3)
elif a < 1600: color.append(4)
elif a < 2000: color.append(5)
elif a < 2400: color.append(6)
elif a < 2800: color.append(7)
elif a < 3200: color.append(8)
else: other += 1
c = len(set(color))
if color: print(c, c+other)
else: print(1, other) | [
"[email protected]"
]
| |
37b31d7c83517db42c782f28736d5e2d0a9d7128 | ed296ff86f13b1d9e41fbf6aace441090850ef57 | /chapter_03_data_modeling/account_example/account_example/example/migrations/0003_internalaccount_account_number.py | 53c0a79cd57dc61299f17ccdb16473cda975e38f | [
"MIT"
]
| permissive | cnb0/Python-Architecture-Patterns | 2a6113605f95a4c99369ee0febe2812c792a7acf | 2194746ea334c9dd84b5547a6d59ebfa95ced394 | refs/heads/main | 2023-08-12T02:31:17.187913 | 2021-10-12T19:16:14 | 2021-10-12T19:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # Generated by Django 3.2 on 2021-04-18 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('example', '0002_internalaccount_initial_amount'),
]
operations = [
migrations.AddField(
model_name='internalaccount',
name='account_number',
field=models.IntegerField(default=0, unique=True),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
f134a84e1d0e478b6e38dfe4818b42ecbd892513 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225504.py | 431121b82bb35a87337da0908ef3299621934ef2 | []
| no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
        # assumed completion: the original opened the file in the default read-only mode
        # and the with-block body was truncated ("a"); append the record and return the item
        with open('./week01/homework02/top10.csv', 'a', encoding='utf-8') as article:
            article.write(output)
        return item
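# editor's sketch (assumption about this project's settings module, following the
# ITEM_PIPELINES note in the boilerplate comment above):
#     ITEM_PIPELINES = {'maoyanspiders.pipelines.MaoyanspidersPipeline': 300}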
| [
"[email protected]"
]
| |
1097ae22a7073a902f0a5afb758647bb025d8aa7 | 052a89753a7917b7fa0ccdf5718d5250a1379d2c | /bin/painter.py | 007ccd5118b932b88dfafb856861b084ae5a60d4 | []
| no_license | bopopescu/aws.example.com | 25e2efda3bd9ae2a257c34904ccb53043fe20b55 | 97254868688c3c3a991843fcacc973c93b366700 | refs/heads/master | 2022-11-22T07:06:30.386034 | 2016-10-25T15:22:14 | 2016-10-25T15:22:14 | 282,553,417 | 0 | 0 | null | 2020-07-26T01:22:26 | 2020-07-26T01:22:25 | null | UTF-8 | Python | false | false | 2,169 | py | #!/Users/deanarmada/Desktop/projects/python-projects/aws.example.com/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
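# usage: python painter.py <imagefile> -- drag with the left mouse button to desaturate
# the area under the cursor; only the touched 32x32 tiles are re-pasted, so Tk never has
# to redraw the whole PhotoImage on every stroke (which is the point of the tile cache)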
| [
"[email protected]"
]
| |
a1f9582d5a8046a5d7bcfe68ba761242d32ba88c | 0d9c964fd7644395a3f0763f484e485fcc67f762 | /new/src/13 12 2019/mini task 1.py | cb9ac4aff30b02ac6ad9a8d03e0769d6bfa3bfd4 | [
"Apache-2.0"
]
| permissive | VladBaryliuk/my_start_tasks | eaa2e6ff031f2f504be11f0f64f5d99bd1a68a0e | bf387543e6fa3ee303cbef04d2af48d558011ed9 | refs/heads/main | 2023-04-14T14:00:08.415787 | 2021-04-24T13:47:38 | 2021-04-24T13:47:38 | 354,538,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | a = str (input())
b = int (input())
for i in range(1,b+1) :
print(a)
| [
"[email protected]"
]
| |
ce8f7a72520ec6d106c411bd7698922722e9cd8f | 20cf9a80fd651b5adb7242bf17da2c323785f776 | /01_Python/00_python_fund/filter_type.py | 5d5aa673f2d382415462308c99d0696e18c93a76 | []
| no_license | ethanlow23/codingDojoCoursework | 4f2ea0490437fe61a927f665c6b6b23435f095cf | 1a8fcad44377727d43517d34bd1f425fc5d6abab | refs/heads/master | 2020-04-28T18:58:53.696656 | 2019-03-13T20:56:44 | 2019-03-13T20:56:44 | 175,496,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py |
sI = 45
mI = 100
bI = 455
eI = 0
spI = -23
sS = "Rubber baby buggy bumpers"
mS = "Experience is simply the name we give our mistakes"
bS = "Tell me and I forget. Teach me and I remember. Involve me and I learn."
eS = ""
aL = [1,7,4,21]
mL = [3,5,7,34,3,2,113,65,8,89]
lL = [4,34,22,68,9,13,3,5,7,9,2,12,45,923]
eL = []
spL = ['name','address','phone number','social security number']
n = mS  # editor's assumption: `n` was never defined in the original; bind it to one of the sample values above (swap in any other to test it)
if isinstance(n, int) or isinstance(n, float):
if n >= 100:
print "that's a big number"
else:
print "that's a small number"
elif isinstance(n, str):
if len(n) >= 50:
print "long sentence"
else:
print "short sentence"
elif isinstance(n, list):
if len(n) >= 10:
print "big list"
else:
print "short list" | [
"[email protected]"
]
| |
dfbf2bb00e0b704fed20409a0534dfac4dcf35a2 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/plotly/graph_objs/box/marker/__init__.py | 79b6f6b71d76d620d552542f9dba26d6c09b5849 | [
"MIT"
]
| permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 10,805 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets themarker.linecolor. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# outliercolor
# ------------
@property
def outliercolor(self):
"""
Sets the border line color of the outlier sample points.
Defaults to marker.color
The 'outliercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outliercolor"]
@outliercolor.setter
def outliercolor(self, val):
self["outliercolor"] = val
# outlierwidth
# ------------
@property
def outlierwidth(self):
"""
Sets the border line width (in px) of the outlier sample
points.
The 'outlierwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlierwidth"]
@outlierwidth.setter
def outlierwidth(self, val):
self["outlierwidth"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "box.marker"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
outliercolor
Sets the border line color of the outlier sample
points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the outlier
sample points.
width
Sets the width (in px) of the lines bounding the marker
points.
"""
def __init__(
self,
arg=None,
color=None,
outliercolor=None,
outlierwidth=None,
width=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.box.marker.Line
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
outliercolor
Sets the border line color of the outlier sample
points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the outlier
sample points.
width
Sets the width (in px) of the lines bounding the marker
points.
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.box.marker.Line
constructor must be a dict or
an instance of plotly.graph_objs.box.marker.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.box.marker import line as v_line
# Initialize validators
# ---------------------
self._validators["color"] = v_line.ColorValidator()
self._validators["outliercolor"] = v_line.OutliercolorValidator()
self._validators["outlierwidth"] = v_line.OutlierwidthValidator()
self._validators["width"] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("outliercolor", None)
self["outliercolor"] = outliercolor if outliercolor is not None else _v
_v = arg.pop("outlierwidth", None)
self["outlierwidth"] = outlierwidth if outlierwidth is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Line"]
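# editor's usage sketch (assumption: the regular plotly.graph_objects entry point; the
# Line built here is the class defined above):
#     import plotly.graph_objects as go
#     outline = go.box.marker.Line(color="royalblue", width=1, outliercolor="red", outlierwidth=2)
#     fig = go.Figure(go.Box(y=[1, 2, 3, 15], boxpoints="outliers", marker={"line": outline}))
#     fig.show()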
| [
"[email protected]"
]
| |
3aab6d9e3ab7cac0507d1de4eeb54cfa9f9bdc8e | d8346eaf1c910ff02c7b243692a2766b8b089f06 | /for-post/python-string/s2-methods/split.py | 33b6036796fcee2b77b0370fa84b79457b731e68 | []
| no_license | dustinpfister/examples-python | 55304c99ba3af82cd8784ee98745546632155c68 | a9910ee05d4df524f951f61b6d9778531a58ccbf | refs/heads/master | 2023-03-06T21:10:18.888654 | 2021-02-26T20:32:52 | 2021-02-26T20:32:52 | 318,595,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | str = '0,1,2,3,4,5'
l = str.split(',')
print(type(l).__name__) # list
print(l[3]) # 3
str = '012345'
# I can not give an empty string as a sep
# doing so will result in an error
try:
l = str.split('')
except ValueError:
print('ValueError')
# however there are a number of other ways to
# get that kind of list such as passing the string value
# to the list built in function
l = list(str);
print(type(l).__name__) # list
print(l[3]) # 3
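# a couple of extra examples (editor's addition, same idea as above)
# the optional second argument caps the number of splits
s = 'a,b,c,d'
print(s.split(',', 2)) # ['a', 'b', 'c,d']
# and join is the inverse of split
print('-'.join(['0', '1', '2'])) # 0-1-2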
| [
"[email protected]"
]
|