blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7274d5d6acd06026bb0e3945cca73daf74e06bf3
|
4e163aa4aa0f4c4ddc22f74ae21b6fb1c85a7a09
|
/134.加油站.py
|
2f5d07a462e88ceb77dbdc591efd179e9402385b
|
[] |
no_license
|
dxc19951001/Everyday_LeetCode
|
72f46a0ec2fc651168129720ad0b1e7b5c372b0b
|
3f7b2ea959308eb80f4c65be35aaeed666570f80
|
refs/heads/master
| 2023-08-03T09:22:08.467100 | 2023-07-23T17:08:27 | 2023-07-23T17:08:27 | 270,723,436 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,093 |
py
|
# coding=utf-8
"""
@project: Everyday_LeetCode
@Author:Charles
@file: 134.加油站.py
@date:2023/1/10 15:35
"""
class Solution(object):
    def canCompleteCircuit(self, gas, cost):
        """
        :type gas: List[int]
        :type cost: List[int]
        :rtype: int

        Return the index of the starting station from which the circuit
        can be completed once, or -1 if impossible.

        Greedy argument:
        1. If sum(gas) < sum(cost), total fuel cannot cover total
           consumption, so no starting point works.
        2. Otherwise a solution exists (the problem guarantees it is
           unique).  Track the running surplus cur_sum += gas[i] - cost[i];
           whenever it drops below zero, no station in the scanned prefix
           can be the answer, so restart the candidate at i + 1 with an
           empty tank.

        Fix: removed leftover debug print statements (including the
        misspelled "cur sun" trace) that polluted stdout on every call.
        """
        if sum(gas) < sum(cost):
            return -1
        start = 0
        cur_sum = 0
        for i in range(len(gas)):
            cur_sum += gas[i] - cost[i]
            # A negative surplus means no station from `start` through `i`
            # can reach station i + 1: restart the search just past i.
            if cur_sum < 0:
                cur_sum = 0
                start = i + 1
        return start
|
[
"[email protected]"
] | |
e1dcd2a11d7423ba518efc1697c3a148293ffa2a
|
5456502f97627278cbd6e16d002d50f1de3da7bb
|
/components/google/core/browser/DEPS
|
26e9743a04d2db628f4a7357a7d73e4ad5cf843a
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/Chromium_7C66
|
72d108a413909eb3bd36c73a6c2f98de1573b6e5
|
c8649ab2a0f5a747369ed50351209a42f59672ee
|
refs/heads/master
| 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
# Chromium-style DEPS include_rules: extra source roots this component is
# allowed to #include from (each entry is prefixed with "+").
include_rules = [
  "+components/data_use_measurement/core",
  "+components/keyed_service/core",
  "+components/pref_registry",
]
|
[
"[email protected]"
] | ||
bfe6d113c6248860931cd8d1870126fdd8a59693
|
2194b6c17f3153c5976d6ac4a9ab78211027adab
|
/otoroshi_admin_api_client/models/otoroshimodels_rs_algo_settings.py
|
d3bb0d40170f95f52921aa7d6749dcfb1d4114f7
|
[] |
no_license
|
krezreb/otoroshi-admin-api-client
|
7fab5e873c9c5950d77fffce6bcf80d3fdf4c319
|
9b3156c11eac227024cfe4a26c0129618deb2c4d
|
refs/heads/master
| 2023-05-08T08:32:00.982987 | 2021-05-27T09:55:00 | 2021-05-27T09:55:00 | 371,324,636 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,732 |
py
|
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..models.null import Null
from ..models.otoroshimodels_rs_algo_settings_type import OtoroshimodelsRSAlgoSettingsType
from ..types import UNSET, Unset
T = TypeVar("T", bound="OtoroshimodelsRSAlgoSettings")


@attr.s(auto_attribs=True)
class OtoroshimodelsRSAlgoSettings:
    """Settings to use RSA signing algorithm.

    Generated API model.  All fields default to UNSET so that to_dict()
    can distinguish "not provided" from an explicit value; unknown keys
    from the wire are kept in `additional_properties`.
    """

    private_key: Union[Null, Unset, str] = UNSET
    size: Union[Unset, int] = UNSET
    public_key: Union[Unset, str] = UNSET
    type: Union[Unset, OtoroshimodelsRSAlgoSettingsType] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting fields that are UNSET."""
        private_key: Union[Dict[str, Any], Unset, str]
        if isinstance(self.private_key, Unset):
            private_key = UNSET
        elif isinstance(self.private_key, Null):
            # Fix: the generated code first assigned UNSET here, then
            # unconditionally overwrote it via a redundant inner
            # `if not isinstance(..., Unset)` check (dead store).  A Null
            # value always serializes through its own to_dict().
            private_key = self.private_key.to_dict()
        else:
            private_key = self.private_key
        size = self.size
        public_key = self.public_key
        type: Union[Unset, str] = UNSET
        if not isinstance(self.type, Unset):
            type = self.type.value
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if private_key is not UNSET:
            field_dict["privateKey"] = private_key
        if size is not UNSET:
            field_dict["size"] = size
        if public_key is not UNSET:
            field_dict["publicKey"] = public_key
        if type is not UNSET:
            field_dict["type"] = type
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from *src_dict*; unconsumed keys end up in
        `additional_properties`."""
        d = src_dict.copy()

        def _parse_private_key(data: object) -> Union[Null, Unset, str]:
            # privateKey may be absent (Unset), the modeled Null object
            # (a dict on the wire), or a plain string; try the Null shape
            # first and fall back to returning the raw value.
            if isinstance(data, Unset):
                return data
            try:
                if not isinstance(data, dict):
                    raise TypeError()
                _private_key_type_0 = data
                private_key_type_0: Union[Unset, Null]
                if isinstance(_private_key_type_0, Unset):
                    private_key_type_0 = UNSET
                else:
                    private_key_type_0 = Null.from_dict(_private_key_type_0)
                return private_key_type_0
            except:  # noqa: E722
                pass
            return cast(Union[Null, Unset, str], data)

        private_key = _parse_private_key(d.pop("privateKey", UNSET))
        size = d.pop("size", UNSET)
        public_key = d.pop("publicKey", UNSET)
        _type = d.pop("type", UNSET)
        type: Union[Unset, OtoroshimodelsRSAlgoSettingsType]
        if isinstance(_type, Unset):
            type = UNSET
        else:
            type = OtoroshimodelsRSAlgoSettingsType(_type)
        otoroshimodels_rs_algo_settings = cls(
            private_key=private_key,
            size=size,
            public_key=public_key,
            type=type,
        )
        # Whatever remains in d was not a modeled field.
        otoroshimodels_rs_algo_settings.additional_properties = d
        return otoroshimodels_rs_algo_settings

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (unmodeled) keys."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
[
"[email protected]"
] | |
f38e52cda7f5a3f771a65f7eeb92d6375981bb4a
|
f25440c9f9fd470ba44394a36d5659dd47ee8800
|
/tests/conftest.py
|
6ee0b688b6b226162706d75c8e1acd7eadcb3541
|
[] |
no_license
|
kqf/hubmap
|
75010d9109f8b8656e244179de5de226be584d5b
|
37b3d839f0ad3f47dc39c1b9b036cb1acc27ca2c
|
refs/heads/master
| 2023-02-20T04:06:00.145932 | 2021-01-23T07:56:13 | 2021-01-23T07:56:13 | 317,635,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 552 |
py
|
import pytest
import tempfile
from pathlib import Path
from models.preprocess import write
from models.mc import make_blob, blob2image
@pytest.fixture
def size():
    # Default edge length, in pixels, of the square test images.
    return 256
@pytest.fixture
def fake_dataset(size=256, nfiles=5):
    """Yield a temporary directory holding `nfiles` synthetic samples.

    Each sample i lives in <dir>/<i>/ as a mask.png / tile.png pair, where
    the tile is derived from the mask via blob2image.  The directory is
    removed automatically on fixture teardown.

    NOTE(review): `size` here is a plain default argument, not the `size`
    fixture above — the fixture is shadowed; confirm that is intended.
    """
    with tempfile.TemporaryDirectory() as dirname:
        path = Path(dirname)
        for i in range(nfiles):
            mask = make_blob(size, size)
            # assumes write() creates the per-sample parent directory — TODO confirm
            write(mask, path / str(i) / "mask.png")
            tile = blob2image(mask)
            write(tile, path / str(i) / "tile.png")
        yield path
|
[
"[email protected]"
] | |
5fd5f69280f7e2c8dfa60b2c1d5a770471cc61ab
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2520/60790/274856.py
|
18e4e0a2884251d28a2c4c3bc79d6f4d2f5ba4c8
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
# Reads grid dimensions (R rows, C columns) and a start cell (r0, c0) from
# stdin, then prints every cell coordinate sorted by Manhattan distance
# from the start (cf. LeetCode 1030 "Matrix Cells in Distance Order" —
# inferred from the structure; the original problem statement is not
# visible here).
R=int(input())
C=int(input())
r0=int(input())
c0=int(input())
# NOTE(review): prints the raw list-of-lists repr; confirm this matches
# the judge's expected output format.
print(sorted([[i, j] for i in range(R) for j in range(C)], key=lambda x: abs(x[0] - r0) + abs(x[1] - c0)))
|
[
"[email protected]"
] | |
b903d4dafdaad69917379130429923b552115ff8
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/AddEnvironmentRequest.py
|
0d20b6717b9971baad3c4aba3f7c1bdd0b316b36
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 |
NOASSERTION
| 2023-09-14T08:51:06 | 2015-07-23T09:39:45 |
Python
|
UTF-8
|
Python
| false | false | 2,700 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddEnvironmentRequest(RpcRequest):
	"""Generated RPC request builder for the Workbench-ide
	`AddEnvironment` API (version 2021-01-21), sent as a POST.

	Each get_*/set_* pair proxies one query parameter of the call.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'AddEnvironment')
		self.set_method('POST')
	def get_ProductId(self): # Long
		return self.get_query_params().get('ProductId')
	def set_ProductId(self, ProductId): # Long
		self.add_query_param('ProductId', ProductId)
	def get_EnvName(self): # String
		return self.get_query_params().get('EnvName')
	def set_EnvName(self, EnvName): # String
		self.add_query_param('EnvName', EnvName)
	def get_CurrentOrgId(self): # String
		return self.get_query_params().get('CurrentOrgId')
	def set_CurrentOrgId(self, CurrentOrgId): # String
		self.add_query_param('CurrentOrgId', CurrentOrgId)
	def get_SecurityGroupId(self): # String
		return self.get_query_params().get('SecurityGroupId')
	def set_SecurityGroupId(self, SecurityGroupId): # String
		self.add_query_param('SecurityGroupId', SecurityGroupId)
	def get_EnvDescription(self): # String
		return self.get_query_params().get('EnvDescription')
	def set_EnvDescription(self, EnvDescription): # String
		self.add_query_param('EnvDescription', EnvDescription)
	# NOTE: the doubled "s" in SupportComputeTypess is a typo in the
	# generated SDK, preserved here because renaming would break callers.
	# The repeat-list parameter is serialized 1-indexed
	# (SupportComputeTypes.1, SupportComputeTypes.2, ...).
	def get_SupportComputeTypess(self): # RepeatList
		return self.get_query_params().get('SupportComputeTypes')
	def set_SupportComputeTypess(self, SupportComputeTypes): # RepeatList
		for depth1 in range(len(SupportComputeTypes)):
			self.add_query_param('SupportComputeTypes.' + str(depth1 + 1), SupportComputeTypes[depth1])
	def get_VpcId(self): # String
		return self.get_query_params().get('VpcId')
	def set_VpcId(self, VpcId): # String
		self.add_query_param('VpcId', VpcId)
	def get_IsOpenNatEip(self): # Boolean
		return self.get_query_params().get('IsOpenNatEip')
	def set_IsOpenNatEip(self, IsOpenNatEip): # Boolean
		self.add_query_param('IsOpenNatEip', IsOpenNatEip)
|
[
"[email protected]"
] | |
62c20ca9fb15d381b187ac793e03b1b5242e6d37
|
495b0b8de3ecc341511cdb10f11368b35b585bea
|
/SoftLayer/CLI/modules/filters.py
|
1e4274ac04ae064468c5d1d0736b540b8f35416c
|
[] |
no_license
|
hugomatic/softlayer-api-python-client
|
cf6c1e6bfa32e559e72f8b0b069339ae8edd2ede
|
9c115f0912ee62763b805941593f6dd50de37068
|
refs/heads/master
| 2021-01-18T11:09:19.122162 | 2013-04-09T01:44:51 | 2013-04-09T01:44:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 996 |
py
|
"""
usage: sl help filters
Filters are used to limit the amount of results. Some commands will accept a
filter operation for certain fields. Filters can be applied across multiple
fields in most cases.
Available Operations:
Case Insensitive
'value' Exact value match
'value*' Begins with value
'*value' Ends with value
'*value*' Contains value
Case Sensitive
'~ value' Exact value match
'> value' Greater than value
'< value' Less than value
'>= value' Greater than or equal to value
'<= value' Less than or equal to value
Examples:
sl cci list --datacenter=dal05
sl cci list --hostname='prod*'
sl cci list --network=100 --cpu=2
sl cci list --network='< 100' --cpu=2
sl cci list --memory='>= 2048'
Note: Comparison operators (>, <, >=, <=) can be used with integers, floats,
and strings.
"""
# :copyright: (c) 2013, SoftLayer Technologies, Inc. All rights reserved.
# :license: BSD, see LICENSE for more details.
|
[
"[email protected]"
] | |
af585888517df64c46a62653fa6ff3912e6b9f0d
|
508c5e01aa7dce530093d5796250eff8d74ba06c
|
/code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_get.py
|
d39692be0008269bf1791e585f1e0e92b09181fa
|
[
"MIT",
"PostgreSQL"
] |
permissive
|
jhkuang11/UniTrade
|
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
refs/heads/master
| 2022-12-12T15:58:30.013516 | 2019-02-01T21:07:15 | 2019-02-01T21:07:15 | 166,479,655 | 0 | 0 |
MIT
| 2022-12-07T03:59:47 | 2019-01-18T22:19:45 |
Python
|
UTF-8
|
Python
| false | false | 2,132 |
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
class SchemaGetTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for extension node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def runTest(self):
""" This function will delete schema under database node. """
schema = parent_node_dict["schema"][-1]
db_id = schema["db_id"]
server_id = schema["server_id"]
server_response = server_utils.connect_server(self, server_id)
if not server_response["data"]["connected"]:
raise Exception("Could not connect to server to connect the"
" database.")
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
server_id,
db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database to get the schema.")
schema_id = schema["schema_id"]
schema_response = self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id) + '/' + str(db_id) +
'/' + str(schema_id),
content_type='html/json')
self.assertEquals(schema_response.status_code, 200)
# Disconnect the database
database_utils.disconnect_database(self, server_id, db_id)
|
[
"[email protected]"
] | |
6c16d977d5da188d8203250fd478cfac76c891cc
|
85c9d6fdff58b9cb40f5fdb9f01ff1a0dd386113
|
/bot_tests/reminder.py
|
ef7aa772e1bbf39b40113c0d3d7e94d3036748d1
|
[] |
no_license
|
jmccormac01/karmafleet
|
5874644c496b0bbcb2037404ad7ed43a1e4caaae
|
57ebefbbc6ec3aae634cd9196950f103d48eae95
|
refs/heads/master
| 2020-03-25T17:24:39.187176 | 2019-04-20T18:17:05 | 2019-04-20T18:17:05 | 143,976,406 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,922 |
py
|
"""
Bot for converting EVE times to local timezones
"""
from datetime import datetime
from pytz import timezone
import discord
from discord.ext import commands
import asyncio
# pylint: disable=invalid-name
# NOTE(review): `Client` appears unused; `client` (the commands.Bot)
# is the object actually driven below — confirm Client can be removed.
Client = discord.Client()
client = commands.Bot(command_prefix="!")
# author -> {due datetime -> note}; shared with reminder_handler below.
reminders = {}
async def reminder_handler(reminders):
    """Background task: every 10 s, deliver and delete any due reminders.

    *reminders* maps author -> {due datetime (naive UTC) -> note}.
    Uses the pre-1.0 discord.py API (client.send_message, is_closed as an
    attribute).  NOTE(review): on discord.py >= 1.0 `is_closed` is a
    method, so `not client.is_closed` would always be falsy and the loop
    body would never run — confirm the pinned library version.
    """
    await client.wait_until_ready()
    while not client.is_closed:
        broke = False
        print('Checking reminders...')
        print(reminders)
        now = datetime.utcnow()
        for a in reminders:
            print('Checking for author {}'.format(a))
            for t in reminders[a]:
                if now > t:
                    print(a, reminders[a][t])
                    await client.send_message(a, reminders[a][t])
                    # remove the reminder from the list
                    # Deleting while iterating invalidates both loops, so
                    # flag and restart the whole scan from the top.
                    del reminders[a][t]
                    broke = True
                    break
            if broke:
                break
        await asyncio.sleep(10)
@client.event
async def on_ready():
    """
    Log to stdout once the bot has connected and is ready.
    """
    print('Ready for remembering stuff...')
@client.event
async def on_message(message):
    """
    Handle incoming '!reminder <ISO-time> <note>' messages: delete the
    command message and file the note under its author, to be delivered
    later by reminder_handler.

    Fix: removed unused locals (`return_message`, `error_count`) and a
    stale docstring copied from a different bot.
    """
    sp = message.content.split()
    # Expected shape: !reminder 2019-01-01T12:00 some note text...
    if len(sp) >= 3 and sp[0].lower() == '!reminder':
        author = message.author
        await client.delete_message(message)
        # Naive datetime; compared against datetime.utcnow() in the handler.
        # Raises ValueError on a malformed timestamp (unhandled, as before).
        reminder_time = datetime.strptime(sp[1], '%Y-%m-%dT%H:%M')
        note = ' '.join(sp[2:])
        if author not in reminders:
            reminders[author] = {}
        reminders[author][reminder_time] = note
        print(reminders)
print(reminders)
client.loop.create_task(reminder_handler(reminders))
client.run('NDk0OTQ2Mzg3ODM5MDI1MTYz.Do66Yw.nsleHS3S8UvbWdBugiDtPWHrIKY')
|
[
"[email protected]"
] | |
41dfb043debbb31d564d9bdcdda0dd997a4a98a5
|
dca5705c291da76cbfaf3897680eb0ae2eb56e2b
|
/aayushg_assgn/myauth/views.py
|
face35c4566395dead6248d30c8430cf8b2fedf8
|
[] |
no_license
|
gadia-aayush/Django-API-1
|
41a40598653009def8ca5bda9a578a26b8bf9115
|
307202ad0aa4357408e756cd74f3723e74fca253
|
refs/heads/master
| 2022-12-13T23:09:45.960562 | 2020-08-30T19:36:16 | 2020-08-30T19:36:16 | 273,763,155 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,723 |
py
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework import views
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
import re
def user_login(request):
    """Log a user in from POSTed phone/password; render the form on GET.

    Responds with a JSON payload describing success (200), a disabled
    account (403) or bad credentials (401).
    """
    if request.method != 'POST':
        # Anything but a POST just shows the login page.
        return render(request, 'login.html')
    phone = request.POST.get('phone')
    secret = request.POST.get('password')
    user = authenticate(username=phone, password=secret)
    if user is None:
        return JsonResponse(
            {"code": 401, "status": "Unauthorized", "message": "Invalid Login Credentials"})
    if not user.is_active:
        return JsonResponse(
            {"code": 403, "status": "Forbidden", "message": "User Disabled"})
    login(request, user)
    return JsonResponse(
        {"code": 200, "status": "OK", "message": "LogIn Successfull"})
# Django Rest Framework used
class logout(APIView):
    """Token logout endpoint: deletes the caller's DRF auth token,
    invalidating their session.

    NOTE(review): this class name shadows django.contrib.auth.logout
    imported above — confirm the function version is not needed here.
    """
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        user = request.user
        # Raises Token.DoesNotExist (unhandled) if the user has no token.
        token = Token.objects.get(user=user)
        if token:
            token.delete()
        data = {"code" : 200, "status" : "OK", "message" : "Log Out Successfull"}
        return Response(data)
def user_signup(request):
    """Register a new user from POSTed phone/password/name/email; render
    the sign-up form on GET.

    Validation order: phone uniqueness, phone format (10 digits, no
    leading zero), name, optional email, password strength.  Each failure
    returns a JSON payload with an HTTP-style code, mirroring user_login.
    """
    if request.method == 'POST':
        username = request.POST.get('phone')
        password = request.POST.get('password')
        name = request.POST.get('name')
        email = request.POST.get('email')
        #validate whether the phone number is registered or not
        try:
            if User.objects.get(username = username):
                data = {"code" : 403, "status" : "Forbidden", "message" : "Entered Mobile Number is already registered. Try loggin-in"}
                return JsonResponse(data)
        except User.DoesNotExist:
            # Fix: this was a bare `except: pass`, which also swallowed
            # real database errors.  Only "no such user" means the
            # registration may proceed.
            pass
        #validate mobile number [must be 10 digits. assumed that all are of India, so ignored prefixed country codes]
        phoneregex = re.compile(r'^[1-9]\d{9}$')
        if phoneregex.search(str(username)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Mobile Number should be of 10 digits- ^[1-9]\d{9}$"}
            return JsonResponse(data)
        #validate name, making sure it is not empty
        firstregex = re.compile(r"^[A-Za-z][A-Za-z,.'].*$")
        if firstregex.search(str(name)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Name should start with an alphabet- ^[A-Za-z][A-Za-z,.']*$"}
            return JsonResponse(data)
        #validate email address (optional: empty string skips the check)
        emailregex = re.compile(r"^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$")
        if str(email) != "":
            if emailregex.search(str(email)):
                pass
            else:
                data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Enter a valid email address- ^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$"}
                return JsonResponse(data)
        #validate password
        passregex = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$")
        if passregex.search(str(password)):
            pass
        else:
            data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Password should be between 8 to 15 characters which contain at least one lowercase letter, one uppercase letter, one numeric digit, and one special character- ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$"}
            return JsonResponse(data)
        authobj = User.objects.create_user(username = username, password = password, first_name = name, email = email)
        authobj.save()
        data = {"code" : 201, "status" : "Created", "message" : "Sign-Up Successfull"}
        return JsonResponse(data)
    else:
        return render(request,'user_signup.html')
# Django Rest Framework used
@api_view(['POST', ])
def get_token(request):
    """Issue (or return the already-existing) DRF auth token for the
    phone/password credentials in the request body.

    200 with the existing token, 201 when a new one is minted, 403 for a
    disabled account, 401 for bad credentials.
    """
    if request.method == 'POST':
        username = request.data.get('phone')
        password = request.data.get('password')
        user = authenticate(username = username, password = password)
        if user :
            if user.is_active:
                tokened = Token.objects.filter(user=user)
                data = {}
                if tokened.count()>0:
                    # Existing token: report it back unchanged.
                    data["code"] = 200
                    data["status"] = "OK"
                    data["message"] = "Token already Exists"
                    data["phone"] = username
                    data["Token"] = tokened[0].key
                    return Response(data)
                else:
                    # First login: mint a fresh token.
                    token = Token.objects.create(user=user)
                    data["code"] = 201
                    data["status"] = "Created"
                    data["message"] = "Token Created"
                    data["Token"] = token.key
                    data["phone"] = username
                    return Response(data)
            else:
                data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
                return Response(data)
        else:
            data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
            return Response(data)
|
[
"[email protected]"
] | |
ab04985a81690a29fc99f93e08d4a4ec4e364ad5
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_004_20180618143456.py
|
c999da2e6ae97112548cc81b5e4e3de4c117dc62
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,415 |
py
|
from random import randint
# Sudoku1 almost solved
# Sudoku1 almost solved
# 9x9 grid of ints; ' ' (single-space string) marks cells the player
# still has to fill in.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
def printSudoku():
    """Print the global `sudoku1` grid with 1-based row/column labels."""
    i = 0
    while i < 10:
        if i == 0:
            # Column header before the first row.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Horizontal separator after each 3-row band (and at the end).
            print(" -------------------------")
        spaceBar = "|"
        if i < 9:
            print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
        i = i + 1
while True:  # prints Sudoku until is solved
    print("Your sudoku to solve:")
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789"  # conditions of entering the numbers !
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # Fix: the digit was stored as its raw 1-character string, which made
    # sum(row) below raise TypeError forever, so the "YOU WIN" check could
    # never pass.  Store an int; a space still clears the cell to blank.
    # NOTE(review): the validation above also lets '0' or ' ' through for
    # the row/column fields, which crashes int() exactly as it did before.
    cell = x[4]
    sudoku1[int(x[0])-1][int(x[2])-1] = int(cell) if cell != " " else cell
    try:
        i = 0
        for item in sudoku1:
            # Rows summing to 45 are taken as complete (does not detect
            # duplicates within a row, same as the original check).
            if sum(item) == 45:
                i = i + 1
        if i == 9:
            print("YOU WIN")
            break
    except TypeError:
        # Some row still contains ' ' placeholders and cannot be summed.
        print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
|
[
"[email protected]"
] | |
c75ea51b954cef8081502d553948e07b0487abe9
|
bf813d2b877fb8ba62feb4263484db3d0f26d5cd
|
/early-phd/map_to_flux.py
|
1c2d0eab20e2c6fa5e1fe3228a8f9507a9b7ba48
|
[] |
no_license
|
9217392354A/astro-scripts
|
1e8e8c827097a877518d1f3e10870a5c2609417c
|
cd7a175bd504b4e291020b551db3077b067bc632
|
refs/heads/master
| 2021-01-13T00:40:57.481755 | 2016-03-25T17:04:28 | 2016-03-25T17:04:28 | 54,730,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
#Program created by Chris Fuller to test a function for extracting flux's from a fits file using appature photomotry
#import stuff
# NOTE(review): `from numpy import *` shadows builtins such as sum/abs and
# is redundant with the explicit `import numpy` below — confirm before use.
from numpy import *
import numpy
import scipy
import math
import sys
import os
from os.path import join as pj
#File stuff
# Input catalogue name, its folder, the output catalogue name, and the
# folder that will receive the extracted fluxes.  Paths are machine-local.
cat = "bigcoma.csv"
catfolder = "/Users/chrisfuller/Dropbox/coma/Catalogues"
catout ="comaTEST.csv"
folder = "/Users/chrisfuller/Dropbox/coma/flux2/"
|
[
"[email protected]"
] | |
8d8b46573115c470483434c30bc2fd15efceb159
|
73785aea08895d0fc15e914ce329716712f057ec
|
/recipes/errorAnal.py
|
9208c6a48ac906004212b9520360e38dbc9b8806
|
[] |
no_license
|
Peder2911/ModelComp
|
5e93e6db7fbc809e7444448729a91ff7a762b0cc
|
91ee3835ddc560adeb4af457953905aaeca79cd6
|
refs/heads/master
| 2020-05-20T05:09:01.877547 | 2019-05-18T13:37:34 | 2019-05-18T13:37:34 | 185,397,609 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
def ppSentences(sentences, y, x):
    """Pretty-print mispredicted sentences with predicted vs. actual labels.

    Fix: the source was truncated — the `def` keyword was missing, making
    the file a syntax error.  Restored to valid syntax only.
    NOTE(review): the body references free names (errorSents, prediction,
    err, actual) that are not defined in this file; they look like they
    should be derived from the parameters — recover from history/confirm.
    """
    for i, s in enumerate(errorSents):
        print('#'*38)
        print(f'{s} - pred: {prediction[err][i]} | actual: {actual[err][i]}')
        print('\n')
|
[
"[email protected]"
] | |
99cd43a8c940db281d4db4d33d06b1cee795bc61
|
c5291e50a3c72c885922378573a0ad423fcedf05
|
/analysis/data/urls.py
|
e7638f31b2b04491d30e6f29d5a4d9826f2a05c3
|
[] |
no_license
|
raghurammanyam/django-projects
|
bcc3ed6285882af437a2995514cef33760fb063e
|
dd20ae354f7f111a0176a1cc047c099bd23e9f05
|
refs/heads/master
| 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 |
Python
|
UTF-8
|
Python
| false | false | 196 |
py
|
from django.conf.urls import url
from django.urls import path
from .views import test,get
from django.http import HttpResponse
# URL routes for this app: /date/ -> views.test, /get/ -> views.get.
# NOTE(review): `path` and `HttpResponse` are imported but unused here —
# confirm they can be dropped.
urlpatterns = [
    url(r'^date/',test),
    url(r'^get/',get)
]
|
[
"[email protected]"
] | |
f231f73dec833a474cefcee2707d8742f92f9d51
|
125bc51efb95f383257e7bdb50ae74e5dc05b7f7
|
/src/belajarIntegerString.py
|
f28765c84ddfefc5911c0710cd851199053fcd21
|
[] |
no_license
|
frestea09/learn_ch1_python
|
f9688fffda5f0fa312b82bd25081b986fa0779e9
|
510ea59bf85ec024ebc473db2533e92becaefbf3
|
refs/heads/master
| 2020-05-26T18:22:31.171688 | 2019-05-26T05:42:08 | 2019-05-26T05:42:08 | 188,334,119 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 239 |
py
|
from __future__ import print_function
def main():
    """Prompt for a name and an age, then echo them in one sentence."""
    variabelNama = input('nama : ')
    # int() raises ValueError on non-numeric input — no handling here.
    variabelInteger = int(input('Umur'))
    print('Nama anda %s dan umur anda %d'%(variabelNama,variabelInteger))
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
a6d6d50572836ba4614154dce36cf5e2c21f9c51
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02679/s613915096.py
|
fec86a56bc93ae2efcf62264eb570f7a448a4ed4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 667 |
py
|
# Competitive-programming solution (structure matches AtCoder ABC168 E):
# count ways to pick a non-empty subset of pairs (a, b) so that no two
# picked pairs are "orthogonal" (a1*a2 + b1*b2 == 0), modulo 1e9+7 —
# inferred from the code; confirm against the original problem statement.
import math, collections
N = int(input())
AB = [[int(_) for _ in input().split()] for _ in range(N)]
mod = 10**9 + 7
C = collections.Counter()
gcd = math.gcd
# (0, 0) pairs conflict with every other pair, so each can only be the
# lone pick; counted separately.
a0 = 0
for a, b in AB:
    if a == b == 0:
        a0 += 1
    elif a == 0:
        # Every (0, b) direction normalizes to the single key (0, -1).
        C[0, -1] += 1
    else:
        # Normalize the direction: divide by gcd and force a > 0 so
        # opposite vectors land on the same key.
        g = gcd(a, b)
        a //= g
        b //= g
        if a < 0:
            a *= -1
            b *= -1
        C[a, b] += 1
ans = 1
for a, b in C:
    if C[b, -a]:
        # This group is the counterpart of one handled elsewhere; skip to
        # avoid multiplying the same orthogonal pairing twice.
        continue
    elif C[-b, a]:
        # Paired orthogonal groups: any subset of one side or the other,
        # minus the doubly-counted empty choice.
        ans *= (pow(2, C[a, b], mod) + pow(2, C[-b, a], mod) - 1) % mod
        ans %= mod
    else:
        # Unconstrained group: every subset is allowed.
        ans *= pow(2, C[a, b], mod)
        ans %= mod
# Add the lone (0,0) picks; the -1 removes the fully-empty selection.
ans += a0 - 1
ans %= mod
print(ans)
|
[
"[email protected]"
] | |
8951afe2b51d654fd469ed7fd936879e3610aa30
|
35894bca47cf0c9a51a05caf7b56a0d69c05b033
|
/04_subrotinas_numpy/25_fibonacci.py
|
1067f8b8abc1c15bc44a985e9b4f892471d34f46
|
[] |
no_license
|
alcebytes/Phyton-Estudo
|
0a2d33f5f3e668e6ab2f99e5e4499545a3bc1273
|
a3f9a0b3e0a91d71a9359480d6ec17e692572694
|
refs/heads/master
| 2023-01-14T17:24:16.486956 | 2020-10-08T02:02:02 | 2020-10-08T02:02:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 942 |
py
|
import time as time
# Read the upper bound on the number of Fibonacci terms from the user
# (prompt text is in Portuguese and kept verbatim).
num_iter = int(input("Digitar o valor do número máximo para a sequência de Fibonacci = "))
# Wall-clock and CPU timers started before the computation.
tempo_inicio = time.time()
#tempo_inicio_CPU = time.clock() # OBSOLETE: time.clock() was removed in Python 3.8
tempo_inicio_CPU = time.process_time()
tempo_inicio_CPU_2 = time.perf_counter()
# f(0)
f = []
f.append(0)
print(f)
# f(1)
f.append(1)
print(f)
"""
f(n + 2) = f(n) + f(n + 1)
for n in range(0, num_iter - 2, 1)
f.append(f[n] + f[n + 1] )
"""
n = 0
# Build the remaining terms iteratively: f[n+2] = f[n] + f[n+1].
# For num_iter < 3 the loop body never runs and only [0, 1] is printed.
while n <= num_iter - 3:
    f.append(f[n] + f[n + 1])
    n = n + 1
print(f)
# Print the last term of f.
print(f[-1])
# Another way of doing the same:
print(f[len(f) - 1])
tempo_fim = time.time() - tempo_inicio
print("O tempo de execução da aplicação é", tempo_fim, "s")
tempo_fim_CPU_2 = time.perf_counter() - tempo_inicio_CPU_2
print("O tempo de execução da CPU é", tempo_fim_CPU_2)
tempo_fim_CPU = time.process_time() - tempo_inicio_CPU
print("O tempo de execução da CPU é", tempo_fim_CPU)
|
[
"[email protected]"
] | |
9a0f0433298aaf2b0b0aa33f5a64b0273f639e93
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2669-2722/left-trunk-2722/twisted/internet/iocpreactor/udp.py
|
3bf7a5bba392de8252482bdf0e1ba0600cfe27fa
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,644 |
py
|
import socket
from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements
ERROR_PORT_UNREACHABLE = 1234
class Port(log.Logger, styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IUDPTransport)
events = ["startListening", "stopListening", "write", "readDone", "readErr", "writeDone", "writeErr", "connect"]
sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
read_op_class = WSARecvFromOp
write_op_class = WSASendToOp
reading = False
_realPortNumber = None
disconnected = property(lambda self: self.state == "disconnected")
def __init__(self, bindAddress, proto, maxPacketSize=8192):
assert isinstance(proto, protocol.DatagramProtocol)
self.state = "disconnected"
from twisted.internet import reactor
self.bindAddress = bindAddress
self._connectedAddr = None
self.protocol = proto
self.maxPacketSize = maxPacketSize
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
self.read_op = self.read_op_class(self)
self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
self.reactor = reactor
def __repr__(self):
if self._realPortNumber is not None:
return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
else:
return "<%s not connected>" % (self.protocol.__class__,)
def handle_listening_connect(self, host, port):
if not isIPAddress(host):
raise ValueError, "please pass only IP addresses, not domain names"
self.state = "connecting"
return defer.maybeDeferred(self._connectDone, host, port)
def handle_connecting_connect(self, host, port):
raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
handle_connected_connect = handle_connecting_connect
def _connectDone(self, host, port):
self._connectedAddr = (host, port)
self.state = "connected"
self.socket.connect((host, port))
return self._connectedAddr
def handle_disconnected_startListening(self):
self._bindSocket()
host, port = self.bindAddress
if isIPAddress(host):
return defer.maybeDeferred(self._connectSocket, host)
else:
d = self.reactor.resolve(host)
d.addCallback(self._connectSocket)
return d
def _bindSocket(self):
try:
skt = socket.socket(*self.sockinfo)
skt.bind(self.bindAddress)
except socket.error, le:
raise error.CannotListenError, (None, None, le)
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
self.socket = skt
def _connectSocket(self, host):
self.bindAddress = (host, self.bindAddress[1])
self.protocol.makeConnection(self)
self.startReading()
self.state = "listening"
def startReading(self):
self.reading = True
try:
self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
except WindowsError, we:
log.msg("initiating read failed with args %s" % (we,))
def stopReading(self):
self.reading = False
def handle_listening_readDone(self, bytes, addr = None):
if addr:
self.protocol.datagramReceived(self.readbuf[:bytes], addr)
else:
self.protocol.datagramReceived(self.readbuf[:bytes])
if self.reading:
self.startReading()
handle_connecting_readDone = handle_listening_readDone
handle_connected_readDone = handle_listening_readDone
def handle_listening_readErr(self, ret, bytes):
log.msg("read failed with err %s" % (ret,))
if ret == 1234: # ERROR_PORT_UNREACHABLE
self.protocol.connectionRefused()
if self.reading:
self.startReading()
handle_connecting_readErr = handle_listening_readErr
handle_connected_readErr = handle_listening_readErr
def handle_disconnected_readErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_readDone(self, bytes, addr = None):
pass # no kicking the dead horse
def handle_listening_write(self, data, addr):
self.performWrite(data, addr)
def handle_connected_write(self, data, addr = None):
assert addr in (None, self._connectedAddr)
self.performWrite(data, addr)
def performWrite(self, data, addr = None):
self.writing = True
try:
write_op = self.write_op_class(self)
if not addr:
addr = self._connectedAddr
write_op.initiateOp(self.socket.fileno(), data, addr)
except WindowsError, we:
log.msg("initiating write failed with args %s" % (we,))
def handle_listening_writeDone(self, bytes):
log.msg("write success with bytes %s" % (bytes,))
handle_connecting_writeDone = handle_listening_writeDone
handle_connected_writeDone = handle_listening_writeDone
def handle_listening_writeErr(self, ret, bytes):
log.msg("write failed with err %s" % (ret,))
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
handle_connecting_writeErr = handle_listening_writeErr
handle_connected_writeErr = handle_listening_writeErr
def handle_disconnected_writeErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_writeDone(self, bytes):
pass # no kicking the dead horse
def writeSequence(self, seq, addr):
self.write("".join(seq), addr)
def handle_listening_stopListening(self):
self.stopReading()
self.connectionLost()
handle_connecting_stopListening = handle_listening_stopListening
handle_connected_stopListening = handle_listening_stopListening
def connectionLost(self, reason=None):
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
self.protocol.doStop()
self.socket.close()
del self.socket
self.state = "disconnected"
def logPrefix(self):
return self.logstr
def getHost(self):
return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
|
[
"[email protected]"
] | |
1d539066706ca4f69d3130d49688deb922c477b3
|
98311c7b2b2257f14f0f4a0657363e893872798e
|
/project/src/python/practicum.py
|
e3f1dfcf9ef76f4b71a4dd1106d26832dc48802f
|
[
"MIT"
] |
permissive
|
aslupin/Yak-Ngaen-Project
|
fed9a264a863e1174c00ec8ad360f1c03422f393
|
c91b3cc83d2eda22b62fe877276bdd1a8a1b24fd
|
refs/heads/master
| 2022-01-28T02:44:39.385903 | 2019-05-09T13:36:04 | 2019-05-09T13:36:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,616 |
py
|
import usb
# RQ_SET_LED = 0
# RQ_SET_LED_VALUE = 1
# RQ_GET_SWITCH = 2
# RQ_GET_LIGHT = 3
RQ_GET_SOUND_PLAYER_I = 1
RQ_GET_SOUND_PLAYER_II = 2
####################################
def find_mcu_boards():
    '''
    Find all Practicum MCU boards attached to the machine, then return a list
    of USB device handles for all the boards

    >>> devices = find_mcu_boards()
    >>> first_board = McuBoard(devices[0])
    '''
    matches = []
    for bus in usb.busses():
        for dev in bus.devices:
            # Boards are identified by this fixed vendor/product id pair.
            if (dev.idVendor, dev.idProduct) == (0x16c0, 0x05dc):
                matches.append(dev)
    return matches
####################################
class McuBoard:
    '''
    Generic class for accessing Practicum MCU board via USB connection.
    '''
    ################################
    def __init__(self, dev):
        # Keep both the raw device object and an opened handle for
        # subsequent control transfers.
        self.device = dev
        self.handle = dev.open()
    ################################
    def usb_write(self, request, data=None, index=0, value=0):
        '''
        Send data output to the USB device (i.e., MCU board)
          request: request number to appear as bRequest field on the USB device
          data: payload bytes to send (defaults to an empty payload)
          index: 16-bit value to appear as wIndex field on the USB device
          value: 16-bit value to appear as wValue field on the USB device
        '''
        # Fixed: avoid a mutable default argument; None stands in for "empty".
        if data is None:
            data = []
        reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_OUT
        self.handle.controlMsg(
                reqType, request, data, value=value, index=index)
    ################################
    def usb_read(self, request, length=1, index=0, value=0):
        '''
        Request data input from the USB device (i.e., MCU board)
          request: request number to appear as bRequest field on the USB device
          length: number of bytes to read from the USB device
          index: 16-bit value to appear as wIndex field on the USB device
          value: 16-bit value to appear as wValue field on the USB device

        If successful, the method returns a tuple of length specified
        containing data returned from the MCU board.
        '''
        reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_IN
        buf = self.handle.controlMsg(
                reqType, request, length, value=value, index=index)
        return buf
####################################
class PeriBoard:
    '''
    High-level wrapper exposing peripheral-board features of an McuBoard.
    '''
    ################################
    def __init__(self, mcu):
        # mcu: an McuBoard instance used for the actual USB control transfers.
        self.mcu = mcu
    ################################
    def get_sound(self, player):
        '''
        Return the current sound-sensor reading for the given player.

          player: RQ_GET_SOUND_PLAYER_I or RQ_GET_SOUND_PLAYER_II; used
                  directly as the USB vendor-request number.

        Raises ValueError for an unknown player code (previously the method
        silently fell through and returned None).
        '''
        if player not in (RQ_GET_SOUND_PLAYER_I, RQ_GET_SOUND_PLAYER_II):
            raise ValueError("unknown player request code: %r" % (player,))
        # The board answers with two bytes; only the low byte is used here.
        sound = self.mcu.usb_read(request=player, length=2)
        return sound[0]
|
[
"[email protected]"
] | |
5dcf1531f3266b5a1c867bd6a62ba36a36b2bbc2
|
7b08ceb8c901a09e41d4a67804e2adf94142cb17
|
/description2process/__init__.py
|
2f99a8019b7c0dace78658a646cc5d28bfb7d318
|
[] |
no_license
|
simrit1/Description2Process
|
1e7cfcc4dc6bb762d69f27bbe1eedd4e0cef6a38
|
223372f3588f7ac67537eae3012667951b5543e0
|
refs/heads/master
| 2023-08-25T23:12:50.838804 | 2019-05-16T16:51:51 | 2019-05-16T16:51:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,370 |
py
|
import tensorflow as tf
# We need to enable eager execution for inference at the end of this notebook.
tfe = tf.contrib.eager
tfe.enable_eager_execution()
# Pin the TensorFlow version in the environment so subprocesses/tools agree.
TFVERSION='1.13'
import os
os.environ['TFVERSION'] = TFVERSION
# Import library
from description2process import data_generation
from description2process import contraction_expansion
from description2process import coreference_resolution
from description2process import clause_extraction
from description2process import activity_recognition
from description2process import activity_extraction
from description2process import structured_description
from description2process import xml_model
from description2process import visualization
from description2process import evaluation
def description2model(description, png = False):
    """Run the full 8-step pipeline from a raw process description (string)
    to a rendered process model; `png` is forwarded to the visualizer."""
    expanded = contraction_expansion.expand_contractions(description)
    print("Step 1/8 DONE: contraction expansion")
    resolved = coreference_resolution.resolve_coreferences(expanded)
    print("Step 2/8 DONE: coreference resolution")
    clauses = clause_extraction.get_clauses(resolved)
    print("Step 3/8 DONE: extracted clauses ")
    labeled = activity_recognition.contains_activity_list(clauses)
    print("Step 4/8 DONE: labeled clauses ")
    activities = activity_extraction.get_activity_df(labeled)
    print("Step 5/8 DONE: extracted activities ")
    # Note: the structured description is built from the *resolved* text.
    semi_structured = structured_description.get_structured_description(resolved, activities)
    print("Step 6/8 DONE: semi-structured descriptions")
    xml = xml_model.structured2xml(semi_structured)
    print("Step 7/8 DONE: model in XML")
    model = visualization.xml2model(xml, png)
    print("Step 8/8 DONE: Visualize model")
    return model
def description2xml(description):
    """Run the 7-step pipeline from a raw process description (string) to the
    XML representation of the process model."""
    expanded = contraction_expansion.expand_contractions(description)
    print("Step 1/7 DONE: contraction expansion")
    resolved = coreference_resolution.resolve_coreferences(expanded)
    print("Step 2/7 DONE: coreference resolution")
    clauses = clause_extraction.get_clauses(resolved)
    print("Step 3/7 DONE: extracted clauses ")
    labeled = activity_recognition.contains_activity_list(clauses)
    print("Step 4/7 DONE: labeled clauses ")
    activities = activity_extraction.get_activity_df(labeled)
    print("Step 5/7 DONE: extracted activities ")
    # Note: the structured description is built from the *resolved* text.
    semi_structured = structured_description.get_structured_description(resolved, activities)
    print("Step 6/7 DONE: semi-structured descriptions")
    xml = xml_model.structured2xml(semi_structured)
    print("Step 7/7 DONE: model in XML")
    return xml
def description2structured_df(description_df):
    """Run the 6-step dataframe pipeline from raw process descriptions to
    structured descriptions (input: pandas DataFrame of descriptions)."""
    cleaned = contraction_expansion.expand_contractions_df(description_df)
    print("Step 1/6 DONE: contraction expansion")
    cleaned = coreference_resolution.resolve_coreferences_df(cleaned)
    print("Step 2/6 DONE: coreference resolution")
    cleaned = clause_extraction.get_clauses_df(cleaned)
    print("Step 3/6 DONE: extracted clauses ")
    labeled = activity_recognition.contains_activity_df(cleaned)
    print("Step 4/6 DONE: labeled clauses ")
    activities = activity_extraction.get_activity_df(labeled)
    print("Step 5/6 DONE: extracted activities ")
    structured = structured_description.get_structured_description_df(cleaned, activities)
    print("Step 6/6 DONE: returned structured descriptions")
    return structured
def description2referenceresolved_df(description_df):
    """Return the "cleaned" descriptions: contraction expansion followed by
    coreference resolution (input: pandas DataFrame of descriptions)."""
    expanded = contraction_expansion.expand_contractions_df(description_df)
    return coreference_resolution.resolve_coreferences_df(expanded)
def description2clauses_df(description_df):
    """Return the descriptions augmented with their extracted clauses
    (input: pandas DataFrame of descriptions)."""
    cleaned = contraction_expansion.expand_contractions_df(description_df)
    cleaned = coreference_resolution.resolve_coreferences_df(cleaned)
    return clause_extraction.get_clauses_df(cleaned)
def description2labeledclauses_df(description_df):
    """Return (labeled_clauses, description_df): a DataFrame of clauses with
    activity labels plus the clause-augmented descriptions DataFrame."""
    cleaned = contraction_expansion.expand_contractions_df(description_df)
    cleaned = coreference_resolution.resolve_coreferences_df(cleaned)
    cleaned = clause_extraction.get_clauses_df(cleaned)
    labeled = activity_recognition.contains_activity_df(cleaned)
    return labeled, cleaned
|
[
"[email protected]"
] | |
d008e616c943f18e5f7f5c090bc112e713db99cf
|
c4b7b5a9c56a9b6394a14704d2faf76754175473
|
/rooms/templatetags/is_booked.py
|
da615b5d82465d9cb146e16beb8eeaefaf53bbc4
|
[] |
no_license
|
seungjinhan/airbnb_clone_django
|
71a15e5242bad28fd96d5f47652a049a77f12f61
|
4c38780746409ea1ed9b4f5b02abca60326752c2
|
refs/heads/master
| 2022-12-02T15:14:39.341441 | 2020-08-23T13:50:42 | 2020-08-23T13:50:42 | 280,878,495 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
import datetime
from django import template
from reservations import models as reservation_model
register = template.Library()
@register.simple_tag
def is_booked(room, day):
    """Template tag: True if `room` has a BookedDay on the given calendar day.

    `day` is a calendar-cell object; day.number == 0 marks an empty padding
    cell, which is never booked. Returns False when no BookedDay exists.
    """
    if day.number == 0:
        return False
    try:
        date = datetime.datetime(
            year=day.year, month=day.month, day=day.number)
        # Raises DoesNotExist when the room is free on that date.
        reservation_model.BookedDay.objects.get(
            day=date, reservation__room=room)
        # Fixed: removed leftover debug print(date)/print(room) statements.
        return True
    except reservation_model.BookedDay.DoesNotExist:
        return False
|
[
"[email protected]"
] | |
e5f7852757d20d9217562defb3d22da0c1893cb6
|
5e809acc62b080f1adad2c34e647241cdc5ad297
|
/myenv/bin/markdown_py
|
fa2c63491a1647ccda5e1725538898c521cfc6a8
|
[
"MIT"
] |
permissive
|
RootenberG/My-blog-project
|
f520af79a2f3eb416b3dadee46813a812ce9d53d
|
7ef4670cfa9d54d9345d52ca008aae5fed5605bc
|
refs/heads/master
| 2020-08-15T20:04:29.478049 | 2020-02-08T21:57:46 | 2020-02-08T21:57:46 | 215,400,930 | 0 | 0 |
MIT
| 2019-10-30T20:54:38 | 2019-10-15T21:34:30 |
Python
|
UTF-8
|
Python
| false | false | 255 |
#!/home/den/devspace/My-blog-project/myenv/bin/python3.7
# -*- coding: utf-8 -*-
# Virtualenv console-script shim for Python-Markdown: it normalizes argv[0]
# (stripping any "-script.pyw"/".exe" suffix added by installers) and then
# delegates to markdown's CLI entry point, exiting with its return code.
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"[email protected]"
] | ||
cb87f2390f4328b284144e4fa1564341cb8bdcf7
|
c27c51f5c33e0431dbe7db6e18c21b249d476cfa
|
/OpenSource_Python_Code/nova-2013.2/nova/tests/api/ec2/test_faults.py
|
36cee0663bf4ff4b4c640f0b081a869d016d26a6
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/Python_Stuff
|
9bef74e0db17bb5e3ba2d908ced01ee744820d80
|
9aa94a0fa5e4e802090c7b29ec88b840e304d9e5
|
refs/heads/master
| 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null |
UTF-8
|
Python
| false | false | 1,914 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import webob
from nova.api.ec2 import faults
from nova import test
from nova import wsgi
class TestFaults(test.NoDBTestCase):
    """Tests covering ec2 Fault class."""
    def test_fault_exception(self):
        # Ensure the original webob exception is preserved on faults.
        fault = faults.Fault(webob.exc.HTTPBadRequest(
                             explanation='test'))
        # Fixed: assertIsInstance instead of assertTrue(isinstance(...)) for
        # a clearer failure message.
        self.assertIsInstance(fault.wrapped_exc, webob.exc.HTTPBadRequest)
    def test_fault_exception_status_int(self):
        # Ensure the status_int is set correctly on faults.
        fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
        # Fixed: assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(fault.wrapped_exc.status_int, 404)
    def test_fault_call(self):
        # Ensure proper EC2 response on faults.
        message = 'test message'
        ex = webob.exc.HTTPNotFound(explanation=message)
        fault = faults.Fault(ex)
        req = wsgi.Request.blank('/test')
        req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
        self.mox.StubOutWithMock(faults, 'ec2_error_response')
        faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
                                  message=message, status=ex.status_int)
        self.mox.ReplayAll()
        fault(req)
|
[
"[email protected]"
] | |
8de31727528745859574b0a71d4d7f4265c46740
|
2718b6f68a717b24cd6238a20d4116b3dea3201b
|
/BlogTemplate/mysite_env/mysite/apps/blog/views.py
|
39b584eea388bcf248d6a6d595bae4840b4bf60b
|
[] |
no_license
|
tminlun/BlogTemplate
|
e94654e01e170f27c97c197c898c102518ad13ab
|
d475587fdd9e111961bbfa56666255d38cfdc056
|
refs/heads/master
| 2022-12-11T00:51:53.019391 | 2018-12-05T14:54:04 | 2018-12-05T14:54:04 | 138,825,320 | 0 | 0 | null | 2022-12-08T02:25:29 | 2018-06-27T03:30:20 |
Python
|
UTF-8
|
Python
| false | false | 4,519 |
py
|
from django.shortcuts import render,get_object_or_404
from django.core.paginator import Paginator
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models.aggregates import Count
from read_statistics.utils import read_statistics_once_read
from comment.models import Comment
from .models import Blog,BlogType
# Build the context shared by every blog-list view; blog_all_list is the
# queryset of posts to paginate (each view filters it differently).
def get_blog_list_common_data(request, blog_all_list):
    paginator = Paginator(blog_all_list, settings.EACH_PAGE_BLOG_NUMBER) # page size comes from settings
    page_num = request.GET.get('page', 1) # requested page number from the GET query string
    page_of_blogs = paginator.get_page(page_num) # the Page object for the requested page
    current_page_num = page_of_blogs.number # the page number actually served
    # Show up to two page numbers on each side of the current one; max()/min()
    # clip the window to [1, num_pages].
    page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \
                 list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))
    # Insert an ellipsis when there is a gap after page 1 ...
    if page_range[0] - 1 >= 2:
        page_range.insert(0, '...')
    # ... and when there is a gap before the last page.
    if paginator.num_pages - page_range[-1] >= 2:
        page_range.append('...')
    # Always show the first and last page numbers.
    if page_range[0] != 1:
        page_range.insert(0, 1) # prepend page 1
    if page_range[-1] != paginator.num_pages:
        page_range.append(paginator.num_pages) # append the last page
    blog_dates = Blog.objects.dates('created_time','month',order="DESC")
    blog_dates_dict = {}
    for blog_date in blog_dates:
        date_count = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month).count()
        blog_dates_dict[blog_date] = date_count
    context = {}
    context['page_of_blogs'] = page_of_blogs # current Page object
    context['page_range'] = page_range # page numbers (and ellipses) for the template
    context['blogs'] = page_of_blogs.object_list # posts on the current page
    # annotate() attaches a per-type post count; empty types are hidden.
    context['blog_types']=BlogType.objects.annotate(type_count = Count('blog')).filter(type_count__gt=0)
    # Archive sidebar: {month-date: post count}.
    context['blog_dates'] = blog_dates_dict # gotcha: must map date -> count, not just list dates
    return context # consumed by render(request, '?.html', context) in each view
def blog_list(request):
    """Render the paginated list of every blog post."""
    context = get_blog_list_common_data(request, Blog.objects.all())
    return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_with_type_pk):
    """Render posts belonging to one BlogType (404 for an unknown pk)."""
    blog_type = get_object_or_404(BlogType, pk=blog_with_type_pk)
    posts = Blog.objects.filter(blog_type=blog_type)
    context = get_blog_list_common_data(request, posts)
    context['blog_type'] = blog_type  # the type being browsed, for the heading
    return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
    """Render posts published in the given year and month."""
    posts = Blog.objects.filter(created_time__year=year, created_time__month=month)
    context = get_blog_list_common_data(request, posts)
    context['blog_with_date'] = "%s年%s月" % (year, month)  # heading label
    return render(request, 'blog/blogs_with_date.html', context)
# Blog detail view.
def blog_detail(request,blog_pk):
    context = {}
    blog = get_object_or_404(Blog, pk = blog_pk)
    # Count the read only when the browser has no cookie for this post yet;
    # returns the cookie key to set on the response below.
    read_cookie_key = read_statistics_once_read(request, blog)
    blog_content_type = ContentType.objects.get_for_model(blog)
    comments = Comment.objects.filter(content_type=blog_content_type,object_id=blog.pk)
    context['blog'] = blog
    # Previous post: the oldest one created after this post (__gt).
    context['previous_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
    # Next post: the newest one created before this post (__lt).
    context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
    context['user'] = request.user
    context['comments'] = comments
    response=render(request, 'blog/blog_detail.html', context)
    # NOTE(review): 'ture' looks like a typo for 'true'; the cookie appears to
    # be checked only for existence, so the value is harmless — confirm before
    # changing it (existing clients already carry the 'ture' value).
    response.set_cookie(read_cookie_key, 'ture') # gotcha: remember to pass a value
    return response
|
[
"[email protected]"
] | |
4da9c1e6ca004b93d1f275e2bd86ea3be8e69b31
|
52bb1d25a8c146b81b876343f861025e034fa964
|
/roglick/dungeon/utils.py
|
fcf6a2a864c5ae7cc6c50f2c302b33b63529bf23
|
[
"MIT"
] |
permissive
|
Kromey/roglick
|
b3fc7a6bce7e60a150c9a9ed1cc825ef3988cf8a
|
b76202af71df0c30be0bd5f06a3428c990476e0e
|
refs/heads/master
| 2020-12-14T15:49:53.163385 | 2016-05-24T16:29:06 | 2016-05-24T16:29:06 | 21,549,421 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,519 |
py
|
from roglick.engine import random
from roglick.utils import clamp
def smoothstep(a, b, x):
    """Basic S-curve interpolation function.

    Based on reference implementation available at
    https://en.wikipedia.org/wiki/Smoothstep
    """
    # Normalize x into [0, 1] relative to the [a, b] edge pair, then apply
    # the cubic Hermite polynomial 3t^2 - 2t^3.
    t = clamp((x - a)/(b - a), 0.0, 1.0)
    return t*t*(3 - 2*t)
def smootherstep(a, b, x):
    """Improved S-curve interpolation function.

    Based on reference implementation of the improved algorithm proposed by
    Ken Perlin that is available at https://en.wikipedia.org/wiki/Smoothstep
    """
    # Normalize into [0, 1], then apply Perlin's quintic 6t^5 - 15t^4 + 10t^3.
    t = clamp((x - a)/(b - a), 0.0, 1.0)
    return t*t*t*(t*(t*6 - 15) + 10)
def lerp(a, b, x):
    """Linearly interpolate from *a* to *b* by fraction *x* (0 gives a, 1 gives b)."""
    return a + (b - a) * x
class PerlinNoise2D(object):
    """2D Perlin gradient noise over a seeded 256-entry permutation table."""
    def __init__(self, seed=None):
        # Permutation table 0..255, shuffled by the project's seeded RNG.
        self.p = [x for x in range(256)]
        if seed is None:
            seed = random.get_int()
        rand = random.Random(seed)
        rand.shuffle(self.p)
    def octave(self, x, y, octaves=5, persistence=0.5):
        """Sum `octaves` noise layers at doubling frequency and decaying
        amplitude, normalized back into the base noise range."""
        total = 0
        frequency = 1
        amplitude = 1
        max_val = 0
        for i in range(octaves):
            total += self.noise(x*frequency, y*frequency) * amplitude
            max_val += amplitude
            amplitude *= persistence
            frequency *= 2
        return total / max_val
    def noise(self, x, y):
        """Single-octave Perlin noise at (x, y), mapped into [0, 1]."""
        # Integer cell coordinates and the fractional position inside the cell.
        xi = int(x)
        yi = int(y)
        xf = x - xi
        yf = y - yi
        # Smoothed interpolation weights.
        u = self.fade(xf)
        v = self.fade(yf)
        # Hashed gradient indices for the cell's four corners.
        aa = self.p_hash(self.p_hash( xi )+ yi )
        ab = self.p_hash(self.p_hash( xi )+ yi+1)
        ba = self.p_hash(self.p_hash(xi+1)+ yi )
        bb = self.p_hash(self.p_hash(xi+1)+ yi+1)
        # Blend the corner gradient contributions along x, then along y.
        x1 = lerp(self.grad(aa, xf , yf), self.grad(ba, xf-1, yf), u)
        x2 = lerp(self.grad(ab, xf , yf-1), self.grad(bb, xf-1, yf-1), u)
        return (lerp(x1, x2, v) + 1) / 2 # Constrain to [0,1] rather than [-1,1]
    def fade(self, t):
        # Quintic easing of the interpolation parameter (Perlin's fade).
        return smootherstep(0.0, 1.0, t)
    def p_hash(self, i):
        # Wrap into the permutation table (table has 256 entries).
        i = i & 255
        return self.p[i]
    def grad(self, h, x, y):
        """This gradient function is based on Riven's optimization
        Source: http://riven8192.blogspot.com/2010/08/calculate-perlinnoise-twice-as-fast.html
        """
        # Pick one of four diagonal gradients from the hash's low two bits.
        h = h % 4
        if h == 0:
            return x + y
        elif h == 1:
            return -x + y
        elif h == 2:
            return x - y
        elif h == 3:
            return -x - y
        else:
            # Never happens
            return 0
|
[
"[email protected]"
] | |
c2a6d24f20bb1c2478b4feea8182623aca53bac4
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_14413.py
|
5e67c83692878ae8becbb59fe8019e05781959d1
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 140 |
py
|
# changing type of a entry in dictionary throws error
# NOTE(review): scraped Q&A snippet, not runnable as-is — it assumes
# `from datetime import datetime` and a real strftime format string in place
# of the `<your format>` placeholder (e.g. "%Y-%m-%d").
d = {'today': datetime.today()}
d['today'] = d['today'].strftime(<your format>)
|
[
"[email protected]"
] | |
eeb85c0763b4b58838c030ceccd1de9ec42a82e6
|
5cea11c9373d997430b523227ce81b61972ad1e3
|
/tests/test_client_events.py
|
bd3bc8ac4bf3a96cd62673408ee09427626646ff
|
[
"BSD-3-Clause"
] |
permissive
|
tinylambda/grpclib
|
fcc0d4f5723fe36359ceb9655764e9a37c87ebc1
|
948e32a29a4ad82ebbfdbb681f7a797f6233bff3
|
refs/heads/master
| 2023-07-15T16:19:59.776603 | 2021-08-25T19:56:10 | 2021-08-25T19:56:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,234 |
py
|
import pytest
from multidict import MultiDict
from google.rpc.error_details_pb2 import ResourceInfo
from grpclib.const import Status
from grpclib.events import listen, SendRequest, SendMessage, RecvMessage
from grpclib.events import RecvInitialMetadata, RecvTrailingMetadata
from grpclib.testing import ChannelFor
from grpclib._compat import nullcontext
from grpclib.exceptions import GRPCError
from dummy_pb2 import DummyRequest, DummyReply
from dummy_grpc import DummyServiceStub, DummyServiceBase
class DummyService(DummyServiceBase):
    """Server-side stub used by the event tests.

    With fail=True, UnaryUnary still replies but terminates the RPC with a
    NOT_FOUND status carrying a message, details, and trailing metadata.
    """
    def __init__(self, fail=False):
        # fail: when True, UnaryUnary ends the call with an error status.
        self.fail = fail
    async def UnaryUnary(self, stream):
        await stream.recv_message()
        await stream.send_initial_metadata(metadata={'initial': 'true'})
        await stream.send_message(DummyReply(value='pong'))
        if self.fail:
            # Error termination: status + message + rich details + metadata,
            # all of which test_recv_trailing_metadata inspects.
            await stream.send_trailing_metadata(
                status=Status.NOT_FOUND,
                status_message="Everything is not OK",
                status_details=[ResourceInfo()],
                metadata={'trailing': 'true'},
            )
        else:
            await stream.send_trailing_metadata(metadata={'trailing': 'true'})
    async def UnaryStream(self, stream):
        # Not exercised by these tests.
        raise GRPCError(Status.UNIMPLEMENTED)
    async def StreamUnary(self, stream):
        # Not exercised by these tests.
        raise GRPCError(Status.UNIMPLEMENTED)
    async def StreamStream(self, stream):
        # Not exercised by these tests.
        raise GRPCError(Status.UNIMPLEMENTED)
async def _test(event_type, *, fail=False):
    """Run one UnaryUnary call against DummyService and return the single
    event of `event_type` observed during the call.

    With fail=True the server terminates the RPC with an error, so the client
    call itself is expected to raise GRPCError.
    """
    service = DummyService(fail)
    events = []
    async def callback(event_):
        events.append(event_)
    async with ChannelFor([service]) as channel:
        listen(channel, event_type, callback)
        stub = DummyServiceStub(channel)
        # Expect GRPCError only on the failing path; nullcontext otherwise.
        ctx = pytest.raises(GRPCError) if fail else nullcontext()
        with ctx:
            reply = await stub.UnaryUnary(DummyRequest(value='ping'),
                                          timeout=1,
                                          metadata={'request': 'true'})
            assert reply == DummyReply(value='pong')
    # Exactly one event must have been dispatched for the listened type.
    event, = events
    return event
@pytest.mark.asyncio
async def test_send_request():
    # SendRequest exposes outgoing metadata, the full method name, a live
    # deadline, and the gRPC content type.
    evt = await _test(SendRequest)
    assert evt.metadata == MultiDict({'request': 'true'})
    assert evt.method_name == '/dummy.DummyService/UnaryUnary'
    assert evt.deadline.time_remaining() > 0
    assert evt.content_type == 'application/grpc'
@pytest.mark.asyncio
async def test_send_message():
    # SendMessage carries the outgoing request message.
    evt = await _test(SendMessage)
    assert evt.message == DummyRequest(value='ping')
@pytest.mark.asyncio
async def test_recv_message():
    # RecvMessage carries the incoming reply message.
    evt = await _test(RecvMessage)
    assert evt.message == DummyReply(value='pong')
@pytest.mark.asyncio
async def test_recv_initial_metadata():
    # RecvInitialMetadata carries the server's initial metadata.
    evt = await _test(RecvInitialMetadata)
    assert evt.metadata == MultiDict({'initial': 'true'})
@pytest.mark.asyncio
async def test_recv_trailing_metadata():
    # On a failed RPC, RecvTrailingMetadata carries the trailing metadata
    # plus the error status, message, and typed details.
    evt = await _test(RecvTrailingMetadata, fail=True)
    assert evt.metadata == MultiDict({'trailing': 'true'})
    assert evt.status is Status.NOT_FOUND
    assert evt.status_message == "Everything is not OK"
    assert isinstance(evt.status_details[0], ResourceInfo)
|
[
"[email protected]"
] | |
31dd5fd0705bfebccf299f10eb6ba594038b885d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/5ejvPTQeiioTTA9xZ_0.py
|
9b5d0b04aa8e5dca2af5037100305f74b9f4c108
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 395 |
py
|
"""
Create a function that checks if the argument is an integer or a string.
Return `"int"` if it's an integer and `"str"` if it's a string.
### Examples
int_or_string(8) ➞ "int"
int_or_string("Hello") ➞ "str"
int_or_string(9843532) ➞ "int"
### Notes
Input will either be an integer or a string.
"""
def int_or_string(var):
    """Return "int" for an integer argument and "str" for a string one."""
    return type(var).__name__
|
[
"[email protected]"
] | |
7b288b67b9fa3473f2fb3c72085b6de7ea893109
|
6cecdc007a3aafe0c0d0160053811a1197aca519
|
/apps/receiver/management/commands/generate_submissions.py
|
ae672ba20a318c1fc46d7ecce22a17363b20c062
|
[] |
no_license
|
commtrack/temp-aquatest
|
91d678c927cc4b2dce6f709afe7faf2768b58157
|
3b10d179552b1e9d6a0e4ad5e91a92a05dba19c7
|
refs/heads/master
| 2016-08-04T18:06:47.582196 | 2010-09-29T13:20:13 | 2010-09-29T13:20:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,172 |
py
|
""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import bz2
import sys
import urllib2
import httplib
import cStringIO
from urlparse import urlparse
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from django_rest_interface import util as rest_util
from receiver.models import Submission
class Command(LabelCommand):
    """Management command: generate synchronization files on a remote
    CommCareHQ server. Takes three positional args: remote_url username password.
    (Python 2 codebase -- note the print statements.)
    """
    option_list = LabelCommand.option_list + (
        make_option('-a','--all', action='store_true', dest='all', \
            default=False, help='Generate all files'),
        make_option('-?','--debug', action='store_true', dest='debug', \
            default=False, help='Generate some files'),
        make_option('-d','--download', action='store_true', dest='download', \
            default=False, help='Download files.'),
        )
    help = "Generate synchronization files on a CommCareHQ remote server."
    args = "<remote_url username password>"
    label = 'IP address of the remote server (including port), username, and password'

    def handle(self, *args, **options):
        """Validate the three positional arguments and delegate to
        generate_submissions()."""
        if len(args) != 3:
            raise CommandError('Please specify %s.' % self.label)
        remote_url = args[0]
        username = args[1]
        password = args[2]
        print "Generating synchronization data from %s" % remote_url
        # Note `all` shadows the builtin; --all inverts `latest` below.
        all = options.get('all', False)
        debug = options.get('debug', False)
        download = options.get('download', False)
        generate_submissions(remote_url, username, password, not all, debug, download)

    def __del__(self):
        # Intentionally empty destructor.
        pass
def generate_submissions(remote_url, username, password, latest=True, debug=False, download=False, to='submissions.tar'):
    """ Generate sync data from remote server

    remote_url: url of remote server (ip:port)
    username, password: credentials for logging in
    latest: when True, upload our local submission checksums so the server
            only generates submissions we do not already have
    download: when True, save the server's payload to `to`; otherwise a
              255-byte prefix of the response is inspected and returned
    Exits the process if the login is rejected.
    """
    status = rest_util.login(remote_url, username, password)
    if not status:
        print "Sorry. Your credentials were not accepted."
        sys.exit()
    url = 'http://%s/api/submissions/' % remote_url
    if latest:
        # Send bz2-compressed MD5 checksums of local submissions so the
        # server can generate only the diff.
        MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum', debug)
        response = rest_util.request(url, username, password, MD5_buffer)
        print "Generated latest remote submissions"
    else:
        response = urllib2.urlopen(url)
        print "Generated all remote submissions archive"
    if download:
        fout = open(to, 'w+b')
        fout.write(response.read())
        fout.close()
        print "Submissions downloaded to %s" % to
    else:
        # Check for status messages
        # (i think tar payloads always begin 'BZ'...)
        response = response.read(255)
        if response[:2] != "BZ":
            print response
        return response
|
[
"[email protected]"
] | |
5a5a5583911ddb9db5402f6b3d6030070b115f57
|
1e50f1643376039ca988d909e79f528e01fa1371
|
/leetcode/editor/cn/292.nim-游戏.py
|
174da887a6b080c9b99b41e140bf445662a9f611
|
[] |
no_license
|
mahatmaWM/leetcode
|
482a249e56e2121f4896e34c58d9fa44d6d0034b
|
4f41dad6a38d3cac1c32bc1f157e20aa14eab9be
|
refs/heads/master
| 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 981 |
py
|
#
# @lc app=leetcode.cn id=292 lang=python3
#
# [292] Nim 游戏
#
# https://leetcode-cn.com/problems/nim-game/description/
#
# algorithms
# Easy (69.45%)
# Likes: 326
# Dislikes: 0
# Total Accepted: 50K
# Total Submissions: 71.9K
# Testcase Example: '4'
#
# 你和你的朋友,两个人一起玩 Nim 游戏:桌子上有一堆石头,每次你们轮流拿掉 1 - 3 块石头。 拿掉最后一块石头的人就是获胜者。你作为先手。
#
# 你们是聪明人,每一步都是最优解。 编写一个函数,来判断你是否可以在给定石头数量的情况下赢得游戏。
#
# 示例:
#
# 输入: 4
# 输出: false
# 解释: 如果堆中有 4 块石头,那么你永远不会赢得比赛;
# 因为无论你拿走 1 块、2 块 还是 3 块石头,最后一块石头总是会被你的朋友拿走。
#
#
#
# @lc code=start
class Solution:
    def canWinNim(self, n: int) -> bool:
        """Return True if the first player can force a win in Nim with n stones."""
        # Multiples of 4 are exactly the losing positions: whatever you take
        # (1-3), the opponent can restore a multiple of 4.
        return n % 4 != 0
# @lc code=end
|
[
"[email protected]"
] | |
22f4ffa79f304c929e6c0680c0a2228d0e15dd2b
|
dbf2d3f8eb11d04123894e398446b56ca791c9f6
|
/examples/02.py
|
c9847666ba51a1574e379280d847d651e7982b21
|
[] |
no_license
|
podhmo/nendo
|
ed8d9a62ab23f7409a8ce519f28deff7d3642942
|
841ec7a990019596c769a2f581a1190aeb8cbd56
|
refs/heads/master
| 2021-01-22T17:47:58.964323 | 2015-06-28T11:37:38 | 2015-06-28T11:37:38 | 37,828,656 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 550 |
py
|
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
"""
-- select explicitly
SELECT open_emp_id, product_cd
FROM account
ORDER BY open_emp_id, product_cd;
"""
from nendo import Query, make_record, render
from nendo.value import List
# Record type for the `account` table with the listed columns.
Account = make_record("account", "account_id product_cd open_date avail_balance open_emp_id")
# Build the query shown in the SQL snippet at the top of this file.
# NOTE(review): the SQL comment orders ascending, but .desc() below orders
# descending -- confirm which is intended.
query = (Query()
         .from_(Account)
         .order_by(List([Account.open_emp_id, Account.product_cd]).desc())
         .select(Account.open_emp_id, Account.product_cd))
print(render(query))
|
[
"[email protected]"
] | |
e5ae86739f26139d2a56b19277ea7832e21d41bd
|
f74dd098c3e665d8f605af5ebe7e2874ac31dd2f
|
/aiogithubapi/namespaces/user.py
|
1d1bd8b8cab4928b70f10f1d9836568e6cc2db64
|
[
"MIT"
] |
permissive
|
ludeeus/aiogithubapi
|
ce87382698827939aaa127b378b9a11998f13c06
|
90f3fc98e5096300269763c9a5857481b2dec4d2
|
refs/heads/main
| 2023-08-20T19:30:05.309844 | 2023-08-14T20:24:21 | 2023-08-14T20:24:21 | 198,505,021 | 21 | 20 |
MIT
| 2023-09-11T06:12:10 | 2019-07-23T20:39:53 |
Python
|
UTF-8
|
Python
| false | false | 2,993 |
py
|
"""
Methods for the authenticated user namespace
https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
"""
from __future__ import annotations
from typing import Any, Dict
from ..const import GitHubRequestKwarg
from ..models.organization import GitHubOrganizationMinimalModel
from ..models.repository import GitHubRepositoryModel
from ..models.response import GitHubResponseModel
from ..models.user import GitHubAuthenticatedUserModel
from .base import BaseNamespace
from .projects import GitHubUserProjectsNamespace
class GitHubUserNamespace(BaseNamespace):
    """Methods for the authenticated-user ("/user") namespace"""

    def __post_init__(self) -> None:
        # Child namespace for the authenticated user's projects.
        self._projects = GitHubUserProjectsNamespace(self._client)

    @property
    def projects(self) -> GitHubUserProjectsNamespace:
        """Property to access the users projects namespace"""
        return self._projects

    async def get(
        self,
        **kwargs: Dict[GitHubRequestKwarg, Any],
    ) -> GitHubResponseModel[GitHubAuthenticatedUserModel]:
        """
        Get the authenticated user

        https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
        """
        response = await self._client.async_call_api(
            endpoint="/user",
            **kwargs,
        )
        response.data = GitHubAuthenticatedUserModel(response.data)
        return response

    async def starred(
        self,
        **kwargs: Dict[GitHubRequestKwarg, Any],
    ) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
        """
        Get the repositories starred by the authenticated user

        https://docs.github.com/en/rest/reference/activity#list-repositories-starred-by-the-authenticated-user
        """
        response = await self._client.async_call_api(
            endpoint="/user/starred",
            **kwargs,
        )
        response.data = [GitHubRepositoryModel(data) for data in response.data]
        return response

    async def repos(
        self,
        **kwargs: Dict[GitHubRequestKwarg, Any],
    ) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
        """
        Get the repositories for the authenticated user

        https://docs.github.com/en/rest/reference/repos#list-repositories-for-the-authenticated-user
        """
        response = await self._client.async_call_api(
            endpoint="/user/repos",
            **kwargs,
        )
        response.data = [GitHubRepositoryModel(data) for data in response.data]
        return response

    async def orgs(
        self,
        **kwargs: Dict[GitHubRequestKwarg, Any],
    ) -> GitHubResponseModel[list[GitHubOrganizationMinimalModel]]:
        """
        List organization memberships for the authenticated user.

        https://docs.github.com/en/rest/reference/orgs#list-organizations-for-the-authenticated-user
        """
        response = await self._client.async_call_api(endpoint="/user/orgs", **kwargs)
        # Guard against a None payload (empty membership list).
        response.data = [GitHubOrganizationMinimalModel(data) for data in response.data or []]
        return response
|
[
"[email protected]"
] | |
88c38efa8ff0a8056b6fc309011e034888426fa0
|
26acc7e23024098661a42da37e2cb4ed56c21b44
|
/dgp/genera/load/loader.py
|
daf5ca8acee012f9dd328fd48ef0fb2baf85a38a
|
[
"MIT"
] |
permissive
|
dataspot/dgp
|
80536c0e296570c109511de3dae6e0297bb8b0fd
|
e86d604c8af5534985f9b788ba809facbc325152
|
refs/heads/master
| 2023-03-16T05:15:38.362702 | 2023-03-09T07:07:28 | 2023-03-09T07:07:28 | 169,378,970 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,879 |
py
|
import os
import json
import requests
from hashlib import md5
from dataflows import Flow, load, dump_to_path
from dataflows.base.schema_validator import ignore
from ...core import BaseDataGenusProcessor, Required, Validator, ConfigurableDGP
from .analyzers import FileFormatDGP, StructureDGP
from ...config.consts import CONFIG_URL, CONFIG_PUBLISH_ALLOWED, RESOURCE_NAME
from ...config.log import logger
class LoaderDGP(BaseDataGenusProcessor):
    """Loading stage of the DGP pipeline: analyzes the source's file format
    and structure, then yields a dataflows Flow that loads the data
    (cached under .cache/ when publishing is not allowed)."""

    PRE_CHECKS = Validator(
        Required(CONFIG_URL, 'Source data URL or path')
    )

    def init(self):
        # Sub-analyzers that detect file format and tabular structure.
        self.steps = self.init_classes([
            FileFormatDGP,
            StructureDGP,
        ])

    def hash_key(self, *args):
        # Stable digest of the arguments; used as the cache directory name.
        data = json.dumps(args, sort_keys=True, ensure_ascii=False)
        return md5(data.encode('utf8')).hexdigest()

    def flow(self):
        """Return the load Flow, or None when earlier checks raised errors."""
        if len(self.errors) == 0:
            config = self.config._unflatten()
            source = config['source']
            # Cache key covers everything that affects the loaded output.
            ref_hash = self.hash_key(source, config['structure'], config.get('publish'))
            cache_path = os.path.join('.cache', ref_hash)
            datapackage_path = os.path.join(cache_path, 'datapackage.json')
            structure_params = self.context._structure_params()
            http_session = self.context.http_session()
            loader = load(source.pop('path'), validate=False,
                          name=RESOURCE_NAME,
                          **source, **structure_params,
                          http_session=http_session,
                          http_timeout=120,
                          infer_strategy=load.INFER_PYTHON_TYPES,
                          cast_strategy=load.CAST_DO_NOTHING,
                          # Preview mode (publishing not allowed) is capped
                          # at 5000 rows.
                          limit_rows=(
                              None
                              if self.config.get(CONFIG_PUBLISH_ALLOWED)
                              else 5000
                          ))
            if self.config.get(CONFIG_PUBLISH_ALLOWED):
                return Flow(
                    loader,
                )
            else:
                # Preview runs: materialize the source once into .cache and
                # serve subsequent runs from the dumped datapackage.
                if not os.path.exists(datapackage_path):
                    logger.info('Caching source data into %s', cache_path)
                    Flow(
                        loader,
                        dump_to_path(cache_path, validator_options=dict(on_error=ignore)),
                        # printer(),
                    ).process()
                logger.info('Using cached source data from %s', cache_path)
                return Flow(
                    load(datapackage_path, resources=RESOURCE_NAME),
                )
class PostLoaderDGP(ConfigurableDGP):
    """Configurable post-load processing stage (module 'loading',
    not taxonomy-specific)."""
    def init(self):
        super().init('loading', per_taxonomy=False)
        self._flows = None
class PreLoaderDGP(ConfigurableDGP):
    """Configurable pre-load processing stage (module 'preloading',
    not taxonomy-specific)."""
    def init(self):
        super().init('preloading', per_taxonomy=False)
        self._flows = None
|
[
"[email protected]"
] | |
a0bcf1146515c5da0c64441490de32599b91f02e
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/01/14.py
|
fd0cb48a6c9d059526138c98e8ba82d309f6802b
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,517 |
py
|
#!/usr/bin/python2
### Google Code Jam template
# Futures
from __future__ import division
from __future__ import with_statement
from __future__ import print_function
## Library
# @memoized
def memoized(func):
    """Decorator that caches func's results keyed by its positional args."""
    cache = {}

    def wrapped(*args):
        # EAFP: look the result up first, compute it only on a miss.
        try:
            return cache[args]
        except KeyError:
            cache[args] = func(*args)
            return cache[args]

    return wrapped
## Setup
from os.path import basename, splitext
# Task letter
# Task letter is derived from this file's own name ("14.py" -> task "14").
TASK=splitext(basename(__file__))[0]
print("Task {}".format(TASK))

## Input templates
# Line as int
#int(infile.readline())
# Line as many ints
#(int(s) for s in infile.readline().split())

## Precalculation
print("Precalculation...")
# Python 2 string.maketrans builds the fixed character-substitution table
# (presumably the Code Jam 2012 "Speaking in Tongues" cipher -- the mapping
# below is given, not computed).
from string import maketrans
src = """aozq
ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv"""
dst = """yeqz
our language is impossible to understand
there are twenty six factorial possibilities
so it is okay if you want to just give up"""
table = maketrans(src, dst)
print("Precalculation done.")

## Calculation
# Read <TASK>.in, translate each case's line, write answers to <TASK>.out.
print("Calculation...")
with open(TASK+".in") as infile:
    with open(TASK+".out",mode="wt") as outfile:
        cases = int(infile.readline())
        for ncase in range(cases):
            print("Case #{nc}".format(nc=ncase+1))
            # Perform all necessary calculation
            text = infile.readline().strip()
            data = text.translate(table)
            outfile.write("Case #{nc}: {data}\n".format(nc=ncase+1,data=data))
print("Calculation done.")
|
[
"[email protected]"
] | |
5efd766bb70d94a197cb80cb858d7211c005cb27
|
4de2b914e4607dd0ca7eec60b21026af6b6c4797
|
/Old_work/valdambrini_cheli_papallo_tarmati/catkin_ws/build/navigation/clear_costmap_recovery/catkin_generated/pkg.develspace.context.pc.py
|
cb8deb76dfb119ed5c90cb0df8ac2a426a6fc434
|
[] |
no_license
|
ABiondi12/project_sgn
|
5203d21f2753dcdf7c53b153324dc75bc1221549
|
570b7be0b01e7c83cb927945e532d6a2213ebf65
|
refs/heads/main
| 2023-06-18T12:59:18.337096 | 2021-07-21T10:27:08 | 2021-07-21T10:27:08 | 307,121,028 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 551 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated catkin develspace pkg-config context -- regenerate via
# the catkin build rather than editing by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/catkin_ws/src/navigation/map_server/include".split(';') if "/home/robot/catkin_ws/src/navigation/map_server/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmap_server_image_loader".split(';') if "-lmap_server_image_loader" != "" else []
PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/robot/catkin_ws/devel"
PROJECT_VERSION = "1.16.2"
|
[
"[email protected]"
] | |
0a948854e027da6e1d313f2c60f11f0684e5b0f2
|
e7e497b20442a4220296dea1550091a457df5a38
|
/main_project/AdHot/monitorsystem/monitorsystem/controllers/zoom_graph.py
|
4e70d23eb63430c0430abba719d0b4142562c92e
|
[] |
no_license
|
gunner14/old_rr_code
|
cf17a2dedf8dfcdcf441d49139adaadc770c0eea
|
bb047dc88fa7243ded61d840af0f8bad22d68dee
|
refs/heads/master
| 2021-01-17T18:23:28.154228 | 2013-12-02T23:45:33 | 2013-12-02T23:45:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,518 |
py
|
import logging
import rrdtool
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from monitorsystem.lib.base import BaseController, render
from monitorsystem.lib.app_globals import Globals as g
from monitorsystem.lib.get_chart_info import GetAllCatagory
log = logging.getLogger(__name__)
class ZoomGraphController(BaseController):
    """Serve an enlarged ("zoomed-in") RRDtool graph for one monitored metric."""

    def index(self):
        # Return a rendered template
        #return render('/zoom_graph.mako')
        # or, return a string
        return 'Hello World'

    def zoomin(self, dbname, datasource, resolution, title, points, limit, description):
        """Render a 500x400 PNG from <dbname>.rrd and show it on the zoom page.

        dbname: RRD basename; also names the generated "<dbname>_big.png".
        datasource: DS name inside the RRD file.
        resolution: RRDtool step passed via -S.
        points: minutes of history to plot (graph starts at now - points*60 s).
        limit: warning threshold -- >0 highlights values above it, <0 values
               below abs(limit), 0 draws no warning overlay.
        description: free text passed through to the template.
        """
        limit = int(limit)
        img_path = "/data/xce/pylons/monitorsystem/monitorsystem/public/img/" + str(dbname) + "_big.png"
        rrd_path = "/data/xce/monitor/data/" + str(dbname) + ".rrd";
        title = str(title);
        font = "TITLE:10:/data/xce/monitor/fonts/simhei.ttf"
        passed_time = 60 * int(points);
        start = "now-" + str(passed_time)
        datasource = str(datasource)
        resolution = str(resolution)
        # Beyond this many points the MAX consolidation is plotted as well.
        rra1_points = 1200;
        ds_def_1 = "DEF:value1=" + rrd_path + ":" + datasource + ":AVERAGE"
        ds_def_2 = "DEF:value2=" + rrd_path + ":" + datasource + ":MAX"
        # value3/value4 isolate the samples that violate the threshold so they
        # can be drawn in red on top of the normal series.
        if(limit > 0):
            c_def_1 = "CDEF:value3=value1," + str(limit) + ",GT,value1,UNKN,IF"
            c_def_2 = "CDEF:value4=value2," + str(limit) + ",GT,value2,UNKN,IF"
        elif(limit < 0):
            c_def_1 = "CDEF:value3=value1," + str(-limit) + ",LT,value1,UNKN,IF"
            c_def_2 = "CDEF:value4=value2," + str(-limit) + ",LT,value2,UNKN,IF"
        graph_def_1 = "AREA:value1#00FF00:average"
        graph_def_2 = "LINE1:value2#0000FF:max"
        graph_def_3 = "AREA:value3#FF0000:warning "
        graph_def_4 = "LINE1:value4#FF0000"
        width = "500"
        height = "400"
        comments = "COMMENT:Average--------------MAX--------------MIN-------------- "
        g_print_1 = "GPRINT:value1:AVERAGE:%1.2lf"
        g_print_2 = "GPRINT:value1:MAX:%18.2lf"
        g_print_3 = "GPRINT:value1:MIN:%15.2lf"
        # Four variants: with/without the warning overlay, and with/without
        # the MAX series (only needed past the first RRA's resolution).
        if(limit == 0):
            if(int(points) <= rra1_points):
                ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, graph_def_1, comments, g_print_1, g_print_2, g_print_3)
            else:
                ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, graph_def_1, graph_def_2, comments, g_print_1, g_print_2, g_print_3)
        else:
            if(int(points) <= rra1_points):
                ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, c_def_1, graph_def_1, graph_def_3, comments, g_print_1, g_print_2, g_print_3)
            else:
                ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, c_def_1, c_def_2, graph_def_1, graph_def_2, graph_def_3, graph_def_4)
        # Expose template variables via the pylons context object.
        c.img_path = "img/" + str(dbname) + "_big.png"
        c.description = description
        c.catalist = GetAllCatagory();
        return render('/zoom.mako')
        # return "Viewing " + str(dbname) + " " + str(resolution) + " " + str(points) + " " + str(limit)
|
[
"[email protected]"
] | |
d1a37f55af2498bbddef30e64ab5cf173cdc0d1e
|
7f2612e5132e1583e5ba9758f299a8f301f0dc70
|
/FB/257-binary-tree-paths.py
|
eada330e2dcde3095e5ceb1523f68ee52d0cba47
|
[] |
no_license
|
taeheechoi/coding-practice
|
380e263a26ed4de9e542c51e3baa54315127ae4f
|
9528b5e85b0ea2960c994ffea62b5be86481dc38
|
refs/heads/main
| 2022-07-09T11:22:18.619712 | 2022-06-28T14:55:51 | 2022-06-28T14:55:51 | 447,082,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 550 |
py
|
# https://leetcode.com/problems/binary-tree-paths/
# Input: root = [1,2,3,null,5]
# Output: ["1->2->5","1->3"]
# Example 2:
# Input: root = [1]
# Output: ["1"]
class Solution:
    def binaryTreePath(self, root):
        """Return every root-to-leaf path of the tree as "a->b->c" strings.

        root: node with `val`, `left`, `right` attributes, or None.
        Returns [] for an empty tree.
        """
        paths = []

        def dfs(node, prefix):
            # prefix holds "v1->v2->" for all ancestors of `node`.
            if not node:
                return
            if node.left is None and node.right is None:
                # Leaf: complete the path with this node's own value.
                # (Bug fix: the original appended and recursed on `root`
                # instead of `node`, so it never descended the tree.)
                paths.append(prefix + str(node.val))
                return
            prefix += str(node.val) + '->'
            dfs(node.left, prefix)
            dfs(node.right, prefix)

        dfs(root, '')
        return paths
|
[
"[email protected]"
] | |
ebe6ba66f1743f17d66488c547d62eb1dd646dc3
|
a972c5de4144940d1c5903bb5636df4bcaf4b283
|
/ejerciciokenwin/__init__.py
|
bcbe09dd494756b2f4afdb3392ceb03bc3b19d99
|
[] |
no_license
|
apocalipsys/ejerciciopyramid-2020
|
5dafe2926bb78338eb1eca17d2be8f6ef2eba8fa
|
2411601f4e2e0dd9aa49951251f9acfe73d43777
|
refs/heads/master
| 2020-12-21T00:51:38.700245 | 2020-02-07T07:24:51 | 2020-02-07T07:24:51 | 236,258,661 | 3 | 1 | null | 2020-02-05T06:29:14 | 2020-01-26T02:42:08 |
Python
|
UTF-8
|
Python
| false | false | 802 |
py
|
#This is a config file, necesary to include the views, moludes, models and so on
#Este archivo de configuracion sirve para incluir las vistas, modelo de base de datos, modulos etc.
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
import os
# Absolute path of this package directory (for serving static assets).
static_dir = os.path.abspath(os.path.dirname(__file__))

def main(global_config, **settings):
    """WSGI application factory: wire sessions, templating, security, routes."""
    # Signed-cookie sessions. NOTE(review): the signing secret is hard-coded;
    # it should come from deployment settings.
    my_session_factory = SignedCookieSessionFactory(
        'itsaseekreet')
    with Configurator(settings=settings,session_factory=my_session_factory) as config:
        config.include('.models')
        config.include('pyramid_jinja2')
        #config.add_jinja2_renderer('.html')
        config.include('.security')
        config.include('.routes')
        config.scan('.views')
    return config.make_wsgi_app()
|
[
"[email protected]"
] | |
05316d88a35289d491a107f6328cede2a1c6eb9f
|
4eaa1b9b08914e0a2cc9276363e489ccef19d3a2
|
/ch9/electric_car.py
|
a3903742781c7a56beb7c524f66ba35a4bb8f545
|
[] |
no_license
|
melihcanyardi/Python-Crash-Course-2e-Part-I
|
69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3
|
0c9b250f512985c04b2c0397f3afaa8bf3a57f17
|
refs/heads/main
| 2023-03-12T21:43:14.012537 | 2021-03-03T19:23:41 | 2021-03-03T19:23:41 | 344,236,741 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,048 |
py
|
from car import Car
class Battery():
    """A simple attempt to model a battery for an electric car."""

    # Known battery sizes (kWh) mapped to their approximate range in miles.
    _RANGES = {75: 260, 100: 315}

    def __init__(self, battery_size=75):
        """Initialize the battery's attributes."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a statement describing the battery size."""
        print(f"This car has a {self.battery_size}-kWh battery.")

    def get_range(self):
        """Print a statement about the range this battery provides.

        Bug fix: the original left its local unassigned for any size other
        than 75 or 100, raising UnboundLocalError; unknown sizes now print an
        explicit message. The local is also renamed so it no longer shadows
        the builtin `range`.
        """
        miles = self._RANGES.get(self.battery_size)
        if miles is not None:
            print(f"This car can go about {miles} miles on a full charge.")
        else:
            print(f"Range unknown for a {self.battery_size}-kWh battery.")
class ElectricCar(Car):
    """Represent aspects of a car, specific to electric vehicles."""
    def __init__(self, make, model, year):
        """
        Initialize attributes of the parent class.
        Then initialize attributes specific to an electric car.
        """
        super().__init__(make, model, year)
        # Every electric car gets a Battery with the default (75 kWh) size.
        self.battery = Battery()
|
[
"[email protected]"
] | |
4331f303b88abc1007c44aedec54876888a6b860
|
1a74a9ec3e51e7776e5c15e98c66b4cb5a9f8144
|
/source/webapp/views/base_views.py
|
3bf9e6278427c473397fb2e32f09ab53e41e9079
|
[] |
no_license
|
Aitmatow/issue_tracker
|
d66e47a7f633a455e28a1921c5220c60a4c5907f
|
96f482be1251d9c557446bc0bfa0e949cc3129d9
|
refs/heads/master
| 2022-11-26T19:59:12.929073 | 2019-12-09T12:52:13 | 2019-12-09T12:52:13 | 211,033,057 | 0 | 0 | null | 2022-11-22T04:47:23 | 2019-09-26T07:57:27 |
Python
|
UTF-8
|
Python
| false | false | 4,098 |
py
|
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.http import urlencode
from django.views import View
from django.views.generic import TemplateView, ListView
class DetailView(TemplateView):
    """Render a template for a single model instance looked up by URL pk."""
    # Template-context name for the object; `model` must be set by subclasses.
    context_key = 'objects'
    model = None

    def get_context_data(self, **kwargs):
        """Add the object matching kwargs['pk'] to the context (404 if absent)."""
        pk = kwargs.get('pk')
        context = super().get_context_data(**kwargs)
        context[self.context_key] = get_object_or_404(self.model, pk=pk)
        return context

    def get_objects(self):
        # NOTE(review): not referenced in this class -- presumably a hook for
        # subclasses.
        return self.model.objects.all()
class UpdateView(View):
    """Generic edit view: GET renders a form pre-filled from the instance,
    POST validates and copies cleaned data back onto it."""
    form_class = None
    template_name = None
    redirect_url = ''       # target after a successful save
    model = None
    key_kwarg = 'pk'        # URL kwarg holding the object's primary key
    context_key = 'object'  # template-context name for the instance

    def get(self, request, *args, **kwargs):
        """Render the edit form seeded with the instance's current values."""
        self.object = self.get_object()
        form = self.form_class(initial=self.get_form_initial())
        context = self.make_context(form)
        return render(request, self.template_name, context=context)

    def get_form_initial(self):
        """Build the form's initial dict from every concrete model field."""
        model_fields = [field.name for field in self.model._meta.fields]
        initial = {}
        for field in model_fields:
            initial[field] = getattr(self.object, field)
        # NOTE(review): leftover debug output.
        print(initial)
        return initial

    def post(self, request, *args, **kwargs):
        form = self.form_class(data=request.POST)
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    def form_valid(self, form):
        """Copy each cleaned field onto the instance, save, then redirect."""
        self.object = self.get_object()
        for field, value in form.cleaned_data.items():
            setattr(self.object, field, value)
        self.object.save()
        return redirect(self.get_redirect_url())

    def form_invalid(self, form):
        context = self.make_context(form)
        return render(self.request, self.template_name, context=context)

    def get_object(self):
        pk = self.kwargs.get(self.key_kwarg)
        return get_object_or_404(self.model, pk=pk)

    def make_context(self, form):
        return {
            'form': form,
            self.context_key: self.object
        }

    def get_redirect_url(self):
        return self.redirect_url
class DeleteView(View):
    """Generic delete view. With `confirmation_for_delete` set to True, GET
    shows a confirmation page and POST deletes; otherwise GET deletes
    immediately."""
    template_name = None
    model = None
    redirect_url = None
    confirmation_for_delete = None

    def get(self, request, *args, **kwargs):
        # Note `object` shadows the builtin.
        object = get_object_or_404(self.model, pk=kwargs.get('pk'))
        if self.confirmation_for_delete == True:
            context = {'object': object}
            return render(self.request, self.template_name, context)
        else:
            object.delete()
            return redirect(self.get_redirect_url())

    def post(self, request, *args, **kwargs):
        """Perform the delete after the confirmation page was submitted."""
        object = get_object_or_404(self.model, pk = kwargs.get('pk'))
        object.delete()
        return redirect(self.get_redirect_url())

    def get_redirect_url(self):
        return self.redirect_url
class SearchView(ListView):
    """Paginated list view filtered by a search form's `search` field.
    Subclasses set `form` and implement get_query() to build the filter."""
    template_name = None
    model = None
    paginate_by = 10
    paginate_orphans = 1
    page_kwarg = 'page'
    form = None  # search form class; replaced by an instance per request

    def get(self, request, *args, **kwargs):
        self.form = self.get_search_form()
        self.search_value = self.get_search_value()
        return super().get(request, *args, **kwargs)

    def get_search_form(self):
        # Bind the form to the GET query string.
        return self.form(data=self.request.GET)

    def get_search_value(self):
        if self.form.is_valid():
            return self.form.cleaned_data['search']
        return None

    def get_queryset(self):
        queryset = super().get_queryset()
        if self.search_value:
            # get_query() is supplied by the subclass.
            queryset = queryset.filter(
                self.get_query()
            )
        return queryset

    def get_query(self):
        # Hook for subclasses: return the filter for the current search value.
        pass

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super().get_context_data(object_list=object_list, **kwargs)
        context['form'] = self.form
        if self.search_value:
            # Preserve the search term in pagination links.
            context['query'] = urlencode({'search' : self.search_value})
        return context
|
[
"[email protected]"
] | |
466c50bd91fc4be61abb950479c4d47fb1041ed9
|
8ed80561e1b3c0bcdb6201cae8af845d5da23edc
|
/guppe/exercicios_secao_8/ex_10.py
|
62a551fc304de9116280ee458a9d1eaa9680822e
|
[] |
no_license
|
Fulvio7/curso-python-guppe
|
42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb
|
98966963f698eb33e65ed58a84f96e28f675848a
|
refs/heads/main
| 2023-08-28T13:31:12.916407 | 2021-10-09T19:03:17 | 2021-10-09T19:03:17 | 415,393,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
10- Faça uma função que receba dois números e retorne qual deles
é o maior.
"""
def retorna_maior(n1, n2):
    """Return a message naming the larger of the two numbers."""
    # Equal values are handled first; otherwise pick the larger one.
    if n1 == n2:
        return 'Os dois números são iguais! XP '
    maior = n1 if n1 > n2 else n2
    return f'{maior} é maior.'
# Simple CLI driver: read two integers and report the larger one.
print('Descubra o maior número')
num1 = int(input('Num1 = '))
num2 = int(input('Num2 = '))
print(retorna_maior(num1, num2))
|
[
"[email protected]"
] | |
3518fd2cc14d2ce5ab3297741d351dc4635fe976
|
5a04919d14d3f34815987a202d520609c17cc605
|
/problems/forritun/hefja_i_veldi/tests/gen.py
|
1397acd8903770e81c30e87255b3e79a5210b121
|
[] |
no_license
|
SuprDewd/forritunarverkefni
|
49e9864c6efaa192747b3f004f79a53a4519c10a
|
702819f9fa2d106fede4ff2284a00b5141662493
|
refs/heads/master
| 2021-01-15T11:14:44.316308 | 2014-08-09T15:09:09 | 2014-08-09T15:09:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
import math
# (base, exponent) pairs for each generated test case.
tests = [ (10, 4), (10, -4), (15, -1), (1, 4), (0, 3), (1, -2), (-1, 2), (-1, 3), (-1, 0), (13, 2), (-13, 2), (13, 3), (-13, 3), (-5, -6) ]
# Write one file pair per case: T<i>.in holds "base exp", T<i>.out holds
# base**exp formatted as a float.
for i, t in enumerate(tests):
    with open('T%d.in' % i, 'w') as f: f.write('%d %d\n' % tuple(t))
    with open('T%d.out' % i, 'w') as f: f.write('%f\n' % math.pow(t[0], t[1]))
|
[
"[email protected]"
] | |
1dfaa8cf11a2d14dd19b5bf31b58f44bf15e34a0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03379/s477030145.py
|
93534519416891d22f6c4c276609f50101689a1d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 539 |
py
|
def main():
    """For each element, print the median of the list with that element removed.

    Reads n (even) and n integers from stdin. Removing an element from the
    lower half leaves the upper median of the full list as the answer;
    removing one from the upper half leaves the lower median.
    """
    n = int(input())
    values = list(map(int, input().split()))
    ordered = sorted(values)
    lo = ordered[n // 2 - 1]  # lower median of the full list
    hi = ordered[n // 2]      # upper median of the full list
    if lo == hi:
        answers = [lo] * n
    else:
        answers = [hi if v <= lo else lo for v in values]
    for a in answers:
        print(a)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
1a19af9a0fadedcb4bd7e45597b3e62571e51821
|
3c6b0521eb788dc5e54e46370373e37eab4a164b
|
/holistic_eval/roberta_mnli/examples/scripts/scripts/run_experiment.py
|
9d5f149859a049824f329d3eeca723f861738f66
|
[
"Apache-2.0"
] |
permissive
|
y12uc231/DialEvalMetrics
|
7402f883390b94854f5d5ae142f700a697d7a21c
|
f27d717cfb02b08ffd774e60faa6b319a766ae77
|
refs/heads/main
| 2023-09-02T21:56:07.232363 | 2021-11-08T21:25:24 | 2021-11-08T21:25:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,598 |
py
|
import os
import smtplib
from email.mime.text import MIMEText
mail_host = 'smtp.163.com'
mail_user = 'aigu3525'
mail_pass = 'WOaibaobao'
sender = '[email protected]'
receivers = ['[email protected]']
def run_training(ex_title, type, paras_dict, node, GPU, logger=None , print_=False):
    """Launch a training run over SSH on a compute node, emailing start/end
    notifications.

    ex_title: experiment label used in log names and mail subjects.
    type: task name; only 'MNLI' maps to a training script here.
    paras_dict: command-line options forwarded to the training script.
    node, GPU: target node id and CUDA device index.
    NOTE(review): `type` shadows the builtin; `logger` and `print_` are unused;
    the SMTP credentials used below are hard-coded module constants.
    """
    print('_'*100)
    if type == 'MNLI': train_file = 'run_glue.py'
    opt_dict = paras_dict
    try:
        os.mkdir('scripts/logs/' + type)
    except:
        # Directory creation failure (e.g. already exists) is ignored.
        x=1
    # Email notification: experiment starting.
    message = MIMEText('Start training experiment {}'.format(str(ex_title)), 'plain', 'utf-8')
    message['Subject'] = 'Experiment {}'.format(str(ex_title))
    message['From'] = sender
    message['To'] = receivers[0]
    try:
        smtpObj = smtplib.SMTP()
        smtpObj.connect(mail_host, 25)
        smtpObj.login(mail_user, mail_pass)
        smtpObj.sendmail(
            sender, receivers, message.as_string())
        smtpObj.quit()
        print('success')
    except:
        print('error')  # report the failure
    if True:
        print_file_train = 'scripts/logs/'+ type + '/' + ex_title+ '.print'
        keys = list(opt_dict)
        values = [opt_dict[key] for key in keys]
        # Render opt_dict as "--key=value" flags; a value of the string 'True'
        # becomes a bare "--key", and 'False' drops the flag entirely.
        paras = ''
        for i in range(len(keys)):
            if values[i] == 'False':
                continue
            paras += ' --'
            paras += keys[i]
            if values[i] != 'True':
                paras += '='
                paras += str(values[i])
        if True:
            commend_list_train = []
            # print(paras)
            commend_list_train.append('ssh node'+node + ' \"')
            commend_list_train.append('cd /root/liuyx/transformers/examples;')
            commend_list_train.append('CUDA_VISIBLE_DEVICES=' + str(GPU) + ' /root/anaconda3/envs/transformer/bin/python ')
            commend_list_train.append(train_file + paras +' 2>&1 | tee '+print_file_train + '')
            commend_list_train.append('\"')
            print(commend_list_train)
            # NOTE(review): shell command assembled by string concatenation and
            # run via os.system -- injection-prone if any argument is untrusted.
            pred_return = os.system(''.join(commend_list_train))
    # Email notification: training finished.
    message = MIMEText('Experiment {}, training end'.format(str(ex_title)), 'plain', 'utf-8')
    message['Subject'] = 'Experiment {}'.format(str(ex_title))
    message['From'] = sender
    message['To'] = receivers[0]
    try:
        smtpObj = smtplib.SMTP()
        smtpObj.connect(mail_host, 25)
        smtpObj.login(mail_user, mail_pass)
        smtpObj.sendmail(
            sender, receivers, message.as_string())
        smtpObj.quit()
        print('success')
    except:
        print('error')  # report the failure
|
[
"[email protected]"
] | |
53b6f743c52e16229449c9f99dc18438957c017f
|
4290daae480aabfc35c85374a468085a6fa1a1ac
|
/ctt-server/openapi_server/test/test_result_controller.py
|
652d6ffdfa52d2b04f99be41db1f222c6a23aec6
|
[
"Apache-2.0"
] |
permissive
|
pjakovits/radon-ctt
|
01c8bc760372f6887798722c291674971b10a86d
|
8c73e05a83ef66bd6e9dba6608d2bee089df7e86
|
refs/heads/master
| 2021-05-20T19:13:35.919410 | 2021-03-28T17:07:20 | 2021-03-28T17:07:20 | 252,386,523 | 0 | 0 |
Apache-2.0
| 2020-04-02T07:34:24 | 2020-04-02T07:34:23 | null |
UTF-8
|
Python
| false | false | 2,596 |
py
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.result import Result # noqa: E501
from openapi_server.test import BaseTestCase
class TestResultController(BaseTestCase):
    """ResultController integration test stubs"""

    def test_create_result(self):
        """Test case for create_result

        Creates new result
        """
        # Bug fix: the original instantiated `POSTResult`, a name never
        # imported or defined in this module; the model imported above is
        # `Result`.
        body = Result()
        # NOTE(review): this endpoint (and the delete below) lacks the
        # '/RadonCTT' prefix used by the other test cases -- confirm which
        # base path is correct.
        response = self.client.open(
            '/result',
            method='POST',
            data=json.dumps(body),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_delete_result_by_uuid(self):
        """Test case for delete_result_by_uuid

        Delete a result
        """
        response = self.client.open(
            '/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
            method='DELETE')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_download_result_by_uuid(self):
        """Test case for download_result_by_uuid

        Downloads the generated results
        """
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/RadonCTT/result/{result_uuid}/download'.format(result_uuid='result_uuid_example'),
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_result_by_uuid(self):
        """Test case for get_result_by_uuid

        Retrieve a result
        """
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/RadonCTT/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_results(self):
        """Test case for get_results

        Get all results
        """
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/RadonCTT/result',
            method='GET',
            headers=headers)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
|
[
"[email protected]"
] | |
a8612ac222aae209f9c985d771c92a8900557d7e
|
f76e8b03862264731be92bc16e4ced7b7e078b0a
|
/instagram/urls.py
|
64a8b30ddbdcb0ea06908f107e67a02fbccf2f17
|
[
"MIT"
] |
permissive
|
bryomajor/instalite
|
b8d400d6b1ecc337e5008ddd6738e8df4653df05
|
c3854b30235960fae89682c55c88637fb8fb05ad
|
refs/heads/master
| 2022-12-11T15:06:32.222163 | 2021-04-07T10:13:46 | 2021-04-07T10:13:46 | 221,914,550 | 0 | 1 |
MIT
| 2021-06-10T22:16:42 | 2019-11-15T11:54:53 |
Python
|
UTF-8
|
Python
| false | false | 1,134 |
py
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^home/', views.timeline, name = 'index'),
url(r'^$', views.home, name = 'home'),
url(r'^signup/$', views.signup, name = 'signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^user/(?P<username>\w+)', views.profile, name='profile'),
url(r'^accounts/edit/', views.edit_profile, name='edit_profile'),
url(r'^upload/$', views.upload_image, name='upload_image'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^like/(?P<image_id>\d+)', views.like, name='like'),
url(r'^is_liked/', views.is_liked, name = 'is_liked')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
2c5ad861fcce9b4203e5ff1c9b6fbdabf1e10047
|
7c0ac74b1215a5e53698924b69b89221fec0cfd6
|
/torch_geometric/utils/matmul.py
|
8e8f4bb6aaea1430f1a94da1c9239473e4b18be1
|
[
"MIT"
] |
permissive
|
ZackPashkin/pytorch_geometric
|
b30c1a220f3f5f593ec4ac12b696f2cac1ae4e0a
|
3663a96c8e649af46c29a32beb03f49cc97f5b86
|
refs/heads/master
| 2020-03-20T09:33:51.497347 | 2019-03-19T05:07:30 | 2019-03-19T05:07:30 | 137,341,025 | 0 | 0 | null | 2018-06-14T10:05:30 | 2018-06-14T10:05:29 | null |
UTF-8
|
Python
| false | false | 415 |
py
|
from torch_scatter import scatter_add
def matmul(index, value, tensor):
tensor = tensor if tensor.dim() > 1 else tensor.unsqueeze(-1)
assert (value is None or value.dim() == 1) and tensor.dim() == 2
row, col = index
out_col = tensor[col]
out_col = out_col if value is None else out_col * value.unsqueeze(-1)
out = scatter_add(out_col, row, dim=0, dim_size=tensor.size(0))
return out
|
[
"[email protected]"
] | |
5ffb982e62eb751b952318b60fb800f712713ca9
|
f3f732881b813dd5d6e1239618f5d4d6bb394db7
|
/160.intersectionLinkedList.py
|
6fc29a7e0f2285d1531677f1a175803bb8ec1e0d
|
[] |
no_license
|
umnstao/leetcodeOJ
|
45917528abb693fa23678356497267e4ce571a4f
|
f7cb7cfa6e1f04efd741c2456ad930db48101573
|
refs/heads/master
| 2021-01-21T14:57:22.257064 | 2017-11-22T22:57:48 | 2017-11-22T22:57:48 | 95,362,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,000 |
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
lenA = 0
lenB = 0
curA = headA
while curA:
curA = curA.next
lenA += 1
curB = headB
while curB:
curB = curB.next
lenB += 1
curA = headA
curB = headB
if lenA > lenB:
k = lenA - lenB
while k > 0:
curA = curA.next
k = k - 1
elif lenA < lenB:
k = lenB - lenA
while k > 0:
curB = curB.next
k = k - 1
while curA:
if curA == curB:
return curA
curA = curA.next
curB = curB.next
return None
|
[
"[email protected]"
] | |
e3927f2bbe1447e57c5f9862e6bdbbed472c3f4d
|
ef72a7df3c39c215dd90ac5e72b164eb9d7da892
|
/rpg/monsters/imp.py
|
b23f023017e8acdf3b1f1eebfbf51bb93c44f41b
|
[] |
no_license
|
thebmo/messing_around
|
d49a87fc1ff722428ea67bc710ca99ad287098bd
|
4cb12e0b224cf7d1f93cb4ae6ff7603619fb7aa9
|
refs/heads/master
| 2021-01-13T02:18:50.799898 | 2015-04-08T01:12:41 | 2015-04-08T01:12:41 | 28,570,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
"""
Imp monster sub-class of parents Monster and NPC.
"""
from monster import Monster
from npc import NPC
class Imp(Monster, NPC):
NAME = 'Imp'
STATS = {
'STR': 5,
'AGI': 3,
'INT': 1,
'CHA': 0,
'LCK': 0,
'max_hp': 6,
'max_ap': 1,
'level': 2,
'exp': 6,
'gold': 8,
}
|
[
"[email protected]"
] | |
247c239100ef619a331be5d46ae4dabbf1f51393
|
bf69394cc6015f2c8ac28ae927be2a83b96facf3
|
/lib/utils/training_stats.py
|
6aff48aa5ddbb6c269cd19eb13e3b1621d6a791a
|
[
"MIT"
] |
permissive
|
fangyuan-ksgk/Detectron.pytorch
|
bf1133b73763ec682b4f219a857e81515d86ebf5
|
e8dfb86fbc68d30b9f443bc6aec722c5e4ce301e
|
refs/heads/master
| 2023-03-16T04:48:22.648717 | 2018-04-30T14:54:28 | 2018-04-30T14:54:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,618 |
py
|
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import datetime
import numpy as np
from core.config import cfg
from utils.logging import log_stats
from utils.logging import SmoothedValue
from utils.timer import Timer
import utils.net as nu
class TrainingStats(object):
"""Track vital training statistics."""
def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
# Output logging period in SGD iterations
self.misc_args = misc_args
self.LOG_PERIOD = log_period
self.tblogger = tensorboard_logger
self.tb_ignored_keys = ['iter', 'eta']
self.iter_timer = Timer()
# Window size for smoothing tracked values (with median filtering)
self.WIN_SZ = 20
def create_smoothed_value():
return SmoothedValue(self.WIN_SZ)
self.smoothed_losses = defaultdict(create_smoothed_value)
self.smoothed_metrics = defaultdict(create_smoothed_value)
self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
def IterTic(self):
self.iter_timer.tic()
def IterToc(self):
return self.iter_timer.toc(average=False)
def ResetIterTimer(self):
self.iter_timer.reset()
def UpdateIterStats(self, model_out):
"""Update tracked iteration statistics."""
total_loss = 0
if cfg.FPN.FPN_ON:
loss_rpn_cls_value = 0
loss_rpn_bbox_value = 0
for k, loss in model_out['losses'].items():
assert loss.shape[0] == cfg.NUM_GPUS
loss = loss.mean(dim=0)
total_loss += loss
loss_data = loss.data[0]
self.smoothed_losses[k].AddValue(loss_data)
model_out['losses'][k] = loss
if k.startswith('loss_rpn_cls'):
loss_rpn_cls_value += loss_data
elif k.startswith('loss_rpn_bbox'):
loss_rpn_bbox_value += loss_data
self.smoothed_total_loss.AddValue(total_loss.data[0])
model_out['total_loss'] = total_loss
if cfg.FPN.FPN_ON:
self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_value)
self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_value)
for k, metric in model_out['metrics'].items():
metric = metric.mean(dim=0)
self.smoothed_metrics[k].AddValue(metric.data[0])
model_out['metrics'][k] = metric
def LogIterStats(self, cur_iter, lr):
"""Log the tracked statistics."""
if (cur_iter % self.LOG_PERIOD == 0 or
cur_iter == cfg.SOLVER.MAX_ITER - 1):
stats = self.GetStats(cur_iter, lr)
log_stats(stats, self.misc_args)
if self.tblogger:
self.tb_log_stats(stats, cur_iter)
def tb_log_stats(self, stats, cur_iter):
"""Log the tracked statistics to tensorboard"""
for k in stats:
if k not in self.tb_ignored_keys:
v = stats[k]
if isinstance(v, dict):
self.tb_log_stats(v, cur_iter)
else:
self.tblogger.add_scalar(k, v, cur_iter)
def GetStats(self, cur_iter, lr):
eta_seconds = self.iter_timer.average_time * (
cfg.SOLVER.MAX_ITER - cur_iter
)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
stats = OrderedDict(
iter=cur_iter + 1, # 1-indexed
time=self.iter_timer.average_time,
eta=eta,
loss=self.smoothed_total_loss.GetMedianValue(),
lr=lr,
)
stats['metrics'] = OrderedDict()
for k in sorted(self.smoothed_metrics):
stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
head_losses = []
rpn_losses = []
rpn_fpn_cls_losses = []
rpn_fpn_bbox_losses = []
for k, v in self.smoothed_losses.items():
toks = k.split('_')
if len(toks) == 2:
head_losses.append((k, v.GetMedianValue()))
elif len(toks) == 3:
rpn_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'cls':
rpn_fpn_cls_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'bbox':
rpn_fpn_bbox_losses.append((k, v.GetMedianValue()))
else:
raise ValueError("Unexpected loss key: %s" % k)
stats['head_losses'] = OrderedDict(head_losses)
stats['rpn_losses'] = OrderedDict(rpn_losses)
stats['rpn_fpn_cls_losses'] = OrderedDict(rpn_fpn_cls_losses)
stats['rpn_fpn_bbox_losses'] = OrderedDict(rpn_fpn_bbox_losses)
return stats
|
[
"[email protected]"
] | |
58387329bb15b94260f2528c77fccfb21cdb8190
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/SqbyWYwqChQroXfhu_23.py
|
85771451090fc8484ee47a515d24c515837f537c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,362 |
py
|
"""
This challenge concerns _square matrices_ (same number of rows and columns) as
the below example illustrates:
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
The entries in the diagonal line from the top left to the bottom right form
the _main diagonal_ of the matrix. In this case, 1,5,9 form the main diagonal.
Write a function that returns the matrix obtained by replacing the entries
_above_ the main diagonal with 0s.
For example, for the matrix above you should return:
[
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
### Examples
lower_triang([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]) ➞ [
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
lower_triang([
[5, 7],
[7, 9]
]) ➞ [
[5, 0],
[7, 9]
]
lower_triang([
[1, 8, 8, 1],
[2, 7, 7, 2],
[3, 6, 6, 3],
[4, 5, 5, 4]
]) ➞ [
[1, 0, 0, 0],
[2, 7, 0, 0],
[3, 6, 6, 0],
[4, 5, 5, 4]
]
### Notes
* As in the examples, the size of the matrices will vary (but they will always be square).
* In Linear Algebra, matrices with 0s above the diagonal are called _lower triangular matrices_.
"""
def lower_triang(arr):
for i in range(len(arr)):
for j in range(len(arr[i])):
if j < i:
arr[j][i] = 0
return arr
|
[
"[email protected]"
] | |
788398854e79143d77bd7bcbbc79202a74d49414
|
3e1beedf80c60153482192b086347d0530701c37
|
/problem solving/cinema.py
|
cfee1d1abc3a0f3637d3211ba9876bb01e88668e
|
[] |
no_license
|
rishi772001/Competetive-programming
|
ac130bde426844e09a3e5162e279d61278c7c502
|
3493991cac55f225eeee67dd49f1caed8211465c
|
refs/heads/master
| 2023-04-12T14:59:59.447354 | 2021-04-30T05:05:13 | 2021-04-30T05:05:13 | 267,785,820 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,133 |
py
|
# https://leetcode.com/problems/cinema-seat-allocation/
n = 2
booked = [[1,6],[1,8],[1,3],[2,3],[1,10],[1,2],[1,5],[2,2],[2,4],[2,10],[1,7],[2,5]]
theatre = [[0]*10 for i in range(n)]
for i in range(len(booked)):
theatre[booked[i][0] - 1][booked[i][1] - 1] += 1
print(theatre)
count = 0
for i in range(len(theatre)):
sum = theatre[i][1] + theatre[i][2] + theatre[i][3] + theatre[i][4]
j = 5
flag = False
if sum == 0:
count += 1
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j = j + 3
while j < 10:
if j - 4 == 1 or j - 4 == 3 or j - 4 == 5:
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
continue
if (sum == 0):
count += 1
if(j + 4 < 10):
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j += 3
else:
break
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
print(count)
|
[
"[email protected]"
] | |
c697d125b0367a7834e07b1127c2335e79570e79
|
2dfbb018568209864544375de59a157c8752689a
|
/skimreads/comments/migrations/0002_auto__del_field_comment_reading__add_field_comment_note.py
|
7b339a69401d147362ef5085086bdfaa985c3fb8
|
[] |
no_license
|
tommydangerous/skimreads
|
7df4bde603c6122f20242d4591357802a4484f9f
|
6e73341ab034b52bb48cde4f076948946944d2a9
|
refs/heads/master
| 2020-05-17T23:20:15.020065 | 2014-09-27T06:28:34 | 2014-09-27T06:28:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,250 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Comment.reading'
db.delete_column('comments_comment', 'reading_id')
# Adding field 'Comment.note'
db.add_column('comments_comment', 'note',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['readings.Note']),
keep_default=False)
def backwards(self, orm):
# Adding field 'Comment.reading'
db.add_column('comments_comment', 'reading',
self.gf('django.db.models.fields.related.ForeignKey')(default='', to=orm['readings.Reading']),
keep_default=False)
# Deleting field 'Comment.note'
db.delete_column('comments_comment', 'note_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Note']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'readings.note': {
'Meta': {'ordering': "['created']", 'object_name': 'Note'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reading': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Reading']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'readings.reading': {
'Meta': {'object_name': 'Reading'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
}
}
complete_apps = ['comments']
|
[
"[email protected]"
] | |
95085f0f6148d3aeba523e3cba43e37d56a4cc60
|
908336e941d7d95d2ff168f8d132bf5656b87752
|
/datasets/weibo_senti_100k/parse.py
|
ad104fde40447e6e0371f6df42df4903898a6e89
|
[] |
no_license
|
cyy0523xc/ChineseNlpCorpus
|
364437b5662bc0a138281afc817b375c50a7fecf
|
a027225e9caf963d0d4e38d96b402ce515505850
|
refs/heads/master
| 2020-03-22T05:45:09.343135 | 2018-12-18T02:14:08 | 2018-12-18T02:14:08 | 139,587,654 | 2 | 0 | null | 2018-07-03T13:29:28 | 2018-07-03T13:29:27 | null |
UTF-8
|
Python
| false | false | 612 |
py
|
# -*- coding: utf-8 -*-
#
#
# Author: alex
# Created Time: 2018年07月03日 星期二 21时51分54秒
import csv
with open('./weibo_senti_100k.csv', encoding='utf8') as r, \
open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_pos.txt', 'w', encoding='utf8') as pos, \
open('../../format_datasets/weibo_senti_100k/weibo_senti_100k_neg.txt', 'w', encoding='utf8') as neg:
for row in csv.DictReader(r):
content = row['review'].replace("\n", ' ').strip() + "\n"
if row['label'] == '1':
pos.write(content)
else:
neg.write(content)
print('ok')
|
[
"[email protected]"
] | |
fa6df867465274ac8444a135a311aa00afd86d2c
|
48d08e7c20628479ea69b4a1a51f99a3db26c79d
|
/MathPy/04_sympy_intro.py
|
9a8b4683b4bf945cb6a3376f182c1efe1b83b73d
|
[] |
no_license
|
PyRPy/stats_py
|
59ae0975c5b549fb47f7630b1f232caf715fe2ff
|
0c87ebf7f84eb7a21bcedb3234170ef220ca2f14
|
refs/heads/master
| 2022-09-27T21:01:53.316765 | 2022-09-17T01:52:09 | 2022-09-17T01:52:09 | 167,268,454 | 4 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 379 |
py
|
from sympy import Symbol
# ------------------Defining Symbols and Symbolic Operations -------------------
x = Symbol('x')
print(x + x + 1)
a = Symbol('x')
print(a + a + 1)
# find the original symbol object
print(a.name)
# define multiple symbols
from sympy import symbols
x, y, z = symbols('x, y, z')
s = x*(x + y) + x*(y + z)
print(s)
print(x*x*(1 + x))
|
[
"[email protected]"
] | |
34019fe74d66ee473c9f78d9730d9b933cee8973
|
4007a7626ccb18480e73ac304b0010f6aeba33fb
|
/proj_preproc/db.py
|
cb9defbbd56297c1bca17df0cef2ee205afdb103
|
[] |
no_license
|
olmozavala/air_pollution_forecast
|
68030748b975d463158f1ce7c7f16eb038493ced
|
5b543b3f4a190d7ae33a55c4f5b30f56b17347c3
|
refs/heads/master
| 2023-07-22T15:16:31.166036 | 2023-06-08T18:55:14 | 2023-06-08T18:55:14 | 226,166,662 | 0 | 0 | null | 2023-02-16T18:40:24 | 2019-12-05T18:41:50 |
Python
|
UTF-8
|
Python
| false | false | 129 |
py
|
def eliminateNonContinuousTimes(data, numhours):
"""It eliminates those 'rows' that do not contain 'numhours' continuously"""
|
[
"[email protected]"
] | |
bdce9ca6acb87cf1e40299efade42b89dec4c38a
|
9de27e623c85b0d55da4afe4d843fe321b77954d
|
/Configuration/Geometry/python/GeometryDD4hepExtended2026D76_cff.py
|
1905e4de5a22bdb37bd5f0728b7fe57a842f0dc7
|
[
"Apache-2.0"
] |
permissive
|
PFCal-dev/cmssw
|
a97d566d691bc5ac900e48c632f4e87a005f94a2
|
232187f0f8a201210426312b27a1b62e55b6084c
|
refs/heads/hgc-tpg-devel-CMSSW_12_0_0_pre3
| 2022-06-01T08:27:39.166655 | 2021-11-23T15:28:18 | 2021-11-23T15:28:18 | 14,498,276 | 4 | 7 |
Apache-2.0
| 2022-02-08T11:01:38 | 2013-11-18T16:34:32 |
C++
|
UTF-8
|
Python
| false | false | 924 |
py
|
import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2026Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Configuration.Geometry.GeometryDD4hep_cff import *
DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D76.xml")
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *
from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *
from Geometry.MuonNumbering.muonGeometryConstants_cff import *
from Geometry.MuonNumbering.muonOffsetESProducer_cff import *
from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *
|
[
"[email protected]"
] | |
bc47d286fda4479958fbd49dd8f596957c627662
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/application/scoutevent/resultanim.py
|
e2efa0cc4796d7c81b9432230cea05603a9db449
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,483 |
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.util.cabareterror import CabaretError
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.app.cabaret.views.application.scoutevent.base import ScoutHandler
import urllib
from defines import Defines
import settings_sub
class Handler(ScoutHandler):
"""スカウト結果.
引数:
実行したスカウトのID.
確認キー.
結果のindex.
"""
@classmethod
def getViewerPlayerClassList(cls):
return []
def process(self):
self.__swf_params = {}
args = self.getUrlArgs('/sceventresultanim/')
try:
stageid = int(args.get(0))
scoutkey = urllib.unquote(args.get(1))
index = int(args.get(2) or 0)
except:
raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)
v_player = self.getViewerPlayer()
model_mgr = self.getModelMgr()
using = settings.DB_READONLY
flag_skip = BackendApi.get_scoutskip_flag(v_player.id)
eventmaster = BackendApi.get_current_scouteventmaster(model_mgr, using=using)
if eventmaster is None:
raise CabaretError(u'Event Closed.', CabaretError.Code.EVENT_CLOSED)
mid = eventmaster.id
# 進行情報.
playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using)
if playdata and playdata.confirmkey == scoutkey:
# DBからとり直すべき.
playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using=settings.DB_DEFAULT, reflesh=True)
if playdata is None or playdata.alreadykey != scoutkey:
if settings_sub.IS_LOCAL:
raise CabaretError(u'キーが正しくありません %s vs %s' % (playdata.alreadykey if playdata else 'None', scoutkey))
url = self.makeAppLinkUrlRedirect(UrlMaker.scoutevent())
self.appRedirect(url)
return
eventlist = playdata.result.get('event', [])[index:]
if not eventlist:
raise CabaretError(u'引数が不正です', CabaretError.Code.ILLEGAL_ARGS)
table = {
Defines.ScoutEventType.COMPLETE : (self.procComplete, False),
Defines.ScoutEventType.LEVELUP : (self.procLevelup, True),
Defines.ScoutEventType.HAPPENING : (self.procHappening, True),
}
proc = None
next_event = None
for idx, event in enumerate(eventlist):
next_event = eventlist[idx+1] if (idx + 1) < len(eventlist) else None
tmp = table.get(event.get_type(), None)
if tmp is None:
index += idx
break
tmp_proc, is_skipok = tmp
if flag_skip and is_skipok:
continue
index += idx
proc = tmp_proc
break
if not proc:
url = UrlMaker.scouteventresult(stageid, scoutkey)
self.appRedirect(self.makeAppLinkUrlRedirect(url))
return
if next_event and table.has_key(next_event.get_type()):
url = UrlMaker.scouteventresultanim(stageid, scoutkey, index+1)
else:
url = UrlMaker.scouteventresult(stageid, scoutkey)
self.__swf_params['backUrl'] = self.makeAppLinkUrl(url)
self.__playdata = playdata
proc(event)
def procComplete(self, event):
"""スカウト完了演出.
"""
self.__swf_params['text'] = Defines.EffectTextFormat.SCOUTRESULT_COMPLETE_TEXT
self.appRedirectToEffect('scoutclear/effect.html', self.__swf_params)
def procLevelup(self, event):
"""レベルアップ演出.
"""
resulttexts = []
# レベル情報.
resulttexts.append(Defines.EffectTextFormat.LEVELUP_STATUSTEXT % event.level)
self.__swf_params['statusText'] = u'\n'.join(resulttexts)
self.appRedirectToEffect('levelup/effect.html', self.__swf_params)
def procHappening(self, event):
"""ハプニング発生演出.
"""
self.appRedirectToEffect('chohutokyaku/effect.html', self.__swf_params)
def main(request):
return Handler.run(request)
|
[
"[email protected]"
] | |
6e6f3dccdbcc5e1215398c4c2605d64ab759adb7
|
4cdf4e243891c0aa0b99dd5ee84f09a7ed6dd8c8
|
/python/decorator/12.py
|
88fcefe3e80c692e7cf23963349684364a407982
|
[
"MIT"
] |
permissive
|
gozeon/code-collections
|
464986c7765df5dca980ac5146b847416b750998
|
13f07176a6c7b6ac13586228cec4c1e2ed32cae4
|
refs/heads/master
| 2023-08-17T18:53:24.189958 | 2023-08-10T04:52:47 | 2023-08-10T04:52:47 | 99,432,793 | 1 | 0 |
NOASSERTION
| 2020-07-17T09:25:44 | 2017-08-05T15:56:53 |
JavaScript
|
UTF-8
|
Python
| false | false | 395 |
py
|
#coding=utf-8
# -*- coding=utf-8 -*-
from functools import wraps
def my_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
'''decorator'''
print('Calling decorated function...')
return func(*args, **kwargs)
return wrapper
@my_decorator
def example():
"""Docstring"""
print('Called example function')
print(example.__name__, example.__doc__)
|
[
"[email protected]"
] | |
e4b9ae8070fb64421cd1a17b81be4ca33bd507bd
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Intermediate Data Visualization with Seaborn/04_Creating Plots on Data Aware Grids/04_Building a PairGrid.py
|
9a654d32b4feb9d7dc4923d50e47cef330e416b7
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null |
UTF-8
|
Python
| false | false | 1,354 |
py
|
"""
Building a PairGrid
When exploring a dataset, one of the earliest tasks is exploring the relationship between pairs of variables. This step is normally a precursor to additional investigation.
Seaborn supports this pair-wise analysis using the PairGrid. In this exercise, we will look at the Car Insurance Premium data we analyzed in Chapter 1. All data is available in the df variable.
Instructions 1/2
50 XP
1
2
Compare "fatal_collisions" to "premiums" by using a scatter plot mapped to a PairGrid()."""
# Create a PairGrid with a scatter plot for fatal_collisions and premiums
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map(plt.scatter)
plt.show()
plt.clf()
"""
Create another PairGrid but plot a histogram on the diagonal and scatter plot on the off diagonal.
"""
# Create the same PairGrid but map a histogram on the diag
g = sns.PairGrid(df, vars=["fatal_collisions", "premiums"])
g2 = g.map_diag(plt.hist)
g3 = g2.map_offdiag(plt.scatter)
plt.show()
plt.clf()
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================#
|
[
"Your-Email"
] |
Your-Email
|
ed82d43819a50cc8adfb850789934e1c87866bb5
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_sqrt/trend_poly/cycle_0/ar_12/test_artificial_128_sqrt_poly_0_12_100.py
|
d9d41e9db30a39012110217754bd29484e6124f5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 306 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 128 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 0, transform = "sqrt", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset);
|
[
"[email protected]"
] | |
8e578b5c5e911fbe8995ba795536316e66e5a61b
|
0ee72dc1b03070e25d3036bf6b562fc9b809ee72
|
/freeze/__init__.py
|
18f5610c310bc0963162bfdbec6dfe13797a4bdd
|
[
"MIT"
] |
permissive
|
fabiocaccamo/django-freeze
|
d36a9c7a9e197b23fa63dc77f0901aba89e4dfaf
|
c2d5dfbf38b072d79e1a37489b07e91c8af9461c
|
refs/heads/main
| 2023-08-29T12:50:19.069297 | 2023-07-18T07:35:52 | 2023-07-18T07:35:52 | 44,330,200 | 91 | 19 |
MIT
| 2023-09-08T13:52:25 | 2015-10-15T16:20:55 |
Python
|
UTF-8
|
Python
| false | false | 269 |
py
|
from freeze.metadata import (
__author__,
__copyright__,
__description__,
__license__,
__title__,
__version__,
)
__all__ = [
"__author__",
"__copyright__",
"__description__",
"__license__",
"__title__",
"__version__",
]
|
[
"[email protected]"
] | |
983525aeb3a369cf1bd12f914b3440516b86d99a
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0670.0_Maximum_Swap.py
|
33229f1e40f8b51a167f593d9fa16ce58ddebd89
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 832 |
py
|
'''
执行用时:36 ms, 在所有 Python3 提交中击败了68.71% 的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了49.11% 的用户
通过测试用例:111 / 111
'''
class Solution:
def maximumSwap(self, num: int) -> int:
digits = list(str(num))
s_digits = sorted(digits, reverse=True)
if digits == s_digits:
return num
def max_index_after(i):
m = i
for j in range(i + 1, len(digits)):
if digits[j] >= digits[m]:
m = j
return m
n = len(digits)
for i in range(n - 1):
j = max_index_after(i)
if digits[i] < digits[j]:
digits[i], digits[j] = digits[j], digits[i]
break
return int(''.join(digits))
|
[
"[email protected]"
] | |
3c5b293c6d389c7c7dc932dead2e0c0535d49fc5
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/shared/formatters/__init__.py
|
95539a9befb1c59e1830abe651b2f06f1e199360
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 776 |
py
|
# 2015.11.10 21:29:02 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/formatters/__init__.py
import BigWorld
from gui.shared.formatters import icons
from gui.shared.formatters import text_styles
from gui.shared.formatters import time_formatters
__all__ = ('icons', 'text_styles', 'time_formatters')
def getClanAbbrevString(clanAbbrev):
return '[{0:>s}]'.format(clanAbbrev)
def getGlobalRatingFmt(globalRating):
if globalRating >= 0:
return BigWorld.wg_getIntegralFormat(globalRating)
return '--'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\formatters\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:02 Střední Evropa (běžný čas)
|
[
"[email protected]"
] | |
3bb2bda63bb05e17d287b72bc50bda27aba736b4
|
18fe3f034f203bc8a22d08f15b29297ebcc7dfaf
|
/py/qlazypy/lib/densop_mcx.py
|
e7a3b8c9e9d186908e9c3aa923a516079358fdfc
|
[
"Apache-2.0"
] |
permissive
|
katou-boop/qlazy
|
b8802c48b0cba0ba89cc1e1a69f551e0f4fdcc73
|
6b62fff65939a589603af7ed8be921c9f1669bb3
|
refs/heads/master
| 2023-02-17T12:30:05.419650 | 2021-01-17T23:20:20 | 2021-01-17T23:20:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
# -*- coding: utf-8 -*-
from qlazypy.error import *
from qlazypy.config import *
from qlazypy.util import *
# multi-controlled X gate
def __gray_code(de, n):
for k in range(2**n):
yield k^(k>>1)
def densop_mcx(de,qid=[]):
# controled and target register
qid_ctr = qid[:-1]
qid_tar = qid[-1]
# hadamard
de.h(qid_tar)
# controlled-RZ(psi), psi=pi/(2**(bitnum-1))
bitnum = len(qid_ctr)
psi = 1.0/(2**(bitnum-1)) # unit=pi(radian)
gray_pre = 0
for gray in __gray_code(de, bitnum):
if gray == 0:
continue
msb = len(str(bin(gray)))-3
chb = len(str(bin(gray^gray_pre)))-3
if gray != 1:
if chb == msb:
chb -= 1
de.cx(qid_ctr[chb], qid_ctr[msb])
de.cp(qid_ctr[msb], qid_tar, phase=psi)
psi = -psi
gray_pre = gray
# hadamard
de.h(qid_tar)
|
[
"[email protected]"
] | |
6db1394c31c689f64f58cffb4a65caedab7887b6
|
55c46d50ed426a3dccef8c44904df4524de43aa1
|
/oldp/apps/cases/api_views.py
|
ff02e857cdc98c72953bdf206858a565d1bfcd76
|
[
"MIT"
] |
permissive
|
docsuleman/oldp
|
1a438a9c669a54aab2f76133200e566d627d9668
|
8dcaa8e6e435794c872346b5014945ace885adb4
|
refs/heads/master
| 2020-06-29T10:45:18.787344 | 2019-08-04T18:21:02 | 2019-08-04T18:21:02 | 200,513,942 | 0 | 0 |
MIT
| 2019-08-04T17:36:52 | 2019-08-04T16:07:25 |
Python
|
UTF-8
|
Python
| false | false | 2,439 |
py
|
import coreapi
import coreschema
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.filters import HaystackFilter
from drf_haystack.generics import HaystackGenericAPIView
from rest_framework import viewsets
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ViewSetMixin
from oldp.api import SmallResultsSetPagination
from oldp.apps.cases.filters import CaseAPIFilter
from oldp.apps.cases.models import Case
from oldp.apps.cases.search_indexes import CaseIndex
from oldp.apps.cases.serializers import CaseSerializer, CASE_API_FIELDS, CaseSearchSerializer
from oldp.apps.search.filters import SearchSchemaFilter
class CaseViewSet(viewsets.ModelViewSet):
"""
List view for cases
"""
pagination_class = SmallResultsSetPagination # limit page (other content field blows up response size)
queryset = Case.get_queryset()
serializer_class = CaseSerializer
# lookup_field = 'slug'
filter_backends = (OrderingFilter, DjangoFilterBackend, )
filterset_class = CaseAPIFilter
ordering_fields = ('date', )
@method_decorator(cache_page(60))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_queryset(self):
return Case.get_queryset()\
.select_related('court')\
.only(*CASE_API_FIELDS)
class CaseSearchSchemaFilter(SearchSchemaFilter):
search_index_class = CaseIndex
def get_default_schema_fields(self):
return [
# Search query field is required
coreapi.Field(
name='text',
location='query',
required=True,
schema=coreschema.String(description='Search query on text content (Lucence syntax support).'),
)
]
class CaseSearchViewSet(ListModelMixin, ViewSetMixin, HaystackGenericAPIView):
"""
Search view (list only)
"""
permission_classes = (AllowAny,)
pagination_class = SmallResultsSetPagination # limit page (other content field blows up response size)
index_models = [
Case
]
serializer_class = CaseSearchSerializer
filter_backends = (HaystackFilter, CaseSearchSchemaFilter,)
|
[
"[email protected]"
] | |
fc696582a78cdd7c5d1899b2b36105b5ae57fb27
|
cc2029f40a12e82712072275fc76a07ac59b5940
|
/battles/tourneys/20170409_2015.py
|
47a50e202ae61271c8e51095af49e9ed277655a0
|
[
"MIT"
] |
permissive
|
heitorchang/learn-code
|
d3fb8e45d539d302372126fe28e85032590b5707
|
5e6e56f7257de1910830619c01d470e892d7f9d8
|
refs/heads/master
| 2023-08-09T13:46:18.623772 | 2023-07-21T16:57:11 | 2023-07-21T16:57:11 | 147,522,837 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,482 |
py
|
from math import log
description = """
You are playing a number guessing game with your friend. Your friend thought of some integer x from 1 to n. In order to guess the number, you can ask two types of questions:
"is x smaller or equal to a?" for some integer a;
"is x greater or equal to a?" for some integer a.
If the answer to your question is "yes", you should pay your friend $2, otherwise you should pay him $1.
How much will you have to pay to your friend, assuming that you apply the strategy that minimizes the amount of money you have to pay in order to guess the number in the worst case scenario?
"""
def numberGuessingNaive(n):
# solution by sensytive
p=[0]*(n+1)
for i in range(2,n+1):
p[i]=i
for m in range(1,i):
pr('p[i] 1+p[m] 2+p[i-m]')
p[i] = min(p[i], max(1+p[m], 2+p[i-m]))
pr('p')
return p[-1]
def factorialsProductTrailingZeros(l, r):
result = 0
last = 0
for i in range(1, r + 1):
number = i
while number % 5 == 0:
number /= 5
result += 1
if i >= l:
pr('result last')
result += last
return result
def test():
testeql(numberGuessingNaive(4),4)
testeql(numberGuessingNaive(3),3)
testeql(numberGuessingNaive(1),0)
# testeql(numberGuessingNaive(534),14)
testeql(factorialsProductTrailingZeros(4, 10), 7)
testeql(numberGuessingNaive(15), 0)
testeql(numberGuessingNaive(9), 0)
|
[
"[email protected]"
] | |
1f9b109192968687be953ae31ed89405140c4775
|
4a63e96d7015e3e13d9b5204fc0261c05f600d3b
|
/Standard Library/tempfile/app.py
|
7fc029dac7ed9b6e14fd7f28165dcf25da70c0c0
|
[
"Apache-2.0"
] |
permissive
|
shubhamnag14/Python-Documents
|
0e38f58298d35b4df5b61adb361d720337148a00
|
d3fee0ad90232b413f6ac1b562588fb255b79e42
|
refs/heads/master
| 2023-06-08T23:51:26.089840 | 2021-06-20T15:07:44 | 2021-06-20T15:07:44 | 380,832,776 | 0 | 0 |
Apache-2.0
| 2021-06-27T20:33:08 | 2021-06-27T20:31:41 | null |
UTF-8
|
Python
| false | false | 320 |
py
|
import tempfile
def one():
file = tempfile.mkstemp()
with open(file[1], 'w+') as f:
f.write("This is a test")
f.seek(0)
print(f.read())
print(tempfile.gettempdir())
print(tempfile.gettempdirb())
print(tempfile.gettempprefix())
print(tempfile.gettempprefixb())
print(tempfile.tempdir)
|
[
"[email protected]"
] | |
7c21708c07f793fe8b7ea0a740e301f39cdba0f3
|
00ce7b1e677abbfe7912a472e74b3fab92b9fc50
|
/Data_processing/MNIST/checkSource.py
|
0ca2d86a677aa1ee43da5cc7832283efb90ac5f8
|
[] |
no_license
|
Xharlie/MultiGen
|
a19b8cd76bc1933773411d69200e86bf1ba8ed74
|
e3e646289249ce9418fb40f5a246310ac37e9a96
|
refs/heads/master
| 2021-01-22T10:56:16.432939 | 2017-12-02T03:34:41 | 2017-12-02T03:34:41 | 82,051,628 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 877 |
py
|
import h5py
from PIL import Image
import numpy as np
DIGIT=2
FORM=4
COLOR=0
TRANSFORM=1
def check():
imgArray = []
with h5py.File('../../Source/MNIST/all_info.h5', 'r') as f:
imgArray = f['data']
segm = f['segm']
digit = f['digit']
form = f['form']
color = f['color']
transform = f['transform']
index = 0
for i in range(imgArray.shape[0]):
if (digit[i][DIGIT] == 1 and form[i][FORM] == 1 and color[i][COLOR] == 1 and transform[i][TRANSFORM] == 1):
index = i
break
img = np.transpose(imgArray[index],(1,2,0))
img = (img)*255
img = Image.fromarray(img.astype(np.int8), 'RGB')
img.show()
# print segm.shape
# img2 = segm
# print img2
# img2 = Image.fromarray((img2 * 255).astype(np.int8), 'L')
# img2.show()
print digit,form,color,transform
if __name__ == '__main__':
check()
|
[
"[email protected]"
] | |
8f0f47ef704ca1bf13b6b054d960ad79eb855848
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_02_01/aio/operations/_operations.py
|
8379386a6db3839f741ead07cbbbe9c6844a901e
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 4,659 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.OperationListResult"]:
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_02_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'} # type: ignore
|
[
"[email protected]"
] | |
c1ea1c2df956749c6deeb18f05376f849453d2e6
|
3c3c274f266736c97dc14608511f039e65e31694
|
/chalicelib/auth.py
|
ad8d2d2a8ff3cc74580968dde491e1779e63a446
|
[] |
no_license
|
craymaru/chalice-todoapp-training
|
b2de9a7bff52ae3675a36ac44c7886a003199c7c
|
5a3229f3f4d185457812777432bd99adb9b7c56a
|
refs/heads/master
| 2023-01-11T18:03:50.712684 | 2020-11-18T04:29:34 | 2020-11-18T04:29:34 | 313,465,965 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,168 |
py
|
import hashlib
import hmac
import datetime
from uuid import uuid4
import jwt
from chalice import UnauthorizedError
# TODO: Figure out what we want to do with this.
# We can either move this out to env vars in config.json,
# use KMS to encrypt/decrypt this value, or store this in SSM.
# Until we figure it out I'll store it here.
_SECRET = b'\xf7\xb6k\xabP\xce\xc1\xaf\xad\x86\xcf\x84\x02\x80\xa0\xe0'
def get_jwt_token(username, password, record):
actual = hashlib.pbkdf2_hmac(
record['hash'],
password.encode(),
record['salt'].value,
record['rounds']
)
expected = record['hashed'].value
if hmac.compare_digest(actual, expected):
now = datetime.datetime.utcnow()
unique_id = str(uuid4())
payload = {
'sub': username,
'iat': now,
'nbf': now,
'jti': unique_id,
# NOTE: We can also add 'exp' if we want tokens to expire.
}
return jwt.encode(payload, _SECRET, algorithm='HS256')
raise UnauthorizedError('Invalid password')
def decode_jwt_token(token):
return jwt.decode(token, _SECRET, algorithms=['HS256'])
|
[
"[email protected]"
] | |
a7b55848abbb88a94997e6304eb564af957d682f
|
e012ac032de8bf5bf880d4917fb6329f99f47d2b
|
/grdient_decent.py
|
13306f92fd7645e6b5595b881733dbdf2c385b6e
|
[] |
no_license
|
janakiraam/ML-ToyProbelm
|
d1d0b8ffe55fb68cea22ea2326be3aeb23e64423
|
f8775ebce8f6b464e023bda92890fc30bcf923e6
|
refs/heads/main
| 2023-03-15T11:06:32.252230 | 2021-03-13T17:19:08 | 2021-03-13T17:19:08 | 341,291,350 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
import numpy as np
def gradient_decent(x,y):
m_curr=0
b_curr=0
iteration=100
n = len(x)
learning_rate=0.001
for i in range(iteration):
y_predict=m_curr*x+b_curr
md=-(2/n)*sum(x*(y-y_predict))
bd=-(2/n)*sum(y-y_predict)
m_curr=m_curr - learning_rate*md
b_curr=b_curr - learning_rate*bd
print("m {}, b {} , iteration {}".format(m_curr,b_curr,i))
x=np.array([1,2,3,4,5])
y=np.array([5,7,11,25,13])
gradient_decent(x,y)
|
[
"[email protected]"
] | |
4cd37584ef4a0d01cd88ff800395b7ab860f7b52
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/problems/0530.0_Minimum_Absolute_Difference_in_BST.py
|
93ad4e5a1a70ef0a7e5a3df60900e33c8cd38472
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 763 |
py
|
'''
Runtime: 99 ms, faster than 5.10% of Python3 online submissions for Minimum Absolute Difference in BST.
Memory Usage: 18.5 MB, less than 69.94% of Python3 online submissions for Minimum Absolute Difference in BST.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def getMinimumDifference(self, root: Optional[TreeNode]) -> int:
def inorder(node):
if node:
yield from inorder(node.left)
yield node.val
yield from inorder(node.right)
return min(b - a for a, b in pairwise(inorder(root)))
|
[
"[email protected]"
] | |
dd6b99605f2ad07b00b76fab12d2dfa0ec787223
|
0f949dc62b728b2cf6e0e172eb7c1cc31012244d
|
/script/tft_touch.py
|
5190129c1503eb022011cef240009dfb42cc3187
|
[
"MIT"
] |
permissive
|
jeguzzi/mt_screen
|
74c4314012ddb9471650d8b1f10c889265101f92
|
f06ea6404474e8a71a4d61ec381a6e99e03e0ebb
|
refs/heads/master
| 2020-03-19T09:39:44.114549 | 2018-06-06T09:46:05 | 2018-06-06T09:46:05 | 136,307,460 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,953 |
py
|
#!/usr/bin/env python
from __future__ import division
import threading
import evdev
import rospy
import wiringpi2 as wp
from evdev.ecodes import ABS_X, ABS_Y, BTN_TOUCH, EV_ABS, EV_KEY
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
KEYS = [1, 4, 5]
IN = 0
OUT = 1
class TFTouch(object):
def __init__(self):
rospy.init_node('tft')
self.continuos = rospy.get_param('~continuos', True)
rate = rospy.get_param('~rate', 10.0)
if rate > 0:
period = 1 / rate
else:
period = 0.1
self.width = rospy.get_param('tft/width', 320)
self.height = rospy.get_param('tft/height', 240)
self.dev = evdev.InputDevice('/dev/input/ts_uinput')
wp.wiringPiSetup()
for key, pin in enumerate(KEYS):
wp.pinMode(pin, IN)
self.key_pub = {pin: rospy.Publisher('tft/key_{key}'.format(key=i + 1), Bool, queue_size=1)
for i, pin in enumerate(KEYS)}
self.state = {pin: 0 for pin in KEYS}
self.touch = {'x': None, 'y': None, 'down': 0}
self.joy_pub = rospy.Publisher('tft/touch', Joy, queue_size=1)
rospy.Timer(rospy.Duration(period), self.update_keys, oneshot=False)
self.dev_thread = threading.Thread(target=self.update_touch)
self.dev_thread.daemon = True
self.dev_thread.start()
self.buttons = []
self.axes = []
def update_touch(self):
for event in self.dev.read_loop():
if event.type == EV_ABS:
if event.code == ABS_X:
self.touch['x'] = max(min(event.value, self.width), 0)
continue
if event.code == ABS_Y:
self.touch['y'] = max(min((self.height - event.value), self.height), 0)
continue
if event.type == EV_KEY and event.code == BTN_TOUCH:
self.touch['down'] = event.value
continue
def update_keys(self, event):
# 1 is up, 0 is down
state = {pin: 1 - wp.digitalRead(pin) for pin in KEYS}
if self.touch['down'] and self.touch['x'] is not None and self.touch['y'] is not None:
axes = [2 * self.touch['x'] / self.width - 1, 2 * self.touch['y'] / self.height - 1]
else:
axes = [0, 0]
buttons = [self.touch['down']] + [state[pin] for pin in KEYS]
if self.continuos or buttons != self.buttons or axes != self.axes:
msg = Joy(buttons=buttons, axes=axes)
msg.header.stamp = rospy.Time.now()
# msg.header.frame_id = 'tft'
self.joy_pub.publish(msg)
self.buttons = buttons
self.axes = axes
for pin, value in state.items():
if value != self.state.get(pin):
self.key_pub[pin].publish(value)
self.state = state
if __name__ == '__main__':
t = TFTouch()
rospy.spin()
|
[
"[email protected]"
] | |
e9a4edfe1026ffae7f0e4077a0753cd8224ef2a4
|
d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b
|
/espresso/jobmanager/jobmanager/temp/bin/packjobdir.py
|
697971511dcfa49909f268637571980c629e1286
|
[] |
no_license
|
danse-inelastic/AbInitio
|
6f1dcdd26a8163fa3026883fb3c40f63d1105b0c
|
401e8d5fa16b9d5ce42852b002bc2e4274afab84
|
refs/heads/master
| 2021-01-10T19:16:35.770411 | 2011-04-12T11:04:52 | 2011-04-12T11:04:52 | 34,972,670 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 719 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def main():
from vnf.applications.PackJobDir import PackJobDir as base
class App(base):
def _getPrivateDepositoryLocations(self):
return ['../config']
app = App()
return app.run()
# main
if __name__ == '__main__':
# invoke the application shell
main()
# version
__id__ = "$Id$"
# End of file
|
[
"[email protected]"
] | |
ff30e8932a6292b69bb900155874ffcfa1e06431
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2930.py
|
97be09473c78d8ee4bccfb81bd58eb99d9cd14ca
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
from __future__ import division
T = input()
for i in range(T):
C, F, X = [float(x) for x in raw_input().split()]
cookiesRate = 2
if C >= X : print "Case #%d: %.7f" % (i+1, X/cookiesRate)
else:
timeElapsed = 0
while(C/cookiesRate + X/(cookiesRate+F) < X/cookiesRate):
timeElapsed += C/cookiesRate
cookiesRate += F
timeElapsed += X/cookiesRate
print "Case #%d: %.7f" % (i+1, timeElapsed)
|
[
"[email protected]"
] | |
c633ac2470e05a99614be9f9f82a751daa8489db
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/c4ccaa8b4474471f993db5910720bf59.py
|
53dba6ce82f915abfd9d8828c4b03607686fbbc1
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 1,305 |
py
|
import unicodedata
STANDARD_RESPONSES = {
'question': 'Sure.',
'exclamation': 'Woah, chill out!',
'empty': 'Fine. Be that way!',
'other': 'Whatever.'
}
def hey(*statements):
for statement in statements:
if type(statement) != str:
try:
statement = str(statement)
except:
statement = unicodedata.normalize('NFKD', statement).encode('ascii','ignore')
if is_empty(statement):
return STANDARD_RESPONSES['empty']
punctuation = statement[len(statement) - 1]
if is_exclamation(statement, punctuation):
return STANDARD_RESPONSES['exclamation']
elif is_question(statement, punctuation):
return STANDARD_RESPONSES['question']
else:
return STANDARD_RESPONSES['other']
def is_empty(statement):
if len(statement) == 0 or statement.isspace():
return True
else:
return False
def is_question(statement, punctuation):
if punctuation == '?':
return True
return False
def is_exclamation(statement, punctuation):
if punctuation == '!':
if statement.isupper():
return True
else:
return False
elif statement.isupper():
return True
return False
|
[
"[email protected]"
] | |
141edf402032a4bbe9c3349258944e9dcfa2c803
|
fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd
|
/chrome/browser/android/digital_asset_links/DEPS
|
7023254e344e39b9b94c5db81d7a70a7df505240
|
[
"BSD-3-Clause"
] |
permissive
|
wzyy2/chromium-browser
|
2644b0daf58f8b3caee8a6c09a2b448b2dfe059c
|
eb905f00a0f7e141e8d6c89be8fb26192a88c4b7
|
refs/heads/master
| 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 |
BSD-3-Clause
| 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null |
UTF-8
|
Python
| false | false | 296 |
# It is likely that this code will eventually be shared across platforms, so
# excluding dependencies that would make this being a component impossible.
include_rules = [
"-content",
"-chrome",
"+base",
"+content/public/test",
"+chrome/browser/android/digital_asset_links",
"+net",
]
|
[
"[email protected]"
] | ||
9836c4db6976992908c3e2fdd5c42aee5b2c2e44
|
66d352e30036b0917e22b2ccde6e0bbc05f9758c
|
/TelluricSpectra/TellRemoval_interptest.py
|
54d1373f0ce141d99b8b9bb15b17c2674b949ca8
|
[] |
no_license
|
jason-neal/Phd-codes
|
8354563b1d2b0fcce39d72adbfd82b65557399b4
|
c947ffa56228746e2e5cdb3ab99e174f6c8e9776
|
refs/heads/master
| 2023-08-30T23:11:55.394560 | 2022-04-24T09:25:28 | 2022-04-24T09:25:28 | 42,106,284 | 0 | 1 | null | 2023-08-16T02:22:59 | 2015-09-08T10:40:26 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,575 |
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
""" Codes for Telluric contamination removal
Interpolates telluric spectra to the observed spectra.
Divides spectra telluric spectra
can plot result
"""
import argparse
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from astropy.io import fits
from scipy import interpolate
from scipy.interpolate import interp1d
import GaussianFitting as gf
import Obtain_Telluric as obt
def divide_spectra(spec_a, spec_b):
""" Assumes that the spectra have been interpolated to same wavelength step"""
""" Divide two spectra"""
assert(len(spec_a) == len(spec_b)), "Not the same length"
divide = spec_a / spec_b
return divide
def match_wl(wl, spec, ref_wl):
"""Interpolate Wavelengths of spectra to common WL
Most likely convert telluric to observed spectra wl after wl mapping performed"""
newspec1 = np.interp(ref_wl, wl, spec) # 1-d peicewise linear interpolat
test_plot_interpolation(wl, spec,ref_wl,newspec1)
print("newspec1")
# cubic spline with scipy
#linear_interp = interp1d(wl, spec)
#linear_interp = interp1d(wl, spec, kind='cubic')
# Timeing interpolation
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='linear')(ref_wl)
print("linear intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='slinear')(ref_wl)
print("slinear intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='quadratic')(ref_wl)
print("quadratic intergration time =", time.time()-starttime)
starttime = time.time()
newspec2 = interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
print("cubic intergration time =", time.time()-starttime)
#newspec2 = interp1d(wl, spec, kind='cubic')(ref_wl)
print("newspec2")
#ewspec2 = sp.interpolate.interp1d(wl, spec, kind='cubic')(ref_wl)
return newspec1, newspec2 # test inperpolations
def plot_spectra(wl, spec, colspec="k.-", label=None, title="Spectrum"):
""" Do I need to replicate plotting code?
Same axis
"""
plt.plot(wl, spec, colspec, label=label)
plt.title(title)
plt.legend()
plt.show(block=False)
return None
def test_plot_interpolation(x1, y1, x2, y2, methodname=None):
""" Plotting code """
plt.plot(x1, y1, label="original values")
plt.plot(x2, y2, label="new points")
plt.title("testing Interpolation: ", methodname)
plt.legend()
plt.xlabel("Wavelength (nm)")
plt.ylabel("Norm Intensity")
plt.show()
return None
def telluric_correct(wl_obs, spec_obs, wl_tell, spec_tell):
"""Code to contain other functions in this file
1. Interpolate spectra to same wavelengths with match_WLs()
2. Divide by Telluric
3. ...
"""
print("Before match_wl")
interp1, interp2 = match_wl(wl_tell, spec_tell, wl_obs)
print("After match_wl")
# could just do interp here without match_wl function
# test outputs
#print("test1")
#test_plot_interpolation(wl_tell, spec_tell, wl_obs, interp1)
#print("test2")
# test_plot_interpolation(wl_tell, spec_tell, wl_obs, interp2)
# division
print("Before divide_spectra")
corrected_spec = divide_spectra(spec_obs, interp2)
print("After divide_spectra")
#
# other corrections?
return corrected_spec
def _parser():
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Telluric Removal')
parser.add_argument('fname', help='Input fits file')
parser.add_argument('-o', '--output', default=False,
help='Ouput Filename',)
args = parser.parse_args()
return args
def main(fname, output=False):
homedir = os.getcwd()
data = fits.getdata(fname)
wl = data["Wavelength"]
I = data["Extracted_DRACS"]
hdr = fits.getheader(fname)
datetime = hdr["DATE-OBS"]
obsdate, obstime = datetime.split("T")
obstime, __ = obstime.split(".")
tellpath = "/home/jneal/Phd/data/Tapas/"
tellname = obt.get_telluric_name(tellpath, obsdate, obstime)
print("tell name", tellname)
tell_data = obt.load_telluric(tellpath, tellname[0])
wl_lower = np.min(wl/1.0001)
wl_upper = np.max(wl*1.0001)
tell_data = gf.slice_spectra(tell_data[0], tell_data[1], wl_lower, wl_upper)
#tell_data =
print("After slice spectra")
plt.figure()
plt.plot(wl, I, label="Spectra")
plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
plt.show()
# Loaded in the data
# Now perform the telluric removal
I_corr = telluric_correct(wl, I, tell_data[0], tell_data[1])
print("After telluric_correct")
plt.figure()
plt.plot(wl, I_corr, label="Corrected Spectra")
plt.plot(tell_data[0], tell_data[1], label="Telluric lines")
plt.show()
if __name__ == "__main__":
args = vars(_parser())
fname = args.pop('fname')
opts = {k: args[k] for k in args}
main(fname, **opts)
""" Some test code for testing functions """
sze = 20
x2 = range(sze)
y2 = np.random.randn(len(x2)) + np.ones_like(x2)
y2 = 0.5 * np.ones_like(x2)
x1 = np.linspace(1, sze-1.5, 9)
y1 = np.random.randn(len(x1)) + np.ones_like(x1)
y1 = np.ones_like(x1)
print(x1)
print(x2)
#print(y1)
#print(y2)
y1_cor = telluric_correct(x1, y1, x2, y2)
print(x1)
print(y1)
print(y1_cor)
|
[
"[email protected]"
] | |
9c1b67405acfc447e0bcde61a0b406ab29189c33
|
f4713830c8519daca9d75ec692a6937ee03c74d4
|
/Problems/Algorithms/953. Verifying an Alien Dictionary/alien_dictionary.py
|
af8a014ae986a3a0467e9a3207355cbfdb4b4240
|
[
"MIT"
] |
permissive
|
xuedong/leet-code
|
a0dd38cb884292de9d947718bb00160eff2b0f00
|
285d49cd7061ec43368d63b7c7c56763be520570
|
refs/heads/master
| 2023-09-03T02:38:55.932182 | 2023-09-02T18:35:42 | 2023-09-02T18:35:42 | 189,745,542 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
#!/usr/bin/env python3
from typing import List
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
map = {ch: idx for idx, ch in enumerate(order)}
words = [[map[ch] for ch in word] for word in words]
return all(word1 <= word2 for word1, word2 in zip(words[:-1], words[1:]))
|
[
"[email protected]"
] | |
3791527cea4f9b19510cd2511f27d307b569de22
|
4d2de834ecea6ef444b1c45afb5a41e717900858
|
/app/app_todo/__init__.py
|
33a8204e60a9ea5ebfaf02b5c996d4aafaf808af
|
[] |
no_license
|
florije1988/flask_regular
|
19da04c59fbf600274d206750ccb8cf355db2d24
|
1219e4efbad76202d6dca7e4b2148344ea9edf8c
|
refs/heads/master
| 2020-12-24T13:21:29.840919 | 2014-12-16T00:58:15 | 2014-12-16T00:58:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
# -*- coding: utf-8 -*-
__author__ = 'florije'
from flask import Blueprint
from app.custom_api import Api
# Blueprint is registered under the name 'app_task' although the package is
# 'app_todo' -- NOTE(review): confirm the name mismatch is intentional.
app_todo = Blueprint('app_task', __name__)
# catch_all_404s=True lets the API serve its own 404 for unknown routes.
api_todo = Api(app_todo, catch_all_404s=True)
# Imported only after api_todo exists so views can use it (avoids a circular import).
from . import views
api_todo.add_resource(views.HelloHandler, '/hello')
|
[
"[email protected]"
] | |
0ba9aca97b1c1f59da1afb823752e4f46a680b96
|
feae88b4a8bc0aba388dcc2eeb7debb49d736809
|
/apps/second_app/urls.py
|
fb99d9914ffc2c2fedcdee10fd14c61afe4e550b
|
[] |
no_license
|
john-gore/belt3_retry
|
ec8a5582382fc00f0bcb3cf973fe9cd073ed571c
|
03aa6d7ff9988615a96d2c882282107d389b1c52
|
refs/heads/master
| 2021-07-21T11:11:42.972344 | 2017-10-29T21:34:09 | 2017-10-29T21:34:09 | 108,772,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 239 |
py
|
from django.conf.urls import url
from django.contrib import admin
from . import views
from ..first_app.models import User  # This line is new!  NOTE(review): User is unused in this module
# Route table for second_app: the app root dispatches to views.index.
urlpatterns = [
    url(r'^$', views.index, name='index') # This line has changed!
]
|
[
"[email protected]"
] | |
fbc05970539a311c1532e03d1461d962abe1cae2
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_PointArray.py
|
302c83b9f7d628767effb2ae4bd898435e6dc65f
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,546 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/PointArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
# Autogenerated genpy message class; regenerate from PointArray.msg rather
# than editing serialization logic by hand.
class PointArray(genpy.Message):
  # Identity/metadata generated from the .msg definition.
  _md5sum = "6d79425254a86e33112d6737776efb2b"
  _type = "vector_map_msgs/PointArray"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """Header header
Point[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/Point
# Ver 1.00
int32 pid
float64 b
float64 l
float64 h
float64 bx
float64 ly
int32 ref
int32 mcode1
int32 mcode2
int32 mcode3
"""
  __slots__ = ['header','data']
  _slot_types = ['std_msgs/Header','vector_map_msgs/Point[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       header,data
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(PointArray, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.data is None:
        self.data = []
    else:
      self.header = std_msgs.msg.Header()
      self.data = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Header: seq, stamp.secs, stamp.nsecs as three little-endian uint32.
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.data)
      buff.write(_struct_I.pack(length))
      # Each Point serialises to 60 bytes: int32 + 5x float64 + 4x int32.
      for val1 in self.data:
        _x = val1
        buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.data is None:
        self.data = None
      end = 0
      _x = self
      start = end
      end += 12
      # 12 bytes: the three uint32 header fields.
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.data = []
      for i in range(0, length):
        val1 = vector_map_msgs.msg.Point()
        _x = val1
        start = end
        end += 60
        (_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
        self.data.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.data)
      buff.write(_struct_I.pack(length))
      for val1 in self.data:
        _x = val1
        buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.data is None:
        self.data = None
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.data = []
      for i in range(0, length):
        val1 = vector_map_msgs.msg.Point()
        _x = val1
        start = end
        end += 60
        (_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
        self.data.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
  # Accessor for the shared single-uint32 Struct (created at import time,
  # so no lazy initialisation is needed here).
  return _struct_I
_struct_3I = None
def _get_struct_3I():
  # Lazily build and cache the '<3I' Struct (three little-endian uint32).
  global _struct_3I
  cached = _struct_3I
  if cached is None:
    cached = _struct_3I = struct.Struct("<3I")
  return cached
_struct_i5d4i = None
def _get_struct_i5d4i():
  # Lazily build and cache the '<i5d4i' Struct used for one Point record.
  global _struct_i5d4i
  cached = _struct_i5d4i
  if cached is None:
    cached = _struct_i5d4i = struct.Struct("<i5d4i")
  return cached
|
[
"[email protected]"
] | |
8735f5b0e9167684495efe5852cebc7defa664f7
|
930309163b930559929323647b8d82238724f392
|
/abc155_c.py
|
6b0f7c6960bceb99ef3c1e6274c2f06a7b5baa8f
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
import collections
# ABC155 C: read N strings, then print every string whose occurrence count
# equals the maximum count, in ascending (lexicographic) order.
N=int(input())
S=[input() for i in range(N)]
S=collections.Counter(S)
max_v = max(S.values())
# sorted() over the filtered (string, count) pairs orders by string first;
# only the string itself is printed.
for k,v in sorted(list(filter(lambda x:x[1]==max_v, S.items()))):
    print(k)
|
[
"[email protected]"
] | |
2326a5cd67d0e36dfc987657a3b77f64b1108019
|
5de646fb3ecf10ecb45e05018a23b6345fb9ca53
|
/codejam/2020 Qualification Round/d.py
|
e358bdc477498577b9dcea874b2bbacb4f08905f
|
[] |
no_license
|
PPinto22/LeetCode
|
5590d6ca87efcd29f9acd2eaed1bcf6805135e29
|
494a35542b61357c98c621202274d774e650a27c
|
refs/heads/master
| 2022-04-29T20:37:31.085120 | 2022-04-02T12:02:30 | 2022-04-02T12:02:30 | 201,478,904 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,118 |
py
|
from typing import Union, List, Tuple, Optional
def solve(B):
    """Recover a hidden B-bit array from the interactive judge.

    Every 10 queries the judge may complement and/or reverse the array
    ("fluctuation").  Two control bits are tracked -- one whose pair is
    equal, one whose pair is complementary -- so the fluctuation can be
    identified with at most two re-queries and undone locally.
    Queries/answers go through stdout/stdin (Code Jam 2020 Qual D).
    """
    def set(index, value):
        # Record a queried bit; also recruit it as a control pair when
        # possible.  NOTE(review): shadows the builtin `set`.
        nonlocal control_equal, control_complement, known
        # Fix to prevent unpaired bits right before a fluctuation
        if (not control_complement or not control_equal) \
                and (query % 10 == 0) \
                and (known % 2 == 0):
            return
        solution[index] = value
        known += 1
        pair = get_pair(index)
        if not control_equal and value == pair[1]:
            control_equal = pair
        elif not control_complement \
                and pair[1] is not None \
                and value != pair[1]:
            control_complement = pair
    def get_pair(index):
        # The "pair" of bit i is its mirror B-1-i (may still be unknown).
        pair_index = B - 1 - index
        return [pair_index, solution[pair_index]]
    def determine_fluctuation():
        # Re-query the control bits and narrow the four possibilities by
        # elimination; order of `possibilities` makes 'none' the fallback.
        nonlocal control_complement, control_equal
        possibilities = ['complement', 'reverse', 'both', 'none']
        if control_equal:
            index, old = control_equal
            new = ask(index)
            if old == new:
                possibilities = [p for p in possibilities if p in {'reverse', 'none'}]
            else:
                possibilities = [p for p in possibilities if p in {'complement', 'both'}]
            control_equal = index, new
        if control_complement:
            index, old = control_complement
            new = ask(index)
            if old == new:
                possibilities = [p for p in possibilities if p in {'both', 'none'}]
            else:
                possibilities = [p for p in possibilities if p in {'complement', 'reverse'}]
            control_complement = index, new
        return possibilities[0]
    def apply_fluctuation(fluctuation):
        # Mirror the judge's transformation onto our partial solution.
        def complement():
            for i in range(B):
                if solution[i] is not None:
                    solution[i] = not solution[i]
        if fluctuation == 'complement':
            complement()
        elif fluctuation == 'reverse':
            solution.reverse()
        elif fluctuation == 'both':
            complement()
            solution.reverse()
    def ask(i):
        # One interactive query: print a 1-based index, read back '1'/'0'.
        nonlocal query
        query += 1
        print(i + 1, flush=True)
        response = input()
        return True if response == '1' else False
    def next_index():
        # Alternate front/back so mirrored pairs get discovered together.
        return (known // 2) if (known % 2 == 0) else (B - (known // 2) - 1)
    solution: List[Union[bool, None]] = [None] * B
    control_equal: Optional[Tuple[int, bool]] = None
    control_complement: Optional[Tuple[int, bool]] = None
    query = 0
    known = 0
    while known < B and query < 150:
        if query > 0 and query % 10 == 0:
            fluctuation = determine_fluctuation()
            apply_fluctuation(fluctuation)
        else:
            index = next_index()
            set(index, ask(index))
    return ''.join(map(lambda x: '1' if x else '0', solution))
if __name__ == '__main__':
    # Interactive judge protocol: T test cases, each with the same bit count B.
    T, B = map(int, input().split())
    for Ti in range(1, T + 1):
        solution = solve(B)
        print(solution, flush=True)
        # Judge replies 'Y'/'N'; abort immediately on a wrong answer.
        if input() == 'N':
            break
|
[
"[email protected]"
] | |
e79db74e458b1f23bf9c7d355f33c7457e7e49b8
|
45272da6d64161a586b1dd41df63b8f701f38e39
|
/Easy Problems/1-10/1easy.py
|
075277c849e0a410bcde57f4d5bf459e7c1e8fad
|
[] |
no_license
|
Lucas-Guimaraes/Reddit-Daily-Programmer
|
559f813d2ee1a06e80a2b260bcb43718ae50b8bf
|
45d554d0e0f8bc67e2111bede3a45f77f5512d7b
|
refs/heads/main
| 2023-07-31T18:36:48.774791 | 2021-09-13T04:08:09 | 2021-09-13T04:08:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
#https://www.reddit.com/r/dailyprogrammer/comments/pih8x/easy_challenge_1/
# NOTE(review): Python 2 script -- raw_input() does not exist on Python 3.
# Prompt the user for three values, then echo them back in one sentence.
user_name = raw_input("Put in your name: ")
user_age = raw_input("Whhat's your age?: ")
user_screenname = raw_input("How about a username?: ")
print("Your name is " + user_name + " your are " + user_age + " years old, and your username is " + user_screenname)
# Trailing raw_input() keeps the console window open until Enter is pressed.
raw_input()
|
[
"[email protected]"
] | |
d5c5909ea6644335136f2d82bcda8a30fa14ccab
|
48477a15ad96505def8097a6c098826b1e5cfe1a
|
/2_basic_algorithms/2_sorting_algorithms/14_pair_sum.py
|
9e1422278c00aead20f8116beaac4b3230077a6d
|
[] |
no_license
|
450703035/Data-Structures-Algorithms
|
02cd5bbb92ce25019fce4955af38b0317b4f4cac
|
dde33560fcb3e3ff41cf8bd37a454f8c13b15138
|
refs/heads/master
| 2021-05-22T02:25:03.554870 | 2020-06-27T14:23:24 | 2020-06-27T14:23:24 | 252,927,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,832 |
py
|
# Pair Sum
'''
Problem Statement
Given an input array and a target value (integer), find two values
in the array whose sum is equal to the target value.
Solve the problem without using extra space.
You can assume the array has unique values and will never have
more than one solution.
'''
def pair_sum(arr, target):
    """
    :param: arr - input array (unique values assumed, per the problem statement)
    :param: target - target value
    Find two values in the array whose sum equals the target.
    Return the two numbers as a sorted list, or [None, None] when no pair
    exists (including for an empty or single-element input).
    """
    # Work on a sorted copy so the caller's list is NOT mutated
    # (the original sorted in place, a surprising side effect).
    ordered = sorted(arr)
    # Two-pointer scan from both ends of the sorted copy: O(n log n) total,
    # no extra space beyond the copy.
    lo = 0
    hi = len(ordered) - 1
    while lo < hi:
        pair_total = ordered[lo] + ordered[hi]
        if pair_total == target:
            return [ordered[lo], ordered[hi]]
        elif pair_total < target:
            lo += 1  # sum too small -> move front pointer forwards
        else:
            hi -= 1  # sum too large -> move back pointer backwards
    return [None, None]
# Test of pair/sum function.
def test_function(test_case):
    # Unpack the fixture, run pair_sum, and report the verdict.
    values, target, expected = test_case[0], test_case[1], test_case[2]
    outcome = pair_sum(values, target)
    verdict = "Pass" if outcome == expected else "False"
    print(verdict)
# Exercise pair_sum over the three fixture cases, in the original order.
for input_list, target, solution in (
    ([2, 7, 11, 15], 9, [2, 7]),
    ([0, 8, 5, 7, 9], 9, [0, 9]),
    ([110, 9, 89], 9, [None, None]),
):
    test_case = [input_list, target, solution]
    test_function(test_case)
|
[
"[email protected]"
] | |
26520cf0e4d572626cca7f3ae58470069e37fd63
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/raw/GLES2/NV/read_buffer.py
|
638349916933fad25c3ba754755ffda4f1e717dc
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 |
MIT
| 2022-11-20T09:47:56 | 2019-05-24T20:55:10 |
Python
|
UTF-8
|
Python
| false | false | 617 |
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Extension identifier used when resolving the GL entry point.
_EXTENSION_NAME = 'GLES2_NV_read_buffer'
def _f( function ):
    # Wrap a stub in a platform-resolved GLES2 function with error checking.
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_read_buffer',error_checker=_errors._error_checker)
GL_READ_BUFFER_NV=_C('GL_READ_BUFFER_NV',0x0C02)
@_f
@_p.types(None,_cs.GLenum)
def glReadBufferNV(mode):pass  # void glReadBufferNV(GLenum mode); body is supplied by _f
|
[
"[email protected]"
] | |
410559c8f26e95c96374a7fea4724d3d00169ba7
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/number-of-ways-to-earn-points.py
|
6707c76b184e8c02c07e41ef08fcbd9b81e9220e
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 |
MIT
| 2023-05-31T06:10:33 | 2018-10-11T17:38:35 |
C++
|
UTF-8
|
Python
| false | false | 1,069 |
py
|
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution(object):
    def waysToReachTarget(self, target, types):
        """
        :type target: int
        :type types: List[List[int]]  # each entry is [count, marks]
        :rtype: int

        Bounded-knapsack count of ways to score exactly `target`,
        modulo 10^9+7.  Time O(n * t * c), space O(t).
        """
        MOD = 10**9+7
        dp = [0]*(target+1)  # dp[i] = ways to reach score i using types seen so far
        dp[0] = 1
        for c, m in types:
            # Iterate scores downwards so each type's count cap is honoured.
            # `range` (not Python-2-only `xrange`) keeps this runnable on
            # Python 3 while behaving identically on Python 2.
            for i in reversed(range(1, target+1)):
                for j in range(1, min(i//m, c)+1):
                    dp[i] = (dp[i]+dp[i-j*m])%MOD
        return dp[-1]
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution2(object):
    def waysToReachTarget(self, target, types):
        """
        :type target: int
        :type types: List[List[int]]  # each entry is [count, marks]
        :rtype: int

        Same bounded knapsack as Solution, but building a fresh dp row per
        type and scanning scores upwards.  Time O(n * t * c), space O(t).
        """
        MOD = 10**9+7
        dp = [0]*(target+1)
        dp[0] = 1
        for c, m in types:
            new_dp = [0]*(target+1)
            # `range` replaces Python-2-only `xrange` so the code also runs
            # on Python 3; semantics are unchanged.
            for i in range(target+1):
                for j in range(min((target-i)//m, c)+1):
                    new_dp[i+j*m] = (new_dp[i+j*m]+dp[i])%MOD
            dp = new_dp
        return dp[-1]
|
[
"[email protected]"
] | |
6f0cf4d61aa094e7e4958d5d2d42c7ee379e097f
|
942a82cd1e34cd8f57e1d7f3272e4086605256ee
|
/config/settings.py
|
4ab609f97c0680e52cc1f2490a6f0d441b5e6b02
|
[] |
no_license
|
hanieh-mav/SocialNetwork-with-drf
|
d451126f93e3735a8c9d6dbf714a8179785e15cc
|
d929704a3d9f26e1e0ca5d961a01ba7dd5c6bf84
|
refs/heads/main
| 2023-06-13T08:17:46.591597 | 2021-07-09T13:37:06 | 2021-07-09T13:37:06 | 353,754,846 | 2 | 0 | null | 2021-07-09T13:27:27 | 2021-04-01T16:04:26 |
Python
|
UTF-8
|
Python
| false | false | 4,482 |
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@81g)s8gw+7-84o%ks%*8&j$cbb+&m%(#)+e6getb5o40@vil)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'crispy_forms',
'posts.apps.PostsConfig',
'accounts.apps.AccountsConfig',
'postapi.apps.PostapiConfig',
'accountapi.apps.AccountapiConfig',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dj_rest_auth.registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
#LOGIN_URL
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'posts:post-list'
#LOGOUT_URL
LOGOUT_REDIRECT_URL = 'posts:post-list'
STATIC_URL = '/static/'
#MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
AUTH_USER_MODEL = 'accounts.User'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'dj_rest_auth.jwt_auth.JWTCookieAuthentication',
],
}
SITE_ID = 1
REST_USE_JWT = True
JWT_AUTH_COOKIE = 'my-app-auth'
JWT_AUTH_REFRESH_COOKIE = 'my-refresh-token'
#EMAIL SETTING
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'xxxxxxxxxxxxxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
[
"[email protected]"
] | |
13420aecf149f66ef9cb63a68a5a090dbc8a2e3c
|
6c3e475dcd95d14875a199466b8a7c744f61478b
|
/userProfile/userProfile.py
|
1395f4986a45fed5e4b88da12ed0cb114aa8c04b
|
[] |
no_license
|
webclinic017/tripleATradeBot
|
b4cce7a330e76f9f207c4d6f4d16327b1717a17a
|
40b6130f52eb969336c7b602e698f41a2d8f947b
|
refs/heads/main
| 2023-01-04T04:16:38.338563 | 2020-10-29T10:33:34 | 2020-10-29T10:33:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,432 |
py
|
from eventLogger import eventLogger as logger
from pathList import pathList
from alice_blue import *
import openpyxl
class userProfile:
    """One sheet of the user-profile workbook plus the AliceBlue session it
    authenticates.  Cells A1/A2/A3 of the sheet hold username, password and
    API secret respectively."""
    # NOTE(review): class attributes used as defaults; each instance rebinds
    # them, but the empty-string placeholders would be shared if mutated.
    userProfileWorkBook=""
    profileName=""
    userName = ""
    password = ""
    apiSecret = ""
    accessToken = ""
    aliceObj = ""
    exchangeList = ['NSE']
    def __init__(self, profileName):
        # Load credentials for `profileName` from the Excel workbook.
        # (get_sheet_by_name is deprecated in newer openpyxl -- TODO confirm version.)
        self.userProfileWorkBook = openpyxl.load_workbook(pathList.userProfileFileName)
        self.profileName = profileName
        self.userName = self.userProfileWorkBook.get_sheet_by_name(self.profileName)['A1'].value
        self.password = self.userProfileWorkBook.get_sheet_by_name(self.profileName)['A2'].value
        self.apiSecret = self.userProfileWorkBook.get_sheet_by_name(self.profileName)['A3'].value
        # NOTE(review): logging password and API secret leaks credentials into logs.
        logger.info(self.userName)
        logger.info(self.password)
        logger.info(self.apiSecret)
    def login(self):
        """Authenticate with AliceBlue and keep the live session object."""
        logger.info("login")
        self.accessToken = AliceBlue.login_and_get_access_token(username=self.userName, password=self.password, twoFA='a', api_secret=self.apiSecret)
        self.aliceObj = AliceBlue(username=self.userName, password=self.password, access_token=self.accessToken, master_contracts_to_download=self.exchangeList)
    def profileData(self):
        """Print profile and balance of the session (call login() first)."""
        logger.info("profileData")
        print (self.aliceObj.get_profile())
        print (self.aliceObj.get_balance())
|
[
"[email protected]"
] | |
8723a4a6f9bb16968b5f83ec44895b30cb9da123
|
d82b879f41e906589a0a6ad5a6a09e0a0032aa3f
|
/ObservationScripts/on_off/observe_moon_spec_analyser.py
|
176f9c75c90dd4f6945052404f93c17615964d9f
|
[] |
no_license
|
SETIatHCRO/ATA-Utils
|
66718eed669882792148fe0b7a2f977cd0f6ac2e
|
59f4d21b086effaf41d5e11e338ce602c803cfd0
|
refs/heads/master
| 2023-08-16T20:41:44.233507 | 2023-08-10T20:39:13 | 2023-08-10T20:39:13 | 137,617,987 | 5 | 5 | null | 2023-08-10T20:39:14 | 2018-06-17T00:07:05 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 867 |
py
|
#!/home/obsuser/miniconda3/envs/ATAobs/bin/python
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import time
import atexit
import numpy as np
import sys
import argparse
import logging
import os
def main():
    """On/off observation setup: reserve antenna 2b, point it at the "off"
    position offset from the moon, then wait for the operator before
    slewing on-source."""
    logger = logger_defaults.getProgramLogger("observe",
            loglevel=logging.INFO)
    # Offsets for the "off" position (degrees, presumably -- TODO confirm units).
    az_offset = 20.
    el_offset = 0.
    ant_list = ["2b"]
    source = "moon"
    ata_control.reserve_antennas(ant_list)
    # Always release the antennas on process exit, even after an error.
    atexit.register(ata_control.release_antennas,ant_list, False)
    ata_control.create_ephems2(source, az_offset, el_offset)
    ata_control.point_ants2(source, "off", ant_list)
    #ata_control.autotune(ant_list)
    _ = input("Press any key to switch to on source")
    ata_control.point_ants2(source, "on", ant_list)
    print("on source acquired")
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
567c4f1b87268b45b3e5955082e71554b4e4551e
|
e3abb55ba514fb102ce01601ab0e9ebc15f5d26f
|
/code/l010_await.py
|
1c1c6228bf6292b72ebae15c80d040f4c8a0b5a4
|
[] |
no_license
|
joshmarshall/coroutine-presentation
|
1d8dec7a6c31a0ee5e8875883a326ea801300e93
|
a6d07e70bdff286f45785f4127d854ea701a6a08
|
refs/heads/master
| 2023-09-03T04:23:20.422823 | 2018-01-03T10:19:50 | 2018-01-03T10:19:50 | 64,452,999 | 1 | 0 | null | 2017-11-19T21:17:58 | 2016-07-29T05:29:08 |
Python
|
UTF-8
|
Python
| false | false | 1,248 |
py
|
import asyncio
class Session(object):
    """Toy async session demonstrating the async context-manager and
    async-iterator protocols."""

    @classmethod
    def connect(cls):
        # Factory; the simulated "connection" work happens in __aenter__.
        return Session()

    async def __aenter__(self):
        print("Creating session...")
        await asyncio.sleep(1)
        return self

    async def __aexit__(self, exc_typ, exc, tb):
        # can also handle exceptions as necessary
        await asyncio.sleep(1)
        print("Disconnected.")

    def __aiter__(self):
        # BUG FIX: __aiter__ must be a *regular* method that returns the
        # async iterator directly.  The old `async def __aiter__` returned
        # a coroutine, which `async for` rejects with TypeError on
        # Python 3.8+ (the awaitable __aiter__ form was removed).
        self.records = [Record(), Record()]
        return self

    async def __anext__(self):
        print("Finding record...")
        await asyncio.sleep(1)
        if not self.records:
            raise StopAsyncIteration()
        return self.records.pop(0)

    def find(self):
        # Fluent helper so callers can write `async for r in session.find()`.
        return self
class Record(object):
    """Toy record whose update() simulates an asynchronous write."""
    async def update(self, **kwargs):
        await asyncio.sleep(1)  # pretend I/O latency
        print("Updating record: {0}".format(kwargs))
async def wait():
    """Demo driver: open a Session, iterate its records, and update each
    with an incrementing counter."""
    async with Session.connect() as session:
        i = 0
        async for record in session.find():
            i += 1
            await record.update(foo=i)
def main():
    """Entry point: run the async demo to completion."""
    print("Starting...")
    # asyncio.run() (Python 3.7+) replaces the deprecated
    # get_event_loop()/run_until_complete()/close() sequence and guarantees
    # the loop is closed even if wait() raises.
    asyncio.run(wait())
    print("Finishing...")


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
9967bfbb48682fff74e8fa93da453b918a2d908b
|
43715a10381ec37c275850c2e4f5302cde18de8c
|
/rooms/models.py
|
8544758b5c7d49ad504a4a43c4f38656f611174b
|
[] |
no_license
|
dongdong-e/airbnb-clone
|
443f290baca4ea5c8f22f6c573383d11de4140f4
|
32c083c4e7f562d968639099d8439f26a666b175
|
refs/heads/master
| 2023-05-02T22:08:32.232594 | 2019-11-25T12:13:13 | 2019-11-25T12:13:13 | 219,305,006 | 0 | 0 | null | 2023-04-21T20:42:00 | 2019-11-03T13:27:34 |
Python
|
UTF-8
|
Python
| false | false | 2,842 |
py
|
from django.db import models
from django.urls import reverse
from django_countries.fields import CountryField
from core import models as core_models
class AbstractItem(core_models.TimeStampedModel):
    """ Abstract Item """
    # Shared base for the simple named lookup models below
    # (RoomType, Amenity, Facility, HouseRule).
    name = models.CharField(max_length=80)
    class Meta:
        abstract = True  # no database table for this model itself
    def __str__(self):
        return self.name
class RoomType(AbstractItem):
    """ RoomType Model Definition """
    class Meta:
        verbose_name = "Room Type"
        ordering = ["name"]  # alphabetical ordering in querysets/admin
class Amenity(AbstractItem):
    """ Amenity Model Definition """
    class Meta:
        # Django's default plural would be "Amenitys"; override it.
        verbose_name_plural = "Amenities"
class Facility(AbstractItem):
    """ Facility Model Definition """
    class Meta:
        # Django's default plural would be "Facilitys"; override it.
        verbose_name_plural = "Facilities"
class HouseRule(AbstractItem):
    """ HouseRule Model Definition """
    class Meta:
        verbose_name = "House Rule"  # admin display name with a space
class Photo(core_models.TimeStampedModel):
    """ Photo Model Definition """
    caption = models.CharField(max_length=80)
    file = models.ImageField(upload_to="room_photos")
    # String reference because Room is defined later in this module.
    room = models.ForeignKey("Room", related_name="photos", on_delete=models.CASCADE)
    def __str__(self):
        return self.caption
class Room(core_models.TimeStampedModel):
    """ Room Model Definition """
    name = models.CharField(max_length=140)
    description = models.TextField()
    country = CountryField()
    city = models.CharField(max_length=80)
    price = models.IntegerField()
    address = models.CharField(max_length=140)
    guests = models.IntegerField()
    beds = models.IntegerField()
    bedrooms = models.IntegerField()
    baths = models.IntegerField()
    check_in = models.TimeField()
    check_out = models.TimeField()
    instant_book = models.BooleanField(default=False)
    host = models.ForeignKey(
        "users.User", related_name="rooms", on_delete=models.CASCADE
    )
    # SET_NULL keeps rooms when their type is deleted.
    room_type = models.ForeignKey(
        "RoomType", related_name="rooms", on_delete=models.SET_NULL, null=True
    )
    amenities = models.ManyToManyField("Amenity", related_name="rooms", blank=True)
    facilities = models.ManyToManyField("Facility", related_name="rooms", blank=True)
    house_rules = models.ManyToManyField("HouseRule", related_name="rooms", blank=True)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Normalise the city name on every save (e.g. "seoul" -> "Seoul").
        self.city = str.capitalize(self.city)
        super().save(*args, **kwargs)
    def get_absolute_url(self):
        return reverse("rooms:detail", kwargs={"pk": self.pk})
    def total_rating(self):
        # Average of per-review rating averages, rounded to 2 decimals;
        # the length check guards the division when there are no reviews.
        all_reviews = self.reviews.all()
        all_ratings = 0
        if len(all_reviews) > 0:
            for review in all_reviews:
                all_ratings += review.rating_average()
            return round(all_ratings / len(all_reviews), 2)
        return 0
|
[
"[email protected]"
] | |
7ab917ac2d5b6dbd613df8ad73eaa04c6fd703b9
|
e042a2437aa60fdc966c4bb97d87f27fb6378c9c
|
/vae-mnist/utils.py
|
cbc53886b453559793ea1d4b8a743196b76eca8f
|
[] |
no_license
|
JZDBB/OOC-for-research
|
a8653f69a01fe9edd024411234ca422e220a437f
|
265fbd1732460acbe2a36f4273635485abf0eb0c
|
refs/heads/master
| 2020-07-04T04:08:51.130198 | 2019-08-21T13:00:38 | 2019-08-21T13:00:38 | 202,150,646 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
import numpy as np
def merge(images, size):
    """Tile a batch of 2-D images into one (size[0]*h, size[1]*w) grid.

    :param images: array of shape (n, h, w); image idx is placed row-major
                   at row idx // size[1], column idx % size[1].
    :param size: (rows, cols) of the output grid.
    :return: 2-D numpy array with the tiled images.
    """
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        # BUG FIX: the old `j = idx / size[1]` is *float* division on
        # Python 3, which breaks the slice arithmetic below; divmod gives
        # integer row/column indices on both Python 2 and 3.
        row, col = divmod(idx, size[1])
        img[row*h:row*h+h, col*w:col*w+w] = image
    return img
|
[
"[email protected]"
] | |
7d167e1feb92203517a6bf08e8597b19369c565e
|
42ffa887ca0ac7b54f0473880613865fe523fbfc
|
/src/viztracer/__init__.py
|
38fd0acde24ec07503595c6da251f4e74a45e921
|
[
"Apache-2.0"
] |
permissive
|
tianxie1989/viztracer
|
e61090ac286a5b4ffe4c8f0265fde38bca68837b
|
39a6314b2a5a30ede71be96bd5e174b2bdaa2664
|
refs/heads/master
| 2022-12-11T08:21:25.415858 | 2020-08-21T00:21:00 | 2020-08-21T00:21:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 384 |
py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from .viztracer import VizTracer
from .flamegraph import FlameGraph
from .decorator import ignore_function
__version__ = "0.3.0"
__all__ = [
"__version__",
"VizTracer",
"FlameGraph",
"ignore_function"
]
|
[
"[email protected]"
] | |
d2a67d571a6ae128e18235f827a76b271bc6e6e8
|
cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b
|
/ecloud/code/src/main/python/manor/streamlet/create_nodes.py
|
159486c27b7fd7132e26361dfada9a5c35673aba
|
[] |
no_license
|
1026237416/Python
|
ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14
|
ffa8f9ffb8bfec114b0ca46295db05c4213c4c30
|
refs/heads/master
| 2021-07-05T00:57:00.456886 | 2019-04-26T10:13:46 | 2019-04-26T10:13:46 | 114,510,323 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,090 |
py
|
# coding=utf-8
import yaml
from tornado import gen
from manor.screwdriver import stack_util
from manor.screwdriver.vendor_ecloud import list_app_resources
from manor.streamlet import StreamletBase,get_stack_resources
from manor.streamlet import download_path
from manor.util import generals
from manor.util import redis_tool
# Heat stack status strings checked by this streamlet.
SUCCESS_FLAG='CREATE_COMPLETE'
CREATING_FLAG='CREATE_IN_PROGRESS'
def get_instance(params,node_id,serial):
    # Factory used by the streamlet loader.  NOTE(review): the argument
    # order here (params, node_id, serial) differs from the constructor's
    # (params, serial, node_id) -- easy to get wrong when calling directly.
    return CreateNodes(params,serial,node_id)
class CreateNodes(StreamletBase):
    """Streamlet step that provisions a group of nodes via an orchestration
    stack and waits until every created node's IP shows up in the redis
    "road map" (presumably an agent-registration map — TODO confirm)."""

    def __init__(self,params,serial,node_id):
        # NOTE: base-class argument order (node_id, params, serial) differs
        # from this constructor's (params, serial, node_id).
        super(CreateNodes,self).__init__(node_id,params,serial)
        self.result=None
        # Resources reported by the stack once creation completes.
        self.created_resources=[]
        # Last observed stack status string ('' until first poll).
        self.stack_status=''
        # IPs of the created nodes, extracted from created_resources.
        self.ips=[]

    @gen.coroutine
    def execute(self):
        """Validate parameters, build the data module and kick off stack
        creation. Idempotent: a second call is a no-op via self.executed."""
        if not self.executed:
            self.executed=True
            # todo: check input parameters...
            self.log.debug('params:')
            self.log.debug(self.params)
            data_module={
                'name':'create node',
                'resources':{},
                'group_name':self.get_resource('group_name')
            }
            self.log.debug('calculate data module ..')
            try:
                if self.get_resource('group_name')=='':
                    raise Exception('group name is empty.')
                # Enforce the per-group upper limit on node count, if any.
                if self.get_resource('max')!='':
                    _max=int(self.get_resource('max'))
                    group_name=self.get_resource('group_name')
                    rs=yield list_app_resources(self.serial)
                    rs=[_ for _ in rs if _['group_name']==group_name]
                    if len(rs)>=_max:
                        raise Exception('manor.create.node.upper.limited')
                # Resolve the image reference to an OS name for the template.
                os_name=yield download_path(self.get_resource('image'))
                data_module['resources'][self.get_resource('group_name')]={
                    "count":self.get_resource('amount'),
                    "group_name":self.get_resource('group_name'),
                    "image":self.get_resource('image'),
                    'flavor':self.get_resource('flavors'),
                    "memory":self.get_resource('memory'),
                    "cores":self.get_resource('cores'),
                    'tenant':self.get_resource('tenant'),
                    'size':self.get_resource('disk_capacity'),
                    "os":os_name,
                    "network":[
                        {
                            "network":self.get_resource('network'),
                            "subnet":self.get_resource('subnet')
                        }
                    ]
                }
                self.log.debug(data_module)
                # Launch the stack; check_finish() polls it by this id.
                self.stack_id=yield stack_util.create_action(data_module,
                    self.serial)
            except Exception as e:
                self.log.error(generals.trace())
                raise e

    @gen.coroutine
    def calculate_created_resources(self):
        """Fetch and cache the resources created by the stack."""
        resources=yield get_stack_resources(self.stack_id)
        self.log.debug('calculate created:\n %s'%yaml.safe_dump(resources))
        self.created_resources=resources

    @gen.coroutine
    def get_stack_status(self):
        """Refresh self.stack_status from the orchestration backend."""
        future=yield stack_util.get_stack(self.stack_id)
        self.stack_status=future.to_dict()['stack_status']

    def get_resource(self,key):
        """Return the parameter value for *key*, or '' if absent."""
        if key in self.params:
            return self.params[key]
        else:
            return ''

    def __ips_not_in_road_map(self,ips):
        # IPs that have not yet registered in the redis road map.
        return [_ for _ in ips if _ not in self.__get_road_map()]

    def __get_road_map(self):
        # Road-map keys look like 'mapup_$_..._$_..._$_<ip>'; element 3 of the
        # '_$_'-split is the IP address.
        r=redis_tool.get_it()
        road_map=r.keys('mapup*')
        return [_.split('_$_')[3] for _ in road_map]

    def check_finish(self):
        """Poll for completion.

        Note: this method runs in a worker thread and is executed once per
        second. Returns True only when the stack reached SUCCESS_FLAG and
        every created IP appears in the road map; otherwise refreshes the
        cached status and returns False.
        """
        try:
            self.log.debug('create_nodes step. check finish. stack_id %s'%
                self.stack_id)
            if self.stack_id is None:
                # Stack not launched yet (execute() still pending).
                return False
            if self.stack_status!=CREATING_FLAG:
                if self.stack_status==SUCCESS_FLAG:
                    # Lazily populate the created resources and their IPs.
                    # NOTE(review): these are coroutines invoked without
                    # yield — results arrive on a later 1-second tick.
                    if len(self.created_resources)==0:
                        self.calculate_created_resources()
                    if len(self.ips)==0:
                        self.ips=[_['ip'] for _ in self.created_resources]
                    # Finished only when all IPs registered in the road map.
                    checked=[_ for _ in self.ips if _ in self.__get_road_map()]
                    self.log.debug('%s - %s'%(self.ips,checked))
                    if len(self.ips)>0 and self.ips==checked:
                        return True
                    else:
                        return False
                else:
                    # Status unknown/failed-looking: refresh and keep waiting.
                    self.get_stack_status()
            else:
                self.log.debug('the stack stack_status is %s'%self.stack_status)
                self.get_stack_status()
            return False
        except:
            self.log.error(generals.trace())
            raise Exception('error.manor.stream.check.create.node.finish')
|
[
"[email protected]"
] | |
8f1829ee69b87b02cc106601fc364e928bd4864f
|
6275b8eee6f8f0f69c1f7d1b74a82db22329d560
|
/src/train_v4.py
|
fe583be223d6b303b0b94e9af04e688c97169fb1
|
[
"MIT"
] |
permissive
|
khodwe56/kaggle-birdsong-recognition
|
081575ea02e663f98292c5e579c14de4bcdb7e22
|
95a902c37355619cf02558968f000038e487db47
|
refs/heads/master
| 2023-01-01T21:35:20.101880 | 2020-10-27T17:03:06 | 2020-10-27T17:03:06 | 299,716,450 | 0 | 0 |
MIT
| 2020-09-29T19:21:48 | 2020-09-29T19:21:47 | null |
UTF-8
|
Python
| false | false | 721 |
py
|
from argparse import ArgumentParser, Namespace
from engine.main_engine_v4 import MainEngineV4
import importlib
import torch
import ignite.distributed as idist
torch.backends.cudnn.benchmark = True
def run(local_rank, config):
    """Per-process entry point: build the training engine and run it.

    :param local_rank: rank of this worker on the local machine
    :param config: hyperparameter/config object with a ``run_params`` attr
    """
    engine = MainEngineV4(local_rank, config)
    engine.train(config.run_params)
def main(hyperparams):
    """Spawn the distributed context and execute ``run`` on every worker."""
    with idist.Parallel(**hyperparams.dist_params) as executor:
        executor.run(run, hyperparams)
if __name__ == '__main__':
    # --config names an importable Python module path; the module must
    # expose a Parameters class.
    parser = ArgumentParser(parents=[])
    parser.add_argument('--config', type=str)
    params = parser.parse_args()
    # Load the config module dynamically and instantiate its parameters.
    module = importlib.import_module(params.config, package=None)
    hyperparams = module.Parameters()
    main(hyperparams)
|
[
"[email protected]"
] | |
f74296653aa5f909d55be6b01db02cd11a8f0142
|
69533190b829ae8d37fe87e6990ecb9cc250bef3
|
/old/teach_pendant/switch_map.py
|
d91d5db1a81cd2eaa23f0f5cc8e4f22691e1cba2
|
[] |
no_license
|
chxb1987/idx6dof
|
a3ebd70d9901845b3a72f611e021caaba8814602
|
b6a2a1b79673cdc3d929c469116ff4eaf3f7583d
|
refs/heads/master
| 2020-08-03T21:46:51.620409 | 2017-06-14T20:50:22 | 2017-06-14T20:50:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,074 |
py
|
# Code generator: emits the C `case` dispatch lines that map raw teach-pendant
# scan codes to SET_SW_UP / SET_SW_DOWN macro calls.
#
# Fixes vs. the original:
#  * `print "..."` is a Python-2-only statement (SyntaxError on Python 3);
#    the single-argument print(...) call form below works on both.
#  * the `vn` local ('this->swbits_ups'/'this->swbits_downs') was never
#    referenced by the format string — dead code, removed.

SWITCH_UP = 1
SWITCH_DOWN = 3

# (scan code, switch number, switch position) triples.
sw_map = (
    (4, 2, SWITCH_UP),
    (12, 2, SWITCH_DOWN),
    (16, 17, SWITCH_UP),
    (17, 16, SWITCH_UP),
    (18, 15, SWITCH_UP),
    (19, 14, SWITCH_UP),
    (20, 13, SWITCH_UP),
    (21, 12, SWITCH_UP),
    (22, 10, SWITCH_UP),
    (23, 11, SWITCH_UP),
    (24, 17, SWITCH_DOWN),
    (25, 16, SWITCH_DOWN),
    (26, 15, SWITCH_DOWN),
    (27, 14, SWITCH_DOWN),
    (28, 13, SWITCH_DOWN),
    (29, 12, SWITCH_DOWN),
    (30, 10, SWITCH_DOWN),
    (31, 11, SWITCH_DOWN),
    (32, 7, SWITCH_UP),
    (33, 6, SWITCH_UP),
    (34, 5, SWITCH_UP),
    (35, 4, SWITCH_UP),
    (36, 3, SWITCH_UP),
    (37, 8, SWITCH_UP),
    (38, 1, SWITCH_UP),
    (39, 9, SWITCH_UP),
    (40, 7, SWITCH_DOWN),
    (41, 6, SWITCH_DOWN),
    (42, 5, SWITCH_DOWN),
    (43, 4, SWITCH_DOWN),
    (44, 3, SWITCH_DOWN),
    (45, 8, SWITCH_DOWN),
    (46, 1, SWITCH_DOWN),
    (47, 9, SWITCH_DOWN),
)

for sw_code, sw_n, sw_pos in sw_map:
    # Choose the firmware macro for the switch position.
    if sw_pos == SWITCH_UP:
        mn = 'SET_SW_UP'
    else:
        mn = 'SET_SW_DOWN'
    # Emitted C line; trailing space preserved from the original template.
    print("case {sw_code}: {mn}({sw_n}); break; ".format(
        sw_code=sw_code, mn=mn, sw_n=sw_n))
|
[
"[email protected]"
] | |
9c5ae5f21eb5f1a36093fe5f764a1835128a01d2
|
dc67e70a303f265ee6cb4c1a2d61fe811053fb3d
|
/beginner/095/C.py
|
e641e597678f29556c9fceffadc8270b970f8ac8
|
[] |
no_license
|
cry999/AtCoder
|
d39ce22d49dfce805cb7bab9d1ff0dd21825823a
|
879d0e43e3fac0aadc4d772dc57374ae72571fe6
|
refs/heads/master
| 2020-04-23T13:55:00.018156 | 2019-12-11T05:23:03 | 2019-12-11T05:23:03 | 171,214,066 | 0 | 0 | null | 2019-05-13T15:17:02 | 2019-02-18T04:24:01 |
Python
|
UTF-8
|
Python
| false | false | 623 |
py
|
def half_and_half(
        A: int, B: int, C: int, X: int, Y: int) -> int:
    """Return the minimum cost to obtain at least X A-pizzas and Y B-pizzas.

    Two AB-pizzas can be combined into one A-pizza plus one B-pizza, so we
    try every count k of AB-pizza *pairs* (0..max(X, Y)) and take the
    cheapest total: any remaining demand is met with plain A/B pizzas.

    :param A: price of an A-pizza
    :param B: price of a B-pizza
    :param C: price of an AB-pizza
    :param X: required number of A-pizzas
    :param Y: required number of B-pizzas
    """
    return min(
        max(0, X - k) * A + max(0, Y - k) * B + 2 * k * C
        for k in range(max(X, Y) + 1)
    )
if __name__ == "__main__":
A, B, C, X, Y = map(int, input().split())
ans = half_and_half(A, B, C, X, Y)
print(ans)
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.