blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
89953cc562f5821db41a06a6c2c67cef8e4197ab
|
67cfe3567f0a961123c561538624be28044ec852
|
/backend/girltalk_15424/urls.py
|
d6ab5c8e6f4bb8b2bb8d6c9afad9be43542c8a78
|
[] |
no_license
|
crowdbotics-apps/girltalk-15424
|
b732f7f6fc04fedd1acd99a2acfd129af71cc010
|
770efb300bc8297faea15e7b6a94c7a755fa8cf7
|
refs/heads/master
| 2023-02-04T02:55:52.708635 | 2020-04-04T05:21:02 | 2020-04-04T05:21:02 | 252,916,119 | 0 | 0 | null | 2023-01-26T16:28:35 | 2020-04-04T05:20:13 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,914 |
py
|
"""girltalk_15424 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing table. Django uses the first pattern that matches, so the
# allauth confirm-email override below must stay ahead of the rest_auth
# registration include, which would otherwise match the same path.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Girltalk"
admin.site.site_title = "Girltalk Admin Portal"
admin.site.index_title = "Girltalk Admin"
# swagger
# swagger
# drf-yasg schema view backing the auto-generated API documentation page.
schema_view = get_schema_view(
    openapi.Info(
        title="Girltalk API",
        default_version="v1",
        description="API documentation for Girltalk App",
    ),
    public=True,
    # Docs are only reachable by authenticated users.
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    # cache_timeout=0 disables schema caching so docs always reflect current code.
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"[email protected]"
] | |
598e66cd794150397c8cf73002b440126b93541a
|
951fc0da7384b961726999e5451a10e2783462c4
|
/script.module.ATFTV/addon.py
|
08dc093ce00ace1411bebb0134af1dcc39de1c05
|
[] |
no_license
|
vphuc81/MyRepository
|
eaf7b8531b2362f0e0de997a67b889bc114cd7c2
|
9bf8aca6de07fcd91bcec573f438f29e520eb87a
|
refs/heads/master
| 2022-01-02T15:07:35.821826 | 2021-12-24T05:57:58 | 2021-12-24T05:57:58 | 37,680,232 | 6 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,622 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016,2017,2018 RACC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
from xbmcgui import ListItem
from routing import Plugin
import os
import traceback
import requests
import requests_cache
from datetime import timedelta
from base64 import b64decode, urlsafe_b64encode
from pyDes import des, PAD_PKCS5
try:
from urllib.parse import quote_from_bytes as orig_quote
except ImportError:
from urllib import quote as orig_quote
# --- Add-on globals / HTTP session configuration ----------------------------
addon = xbmcaddon.Addon()
plugin = Plugin()
plugin.name = addon.getAddonInfo("name")
# Spoofed device user-agents: one for API/image requests, one for playback.
user_agent = "Dalvik/2.1.0 (Linux; U; Android 5.1.1; AFTS Build/LVY48F)"
player_user_agent = "mediaPlayerhttp/2.1 (Linux;Android 5.1) ExoPlayerLib/2.6.1"
# translatePath returns bytes here (Python 2 Kodi API), hence the decode.
USER_DATA_DIR = xbmc.translatePath(addon.getAddonInfo("profile")).decode("utf-8") # !!
# Cache lifetime in hours, taken from the user's add-on settings.
CACHE_TIME = int(addon.getSetting("cache_time"))
CACHE_FILE = os.path.join(USER_DATA_DIR, "cache")
expire_after = timedelta(hours=CACHE_TIME)
if not os.path.exists(USER_DATA_DIR):
    os.makedirs(USER_DATA_DIR)
# Cached HTTP session: POST must be cacheable because the upstream API is
# POST-only; old_data_on_error serves stale data if the upstream fails.
s = requests_cache.CachedSession(CACHE_FILE, allowable_methods="POST", expire_after=expire_after, old_data_on_error=True)
# Raise on HTTP error statuses for every response.
s.hooks = {"response": lambda r, *args, **kwargs: r.raise_for_status()}
s.headers.update({"User-Agent": "USER-AGENT-tvtap-APP-V2"})
# Upstream API endpoints.
token_url = "http://tvtap.net/tvtap1/index_new.php?case=get_channel_link_with_token_tvtap"
list_url = "http://tvtap.net/tvtap1/index_new.php?case=get_all_channels"
def quote(s, safe=""):
return orig_quote(s.encode("utf-8"), safe.encode("utf-8"))
@plugin.route("/")
def root():
categories = {
"01": "UK & USA Channels",
"02": "Movies",
"03": "Music",
"04": "News",
"05": "Sport",
"06": "Documentary",
"07": "Kids",
"08": "Food",
"09": "Religious",
}
list_items = []
for cat in categories.keys():
li = ListItem(categories[cat])
url = plugin.url_for(list_channels, cat_id=cat.lstrip("0"))
list_items.append((url, li, True))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/list_channels/<cat_id>")
def list_channels(cat_id=None):
list_items = []
r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
if "Could not connect" in r.content:
s.cache.clear()
ch = r.json()
for c in ch["msg"]["channels"]:
if c["cat_id"] == cat_id:
image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
li = ListItem(c["channel_name"].rstrip("."))
li.setProperty("IsPlayable", "true")
li.setArt({"thumb": image, "icon": image})
li.setInfo(type="Video", infoLabels={"Title": c["channel_name"].rstrip("."), "mediatype": "video"})
try:
li.setContentLookup(False)
except AttributeError:
pass
url = plugin.url_for(play, ch_id=c["pk_id"])
list_items.append((url, li, False))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/play/<ch_id>/play.pvr")
def play(ch_id):
# 178.132.6.54 81.171.8.162
key = b"19087321"
r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
ch = r.json()
for c in ch["msg"]["channels"]:
if c["pk_id"] == ch_id:
selected_channel = c
break
title = selected_channel.get("channel_name")
image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
with s.cache_disabled():
r = s.post(token_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"channel_id": ch_id, "username": "603803577"}, timeout=15)
links = []
for stream in r.json()["msg"]["channel"][0].keys():
if "stream" in stream or "chrome_cast" in stream:
d = des(key)
link = d.decrypt(b64decode(r.json()["msg"]["channel"][0][stream]), padmode=PAD_PKCS5)
if link:
link = link.decode("utf-8")
if not link == "dummytext" and link not in links:
links.append(link)
if addon.getSetting("autoplay") == "true":
link = links[0]
else:
dialog = xbmcgui.Dialog()
ret = dialog.select("Choose Stream", links)
link = links[ret]
if link.startswith("http"):
media_url = "{0}|User-Agent={1}".format(link, quote(player_user_agent))
else:
media_url = link
if "playlist.m3u8" in media_url:
if addon.getSetting("inputstream") == "true":
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("application/vnd.apple.mpegurl")
li.setProperty("inputstreamaddon", "inputstream.adaptive")
li.setProperty("inputstream.adaptive.manifest_type", "hls")
li.setProperty("inputstream.adaptive.stream_headers", media_url.split("|")[-1])
elif addon.getSetting("livestreamer") == "true":
serverPath = os.path.join(xbmc.translatePath(addon.getAddonInfo("path")), "livestreamerXBMCLocalProxy.py")
runs = 0
while not runs > 10:
try:
requests.get("http://127.0.0.1:19001/version")
break
except Exception:
xbmc.executebuiltin("RunScript(" + serverPath + ")")
runs += 1
xbmc.sleep(600)
livestreamer_url = "http://127.0.0.1:19001/livestreamer/" + urlsafe_b64encode("hlsvariant://" + media_url)
li = ListItem(title, path=livestreamer_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("video/x-mpegts")
else:
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
li.setMimeType("application/vnd.apple.mpegurl")
try:
li.setContentLookup(False)
except AttributeError:
pass
else:
li = ListItem(title, path=media_url)
li.setArt({"thumb": image, "icon": image})
xbmcplugin.setResolvedUrl(plugin.handle, True, li)
if __name__ == "__main__":
try:
plugin.run(sys.argv)
s.close()
except requests.exceptions.RequestException as e:
dialog = xbmcgui.Dialog()
dialog.notification(plugin.name, str(e), xbmcgui.NOTIFICATION_ERROR)
traceback.print_exc()
xbmcplugin.endOfDirectory(plugin.handle, False)
|
[
"[email protected]"
] | |
fc9d27bcb01c7fe4e3ef1115a053ef8ac3b732cd
|
1925c535d439d2d47e27ace779f08be0b2a75750
|
/microsoft/implement_rand10_with_rand7.py
|
0f89680adba0923d2798aa8ebf8bb297ca0fc640
|
[] |
no_license
|
arthurDz/algorithm-studies
|
ee77d716041671c4b8bb757d8d96f3d10b6589f7
|
1e4d23dd0c40df34f58d71c7ca3e6491be732075
|
refs/heads/master
| 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 736 |
py
|
# Given a function rand7 which generates a uniform random integer in the range 1 to 7, write a function rand10 which generates a uniform random integer in the range 1 to 10.
# Do NOT use system's Math.random().
# Example 1:
# Input: 1
# Output: [7]
# Example 2:
# Input: 2
# Output: [8,4]
# Example 3:
# Input: 3
# Output: [8,1,10]
# Note:
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
# Follow up:
# What is the expected value for the number of calls to rand7() function?
# Could you minimize the number of calls to rand7()?
def rand10(self):
    """Generate a uniform random integer in [1, 10] using only rand7().

    Rejection sampling: two rand7() calls yield a uniform value in
    [1, 49].  Values in [1, 40] map uniformly onto [1, 10] via modulo;
    values in [41, 49] are rejected and resampled.  Expected rand7()
    calls per result: 2 * 49/40 ~= 2.45, versus ~9.8 for the original
    loop that rejected everything above 10.

    :return: int uniformly distributed over 1..10 inclusive.
    """
    while True:
        # (rand7() - 1) picks a row 0..6, rand7() a column 1..7:
        # together a single uniform draw from 1..49.
        temp = rand7() + (rand7() - 1) * 7
        if temp <= 40:  # keep only the uniform 40-value prefix
            return (temp - 1) % 10 + 1
|
[
"[email protected]"
] | |
fc27042eaae21fea6ee015e954980fd672a2c584
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.11.40/8/1569575464.py
|
03f7325643668c7c922036efc5b29701c3522051
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,719 |
py
|
import functools
import typing
import string
import random
import pytest
class Leaf0:
    """Minimal leaf base class: stores a single payload in ``value``."""

    def __init__(self, value):
        """Remember *value* on the instance."""
        self.value = value
class Node0:
    """Minimal inner-node base: ``left``/``right`` children and an optional ``value``."""

    def __init__(self, left, right, value=None):
        """Store both children and the (optional) payload."""
        self.left = left
        self.right = right
        self.value = value
## Lösung Teil 1.
## Lösung Teil 1.
class Leaf(Leaf0):
    """A leaf of the binary tree.

    Bug fixes versus the original:
    * declared with ``def`` instead of ``class`` — ``Leaf(1)`` returned
      ``None`` instead of an instance;
    * ``preorder``/``postorder`` returned the bare value although they are
      annotated ``-> list`` and ``Node`` concatenates their results with
      ``+=`` — they now return a (possibly empty) list.
    """

    def __init__(self, *args):
        super().__init__(*args)

    def preorder(self) -> list:
        """Return the leaf's value as a one-element list, omitting None."""
        return [] if self.value is None else [self.value]

    def postorder(self) -> list:
        """Return the leaf's value as a one-element list, omitting None."""
        return [] if self.value is None else [self.value]
class Node(Node0):
    """An inner binary-tree node supporting pre- and post-order traversal."""

    def __init__(self, *args):
        super().__init__(*args)

    def preorder(self) -> list:
        """Return values in pre-order (value, left, right), skipping falsy values/children."""
        ls = []
        if self.value:
            ls.append(self.value)
        if self.left:
            ls += self.left.preorder()
        if self.right:
            ls += self.right.preorder()
        return ls

    def postorder(self) -> list:
        """Return values in post-order (left, right, value), skipping falsy values/children.

        Bug fix: the original recursed into the children via ``preorder``,
        producing a wrong ordering for any tree deeper than one level.
        """
        ls = []
        if self.left:
            ls += self.left.postorder()
        if self.right:
            ls += self.right.postorder()
        if self.value:
            ls.append(self.value)
        return ls
######################################################################
## Lösung Teil 2.
def test_tree():
    """Post-order of Node(Leaf(1), Leaf(2), 3) must be left, right, root."""
    assert Node (Leaf(1), Leaf(2), 3).postorder() == [1, 2, 3]
######################################################################
|
[
"[email protected]"
] | |
7e4e28d2c13d17fdd64f8dd33933b84f8a9c95db
|
cbcdf195338307b0c9756549a9bffebf3890a657
|
/django-stubs/core/cache/backends/base.pyi
|
52f2910b56950d0d0b50af70cb6a198f97a8879f
|
[
"MIT"
] |
permissive
|
mattbasta/django-stubs
|
bc482edf5c6cdf33b85005c2638484049c52851b
|
8978ad471f2cec0aa74256fe491e2e07887f1006
|
refs/heads/master
| 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 |
MIT
| 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null |
UTF-8
|
Python
| false | false | 2,590 |
pyi
|
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
from django.core.exceptions import ImproperlyConfigured
class InvalidCacheBackendError(ImproperlyConfigured): ...
class CacheKeyWarning(RuntimeWarning): ...
# Module-level constants used by cache backends.
DEFAULT_TIMEOUT: Any
MEMCACHE_MAX_KEY_LENGTH: int
# Builds the versioned cache key from prefix, version and key.
def default_key_func(key: Union[int, str], key_prefix: str, version: Union[int, str]) -> str: ...
# Resolves a KEY_FUNCTION setting (dotted path or callable) to a callable.
def get_key_func(key_func: Optional[Union[Callable, str]]) -> Callable: ...
class BaseCache:
    """Type stub for the abstract base class all Django cache backends extend."""
    default_timeout: int = ...
    key_prefix: str = ...
    version: int = ...
    key_func: Callable = ...
    def __init__(self, params: Dict[str, Optional[Union[Callable, Dict[str, int], int, str]]]) -> None: ...
    def get_backend_timeout(self, timeout: Any = ...) -> Optional[float]: ...
    def make_key(self, key: Union[int, str], version: Optional[Union[int, str]] = ...) -> str: ...
    def add(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
    def get(self, key: Any, default: Optional[Any] = ..., version: Optional[Any] = ...) -> Any: ...
    def set(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
    def touch(self, key: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
    def delete(self, key: Any, version: Optional[Any] = ...) -> None: ...
    def get_many(self, keys: List[str], version: Optional[int] = ...) -> Dict[str, Union[int, str]]: ...
    def get_or_set(
        self, key: str, default: Optional[Union[Callable, int, str]], timeout: Any = ..., version: Optional[int] = ...
    ) -> Optional[Union[int, str]]: ...
    def has_key(self, key: Any, version: Optional[Any] = ...): ...
    def incr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
    def decr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
    def __contains__(self, key: str) -> bool: ...
    def set_many(
        self,
        data: Union[Dict[str, bytes], Dict[str, int], Dict[str, str], OrderedDict],
        timeout: Any = ...,
        version: Optional[Union[int, str]] = ...,
    ) -> List[Any]: ...
    def delete_many(self, keys: Union[Dict[str, str], List[str]], version: None = ...) -> None: ...
    def clear(self) -> None: ...
    def validate_key(self, key: str) -> None: ...
    def incr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
    def decr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
    def close(self, **kwargs: Any) -> None: ...
|
[
"[email protected]"
] | |
0165d25c1c0c68a71343c15d575f22e270017e69
|
e29734c2b3543a05a28b6bc460c3248ea37aaf5c
|
/apps/course/migrations/0015_auto_20190424_1717.py
|
36961cbabe8320fc898752c336f25bbec6d02e5d
|
[] |
no_license
|
simida0755/PopularBlogs
|
fda6dbe06751dde013ba57f73c708fd7106a49ee
|
3a86989232206d0727223306c0e2d2c62d35fa9b
|
refs/heads/master
| 2020-05-21T15:54:09.853341 | 2019-05-13T02:15:28 | 2019-05-13T02:15:28 | 186,101,555 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
# Generated by Django 2.0.2 on 2019-04-24 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make course.image nullable (cover image optional)."""

    dependencies = [
        ('course', '0014_auto_20190424_1716'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='image',
            field=models.ImageField(null=True, upload_to='courses/%Y/%m', verbose_name='封面图'),
        ),
    ]
|
[
"[email protected]"
] | |
2b9bee86ebd1b08f2a0f0400abf395c09608c7e8
|
5de3f612df0dbda712b39403dbafb0617e597651
|
/build/pal_behaviour_msgs/catkin_generated/pkg.installspace.context.pc.py
|
8706a70930366093c2aaea8520ef1c40fd260a4a
|
[] |
no_license
|
AdriiTrujillo/tiago_public_ws
|
1bd62d51c2eb694d07db83738f7bebd582d8126c
|
6eaeabd1ec177df837b81fd9f42887318128766b
|
refs/heads/main
| 2023-04-03T13:09:09.749190 | 2021-04-01T10:05:43 | 2021-04-01T10:05:43 | 350,026,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for pal_behaviour_msgs; do not
# edit by hand — regenerate through the catkin build instead.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_behaviour_msgs"
PROJECT_SPACE_DIR = "/home/adrii/tiago_public_ws/install"
PROJECT_VERSION = "0.13.2"
|
[
"[email protected]"
] | |
d156167ce165ac16bab92f480187ddf3da7430eb
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/b2e3e8c0718142d4cb0387f46cd77c15b67cc1e9-<get_random_string>-bug.py
|
9b873176526ac647a2e151598420e0deb76c070d
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
def get_random_string(length=8, choices=None):
    """Generate a random string of *length* characters drawn from *choices*.

    Bug fix: the original referenced ``string`` in its default argument and an
    unimported ``choice`` in its body; neither name was imported, so every
    call (and even the ``def`` itself) raised ``NameError``.  The default pool
    is now computed lazily inside the function, which is call-compatible with
    the original signature.

    :param length: number of characters to generate (default 8).
    :param choices: character pool; defaults to ASCII letters plus digits.
    :return: str of *length* random characters.

    NOTE(review): ``random.choice`` is not cryptographically secure — use the
    ``secrets`` module if these strings guard anything security-sensitive.
    """
    import string
    from random import choice
    if choices is None:
        choices = string.ascii_letters + string.digits
    return ''.join([choice(choices) for i in range(length)])
|
[
"[email protected]"
] | |
9d9bec682b8409ccc2d18ac3c64f1c22b5a01199
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2551/60603/312775.py
|
3038d6419acf664fc4ed2b489ef7cb65c0727f17
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 323 |
py
|
def change(a, b):
    """Toggle (0 <-> 1) every element of the global ``li`` in the 1-based inclusive range [a, b]."""
    for pos in range(a - 1, b):
        li[pos] = 0 if li[pos] == 1 else 1
def que(a, b):
    """Return the count of 1s in the global ``li`` over the 1-based inclusive range [a, b]."""
    window = li[a - 1:b]
    return sum(window)
# Read n (array size) and m (number of operations) from stdin.
n,m = [int(x) for x in input().split()]
li = [0]*n
for i in range(m):
    s = [int(x) for x in input().split()]
    if s[0]==0:
        # op "0 a b": toggle every bit in [a, b]
        change(s[1],s[2])
    elif s[0]==1:
        # op "1 a b": print the number of 1s in [a, b]
        print(que(s[1],s[2]))
|
[
"[email protected]"
] | |
b0b638794415687755cbdb2be2b4c90db79e1c55
|
bc2cdb1e438efaf67131e975ac4db80b4dc43385
|
/src/public/message/migrations/0003_pushmessage.py
|
a4cc7fb3829a923d5a18ec9f447e1971018bd4f1
|
[] |
no_license
|
Shadow-linux/ops-for-study
|
cf4d55409ebc6f27d454bea60886cd154c994484
|
115b567948d25a64e423a6cdc89bc8337896afe2
|
refs/heads/master
| 2023-01-14T13:35:56.880896 | 2019-09-23T05:01:31 | 2019-09-23T05:01:31 | 209,781,758 | 2 | 0 | null | 2023-01-04T10:55:45 | 2019-09-20T12:08:11 |
Python
|
UTF-8
|
Python
| false | false | 1,062 |
py
|
# Generated by Django 2.0.1 on 2019-04-17 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the PushMessage model (table common_push_message)."""

    dependencies = [
        ('message', '0002_auto_20190416_1144'),
    ]

    operations = [
        migrations.CreateModel(
            name='PushMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='str; 标题', max_length=100)),
                ('content', models.TextField(help_text='str; 消息内容')),
                ('user_id_list', models.CharField(help_text='str; 用户ID', max_length=500)),
                ('send_type_list', models.CharField(help_text='str; 发送消息类型', max_length=500)),
                ('created', models.DateTimeField(auto_now_add=True, help_text='str; 创建时间')),
            ],
            options={
                'verbose_name': '消息推送',
                'db_table': 'common_push_message',
            },
        ),
    ]
|
[
"[email protected]"
] | |
4ee5c7635d1d388cb4d468d7dc04515ac9df2ccd
|
26d6c34df00a229dc85ad7326de6cb5672be7acc
|
/msgraph-cli-extensions/v1_0/personalcontacts_v1_0/azext_personalcontacts_v1_0/vendored_sdks/personalcontacts/aio/_personal_contacts.py
|
0c6cf4d05bbae3c59e563ab9a028bb2e8874efa7
|
[
"MIT"
] |
permissive
|
BrianTJackett/msgraph-cli
|
87f92471f68f85e44872939d876b9ff5f0ae6b2c
|
78a4b1c73a23b85c070fed2fbca93758733f620e
|
refs/heads/main
| 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 |
NOASSERTION
| 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null |
UTF-8
|
Python
| false | false | 3,990 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PersonalContactsConfiguration
from .operations import UsersOperations
from .operations import UsersContactFoldersOperations
from .operations import UsersContactFoldersContactsOperations
from .operations import UsersContactsOperations
from .. import models
class PersonalContacts(object):
    """PersonalContacts.
    :ivar users: UsersOperations operations
    :vartype users: personal_contacts.aio.operations.UsersOperations
    :ivar users_contact_folders: UsersContactFoldersOperations operations
    :vartype users_contact_folders: personal_contacts.aio.operations.UsersContactFoldersOperations
    :ivar users_contact_folders_contacts: UsersContactFoldersContactsOperations operations
    :vartype users_contact_folders_contacts: personal_contacts.aio.operations.UsersContactFoldersContactsOperations
    :ivar users_contacts: UsersContactsOperations operations
    :vartype users_contacts: personal_contacts.aio.operations.UsersContactsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the Microsoft Graph v1.0 endpoint when none is supplied.
        if not base_url:
            base_url = 'https://graph.microsoft.com/v1.0'
        self._config = PersonalContactsConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Map model-class names to classes for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        # Every operation group shares the one pipeline client and serializers.
        self.users = UsersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contact_folders = UsersContactFoldersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contact_folders_contacts = UsersContactFoldersContactsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contacts = UsersContactsOperations(
            self._client, self._config, self._serialize, self._deserialize)
    async def close(self) -> None:
        """Close the underlying HTTP pipeline."""
        await self._client.close()
    async def __aenter__(self) -> "PersonalContacts":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
[
"[email protected]"
] | |
e742907f523101322df4966977e82fafc1446f34
|
2c4ad0e41e495b1be29ac54f3552f5a4bcfb8d8b
|
/apps/comments/views.py
|
0c004c9e007472a3dba19ad976acbe6ce31052d7
|
[] |
no_license
|
buzzzzx/blogforzly
|
7de8f01e767e01f30d7dab8ffb2243484de24f4a
|
163a26c7518ed13c7f3a58cd12d455748b60ab6d
|
refs/heads/master
| 2022-03-09T14:43:00.098795 | 2019-08-06T13:13:08 | 2019-08-06T13:13:08 | 114,436,672 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,032 |
py
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Comment
from .forms import CommentForm
from blog.models import Post
from utils.send_email import send
# Create your views here.
def post_comment(request, post_pk):
    """Create a comment on post *post_pk* and redirect back to the post.

    Valid POST: save the comment, email a notification, redirect to the post.
    Invalid POST: re-render the post detail page with form errors.
    Any other method: plain redirect to the post.
    """
    post = get_object_or_404(Post, pk=post_pk)
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            # Attach the comment to its post before saving.
            comment.post = post
            # comment.text = form.text
            comment.save()
            # send email
            # Notify about the new comment (flag=1 selects the comment template).
            send(flag=1, nickname=comment.name, semail=comment.email, text=comment.text, postname=comment.post.title)
            return redirect(post)
        else:
            # Invalid form: show the detail page again with existing comments.
            comment_list = post.comment_set.all()
            context = {
                'post': post,
                'form': form,
                'comment_list': comment_list
            }
            return render(request, 'blog/detail.html', context=context)
    return redirect(post)
|
[
"[email protected]"
] | |
1cdc2bcc69cfb9db96d5c781083c1bc817ff9f01
|
387cf5f72ed6679a4d9e04bddd16998a190c4caf
|
/problems/programmers/lv4/pgs-12983-wrong.py
|
69760552fe4acfa3898004c7c8b095f9f458bbe3
|
[] |
no_license
|
CodyBuilder-dev/Algorithm-Coding-Test
|
db4ee1e7565fbcef3140192225167eff42ad5c02
|
cca5c4ba8bc31679ab00aceccfd8d9d39c232f72
|
refs/heads/master
| 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
"""
제목 : 단어 퍼즐
아이디어 : 거스름돈과 비슷한 dp
(1) DP 값 저장
- key는 strs의 i번재 단어까지 서서 t의 j번째를 만드는 것
- j번째 까지냐 vs j번째 만이냐
- value는 최소값
(2) 초기화
- dp[0][0] ~ dp[0][len(t)-1]는 초기화할 수 있음
(3) 점화식
- dp[i][j] 는, min(dp[i-1][j], dp[i][j-k] (k = strs에 담긴 원소들의 길이))인가?
"""
from math import inf
def solution(strs, t):
    """(Known-incomplete attempt) DP over the words in *strs* to build *t*.

    NOTE(review): only the first DP row is ever initialised and the raw
    table is returned instead of the expected minimum piece count — the
    file name marks this as a wrong/abandoned solution; kept for reference.
    """
    # dp[i][j]: intended to track using the first i words to cover t up to j.
    dp = [[inf]*len(t) for _ in range(len(strs))]
    for i in range(len(t)):
        # Debug output of the candidate repetition being tested.
        print(strs[0]*(i+1))
        dp[0][i] = t[:i+1].count(strs[0]*(i+1))
    return dp
# Test cases (the second print argument is the expected answer).
print(solution(["ba","na","n","a"],"banana"),3)
print(solution(["app","ap","p","l","e","ple","pp"],"apple"),2)
print(solution(["ba","an","nan","ban","n"],"banana"),-1)
print(solution(["bax","dxv","zxc"],"baobab"))
# NOTE(review): prints the function object itself — leftover debug line.
print(solution)
|
[
"[email protected]"
] | |
0e48b30a06104cba35625dfe97b6f03f276fffcb
|
c553f9d608c435cd7f19c9be0ef512307295a837
|
/daemin/greedy/실전문제/1.모험가길드.py
|
cfc80dc0c3cd461720a12db2077c822dd132f7b8
|
[] |
no_license
|
Green0v0/Algorithm
|
2d089e7c016997c1fb5e1094ddeeb80cd1ce0485
|
ab9b387e63550ef1b5dfe0f851163b16fbd42c88
|
refs/heads/main
| 2023-05-24T05:37:17.125671 | 2021-06-16T05:35:52 | 2021-06-16T05:35:52 | 330,944,982 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,317 |
py
|
# n = int(input())
# k = list(map(int,input().split(" ")))
# Hard-coded sample input: n adventurers with the fear levels in `data`.
n= 5
data = [2,3,1,2,2]
data.sort() # sorts in place, ascending (reverse=True would give descending)
result = 0 # number of completed groups
count = 0 # adventurers gathered for the current group
"""
1. 기본 로직은 공포도가 작은 탐험가 부터 (숫자가 작은) 그룹을 이뤄 나가는것이다.
2. count 에 일단 모험가를 1개 넣어주고 다음 모험가(i)가 1보다 작거나 같으면 그룹이 되어나간다.
3. 1보다 크다면 result(그룹)이 되지 못하고 반복문으로 올라가서 다음 모험가를 데리고 count 에 1을 더해준다.
4 . 그러면서 조건에 만족할때 그룹개수를 증가시킨다.
"""
# Greedy: scan fear levels in ascending order; once the group size reaches the
# current member's fear level, seal the group and start a new one.
for i in data:
    count +=1
    if count >= i: # group has enough members for this fear level -> form it
        result +=1
        count=0
print(result)
# First attempt failed //
# With input (4,3,2,2,2,1,1,1,1,1): once 4,3,2 remain they can no longer form a group, yet it still runs.
#
#
# while True:
#     m = min(k)
#     for _ in range(m):
#         k.pop() # This part is the problem! For the final 4,3,2 the min is 2, so pop runs twice and throws away the 3 as well..
#         count += 1
#     if len(k)==0:
#         break
# print(count)
|
[
"[email protected]"
] | |
00b204dd1c59a7f8d99f85a898d26452b44fb647
|
0cbd245ba67ada0dd04e8a61471b2bc2bbacdc47
|
/App09_RealEstate_DataMiner/app9.py
|
8c7df6853b363cbc8fa964ed55f68f41a46db523
|
[] |
no_license
|
ptsouth97/pythonapps
|
7ed0a121f35669d0bb177d88ef9aa09828bea813
|
ee239a02c553fb9d2672f50a4b4c49b4ea4396f0
|
refs/heads/master
| 2021-01-12T04:31:31.687181 | 2017-02-05T21:07:42 | 2017-02-05T21:07:42 | 77,632,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,577 |
py
|
import os
import csv
from data_types import Purchase
import statistics
def main():
    """Entry point: print the banner, load the CSV data, report statistics."""
    print_header()
    filename = get_data_file()
    # print(filename)
    data = load_file(filename)
    query_data(data)
def print_header():
    """Print the application banner."""
    print('------------------------')
    print('     Real Estate App')
    print('------------------------')
    print()
def get_data_file():
    """Return the full path of the bundled Sacramento 2008 transactions CSV."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', 'SacramentoRealEstateTransactions2008.csv')
def load_file(filename):
    """Parse the CSV at *filename* into a list of Purchase records."""
    with open(filename, 'r', encoding='utf-8') as fin:
        reader = csv.DictReader(fin)
        purchases = []
        for row in reader:
            # print(type(row), row)
            # print("Bed count: {}".format(row['beds']))
            # Each CSV row becomes one Purchase via its dict-based factory.
            p = Purchase.create_from_dict(row)
            purchases.append(p)
        return purchases
    # print(purchases[0].__dict__)
    # header = fin.readline().strip()
    # reader = csv.reader(fin, delimiter=',')
    # for row in reader:
    #     print(row)
    #     beds = row[4]
# def load_file_basic(filename):
# with open(filename, 'r', encoding='utf-8') as fin:
# header = fin.readline().strip()
# print('found header: ' + header)
#
# lines = []
# for line in fin:
# line_data = line.strip().split(',')
# lines.append(line_data)
#
# print(lines[:5])
# def get_price(p):
# return p.price
def query_data(data):
    """Sort *data* in place by price and print summary statistics.

    Reports the most/least expensive homes, the overall average price, and
    averages (price, baths, sq ft) restricted to 2-bedroom homes.
    """
    data.sort(key=lambda home: home.price)

    # After sorting, the extremes sit at the ends of the list.
    priciest = data[-1]
    print("The most expensive house is ${:,} with {} beds and {} baths".format(
        priciest.price, priciest.beds, priciest.baths))

    cheapest = data[0]
    print("The least expensive house is ${:,} with {} beds and {} baths".format(
        cheapest.price, cheapest.beds, cheapest.baths))

    all_prices = [home.price for home in data]
    mean_price = statistics.mean(all_prices)
    print("The average home price is ${:,}".format(int(mean_price)))

    # Restrict the remaining statistics to 2-bedroom homes.
    two_bed = [home for home in data if home.beds == 2]
    mean_price_2bed = statistics.mean([home.price for home in two_bed])
    mean_baths_2bed = statistics.mean([home.baths for home in two_bed])
    mean_sqft_2bed = statistics.mean([home.sq__ft for home in two_bed])
    print("The average price of a 2-bedroom home is ${:,}, baths={}, sq ft={:,}".format(
        int(mean_price_2bed), round(mean_baths_2bed, 1), round(mean_sqft_2bed, 1)))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
caef56b59f3154376c50d4336649aec1100d0102
|
2f6d017dedc68588b2615d65c1e8ca8bcdd90446
|
/api/dynamic_tests_v2/cumsum.py
|
64fc792e50a19fb1e753faa601710dbef87b366e
|
[] |
no_license
|
hysunflower/benchmark
|
70fc952a4eb1545208543627539d72e991cef78a
|
c14f99c15b4be9e11f56ea378ca15d9c3da23bab
|
refs/heads/master
| 2022-06-30T07:04:14.986050 | 2022-06-15T02:43:04 | 2022-06-15T02:43:04 | 224,449,279 | 1 | 0 | null | 2019-11-27T14:29:29 | 2019-11-27T14:29:29 | null |
UTF-8
|
Python
| false | false | 1,479 |
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class CumsumConfig(APIConfig):
    # Benchmark configuration for the "cumsum" API; input values are
    # sampled from the range [-1, 1].
    def __init__(self):
        super(CumsumConfig, self).__init__('cumsum')
        self.feed_spec = {"range": [-1, 1]}
class PDCumsum(PaddleDynamicAPIBenchmarkBase):
    # Paddle (dynamic-graph) side of the cumsum benchmark.
    def build_graph(self, config):
        # Shape/dtype come from the parsed CumsumConfig instance.
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        result = paddle.cumsum(x=x, axis=config.axis)
        self.feed_list = [x]
        self.fetch_list = [result]
class TorchCumsum(PytorchAPIBenchmarkBase):
    # PyTorch side of the cumsum benchmark, mirroring PDCumsum.
    def build_graph(self, config):
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        # Bug fix: torch.cumsum has no ``x=`` keyword — its tensor parameter
        # is ``input`` (pass it positionally) and the axis keyword is ``dim``.
        # The original ``torch.cumsum(x=x, axis=config.axis)`` raises TypeError.
        result = torch.cumsum(x, dim=config.axis)
        self.feed_list = [x]
        self.fetch_list = [result]
if __name__ == '__main__':
test_main(
pd_dy_obj=PDCumsum(), torch_obj=TorchCumsum(), config=CumsumConfig())
|
[
"[email protected]"
] | |
c83eadf7b9b9967c1507e6da8273883512787e28
|
13ea58f72fa96e2455609fb452b5f3b98e94f846
|
/sfepy/postprocess/plot_cmesh.py
|
4319e5a25f131980a112ea817a562980f7b29e29
|
[
"BSD-3-Clause"
] |
permissive
|
vondrejc/sfepy
|
4284ee47979b89d9e504b72b91689a9ce0c3a5ec
|
8e427af699c4b2858eb096510057abb3ae7e28e8
|
refs/heads/master
| 2021-01-24T00:09:18.722674 | 2014-08-20T12:37:03 | 2014-08-20T14:25:56 | 12,810,199 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,538 |
py
|
"""
Functions to visualize the CMesh geometry and topology.
"""
import matplotlib.pyplot as plt
from sfepy.postprocess.plot_dofs import _get_axes
def plot_wireframe(ax, cmesh, color='k', show=False):
    """
    Draw the mesh as a wireframe by plotting every edge of `cmesh`.
    """
    space_dim = cmesh.dim
    ax = _get_axes(ax, space_dim)

    vertex_coors = cmesh.coors
    # Edge -> vertex connectivity; each edge has exactly two endpoints.
    edge_conn = cmesh.get_conn(1, 0)
    for endpoints in edge_conn.indices.reshape((edge_conn.num, 2)):
        pts = vertex_coors[endpoints]
        if space_dim == 3:
            ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], color)
        else:
            ax.plot(pts[:, 0], pts[:, 1], color)

    if show:
        plt.show()

    return ax
def plot_entities(ax, cmesh, edim, color='b', size=10, show=False):
    """
    Scatter-plot the centroids of all topology entities of dimension `edim`.
    """
    centroids = cmesh.get_centroids(edim)
    space_dim = cmesh.dim
    ax = _get_axes(ax, space_dim)

    xs, ys = centroids[:, 0], centroids[:, 1]
    if space_dim == 3:
        ax.scatter(xs, ys, centroids[:, 2], s=size, c=color)
    else:
        ax.scatter(xs, ys, s=size, c=color)

    if show:
        plt.show()

    return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
    """
    Write the global id of each topology entity of dimension `edim` at its
    centroid.
    """
    centroids = cmesh.get_centroids(edim)
    space_dim = cmesh.dim
    ax = _get_axes(ax, space_dim)

    for entity_id, pos in enumerate(centroids):
        if space_dim == 3:
            ax.text(pos[0], pos[1], pos[2], entity_id,
                    color=color, fontsize=fontsize)
        else:
            ax.text(pos[0], pos[1], entity_id,
                    color=color, fontsize=fontsize)

    if show:
        plt.show()

    return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
    """
    Label mesh topology entities using cell-local ids.

    Each entity's label is drawn slightly pulled toward its cell centroid so
    that labels of shared entities do not overlap.
    """
    coors = cmesh.get_centroids(edim)
    dim = cmesh.dim
    centres = cmesh.get_centroids(dim)
    conn = cmesh.get_conn(dim, edim)
    off = conn.offsets

    ax = _get_axes(ax, dim)

    eps = 0.1
    oeps = 1.0 - eps
    # Bug fix: ``xrange`` is Python 2 only (NameError on Python 3);
    # ``range`` behaves identically here on both versions.
    for ii in range(conn.num):
        for ic, ie in enumerate(conn.indices[off[ii]:off[ii+1]]):
            # Shift labels towards the cell centre.
            cc = oeps * coors[ie] + eps * centres[ii]
            if dim == 3:
                ax.text(cc[0], cc[1], cc[2], ic,
                        color=color, fontsize=fontsize)
            else:
                ax.text(cc[0], cc[1], ic,
                        color=color, fontsize=fontsize)

    if show:
        plt.show()

    return ax
|
[
"[email protected]"
] | |
83b6c3223a9ea60b7456b4e43317b1614cfe87e0
|
7ce05272d21c903abc85ebc74544009aacd80c82
|
/Advance_Python/Socket_Programming/socket_programs/client.py
|
cead5a5a2d925f83e46f72d6bbd4a1b3d48a2ce3
|
[] |
no_license
|
sachinyadav3496/PythonInternBatch2018
|
8899a866f60a39b4c7eff4f5bc79ec2586833403
|
8e2610ad80c39ea747e8a6547ebe540e7b019a79
|
refs/heads/master
| 2021-06-26T09:18:58.178457 | 2020-10-03T09:49:32 | 2020-10-03T09:49:32 | 136,880,809 | 18 | 34 | null | 2020-10-03T09:49:33 | 2018-06-11T05:56:26 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 670 |
py
|
import socket

# Simple chat client: connects to the server, then alternates between
# receiving a server message and sending a user-typed reply.  Either side
# ends the session by sending the literal string 'bye'.
server_socket = socket.socket()
host = socket.gethostbyname(socket.gethostname()) # server address; change this if the server runs on a different machine
port = 12345 # port number the server is listening on
server_socket.connect((host,port))
print("Got Connection from server at {}:{} ".format(host,port))
while True :
    # Block until the server sends up to 1 KiB of data.
    smsg = server_socket.recv(1024)
    if smsg.decode().strip().lower() == 'bye' :
        print("Connection is Terminated by server")
        server_socket.close()
        break
    print("\t\t\tServer -> ",smsg.decode())
    msg = input("client->")
    server_socket.send(msg.encode())
    if msg == 'bye' :
        # Local user ended the chat; close our end and stop.
        server_socket.close()
        break
|
[
"[email protected]"
] | |
a4ec925ffdf9afa9aff09c57049d796f503f32ea
|
524c168b1b7ab4644a612f692645ae56487dea8c
|
/fwork-backend/tina/projects/migrations/0013_auto_20141210_1040.py
|
ac9ab59da8ca199cb8221bccf33e483a8493f55f
|
[] |
no_license
|
phamhongnhung2501/Taiga.Tina
|
b4fa730a9f9601e23ab19c6d937e7daf0386b1e2
|
8bc44de3a364ccd0e49e767b098589898dcabc10
|
refs/heads/master
| 2022-12-14T09:55:11.205228 | 2019-07-08T07:42:38 | 2019-07-08T07:42:38 | 195,760,755 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-07-08T07:39:32 |
Python
|
UTF-8
|
Python
| false | false | 995 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import connection
from tina.projects.userstories.models import *
from tina.projects.tasks.models import *
from tina.projects.issues.models import *
from tina.projects.models import *
def _fix_tags_model(tags_model):
    # Strip literal commas from every element of the model's ``tags`` array
    # column.  ``unnest(tags)`` expands the PostgreSQL array so rows whose
    # tags contain a comma can be located with a single LIKE query.
    table_name = tags_model._meta.db_table
    # NOTE(review): table_name comes from Django model metadata, not user
    # input, so the %-interpolation here is not an injection vector.
    query = "select id from (select id, unnest(tags) tag from %s) x where tag LIKE '%%,%%'"%(table_name)
    cursor = connection.cursor()
    cursor.execute(query)
    for row in cursor.fetchall():
        id = row[0]
        # Re-fetch via the ORM and rewrite the tags without commas.
        instance = tags_model.objects.get(id=id)
        instance.tags = [tag.replace(",", "") for tag in instance.tags]
        instance.save()
def fix_tags(apps, schema_editor):
    # RunPython entry point; only Project rows need their tags cleaned here.
    _fix_tags_model(Project)
class Migration(migrations.Migration):
    # Data-only migration: strips commas from Project tags (no schema change).

    dependencies = [
        ('projects', '0012_auto_20141210_1009'),
    ]

    operations = [
        migrations.RunPython(fix_tags),
    ]
|
[
"[email protected]"
] | |
46eb0092ec00ba666cc6bbdaa21bff606a02a170
|
6f594cc963795c69d8da3c30ca580c0405ef2d6e
|
/binaryTree/652FindDuplicateSubtrees.py
|
fbf0d850405b6d265b0194874f1be18bc6d4cea4
|
[] |
no_license
|
lo-tp/leetcode
|
25933c5b25f64f881d43748d8b2763f69614a97f
|
4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50
|
refs/heads/master
| 2022-09-07T20:32:58.487759 | 2022-09-05T03:39:50 | 2022-09-07T13:39:50 | 116,555,892 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,259 |
py
|
from collections import defaultdict
class Solution(object):
    # Three iterative solutions to LeetCode 652 (find all duplicate subtrees).
    # NOTE(review): all three methods share the same name, so only the LAST
    # definition is bound to the class; the first two are dead code kept for
    # reference.

    def findDuplicateSubtrees(self, root):
        # Variant 1: post-order traversal with an explicit stack, descending
        # the left spine; serializes each subtree into a space-joined string
        # and counts occurrences in ``data``.
        res, current, stack, data = [], '', [], defaultdict(lambda: 0)
        while stack or root:
            if root:
                stack.append((root, False, ''))
                root = root.left
            else:
                t, visited, left_str = stack.pop()
                if visited:
                    # Both children done: build this subtree's signature.
                    current = '{} {} {}'.format(left_str, t.val, current)
                    root = None
                    if data[current] == 1:
                        # Second sighting only: report each duplicate once.
                        res.append(t)
                    data[current] += 1
                else:
                    # First pop: remember the left subtree's signature and
                    # revisit this node after the right subtree.
                    stack.append((t, True, current))
                    current = ''
                    root = t.right
        return res

    def findDuplicateSubtrees(self, root):
        # Variant 2: explicit three-state machine per node (0 = unseen,
        # 1 = left pending, 2 = ready to serialize); None children append a
        # space so different shapes get different signatures.
        res, current, stack, data = [], '', [
            (root, 0, '')], defaultdict(lambda: 0)
        while stack:
            root, flag, left_str = stack.pop()
            if not root:
                current += ' '
            elif not flag:
                stack.append((root, 1, ''))
                stack.append((root.left, 0, ''))
            elif flag == 1:
                stack.append((root, 2, current))
                stack.append((root.right, 0, ''))
                current = ''
            else:
                # Wrap in 'l...r' markers to delimit subtree boundaries.
                current = 'l{}-{}-{}r'.format(left_str, root.val, current)
                if data[current] == 1:
                    res.append(root)
                data[current] += 1
        return res

    def findDuplicateSubtrees(self, root):
        # Variant 3 (the one actually used): same three-state traversal, with
        # '#' for missing children and comma-separated "val,left,right"
        # signatures; appends to ``res`` exactly when a count reaches 2.
        cur = None
        res, data, stack = [], defaultdict(lambda: 0), [(root, None, 0)]
        while stack:
            node, string, flag = stack.pop()
            if not node:
                cur = '#'
            elif not flag:
                stack.append((node, None, 1))
                stack.append((node.left, None, 0))
            elif flag == 1:
                # ``cur`` now holds the left subtree's signature.
                stack.append((node, cur, 2))
                stack.append((node.right, None, 0))
            else:
                cur = '{},{},{}'.format(node.val, string, cur)
                data[cur] += 1
                if data[cur] == 2:
                    res.append(node)
        return res
|
[
"[email protected]"
] | |
9f238e46d438784023ea24f418acbc362d03107b
|
86813bf514f3e0257f92207f40a68443f08ee44b
|
/459 重复的子字符串/459 重复的子字符串.py
|
09445982e188b913bf7d0f47bd859239932d3471
|
[] |
no_license
|
Aurora-yuan/Leetcode_Python3
|
4ce56679b48862c87addc8cd870cdd525c9d926c
|
720bb530850febc2aa67a56a7a0b3a85ab37f415
|
refs/heads/master
| 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
#label: string difficulty: easy
class Solution:
    def repeatedSubstringPattern(self, s: str) -> bool:
        """Return True if ``s`` is some proper substring repeated >= 2 times."""
        length = len(s)
        # Only divisors of len(s) no longer than half of it can be the
        # repeating unit; tiling the candidate must reproduce s exactly.
        return any(
            s[:unit] * (length // unit) == s
            for unit in range(1, length // 2 + 1)
            if length % unit == 0
        )
|
[
"[email protected]"
] | |
1e9a225fe5733b7b760390bc1f1511e3d4fc2649
|
99697559d046cdd04dd9068bd518e4da4177aaa2
|
/Finish/H065_Valid_Number.py
|
887cc224b572e863ae805b6987920e3864f81620
|
[] |
no_license
|
Azurisky/Leetcode
|
3e3621ef15f2774cfdfac8c3018e2e4701760c3b
|
8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04
|
refs/heads/master
| 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
class Solution:
    def isNumber(self, s):
        """
        Validate that ``s`` (after trimming surrounding spaces) spells a
        decimal number: optional sign, digits with at most one dot, and an
        optional signed integer exponent introduced by 'e'/'E'.

        :type s: str
        :rtype: bool
        """
        if not s:
            return False
        s = s.strip()

        has_digit = False     # digits seen in the current part (mantissa/exponent)
        sign_blocked = False  # a sign is no longer allowed at this position
        seen_exp = False      # an 'e'/'E' has already appeared
        seen_dot = False      # a '.' has already appeared (the exponent part
                              # also forbids dots)

        for ch in s:
            if ch.isdigit():
                has_digit = sign_blocked = True
            elif ch == '.' and not seen_dot:
                seen_dot = sign_blocked = True
            elif ch in 'eE' and not seen_exp and has_digit:
                # Start the exponent: it needs its own digits and may carry
                # its own leading sign, but no dot.
                has_digit = sign_blocked = False
                seen_dot = seen_exp = True
            elif ch in '+-' and not has_digit and not sign_blocked:
                sign_blocked = True
            else:
                return False
        # Valid only if the last part (mantissa or exponent) had digits.
        return has_digit
|
[
"[email protected]"
] | |
d5417d605f2204782ab1b6dd38bcb7262adc6354
|
99ae6372a5a5518543f9863a33ab21218a3a0768
|
/tests/test-all.py
|
31554599186717cf11032773e371545ac5143bde
|
[] |
no_license
|
DANS-KNAW/parthenos-widget
|
7b3578a37402069e99da8eaf0d8cf52f32c12231
|
b549b76b7f16f1338cd80c6af7952963b3a8dd63
|
refs/heads/master
| 2022-07-17T23:07:25.238193 | 2021-11-03T08:36:55 | 2021-11-03T08:36:55 | 84,067,894 | 0 | 3 | null | 2021-11-03T08:36:55 | 2017-03-06T11:56:28 |
JavaScript
|
UTF-8
|
Python
| false | false | 684 |
py
|
#!/usr/bin/python
from __future__ import print_function, absolute_import
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
#import pytest
from tests.config import MATRIX
from parthenos.core.datatojson import *
import uuid
import httpretty
import requests
import pandas as pd
import simplejson
import json
if __name__ == '__main__':
    # Manual smoke check: prints the output of a few parthenos helpers;
    # there are no assertions despite the file name.
    print ('%s' % contents(0))
    print ('%s' % gettopics("SOCIAL SCIENCE"))
    print ('%s' % gettopics("LANGUAGE STUDIES"))
    # print ('%s' % policies(4))
    # (df, fairtest) = fair(4)
    # print ('%s' % fairtest)
    # x = fairfilter(df, fairtest, 'fair')
    # print ('%s' % x.to_html())
|
[
"[email protected]"
] | |
c00bff8a97f2f0cd605b081aab99214bd019e9fd
|
fe42f1c1eefb2069eda1dd98821ba6049fb4f01a
|
/ML/P3DataAnalysisPandas/P4Combining.py
|
30cbbcdbd467feed161647f9dcf1775382909e7d
|
[] |
no_license
|
hvn2001/LearnPython
|
c1b13f6685e6e62b3c9b612e88e624925f43eb6e
|
323595df8d69e84873f74819a36b5eb36b017773
|
refs/heads/master
| 2021-03-30T06:26:55.110963 | 2020-04-10T16:13:36 | 2020-04-10T16:13:36 | 248,025,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,906 |
py
|
import pandas as pd
print('------A. Concatenation------')
df1 = pd.DataFrame({'c1': [1, 2], 'c2': [3, 4]},
index=['r1', 'r2'])
df2 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]},
index=['r1', 'r2'])
df3 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]})
concat = pd.concat([df1, df2], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1 3 5 7
r2 2 4 6 8
'''
concat = pd.concat([df2, df1, df3])
print('{}\n'.format(concat))
'''
c1 c2
r1 5 7
r2 6 8
r1 1 3
r2 2 4
0 5 7
1 6 8
'''
concat = pd.concat([df1, df3], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1.0 3.0 NaN NaN
r2 2.0 4.0 NaN NaN
0 NaN NaN 5.0 7.0
1 NaN NaN 6.0 8.0
'''
print('------B. Merging------')
mlb_df1 = pd.DataFrame({'name': ['john doe', 'al smith', 'sam black', 'john doe'],
'pos': ['1B', 'C', 'P', '2B'],
'year': [2000, 2004, 2008, 2003]})
mlb_df2 = pd.DataFrame({'name': ['john doe', 'al smith', 'jack lee'],
'year': [2000, 2004, 2012],
'rbi': [80, 100, 12]})
print('{}\n'.format(mlb_df1))
'''
name pos year
0 john doe 1B 2000
1 al smith C 2004
2 sam black P 2008
3 john doe 2B 2003
'''
print('{}\n'.format(mlb_df2))
'''
name rbi year
0 john doe 80 2000
1 al smith 100 2004
2 jack lee 12 2012
'''
mlb_merged = pd.merge(mlb_df1, mlb_df2)
print('{}\n'.format(mlb_merged))
'''
name pos year rbi
0 john doe 1B 2000 80
1 al smith C 2004 100
'''
print('------Ex: ------')
def concat_rows(df1, df2):
    """Stack df2 below df1 (row-wise concatenation; original indexes kept)."""
    return pd.concat([df1, df2])
def concat_cols(df1, df2):
    """Place df2 to the right of df1 (column-wise concat, aligned on index)."""
    return pd.concat([df1, df2], axis=1)
def merge_dfs(df1, df2):
    """Inner-join df1 and df2 on all column names they share."""
    return pd.merge(df1, df2)
|
[
"[email protected]"
] | |
25a67c4819e5f76e8597007afbef568d28dcd9f0
|
63c8b9227a6b3178d918769042ecb060acc557be
|
/symphony/cli/pyinventory/graphql/add_service_endpoint_mutation.py
|
4f7d20fa43ca5e0c5407290c9053e8a3f6f0fe27
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
snwfdhmp/magma
|
7c4898db68d2668fd39ed25f73bb9a2bc5959066
|
8b3ff20a2717337a83c8ef531fa773a851d2e54d
|
refs/heads/master
| 2020-12-06T09:06:25.806497 | 2020-01-07T18:27:09 | 2020-01-07T18:28:51 | 232,418,366 | 1 | 0 |
NOASSERTION
| 2020-01-07T21:12:28 | 2020-01-07T21:12:27 | null |
UTF-8
|
Python
| false | false | 3,003 |
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
DATETIME_FIELD = field(
metadata={
"dataclasses_json": {
"encoder": datetime.isoformat,
"decoder": fromisoformat,
"mm_field": marshmallow_fields.DateTime(format="iso"),
}
}
)
def enum_field(enum_type):
def encode_enum(value):
return value.value
def decode_enum(t, value):
return t(value)
return field(
metadata={
"dataclasses_json": {
"encoder": encode_enum,
"decoder": partial(decode_enum, enum_type),
}
}
)
class ServiceEndpointRole(Enum):
CONSUMER = "CONSUMER"
PROVIDER = "PROVIDER"
@dataclass_json
@dataclass
class AddServiceEndpointInput:
id: str
portId: str
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class AddServiceEndpointMutation:
__QUERY__ = """
mutation AddServiceEndpointMutation($input: AddServiceEndpointInput!) {
addServiceEndpoint(input: $input) {
id
name
externalId
customer {
id
name
externalId
}
endpoints {
id
port {
id
}
role
}
links {
id
}
}
}
"""
@dataclass_json
@dataclass
class AddServiceEndpointMutationData:
@dataclass_json
@dataclass
class Service:
@dataclass_json
@dataclass
class Customer:
id: str
name: str
externalId: Optional[str] = None
@dataclass_json
@dataclass
class ServiceEndpoint:
@dataclass_json
@dataclass
class EquipmentPort:
id: str
id: str
port: EquipmentPort
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class Link:
id: str
id: str
name: str
endpoints: List[ServiceEndpoint]
links: List[Link]
externalId: Optional[str] = None
customer: Optional[Customer] = None
addServiceEndpoint: Optional[Service] = None
data: Optional[AddServiceEndpointMutationData] = None
errors: Any = None
@classmethod
# fmt: off
def execute(cls, client, input: AddServiceEndpointInput):
# fmt: off
variables = {"input": input}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
|
[
"[email protected]"
] | |
28e11970a757421df8a3c2d034a2856bde5b414f
|
93582aa46c835b66a2117bf24178fd80236af89d
|
/setup.py
|
e2eaee39d2b4d1cd674afe84307252167e1f9eba
|
[] |
no_license
|
collective/collective.leadmedia
|
0fbe4e03421fcec6f026a80de80c4af28d2f218e
|
5fb3749861fd21859ae84686dc29f877859de45b
|
refs/heads/master
| 2023-08-24T01:19:19.470625 | 2019-07-23T13:30:53 | 2019-07-23T13:30:53 | 26,549,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,061 |
py
|
from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='collective.leadmedia',
version=version,
description="Adds a slideshow to any dexterity folderish type.",
long_description=open("README.rst").read(),
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Plone',
author='Andre Goncalves',
author_email='[email protected]',
url='https://github.com/collective/collective.leadmedia',
download_url='https://github.com/collective/collective.leadmedia/tarball/0.1',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
setup_requires=["PasteScript"],
paster_plugins=["ZopeSkel"],
)
|
[
"[email protected]"
] | |
326168d8de06212813ef98b555650a25305f7aab
|
fff561e0e4f351d85d038cf87569c23280622157
|
/cmsplugin_cascade/generic/cms_plugins.py
|
3eaaf072f99d2b3a564045fc617a550d4bb910eb
|
[
"MIT"
] |
permissive
|
schacki/djangocms-cascade
|
9d3e9176e54c7cca619fdc6917c38b1588bc7c88
|
2809f701a1cfa17a53539fac4d9dadaa5ebe40b7
|
refs/heads/master
| 2021-01-20T22:02:42.959467 | 2015-12-23T19:31:07 | 2015-12-23T19:31:07 | 42,931,185 | 0 | 0 | null | 2015-09-22T12:02:53 | 2015-09-22T12:02:52 | null |
UTF-8
|
Python
| false | false | 1,881 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.mixins import TransparentMixin
class SimpleWrapperPlugin(TransparentMixin, CascadePluginBase):
name = _("Simple Wrapper")
parent_classes = None
require_parent = False
allow_children = True
alien_child_classes = True
TAG_CHOICES = tuple((cls, _("<{}> – Element").format(cls))
for cls in ('div', 'span', 'section', 'article',)) + (('naked', _("Naked Wrapper")),)
glossary_fields = (
PartialFormField('tag_type',
widgets.Select(choices=TAG_CHOICES),
label=_("HTML element tag"),
help_text=_('Choose a tag type for this HTML element.')
),
)
@classmethod
def get_identifier(cls, instance):
identifier = super(SimpleWrapperPlugin, cls).get_identifier(instance)
tag_name = dict(cls.TAG_CHOICES).get(instance.glossary.get('tag_type'))
if tag_name:
return format_html('{0}{1}', identifier, tag_name)
return identifier
def get_render_template(self, context, instance, placeholder):
if instance.glossary.get('tag_type') == 'naked':
return 'cascade/generic/naked.html'
return 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(SimpleWrapperPlugin)
class HorizontalRulePlugin(CascadePluginBase):
name = _("Horizontal Rule")
parent_classes = None
allow_children = False
tag_type = 'hr'
render_template = 'cascade/generic/single.html'
glossary_fields = ()
plugin_pool.register_plugin(HorizontalRulePlugin)
|
[
"[email protected]"
] | |
d1961e74a2e79af96908d797e62f8c02b98f3feb
|
6e68ef0a53ce48da79b4906d85fc9785deee4ca5
|
/Reverse/urls.py
|
393afb2306b734c2dd1c0ad59846b0a9bf76a76c
|
[] |
no_license
|
shubhamkharose/CODEDAEMON
|
e3ed8050b5c43ec146c6d253d06121fc37cdb2d4
|
6df7af35c51f5f54b2e2167e3d64d163c9a688f9
|
refs/heads/master
| 2021-04-06T00:58:01.515828 | 2018-03-15T11:04:31 | 2018-03-15T11:04:31 | 125,353,062 | 1 | 4 | null | 2019-10-28T04:03:58 | 2018-03-15T10:48:53 |
JavaScript
|
UTF-8
|
Python
| false | false | 930 |
py
|
"""website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views

# URL namespace for reversing these routes, e.g. reverse('Reverse:index', ...).
app_name = 'Reverse'
# The bare string below is a leftover, unfinished author note kept verbatim;
# it is a no-op at runtime.
'''
app_name is added bcoz name of
'''
urlpatterns = [
    # /check/<problem_name>/ -> views.check (problem names: alphanumerics + '_')
    url (r'^check/(?P<problem_name>[0-9A-Za-z_]+)/$',views.check,name='check'),
    # /<problem_name>/ -> views.index
    url (r'^(?P<problem_name>[0-9A-Za-z_]+)/$',views.index,name='index'),
]
|
[
"[email protected]"
] | |
67175736189e77eb4d95c43ea91bc66748416e04
|
8a55b9000920b75f937073c043249090c13b04b1
|
/mlcomp/utils/config.py
|
b036f3030ec955ff17b4b4b841ebe710cec54587
|
[
"MIT"
] |
permissive
|
jingmouren/mlcomp
|
209f43296325387447549d1d206ffaeab5739d8e
|
3fd251429be3892903ab6b3361bcd69c6ea9eeff
|
refs/heads/master
| 2020-07-10T04:31:26.928425 | 2019-08-22T10:07:07 | 2019-08-22T10:07:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,573 |
py
|
from collections import defaultdict
from typing import List
import os
import json
import albumentations as A
from mlcomp import DATA_FOLDER
from mlcomp.utils.io import yaml_load
from mlcomp.utils.misc import dict_flatten, dict_unflatten
class Config(dict):
    """Dict subclass holding an experiment configuration."""

    @property
    def data_folder(self):
        # Per-project data directory under the global DATA_FOLDER root;
        # the project name is read from the 'info' section.
        return os.path.join(DATA_FOLDER, self['info']['project'])

    @staticmethod
    def from_json(config: str):
        # ``config`` is a JSON document given as a string.
        return Config(json.loads(config))

    @staticmethod
    def from_yaml(config: str):
        # ``config`` is a YAML document given as a string.
        return Config(yaml_load(config))
def merge_dicts_smart(target: dict, source: dict, sep='/'):
    """
    Merge ``source`` into ``target`` where each source key may address a
    (flattened) target key by any trailing path suffix, as long as the
    suffix is unambiguous.  Paths are joined with ``sep``.
    """
    target_flatten = dict_flatten(target)

    # Map every trailing suffix of each flattened key back to its full key.
    mapping = defaultdict(list)
    for k, v in target_flatten.items():
        parts = k.split(sep)
        for i in range(len(parts) - 1, -1, -1):
            key = sep.join(parts[i:])
            mapping[key].append(k)

    for k, v in source.items():
        # A suffix matching more than one full key is ambiguous — refuse it.
        assert len(mapping[k]) == 1, f'ambiguous mapping for {k}'
        key = mapping[k][0]
        target_flatten[key] = v
    return dict_unflatten(target_flatten)
def parse_albu(configs: List[dict]):
    """
    Build a list of albumentations transforms from a list of config dicts.

    Each dict must contain a 'name' key naming an albumentations class;
    'Compose' nests recursively via its 'items' list.  Remaining keys are
    passed through as keyword arguments to the transform.
    """
    res = []
    for config in configs:
        assert 'name' in config, f'name is required in {config}'
        config = config.copy()  # do not mutate the caller's dict
        name = config.pop('name')
        if name == 'Compose':
            items = config.pop('items')
            aug = A.Compose(parse_albu(items), **config)
        else:
            aug = getattr(A, name)(**config)

        res.append(aug)
    return res
__all__ = ['Config', 'merge_dicts_smart', 'parse_albu']
|
[
"[email protected]"
] | |
b75bd97af0d87c71caf404ca4aed646d76e18dca
|
2ef27655cd1deb9de4074249e559269abd334fa1
|
/6 kyu/Decipher Student Messages.py
|
cbe021ef697c6a0afe2e18953a1c584352271249
|
[] |
no_license
|
sieczkah/Codewars_KATA
|
c7606b9a88693e2550af0ef55808f34c00e77b73
|
68d5d4a133a015e49bcdbff29ee45e3baefcd652
|
refs/heads/main
| 2023-05-06T03:59:01.403765 | 2021-05-24T19:36:34 | 2021-05-24T19:36:34 | 334,698,441 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
"""https://www.codewars.com/kata/5a1a144f8ba914bbe800003f/train/python"""
def decipher_message(message):
    """Undo the square-grid cipher by transposing the n x n character grid.

    The message length is always a perfect square; reading every n-th
    character starting from each of the first n offsets restores the text.
    """
    side = int(len(message) ** 0.5)
    rows = (message[start::side] for start in range(side))
    return ''.join(rows)
|
[
"[email protected]"
] | |
959ee0746b95977a7b889b6b12e476719844568f
|
7516dfcd3d2e012d98fa3aec45aafe0e2c64ffe1
|
/py/utest/utest_fsoci.py
|
d3c0f7ac8809b0c7282c29600f364a91671f08a5
|
[] |
no_license
|
ReiMatsuzaki/naewdy2
|
64e1c06a7eca228811c83e49eed57c9502ba1c2e
|
10f0110417b6d2699a688c64cdf39df0ef6d06c2
|
refs/heads/master
| 2021-03-16T10:12:02.856923 | 2018-03-15T03:30:00 | 2018-03-15T03:30:00 | 115,374,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 910 |
py
|
import unittest
from numpy import sqrt
from naewdy2.fsoci import *
class TestFsoci(unittest.TestCase):
    # Unit tests for the second-quantization helpers in naewdy2.fsoci.
    # NOTE(review): the expectations below suggest sign_ai returns 0 when the
    # orbital is absent from the occupation list and an alternating +/-1 sign
    # otherwise, and aiaj composes two such operators — confirm against the
    # fsoci implementation.

    def test_sign_ai(self):
        self.assertAlmostEqual(0, sign_ai([1,2,3], 4))
        self.assertAlmostEqual(1, sign_ai([1,2,3], 3))
        self.assertAlmostEqual(-1, sign_ai([1,2,3], 2))
        self.assertAlmostEqual(1, sign_ai([1,2,3], 1))

    def test_aiaj(self):
        self.assertAlmostEqual(1, aiaj([1,2,3], 1, 1, [1,2,3]))
        self.assertAlmostEqual(0, aiaj([1,2,3], 4, 1, [1,2,3]))
        self.assertAlmostEqual(1, aiaj([1,2,4], 4, 3, [1,2,3]))
        self.assertAlmostEqual(-1, aiaj([1,3,4], 4, 2, [1,2,3]))

    def test_eij(self):
        self.assertAlmostEqual(sqrt(2.0),
                               eij([1,2,3], [1,2,3],
                                   1, 1,
                                   [1,2,3], [1,2,3]))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
c6887a8ffe4aaa0df0666ed9ab5b8c601c225868
|
941ace80571b53f53ab4e1f44d7b3ee9300e6a84
|
/chapter02/lxml_example.py
|
3bf1a23c01ccdd4989a11da724357915b61829e3
|
[
"MIT"
] |
permissive
|
qigezai/python-scrap
|
81d3855caba095cab36f204a6b74c55f43cb7f15
|
3a9ad37a94008a8071b84e64d90c46f59580cca0
|
refs/heads/master
| 2021-10-10T06:26:18.023662 | 2019-01-07T14:46:19 | 2019-01-07T14:46:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/5 下午4:50
import urllib2
import lxml.html
def scrape(html):
    # Parse the page and pull the text of the 'neighbours' table cell via a
    # CSS selector (raises IndexError if the row is missing).
    tree = lxml.html.fromstring(html)
    td = tree.cssselect('tr#places_neighbours__row > td.w2p_fw')[0]
    area = td.text_content()
    return area
if __name__ == '__main__':
    # Python 2 script (print statement, urllib2): fetch the example page and
    # print the scraped value.
    html = urllib2.urlopen('http://example.webscraping.com/view/United-Kingdom-239').read()
    print scrape(html)
|
[
"[email protected]"
] | |
10c3a4d5e3d2f35da492858f8236fd8081029116
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/R/robertionita/bookyfy.py
|
9069f400e7f3e52096364d8732181bcdb8bb1ad9
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,311 |
py
|
# bestsellers from the kindle book store
import scraperwiki
import lxml.html
import time
import re
for x in range(1,6):
html = scraperwiki.scrape("http://www.amazon.com/Best-Sellers-Kindle-Store-eBooks/zgbs/digital-text/154606011/ref=zg_bs_154606011_pg_" + str(x) +
"?_encoding=UTF8&pg=" + str(x))
root = lxml.html.fromstring(html)
pos = 0
for el in root.cssselect("div.zg_itemImmersion"):
title = el.cssselect("div.zg_title a")[0].text_content()
link = el.cssselect("div.zg_title a")[0].attrib['href'].rstrip('\n') # Strip newline characters, funky shit happens if you don't
#rank = el.cssselect("span.zg_rankNumber")[0].text_content()
price = el.cssselect("strong.price")[0].text_content()
#release = el.cssselect("div.zg_releaseDate")[0].text_content()
author = el.cssselect("div.zg_byline")[0].text_content()
days_in_list = el.cssselect("td.zg_daysInList")[0].text_content()
pos += 1
booklink = scraperwiki.scrape(link)
bookpage = lxml.html.fromstring(booklink)
def get_rank(bookpage):
## For each book detail page, select the body element for scraping wizardy
for el in bookpage.cssselect("body"):
## Scraping rank
rank = el.cssselect("li#SalesRank b")[0].tail
## Extract rank number from book page using regex
re1='.*?' # Non-greedy match on filler
re2='(\\d+)' # Integer Number 1
rg = re.compile(re1+re2,re.IGNORECASE|re.DOTALL)
m = rg.search(rank)
if m:
rank=m.group(1)
#print "("+int1+")"+"\n"
print "Rank of book:"
print rank
#print lxml.html.tostring(rank)
return rank
rank = get_rank(bookpage)
print rank
record = {"Title" : title,
"Author" : author,
"Link" : link,
"Ranking" : get_rank(bookpage),
"Price" : price,
"sdate" : time.strftime( "%Y-%m-%d" )
}
scraperwiki.sqlite.save(unique_keys=["sdate"], data=record)
|
[
"[email protected]"
] | |
dbddf5f34bf33ff7cb4facd928b2c338fa2e36bc
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j20204+0704/sdB_GALEX_J20204+0704_lc.py
|
9e2b035b6d7b472d8473d414942cee17af805004
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 355 |
py
|
from gPhoton.gAperture import gAperture

def main():
    # Generate the NUV light curve for sdB_GALEX_J20204+0704 with gPhoton's
    # gAperture: 30 s bins, 20" aperture (0.00555556 deg) with a
    # 21.5"-37.4" background annulus, gaps capped at 1000 s.
    # NOTE(review): the csvfile path contains a space before the final '/'
    # ("...J20204+0704 /sdB...") — likely a template-generation artifact;
    # confirm the intended output directory before running.
    gAperture(band="NUV", skypos=[305.1135,7.070683], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J20204+0704 /sdB_GALEX_J20204+0704_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
808ee195b759a16cb41071c38fd23df333d355a7
|
c25b4125b76654452fc2d5cc2f0f7a47643df177
|
/setup.py
|
bfee8ef032e3226e132a395cb98d6a4c1d1398ae
|
[
"MIT"
] |
permissive
|
dfjsdkfj/grparks
|
416b7fdd68a533573c5f4bb53dd7bf748a80c221
|
365717804fafb27c6e3d65322b6fd6b2a9315aa7
|
refs/heads/master
| 2020-12-24T09:02:01.982187 | 2016-02-02T20:40:06 | 2016-02-02T20:40:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
#!/usr/bin/env python
"""
Setup script for ParkFinder.
"""
import setuptools
from parks import __project__, __version__
import os

# Read the long-description sources with context managers so each file handle
# is closed promptly instead of leaking for the life of the process.
if os.path.exists('README.rst'):
    with open('README.rst') as readme_file:
        README = readme_file.read()
else:
    README = ""  # a placeholder, readme is generated on release
with open('CHANGES.md') as changes_file:
    CHANGES = changes_file.read()
with open('requirements.txt') as requirements_file:
    REQUIREMENTS = requirements_file.readlines()

setuptools.setup(
    name=__project__,
    version=__version__,
    description="Find and validate park data on OpenStreetMap.",
    url='https://github.com/friendlycode/gr-parks',
    author='Jace Browning',
    author_email='[email protected]',
    packages=setuptools.find_packages(),
    entry_points={'console_scripts': []},
    long_description=(README + '\n' + CHANGES),
    license='MIT',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=REQUIREMENTS,
)
|
[
"[email protected]"
] | |
ba2090eb36670814f7650ef6ffa2e6fb27f37fb5
|
692654b45228d813c8dc4c9ade0a6836cd2e7f17
|
/other_tools/check_token.py
|
2904c24fc086dd871cd4ade0faf53c78feebc2da
|
[] |
no_license
|
sunary/nlp
|
dd67dce0a2001670efe0e1dc5f5ef7b014845982
|
a9fa796118d51dd80cc9525d50247632caa00b7f
|
refs/heads/master
| 2021-01-22T10:02:44.975681 | 2017-05-20T08:43:23 | 2017-05-20T08:43:23 | 43,935,102 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,720 |
py
|
__author__ = 'sunary'
class CheckToken():
    """Maintain a sorted list of "black" token sequences and scan a checker
    sequence against them.

    NOTE(review): this is Python 2 code — `/` on ints below is floor
    division, and the __main__ demo uses print statements.
    """
    def __init__(self):
        # Sequence of tokens to scan (set via set_checker).
        self.checker_token = []
        # Sorted list of blacklisted token sequences.
        self.black_token = []
    def set_checker(self, checker):
        self.checker_token = checker
    def add_token(self, token):
        '''
        add token to the sorted list of token
        Args:
            token: the token need to be added
        '''
        if self.black_token:
            position = self._find(token)
            # Insert only when not already present, keeping the list sorted.
            if token != self.black_token[position]:
                self.black_token[position + 1:position + 1] = [token]
        else:
            self.black_token.append(token)
        pass
    def _find(self, token):
        # Binary search over black_token; appears to return the index of the
        # last element <= token (-1 when token sorts before every element).
        if not token:
            return 0
        left_position = 0
        right_position = len(self.black_token) - 1
        mid_position= (left_position + right_position)/2
        mid_value = self.black_token[mid_position]
        while left_position <= right_position:
            if token < mid_value:
                right_position = mid_position - 1
            else:
                left_position = mid_position + 1
            mid_position = (left_position + right_position)/2
            mid_value = self.black_token[mid_position]
        return left_position - 1
    def check_token(self):
        '''
        check any token in the sorted list of tokens is in the list
        Returns:
            bool: True if any token is in the list
        Examples:
            >>> set_checker([1, 2, 3, 4, 5, 6])
            >>> add_token([2, 3])
            >>> check_token()
            True
            >>> add_token([3, 4, 6])
            False
        '''
        # NOTE(review): despite the docstring, every code path falls through
        # to `return False`; matched sequences are instead deleted from
        # black_token as a side effect.
        for i in range(len(self.checker_token)):
            len_token = 1
            while True:
                # Grow the window starting at i while it still matches a
                # prefix of some blacklisted sequence.
                list_token = self.checker_token[i: i + len_token]
                position = self._find(list_token) + 1
                if self.black_token[position - 1] == list_token:
                    del self.black_token[position - 1]
                if position >= len(self.black_token) or len_token > len(self.black_token[position]) or len_token > len(list_token) or\
                        self.black_token[position][len_token - 1] != list_token[len_token - 1]:
                    break
                len_token += 1
        return False
if __name__ == '__main__':
    # Demo run (Python 2 print statements): blacklist a few sequences, then
    # scan the checker list, which deletes matched sequences from black_token.
    check_token = CheckToken()
    check_token.set_checker([1, 2, 3, 2, 2, 4, 45, 46, 4, 45, 52, 1, 21, 4, 5, 3, 4, 5, 1, 2])
    check_token.add_token([1, 2])
    check_token.add_token([5, 2])
    check_token.add_token([3, 4, 1])
    check_token.add_token([3, 4])
    check_token.add_token([2, 2])
    # Before / after: shows which blacklisted sequences the scan consumed.
    print check_token.black_token
    check_token.check_token()
    print check_token.black_token
|
[
"[email protected]"
] | |
cc33910210b5a0f0c332798673e332c4b8cb7eb7
|
f8aa7306eeea9d2eafc400392acbdff931306e57
|
/tests/test_cli.py
|
abf9c2f77d6c6a7a256664301c1113bc18566435
|
[
"Apache-2.0"
] |
permissive
|
b-jazz/warehouse
|
929d1a0e7b4de3fd0596ff8334bda31ab5856bdc
|
8c5414d709c6fd04c1b013ded680057a7def0833
|
refs/heads/master
| 2020-12-26T00:34:54.053900 | 2014-03-08T20:30:25 | 2014-03-08T20:30:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,425 |
py
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import werkzeug.serving
from warehouse.cli import ServeCommand
from warehouse.serving import WSGIRequestHandler
def test_serve(monkeypatch):
    """ServeCommand must forward its arguments to werkzeug's run_simple."""
    recorded = pretend.call_recorder(lambda *a, **kw: None)
    monkeypatch.setattr(werkzeug.serving, "run_simple", recorded)

    # Five opaque stand-ins; only identity matters for the assertion below.
    stubs = [pretend.stub() for _ in range(5)]
    host, port, app, use_reloader, use_debugger = stubs

    command = ServeCommand()
    command(
        app, host, port,
        reloader=use_reloader,
        debugger=use_debugger,
    )

    expected = pretend.call(
        host, port, app,
        use_reloader=use_reloader,
        use_debugger=use_debugger,
        request_handler=WSGIRequestHandler,
    )
    assert recorded.calls == [expected]
|
[
"[email protected]"
] | |
93250100f4dea25b292e8471b70ae83b71cce42f
|
e582d60b7996faf7b87c6d857613e63581d415b9
|
/elliot/recommender/visual_recommenders/VNPR/visual_neural_personalized_ranking_model.py
|
858a318f1ec3594cc6a9eef6e489659da71b7b15
|
[] |
no_license
|
Abdel57Grota/Reenvisioning-the-comparison-between-Neural-Collaborative-Filtering-and-Matrix-Factorization
|
d6e51c32094550789673846acdf9891557b790c1
|
2a2b0148e881cf8ba45c48ad9d42f52421585284
|
refs/heads/main
| 2023-09-03T09:47:41.894117 | 2021-11-09T09:17:35 | 2021-11-09T09:17:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,024 |
py
|
"""
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo, Daniele Malitesta, Felice Antonio Merra'
__email__ = '[email protected], [email protected], [email protected], [email protected]'
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.random.set_seed(0)
class VNPRModel(keras.Model):
    """Visual Neural Personalized Ranking (VNPR) model.

    Scores a (user, item) pair by concatenating the element-wise product of
    user/item MF embeddings with a frozen visual feature vector for the item,
    then passing the result through an MLP. Positive and negative items use
    separate item embeddings and separate MLP towers, trained with a pairwise
    softplus ranking loss plus L2 regularization.
    """
    def __init__(self,
                 num_users,
                 num_items,
                 embed_mf_size, l_w, mlp_hidden_size, dropout, learning_rate=0.01,
                 emb_image=None,
                 name="VNPR",
                 **kwargs):
        """
        Args:
            num_users: number of users.
            num_items: number of items.
            embed_mf_size: dimensionality of the MF embeddings.
            l_w: L2 regularization weight.
            mlp_hidden_size: iterable of hidden-layer sizes for both MLP towers.
            dropout: dropout rate applied before each dense layer.
            learning_rate: Adam learning rate.
            emb_image: precomputed visual feature matrix, one row per item
                (stored as a non-trainable variable).
        """
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(42)
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.l_w = l_w
        self.mlp_hidden_size = mlp_hidden_size
        self.dropout = dropout
        self.initializer = tf.initializers.GlorotUniform()
        # Shared user embedding; two item embeddings (one per MLP tower).
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size,
                                                        embeddings_initializer=self.initializer, name='U_MF',
                                                        dtype=tf.float32)
        self.item_mf_embedding_1 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
                                                          embeddings_initializer=self.initializer, name='I_MF_1',
                                                          dtype=tf.float32)
        self.item_mf_embedding_2 = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size,
                                                          embeddings_initializer=self.initializer, name='I_MF_2',
                                                          dtype=tf.float32)
        self.emb_image = emb_image
        # Frozen visual features: looked up per item, never updated.
        self.F = tf.Variable(
            self.emb_image, dtype=tf.float32, trainable=False)
        self.mlp_layers_1 = keras.Sequential()
        for units in mlp_hidden_size:
            # We can have a deeper MLP. In the paper is directly to 1
            self.mlp_layers_1.add(keras.layers.Dropout(dropout))
            self.mlp_layers_1.add(keras.layers.Dense(units, activation='relu'))
        self.mlp_layers_2 = keras.Sequential()
        for units in mlp_hidden_size:
            # We can have a deeper MLP. In the paper is directly to 1
            self.mlp_layers_2.add(keras.layers.Dropout(dropout))
            self.mlp_layers_2.add(keras.layers.Dense(units, activation='relu'))
        self.optimizer = tf.optimizers.Adam(learning_rate)
    @tf.function
    def call(self, inputs, training=None, mask=None):
        """Score a (user, item1, item2) triple through both towers.

        Returns the two squeezed MLP outputs plus the three embeddings
        (needed by train_step for regularization).
        """
        user, item1, item2 = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item1)
        item_mf_e_2 = self.item_mf_embedding_2(item2)
        feature_e_1 = tf.nn.embedding_lookup(self.F, item1)
        feature_e_2 = tf.nn.embedding_lookup(self.F, item2)
        embedding_input_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e_1], axis=2)  # [batch_size, embedding_size]
        mlp_output_1 = self.mlp_layers_1(embedding_input_1)  # [batch_size, 1]
        embedding_input_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e_2], axis=2)
        mlp_output_2 = self.mlp_layers_2(embedding_input_2)  # [batch_size, 1]
        return tf.squeeze(mlp_output_1), tf.squeeze(mlp_output_2), user_mf_e, item_mf_e_1, item_mf_e_2
    @tf.function
    def train_step(self, batch):
        """One optimization step on a (user, positive, negative) batch.

        Returns the (regularized) pairwise softplus ranking loss.
        """
        with tf.GradientTape() as tape:
            user, pos, neg = batch
            # Clean Inference
            mlp_output_1, mlp_output_2, user_mf_e, item_mf_e_1, item_mf_e_2 = self.call(inputs=(user, pos, neg),
                                                                                        training=True)
            # Clip the score difference to avoid overflow in softplus.
            difference = tf.clip_by_value(mlp_output_1 - mlp_output_2, -80.0, 1e8)
            loss = tf.reduce_sum(tf.nn.softplus(-difference))
            # Regularization Component
            reg_loss = self.l_w * tf.reduce_sum([tf.nn.l2_loss(user_mf_e),
                                                 tf.nn.l2_loss(item_mf_e_1),
                                                 tf.nn.l2_loss(item_mf_e_2)])
            # Loss to be optimized
            loss += reg_loss
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return loss
    @tf.function
    def predict(self, inputs, training=False, **kwargs):
        """
        Get full predictions on the whole users/items matrix.
        Returns:
            The matrix of predicted values.
        """
        u, i = inputs
        # Average of the two tower scores for the same item.
        output_1, output_2, _, _, _ = self.call(inputs=(u, i, i), training=training)
        return (output_1 + output_2) * 0.5
    @tf.function
    def get_recs(self, inputs, training=False, **kwargs):
        """
        Get full predictions on the whole users/items matrix.
        Returns:
            The matrix of predicted values.
        """
        user, item = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e_1 = self.item_mf_embedding_1(item)
        item_mf_e_2 = self.item_mf_embedding_2(item)
        feature_e = tf.nn.embedding_lookup(self.F, item)
        mf_output_1 = tf.concat([user_mf_e * item_mf_e_1, feature_e], axis=2)  # [batch_size, embedding_size]
        mf_output_2 = tf.concat([user_mf_e * item_mf_e_2, feature_e], axis=2)  # [batch_size, embedding_size]
        mlp_output_1 = self.mlp_layers_1(mf_output_1)  # [batch_size, 1]
        mlp_output_2 = self.mlp_layers_2(mf_output_2)  # [batch_size, 1]
        return tf.squeeze((mlp_output_1+mlp_output_2)/2)
    @tf.function
    def get_top_k(self, preds, train_mask, k=100):
        """Top-k scores/indices per user, masking already-seen items to -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, -np.inf), k=k, sorted=True)
|
[
"[email protected]"
] | |
a0e4d0fc0edadaf6b668bd6570f6c2ba34a2fc9e
|
a09740e643d6277ada23c82d8e87853a1cd1a9e5
|
/Z_ALL_FILE/Py/omdf5.py
|
b033ff3d43a5953248d534cd87fd3b5182354496
|
[
"Apache-2.0"
] |
permissive
|
FuckBrains/omEngin
|
c5fb011887c8b272f9951df3880a879456f202e8
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
refs/heads/main
| 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 374 |
py
|
import pandas as pd

# Daily figures for two revenue streams; all three lists must be equal length.
dates = ['April-10', 'April-11', 'April-12', 'April-13', 'April-14', 'April-16']
income1 = [10, 20, 10, 15, 10, 12]
income2 = [20, 30, 10, 5, 40, 13]

# Dict keys become the DataFrame column names.
df = pd.DataFrame({"Date": dates,
                   "Income_1": income1,
                   "Income_2": income2})

# Build one human-readable summary line per row and print the resulting Series.
print(df.apply(
    lambda row: "Total income in " + row["Date"] + " is:" + str(row["Income_1"] + row["Income_2"]),
    axis=1,
))
|
[
"[email protected]"
] | |
2068d1710140295cd665f7971b3655a7f2367e15
|
f0592d39eaf5f8bcbe46c4b16f6fa631be48887f
|
/tests/contrib/autoguide/test_hessian.py
|
f26a124db03826f7d6b1f111d1e4ca602e1d5ab1
|
[
"MIT"
] |
permissive
|
wsgharvey/pyro
|
0bfc762a20c4bcbbe30e61adbcc2c33e32effdb5
|
5c3ef54050d9ad566e5965174d4ad51bd37e55dd
|
refs/heads/dev
| 2021-05-06T18:57:58.458691 | 2018-10-10T01:48:52 | 2018-10-10T01:48:52 | 111,995,379 | 0 | 0 | null | 2017-11-25T10:33:12 | 2017-11-25T10:33:12 | null |
UTF-8
|
Python
| false | false | 827 |
py
|
from __future__ import absolute_import, division, print_function
import torch
import pyro.distributions as dist
from pyro.contrib.autoguide import _hessian
from tests.common import assert_equal
def test_mvn():
    """Hessian of a multivariate normal log-density equals minus its precision."""
    factor = torch.randn(3, 10)
    covariance = factor @ factor.t()
    mvn = dist.MultivariateNormal(covariance.new_zeros(3), covariance)
    point = torch.randn(3, requires_grad=True)
    log_density = mvn.log_prob(point)
    assert_equal(_hessian(log_density, point), -mvn.precision_matrix)
def test_multi_variables():
    """_hessian over a tuple of variables is assembled blockwise."""
    u = torch.randn(3, requires_grad=True)
    v = torch.randn(3, requires_grad=True)
    scalar = (u ** 2 * v + v ** 3).sum()
    hessian = _hessian(scalar, (u, v))
    # Analytic blocks of the Hessian of sum(u^2 * v + v^3).
    block_uu = torch.diag(2 * v)
    block_uv = torch.diag(2 * u)
    block_vv = torch.diag(6 * v)
    top = torch.cat([block_uu, block_uv])
    bottom = torch.cat([block_uv, block_vv])
    assert_equal(hessian, torch.cat([top, bottom], dim=1))
|
[
"[email protected]"
] | |
81fb67bfbbafced31af6e9a8ec85def9ce72c428
|
4b8b0be0588f9e5249729f165b72a6b38324837d
|
/glycresoft_ms2_classification/prediction_tools/__init__.py
|
52e2cd4a029490a37c2b53ed85f7619bf145d4ca
|
[] |
no_license
|
GlycReSoft2/embed_tandem_ms_classifier
|
5e2f569f2b74f2f14f1c1c0cede32de99c150890
|
0495f2234562a9c5dd02d545800c077df2305387
|
refs/heads/master
| 2020-06-02T09:32:55.457664 | 2015-06-20T21:30:19 | 2015-06-20T21:30:19 | 22,615,207 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
try:
from data_processing import prepare_model_file
from data_processing import save_model_file
from data_processing import call_by_coverage
from data_processing import determine_ambiguity
from data_processing import PredictionResults
from data_processing import convert_csv_to_nested
from classifier_definitions import *
except ImportError:
print("Unable to import parts of prediction_tools")
from .constants import constants
|
[
"[email protected]"
] | |
49a442b058c1eb081db28a321b0d5020c9dec449
|
a622e8b295d799b7b9125e2b15243b8bdae1dc09
|
/1908/190826/미로문제/미로문제.py
|
58c26cc2d9d42d035507cc03d22855d8235c45a4
|
[] |
no_license
|
otterji/algorithms
|
3a36c04bacc22c46f1ee220b3b129cda876db797
|
ebd0ade0cd1de8e489d015aa0b2833afeab3898e
|
refs/heads/master
| 2020-11-26T02:48:13.299848 | 2020-04-11T12:09:41 | 2020-04-11T12:09:41 | 228,942,526 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,465 |
py
|
# The start (2) and goal (3) cells are not always in the first row, so the
# start position must be located first.
# Solvable with DFS (or backtracking).
# Task: report whether the goal is reachable from the start.
import sys
sys.stdin = open("input.txt", "r")
T = int(input())
for tc in range(1, T+1):
    N = int(input())
    # Maze grid: 0 = open, 1 = wall, 2 = start, 3 = goal.
    miro = [list(map(int,input())) for _ in range(N)]
    for i in range(N):
        for j in range(N):
            if miro[i][j] == 2:
                start = (i, j)
                break
    stack = []
    visited = []
    dx = [0, 0, -1, 1] # row offsets for the four moves
    dy = [-1, 1, 0, 0] # column offsets for the four moves
    def DFS(miro, s, g):
        # Iterative DFS from (s, g); returns 1 if a goal cell (3) is
        # adjacent to any reachable cell, else 0.
        x, y = s, g
        stack.append((x, y))
        while stack:
            x = stack[-1][0]
            y = stack[-1][1]
            for i in range(4):
                if 0 <= dx[i] + x <= N-1 and 0 <= dy[i] + y <= N-1: # if within bounds
                    if miro[dx[i] + x][dy[i] + y] == 3:
                        return 1
                    if miro[dx[i] + x][dy[i] + y] == 0 and (dx[i] + x, dy[i] + y) not in visited:
                        x = dx[i] + x
                        y = dy[i] + y
                        stack.append((x, y))
                        visited.append((x, y))
                        break # without this break the answer comes out wrong
                else:
                    stack.pop()
            return 0
    result = DFS(miro, start[0], start[1])
    print('#{} {}'.format(tc, result))
|
[
"[email protected]"
] | |
9f811d0e5fca8f23ad4e3fe6e2188485c4722a37
|
7ee8a3bc4fbe8e094a4acf0bc7dd58899a5f4d3e
|
/src/djnext_example/artist/migrations/0001_initial.py
|
47c807a90b452b75770bc42c2fff12d3e484a197
|
[] |
no_license
|
yourlabs/djnext
|
95798acf66fb3b507ea701cce31e40f1bcdf2b1d
|
76516e2d76495300385223265878b5d30641c965
|
refs/heads/master
| 2023-02-09T06:40:07.051724 | 2018-05-13T21:37:24 | 2018-05-13T21:37:24 | 133,148,115 | 36 | 5 | null | 2023-01-26T03:23:41 | 2018-05-12T13:20:29 |
Python
|
UTF-8
|
Python
| false | false | 561 |
py
|
# Generated by Django 2.0.4 on 2018-05-12 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the artist app — nothing to depend on yet.
    initial = True
    dependencies = [
    ]
    operations = [
        # Creates the Artist table: auto-increment PK plus a name column,
        # with default ordering by name.
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
    ]
|
[
"[email protected]"
] | |
34b64673ff08d394dce6f7563327c1fdc93549b7
|
256746f29f9995accd4fee35b9b8981264ca2e37
|
/Ch06/2017-9-25.py
|
7d2a5f71b389e7ec916d60249be31ee662dff0f2
|
[] |
no_license
|
Vagacoder/Python_for_everyone
|
adadd55561b2200d461afbc1752157ad7326698e
|
b2a1d1dcbc3cce5499ecc68447e1a04a8e59dc66
|
refs/heads/master
| 2021-06-22T00:26:02.169461 | 2019-05-25T16:06:04 | 2019-05-25T16:06:04 | 114,508,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
##Ch06 R6.5
from random import *

# R6.5: ten distinct random integers from 1..10. The original hand-rolled
# rejection loop is exactly what random.sample implements.
value = sample(range(1, 11), 10)
print(value)

##Ch06 R6.6
# R6.6: ten distinct random integers from 1..100, then report the extremes.
value = sample(range(1, 101), 10)
print(value)

# Use the built-in max()/min() instead of a manual scan that shadowed them.
print("Max is: %d" % max(value))
print("Min is: %d" % min(value))
|
[
"[email protected]"
] | |
57e6299d4c59ae36b3a95d328a5793886a62834a
|
d6f7ac9541ec803db6f3b528030f6dd94bf2c1fe
|
/bootcamp_module09/core/tests/test_student_59.py
|
9836fc05f38771dec8e001f19bb7483049077493
|
[
"BSD-3-Clause"
] |
permissive
|
poloxu/bisb-bootcamp-2021-module09
|
c6182abf2b04621e79cec21102da23aabd4fb307
|
46c146e2ffdeebf3b95abcd8fe382f982ce67cb6
|
refs/heads/master
| 2023-07-29T23:22:22.874853 | 2021-09-17T16:59:55 | 2021-09-17T16:59:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 757 |
py
|
from bootcamp_module09.core.student_59 import count_substring # noqa
def test_count_substring_single():
    """A pattern occurring exactly once is counted once."""
    sequence, pattern = "CGCTAGCGT", "TAG"
    assert count_substring(sequence, pattern) == 1
def test_count_substring_repeated():
    """Every occurrence of a repeated pattern is counted."""
    sequence, pattern = "AGCTAGCAGT", "AGC"
    assert count_substring(sequence, pattern) == 2
def test_count_substring_none():
    """An absent pattern yields a count of zero."""
    sequence, pattern = "AGTCCCCTAGA", "AAA"
    assert count_substring(sequence, pattern) == 0
|
[
"[email protected]"
] | |
4a1d6bf2ad0501abe44630ea764ba4fb0f30dd56
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma227.py
|
2ebfa1038e0dd1e6ab87822f17605f3c0fadb833
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,536 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites every CZPowGate operation.

    Each CZPowGate is replaced by a plain CZ followed by two X layers on the
    same qubits (the two X layers cancel each other).

    NOTE(review): substituting a plain CZ for a general CZPowGate only
    preserves the unitary when the exponent is 1 — presumably intentional
    for this generated benchmark circuit.
    """
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only rewrite genuine gate operations whose gate is a CZPowGate;
        # everything else is left untouched (implicit None).
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the fixed 4-qubit benchmark circuit and measure all qubits.

    The numbered comments come from the circuit generator. The parameter
    `n` is unused here; the gate list is hard-coded.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.X.on(input_qubit[1])) # number=10
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=11
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=12
    c.append(cirq.H.on(input_qubit[0])) # number=13
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    # circuit end
    # All qubits are measured into the single 'result' key.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    return "".join(map(str, map(int, bits)))
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit for the Sycamore sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2820
    # Simulate, histogram the measurement bitstrings, and dump everything
    # (frequencies, circuit depth, circuit text) to a CSV report file.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq_pragma227.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
e7dd7163634a0bbdb9a9cad543458590b2bb5119
|
955f9d3fb34af54de2f046d17bbac11c1474819e
|
/abc174/c.py
|
8d677e68c2c1a02bfe30bd9fe642311a51a3f835
|
[] |
no_license
|
shimewtr/AtCoderPracticePython
|
5bb4c28119fced2d111bd1810e0e290f25b6a191
|
f3c22ec1f7a36a27848070c5c6ca4e1717b04ac6
|
refs/heads/master
| 2023-01-12T17:28:44.770138 | 2020-11-19T22:50:22 | 2020-11-19T22:50:22 | 204,830,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Print the smallest n such that 77...7 (n sevens) is divisible by k, or -1.

    Reads k from stdin. Fixes two defects of the original:
    - the repunit was rebuilt from scratch each round (O(n^2) big-int work);
      we keep only the remainder, via r -> (10*r + 7) mod k, which is O(n).
    - only even k printed -1, so k divisible by 5 (e.g. 5) looped forever;
      7, 77, 777, ... can be divisible by k only when gcd(k, 10) == 1.
    """
    k = int(input())
    if k % 2 == 0 or k % 5 == 0:
        print(-1)
        return
    remainder = 0
    length = 0
    while True:
        # Append one more digit 7 to the repunit, tracking only the remainder.
        remainder = (remainder * 10 + 7) % k
        length += 1
        if remainder == 0:
            print(length)
            return
class TestClass(unittest.TestCase):
    """I/O-driven tests: feed text to resolve() via stdin, compare stdout."""
    def assertIO(self, input, output):
        # NOTE(review): the parameter `input` shadows the builtin input().
        # Swap the real stdin/stdout for in-memory buffers around the call.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_input_1(self):
        print("test_input_1")
        input = """101"""
        output = """4"""
        self.assertIO(input, output)
    def test_input_2(self):
        print("test_input_2")
        input = """2"""
        output = """-1"""
        self.assertIO(input, output)
    def test_input_3(self):
        print("test_input_3")
        input = """999983"""
        output = """999982"""
        self.assertIO(input, output)
if __name__ == "__main__":
    # Run the TestClass suite when executed directly.
    unittest.main()
|
[
"[email protected]"
] | |
1ea03400ca87f6315d33824b3426b6fb0d74d1c5
|
4589a9ea76e458793ad78059839b81d365f433de
|
/athena_automation/athenataf/tests/configuration/system/admin/delete_test_scenarios/DeleteTestScenarios.py
|
8914ef5a124a3da9001bacaf87ea36bba1885e95
|
[] |
no_license
|
cash2one/reautomation_handoff
|
5e2c4c432d8f658d1b57211782744bd0b56c52f6
|
7ef83572d659db35036189eb394f99de1369db5a
|
refs/heads/master
| 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,105 |
py
|
import logging
logger = logging.getLogger('athenataf')
from athenataf.lib.functionality.test.ConfigurationTest import ConfigurationTest
class DeleteTestScenarios(ConfigurationTest):
    '''
    Test class for System Admin DeleteTestScenarios.
    '''
    def test_ath_11329_delete_view_only_guest_registration_only_non_default_values(self):
        """Set non-default view-only and guest-registration admin values,
        restore the defaults, and compare configuration snapshots:
        S1 (baseline) vs S2 (modified) must differ; S1 vs S3 (restored)
        must match."""
        conf = self.config.config_vars
        self.take_s1_snapshot()
        system_page = self.LeftPanel.go_to_system_page()
        system_page.go_to_admin_tab()
        # Apply non-default view-only credentials, then guest-registration ones.
        system_page.view_only_non_default_values(conf.viewonly,conf.viewonly,conf.viewonly)
        system_page._save_settings()
        system_page.go_to_admin_tab()
        system_page.guest_registration_only_non_default_values(conf.guest_username,conf.guest_password,conf.guest_password)
        system_page._save_settings()
        self.take_s2_snapshot()
        # Restore both settings groups to their defaults.
        system_page.go_to_admin_tab()
        system_page.restore_view_only_default_values()
        system_page.go_to_admin_tab()
        system_page.restore_guest_registration_only_default_values()
        self.take_s3_snapshot()
        # NOTE(review): assert_s1_s2_diff(0) — presumably "expect a diff";
        # confirm the argument's meaning against ConfigurationTest.
        self.assert_s1_s2_diff(0)
        self.assert_s1_s3_diff()
        self.clear()
|
[
"[email protected]"
] | |
8ab3069b9a328363bbbfd0ad67638a4ac549183c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/770.py
|
c1b36eac52d21c7e378886958c50e72ea92b665e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
# Solver for Tic-Tac-Toe-Tomek game
import numpy as np
# Input and output files are expected in the working directory.
fin = open('A-large.in')
fout = open('testout_large.txt', 'w')
def CheckWinner(A, player_char, not_player_char):
    """Return True when player_char owns a full row, column or diagonal.

    'T' counts as a wildcard for either player. `not_player_char` is kept
    for interface compatibility; opponent cells (like empties) simply score 0.
    """
    # 1 where the cell belongs to the player (or is the T wildcard), else 0.
    owned = ((A == player_char) | (A == 'T')).astype(int)
    column_win = max(np.sum(owned, 0)) == 4
    row_win = max(np.sum(owned, 1)) == 4
    diagonal_win = np.trace(owned) == 4
    anti_diagonal_win = sum(owned[[0, 1, 2, 3], [3, 2, 1, 0]]) == 4
    if column_win or row_win or diagonal_win or anti_diagonal_win:
        return True
    return False
# Read T test cases; each board is a run of lines terminated by a blank line.
T = int(fin.readline().rstrip('\n'))
for j in range(1,T+1,1):
    board = []
    line = fin.readline()
    while line != '\n' and line != '':
        board.append(list(line.strip('\n')))
        line = fin.readline()
    # CheckWinner(array)
    # print(board)
    matboard = np.array(board)
    # Classify: a win for either side, else unfinished if any '.' remains.
    if CheckWinner(matboard, 'X', 'O'):
        fout.write('Case #%d: X won\n' %j)
    elif CheckWinner(matboard, 'O', 'X'):
        fout.write('Case #%d: O won\n' %j)
    elif np.in1d(['.'], matboard).all():
        fout.write('Case #%d: Game has not completed\n' %j)
    else:
        fout.write('Case #%d: Draw\n' %j)
fin.close()
fout.close()
|
[
"[email protected]"
] | |
6264a0b4aebc98ab2fd8d75d31f9861aece0fde2
|
59de7788673ade984b9c9fbc33664a7cbdba67d3
|
/res/scripts/client/gui/scaleform/daapi/view/meta/fortchoicedivisionwindowmeta.py
|
12eb657baafa06583d6ac8fb7bce9fbd90dcdb1c
|
[] |
no_license
|
webiumsk/WOT-0.9.15-CT
|
3fa24ab37a6c91b7073034afb2f355efa5b7fe36
|
fbd194fbaa6bdece51c7a68fc35bbb5257948341
|
refs/heads/master
| 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 1,202 |
py
|
# 2016.05.01 15:22:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/FortChoiceDivisionWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class FortChoiceDivisionWindowMeta(AbstractWindowView):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends AbstractWindowView
    null
    """
    def selectedDivision(self, divisionID):
        """
        :param divisionID:
        :return :
        """
        # Generated stub: reports the missing override (presumably a concrete
        # view class is expected to implement this handler).
        self._printOverrideError('selectedDivision')
    def changedDivision(self, divisionID):
        """
        :param divisionID:
        :return :
        """
        # Generated stub, same pattern as selectedDivision.
        self._printOverrideError('changedDivision')
    def as_setDataS(self, data):
        """
        :param data:
        :return :
        """
        # Forward to the Flash object only once DAAPI is initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_setData(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\fortchoicedivisionwindowmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:42 Střední Evropa (letní čas)
|
[
"[email protected]"
] | |
285a2caa90b61ae628ae8b0c2b62c3ae736ac74f
|
aace5cbeeb567b017984898297192ea6b5c5993f
|
/文件操作/csv/03pd按照列写入csv文件.py
|
67d0d5a2673a07fd7481e0836c2853236a6457af
|
[
"MIT"
] |
permissive
|
Litao439420999/Spider
|
4eb27fc332b9a97c9917c236c3653809c2229ac3
|
47d70ec92936b8bea87c641df47ea30e5dde86a1
|
refs/heads/master
| 2023-03-24T19:02:22.857250 | 2021-03-14T02:07:59 | 2021-03-14T02:07:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 813 |
py
|
'''
Description: 参考:https://blog.csdn.net/weixin_43245453/article/details/90054820?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control
Author: HCQ
Company(School): UCAS
Email: [email protected]
Date: 2021-01-16 21:40:14
LastEditTime: 2021-01-16 21:48:25
FilePath: /Spider/文件操作/csv/03pd按照列写入csv文件.py
'''
import pandas as pd

# Two equal-length columns — a length mismatch would make DataFrame raise.
a = list(range(5))
b = list(range(5, 10))

# The dict keys become the CSV column headers.
dataframe = pd.DataFrame({'a_name': a, 'b_name': b})

# index=False omits the row-number column; fields are comma-separated.
dataframe.to_csv(r"03保存test.csv", index=False, sep=',')
|
[
"[email protected]"
] | |
0846ce23d72a96dd3abeb6c06cb588f10a9f6824
|
24dabf63ba445fa4df205b5c9bbe89f9d7230527
|
/transfer_learning/tools/double_iterator.py
|
244733768081f4b153ad922e06ce30643145c6df
|
[] |
no_license
|
marco-willi/hco-experiments
|
e51ea5581eefb4fc3b46fb4337b9f04eb52640fb
|
7f3076b476e3311ed22d2db37c6d075e43d0d61f
|
refs/heads/master
| 2021-01-22T04:09:37.706108 | 2018-01-03T20:44:46 | 2018-01-03T20:44:46 | 92,433,439 | 1 | 0 | null | 2017-08-21T03:49:27 | 2017-05-25T18:40:03 |
Python
|
UTF-8
|
Python
| false | false | 2,987 |
py
|
"""
Double Iterator
- Outer (slower) ImageGenerator that serves large batches of data that just
fit into memory
- Inner (numpy) ImageGenerator that serves smaller batches of data
"""
# import modules
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import Iterator
class DoubleIterator(Iterator):
    """ Outer / Inner data generators to optimize image serving
    - batch_size: int
        the number of images returned by the Iterator
    - outer_generator: Iterator that returns images
        typically ImageDataGenerator.flow_from_directory()
    """
    def __init__(self, outer_generator, batch_size, seed=None,
                 inner_shuffle=True):
        self.outer_generator = outer_generator
        self.batch_size = batch_size
        # Number of inner (small) batches still available from the current
        # outer (large) batch.
        self.n_on_stack = 0
        self.inner = None
        self.n = outer_generator.n
        self.seed = seed
        self.inner_shuffle = inner_shuffle
    def next(self):
        """ Get next batch """
        # Refill from the outer generator when the current large batch is
        # exhausted (or on the very first call).
        if (self.n_on_stack == 0) or (self.inner is None):
            # get next batch of outer generator
            X_outer, y_outer = self.outer_generator.next()
            # calculate stack size for inner generator
            self.n_on_stack = (self.outer_generator.batch_size //
                               self.batch_size)
            # Create inner data generator (no data agumentation - this is
            # done by the outer generator)
            self.inner = ImageDataGenerator().flow(
                X_outer, y_outer,
                batch_size=self.batch_size,
                seed=self.seed, shuffle=self.inner_shuffle)
        # get next batch
        X_inner, y_inner = self.inner.next()
        self.n_on_stack -= 1
        # print("N on stack: %s, batches_seen: %s" %
        #      (self.n_on_stack, self.outer_generator.total_batches_seen))
        return X_inner, y_inner
if __name__ == '__main__':
    # Exploratory / manual-test code: builds an augmenting directory
    # generator, wraps it in a DoubleIterator, and times batch serving.
    from config.config import cfg_path
    path = cfg_path['images'] + 'train/'
    datagen_train = ImageDataGenerator(
        rescale=1./255,
        featurewise_center=False,
        featurewise_std_normalization=False,
        horizontal_flip=True,
        zoom_range=[0.9, 1])
    train_generator = datagen_train.flow_from_directory(
        path,
        target_size=(150, 150),
        color_mode='rgb',
        batch_size=500,
        class_mode='sparse',
        seed=123)
    # NOTE(review): the next four bare expressions have no effect — they
    # look like leftover interactive-session probes.
    train_generator.batch_index
    train_generator.total_batches_seen
    train_generator.batch_size // 32
    31 * 32
    tt = DoubleIterator(train_generator, 32)
    batch_x, batch_y = tt.next()
    batch_x2, batch_y2 = tt.next()
    import numpy as np
    np.array_equal(batch_x, batch_x2)
    batch_x.shape
    3200 // 32
    import time
    # Time 100 consecutive batches to gauge serving throughput.
    for i in range(0, 100):
        time_s = time.time()
        X, y = tt.next()
        time_elapsed = time.time() - time_s
        print("Iteration %s took %s s" % (i, time_elapsed))
|
[
"[email protected]"
] | |
f46483143cee2b1cfa802c56d800dd7312457b50
|
14e19bcaaf917924e7bb78e4f7e6b42662ff5164
|
/fancy_month01/day17_fancy/day17_teacher/demo05.py
|
5e119a78d204ea68d697a808609411ce80758693
|
[] |
no_license
|
Lzffancy/Aid_study
|
5b3538443ca0ad1107a83ef237459b035fef70d0
|
4ba5e5045371490d68459edd1f0a94963e0295b1
|
refs/heads/master
| 2023-02-22T19:11:00.867446 | 2021-01-25T13:01:35 | 2021-01-25T13:01:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 467 |
py
|
"""
闭包
三大要素:
有外有内
内使用外
外返回内
字面思想:
封闭内存空间
作用:
外部函数栈帧执行后,不释放.
等待内部函数重复使用
"""
def func01():
    """Build and return a closure that prints the enclosed value 100."""
    captured = 100

    def _inner():
        # Reads `captured` from the enclosing frame, which outlives this call.
        print(captured)

    # Return the inner function without calling it.
    return _inner
# 调用外部函数,得到内部函数
res = func01()
res()
res()
|
[
"[email protected]"
] | |
511a610b4208faf06340813b7dc036f4cefe122c
|
67971c2c66bce8e9746810592f71a33fcbbeb260
|
/tests/test_database/test_playlist.py
|
cd1653bbcdf1c25933f2071b41dce51c388a761b
|
[
"MIT"
] |
permissive
|
holing/LinDouFm
|
78ade890c974b967ba3102cf93c31dee1bfcde09
|
463618599e2f3111c7fc2dd251940e9c4981b40b
|
refs/heads/master
| 2021-01-17T03:39:53.758021 | 2015-01-18T14:13:36 | 2015-01-18T14:13:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,809 |
py
|
# coding:utf-8
from database.playlist import playlist
from database.music import music_model
from database.channel import channel_model
from tests.test_database.test_music import get_test_music
def test_playlist():
    """End-to-end test: a channel's playlist returns at most the requested
    number of tracks, capped by how many tracks the channel actually has."""
    # Add a test channel.
    channel_name = u"test_channel_name"
    channel_uuid = u"mk_test_douban-cid"
    channel = channel_model.add_channel(channel_name, channel_uuid)
    # A fresh channel has no playable music.
    assert len(playlist.get_music_by_channel(channel, 20)) == 0
    # Add 20 test tracks (rewind the shared file handles before each insert;
    # the uuid is mutated each round so every track gets a distinct id).
    music_information = get_test_music()
    new_music_list = []
    for i in range(20):
        music_information[u"cover"].seek(0)
        music_information[u"audio"].seek(0)
        music_information[u"uuid"] += unicode(i)
        music = music_model.add_music(music_information[u"title"], music_information[u"artist"], music_information[u"album"]
            , music_information[u"company"], music_information[u"public_time"], music_information[u"kbps"], music_information[u"cover"], music_information[u"audio"], music_information[u"uuid"])
        new_music_list.append(music.key)
    # Attach the test tracks to the channel and re-fetch it.
    channel_model.update_channel(channel, music_list=new_music_list)
    channel = channel_model.get_channel(key=channel.key)[0]
    # Requests larger than the catalogue are capped at 20; smaller ones honored.
    assert len(playlist.get_music_by_channel(channel, 30)) == 20
    assert len(playlist.get_music_by_channel(channel, 20)) == 20
    assert len(playlist.get_music_by_channel(channel, 10)) == 10
    # Clean up: delete the channel and every inserted track.
    channel_model.delete_channel(channel)
    music_list = music_model.get_music(title=music_information[u"title"])
    for music in music_list:
        music_model.delete_music(music)
|
[
"root@ubuntu.(none)"
] |
root@ubuntu.(none)
|
63463a703612e5da4d3698590f690f700b1e48e0
|
7f57c12349eb4046c40c48acb35b0f0a51a344f6
|
/2015/RotateList_v0.py
|
cc1c94e88855fbce957c86bc6277c56718a5008b
|
[] |
no_license
|
everbird/leetcode-py
|
0a1135952a93b93c02dcb9766a45e481337f1131
|
b093920748012cddb77258b1900c6c177579bff8
|
refs/heads/master
| 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,120 |
py
|
#!/usr/bin/env python
# encoding: utf-8
# Definition for singly-linked list.
class ListNode:
    """A node in a singly linked list."""

    def __init__(self, x):
        # Store the payload; the node starts unlinked.
        self.val, self.next = x, None
def print_l(head):
    """Recursively print each node's value, one per line (Python 2 print)."""
    if head:
        print head.val
        if head.next:
            print_l(head.next)
class Solution:
    # @param {ListNode} head
    # @param {integer} k
    # @return {ListNode}
    def rotateRight(self, head, k):
        """Rotate the list to the right by k places and return the new head."""
        if not head:
            return
        if not head.next:
            return head
        # One pass to find the tail and measure the length.
        length, tail = 1, head
        while tail.next:
            tail = tail.next
            length += 1
        k %= length
        if k == 0:
            return head
        # The node just before the new head is length - k - 1 hops from head.
        pivot = head
        for _ in range(length - k - 1):
            pivot = pivot.next
        new_head = pivot.next
        # Cut the list at the pivot and splice the old front after the old tail.
        pivot.next = None
        tail.next = head
        return new_head
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 and rotate it by its own length.
    s = Solution()
    head = n1 = ListNode(1)
    n2 = ListNode(2)
    n3 = ListNode(3)
    n4 = ListNode(4)
    n5 = ListNode(5)
    n1.next = n2
    n2.next = n3
    n3.next = n4
    n4.next = n5
    # k == length, so k % l == 0 and the original head is returned unchanged.
    h = s.rotateRight(head, 5)
    print_l(h)
|
[
"[email protected]"
] | |
8a7a2e55befff55fa7322db16f944dccb8bddcb3
|
f33b30743110532ddae286ba1b34993e61669ab7
|
/Optimal Division.py
|
171cb2effb649a0cb56f16ae0f104dba31b07f47
|
[] |
no_license
|
c940606/leetcode
|
fe9dcee7a5daa4d52999d5f53253dd6dd33c348b
|
631df2ce6892a6fbb3e435f57e90d85f8200d125
|
refs/heads/master
| 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 430 |
py
|
class Solution(object):
    def optimalDivision(self, nums):
        """Parenthesize nums[0]/nums[1]/.../nums[-1] to maximize its value.

        Dividing by the smallest possible divisor maximizes the quotient, so
        for n > 2 the optimal form wraps everything after the first operand in
        a single pair of parentheses: a/(b/c/.../z).

        :type nums: List[int]
        :rtype: str (None for empty input, kept for backward compatibility)
        """
        n = len(nums)
        if n == 0:
            return
        if n == 1:
            # Bug fix: the contract is a string; the original returned the
            # bare int for single-element input.
            return str(nums[0])
        if n == 2:
            return str(nums[0]) + "/" + str(nums[1])
        # a/(b/c/.../z): every divisor after nums[1] ends up multiplying.
        return str(nums[0]) + "/(" + "/".join(str(x) for x in nums[1:]) + ")"
a = Solution()
# Expected output: 1000/(100/10/2)
print(a.optimalDivision([1000,100,10,2]))
|
[
"[email protected]"
] | |
7b2653c28ca84b62142d0978452bfbd4823f4d88
|
e28fad299c396ff153e5df666443e335a033b657
|
/mms/stories/views.py
|
183a6686c66e73e2b676c20eb9843e75bcd8bf7c
|
[] |
no_license
|
easherma/mms_django
|
387b179ab74bf4447fa7acefa6ac84f0423edb1f
|
1ae30ae8bc30550dce19e288ae43759a8155f8ad
|
refs/heads/master
| 2021-01-10T18:08:01.586356 | 2017-01-12T20:44:09 | 2017-01-12T20:44:09 | 71,917,502 | 0 | 0 | null | 2017-02-20T19:08:29 | 2016-10-25T16:36:14 |
HTML
|
UTF-8
|
Python
| false | false | 2,896 |
py
|
from django.contrib.auth.models import User
from stories.models import Story, Submission, Waypoint
from stories.serializers import StorySerializer, UserSerializer, SubmissionSerializer, WaypointSerializer
from rest_framework import viewsets
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.decorators import detail_route, list_route
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
import geojson
import json
def waypoint_to_geojson(waypoint, properties):
    """Wrap a waypoint's geometry and the given properties in a geojson Feature.

    NOTE(review): `waypoint['geom']` is passed through as-is, without
    geojson.loads (unlike StoryViewSet.waypoints) -- confirm callers supply a
    geometry object rather than a raw geometry string.
    """
    geometry= waypoint['geom']
    #[f.name for f in models.Waypoint._meta.get_fields()]
    feature = geojson.Feature(geometry=geometry, properties=properties)
    return feature
class StoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for stories, plus GeoJSON and user detail routes."""
    queryset = Story.objects.all()
    serializer_class = StorySerializer

    @detail_route()
    def waypoints(self, request, pk=None):
        """Return all waypoints of all submissions as a GeoJSON FeatureCollection.

        Bug fix: the feature list used to be re-created inside the submission
        loop, so the response contained only the last submission's waypoints,
        and a story with no submissions raised NameError.
        """
        story = self.get_object()
        features = []
        for submission in story.submissions.all():
            for waypoint in submission.waypoints.values():
                geom = geojson.loads(waypoint['geom'])
                # The raw row dict doubles as the feature's properties.
                feature = geojson.Feature(geometry=geom, properties=waypoint)
                features.append(feature)
        return Response(geojson.FeatureCollection(features))

    @detail_route()
    def users(self, request, pk=None):
        """List the users who have submitted to this story."""
        story = self.get_object()
        queryset = User.objects.filter(submission=story.pk)
        return Response(queryset.values())
class WaypointsByStory(viewsets.ModelViewSet):
    """Waypoints belonging to a single, hard-coded story."""
    serializer_class = WaypointSerializer
    # NOTE(review): `storyname` is defined but not used in the filter below,
    # which repeats the literal -- presumably meant to parameterize the query.
    storyname = 'My First Story'
    queryset = Waypoint.objects.filter(submission__story__name='My First Story').select_related('submission')
#these are pretty much useless
class UserViewSet(viewsets.ModelViewSet):
    """Default CRUD API over all users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class SubmissionViewSet(viewsets.ModelViewSet):
    """Default CRUD API over all submissions."""
    queryset = Submission.objects.all()
    serializer_class = SubmissionSerializer
class WaypointViewSet(viewsets.ModelViewSet):
    """Default CRUD API over all waypoints."""
    queryset = Waypoint.objects.all()
    serializer_class = WaypointSerializer
class StoryList(APIView):
    """Render the HTML story list page (stories_list.html)."""
    renderer_classes = (TemplateHTMLRenderer,)
    template_name = 'stories_list.html'
    def get(self, request):
        # Template context: all stories under the 'stories' key.
        queryset = Story.objects.all()
        return Response({'stories': queryset})
|
[
"[email protected]"
] | |
d39c7fb78ac2d32f16918615fb0f8dadb4a8b9d1
|
7af9841dfdeb7192cee9f5bc5ae24ebabeeebdcc
|
/project/admin.py
|
06b0a54f6791eeb4a8343c0af355c73e99ad51a5
|
[] |
no_license
|
dimansion/bepy
|
513d1d6b8c6f679ce97f46741b50b73dabf20484
|
dd92999b9fb0d65e9479372718409785a8d26d26
|
refs/heads/master
| 2020-06-28T11:27:02.204255 | 2016-11-14T11:26:32 | 2016-11-14T11:26:32 | 67,694,755 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 560 |
py
|
from django.contrib import admin
from project.models import Page, Content
class ContentInline(admin.TabularInline):
    """Edit Content rows inline on the Page admin form."""
    model = Content
    # Auto-fill the slug from the content name as the admin types.
    prepopulated_fields = {'slug':('name',)}
class PageAdmin(admin.ModelAdmin):
    """Admin for Page: list title/date, slug from title, contents inline."""
    list_display = ('title', 'published_date',)
    # Auto-fill the slug from the title as the admin types.
    prepopulated_fields = {'slug':('title',)}
    inlines = [ContentInline]
# class ContentAdmin(admin.ModelAdmin):
# list_display = ('name', 'lesson',)
# prepopulated_fields = {'slug':('name',)}
admin.site.register(Page, PageAdmin)
# admin.site.register(Content, ContentAdmin)
|
[
"[email protected]"
] | |
8810e20c0d4928a9c3b0dbf23ef6590ec448b754
|
128d593efd591dc83a3aef2d4bfad39e73ee637e
|
/python_code/complete/no128
|
a8958da736adcb09069e0cf51a44cd9584ed2446
|
[] |
no_license
|
jwan/ProjectEuler
|
93be87d89cc58516d503dd5ed53bdbd706748cda
|
65aec4f87b8899db6bad94a36412a28a4b4527e9
|
refs/heads/master
| 2021-01-17T08:21:46.654529 | 2011-05-02T23:11:35 | 2011-05-02T23:11:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,574 |
#!/usr/bin/env python
# 1 --> (2,3,4,5,6,7)
# [1] 1
# [2,...,7] 6
# [8,...,19] 12
# [20,...,37] 18
# [38,...,61] 24
# f(k) = 3k^2 - 3k + 1
# f(k) = elements before layer k if k > 0
#Layer 0
# 1 -- (1,1) -- (2,1),(2,2),(2,3),(2,4),(2,5),(2,6)
# Layer 1
# 2 -- (2,1) -- (1,1), (2,2),(2,6), (3,1),(3,2),(3,12) C
# 3 -- (2,2) -- (1,1), (2,1),(2,3), (3,2),(3,3),(3,4) C
# 4 -- (2,3) -- (1,1), (2,2),(2,4), (3,4),(3,5),(3,6) C
# 5 -- (2,4) -- (1,1), (2,3),(2,5), (3,6),(3,7),(3,8) C
# 6 -- (2,5) -- (1,1), (2,4),(2,6), (3,8),(3,9),(3,10) C
# 7 -- (2,6) -- (1,1), (2,5),(2,1), (3,10),(3,11),(3,12) C
# Layer 2
# 8 -- (3,1) -- (2,1), (3,2),(3,12),(4,1),(4,2),(4,18) C
# 9 -- (3,2) -- (2,1),(2,2),(3,1),(3,3), (4,2),(4,3)
# 10 -- (3,3) -- (2,2), (3,2),(3,4), (4,3),(4,4),(4,5) C
# 11 -- (3,4) -- (2,2),(2,3),(3,3),(3,5), (4,5),(4,6)
# 12 -- (3,5) -- (2,3), (3,4),(3,6), (4,6),(4,7),(4,8) C
# 13 -- (3,6) -- (2,3),(2,4)
# 14 -- (3,7) -- (2,4)
# 15 -- (3,8) -- (2,4),(2,5)
# 16 -- (3,9) -- (2,5)
# 17 -- (3,10) -- (2,5),(2,6)
# 18 -- (3,11) -- (2,6)
# 19 -- (3,12) -- (2,6),(2,1)
# 20 -- (4,1) -- (3,)(4,)(5,)
# 21 -- (4,2) --(3,1)(3,2)
# 22 -- (4,3) -- (3,2)(3,3)
# 22 -- (4,4) --
# (n, k) is corner if k % (n - 1) == 1
# A corner is adjacent to 1 block of lower class, 2 of same, and 3 of higher
# the 2 of same will always be (n, k - 1 *wrap*), (n, k + 1 *wrap*)
# (n,1) will always be (n-1,1),(n,0),(n,2),(n+1,0),(n+1,1),(n+1,2)
# Both the n-1 and n+1 grouping will start where the previous one left off
# Only the corners and the final non-corner have a chance at 3 primes
# This is because if we are not either, then they are next to 2 consec. #'s,
# which give a diff. of 1, the other two pairs will give differences that differ
# by one, so at most 1 of each can be prime
##############################
# Case1, k neq 1, corner
##############################
# The corner (n, k) is adjacent to
# (n-1, (k-1)/(n-1)*(n-2) + 1), (n,k-1), (n,k+1)--> don't matter if not end piece,
# (n+1, (k-1)/(n-1)*n), (n+1, (k-1)/(n-1)*n + 1), (n+1, (k-1)/(n-1)*n + 2),
# 3*(n - 1)*(n - 2) + 1 + k vs.
# 3*(n - 2)*(n - 3) + 1 + (k - 1)/(n - 1)*(n - 2) + 1,
# 3*(n - 1)*(n - 2) + k,3*(n - 1)*(n - 2) + 2 + k,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n, 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 1,
# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 2
# Diffs
# 6*(n - 2) + (k - 1)/(n - 1),
# 1,1,
# 6*(n - 1) + (k - 1)/(n - 1) - 1,
# 6*(n - 1) + (k - 1)/(n - 1),
# 6*(n - 1) + (k - 1)/(n - 1) + 1,
# Only way it can be 3 is if
# c1=6*(n - 2) + (k - 1)/(n - 1),
# c2=6*(n - 1) + (k - 1)/(n - 1) - 1,
# c3=6*(n - 1) + (k - 1)/(n - 1) + 1,
# But if n > 2, c1 prime implies (k-1)/(n-1) == 1,5 mod 6
# implies c2 == 0,4 mod 6, c3 == 0,2 mod 6, so it is never possible
# for n > 2
# For n = 1, 1 works
# For n = 2, of 3,4,5,6,7 none work
##############################
# Case2, k = 1
##############################
# The corner (n, 1) is adjacent to
# (n-1, 1), (n,6*(n-1)), (n,2)--> don't matter if not end piece,
# (n+1, 6*n), (n+1, 1), (n+1, 2),
# 3*(n - 1)*(n - 2) + 2 vs.
# 3*(n - 2)*(n - 3) + 2,
# 3*(n - 1)*(n - 2) + 1 + 6*(n - 1),3*(n - 1)*(n - 2) + 3,
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 2,
# 3*n*(n - 1) + 3
# Diffs
# 6*(n - 2),
# 6*(n - 1) - 1,1
# 6*(2*n - 1) - 1, 6*(n - 1),
# 6*(n - 1) + 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 1) - 1
# c3=6*(n - 1) + 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
##############################
# Case3
##############################
# The one outlier is the final piece (n, 6*(n - 1))
# When n > 2, this is not 1 mod n - 1, hence not a corner
# This is adjacent to (n,1),(n,6*n-7),(n-1,1),(n-1,6*(n-2)),
# (n+1,6*n),(n+1,6*n-1)
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1) vs.
# 3*(n - 1)*(n - 2) + 1 + 1, 3*(n - 1)*(n - 2) + 6*(n - 1),
# 3*(n - 2)*(n - 3) + 1 + 1, 3*(n - 2)*(n - 3) + 1 + 6*(n-2),
# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 6*n
# Diffs
# 6*(n - 1) - 1, 1,
# 6*(2*n - 3) - 1, 6*(n - 1),
# 6*n, 6*n - 1
# c1=6*(n - 1) - 1
# c2=6*(2*n - 3) - 1
# c3=6*n - 1
# Start at n = 3 (cases 1 and 2 already done, special cases)
from python_code.decorators import euler_timer
from python_code.functions import sieve
# 3*(n - 1)*(n - 2) + 2:
# c1=6*(n - 1) - 1 = 6*n - 7
# c2=6*(2*n - 1) - 1=12*n - 7
# c3=6*(n - 1) + 1=6*n - 5
# 3*(n - 1)*(n - 2) + 1 + 6*(n-1):
# c1=6*(n - 1) - 1=6*n - 7
# c2=6*(2*n - 3) - 1=12*n - 19
# c3=6*n - 1=6*n - 1
# in the first two layers only 1 and 2 do as we wish
# from there, first = 8, last = 19 and we can increment
# first by 6*(layer - 1) and last by 6*layer
# The first corner will be FC(layer) = 3*(layer - 1)*(layer - 2) + 2
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 5) and (12*layer - 7) are prime
# The last corner will be
# LC(layer) = 3*(layer - 1)*(layer - 2) + 1 + 6*(layer - 1)
# it only has PD = 3 if
# (6*layer - 7), (6*layer - 1) and (12*layer - 19) are prime
# Instead of carrying out costly multiplications, we can increment
# these by 6 and 12 respectively, similarly
# FC(L + 1) - FC(L) = 6*(L - 1)
# LC(L + 1) - LC(L) = 6*L
# So we can increment these as well
@euler_timer(128)
def main():
    """Project Euler 128: print the 2000th hexagonal tile with PD(n) == 3.

    Per the derivation in the comments above, only two tiles per layer can
    have three prime neighbor-differences: the first corner and the last
    tile of the layer.  Both depend on a small set of candidate primes that
    advance by 6 or 12 per layer, so everything is updated incrementally.
    """
    TOTAL = 2000
    MAX_n = 10**6
    PRIMES = sieve(MAX_n)
    # Boolean lookup table: constant, rather than linear, membership test.
    prime_bools = [False]*(MAX_n + 1)
    for prime in PRIMES:
        prime_bools[prime] = True
    # Tiles 1 and 2 are known solutions (special-cased in the derivation),
    # so the scan starts at layer 3 with count == 2.
    count = 2
    current = 2
    layer = 3
    first_corner = 8  # value of the first corner in the current layer
    last_corner = 19  # value of the last tile in the current layer
    six_shared = 11  # prime candidate shared by both corners (advances by 6)
    six_first = 13  # prime candidate for the first corner, diff 6
    six_last = 17  # prime candidate for the last tile, diff 6
    twelve_first = 29  # prime candidate for the first corner, diff 12
    twelve_last = 17  # prime candidate for the last tile, diff 12
    while count < TOTAL:
        # Candidates have outgrown the sieve: results would be wrong.
        if twelve_first > MAX_n:
            raise Exception("Primes not large enough")
        if prime_bools[six_shared]:
            # First corner qualifies when all three of its candidates are prime.
            if prime_bools[six_first] and prime_bools[twelve_first]:
                current = first_corner
                count += 1
            if count < TOTAL:
                # Last tile of the layer checked the same way.
                if prime_bools[six_last] and prime_bools[twelve_last]:
                    current = last_corner
                    count += 1
        # Advance every candidate and corner value to the next layer.
        six_shared, six_last = six_last, six_last + 6
        six_first += 6
        twelve_last, twelve_first = twelve_first, twelve_first + 12
        first_corner += 6*(layer - 1)
        last_corner += 6*layer
        layer += 1
    print current
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | ||
1c0718148e9e9ebb9bdd52de8a5d00b60b6504b5
|
29c58b3bec6ac0fcdb3070efc118600ee92004da
|
/test/test_email_html_dto.py
|
92e5827bb8e4596c35ee57d8c9ef29da4ca517f5
|
[
"MIT"
] |
permissive
|
mailslurp/mailslurp-client-python
|
a2b5a0545206714bd4462ae517f242852b52aaf9
|
5c9a7cfdd5ea8bf671928023e7263847353d92c4
|
refs/heads/master
| 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,824 |
py
|
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.email_html_dto import EmailHtmlDto # noqa: E501
from mailslurp_client.rest import ApiException
class TestEmailHtmlDto(unittest.TestCase):
    """EmailHtmlDto unit test stubs (auto-generated scaffold)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a test EmailHtmlDto.

        include_optional is a boolean: when False only required params are
        included, when True both required and optional params are included."""
        # model = mailslurp_client.models.email_html_dto.EmailHtmlDto()  # noqa: E501
        if include_optional :
            return EmailHtmlDto(
                subject = '0',
                body = '0'
            )
        else :
            return EmailHtmlDto(
        )
    def testEmailHtmlDto(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
6d04d8977bbb04374efd4d17378fdc14d5da1a84
|
a721e4ca65b79ce725c7b5b43539c963a3b55290
|
/Halloween_Sale.py
|
ce32b629161728b86e99fa33e4cc4101e5a4e754
|
[] |
no_license
|
joydas65/Hackerrank-Problems
|
0832d7cfd1de7e5df4dba76326ede735edc9afea
|
a16b3b0ebb65e7597f8f6417047da4d415a818c7
|
refs/heads/master
| 2022-06-21T12:47:55.241409 | 2022-06-18T18:21:08 | 2022-06-18T18:21:08 | 159,071,834 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
# Halloween Sale: the first game costs p; each later game costs d less, but
# never less than m.  With budget s, print how many games can be bought.
p, d, m, s = map(int, input().split())
ans = 0
while s >= p:
    ans += 1
    # Both branches of the original if/elif subtracted the price -- the
    # subtraction is unconditional, so do it once.
    s -= p
    # The price only keeps dropping while it is above the floor m.
    if p > m:
        p = max(p - d, m)
print(ans)
|
[
"[email protected]"
] | |
d06f68298b85070352f8aed0d2e30edf7ed61d84
|
4a5caabe31670ab44fe5097df3971d434fc9ca3f
|
/kgpy/optics/coordinate/decenter.py
|
d5438c129063ab4f46b7d9b63e6badcb0be0e0d5
|
[] |
no_license
|
ngoldsworth/kgpy
|
c61d64d39a4da011ad7a42566dbeb6ef88266dea
|
d751fca7f6cc6e762fdc954113f55d407055349d
|
refs/heads/master
| 2022-11-27T14:25:01.972415 | 2020-07-30T23:24:10 | 2020-07-30T23:24:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
import dataclasses
import numpy as np
from astropy import units as u
import kgpy.mixin
__all__ = ['Decenter']
@dataclasses.dataclass
class Decenter(kgpy.mixin.Broadcastable):
    """A 2D translation (decenter) with broadcastable x/y offsets."""
    x: u.Quantity = 0 * u.mm  # offset applied to the last-axis component 0
    y: u.Quantity = 0 * u.mm  # offset applied to the last-axis component 1
    @classmethod
    def promote(cls, value: 'Decenter'):
        """Copy-construct a (possibly subclass) instance from another Decenter."""
        return cls(value.x, value.y)
    @property
    def config_broadcast(self):
        # Fold this instance's x/y shapes into the parent's broadcast shape.
        return np.broadcast(
            super().config_broadcast,
            self.x,
            self.y,
        )
    def __invert__(self):
        """Return the inverse decenter (negated offsets)."""
        return type(self)(
            -self.x,
            -self.y,
        )
    def __call__(self, value: u.Quantity, inverse: bool = False, num_extra_dims: int = 0) -> u.Quantity:
        """Apply (or, with inverse=True, undo) the decenter to `value`.

        Components 0 and 1 along the last axis of `value` are shifted by x/y.
        NOTE(review): `sh[~1:~1] = [1] * num_extra_dims` inserts
        num_extra_dims singleton axes before the second-to-last axis of the
        offsets so they broadcast against extra leading dims of `value` --
        confirm against callers.
        """
        value = value.copy()  # do not mutate the caller's array
        sh = list(self.x.shape)
        sh[~1:~1] = [1] * num_extra_dims
        x = self.x.reshape(sh)
        y = self.y.reshape(sh)
        if not inverse:
            value[..., 0] += x
            value[..., 1] += y
        else:
            value[..., 0] -= x
            value[..., 1] -= y
        return value
    def copy(self):
        """Return a new Decenter with the same offsets."""
        return Decenter(
            x=self.x,
            y=self.y,
        )
|
[
"[email protected]"
] | |
efacad244c5ae011bae81166d0c9355ca56c784c
|
430a146307fd1f64781a91ab60e79b45a231da28
|
/l10n/admin.py
|
347fd6f73abc0b496fa0697dde92dcc90646fdff
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
rsalmaso/django-fluo-l10n
|
61455df2154538db665a9414285a85b7538c81c6
|
e7b298748a4461407cffe4987a4453db6722c53a
|
refs/heads/master
| 2021-01-18T23:56:46.507679 | 2016-01-03T14:34:37 | 2016-01-03T14:34:37 | 48,949,291 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,689 |
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2016, Raffaele Salmaso <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from fluo import admin
from .models import Country, AdministrativeArea
class AdministrativeAreaInline(admin.TabularInline):
    """Edit a country's administrative areas inline on the Country admin."""
    model = AdministrativeArea
    extra = 1
class CountryAdmin(admin.ModelAdmin):
    """Admin for Country: ISO-code columns, continent/status filters,
    name/code search, and inline administrative areas."""
    list_display = ('printable_name', 'iso2_code', 'iso3_code',)
    list_filter = ('continent', 'status')
    search_fields = ('name', 'iso2_code', 'iso3_code')
    inlines = [AdministrativeAreaInline]
admin.site.register(Country, CountryAdmin)
|
[
"[email protected]"
] | |
e2ff82125ca55f866ce113b6933b903002731bc8
|
70280955a5382d73e58395eba78c119a400f4ce7
|
/asakatsu/0609/4.py
|
9f554c1b35208567493334073d67e3034afea623
|
[] |
no_license
|
cohock13/atcoder
|
a7d0e26a10a4e58690347a2e36839c2f503a79ba
|
d268aa68fc96203eab94d021bd158cf84bdb00bc
|
refs/heads/master
| 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
# Make every cell (except possibly the last one visited) hold an even count
# by moving one coin at a time along a snake path: odd cells push one coin to
# the next cell on the path.  Each move is recorded as 1-indexed
# (from_row, from_col, to_row, to_col).
H,W = map(int,input().split())
m = [list(map(int,input().split())) for _ in range(H)]
ans = []
for i in range(H):
    if i%2 == 0:## left -> right on even rows
        for j in range(W):
            if m[i][j]%2:
                if j == W-1:
                    # End of row: push down to the next row (unless last row).
                    if i != H-1:
                        ans.append((i+1,j+1,i+2,j+1))
                        m[i+1][j] += 1
                else:
                    ans.append((i+1,j+1,i+1,j+2))
                    m[i][j+1] += 1
    else:## right -> left on odd rows
        for j in reversed(range(W)):
            if m[i][j]%2:
                if j == 0:
                    # Start of row: push down to the next row (unless last row).
                    if i != H-1:
                        ans.append((i+1,j+1,i+2,j+1))
                        m[i+1][j] += 1
                else:
                    ans.append((i+1,j+1,i+1,j))
                    m[i][j-1] += 1
print(len(ans))
for i in ans:
    print(*i)
|
[
"[email protected]"
] | |
76dea297ed9137e442997eb9ab7a890747ca3906
|
bf076ab3f9dd5c1860474665be646f89937f1a7f
|
/settings.py
|
9acef3e24318d42f1f56f72b921037982218e7f2
|
[
"MIT"
] |
permissive
|
telminov/sonm-cdn-dns
|
f66f16fed0c67ed6f862410777f0c0fc3c87b27f
|
960395f2e7f8d79b5dd2623919ccf89e964fe4ac
|
refs/heads/master
| 2020-03-26T21:12:38.279423 | 2018-09-04T07:58:01 | 2018-09-04T07:58:01 | 145,374,340 | 0 | 0 |
MIT
| 2018-09-04T07:58:02 | 2018-08-20T06:16:27 |
Python
|
UTF-8
|
Python
| false | false | 156 |
py
|
# Service configuration constants.
NODE_MANAGER_URL = 'http://node-manager.cdn.sonm.soft-way.biz'
# NOTE(review): placeholder tokens/keys below -- presumably overridden in a
# local settings file before deployment; verify they are not used as-is.
NODE_MANAGER_TOKEN = '123'
CDN_DOMAIN = 'cdn-sonm.soft-way.biz.'
IP_STACK_ACCESS_KEY = '123'
|
[
"[email protected]"
] | |
20d0368ac8cbfbff2bd5fb04603008994795b7ad
|
721406d87f5086cfa0ab8335a936ece839ab2451
|
/.venv/lib/python3.8/site-packages/opencensus/metrics/export/metric.py
|
658a27e45125376833965c07c6c3db599f5498f8
|
[
"MIT"
] |
permissive
|
MarkusMeyer13/graph-teams-presence
|
661296b763fe9e204fe1e057e8bd6ff215ab3936
|
c302b79248f31623a1b209e098afc4f85d96228d
|
refs/heads/main
| 2023-07-09T03:34:57.344692 | 2021-07-29T07:16:45 | 2021-07-29T07:16:45 | 389,268,821 | 0 | 0 |
MIT
| 2021-07-29T07:16:46 | 2021-07-25T05:23:08 |
Python
|
UTF-8
|
Python
| false | false | 3,224 |
py
|
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opencensus.metrics.export import metric_descriptor
class Metric(object):
    """A collection of time series data and label metadata.
    This class implements the spec for v1 Metrics as of opencensus-proto
    release v0.1.0. See opencensus-proto for details:
    https://github.com/census-instrumentation/opencensus-proto/blob/v0.1.0/src/opencensus/proto/metrics/v1/metrics.proto#L35
    Defines a Metric which has one or more timeseries.
    :type descriptor: class: '~opencensus.metrics.export.metric_descriptor.MetricDescriptor'
    :param descriptor: The metric's descriptor.
    :type time_series: list(:class: '~opencensus.metrics.export.time_series.TimeSeries')
    :param time_series: One or more timeseries for a single metric, where each
    timeseries has one or more points.
    """  # noqa
    def __init__(self, descriptor, time_series):
        # Both arguments are mandatory; fail fast on missing data.
        if not time_series:
            raise ValueError("time_series must not be empty or null")
        if descriptor is None:
            raise ValueError("descriptor must not be null")
        self._time_series = time_series
        self._descriptor = descriptor
        # Validate point value types against the descriptor up front.
        self._check_type()
    def __repr__(self):
        # Summarize rather than dump the (possibly large) time series list.
        return ('{}(time_series={}, descriptor.name="{}")'
                .format(
                    type(self).__name__,
                    "<{} TimeSeries>".format(len(self.time_series)),
                    self.descriptor.name,
                ))
    @property
    def time_series(self):
        # Read-only view of the constructor argument.
        return self._time_series
    @property
    def descriptor(self):
        # Read-only view of the constructor argument.
        return self._descriptor
    def _check_type(self):
        """Check that point value types match the descriptor type."""
        check_type = metric_descriptor.MetricDescriptorType.to_type_class(
            self.descriptor.type)
        for ts in self.time_series:
            if not ts.check_points_type(check_type):
                raise ValueError("Invalid point value type")
    def _check_start_timestamp(self):
        """Check that starting timestamp exists for cumulative metrics."""
        # Only cumulative metric kinds require a start timestamp.
        if self.descriptor.type in (
                metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
                metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
                metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
        ):
            for ts in self.time_series:
                if ts.start_timestamp is None:
                    raise ValueError("time_series.start_timestamp must exist "
                                     "for cumulative metrics")
|
[
"[email protected]"
] | |
979c07a99a4de6deead71a30be7e764a1d398bd8
|
f900a9f48fe24c6a581bcb28ad1885cfe5743f80
|
/Chapter_11/test_name_function.py
|
1f6c6b10bf1eed5b8cf64f797faded06b16b0b93
|
[] |
no_license
|
Anjali-225/PythonCrashCourse
|
76e63415e789f38cee019cd3ea155261ae2e8398
|
f9b9649fe0b758c04861dad4d88058d48837a365
|
refs/heads/master
| 2022-12-03T21:35:07.428613 | 2020-08-18T11:42:58 | 2020-08-18T11:42:58 | 288,430,981 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
    """Tests for 'name_function.py'."""
    def test_first_last_name(self):
        """Do names like 'Janis Joplin' work?"""
        # get_formatted_name should title-case and join first + last.
        formatted_name = get_formatted_name('janis', 'joplin')
        self.assertEqual(formatted_name, 'Janis Joplin')
    def test_first_last_middle_name(self):
        """Do names like 'Wolfgang Amadeus Mozart' work?"""
        # The middle name is the third positional argument.
        formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')
        self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
df3287e337b27feb9ec0bb40be295e9b74ceef18
|
56243d3bf67d8bc7770ab5d12e2ef812e69196de
|
/setup.py
|
2b0c2bbc8e7dd85974ea6e4e24c97eba9dac99fd
|
[
"MIT"
] |
permissive
|
William-Lake/comparing_lists
|
a48542bb9c2d8a0de701d2d01b049664ff02e7c0
|
d9d53c89d4a36b1843bc536655cf8831afd4a2d4
|
refs/heads/master
| 2020-04-02T15:40:44.574432 | 2019-01-30T18:34:56 | 2019-01-30T18:34:56 | 154,578,261 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,528 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [ ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
author="William Lake",
author_email='N/A',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="A small python utility program I wrote for the rare instances where I just need to compare two lists of data.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='comparing_lists',
name='comparing_lists',
packages=find_packages(include=['comparing_lists']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/William-Lake/comparing_lists',
version='0.1.0',
zip_safe=False,
)
|
[
"noreply"
] |
noreply
|
4d352b30c38ee5240aa74ad3e2bd79c7693bfa0a
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/notification-hub/azext_notification_hub/vendored_sdks/notificationhubs/models/notification_hubs_management_client_enums.py
|
f97b28ab81f5bed1408323aa284ef1b8c81d6704
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 |
MIT
| 2023-09-14T10:48:57 | 2017-10-11T16:27:31 |
Python
|
UTF-8
|
Python
| false | false | 789 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class SkuName(str, Enum):
    """Notification Hubs namespace SKU tiers (auto-generated)."""
    free = "Free"
    basic = "Basic"
    standard = "Standard"
class NamespaceType(str, Enum):
    """Kind of namespace: plain messaging or a notification hub (auto-generated)."""
    messaging = "Messaging"
    notification_hub = "NotificationHub"
class AccessRights(str, Enum):
    """Authorization-rule access rights (auto-generated)."""
    manage = "Manage"
    send = "Send"
    listen = "Listen"
|
[
"[email protected]"
] | |
49db302c96c35f528c5f252b1a2f9596dea8b8ad
|
63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09
|
/python/ray/serve/examples/echo_full.py
|
9639f1a258d95e7009d0f3ab0520bc1ed59235a0
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ray-project/maze-raylit
|
79f0a5af9fe4bdc13a2d5b3919da867ed5439aab
|
a03cd14a50d87d58effea1d749391af530d7609c
|
refs/heads/master
| 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 |
Apache-2.0
| 2020-12-04T22:34:15 | 2020-12-03T17:47:58 |
Python
|
UTF-8
|
Python
| false | false | 1,465 |
py
|
import time
import requests
import ray
import ray.serve as serve
# initialize ray serve system.
ray.init(num_cpus=10)
client = serve.start()
# a backend can be a function or class.
# it can be made to be invoked from web as well as python.
def echo_v1(flask_request):
    """Echo backend v1: return the 'response' query arg, defaulting to 'web'."""
    return flask_request.args.get("response", "web")
client.create_backend("echo:v1", echo_v1)
# An endpoint is associated with an HTTP path and traffic to the endpoint
# will be serviced by the echo:v1 backend.
client.create_endpoint("my_endpoint", backend="echo:v1", route="/echo")
print(requests.get("http://127.0.0.1:8000/echo", timeout=0.5).text)
# The service will be reachable from http
print(ray.get(client.get_handle("my_endpoint").remote(response="hello")))
# as well as within the ray system.
# We can also add a new backend and split the traffic.
def echo_v2(flask_request):
    """Return a fixed reply (the request argument is ignored)."""
    reply = "something new"
    return reply
client.create_backend("echo:v2", echo_v2)
# The two backends will now split the traffic 50%-50%.
client.set_traffic("my_endpoint", {"echo:v1": 0.5, "echo:v2": 0.5})
# Observe that requests are now split between the two backends.
for _ in range(10):
    print(requests.get("http://127.0.0.1:8000/echo").text)
    time.sleep(0.2)
# You can also change the number of replicas for each backend independently.
client.update_backend_config("echo:v1", {"num_replicas": 2})
client.update_backend_config("echo:v2", {"num_replicas": 2})
|
[
"[email protected]"
] | |
98413f87180c601da3d941fbf79ed8b5fb9d4e36
|
d2a2546165b3db6295a3f21972dda8ab9aab7846
|
/src/vehicles/road_thief_supplies.py
|
41875538a6ae4bb32f8ce3800c922d8d5fd5e778
|
[] |
no_license
|
andythenorth/road-hog
|
bab12b133dd674f0e6d7ae87498675f8da96b982
|
1800d57d4ce904e7041f24646c393b37903d9466
|
refs/heads/main
| 2022-09-26T19:57:31.006800 | 2022-09-17T10:09:37 | 2022-09-17T10:09:37 | 214,848,659 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
from road_vehicle import SuppliesHauler, DieselRoadVehicle
# "Road Thief" supplies hauler: a diesel tractor unit (capacity 0) pulling
# one cargo unit of capacity 45.
consist = SuppliesHauler(id='road_thief_supplies',
                         base_numeric_id=560,
                         name='Road Thief',
                         power=720,
                         vehicle_life=40,
                         intro_date=1989)
# Leading power unit; carries no cargo itself and reuses a single spriterow.
consist.add_unit(type=DieselRoadVehicle,
                 capacity=0,
                 vehicle_length=7,
                 always_use_same_spriterow=True)
# Trailing cargo unit.
consist.add_unit(capacity=45,
                 vehicle_length=7)
|
[
"[email protected]"
] | |
81a57cd8a99f18eced67a63639f21d53f756df5d
|
9d30115d59ed821a5c7aecf2318b5e0ed22c9676
|
/src/codewars/python/8kyu/binary_addition.py
|
c09a98f894ac53f951b41e2efa477772038c98b0
|
[] |
no_license
|
garigari-kun/til
|
02c7bf05274d1077b454e1f7d4a7355849441524
|
b71f36a66045ab7da7f4a97f7e18de2aaa05f493
|
refs/heads/master
| 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
"""
7kyu
Binary Addition
Implement a function that adds two numbers together and returns their sum in binary. The conversion can be done before, or after the addition.
The binary number returned should be a string.
"""
def add_binary(a, b):
    """Return the binary representation of a + b as a string."""
    return bin(a + b)[2:]
|
[
"[email protected]"
] | |
288de51aecb74984b26d4c45de7430cecdb35121
|
6c48ad953031fd6be870e8bd8775538b9ac7033e
|
/python/demo06/demo06_multi_inherit.py
|
4d65f096931f3671905f96e664f5289bfe015bca
|
[] |
no_license
|
yeswhos/Code-Practice
|
b080c9484f510d02c2d78e388fc03eedc397aa7b
|
0fd8263a5c87dbd0e8b1dd5a38f32a188870308b
|
refs/heads/master
| 2023-04-08T13:11:06.105039 | 2023-03-16T11:34:03 | 2023-03-16T11:34:03 | 247,809,031 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
class A:
    """First parent class: contributes demo() to the MRO."""
    def demo(self):
        print("self方法")
class B:
    """Second parent class: contributes test() to the MRO."""
    def test(self):
        print("test 方法")
class C(A, B):
    """Multiple-inheritance demo: inherits demo() from A and test() from B."""
    pass
# A C instance can call the methods inherited from both parents.
c = C()
c.demo()
c.test()
|
[
"[email protected]"
] | |
e6c752bba55bc005223a795821bd8aa1cb76ec92
|
41c9fde93aeb2afbbe10032a6a5b326573675220
|
/notify/models.py
|
cd84534c00eb3f0ca55493ff698a79c8a5e6542a
|
[] |
no_license
|
yindashan/nuri
|
99dd6c2b944c9014391817018bf9406c40699cfd
|
a9bda6e88d1dd0db3517d068f540dba6c64bcc74
|
refs/heads/master
| 2021-01-22T11:46:54.758649 | 2014-10-20T10:36:19 | 2014-10-20T10:36:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,613 |
py
|
# -*- coding:utf-8 -*-
# 所有主机和硬件报警都通过此模块发出
import json
from celery import task
from utils.sms import sms
from utils.mail import send_mail
from appitem.models import AppService, AppRelation
from datetime import datetime
from django.conf import settings
# 异步处理主机存活事件的通知
@task
def notify_host(criterion, host, state, now_time):
# 1. 产生通知信息
dd = {}
dd['time'] = now_time.strftime('%Y-%m-%d %H:%M:%S')
dd['host'] = host
if state == 'UP':
dd['type'] = 'RECOVERY'
dd['information'] = 'OK - The host is up now.'
else:
dd['type'] = 'PROBLEM'
dd['information'] = 'CRITICAL - The host may be down.'
dd['state'] = state
settings.REDIS_DB.rpush('host_event', json.dumps(dd))
# 2. 触发报警
host_ip = settings.REDIS_DB.hget('host_ip', host)
message = "%s Host Alert: IP: %s is %s" % (dd['type'], host_ip, state)
if criterion.email_list:
send_mail(u'技术支持中心--运维监控中心', criterion.email_list.split(','), message, message)
if criterion.mobile_list:
sms(criterion.mobile_list.split(','), message)
# Send a host up/down alert to the operations contacts of an application.
def alert_host4app(appname, host_ip, ntype, state):
    """E-mail/SMS the app's contacts (plus its parent app's) about a host event."""
    message = "%s Host Alert: IP: %s for %s is %s" % (ntype, host_ip, appname, state)
    try:
        app = AppService.objects.get(app_name = appname)
    except BaseException:
        # Unknown application: nobody to notify.
        return
    email_list = change(app.email_list)
    mobile_list = change(app.mobile_list)
    # Current policy: a child app inherits its parent app's alert contacts.
    # Look up the parent app (single inheritance).
    rel_list = AppRelation.objects.filter(child_app=app)
    for item in rel_list:
        email_list.extend(change(item.parent_app.email_list))
        mobile_list.extend(change(item.parent_app.mobile_list))
    if email_list:
        send_mail(u"技术支持中心--运维监控中心", email_list, message, message)
    if mobile_list:
        sms(mobile_list, message)
# If item_list is a comma-separated string, return it split into a list;
# otherwise return an empty list.
def change(item_list):
    """Split a comma-separated string into a list; blank/whitespace -> []."""
    stripped = item_list.strip()
    return stripped.split(',') if stripped else []
# Asynchronously handle application event notifications
# and application alerting.
@task
def notify_app(appname, host, ntype, state, info):
    """Queue an application event in Redis and route the alert.

    If the host the app runs on is currently marked down, a host DOWN alert
    is sent to the app's contacts instead of the application alert.
    """
    # 1. Build the application notification record and queue it.
    dd = {}
    dd['time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    dd['host'] = host
    dd['appname'] = appname
    dd['type'] = ntype
    dd['state'] = state
    dd['information'] = info
    settings.REDIS_DB.rpush('notification', json.dumps(dd))
    # 2. Trigger the alert.
    # Check the host state: if the host is down, send the host event to the
    # app's contacts; otherwise send the normal application alert.
    app = None
    try:
        app = AppService.objects.get(app_name=appname)
    except BaseException:
        # FIXME
        return
    if app.is_alarm == 1 :
        status = settings.REDIS_DB.hget('host_alive_' + host, 'current_status')
        host_ip = settings.REDIS_DB.hget('host_ip', host)
        if status is None or status == 'UP':
            alert_app(appname, host_ip, ntype, state, info)
        else:
            alert_host4app(appname, host_ip, 'PROBLEM', 'DOWN')
def alert_app(appname, host_ip, ntype, state, info):
    """Fan an application alert out to the app's (and its parent's) contacts."""
    app = None
    try:
        app = AppService.objects.get(app_name = appname)
    except BaseException:
        # Unknown application: nobody to notify.
        return
    email_list = change(app.email_list)
    mobile_list = change(app.mobile_list)
    # Current policy: a child app inherits its parent app's alert contacts.
    # Look up the parent app (single inheritance).
    rel_list = AppRelation.objects.filter(child_app=app)
    for item in rel_list:
        email_list.extend(change(item.parent_app.email_list))
        mobile_list.extend(change(item.parent_app.mobile_list))
    if email_list:
        alert_app_mail(appname, host_ip, ntype, state, info, email_list)
    # Currently only CRITICAL state triggers an SMS alert.
    if state == 'CRITICAL' and mobile_list:
        alert_app_sms(appname, host_ip, ntype, state, mobile_list)
# Application alert by e-mail.
def alert_app_mail(appname, host_ip, notify_type, state, info, email_list):
    """Compose the alert subject/body and mail it to `email_list`."""
    subject = gen_subject(appname, host_ip, notify_type, state)
    content = gen_mail_content(appname, host_ip, notify_type, state, info)
    # Turn literal "\n" sequences in the payload into real newlines.
    content = content.replace('\\n','\n')
    if email_list:
        send_mail(u"技术支持中心--运维监控中心", email_list, subject, content)
# Application alert by SMS.
def alert_app_sms(appname, host_ip, notify_type, state, mobile_list):
    """Send the one-line alert summary as an SMS to `mobile_list`."""
    message = gen_subject(appname, host_ip, notify_type, state)
    if mobile_list:
        sms(mobile_list, message)
def gen_subject(appname, host_ip, notify_type, state):
    """Build the one-line alert summary used as mail subject and SMS text."""
    return "***%s Service Alert: %s / %s is %s***" % (
        notify_type, host_ip, appname, state)
def gen_mail_content(appname, host_ip, notify_type, state, info):
    """Build the plain-text body of an application alert e-mail."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    lines = [
        "Notification Type: %s\n" % notify_type,
        "Service: %s\n" % appname,
        "Host: %s\n" % host_ip,
        "State: %s\n" % state,
        "Date/Time: %s\n" % timestamp,
        "\n",
        "Additional Info:\n",
        # Keep the literal "null" placeholder when no extra info was given.
        info if info else "null",
    ]
    return ''.join(lines)
|
[
"="
] |
=
|
acdf4977672dbda2425b7f78c305dc1d7776bfe6
|
0c12d94714546d38266314f4858fa484136f02dc
|
/fluent_utils/django_compat/django14.py
|
8e20d3f46f6f4a1ba1d1a2fc5e03abde5af259fc
|
[
"Apache-2.0"
] |
permissive
|
benkonrath/django-fluent-utils
|
a63504dbccd3e21b3c88b04665f48f4721848d40
|
5e90dfe5f7b22ca2c2fe942c304e51981e170ba6
|
refs/heads/master
| 2021-01-01T04:04:01.452357 | 2017-05-22T11:10:44 | 2017-05-22T11:10:44 | 97,116,253 | 0 | 0 | null | 2017-07-13T11:43:06 | 2017-07-13T11:43:06 | null |
UTF-8
|
Python
| false | false | 1,084 |
py
|
# The timezone support was introduced in Django 1.4; fall back to the
# standard library for Django 1.3.
try:
    from django.utils.timezone import now, utc
except ImportError:
    # Django < 1.4
    from datetime import datetime
    now = datetime.now
    utc = None  # datetime(..., tzinfo=utc) creates a naive datetime this way.
# URL helpers moved in Django 1.4.
try:
    # Django 1.6 requires this import location.
    from django.conf.urls import url, include
except ImportError:
    # Django 1.3 compatibility, kept in minor release.
    from django.conf.urls.defaults import patterns, url, include
else:
    try:
        from django.conf.urls import patterns  # Django 1.9-
    except ImportError:
        # patterns() was removed in Django 1.10; emulate its behavior:
        # apply the common prefix to each entry and return the flat list.
        from django.core.urlresolvers import RegexURLPattern
        def patterns(prefix, *args):
            pattern_list = []
            for t in args:
                if isinstance(t, (list, tuple)):
                    t = url(prefix=prefix, *t)
                elif isinstance(t, RegexURLPattern):
                    t.add_prefix(prefix)
                pattern_list.append(t)
            return pattern_list
|
[
"[email protected]"
] | |
9854c422ac2f90220f55dde8a71086e0fc9de277
|
4c91490373be8867d47a01a181a8257e503236e1
|
/Level 1/두개 뽑아서 더하기.py
|
1cb9ae1671f978983f0ca5fb93453b00662f303a
|
[] |
no_license
|
dohee479/PROGRAMMERS
|
bc1b9d8ad4841e96f8dbe650cb1e70064e4a1bd5
|
398261d5c340de5b0135c4bad6858d925c94482d
|
refs/heads/master
| 2023-03-23T17:28:35.191331 | 2021-03-12T16:53:47 | 2021-03-12T16:53:47 | 298,023,004 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
def backtrack(numbers, index, cnt, m, result):
    """Accumulate sums of `m` elements of `numbers` chosen at increasing
    indices starting from `index`; finished sums go into the global
    `answer` set.

    NOTE(review): the `visited` bookkeeping marks indices 0..i before each
    recursive call and then clears *all* marks afterwards; since the
    recursion only moves forward through `index`, these marks look
    redundant — confirm before relying on them.
    """
    global visited
    if cnt == m:
        answer.add(result)
        return
    for i in range(index, len(numbers)):
        if visited[i]:
            continue
        for j in range(i + 1):
            visited[j] = 1
        backtrack(numbers, i + 1, cnt + 1, m, result + numbers[i])
        for j in range(len(numbers)):
            visited[j] = 0
def solution(numbers):
    """Return the sorted distinct sums of every pair of elements.

    Replaces the original global-state backtracking (module-level `answer`
    and `visited` shared across calls) with a direct enumeration of index
    pairs — same result, no globals, and repeat calls are independent.
    """
    from itertools import combinations
    return sorted({a + b for a, b in combinations(numbers, 2)})
print(solution(([2,1,3,4,1])))
|
[
"[email protected]"
] | |
536e71273f02e7028fe26d6242db9bada2af0dcc
|
7a4da5ec2196bf975a9e6115846244788b36b952
|
/3.7.0/lldb-3.7.0.src/test/macosx/safe-to-func-call/TestSafeFuncCalls.py
|
73ae892dc7571294845b8570857cc0d81492d0aa
|
[
"NCSA",
"MIT"
] |
permissive
|
androm3da/clang_sles
|
ca4ada2ec85d625c65818ca9b60dcf1bc27f0756
|
2ba6d0711546ad681883c42dfb8661b842806695
|
refs/heads/master
| 2021-01-10T13:50:25.353394 | 2016-03-31T21:38:29 | 2016-03-31T21:38:29 | 44,787,977 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,887 |
py
|
"""Test function call thread safety."""
import os, time
import unittest2
import lldb
import lldbutil
from lldbtest import *
class TestSafeFuncCalls(TestBase):
    """Check SBThread.SafeToCallFunctions() on the inferior's two threads:
    calling functions must be reported safe on the main thread and unsafe
    on the select thread."""
    mydir = TestBase.compute_mydir(__file__)
    @skipUnlessDarwin
    @python_api_test
    @dsym_test
    def test_with_dsym_and_python_api(self):
        """Test function call thread safety."""
        self.buildDsym()
        self.function_call_safety_check()
    @skipUnlessDarwin
    @python_api_test
    @dwarf_test
    def test_with_dwarf_and_python_api(self):
        """Test function call thread safety."""
        self.buildDwarf()
        self.function_call_safety_check()
    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line numbers that we will step to in main:
        self.main_source = "main.c"
    def check_number_of_threads(self, process):
        self.assertTrue(process.GetNumThreads() == 2, "Check that the process has two threads when sitting at the stopper() breakpoint")
    def safe_to_call_func_on_main_thread (self, main_thread):
        self.assertTrue(main_thread.SafeToCallFunctions() == True, "It is safe to call functions on the main thread")
    def safe_to_call_func_on_select_thread (self, select_thread):
        self.assertTrue(select_thread.SafeToCallFunctions() == False, "It is not safe to call functions on the select thread")
    def function_call_safety_check(self):
        """Test function call safety checks"""
        exe = os.path.join(os.getcwd(), "a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        self.main_source_spec = lldb.SBFileSpec (self.main_source)
        # Stop the inferior at stopper(), where both threads exist.
        break1 = target.BreakpointCreateByName ("stopper", 'a.out')
        self.assertTrue(break1, VALID_BREAKPOINT)
        process = target.LaunchSimple (None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)
        threads = lldbutil.get_threads_stopped_at_breakpoint (process, break1)
        if len(threads) != 1:
            self.fail ("Failed to stop at breakpoint 1.")
        self.check_number_of_threads(process)
        # Pick out the two threads by name; start with invalid placeholders.
        main_thread = lldb.SBThread()
        select_thread = lldb.SBThread()
        for idx in range (0, process.GetNumThreads()):
            t = process.GetThreadAtIndex (idx)
            if t.GetName() == "main thread":
                main_thread = t
            if t.GetName() == "select thread":
                select_thread = t
        self.assertTrue(main_thread.IsValid() and select_thread.IsValid(), "Got both expected threads")
        self.safe_to_call_func_on_main_thread (main_thread)
        self.safe_to_call_func_on_select_thread (select_thread)
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Ensure the debugger library is torn down when the test process exits.
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
|
[
"[email protected]"
] | |
003b0869325795d0e20613fbcd858c7435600cdd
|
d5ad13232e3f1ced55f6956bc4cbda87925c8085
|
/RNAseqMSMS/8-snv-virus-sv/1-integration.py
|
28de0f65de0b94bbf61a878cf425349690bcfd78
|
[] |
no_license
|
arvin580/SIBS
|
c0ba9a8a41f59cb333517c286f7d80300b9501a2
|
0cc2378bf62359ec068336ea4de16d081d0f58a4
|
refs/heads/master
| 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,646 |
py
|
### [SNV, Virus, Deletion, Duplication, Inversion, Translocation]
# Per-gene evidence flags: D[gene][slot] == 1 if that evidence type was seen.
D = {}

# One entry per input file: (filename, tab-column holding the gene name,
# flag slot in the 6-element vector). The SNV summary keeps the gene in
# column 1; all other files keep it in column 0.
_SOURCES = [
    ('sum_snv.exome_summary.overall.filter', 1, 0),
    ('ERR0498-04-05.unmapped.unique.human-viruse-checked-human-gene3', 0, 1),
    ('split-mapped-deletion.gene', 0, 2),
    ('split-mapped-duplication.gene', 0, 3),
    ('split-mapped-inversion.gene', 0, 4),
    ('split-mapped-translocation.gene', 0, 5),
]

for fname, col, slot in _SOURCES:
    # `with` replaces the original open()/close() pairs and guarantees the
    # handle is closed even on error.
    with open(fname) as inFile:
        for line in inFile:
            fields = line.strip().split('\t')
            gene = fields[col]
            D.setdefault(gene, [0, 0, 0, 0, 0, 0])
            D[gene][slot] = 1

# Rank genes by how many evidence types support them, most first.
# (The original used `d = D.items(); d.sort(cmp=...)`, which is Python 2
# only: dict.items() is a view and sort(cmp=...) no longer exists.)
d = sorted(D.items(), key=lambda item: sum(item[1]), reverse=True)

with open('HeLa-Gene-SNV-Virus-Deletion-Duplication-Inversion-Translocation', 'w') as ouFile:
    for gene, flags in d:
        ouFile.write(gene + '\t' + '\t'.join(str(x) for x in flags) + '\n')
|
[
"[email protected]"
] | |
9c1ebff143583caed72eccb5483e60f02ed4113a
|
93ca62f2fb727c20f1fc5c8d81b7134877e85c6e
|
/Refactoring/refactoring_3_abstraction.py
|
e0146c6f647d0d2e91a1e1af6a47ef2590df21a1
|
[] |
no_license
|
lily48/oop-python
|
f607e954d50236f6f8d7844f1c4e29b38c0dbfae
|
b009cd4477a5045de115e44aa326923a32ba7c1c
|
refs/heads/master
| 2023-01-19T03:39:22.726179 | 2020-11-18T20:38:14 | 2020-11-18T20:38:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,016 |
py
|
"""
Simulate a simple board game.
There are 2 players.
Each player takes turn rolling a die and moving that number of spaces.
The first person to space 100 wins.
"""
import random
class Player:
    """A participant that accumulates die rolls toward a score of 100."""

    def __init__(self, player_number):
        self.player_number = player_number
        self.score = 0

    def roll_die(self):
        """Roll one six-sided die, announce the result and return it."""
        pips = random.randint(1, 6)
        print(f'{self} rolled a {pips}')
        return pips

    def make_move(self):
        """Take one turn: roll once and add the result to the score."""
        self.score = self.score + self.roll_die()
        print(f'{self}: {self.score}')

    @property
    def has_won(self):
        """True once the score has reached 100."""
        return self.score >= 100

    def __str__(self):
        return f'Player {self.player_number}'
def play_game(num_players=2):
    """Run turns round-robin until a player reaches 100, then announce them."""
    roster = [Player(n + 1) for n in range(num_players)]
    while True:
        for contender in roster:
            contender.make_move()
            if contender.has_won:
                print(f'{contender} wins!')
                return
if __name__ == '__main__':
    # Demo run with three players instead of the default two.
    play_game(num_players=3)
|
[
"[email protected]"
] | |
5c95a60905b7a7bda2099078018e15544b41824c
|
473625e02c757fd9f9ba58624aa84551280611e3
|
/store/migrations/0010_icat.py
|
d484e1b5289f8c78f96cc0fa8a5342cbe38026ee
|
[] |
no_license
|
rohitrajput-42/PortalK
|
1a15cd182b252de459acc950eb87d3837d7e6ff4
|
a5647b560d850c650c9cefae30a43bc7424d188b
|
refs/heads/main
| 2023-01-21T08:50:06.614440 | 2020-11-27T12:58:57 | 2020-11-27T12:58:57 | 314,314,573 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
# Generated by Django 3.1.2 on 2020-11-15 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `Icat` model, a simple
    # (id, name) lookup table.
    dependencies = [
        ('store', '0009_joblist_lcat'),
    ]
    operations = [
        migrations.CreateModel(
            name='Icat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
d184179a81f11f57b1cdd2a8e64a7a8ee95a2bd2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2667/60755/240482.py
|
d1424016496d8a183f3003a82a8271c426557281
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
# For each of the given test cases, read "a b" and print 2**b - a.
case_count = int(input())
answers = []
for _ in range(case_count):
    parts = input().split(" ")
    answers.append(2 ** int(parts[1]) - int(parts[0]))
for value in answers:
    print(value)
|
[
"[email protected]"
] | |
3808964e73e804ea86f5c9f6bb724678bd097437
|
8163d8f03aea22cb4fa1e60d809781049fff4bb4
|
/relationship/first/migrations/0001_initial.py
|
f5297a9e109a0d6e57feaf3aac287ec150b66734
|
[] |
no_license
|
shubham454/Django-Devlopment
|
694b973d31a82d2ded11f95138bd766130d7d3c9
|
43a2c3b98dbe9f582f2394fcfb3beb133c37b145
|
refs/heads/master
| 2022-12-04T14:34:05.093402 | 2020-08-13T18:35:33 | 2020-08-13T18:35:33 | 287,353,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 921 |
py
|
# Generated by Django 2.2.2 on 2019-12-30 05:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates `Language` and `Framework`,
    # where each Framework points at its Language via a cascading FK.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lname', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Framework',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fname', models.CharField(max_length=50)),
                ('flanguage', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first.Language')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
a5745b3b97a4ed742e0b12d53ace7eda3fbebed1
|
5b25362295262504a56dcbac10a40226bdc18bba
|
/libapp/migrations/0012_user_notification.py
|
8d2e484be18e0f6780273648fc8161dc05ae32f1
|
[
"MIT"
] |
permissive
|
Nyagah-Tech/library
|
2fef2c990b7992bb3b311dfb1502b4d2124494ac
|
2ae1df4e89257c9c7d7f2328ab6c5f6352867997
|
refs/heads/master
| 2022-12-14T14:31:54.440553 | 2020-05-29T14:18:59 | 2020-05-29T14:18:59 | 235,101,527 | 0 | 0 |
MIT
| 2022-12-08T03:43:09 | 2020-01-20T12:56:42 |
Python
|
UTF-8
|
Python
| false | false | 881 |
py
|
# Generated by Django 2.2.8 on 2020-01-24 05:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    # Auto-generated migration: adds `User_notification` with an HTML body,
    # an auto-set creation timestamp, and a cascading FK to the user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('libapp', '0011_borrowing_notification'),
    ]
    operations = [
        migrations.CreateModel(
            name='User_notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('notification', tinymce.models.HTMLField()),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                ('posted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
e958a6f04aa7711889b4247d69ccf74e4f61fd27
|
d042c1d3b3d62264bc93d9f8e0b9f57f85e24b62
|
/Python3/04_RegularExpression/re.py
|
fd80656fb1ddefc5d0daa6abc0468d0b75463bb3
|
[] |
no_license
|
FadelBerakdar/OSTpythonCourse
|
f1867c1a329d4041914cbdeaa869a8db41ec4625
|
3f1740fd21f341490856a9e434154e049d19933a
|
refs/heads/master
| 2016-08-12T23:24:22.765256 | 2016-02-24T13:39:58 | 2016-02-24T13:39:58 | 52,438,301 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,034 |
py
|
"""
______________________________________________________________________
| Regular Expression |
|______________________________________________________________________|
| . | any character except new line \n |
| \w | any unicode word character Aa1_ |
| \W | anything that isn't unicdoe character |
| \s | any white space, tabs, and newlines |
| \S | anything that isn't while space |
| \d | any number from 0 to 9 |
| \D | anything that isn't number |
| \b | any word boundaries "edges of a word" |
| \B | anything that isn't word boundaries |
| \+ | to escape a character +)({}#@!%^&*- |
|______________________________________________________________________|
| {3} | something that occurs exactly three times |
| {,3} | something that occurs 0 to three times |
| {2,3} | something that occurs two to three times |
| ? | something that occurs 0 or one time |
| * | something that occurs at least one time |
| + | something that occurs at least once |
|_______|______________________________________________________________|
|[aple] | apple |
| [a-z] | any lowercase letters from a to z |
| [^2] | anythin that isn't 2 |
| [^\t] | ignore tap character |
|_______|______________________________________________________________|
| ^ | begining of new line |
| $ | the end of the line |
| () | group |
| ?p< >| group name |
| r"" | we have to use r"" to avoid using \\, so we use raw string r |
|_______|______________________________________________________________|
print(r"\tb")
print("\tb")
group() | Returns the entire matched string.
555-555-5555
start()
Returns the start index of the match.
0
end()
Returns the end index of the match.
12
span()
Returns a tuple with the start and end indexes of the match.
(0,12)
"""
def ccn_safety(string):
    """Mask a 16-digit card number, keeping only the last four digits.

    "1234-5678-9012-3456" -> "XXXX-XXXX-XXXX-3456"; text without a card
    number is returned unchanged.

    Fix: the original pattern used an unnamed "(?P...)" group and an
    incomplete group reference in the replacement, both of which raise
    re.error at runtime; the capture group is now properly named.
    """
    pattern = r"(?:\d{4}-){3}(?P<last4>\d{4})"
    return re.sub(pattern, r"XXXX-XXXX-XXXX-\g<last4>", string)
# Sample inputs: digit strings vs. uppercase-letter strings.
a = "4444"
b = " 4444"
c = "asda 4444"
d = "AAC"
e = "AC"
f = "1AC"
import re
pattern = r"^\d{4}$"
# NOTE: the rebinding below means only the two-uppercase-letter pattern is
# actually exercised by the loop; the four-digit pattern is shadowed.
pattern = r"^[A-Z]{2}$"
for ob in (a, b, c, d, e, f):
    print(bool(re.search(pattern,ob)))
|
[
"[email protected]"
] | |
928297c5bd91bcea7bfd13a8298215620b62700a
|
5b3bf81b22f4eb78a1d9e801b2d1d6a48509a236
|
/leetcode/1010.py
|
840fc1be71784ae1cc6aede7f3ce2feac361d083
|
[] |
no_license
|
okoks9011/problem_solving
|
42a0843cfdf58846090dff1a2762b6e02362d068
|
e86d86bb5e3856fcaaa5e20fe19194871d3981ca
|
refs/heads/master
| 2023-01-21T19:06:14.143000 | 2023-01-08T17:45:16 | 2023-01-08T17:45:16 | 141,427,667 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
from collections import defaultdict
class Solution:
    def numPairsDivisibleBy60(self, time: List[int]) -> int:
        """Count index pairs i < j with (time[i] + time[j]) divisible by 60."""
        remainders = defaultdict(int)
        for t in time:
            remainders[t % 60] += 1
        # Remainders r and 60-r pair up for 1 <= r <= 29 ...
        pairs = sum(remainders[r] * remainders[60 - r] for r in range(1, 30))
        # ... while 0 and 30 pair with themselves: choose 2 of a kind.
        pairs += remainders[0] * (remainders[0] - 1) // 2
        pairs += remainders[30] * (remainders[30] - 1) // 2
        return pairs
|
[
"[email protected]"
] | |
3f0ceb8350bfe0a17cf4877ecffca5c89455cb04
|
f11600b9a256bf6a2b584d127faddc27a0f0b474
|
/easy/1566.py
|
e1fd77a95b8a19432432804e090514e518bd9f8c
|
[] |
no_license
|
longhao54/leetcode
|
9c1f0ce4ca505ec33640dd9b334bae906acd2db5
|
d156c6a13c89727f80ed6244cae40574395ecf34
|
refs/heads/master
| 2022-10-24T07:40:47.242861 | 2022-10-20T08:50:52 | 2022-10-20T08:50:52 | 196,952,603 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 992 |
py
|
class Solution:
    def containsPattern(self, arr: List[int], m: int, k: int) -> bool:
        """Return True if some length-m pattern repeats k times consecutively.

        Fixes two defects in the original:
        - the `m * k == len(arr)` shortcut required *all* elements to equal
          arr[0] (wrongly rejecting e.g. arr=[1,2,1,2], m=2, k=2);
        - the scan compared the candidate window against slices starting at
          `index`, ignoring `start`, so matches could be reported for windows
          that never actually repeat in place.
        """
        for start in range(len(arr) - m * k + 1):
            window = arr[start:start + m]
            # The pattern repeats k times iff the window tiled k times
            # reproduces the next m*k elements exactly.
            if window * k == arr[start:start + m * k]:
                return True
        return False
# A faster variant: instead of materializing the repeated candidate list and
# comparing slices, compare element-by-element against the leading window.
class Solution:
    def containsPattern(self, arr: List[int], m: int, k: int) -> bool:
        """Return True if some length-m pattern repeats k times consecutively."""
        n = len(arr)
        if n < m * k:
            return False
        # For each start, positions start..start+m*k-1 must cycle with
        # period m, i.e. arr[start+i] == arr[start + i % m] for all i.
        for start in range(n - m * k + 1):
            if all(arr[start + i] == arr[start + i % m] for i in range(m * k)):
                return True
        return False
|
[
"[email protected]"
] | |
ee6f15e3182bc0c650262c869b4aa170fc6f416d
|
40f8107fdd2afa1f9c41d4d02b32298258bd3ae7
|
/src/app/cache.py
|
bdc0c3b9d8a36c32ac9b7ea12af8a10dacd4439c
|
[
"Apache-2.0"
] |
permissive
|
ConvergeTP/von_tails
|
91f62e722325e1a0845e766359dae94de13076d3
|
98ce984b001cd09005b6496ce10687588def53ef
|
refs/heads/master
| 2020-05-30T23:18:30.532496 | 2019-08-21T14:30:58 | 2019-08-21T14:30:58 | 170,901,819 | 0 | 0 |
Apache-2.0
| 2019-04-02T18:01:45 | 2019-02-15T17:18:29 |
Python
|
UTF-8
|
Python
| false | false | 705 |
py
|
"""
Copyright 2017-2019 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from aiocache import SimpleMemoryCache
MEM_CACHE = SimpleMemoryCache()
|
[
"[email protected]"
] | |
caa1bab989647808316c09990fc13f5e713b386c
|
21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5
|
/Bits/nextNumber/getNext.py
|
6ebc140b20a361fff4350f249e1d6384893f8d31
|
[] |
no_license
|
rongfeng-china/python-algorithms-and-data-structures
|
eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04
|
a69241bb7b684bc7d00acdd46c2fc214f7b61887
|
refs/heads/master
| 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 890 |
py
|
# Given a positive number, print the next smallest and the next
# largest number that have the same number of 1 bits in their
# binary representation.
def getNext(n):
    """Return the next larger integer with the same number of set bits as n,
    or -1 when no such 31-bit value exists (n of the form 111...1100...0)."""
    temp = n
    trailing_zeros = 0  # zeros to the right of the first non-trailing 0
    ones_run = 0        # ones immediately left of those zeros
    while temp != 0 and (temp & 1) == 0:
        trailing_zeros += 1
        temp >>= 1
    while (temp & 1) == 1:
        ones_run += 1
        temp >>= 1
    flip_pos = trailing_zeros + ones_run  # position of first non-trailing 0
    # If n = 111...1100...000, there is no bigger number with the same
    # number of set bits (within 31 bits).
    if flip_pos == 31 or flip_pos == 0:
        return -1
    n |= (1 << flip_pos)              # flip the rightmost non-trailing zero
    n &= ~((1 << flip_pos) - 1)       # clear all bits to its right
    n |= (1 << (ones_run - 1)) - 1    # insert (ones_run - 1) ones at the bottom
    return n
|
[
"[email protected]"
] | |
621a9809e8f9a0c711fccec07ffb4f43131cc423
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/c19541276c8f809733d4587fdcc04a8c7add54b3-<draw_text>-bug.py
|
4b03e81403d0a80992ec3ff20502160971ac0508
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,744 |
py
|
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
    """Emit PGF markup to self.fh that draws text `s` at (x, y).

    gc    -- graphics context; its alpha and RGB color are honored
    x, y  -- position in display units (converted to inches via figure dpi)
    s     -- the text, TeX-escaped and prefixed with font commands
    prop  -- font properties, rendered to a command string
    angle -- rotation in degrees
    mtext -- originating Text object, used for anchor-based alignment
    """
    s = common_texification(s)
    # Apply font configuration and, below, optional alpha/color overrides.
    prop_cmds = _font_properties_str(prop)
    s = ('%s %s' % (prop_cmds, s))
    writeln(self.fh, '\\begin{pgfscope}')
    alpha = gc.get_alpha()
    if (alpha != 1.0):
        writeln(self.fh, ('\\pgfsetfillopacity{%f}' % alpha))
        writeln(self.fh, ('\\pgfsetstrokeopacity{%f}' % alpha))
    rgb = tuple(gc.get_rgb())[:3]
    if (rgb != (0, 0, 0)):
        writeln(self.fh, ('\\definecolor{textcolor}{rgb}{%f,%f,%f}' % rgb))
        writeln(self.fh, '\\pgfsetstrokecolor{textcolor}')
        writeln(self.fh, '\\pgfsetfillcolor{textcolor}')
        s = ('\\color{textcolor}' + s)
    f = (1.0 / self.figure.dpi)  # display units -> inches for \pgftext
    text_args = []
    if (mtext and (((angle == 0) or (mtext.get_rotation_mode() == 'anchor')) and (mtext.get_va() != 'center_baseline'))):
        # Align the text by its anchor point as specified on the Text object.
        (x, y) = mtext.get_transform().transform_point(mtext.get_position())
        text_args.append(('x=%fin' % (x * f)))
        text_args.append(('y=%fin' % (y * f)))
        halign = {
            'left': 'left',
            'right': 'right',
            'center': '',
        }
        valign = {
            'top': 'top',
            'bottom': 'bottom',
            'baseline': 'base',
            'center': '',
        }
        text_args.append(halign[mtext.get_ha()])
        text_args.append(valign[mtext.get_va()])
    else:
        # Fall back to the supplied (x, y), left/baseline anchored.
        text_args.append(('x=%fin' % (x * f)))
        text_args.append(('y=%fin' % (y * f)))
        text_args.append('left')
        text_args.append('base')
    if (angle != 0):
        text_args.append(('rotate=%f' % angle))
    writeln(self.fh, ('\\pgftext[%s]{%s}' % (','.join(text_args), s)))
    writeln(self.fh, '\\end{pgfscope}')
|
[
"[email protected]"
] | |
3b96e5ce191ac951020d3af07454faec70bbb18a
|
6879a8596df6f302c63966a2d27f6b4d11cc9b29
|
/abc/problems030/021/b.py
|
8a85c8e452bae3d0deb92785c327dd6922669e59
|
[] |
no_license
|
wkwkgg/atcoder
|
41b1e02b88bf7a8291b709306e54cb56cb93e52a
|
28a7d4084a4100236510c05a88e50aa0403ac7cd
|
refs/heads/master
| 2020-07-26T03:47:19.460049 | 2020-03-01T18:29:57 | 2020-03-01T18:29:57 | 208,523,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 165 |
py
|
# Answer is YES iff A, B and the K intermediate stops are pairwise distinct
# (then the set of stops plus endpoints has exactly len(P) + 2 members).
N = int(input())
A, B = map(int, input().split())
K = int(input())
P = list(map(int, input().split()))
distinct = len(set(P + [A, B]))
print("YES" if distinct == len(P) + 2 else "NO")
|
[
"[email protected]"
] | |
82449f43a77d7008703082bf0d83768860297c65
|
bd48e8af13abb5a8574b47ea3337e64a45e8f672
|
/nanum/search/apis.py
|
c9621326855333e4e5c41e1bd2a515cdc0b21840
|
[] |
no_license
|
markui/nanum-project
|
d221cacfaed9d6e2e882f3d4f29dc77055a4e97b
|
399064b62a7c8049b37efd77a98f17a903754070
|
refs/heads/master
| 2021-09-08T08:03:30.667750 | 2018-01-09T07:06:11 | 2018-01-09T07:06:11 | 110,780,979 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,510 |
py
|
from rest_framework import generics, permissions
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from topics.models import Topic
from topics.serializers import TopicSerializer
from . import search
class TopicSearchAPIView(generics.RetrieveAPIView):
    """Return all topics whose name contains the `name` query parameter."""
    queryset = Topic.objects.all()
    serializer_class = TopicSerializer
    # BUG FIX: IsAuthenticated is a *permission* class. It was previously
    # placed in `authentication_classes`, so the intended login requirement
    # was never actually enforced by DRF.
    permission_classes = (
        permissions.IsAuthenticated,
    )

    def retrieve(self, request, *args, **kwargs):
        """Return {"result": [serialized topics]} or a "no results" message;
        raises ParseError (400) when `name` is missing or empty."""
        query_params = self.request.query_params
        topic_name = query_params.get("name", None)
        if not topic_name:
            raise ParseError(detail={"error": "name 필드가 비어있습니다."})
        queryset = Topic.objects.filter(name__contains=topic_name)
        if not queryset:
            return Response({"result": "결과가 없습니다."})
        serializer = self.get_serializer(queryset, many=True)
        result = {"result": serializer.data}
        return Response(result)
class SearchAPIView(APIView):
    """Free-form search endpoint backed by the `search` module."""
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request, format=None):
        """
        Run a search for the `query` query parameter and return the raw
        result from the search module; raises ParseError (400) when
        `query` is missing or empty.
        """
        query_params = self.request.query_params
        query = query_params.get("query", None)
        if not query:
            raise ParseError({"error": "query 필드가 비어있습니다."})
        result = search.search(query)
        return Response(result)
|
[
"[email protected]"
] | |
9b9afbb047cf6727bb42595fed496738377aa805
|
64c6134c2873ded7e84b93f10162fb6f27f25139
|
/PPPDebug.py
|
30ce0f3868cb09886d2cbb64b184695648871941
|
[
"BSD-2-Clause"
] |
permissive
|
umd-lhcb/UT-Aux-mapping
|
1c22e1aec6eeefaa9d54f0cc48486a8162784c99
|
69f611f133ddcf1df18a9256c9ba1e9a577c1019
|
refs/heads/master
| 2022-01-19T11:54:26.101859 | 2022-01-09T04:31:49 | 2022-01-09T04:31:49 | 162,521,821 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,246 |
py
|
#!/usr/bin/env python
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Fri May 28, 2021 at 03:43 AM +0200
from pathlib import Path
from itertools import permutations
from collections.abc import Iterable
from pyUTM.io import WirelistNaiveReader, PcadNaiveReader
from UT_Aux_mapping.const import input_dir
from UT_Aux_mapping.helpers import ppp_netname_regulator
#####################
# Read all netlists #
#####################

# Maps netlist variant name -> parsed netlist data.
netlists = {}


def read_net(path, name, ext='wirelist', reader=WirelistNaiveReader):
    """Parse the netlist file ``<path>/<name>.<ext>`` with ``reader``."""
    loc_reader = reader(path / Path(name+'.'+ext))
    return loc_reader.read()


# PPP (true/mirror) variants are wirelist exports...
ppp_vars = ['c_true_ppp_mag', 'c_mirror_ppp_mag']
netlists.update({k: read_net(input_dir, k) for k in ppp_vars})

# ...while the P2B2 variants are PCAD ``.net`` exports.
p2b2_vars = ['true_p2b2', 'mirror_p2b2']
netlists.update({k: read_net(input_dir, k, 'net', PcadNaiveReader)
                 for k in p2b2_vars})
##########
# Checks #
##########

# Maps variant name -> list of (normalized) net names for comparison.
netnames = {}
def flatten(iterable, depth=0, max_depth=-1):
    """Recursively flatten *iterable* into a flat list.

    Strings are treated as atoms.  When the current nesting *depth*
    equals *max_depth*, nested iterables are kept as-is instead of being
    expanded; the default ``max_depth=-1`` never matches, i.e. flatten
    completely.
    """
    flat = []
    for element in iterable:
        expandable = isinstance(element, Iterable) and not isinstance(element, str)
        if expandable and depth != max_depth:
            flat.extend(flatten(element, depth + 1, max_depth))
        else:
            flat.append(element)
    return flat
def uniq_elems(l1, l2):
    """Return the elements of *l1* absent from *l2*, preserving order."""
    return list(filter(lambda item: item not in l2, l1))
def print_uniq(uniq_d):
    """Print every non-empty rule -> net-name-list entry of *uniq_d*."""
    for rule, result in uniq_d.items():
        if not result:
            continue
        print('The following nets are {}:'.format(rule))
        print('\n'.join(result))
        print('')
# Check whether any nets are unique to one PPP variant (names are
# normalized first so true/mirror spellings compare equal).
netnames.update({k: [ppp_netname_regulator(n) for n in netlists[k].keys()]
                 for k in ppp_vars})
uniq_ppp = {'in {} not {}'.format(k1, k2):
                uniq_elems(netnames[k1], netnames[k2])
            for k1, k2 in permutations(ppp_vars, 2)}
print_uniq(uniq_ppp)

# Check nets that are unique to P2B2 (compare each PPP variant against
# its corresponding P2B2 variant, in both directions).
netnames.update({k: [n for n in netlists[k].keys()] for k in p2b2_vars})
uniq_p2b2 = {'in {} not {}'.format(k1, k2):
                uniq_elems(netnames[k1], netnames[k2])
             for k1, k2 in
             flatten(map(permutations, zip(ppp_vars, p2b2_vars)), max_depth=1)}
print_uniq(uniq_p2b2)
|
[
"[email protected]"
] | |
d68fe861a80437aa7df982272ee1d513723f0492
|
69582e48fd7965df3f769c52e27caf0868a09e93
|
/lista1/roberta/questao4.py
|
23e35af4384697af77761db66b666f007e2d6d4d
|
[] |
no_license
|
yurimalheiros/IP-2019-2
|
b591cd48fd8f253dfd17f2f99d5a2327b17432c2
|
25b9e5802709a7491933d62d8300cbd7c3ef177f
|
refs/heads/master
| 2020-09-18T16:59:50.937764 | 2020-02-26T15:42:41 | 2020-02-26T15:42:41 | 224,156,588 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 508 |
py
|
# Alarm-time exercise (set an alarm N hours in the future)
# Author: Roberta de Lima
from datetime import datetime, timedelta

# Static version: starting at 14:00 on 2019-11-03, the alarm rings 51 hours
# later.
print("ALARME")
start = datetime(2019, 11, 3, 14)
ring_at = start + timedelta(hours=51)
print("Sendo 14hrs, daqui a 51hrs o alarme tocará às ", ring_at.strftime("%H:%M "))

# Dynamic version (kept disabled, as in the original exercise):
#tempo = int(input("Digite o tempo para alarme(horas): "))
#hj = datetime.now()
#hrAlarme = hj + timedelta(hours=tempo)
#print("Hora do alarme: ", hrAlarme.strftime("%H:%M %d/%m/%Y"))
|
[
"[email protected]"
] | |
245e30a12a1d2ad46ca40d3018cb3e900a6d25a6
|
24f664aa2344d4f5d5e7b048ac4e85231715c4c8
|
/experimental/dsmith/glsl/generators.py
|
145d0ae0ab124b97f48c357842f95e93fd61bc24
|
[] |
no_license
|
speycode/clfuzz
|
79320655e879d1e0a06a481e8ec2e293c7c10db7
|
f2a96cf84a7971f70cb982c07b84207db407b3eb
|
refs/heads/master
| 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,783 |
py
|
#
# Copyright 2017, 2018 Chris Cummins <[email protected]>.
#
# This file is part of DeepSmith.
#
# DeepSmith is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# DeepSmith is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# DeepSmith. If not, see <http://www.gnu.org/licenses/>.
#
"""
GLSL program generators.
"""
import math
import random
import string
from time import time
from experimental.dsmith.glsl.db import *
from experimental.dsmith.langs import Generator
from labm8.py import fs
class GlslGenerator(Generator):
  """
  Common baseclass for program generators.
  """

  # Abstract methods (must be implemented):

  def generate_one(self, session: session_t) -> ProgramProxy:
    """ Generate a single program. """
    raise NotImplementedError("abstract class")

  # Default methods (may be overriden):

  def __repr__(self):
    # Colorized generator name for log/progress output.
    return f"{Colors.BOLD}{Colors.GREEN}{self.__name__}{Colors.END}"

  def num_programs(self, session: session_t = None) -> int:
    """ return the number of generated programs in the database """
    with ReuseSession(session) as s:
      return (
        s.query(func.count(Program.id))
        .filter(Program.generator == self.id)
        .scalar()
      )

  def sloc_total(self, session: session_t = None) -> int:
    """ return the total linecount of generated programs """
    with ReuseSession(session) as s:
      return (
        s.query(func.sum(Program.linecount))
        .filter(Program.generator == self.id)
        .scalar()
      )

  def generation_time(self, session: session_t = None) -> float:
    """ return the total generation time of all programs """
    with ReuseSession(session) as s:
      # SUM over zero rows yields None, hence the ``or 0`` fallback.
      return (
        s.query(func.sum(Program.generation_time))
        .filter(Program.generator == self.id)
        .scalar()
        or 0
      )

  def num_testcases(self, session: session_t = None) -> int:
    """ return the total number of testcases """
    with ReuseSession(session) as s:
      return (
        s.query(func.count(Testcase.id))
        .join(Program)
        .filter(Program.generator == self.id)
        .scalar()
      )

  def generate(self, n: int = math.inf, up_to: int = math.inf) -> None:
    """ generate 'n' new programs 'up_to' this many exist in db """
    with Session() as s:
      num_progs = self.num_programs(s)

      # Determine the termination criteria:
      if n == math.inf and up_to == math.inf:
        max_value = math.inf
        bar_max = progressbar.UnknownLength
      elif n == math.inf:
        max_value = up_to
        bar_max = max_value
      else:
        max_value = num_progs + n
        bar_max = max_value

      # Exit early if possible:
      if num_progs >= max_value:
        print(
          f"There are already {Colors.BOLD}{num_progs}{Colors.END} "
          "programs in the database. Nothing to be done."
        )
        return

      # Print a preamble message:
      num_to_generate = max_value - num_progs
      if num_to_generate < math.inf:
        # Extrapolate an ETA from the mean generation time so far
        # (max(..., 1) guards against division by zero on an empty DB).
        estimated_time = (
          self.generation_time(s) / max(num_progs, 1)
        ) * num_to_generate
        eta = humanize.Duration(estimated_time)
        print(
          f"{Colors.BOLD}{num_to_generate}{Colors.END} programs are "
          "to be generated. Estimated generation time is "
          + f"{Colors.BOLD}{eta}{Colors.END}."
        )
      else:
        print(f"Generating programs {Colors.BOLD}forever{Colors.END} ...")

      bar = progressbar.ProgressBar(
        initial_value=num_progs, max_value=bar_max, redirect_stdout=True
      )

      # The actual generation loop:
      buf = []
      while num_progs < max_value:
        buf.append(self.generate_one(s))

        # Update progress bar
        num_progs += 1
        bar.update(num_progs)

        # Flush the buffer periodically; dedupe on sha1 and re-read the
        # count from the database so duplicates do not inflate progress.
        if len(buf) >= dsmith.DB_BUF_SIZE:
          save_proxies_uniq_on(s, buf, "sha1")
          num_progs = self.num_programs(s)
          buf = []

      save_proxies_uniq_on(s, buf, "sha1")
      print(
        f"All done! You now have {Colors.BOLD}{num_progs}{Colors.END} "
        f"{self} programs in the database"
      )

  def import_from_dir(self, indir: Path) -> None:
    """ import program sources from a directory """
    with Session() as s:
      start_num_progs = self.num_programs(s)

      # NOTE(review): ``_save`` is never called in this method (the loop
      # below uses save_proxies_uniq_on instead) and it still contains
      # debugging leftovers (app.Warning + sys.exit(0)) that would abort
      # the process if it were ever invoked.  Candidate for removal.
      def _save(proxies):
        # Create records from proxies:
        programs = [proxy.to_record(s) for proxy in proxies]

        app.Warning(getattr(type(programs[0]), "sha1"))
        import sys

        sys.exit(0)
        # Filter duplicates in the set of new records:
        programs = dict(
          (program.sha1, program) for program in programs
        ).values()

        # Fetch a list of dupe keys already in the database:
        sha1s = [program.sha1 for program in programs]
        dupes = set(
          x[0] for x in s.query(Program.sha1).filter(Program.sha1.in_(sha1s))
        )

        # Filter the list of records to import, excluding dupes:
        uniq = [program for program in programs if program.sha1 not in dupes]

        # Import those suckas:
        s.add_all(uniq)
        s.commit()

        nprog, nuniq = len(programs), len(uniq)
        app.Log(1, f"imported {nuniq} of {nprog} unique programs")

      num_progs = self.num_programs(s)

      # Print a preamble message:
      paths = fs.ls(indir, abspaths=True)
      num_to_import = humanize.Commas(len(paths))
      print(
        f"{Colors.BOLD}{num_to_import}{Colors.END} files are " "to be imported."
      )

      bar = progressbar.ProgressBar(redirect_stdout=True)

      # The actual import loop:
      buf = []
      for i, path in enumerate(bar(paths)):
        buf.append(self.import_from_file(s, path))
        if len(buf) >= dsmith.DB_BUF_SIZE:
          save_proxies_uniq_on(s, buf, "sha1")
          buf = []
      save_proxies_uniq_on(s, buf, "sha1")

      num_imported = humanize.Commas(self.num_programs(s) - start_num_progs)
      num_progs = humanize.Commas(self.num_programs(s))
      print(
        f"All done! Imported {Colors.BOLD}{num_imported}{Colors.END} "
        f"new {self} programs. You now have "
        f"{Colors.BOLD}{num_progs}{Colors.END} {self} programs in the "
        "database"
      )

  def import_from_file(
    self, session: session_t, path: Path
  ) -> Union[None, ProgramProxy]:
    """ Import a program from a file. """
    # app.Log(2, f"importing '{path}'")
    # Simply ignore non-ASCII chars:
    src = "".join([i if ord(i) < 128 else "" for i in fs.Read(path).strip()])
    return ProgramProxy(generator=self.id, generation_time=0, src=src)
class RandChar(GlslGenerator):
  """
  Generator that emits a uniformly random sequence of ASCII characters
  of random length.
  """

  __name__ = "randchar"
  id = Generators.RANDCHAR

  # Arbitrary bounds on the number of characters per program.
  charcount_range = (100, 100000)

  def generate_one(self, session: session_t) -> ProgramProxy:
    """ Generate a single program. """
    began = time()
    length = random.randint(*self.charcount_range)
    text = "".join(random.choices(string.printable, k=length))
    elapsed = time() - began
    return ProgramProxy(generator=self.id, generation_time=elapsed, src=text)
class GitHub(GlslGenerator):
  """
  Programs mined from GitHub.
  """

  # No generate_one() override: GitHub corpora are ingested through the
  # inherited import_from_dir() / import_from_file() path.
  __name__ = "github"
  id = Generators.GITHUB
class DSmith(GlslGenerator):
  # DeepSmith's own generator; only identifies the generator here —
  # generation itself is implemented elsewhere.
  __name__ = "dsmith"
  id = Generators.DSMITH
|
[
"[email protected]"
] | |
c7ba61c0a3b5a899b8ee49ba4ba2fc8900cae79b
|
b1f748d761751e89f62cf5b8a2b13adac5bf3a29
|
/setprice/apps.py
|
f40a45dfd11be0925a3a5178812e4788c49ae528
|
[] |
no_license
|
sangeeth-subramoniam/buildingmanagementheroku
|
7b77be693fa73dbd2dff9c816bf50daf1e501029
|
db26de549f7088d2ff80a303abeeaaa548d43e0b
|
refs/heads/master
| 2023-07-08T13:46:06.384694 | 2021-08-10T06:50:14 | 2021-08-10T06:50:14 | 392,492,925 | 0 | 0 | null | 2021-08-04T02:46:57 | 2021-08-04T00:14:10 |
Python
|
UTF-8
|
Python
| false | false | 148 |
py
|
from django.apps import AppConfig
class SetpriceConfig(AppConfig):
    """Django application configuration for the ``setprice`` app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'setprice'
|
[
"[email protected]"
] | |
97a357ea5ffdef6f835f86617addc0cc7f749d5c
|
87d5b21265c381104de8f45aa67842a4adc880eb
|
/257. Binary Tree Paths.py
|
51ff3604e23dc618adedd7bd3014b6538442da6f
|
[] |
no_license
|
MYMSSENDOG/leetcodes
|
ac047fe0d951e0946740cb75103fc94aae967166
|
8a52a417a903a0742034161471a084bc1e494d68
|
refs/heads/master
| 2020-09-23T16:55:08.579319 | 2020-09-03T19:44:26 | 2020-09-03T19:44:26 | 225,543,895 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 865 |
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from tree_node_lib import *
class Solution:
    def binaryTreePaths(self, root: TreeNode):
        """Return every root-to-leaf path as a "v1->v2->..." string.

        Iterative preorder traversal; the right child is pushed first so
        that paths come out in the same left-to-right order a recursive
        DFS would produce.
        """
        if not root:
            return []
        paths = []
        stack = [(root, str(root.val))]
        while stack:
            node, trail = stack.pop()
            if not node.left and not node.right:
                # Leaf reached: the accumulated trail is a complete path.
                paths.append(trail)
                continue
            if node.right:
                stack.append((node.right, trail + "->" + str(node.right.val)))
            if node.left:
                stack.append((node.left, trail + "->" + str(node.left.val)))
        return paths
# Ad-hoc smoke test: the tree [1, 2, 3, None, 5] has the root-to-leaf
# paths "1->2->5" and "1->3".
root = makeTree([1,2,3,None,5])
sol = Solution()
print(sol.binaryTreePaths(root))
|
[
"[email protected]"
] | |
3828a0b1c4613505ab9b4cda45351013c7a86543
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03608/s343365481.py
|
d46c782a99ce70c039b4156e5d7b06c88335bcd8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 644 |
py
|
import itertools
# n towns, m undirected roads, r towns that must be visited.
n, m, r = map(int, input().split())
# Adjacency matrix of edge weights; inf means "no direct road".
d = [[float('inf')]*n for _ in range(n)]
rs = list(map(int, input().split()))
for _ in range(m):
    a, b, t = map(int, input().split())
    a -= 1
    b -= 1
    # Roads are bidirectional (1-indexed in the input).
    d[a][b] = t
    d[b][a] = t
for i in range(n):
    d[i][i] = 0
def warshall_floyd(d):
    """All-pairs shortest paths (Floyd–Warshall), computed in place.

    On entry ``d[i][j]`` is the direct edge weight (``inf`` if absent);
    on return it is the length of the shortest i -> j path.  The matrix
    is mutated and also returned for convenience.
    """
    # BUG FIX: derive the size from the matrix itself instead of relying
    # on the module-level global ``n``, so the function is self-contained.
    size = len(d)
    for k in range(size):
        for i in range(size):
            for j in range(size):
                d[i][j] = min(d[i][j], d[i][k] + d[k][j])
    return d
d = warshall_floyd(d)

# Try every visiting order of the required towns in ``rs`` (1-indexed)
# and keep the cheapest total travel distance between consecutive stops.
ans = float('inf')
for p in itertools.permutations(rs):
    tmp = 0
    for i in range(len(p)-1):
        tmp += d[p[i]-1][p[i+1]-1]
    ans = min(ans, tmp)
print(ans)
|
[
"[email protected]"
] | |
998f5b3d89a07a14d7dc41bd878db07e4902b536
|
6022fcef8175c71e3f9d702fc7098eee2a3eb9ac
|
/game/steam/review.py
|
adc766e91db223836ccb85b430ef0afc1fcb34f9
|
[] |
no_license
|
yxw19870806/Py3Crawler
|
f8fe8d68138dcfe3c63cc086d8b0042c814eab20
|
e79889d0dbc13df90bca29e616ca5024ad2cdf18
|
refs/heads/master
| 2023-08-22T19:19:43.458412 | 2023-05-17T06:15:31 | 2023-05-17T06:15:31 | 139,689,380 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,030 |
py
|
# -*- coding:UTF-8 -*-
"""
获取steam可以发布评测的游戏
https://store.steampowered.com/
@author: hikaru
email: [email protected]
如有问题或建议请联系
"""
import os
from common import *
from game.steam.lib import steam
# Print the list of reviewable store pages.
# print_type 0: all games
# print_type 1: base games only
# print_type 2: DLC only
# print_type 3: only DLC whose base game has already been reviewed
def print_list(apps_cache_data, game_dlc_list, print_type=0):
    """Log the store URL of every app that can still be reviewed."""
    for game_id in apps_cache_data["can_review_lists"]:
        # This app is a DLC (it has a base game in the mapping).
        if game_id in game_dlc_list:
            if print_type == 1:
                continue
            # The base game has not been reviewed yet.
            if game_dlc_list[game_id] in apps_cache_data["can_review_lists"]:
                if print_type == 3:
                    continue
        else:
            if print_type == 2 or print_type == 3:
                continue
        console.log("https://store.steampowered.com/app/%s" % game_id)
def main(check_game=True):
    """Scan owned Steam games and track which ones can still be reviewed.

    Walks the account's played games, fetches each store page (and its
    DLC), and maintains the cached reviewed / can-review bookkeeping.
    When *check_game* is False, only the cached results are printed.
    """
    # Log in / restore the login session.
    steam_class = steam.Steam(need_login=True)

    # Cached per-app review bookkeeping (history).
    apps_cache_data = steam_class.load_cache_apps_info()

    # List of games already checked by a previous run.
    checked_apps_file_path = os.path.join(steam_class.cache_data_path, "review_checked.txt")
    checked_apps_string = file.read_file(checked_apps_file_path)
    if checked_apps_string:
        checked_apps_list = checked_apps_string.split(",")
    else:
        checked_apps_list = []

    # Games removed from the store.
    deleted_app_list = steam_class.load_deleted_app_list()

    # Games whose store profile is restricted.
    restricted_app_list = steam_class.load_restricted_app_list()

    # DLC id -> base game id mapping.
    game_dlc_list = steam_class.load_game_dlc_list()

    # Fetch the complete list of games this account has played.
    try:
        played_game_list = steam.get_account_owned_app_list(steam_class.account_id, True)
    except crawler.CrawlerException as e:
        console.log(e.http_error("个人游戏主页"))
        raise

    if check_game:
        while len(played_game_list) > 0:
            game_id = played_game_list.pop()
            if game_id in deleted_app_list:
                continue
            if game_id in checked_apps_list:
                continue
            console.log("开始解析游戏 %s,剩余数量:%s" % (game_id, len(played_game_list)))

            # Fetch the game's store page data.
            try:
                game_data = steam.get_game_store_index(game_id)
            except crawler.CrawlerException as e:
                console.log("游戏 %s 解析失败,原因:%s" % (game_id, e.message))
                console.log(e.http_error("游戏%s" % game_id))
                continue

            is_change = False
            # The game has been removed from the store.
            if game_data["deleted"]:
                deleted_app_list.append(game_id)
                # Persist the updated deleted-games list.
                steam_class.save_deleted_app_list(deleted_app_list)
            else:
                # If the game has DLC, walk every DLC.
                for dlc_id in game_data["dlc_list"]:
                    # Already reviewed: skip the check.
                    if dlc_id in apps_cache_data["review_list"]:
                        continue
                    # Record the DLC -> base game relationship.
                    if dlc_id not in game_dlc_list:
                        game_dlc_list[dlc_id] = game_id
                        is_change = True
                    # Fetch the DLC's store page data.
                    try:
                        dlc_data = steam.get_game_store_index(dlc_id)
                    except crawler.CrawlerException as e:
                        console.log(e.http_error("游戏%s" % dlc_id))
                        continue
                    if dlc_data["owned"]:
                        # Already reviewed.
                        if dlc_data["reviewed"]:
                            # Drop it from the pending-review list.
                            if dlc_id in apps_cache_data["can_review_lists"]:
                                apps_cache_data["can_review_lists"].remove(dlc_id)
                            # Remember it as reviewed.
                            if dlc_id not in apps_cache_data["review_list"]:
                                apps_cache_data["review_list"].append(dlc_id)
                        # Newly reviewable DLC.
                        else:
                            if dlc_id not in apps_cache_data["can_review_lists"]:
                                apps_cache_data["can_review_lists"].append(dlc_id)

                # The base game itself: already reviewed.
                if game_data["reviewed"]:
                    # Drop it from the pending-review list.
                    if game_id in apps_cache_data["can_review_lists"]:
                        apps_cache_data["can_review_lists"].remove(game_id)
                    # Remember it as reviewed.
                    if game_id not in apps_cache_data["review_list"]:
                        apps_cache_data["review_list"].append(game_id)
                # Newly reviewable game.
                else:
                    if game_id not in apps_cache_data["can_review_lists"]:
                        apps_cache_data["can_review_lists"].append(game_id)

                if is_change:
                    steam_class.save_game_dlc_list(game_dlc_list)

                # The game's store profile is restricted.
                if game_data["restricted"]:
                    if game_id not in restricted_app_list:
                        restricted_app_list.append(game_id)
                        # Persist the updated restricted-games list.
                        steam_class.save_restricted_app_list(restricted_app_list)

            # Mark the game as checked and persist the cache/state.
            steam_class.save_cache_apps_info(apps_cache_data)
            checked_apps_list.append(game_id)
            file.write_file(",".join(checked_apps_list), checked_apps_file_path, const.WriteFileMode.REPLACE)

    # Print the resulting list.
    print_list(apps_cache_data, game_dlc_list)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean exit on Ctrl+C without a traceback.
        pass
|
[
"[email protected]"
] | |
12d4022463c4e14e4a8d07430052771096ea3c82
|
48894ae68f0234e263d325470178d67ab313c73e
|
/sa/profiles/Supertel/K2X/get_arp.py
|
000b19c788f9e60b4779efc75da73afe20e2447b
|
[
"BSD-3-Clause"
] |
permissive
|
DreamerDDL/noc
|
7f949f55bb2c02c15ac2cc46bc62d957aee43a86
|
2ab0ab7718bb7116da2c3953efd466757e11d9ce
|
refs/heads/master
| 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null |
UTF-8
|
Python
| false | false | 2,424 |
py
|
# -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Supertel.K2X.get_arp
##----------------------------------------------------------------------
## Copyright (C) 2007-2014 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import re
## NOC modules
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces import IGetARP
class Script(NOCScript):
    """Supertel K2X "get ARP table" script (implements IGetARP)."""
    name = "Supertel.K2X.get_arp"
    implements = [IGetARP]
    cache = True

    # One row of "show arp" output: an optional leading "VLAN <id>"
    # column, then interface, IP address, MAC address and the
    # Dynamic/Static flag.
    rx_line = re.compile(
        r"^(VLAN\s+\d+|)\s+(?P<interface>\S+)\s+(?P<ip>\S+)\s+"
        r"(?P<mac>\S+)\s+(Dynamic|Static)\s*$",
        re.MULTILINE | re.IGNORECASE)

    def execute(self):
        """Return the ARP table as a list of ip/mac/interface dicts."""
        r = []
        # The triple-quoted block below is deliberately disabled SNMP
        # code: per the original author's note, the SNMP tables sometimes
        # return the VLAN ID instead of the interface name.
        """
        # Try SNMP first
        #
        # Some time return vlan ID, not interface name!!!
        #
        if self.snmp and self.access_profile.snmp_ro:
            try:
                for v in self.snmp.get_tables(["1.3.6.1.2.1.4.22.1.1",
                                               "1.3.6.1.2.1.4.22.1.2",
                                               "1.3.6.1.2.1.4.22.1.3"],
                                              bulk=True):
                    iface = self.snmp.get("1.3.6.1.2.1.31.1.1.1.1." + v[1],
                                          cached=True)
                    mac = ":".join(["%02x" % ord(c) for c in v[2]])
                    ip = ["%02x" % ord(c) for c in v[3]]
                    ip = ".".join(str(int(c, 16)) for c in ip)
                    r.append({
                        "ip": ip,
                        "mac": mac,
                        "interface": iface,
                    })
                return r
            except self.snmp.TimeOutError:
                pass
        """
        # Fallback to CLI
        for match in self.rx_line.finditer(self.cli("show arp", cached=True)):
            mac = match.group("mac")
            if mac.lower() == "incomplete":
                # Unresolved entry: the device knows the IP but has no
                # MAC/interface for it yet.
                r.append({
                    "ip": match.group("ip"),
                    "mac": None,
                    "interface": None
                })
            else:
                r.append({
                    "ip": match.group("ip"),
                    "mac": match.group("mac"),
                    "interface": match.group("interface")
                })
        return r
|
[
"[email protected]"
] | |
e193c720a834640102192c91e1ebc0f7a0c50778
|
8fa191cd4a67431a04eff62d35122ee83cc7b0af
|
/bookwyrm/migrations/0100_shelf_description.py
|
18185b17def91702d69be55cd555a444186df05a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
bookwyrm-social/bookwyrm
|
24678676a7a58dba96641194dfae3fffbf01574d
|
0f8da5b738047f3c34d60d93f59bdedd8f797224
|
refs/heads/main
| 2023-08-20T21:45:30.957277 | 2023-08-19T23:41:50 | 2023-08-19T23:41:50 | 236,415,735 | 1,398 | 216 |
NOASSERTION
| 2023-09-08T20:43:06 | 2020-01-27T03:51:54 |
Python
|
UTF-8
|
Python
| false | false | 416 |
py
|
# Generated by Django 3.2.5 on 2021-09-28 23:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``description`` (max 500 chars) to Shelf."""

    dependencies = [
        ("bookwyrm", "0099_readthrough_is_active"),
    ]

    operations = [
        migrations.AddField(
            model_name="shelf",
            name="description",
            field=models.TextField(blank=True, max_length=500, null=True),
        ),
    ]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.