blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72b255f1a70060f3fae7db94812b435d5bb8b2d
|
818e5e78f84596a7c086b218fd4aa9e8ea912afe
|
/hackatons/materials/algo/source/T5_LinearStructure/P2_Queue/counter_game_deq.py
|
d78087eb4b0f2a733e40cb405b86b2885f5e47e4
|
[] |
no_license
|
davendiy/forpythonanywhere
|
44fbc63651309598b58391667f0fead40e8fad91
|
1b9292ca33b06b17cd516e4e9913479edb6d35cd
|
refs/heads/master
| 2020-08-10T04:24:02.665635 | 2019-10-25T07:05:46 | 2019-10-25T07:05:46 | 214,255,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,193 |
py
|
#Лічилка з використанням деку
from source.T5_LinearStructure.P2_Queue.counter_game import Player
from source.T5_LinearStructure.P2_Queue.Deque import Deque
def count_counter():
    """Solve the counting-out ("Josephus") game with a deque.

    Reads the number of players and the count-out word length from stdin,
    then prints the player numbers in the order they are eliminated.
    """
    players = Deque()  # players still in the game, front = next to count
    n = int(input('Кількість гравців: '))
    m = int(input('Кількість слів: '))
    for i in range(n):
        players.append(Player(i + 1))  # player numbers are 1-based
    print('\nПослідовність номерів, що вибувають')
    while not players.empty():
        # rotate m-1 players from the front to the back of the deque
        for _ in range(m - 1):
            players.append(players.popleft())
        # the m-th player from the front is eliminated
        eliminated = players.popleft()
        print(eliminated)


count_counter()
|
[
"[email protected]"
] | |
8333137c128e54828c5eee264b4aee1b358fa310
|
f0a1a85e8cae69144ce304d4c91b53b8f8cf5116
|
/mysite/blog/models.py
|
6bc8deb3e116bb18d028e1c31219e4e91c1c6bb9
|
[
"MIT"
] |
permissive
|
ohduran-attempts/by-example
|
0a96b59cf41e3c955e8e744b0604c909168fd998
|
a56385c169d426090970f3f481d15fec50a9c603
|
refs/heads/master
| 2020-04-22T16:41:53.512719 | 2019-02-15T07:32:51 | 2019-02-15T07:32:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,995 |
py
|
from common.models import TimeStamped
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils import timezone
from taggit.managers import TaggableManager
class PublishedManager(models.Manager):
    """Manager that narrows the default queryset to published posts only."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(status='published')
class Post(TimeStamped, models.Model):
    """A blog post with a draft/published workflow, tags and dated URLs."""
    # (stored value, human-readable label)
    STATUS_CHOICES_TPL = (
        ('draft', 'Draft'),
        ('published', 'Published'),
    )
    objects = models.Manager()      # default manager: every post
    published = PublishedManager()  # only posts with status='published'
    tags = TaggableManager()        # django-taggit tag support
    title = models.CharField(max_length=250)
    # slug only needs to be unique per publish date, not globally
    slug = models.SlugField(max_length=250,
                            unique_for_date='publish')
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.CASCADE,
                               related_name='blog_posts')
    body = models.TextField()
    publish = models.DateTimeField(default=timezone.now)
    status = models.CharField(max_length=10,
                              choices=STATUS_CHOICES_TPL,
                              default='draft')

    class Meta:
        ordering = ('-publish',)  # newest first

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Reverses 'blog:post_detail' with year/month/day/slug args.
        return reverse_lazy('blog:post_detail',
                            args=[self.publish.year,
                                  self.publish.month,
                                  self.publish.day,
                                  self.slug])
class Comment(TimeStamped, models.Model):
    """A reader comment attached to a Post."""
    post = models.ForeignKey(Post,
                             on_delete=models.CASCADE,
                             related_name='comments')
    name = models.CharField(max_length=80)
    email = models.EmailField()
    body = models.TextField()
    # moderation flag: inactive comments can be hidden without deleting them
    active = models.BooleanField(default=True)

    class Meta:
        # NOTE(review): 'created' is presumably a field supplied by the
        # TimeStamped base class -- confirm in common.models.
        ordering = ('created',)

    def __str__(self):
        return f"Comment by {self.name} on {self.post}"
|
[
"[email protected]"
] | |
bffe3775877350a0d53f049549cc6499bd1d2cee
|
36901e58fbdeabc7380ae2c0278010b2c51fe54d
|
/gatheros_subscription/urls/me.py
|
4823370a6d4c79d1b4002d326f190346c0136ed1
|
[] |
no_license
|
hugoseabra/congressy
|
e7c43408cea86ce56e3138d8ee9231d838228959
|
ac1e9b941f1fac8b7a13dee8a41982716095d3db
|
refs/heads/master
| 2023-07-07T04:44:26.424590 | 2021-08-11T15:47:02 | 2021-08-11T15:47:02 | 395,027,819 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
from django.conf.urls import include, url
from gatheros_subscription import views
# URL routes for the current user's subscriptions.
urls = [
    url(
        r'^subscriptions/$',
        views.MySubscriptionsListView.as_view(),
        name='my-subscriptions'
    ),
]
# Mounted under the /me/ prefix; resolves e.g. /me/subscriptions/.
urlpatterns_me = [url(r'^me/', include(urls))]
|
[
"[email protected]"
] | |
070fc92166fd5c5e64836d1cf9676f441f1cdd5c
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_6404600001200128_1/Python/ihadanny/r1_p1.py
|
f67fc6f333dd5df96ae47855e77a0df26307669e
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
from sys import stdin
import re
import operator
import bisect
import sys
import random
# Python 2 contest solution (print statement, stdin.next, list-returning map).
# Presumably Google Code Jam "Mushroom Monster"-style input -- TODO confirm.
cases = int(stdin.next().strip())
for case in range(1, cases+1):
    N = int(stdin.next().strip())
    # M: the N observed values; under Python 2, map() returns a list
    M = map(int, stdin.next().split())
    # drop between each adjacent pair (only decreases count)
    drops = [max(i-j,0) for i, j in zip(M[:-1], M[1:])]
    # cap each interval's contribution by the single largest observed drop
    max_eaten = [min(max(drops), x) for x in M[:-1]]
    print 'Case #%d: %d %d' % (case, sum(drops), sum(max_eaten))
|
[
"[email protected]"
] | |
81370fb27ca8ee771d8333b297381817241fd383
|
9193e2743434893c76e45b85a6a2ebcef71e8e2d
|
/ch03/ans27.py
|
7e4795a48c12edf941443c284fa07ea89d030dc3
|
[] |
no_license
|
kyodocn/nlp100v2020
|
d4f06a0eb089d7f056aa00817f79199fb4edfed2
|
99c66511352092a0f4c5028b1f440e09d6401331
|
refs/heads/master
| 2022-04-15T02:43:12.003780 | 2020-04-13T18:41:15 | 2020-04-13T18:41:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 741 |
py
|
import re
import pandas as pd
# Extract the fields of the "基礎情報" (basic information) template from the
# Japanese Wikipedia article titled "イギリス" (United Kingdom).
df = pd.read_json('ch03/jawiki-country.json.gz', lines=True)
ukText = df.query('title=="イギリス"')['text'].values
ls, fg = [], False  # ls: collected template lines; fg: inside-template flag
template = '基礎情報'
# Fix: patterns are raw strings now -- '\{', '\}', '\|' etc. in plain string
# literals are invalid escape sequences (SyntaxWarning on Python 3.12+).
p1 = re.compile(r'\{\{' + template)               # template opening
p2 = re.compile(r'\}\}')                          # template closing
p3 = re.compile(r'\|')                            # field line marker
p4 = re.compile(r'<ref(\s|>).+?(</ref>|$)')       # strip <ref>...</ref> citations
for l in ukText[0].split('\n'):
    if fg:
        ml = [p2.match(l), p3.match(l)]
        if ml[0]:
            break  # '}}' ends the template block
        if ml[1]:
            ls.append(p4.sub('', l.strip()))
    if p1.match(l):
        fg = True  # template started; collect following lines
# Parse '|key = value' lines; skip lines that do not match instead of
# crashing on None (robustness fix).
p = re.compile(r'\|(.+?)\s=\s(.+)')
ans = {m.group(1): m.group(2) for m in [p.match(c) for c in ls] if m}
# Replace wiki links [[target|label]] / [[target]] with their display text.
r = re.compile(r'\[\[(.+\||)(.+?)\]\]')
ans = {k: r.sub(r'\2', v) for k, v in ans.items()}
print(ans)
|
[
"[email protected]"
] | |
a2111854ac54c26359b72bf65a3d4e34aa50b31e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EYojuPCtvSzF2chkZ_1.py
|
d247c0967894694c7c4e84c2701804484f99a9dd
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 576 |
py
|
"""
Create a function that returns the selected **filename** from a path. Include
the **extension** in your answer.
### Examples
get_filename("C:/Projects/pil_tests/ascii/edabit.txt") ➞ "edabit.txt"
get_filename("C:/Users/johnsmith/Music/Beethoven_5.mp3") ➞ "Beethoven_5.mp3"
get_filename("ffprobe.exe") ➞ "ffprobe.exe"
### Notes
* Tests will include both absolute and relative paths.
* For simplicity, all paths will include forward slashes.
"""
from pathlib import PurePath
def get_filename(path):
    """Return the final path component (filename with extension) of *path*."""
    pure = PurePath(path)
    return pure.name
|
[
"[email protected]"
] | |
fde97c8249d30b9f96310f9a0f91c45db0dcdc11
|
4fe971fdd0fb1d87b2bfaa5fe4b249b121501836
|
/vignewton/managers/admin/images.py
|
a76a68be13c22e69ecf041c2f50c32321f7ec221
|
[
"Unlicense"
] |
permissive
|
umeboshi2/vignewton
|
709c3395b74951385d1d3f9a932e4e6a6c1e0350
|
bf55f90a25ae616e003ff0f71643dbe5084e924f
|
refs/heads/master
| 2021-01-20T13:47:26.052679 | 2013-10-25T18:36:29 | 2013-10-25T18:36:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,186 |
py
|
from cStringIO import StringIO
from datetime import datetime
import transaction
from PIL import Image
from vignewton.models.sitecontent import SiteImage
class ImageManager(object):
    """CRUD helper for SiteImage rows; generates a JPEG thumbnail on add.

    Python 2 code: in-memory buffers use cStringIO, writes are wrapped in
    pyramid/zope `transaction.manager` blocks.
    """
    def __init__(self, session):
        # session: SQLAlchemy session used for all queries and writes
        self.session = session
        # maximum (width, height) of generated thumbnails
        self.thumbnail_size = 128, 128

    def images_query(self):
        """Return a query over all SiteImage rows."""
        return self.session.query(SiteImage)

    def make_thumbnail(self, content):
        """Return JPEG thumbnail bytes for the raw image bytes *content*.

        NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        Image.LANCZOS) -- confirm the pinned PIL/Pillow version.
        """
        imgfile = StringIO(content)
        img = Image.open(imgfile)
        # thumbnail() resizes in place, preserving aspect ratio
        img.thumbnail(self.thumbnail_size, Image.ANTIALIAS)
        outfile = StringIO()
        img.save(outfile, 'JPEG')
        outfile.seek(0)
        thumbnail_content = outfile.read()
        return thumbnail_content

    def add_image(self, name, fileobj):
        """Store a new SiteImage (plus thumbnail) read from *fileobj*."""
        content = fileobj.read()
        with transaction.manager:
            image = SiteImage(name, content)
            image.thumbnail = self.make_thumbnail(content)
            self.session.add(image)
        # merge so the returned instance is attached to this session
        return self.session.merge(image)

    def delete_image(self, id):
        """Delete the SiteImage with primary key *id*."""
        with transaction.manager:
            image = self.session.query(SiteImage).get(id)
            self.session.delete(image)
|
[
"[email protected]"
] | |
9f77e916c511b53114f58ea7fa8a56b79e0034a7
|
7a8bb4c1de15f987e3231590eae74c051bf33726
|
/SJVA_Scanner_KoreaTV_Download.py
|
6a40cfa985904b82d46ef3644e0cc39210ea8b19
|
[] |
no_license
|
sunyruru/SJVA-Scanners
|
cbe6efa56be4c74a96059a91b32b60ff2ba4f3b6
|
5028c8c4aa58d4514f77ab46f3155f288c64b6f5
|
refs/heads/master
| 2020-04-21T13:40:04.306951 | 2019-01-28T08:21:35 | 2019-01-28T08:21:35 | 169,606,889 | 2 | 0 | null | 2019-02-07T16:53:39 | 2019-02-07T16:53:39 | null |
UTF-8
|
Python
| false | false | 3,916 |
py
|
# -*- coding: UTF-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re, os, os.path
import Media, VideoFiles, Stack, Utils
import time, json, traceback, io
# Filename patterns that carry an episode number.
episode_regexps = [
    r'(?P<show>.*?)[\s\.]E?(?P<ep>\d{1,2})[\-\~]E?\d{1,2}',  # catches merged multi-episode files (e.g. E01-E02)
    r'(?P<show>.*?)[eE](?P<ep>[0-9]{1,4})'
]
# Filename patterns that carry a broadcast date instead.
date_regexps = [
    r'(?P<show>.*?)[^0-9a-zA-Z](?P<year>[0-9]{2})(?P<month>[0-9]{2})(?P<day>[0-9]{2})[^0-9a-zA-Z]',  # 6-digit YYMMDD
]
# Best-effort file logging: scanning must keep working even when the log
# file cannot be created, hence the broad bare except.
try:
    import logging
    import logging.handlers
    logger = logging.getLogger('sjva_scanner')
    logger.setLevel(logging.ERROR)
    formatter = logging.Formatter(u'[%(asctime)s|%(lineno)s]:%(message)s')
    #file_max_bytes = 10 * 1024 * 1024
    filename = os.path.join(os.path.dirname( os.path.abspath( __file__ ) ), '../../', 'Logs', 'sjva.scanner.korea.tv.download.log')
    fileHandler = logging.FileHandler(filename, encoding='utf8')
    #fileHandler = logging.handlers.RotatingFileHandler(filename=filename), maxBytes=file_max_bytes, backupCount=5, encoding='euc-kr')
    fileHandler.setFormatter(formatter)
    logger.addHandler(fileHandler)
except:
    pass
def Scan(path, files, mediaList, subdirs, language=None, root=None):
    """Plex-style scanner entry point for downloaded Korean TV files.

    Each filename is matched first against the episode-number patterns, then
    against the date patterns; every match appends a Media.Episode to
    mediaList. Python 2 code (`except Exception, e`, dict.has_key).
    """
    VideoFiles.Scan(path, files, mediaList, subdirs, root)
    paths = Utils.SplitPath(path)
    shouldStack = True
    logger.debug('=====================================================')
    logger.debug('- path:%s' % path)
    logger.debug('- files count:%s' % len(files))
    logger.debug('- subdir count:%s' % len(subdirs))
    for _ in subdirs:
        logger.debug(' * %s' % _)
    if len(paths) != 0:
        logger.debug('- paths[0] : %s' % paths[0])
        logger.debug('- files count : %s', len(files))
        for i in files:
            tempDone = False  # set True once this file has been classified
            try:
                file = os.path.basename(i)
                logger.debug(' * FILE : %s' % file)
                # 1) try episode-number patterns first
                #for idx, rx in enumerate(episode_regexps):
                for rx in episode_regexps:
                    match = re.search(rx, file, re.IGNORECASE)
                    if match:
                        show = match.group('show').replace('.', '') if match.groupdict().has_key('show') else ''
                        season = 1  # filenames carry no season information
                        episode = int(match.group('ep'))
                        name, year = VideoFiles.CleanName(show)
                        # strip common Korean broadcast prefixes (specials, dramas, miniseries)
                        name = re.sub(r'((.*?기획)|(미니시리즈)|(.*?드라마)|(.*?특집))', '', name).strip()
                        logger.debug(' - MATCH show:[%s] name:[%s] episode:[%s] year:[%s]', show, name, episode, year)
                        if len(name) > 0:
                            tv_show = Media.Episode(name, season, episode, '', year)
                            tv_show.display_offset = 0
                            tv_show.parts.append(i)
                            mediaList.append(tv_show)
                            logger.debug(' - APPEND by episode: %s' % tv_show)
                            tempDone = True
                        break
                # 2) fall back to date-stamped (YYMMDD) patterns
                if tempDone == False:
                    for rx in date_regexps:
                        match = re.search(rx, file)
                        if match:
                            year = int(match.group('year')) + 2000  # 2-digit year assumed 20xx
                            month = int(match.group('month'))
                            day = int(match.group('day'))
                            show = match.group('show')
                            tv_show = Media.Episode(show, year, None, None, None)
                            tv_show.released_at = '%d-%02d-%02d' % (year, month, day)
                            tv_show.parts.append(i)
                            mediaList.append(tv_show)
                            logger.debug(' - APPEND by date: %s' % tv_show)
                            tempDone = True
                            break
                if tempDone == False:
                    logger.error(' NOT APPEND!!')
            except Exception, e:
                logger.error(e)
    if shouldStack:
        Stack.Scan(path, files, mediaList, subdirs)
|
[
"[email protected]"
] | |
9e0ed93c65839146d4639537314916ed89f2de42
|
cdd5c3238ba9feba53f95a04c247a846b15ecd09
|
/code/client/munkilib/updatecheck/unused_software.py
|
6c770cb491ffacf602e09ea131244321d63ffc2c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
munki/munki
|
13d786513f8fd5dba6f533bfbea76d28c4836d8e
|
d3c9eb4ffccd280fe3e4bbce9544171cb6c2cc80
|
refs/heads/main
| 2023-08-27T23:19:04.095339 | 2023-08-01T23:44:10 | 2023-08-01T23:44:10 | 24,219,473 | 2,890 | 474 |
NOASSERTION
| 2023-08-22T15:15:44 | 2014-09-19T06:51:32 |
Python
|
UTF-8
|
Python
| false | false | 5,577 |
py
|
# encoding: utf-8
#
# Copyright 2017-2023 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
updatecheck.unused_software
Created by Greg Neagle on 2017-02-18.
Functions for removing unused optional install items
"""
from __future__ import absolute_import, print_function
# Apple frameworks via PyObjC
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from AppKit import NSWorkspace
# pylint: enable=E0611
# our libs
from .. import app_usage
from .. import display
def bundleid_is_running(app_bundleid):
    '''Returns a boolean indicating if the application with the given
    bundleid is currently running.'''
    workspace = NSWorkspace.sharedWorkspace()
    return any(app.bundleIdentifier() == app_bundleid
               for app in workspace.runningApplications())
def bundleids_from_installs_list(pkginfo_pl):
    '''Extracts a list of application bundle_ids from the installs list of a
    pkginfo item.

    Bug fix: the original condition was `A and B or C`, which -- by operator
    precedence -- let 'bundle'-type items ending in .app through even when
    they had no CFBundleIdentifier, producing None entries in the result.
    A CFBundleIdentifier is now required for both branches.'''
    installs_list = pkginfo_pl.get('installs', [])
    bundle_ids = [item['CFBundleIdentifier'] for item in installs_list
                  if item.get('CFBundleIdentifier') and
                  (item.get('type') == 'application' or
                   (item.get('type') == 'bundle' and
                    item.get('path', '').endswith('.app')))]
    return bundle_ids
def should_be_removed(item_pl):
    """Determines if an optional install item should be removed due to lack of
    use.

    An item qualifies only when its pkginfo carries
    unused_software_removal_info with a valid removal_days, usage data spans
    at least that many days, there was no recent install request, and none of
    its applications is running or was activated within removal_days.

    Returns a boolean."""
    name = item_pl['name']
    removal_info = item_pl.get('unused_software_removal_info')
    # do we have unused_software_removal_info?
    if not removal_info:
        return False
    display.display_debug1(
        '\tChecking to see if %s should be removed due to lack of use...', name)
    try:
        removal_days = int(removal_info.get('removal_days', 0))
        if removal_days < 1:
            raise ValueError
    except ValueError:
        display.display_warning('Invalid removal_days: %s for item %s'
                                % (removal_info.get('removal_days'), name))
        return False
    display.display_debug1(
        '\t\tNumber of days until removal is %s', removal_days)
    usage = app_usage.ApplicationUsageQuery()
    usage_data_days = usage.days_of_data()
    if usage_data_days is None or usage_data_days < removal_days:
        # we don't have usage data old enough to judge
        display.display_debug1(
            '\t\tApplication usage data covers fewer than %s days.',
            removal_days)
        return False
    # check to see if we have an install request within the removal_days
    days_since_install_request = usage.days_since_last_install_event(
        'install', name)
    if (days_since_install_request is not None and
            days_since_install_request != -1 and
            days_since_install_request <= removal_days):
        display.display_debug1('\t\t%s had an install request %s days ago.',
                               name, days_since_install_request)
        return False
    # get list of application bundle_ids to check
    if 'bundle_ids' in removal_info:
        bundle_ids = removal_info['bundle_ids']
    else:
        # get application bundle_ids from installs list
        bundle_ids = bundleids_from_installs_list(item_pl)
    if not bundle_ids:
        # Bug fix: message used '\\t' (a literal backslash-t in the log);
        # real tabs now match the indentation of the sibling debug messages.
        display.display_debug1('\t\tNo application bundle_ids to check.')
        return False
    # now check each bundleid to see if it's currently running or has been
    # activated in the past removal_days days
    display.display_debug1('\t\tChecking bundle_ids: %s', bundle_ids)
    for bundle_id in bundle_ids:
        if bundleid_is_running(bundle_id):
            display.display_debug1(
                '\t\tApplication %s is currently running.' % bundle_id)
            return False
        days_since_last_activation = usage.days_since_last_usage_event(
            'activate', bundle_id)
        if days_since_last_activation == -1:
            # -1 means no activation event inside the usage-data window
            display.display_debug1(
                '\t\t%s has not been activated in more than %s days...',
                bundle_id, usage.days_of_data())
        elif days_since_last_activation <= removal_days:
            display.display_debug1('\t\t%s was last activated %s days ago',
                                   bundle_id, days_since_last_activation)
            return False
        else:
            display.display_debug1('\t\t%s was last activated %s days ago',
                                   bundle_id, days_since_last_activation)
    # if we get this far we must not have found any apps used in the past
    # removal_days days, so we should set up a removal
    display.display_info('Will add %s to the removal list since it has been '
                         'unused for at least %s days...', name, removal_days)
    return True
# This module is a library; running it directly only prints an informational note.
if __name__ == '__main__':
    print('This is a library of support tools for the Munki Suite.')
|
[
"[email protected]"
] | |
7942255ce3e00ae3769a7cdbbb8edc73fc986e87
|
6b1cac18b81a4704c310fb30a30e2906c6137511
|
/onepanman_api/views/api/notice.py
|
26a0a1f3a0327c3e7ae9f34146fc170cb14d8ea3
|
[
"MIT"
] |
permissive
|
Capstone-onepanman/api-server
|
973c73a4472637e5863d65ae90ec53db83aeedf7
|
1a5174fbc441d2718f3963863590f634ba2014e1
|
refs/heads/master
| 2022-12-09T22:43:23.720837 | 2020-03-20T00:43:21 | 2020-03-20T00:43:21 | 234,227,137 | 0 | 0 |
MIT
| 2022-12-08T02:37:19 | 2020-01-16T03:29:36 |
Python
|
UTF-8
|
Python
| false | false | 247 |
py
|
from rest_framework import viewsets
from onepanman_api.models import Notice
from onepanman_api.serializers.notice import NoticeSerializer
class NoticeViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Notice objects.

    Bug fix: ``queryset`` must be a QuerySet, not the model class itself --
    DRF's ``GenericAPIView.get_queryset()`` calls ``.all()`` on this
    attribute, which a model class does not provide.
    """
    queryset = Notice.objects.all()
    serializer_class = NoticeSerializer
|
[
"[email protected]"
] | |
dd17276b517f0934344b4de656f26eca45e56c03
|
df9b342f71cee4306c52ee5e29d105f8712d7439
|
/BOJ/하노이탑/다른사람.py
|
b447ae5dea94644e425cae796590c5652794ad21
|
[] |
no_license
|
qkreltms/problem-solvings
|
a3fbd93d5664830761c70ef6a476e94ada399af0
|
cade3fc738c0b7b40ae4bf0385fdd552313ad5a1
|
refs/heads/master
| 2023-07-19T08:17:48.580833 | 2021-08-31T08:45:57 | 2021-08-31T08:45:57 | 136,621,853 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 187 |
py
|
def f(n, a, b, c):
    """Print the moves solving Towers of Hanoi for n disks, peg a -> peg c."""
    if n > 1:
        f(n - 1, a, c, b)  # park the n-1 smaller disks on the spare peg
    print(a, c, sep=" ")   # move the largest remaining disk
    if n > 1:
        f(n - 1, b, a, c)  # bring the smaller disks back on top of it
n = int(input())  # number of disks
print(2**n-1)  # minimum number of moves is 2^n - 1
# only print the full move sequence for small n (output size is exponential)
if(n <= 20):
    f(n, 1, 2, 3)
|
[
"[email protected]"
] | |
fbe5bbf72cfc77e0e0a289bbf4f3e02ff45f6c7d
|
c421330a5e03df01aa4ec9dc1c60dd2b9c514423
|
/movieproject/movieapp/urls.py
|
29e716810f30b03d4a9e060a55a905cdf4dcd5f1
|
[] |
no_license
|
sayanth123/movieapp
|
16051774cbb1766c513a3e2b28c45b905c45c4d0
|
f4e50a7f1b7441390ab234c11a13e1d989ec3118
|
refs/heads/master
| 2023-05-06T05:41:09.735871 | 2021-05-26T12:46:47 | 2021-05-26T12:46:47 | 371,027,811 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
from . import views
from django.urls import path
# URL namespace for reversing, e.g. 'movieapp:detail'.
app_name='movieapp'
urlpatterns = [
    path('', views.index,name='index'),
    path('movie/<int:movie_id>/', views.detail,name='detail'),
    path('add/', views.add,name='add'),
    path('update/<int:id>/', views.update,name='update'),
    path('delete/<int:id>/', views.delete,name='delete'),
]
|
[
"[email protected]"
] | |
63b6f5beff30f469db12c028c0a1fefdad4c79f5
|
d507d0846902e0012a4b2a0aaaea1cbbdb21db46
|
/supervisely_lib/annotation/json_geometries_map.py
|
394b5674ece0eb53c38ebf1dfc6160b66988b185
|
[] |
no_license
|
wpilibsuite/supervisely
|
a569fdc0d5e5f2fb912f32beab8f3fedb277504e
|
19805ca9b2bd20e31d6d41a99dc37dc439bc257a
|
refs/heads/master
| 2022-09-09T02:32:54.883109 | 2020-06-01T20:55:49 | 2020-06-01T20:55:49 | 267,916,361 | 2 | 3 | null | 2020-06-03T13:59:56 | 2020-05-29T17:27:30 |
Python
|
UTF-8
|
Python
| false | false | 843 |
py
|
# coding: utf-8
from supervisely_lib.geometry.bitmap import Bitmap
from supervisely_lib.geometry.cuboid import Cuboid
from supervisely_lib.geometry.point import Point
from supervisely_lib.geometry.polygon import Polygon
from supervisely_lib.geometry.polyline import Polyline
from supervisely_lib.geometry.rectangle import Rectangle
from supervisely_lib.geometry.graph import GraphNodes
from supervisely_lib.geometry.any_geometry import AnyGeometry
from supervisely_lib.geometry.cuboid_3d import Cuboid3d
# All geometry classes that may appear in annotation JSON.
_INPUT_GEOMETRIES = [Bitmap, Cuboid, Point, Polygon, Polyline, Rectangle, GraphNodes, AnyGeometry, Cuboid3d]
# Maps each geometry's JSON shape name (geometry_name()) to its class.
_JSON_SHAPE_TO_GEOMETRY_TYPE = {geometry.geometry_name(): geometry for geometry in _INPUT_GEOMETRIES}
def GET_GEOMETRY_FROM_STR(figure_shape: str):
    """Return the geometry class registered for the JSON shape name
    *figure_shape* (raises KeyError for unknown shapes)."""
    return _JSON_SHAPE_TO_GEOMETRY_TYPE[figure_shape]
|
[
"[email protected]"
] | |
996df35200d2adc6b93a637fd11c0fe8b8974d26
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/confidentialledger/azure-confidentialledger/azure/confidentialledger/receipt/_claims_models.py
|
9859688f266bb0aff4d28d6e620d07a0fd31064e
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 |
MIT
| 2023-09-14T21:48:49 | 2012-04-24T16:46:12 |
Python
|
UTF-8
|
Python
| false | false | 4,138 |
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Models for application claims."""
from typing import Any, Dict, Optional, Union
from dataclasses import dataclass
@dataclass
class LedgerEntryClaim:
    """An Application Claim derived from ledger entry data.

    :keyword protocol: The protocol used to compute the claim.
    :paramtype protocol: str
    :keyword collectionId: The collection ID of the ledger entry.
    :paramtype collectionId: str
    :keyword contents: The contents of the ledger entry.
    :paramtype contents: str
    :keyword secretKey: The secret key used to compute the claim digest.
    :paramtype secretKey: str
    """

    protocol: str
    collectionId: str
    contents: str
    secretKey: str

    @classmethod
    def from_dict(cls, ledger_entry_claim_dict: Dict[str, Any]):
        """Build a LedgerEntryClaim from its dictionary representation.

        :param dict[str, any] ledger_entry_claim_dict: The dictionary representation of the ledger entry claim.
        :return: A new instance of this class corresponding to the provided dictionary.
        :rtype: LedgerEntryClaim
        """
        return cls(**ledger_entry_claim_dict)
@dataclass
class ClaimDigest:
    """An Application Claim in digested form.

    :keyword protocol: The protocol used to compute the claim.
    :paramtype protocol: str
    :keyword value: The digest of the claim.
    :paramtype value: str
    """

    protocol: str
    value: str

    @classmethod
    def from_dict(cls, ledger_entry_claim_dict: Dict[str, Any]):
        """Build a ClaimDigest from its dictionary representation.

        (Parameter name kept for backward compatibility with keyword callers.)

        :param dict[str, any] ledger_entry_claim_dict: The dictionary representation of the claim digest.
        :return: A new instance of this class corresponding to the provided dictionary.
        :rtype: ClaimDigest
        """
        return cls(**ledger_entry_claim_dict)
@dataclass
class ApplicationClaim:
    """A claim of a ledger application.

    :keyword kind: The kind of the claim.
    :paramtype kind: str
    :keyword ledgerEntry: The ledger entry claim.
    :paramtype ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]]
    :keyword digest: The claim digest object.
    :paramtype digest: Optional[Union[Dict[str, Any], ClaimDigest]]
    """

    kind: str
    ledgerEntry: Optional[LedgerEntryClaim] = None
    digest: Optional[ClaimDigest] = None

    def __init__(
        self,
        kind: str,
        ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]] = None,
        digest: Optional[Union[Dict[str, Any], ClaimDigest]] = None,
        **kwargs: Any
    ):
        """
        :keyword kind: The kind of the claim.
        :paramtype kind: str
        :keyword ledgerEntry: The ledger entry claim.
        :paramtype ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]]
        :keyword digest: The claim digest object.
        :paramtype digest: Optional[Union[Dict[str, Any], ClaimDigest]]
        """
        self.kind = kind
        # Accept either an already-built claim object or its dict form;
        # dicts are normalized through from_dict, falsy values become None.
        if not ledgerEntry:
            self.ledgerEntry = None
        elif isinstance(ledgerEntry, LedgerEntryClaim):
            self.ledgerEntry = ledgerEntry
        else:
            self.ledgerEntry = LedgerEntryClaim.from_dict(ledgerEntry)
        if not digest:
            self.digest = None
        elif isinstance(digest, ClaimDigest):
            self.digest = digest
        else:
            self.digest = ClaimDigest.from_dict(digest)
        # Preserve any unrecognized keyword arguments.
        self.kwargs = kwargs

    @classmethod
    def from_dict(cls, claim_dict: Dict[str, Any]):
        """Create a new instance of this class from a dictionary.

        :param dict[str, any] claim_dict: The dictionary representation of the application claim.
        :return: A new instance of this class corresponding to the provided dictionary.
        :rtype: ApplicationClaim
        """
        return cls(**claim_dict)
|
[
"[email protected]"
] | |
0185c4f4c626389ea2464ebda9f072d8a3b86e50
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/keras/api/_v2/keras/applications/xception/__init__ 2.py
|
bf93ae01110de2a54ca5eaeaa25020b85ad82eab
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:bdac5cda377d4d39bccb19a4cdeafc9b8f2a51c4983a1c81f5f33a22b6729864
size 731
|
[
"[email protected]"
] | |
5528da26ff17297745c4e882767344421f6747fc
|
5da373c7f45b65894804002ef33fd53264d976f9
|
/ppim/models/__init__.py
|
375413f9f747aca74a305719606c6d34f8708fba
|
[
"Apache-2.0"
] |
permissive
|
chenhaohan88/Paddle-Image-Models
|
55bfafdbb43ef001faa4ea2e53570ab3248e4786
|
c80e3423ce57779b3426c3c024f3fc51cdb9d1b7
|
refs/heads/main
| 2023-04-10T22:52:45.251251 | 2021-04-04T02:20:15 | 2021-04-04T02:20:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 955 |
py
|
# Transformer
# from .tnt import tnt_s, TNT
from .vit import VisionTransformer
from .pit import pit_ti, pit_s, pit_xs, pit_b, pit_ti_distilled, pit_s_distilled, pit_xs_distilled, pit_b_distilled, PoolingTransformer, DistilledPoolingTransformer
from .deit import deit_ti, deit_s, deit_b, deit_b_384, deit_ti_distilled, deit_s_distilled, deit_b_distilled, deit_b_distilled_384, DistilledVisionTransformer
# CNN
# from .dla import dla_34, dla_46_c, dla_46x_c, dla_60, dla_60x, dla_60x_c, dla_102, dla_102x, dla_102x2, dla_169, DLA
from .rexnet import rexnet_1_0, rexnet_1_3, rexnet_1_5, rexnet_2_0, rexnet_3_0, ReXNet
from .repvgg import repvgg_a0, repvgg_a1, repvgg_a2, repvgg_b0, repvgg_b1, repvgg_b2, repvgg_b3, repvgg_b1g2, repvgg_b1g4, repvgg_b2g4, repvgg_b3g4, RepVGG
# from .hardnet import hardnet_68, hardnet_85, hardnet_39_ds, hardnet_68_ds, HarDNet
# Involution
from .rednet import rednet_26, rednet_38, rednet_50, rednet_101, rednet_152, RedNet
|
[
"[email protected]"
] | |
de9883ebf4e9b195992a3a40d7ed18ada729acc7
|
ab1c920583995f372748ff69d38a823edd9a06af
|
/hw/day9/day9_hw3.py
|
16f8486856923f4b36925b819f6988b3d58adbad
|
[] |
no_license
|
adyadyat/pyprojects
|
5e15f4e33892f9581b8ebe518b82806f0cd019dc
|
c8f79c4249c22eb9e3e19998d5b504153faae31f
|
refs/heads/master
| 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
# Print a centered diamond of digits 1..8..1: row for digit i is indented
# (8 - i) spaces and repeats the digit 2*i - 1 times.
for i in list(range(1, 9)) + list(range(7, 0, -1)):
    print(' ' * (8 - i) + str(i) * (2 * i - 1))
'''
1
222
33333
4444444
555555555
66666666666
7777777777777
888888888888888
7777777777777
66666666666
555555555
4444444
33333
222
1
'''
|
[
"[email protected]"
] | |
5b809ff208831e26008b58b30ecc4453fe7f150d
|
fcc665fc2792820e438d32339cc12ae796c1835c
|
/opps/core/models/profile.py
|
d012047a545730de5be9f658dfa00941a86911e5
|
[
"MIT"
] |
permissive
|
marcelomilo/opps
|
e3614e644d97ebc6b62e0083aee9a42c242f567c
|
bf92a003b6ad1f521d662d767a29f58a6033cb3d
|
refs/heads/master
| 2021-01-16T18:50:12.146646 | 2013-03-02T05:15:51 | 2013-03-02T05:15:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class Profile(models.Model):
    """Extra per-user data (Twitter handle) linked to ``auth.User``."""
    # NOTE(review): related_name='user' names the reverse accessor on User
    # (which yields Profile rows) -- misleading, but renaming it would change
    # the public ORM API, so it is left as-is.
    user = models.ForeignKey(User, related_name='user')
    twitter = models.CharField(_(u"Twitter"), max_length=75, blank=True,
                               null=True)

    class Meta:
        app_label = 'core'

    def __unicode__(self):
        # Bug fix: __unicode__ must return a text string; returning the
        # related User instance raised TypeError whenever the profile was
        # coerced to text (admin lists, templates, unicode()).
        return self.user.username
|
[
"[email protected]"
] | |
ef2d18211c323bd7603ec0938ce87dce09755d62
|
b4c2bbf32748f381f8918c2c20d2a86b5453dc87
|
/plugins/convert/mask/box_blend.py
|
f42177463e6d8e748353a1bd9354d1eaf432d0ff
|
[
"MIT"
] |
permissive
|
oveis/DeepVideoFaceSwap
|
d45c7a18204f851a5c8b9cb6c9618284d4314b59
|
e507f94d4f5d74c36e41c386c6fb14bb745a4885
|
refs/heads/dev-gan-model
| 2022-07-14T10:06:08.131201 | 2019-07-09T00:48:16 | 2019-07-09T00:48:16 | 184,978,011 | 6 | 5 |
MIT
| 2022-06-21T22:00:38 | 2019-05-05T04:09:53 |
Python
|
UTF-8
|
Python
| false | false | 1,990 |
py
|
#!/usr/bin/env python3
""" Adjustments for the swap box for faceswap.py converter """
import numpy as np
from ._base import Adjustment, BlurMask, logger
class Mask(Adjustment):
    """ Manipulations that occur on the swap box

    Actions performed here occur prior to warping the face back to the background frame

    For actions that occur identically for each frame (e.g. blend_box), constants can
    be placed into self.func_constants to be compiled at launch, then referenced for
    each face. """
    def __init__(self, mask_type, output_size, predicted_available=False, config=None):
        super().__init__(mask_type, output_size, predicted_available, config)
        # Build the mask once up front: it is identical for every face.
        # self.skip is provided by the Adjustment base class.
        self.mask = self.get_mask() if not self.skip else None

    def get_mask(self):
        """ The box for every face will be identical, so set the mask just once

        As gaussian blur technically blurs both sides of the mask, reduce the mask ratio by
        half to give a more expected box """
        logger.debug("Building box mask")
        # "distance" is a percentage; divided by 200 (not 100) to halve the
        # ratio, per the docstring above
        mask_ratio = self.config["distance"] / 200
        # assumes self.dummy is a square (H == W) face array -- TODO confirm
        facesize = self.dummy.shape[0]
        erode = slice(round(facesize * mask_ratio), -round(facesize * mask_ratio))
        mask = self.dummy[:, :, -1]
        mask[erode, erode] = 1.0  # solid interior; border left at its old value for blending
        mask = BlurMask(self.config["type"],
                        mask,
                        self.config["radius"],
                        self.config["passes"]).blurred
        logger.debug("Built box mask. Shape: %s", mask.shape)
        return mask

    def process(self, new_face):
        """ The blend box function. Adds the created mask to the alpha channel """
        if self.skip:
            logger.trace("Skipping blend box")
            return new_face
        logger.trace("Blending box")
        mask = np.expand_dims(self.mask, axis=-1)
        # append the mask as an alpha channel and clamp to the valid [0, 1] range
        new_face = np.clip(np.concatenate((new_face, mask), axis=-1), 0.0, 1.0)
        logger.trace("Blended box")
        return new_face
|
[
"[email protected]"
] | |
1a6bfbbed305ea623e2da442fa25a000b9f34077
|
53568d7c9ca6d53f3f90fe45d33cf6357a732a88
|
/170521-lambda-expresions,list-comprehension,classes/ulamek.py
|
4b3d6cf7509a99713ff711da7a639b031f54f698
|
[] |
no_license
|
majsylw/Python-3.x-examples
|
eb7ce7df9c582f7b56fa6d40db5f96479858f867
|
59b56ca98a0ea27ce48fb47a173333bf0a9d1349
|
refs/heads/main
| 2023-06-08T07:24:53.052672 | 2021-06-29T12:46:15 | 2021-06-29T12:46:15 | 348,288,268 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,596 |
py
|
"""
Przykładowa definicja klasy ułamek -- wykorzystanie metod specjalnych
"""
import math
class Ulamek:
def __init__(self, licznik, mianownik):
assert(mianownik > 0)
self.licznik, self.mianownik = licznik, mianownik
self.skracanie()
# funkcja print
def __str__(self):
return f'{self.licznik}/{self.mianownik}'
def skracanie(self):
temp = math.gcd(self.licznik, self.mianownik)
self.licznik //= temp
self.mianownik //= temp
# przeciążamy operator ==
def __eq__(self, u2):
return self.licznik == u2.licznik and self.mianownik == u2.mianownik
# przeciążamy operator + uzywając napisanej wcześniej metody statycznej
def __add__(self, inny_ulamek):
return Ulamek.dodawanie(self, inny_ulamek)
# przeciążamy operator *
def __mul__(self, u2):
wynik = Ulamek(self.licznik*u2.licznik,
self.mianownik*u2.mianownik)
return wynik
# metoda statyczna
@staticmethod
def dodawanie(ulamek1, ulamek2):
wynik = Ulamek(ulamek1.licznik*ulamek2.mianownik + ulamek2.licznik*ulamek1.mianownik,
ulamek1.mianownik*ulamek2.mianownik)
wynik.skracanie()
return wynik
if __name__ == '__main__':
u1 = Ulamek(3, 4)
u2 = Ulamek(2, 6)
print(u1)
print(u1, '+', u2, '=', Ulamek.dodawanie(u1, u2)) # wykorzystanie metody statycznej
print(u1, '+', u2, '=', u1 + u2) # przeciażenie +
print(u1, '*', u2, '=', u1 * u2) # przeciażenie *
print(u1, '==', u2, '->', u1 == u2)
|
[
"[email protected]"
] | |
1edbe9210cdaf8b6747c0577918cd4156ca3452d
|
57ddfddd1e11db649536a8ed6e19bf5312d82d71
|
/AtCoder/ABC1/ABC184/D.py
|
b4c97e61d1c7838d65bbca5688a51931bd044ccf
|
[] |
no_license
|
pgDora56/ProgrammingContest
|
f9e7f4bb77714dc5088c2287e641c0aa760d0f04
|
fdf1ac5d1ad655c73208d98712110a3896b1683d
|
refs/heads/master
| 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 841 |
py
|
import sys
sys.setrecursionlimit(10**9)
memo = {}
def search(a,b,c,cnt):
tot = a+b+c
if a > b:
a, b = b, a
if b > c:
b, c = c, b
if a > b:
a, b = b, a
if a in memo:
if b in memo[a]:
if c in memo[a][b]:
return memo[a][b][c]
else:
memo[a][b] = {}
else:
memo[a] = {}
memo[a][b] = {}
chil = 0
if a==99:
chil += (cnt+1) * 99
elif a!=0:
chil += search(a+1,b,c,cnt+1) * a
if b==99:
chil += (cnt+1) * 99
elif b!=0:
chil += search(a,b+1,c,cnt+1) * b
if c==99:
chil += (cnt+1) * 99
elif c!=0:
chil += search(a,b,c+1,cnt+1) * c
res = chil / tot
memo[a][b][c] = res
return chil / tot
a, b, c = map(int, input().split())
print(search(a,b,c,0))
|
[
"[email protected]"
] | |
6cbdb1487c6d3378423262ea3ae076dec93232d6
|
7c6b801ff36aa0a82ceb30c98e90091209320c7c
|
/cloudant121234.py
|
36222d26b5123a8e34eafb378d33919373468894
|
[] |
no_license
|
SmartPracticeschool/llSPS-INT-2442-Smart-Waste-Management-System-For-Metropolitan-Cities
|
5872fc64c1290991bb36b8f7fdc03eceb0025a8f
|
c6673bf9171b66b08a0c5a5f6643799b0d7fc3e6
|
refs/heads/master
| 2022-10-20T07:07:52.180598 | 2020-06-09T14:23:00 | 2020-06-09T14:23:00 | 267,571,204 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,459 |
py
|
import time
import sys
import random
import ibmiotf.application
import ibmiotf.device
#Provide your IBM Watson Device Credentials
organization = "q2va6d" # repalce it with organization ID
deviceType = "rsip" #replace it with device type
deviceId = "108" #repalce with device id
authMethod = "token"
authToken = "9110705023"#repalce with token
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
if cmd.data['command']=='cover':
print("the bin lid is closed")
elif cmd.data['command'] == 'uncover':
print("the bin lid is open")
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
deviceCli.connect()
while True:
L = random.randint(0, 100);
F = random.randint(0, 100);
Q = random.randint(0, 100);
W = random.randint(0, 100);
E = random.randint(0, 100);
R = random.randint(0, 100);
T = random.randint(0, 100);
Y = random.randint(0, 100);
lat=17.3984
lon=78.5583
data = {'d':{ 'garbagelevel' : L, 'garbageweight': F,'lat': lat,'lon': lon,'a' : Q, 'b' : W, 'c' : E, 'd' : R,'e' : T, 'f' : Y, 'g' : Y}}
u=time.asctime(time.localtime(time.time()))
print(u)
#print data
def myOnPublishCallback():
print ("Published Your Garbage Level = %s %%" % L, "Garbage Weight = %s %%" % F, "to IBM Watson")
print ("Published Your Garbage Level of bin2 = %s %%" % Q, "Garbage Weight of bin2 = %s %%" % W, "to IBM Watson")
print ("Published Your Garbage Level of bin3 = %s %%" % E, "Garbage Weight of bin3 = %s %%" % R, "to IBM Watson")
print ("Published Your Garbage Level of bin4 = %s %%" % T, "Garbage Weight of bin4 = %s %%" % Y, "to IBM Watson")
success = deviceCli.publishEvent("event", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(5)
deviceCli.commandCallback = myCommandCallback
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
client = Cloudant("fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix", "cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792",
url="https://fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix:cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792@fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix.cloudantnosqldb.appdomain.cloud")
client.connect()
database_name = "dustmanagement"
my_database = client.create_database(database_name)
if my_database.exists():
print(f"'{database_name}' successfully created.")
json_document = {'d':{ 'Garbage Level' : L, 'Garbage Weight': F }}
json_document = {'d':{ 'Garbage Level' : Q, 'Garbage Weight': W }}
json_document = {'d':{ 'Garbage Level' : E, 'Garbage Weight': R }}
json_document = {'d':{ 'Garbage Level' : T, 'Garbage Weight': Y }}
new_document = my_database.create_document(json_document)
if new_document.exists():
print(f"Document '{new_document}' successfully created.")
''' if L>=100:
print("your garbage is full")
import requests
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"G3k8jc6SOWqei20PQZJV4otdarXImlCYAygM9RuUxKnb1BvDhEWbJPYeFM1tLASXNKQzj5xp0Gm3Uw6B","sender_id":"FSTSMS","message":"This is test message","language":"english","route":"p","numbers":"9999999999,8919275560,7777777777"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)'''
# Disconnect the device and application from the cloud
deviceCli.disconnect()
|
[
"[email protected]"
] | |
9a5b1d2e7d6dea3e986d99e0bb25fe5acc6bb443
|
63b0f544dc8ad899dd605d36e6048077c7a9ed6e
|
/tests/test_shrinking.py
|
1c5b0a732701a01bc5dd6b9c42af810e40883b84
|
[] |
no_license
|
DRMacIver/structureshrink
|
c2372d7e4686879cb035292573d32a60459f1024
|
625e01236d6a7d72295782277737595f81d77d2a
|
refs/heads/master
| 2020-05-22T02:47:24.446684 | 2016-06-16T12:16:39 | 2016-06-16T12:16:39 | 55,408,891 | 101 | 6 | null | 2016-04-18T20:24:31 | 2016-04-04T12:20:29 |
Python
|
UTF-8
|
Python
| false | false | 734 |
py
|
from structureshrink import shrink
from hypothesis import given, strategies as st
import hashlib
@given(st.binary(), st.random_module())
def test_partition_by_length(b, _):
shrunk = shrink(b, len)
assert len(shrunk) == len(b) + 1
@given(
st.lists(st.binary(min_size=1, max_size=4), min_size=1, max_size=5),
st.random_module()
)
def test_shrink_to_any_substring(ls, _):
shrunk = shrink(
b''.join(ls), lambda x: sum(l in x for l in ls)
)
assert len(shrunk) >= len(ls)
def test_partition_by_last_byte():
seed = b''.join(bytes([i, j]) for i in range(256) for j in range(256))
shrunk = shrink(
seed, lambda s: hashlib.sha1(s).digest()[-1] & 127
)
assert len(shrunk) == 128
|
[
"[email protected]"
] | |
b22e2138f9c4c2578dd2761ab351bdc609613b66
|
381b75fe68a4da258e2e60a97105b66ac47214e4
|
/qa/rpc-tests/getblocktemplate_proposals.py
|
bd844d49dd91db1fa1eb0f16535ccea2625de16b
|
[
"MIT"
] |
permissive
|
lipcoin/lipcoin
|
3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3
|
7afc0a02d63620e5a5601474cca131cb0cf3bbe4
|
refs/heads/master
| 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,726 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytearray(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return bytearray(blk)
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytearray(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(LipCoinTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
def run_test(self):
node = self.nodes[0]
node.generate(1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = 0xff
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytearray(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
|
[
"[email protected]"
] | |
3d3ed85bb76718a4e5973252aefc6b9b998ef6c6
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Datatypes/KlOmegaElementMateriaal.py
|
0e4f888f71af1341513eee503beab2556145d36f
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 |
MIT
| 2022-06-20T20:36:00 | 2021-11-28T10:28:24 |
Python
|
UTF-8
|
Python
| false | false | 2,285 |
py
|
# coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlOmegaElementMateriaal(KeuzelijstField):
"""De gebruikte materialen van het omega-element."""
naam = 'KlOmegaElementMateriaal'
label = 'Omega element materiaal'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlOmegaElementMateriaal'
definition = 'De gebruikte materialen van het omega-element.'
status = 'ingebruik'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlOmegaElementMateriaal'
options = {
'aluminium': KeuzelijstWaarde(invulwaarde='aluminium',
label='aluminium',
status='ingebruik',
definitie='Omega-element vervaarigd uit aluminium.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/aluminium'),
'roestvrij-staal': KeuzelijstWaarde(invulwaarde='roestvrij-staal',
label='roestvrij staal',
status='ingebruik',
definitie='Omega-element vervaarigd uit roestvrij staal.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/roestvrij-staal'),
'verzinkt-staal': KeuzelijstWaarde(invulwaarde='verzinkt-staal',
label='verzinkt staal',
status='ingebruik',
definitie='Omega-element vervaarigd uit verzinkt staal.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/verzinkt-staal')
}
@classmethod
def create_dummy_data(cls):
return random.choice(list(map(lambda x: x.invulwaarde,
filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
|
[
"[email protected]"
] | |
bf7636f3f80aa31b41bfea8c5de09a9c2c78081e
|
be5e5aebd753ed1f376dc18ce411f0fac6d2f762
|
/natuurpunt_purchase/__openerp__.py
|
2c5775f49e8c2d135a4f1389ae4e637f0ac437cf
|
[] |
no_license
|
smart-solution/natuurpunt-purchase
|
7d9fcfdde769b6294d8dc705cecc99a177b4573c
|
0ac94cb68cee4ef464158720e04007ee12036179
|
refs/heads/master
| 2021-05-22T04:43:21.594422 | 2020-11-02T13:32:27 | 2020-11-02T13:32:27 | 39,186,322 | 0 | 2 | null | 2020-11-02T13:32:28 | 2015-07-16T08:42:31 |
Python
|
UTF-8
|
Python
| false | false | 1,548 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Smart Solution bvba
# Copyright (C) 2010-Today Smart Solution BVBA (<http://www.smartsolution.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "natuurpunt_purchase",
"version" : "1.0",
"author" : "Smart Solution ([email protected])",
"website" : "www.smartsolution.be",
"category" : "Generic Modules/Base",
"description": """
""",
"depends" : ["purchase_requisition"],
"data" : [
'natuurpunt_purchase_view.xml',
'natuurpunt_purchase_data.xml',
'natuurpunt_purchase_report.xml',
'security/natuurpunt_purchase_security.xml',
# 'security/ir.model.access.csv'
],
"active": False,
"installable": True
}
|
[
"[email protected]"
] | |
981bbfed69a5508f0cfab20fc831cfd657c03bfd
|
690c4fd238926624c1d3fa594aeb9d7140618b5b
|
/day04/mysite4/mysite4/settings.py
|
b6283d1c8dc99f4cc72597551584c5d90b1ccbf3
|
[] |
no_license
|
dalaAM/month_04
|
66c4630a169294f4e4dca26c77989ad5879da2ca
|
322532fedd095cd9307ee4f2633026debe56f551
|
refs/heads/master
| 2022-12-04T06:02:12.995054 | 2020-08-23T04:06:19 | 2020-08-23T04:06:19 | 286,018,771 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,203 |
py
|
"""
Django settings for mysite4 project.
Generated by 'django-admin startproject' using Django 2.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lc!7ik)7n=drgz5wna+v5$_oejjd&c9hr$i2y8ag#rz4!fj4co'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookstore',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite4',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
387622b9565cfcaa2fe10c694aeb971fe457181e
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/MuonSpectrometer/MuonCnv/MuonByteStream/share/WriteMuonByteStream_jobOptions.py
|
a8c537456ede0b7ccc707e97e9cfe4a5455e6a66
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 221 |
py
|
theApp.Dlls += [ "MuonByteStream" ]
StreamBS = Algorithm( "StreamBS" )
StreamBS.ItemList +=["4187#*"]
StreamBS.ItemList +=["4190#*"]
StreamBS.ItemList +=["4186#*"]
StreamBS.ItemList +=["4183#*"]
StreamBS.ForceRead=True
|
[
"[email protected]"
] | |
235b0d7e97c24574ab59397ad07507f0a41dccd3
|
45d515a0e33794e7c46a3ad7e1cfdf3ac6c2ee83
|
/collector.py
|
75168f49016e4b9e35ec36b52b159adbb814a41a
|
[
"MIT"
] |
permissive
|
djcarter85/Fantasy-Premier-League
|
12b2aaef62c5bc4e0656b83572c2ff9087aa4238
|
46a8e72b80b34a1afe3d7a9c9b4f8ad0cba48b7e
|
refs/heads/master
| 2021-07-03T13:04:05.621833 | 2020-12-21T17:16:41 | 2020-12-21T17:16:41 | 201,034,915 | 1 | 0 |
NOASSERTION
| 2019-08-07T11:16:27 | 2019-08-07T11:16:26 | null |
UTF-8
|
Python
| false | false | 4,066 |
py
|
import os
import sys
import csv
def get_teams(directory):
teams = {}
fin = open(directory + "/teams.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
teams[int(row['id'])] = row['name']
return teams
def get_fixtures(directory):
fixtures_home = {}
fixtures_away = {}
fin = open(directory + "/fixtures.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
fixtures_home[int(row['id'])] = int(row['team_h'])
fixtures_away[int(row['id'])] = int(row['team_a'])
return fixtures_home, fixtures_away
def get_positions(directory):
positions = {}
names = {}
pos_dict = {'1': "GK", '2': "DEF", '3': "MID", '4': "FWD"}
fin = open(directory + "/players_raw.csv", 'rU',encoding="utf-8")
reader = csv.DictReader(fin)
for row in reader:
positions[int(row['id'])] = pos_dict[row['element_type']]
names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']
return names, positions
def get_expected_points(gw, directory):
xPoints = {}
fin = open(os.path.join(directory, 'xP' + str(gw) + '.csv'), 'rU')
reader = csv.DictReader(fin)
for row in reader:
xPoints[int(row['id'])] = row['xP']
return xPoints
def merge_gw(gw, gw_directory):
merged_gw_filename = "merged_gw.csv"
gw_filename = "gw" + str(gw) + ".csv"
gw_path = os.path.join(gw_directory, gw_filename)
fin = open(gw_path, 'rU', encoding="utf-8")
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
fieldnames += ["GW"]
rows = []
for row in reader:
row["GW"] = gw
rows += [row]
out_path = os.path.join(gw_directory, merged_gw_filename)
fout = open(out_path,'a', encoding="utf-8")
writer = csv.DictWriter(fout, fieldnames=fieldnames, lineterminator='\n')
print(gw)
if gw == 1:
writer.writeheader()
for row in rows:
writer.writerow(row)
def collect_gw(gw, directory_name, output_dir):
rows = []
fieldnames = []
root_directory_name = "data/2020-21/"
fixtures_home, fixtures_away = get_fixtures(root_directory_name)
teams = get_teams(root_directory_name)
names, positions = get_positions(root_directory_name)
xPoints = get_expected_points(gw, output_dir)
for root, dirs, files in os.walk(u"./" + directory_name):
for fname in files:
if fname == 'gw.csv':
fpath = os.path.join(root, fname)
fin = open(fpath, 'rU')
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
for row in reader:
if int(row['round']) == gw:
id = int(os.path.basename(root).split('_')[-1])
name = names[id]
position = positions[id]
fixture = int(row['fixture'])
if row['was_home'] == True or row['was_home'] == "True":
row['team'] = teams[fixtures_home[fixture]]
else:
row['team'] = teams[fixtures_away[fixture]]
row['name'] = name
row['position'] = position
row['xP'] = xPoints[id]
rows += [row]
fieldnames = ['name', 'position', 'team', 'xP'] + fieldnames
outf = open(os.path.join(output_dir, "gw" + str(gw) + ".csv"), 'w', encoding="utf-8")
writer = csv.DictWriter(outf, fieldnames=fieldnames, lineterminator='\n')
writer.writeheader()
for row in rows:
writer.writerow(row)
def collect_all_gws(directory_name, output_dir):
for i in range(1,5):
collect_gw(i, directory_name, output_dir)
def merge_all_gws(num_gws, gw_directory):
for i in range(1, num_gws):
merge_gw(i, gw_directory)
def main():
#collect_all_gws(sys.argv[1], sys.argv[2])
merge_all_gws(int(sys.argv[1]), sys.argv[2])
#collect_gw(39, sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
bb010b096427cce84eb368767cc9d17ddb8f16db
|
a9fc496e0724866093dbb9cba70a8fdce12b67a9
|
/scripts/field/eunwol_house.py
|
131e7ecadea4a9957479632d96bd39eede25e3ea
|
[
"MIT"
] |
permissive
|
ryantpayton/Swordie
|
b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0
|
ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e
|
refs/heads/master
| 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 |
MIT
| 2022-11-24T08:17:54 | 2020-04-08T05:50:22 |
Java
|
UTF-8
|
Python
| false | false | 878 |
py
|
# 410000001
if sm.hasQuest(38002):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("What happened? A house and a new name... But what happened to my friends? Are they alive? If I am, then maybe we failed to seal the Black Mage...")
sm.sendSay("No. They wouldn't give up that easily. They're probably hiding out somewhere, waiting to get back together. I need to look after myself for now, and get my strength back.")
sm.sendSay("Level 10... It's better than nothing, but it's not the best feeling. I'll hang around and get stronger. That's the only thing I can do now.")
sm.setQRValue(38002, "clear", False)
elif sm.hasQuest(38018):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("W-what is that thing? It looks so fuzzy. I don't think I should touch it...")
sm.setQRValue(38018, "clear", False)
|
[
"[email protected]"
] | |
cc95e675ce9006d3e9f7d28cffe4c7ef20978ece
|
e024cc2f51d2c9104a514f3f1a77c5cabbe7691a
|
/examplePandas.py
|
c717fed2795c5e5ce4b716fd2779e9a249e1c041
|
[] |
no_license
|
wilsonsk/Machine-Learning-for-Stock-Trading
|
1818f144df02e69ce3e29fe1eb576675d512324a
|
bf5a36942e0f39e6c6d1c521bb3532e7eb82b669
|
refs/heads/master
| 2021-06-08T18:47:35.787532 | 2016-09-23T05:13:22 | 2016-09-23T05:13:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
import pandas as pd
def test_run():
df = pd.read_csv("data/AAPL.csv");
print df #print entire dataframe
#print df.head() -- print first 5 rows
#print df.tail() -- print last 5 rows
#print df.tail(n) -- print last n rows
if __name__ == "__main__":
test_run()
|
[
"[email protected]"
] | |
6b2dc4c4ace54c42df53fad4d1201457c5f52c49
|
881041fab1b4d05f1c5371efed2f9276037eb609
|
/tasks/where-civilian-complaints-were-reported-2005-2009/depositor.py
|
cfc1f38a64c3ca6b8dd165f0179f14f18bf8bf97
|
[] |
no_license
|
ResidentMario/urban-physiology-nyc-catalog
|
b568f3b6ee1a887a50c4df23c488f50c92e30625
|
cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c
|
refs/heads/master
| 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
import requests
r = requests.get("https://data.cityofnewyork.us/api/views/wqr5-zmgj/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv"]
|
[
"[email protected]"
] | |
216af594580d96800f9747a8650c7a4f5c81e89f
|
88ba19b3303c112a424720106a7f7fde615757b5
|
/03-data_manipulation_with_pandas/01-transforming_data/sorting_rows1.py
|
0939c1757697add7f2c7c4dbd665fad67ebd8b1c
|
[] |
no_license
|
mitchisrael88/Data_Camp
|
4100f5904c62055f619281a424a580b5b2b0cbc1
|
14356e221f614424a332bbc46459917bb6f99d8a
|
refs/heads/master
| 2022-10-22T18:35:39.163613 | 2020-06-16T23:37:41 | 2020-06-16T23:37:41 | 263,859,926 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,368 |
py
|
Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
|
[
"[email protected]"
] | |
170a9f6840626ccbdc39ec724bedd10138df1fc0
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/_configuration.py
|
9aa2b7aa11ce32d405db56ca4db44791e423a5c6
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 2,145 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrestazure import AzureConfiguration
from .version import VERSION
class SecurityCenterConfiguration(AzureConfiguration):
    """Configuration for SecurityCenter.

    Every parameter used to create this instance is kept as an instance
    attribute.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Azure subscription ID
    :type subscription_id: str
    :param asc_location: The location where ASC stores the data of the
     subscription. can be retrieved from Get locations
    :type asc_location: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, asc_location, base_url=None):

        # Reject missing required arguments up front, in the same order (and
        # with the same messages) callers already depend on.
        for param_name, param_value in (
                ('credentials', credentials),
                ('subscription_id', subscription_id),
                ('asc_location', asc_location)):
            if param_value is None:
                raise ValueError(
                    "Parameter '{}' must not be None.".format(param_name))

        # Default to the public Azure management endpoint when no URL given.
        super(SecurityCenterConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        # Starting Autorest.Python 4.0.64, make connection pool activated by default
        self.keep_alive = True

        self.add_user_agent('azure-mgmt-security/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
        self.asc_location = asc_location
|
[
"[email protected]"
] | |
07ccca1ad2d1ac1eabc7ee6a124434a18a9abf44
|
5e5799e0ccce7a72d514fbc76dcb0a2108013f18
|
/DAQConst.py
|
97bc899ca96bfab6e6bceb5513c84de6b84fe56f
|
[] |
no_license
|
sourcery-ai-bot/dash
|
6d68937d225473d06a18ef64079a4b3717b5c12c
|
e1d1c3a601cd397d2508bfd4bb12bdb4e878cd9a
|
refs/heads/master
| 2023-03-07T17:15:39.174964 | 2011-03-01T17:11:21 | 2011-03-01T17:11:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 493 |
py
|
#!/usr/bin/env python
#
# DAQ Constant values


class DAQPort(object):
    """Well-known TCP port numbers used by the DAQ system.

    The original code interleaved bare string literals with the class
    attributes; only the first of those became the class docstring and
    the rest were discarded no-op statements, so they are written as
    real comments here.
    """

    # DAQLive port
    DAQLIVE = 6659
    # IceCube Live logging/monitoring port
    I3LIVE = 6666
    # CnCServer XML-RPC port
    CNCSERVER = 8080
    # CnCServer->DAQRun logging port
    CNC2RUNLOG = 8999
    # DAQRun XML-RPC port
    DAQRUN = 9000
    # DAQRun catchall logging port
    CATCHALL = 9001
    # First port used by DAQRun for individual component logging
    RUNCOMP_BASE = 9002
|
[
"[email protected]"
] | |
6eb0d84530b500e74e8e9edde1228aadfe50f8ea
|
8966d83bf85d4738d644624bd7b7063e8534a515
|
/data.d/code/python/example/wxpython/frame_boxsizer_horizontal.py
|
cb90c4dd5a0b24d8d1c6b59f67b455c564814a00
|
[] |
no_license
|
taka16a23/.emacs.d
|
84a77c04c4d5e00c089cb01cc42a94b884f729ae
|
ac5794e2594037e316d5fe9cf6bf1fd20b44a726
|
refs/heads/master
| 2023-05-29T06:25:38.449977 | 2023-05-16T22:08:04 | 2023-05-16T22:08:04 | 82,106,233 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 676 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
import wx
def _main():
    """Show a frame with three buttons laid out left-to-right.

    Demonstrates a horizontal ``wx.BoxSizer``: the three buttons are added
    to the sizer in order and rendered side by side on a grey panel.
    """
    app = wx.App()
    frame = wx.Frame(None, wx.ID_ANY, 'test Frameme', size=(400, 200))
    panel = wx.Panel(frame, wx.ID_ANY)
    panel.SetBackgroundColour('#AFAFAF')
    button_1 = wx.Button(panel, wx.ID_ANY, 'botton1')
    button_2 = wx.Button(panel, wx.ID_ANY, 'botton2')
    button_3 = wx.Button(panel, wx.ID_ANY, 'botton3')
    # HORIZONTAL sizer -> children are placed in a single row.
    layout = wx.BoxSizer(wx.HORIZONTAL)
    layout.Add(button_1)
    layout.Add(button_2)
    layout.Add(button_3)
    panel.SetSizer(layout)
    frame.Show()
    # Blocks until the window is closed.
    app.MainLoop()


if __name__ == '__main__':
    _main()
|
[
"root@qu"
] |
root@qu
|
d29954de8f62e3c9ec1497319acc72009ec90777
|
42d8bea28c7a34dde8d47f81e9463c5970af7120
|
/app/api/convert.py
|
6b5629f6f561c782c33307b24c72610c5501db73
|
[
"MIT"
] |
permissive
|
Tharitos/mse_teacher_plan
|
1e26818811db4991eadca3157b28b2c9ae691416
|
4c577f810eb040c4a74810c98e2c8c4b514caf5d
|
refs/heads/master
| 2020-04-07T05:57:52.084094 | 2018-11-18T19:04:02 | 2018-11-18T19:04:02 | 158,116,922 | 0 | 0 |
NOASSERTION
| 2018-11-18T19:01:48 | 2018-11-18T19:01:48 | null |
UTF-8
|
Python
| false | false | 2,032 |
py
|
from typing import Union, List, Type, Dict
import bson
import datetime
import mongoengine
from mongoengine.document import Document
# A single form-field description; ConvertedDocument is a whole form.
ConvertedField = Dict[str, Union[str, int, List[str]]]
ConvertedDocument = List[ConvertedField]


def f(text: str, name: str, type: str, opts: Union[List[str], None] = None,
      value: str = '', fixed: bool = False) -> ConvertedField:
    """Build one form-field description dict.

    :param text: human-readable label for the field.
    :param name: field name (used as the form input name).
    :param type: HTML input type, e.g. ``'text'``, ``'number'``, ``'date'``.
    :param opts: choice options for select-style fields; ``None`` means no
        options and yields an empty list (avoids a shared mutable default).
    :param value: pre-filled value; empty string by default.
    :param fixed: ``True`` when the field must not be edited by the user.
    :return: dict with keys ``text``/``name``/``type``/``opts``/``value``/``fixed``.
    """
    return {
        'text': text,
        'name': name,
        'type': type,
        'opts': [] if opts is None else opts,
        'value': value,
        'fixed': fixed,
    }
def convert_HTML_to_mongo_types(obj) -> str:
    """Map a mongoengine field instance to an HTML ``<input>`` type name.

    NOTE(review): despite the function name, this converts *mongo* field
    types to HTML input types, not the other way around.
    """
    if isinstance(obj, mongoengine.fields.IntField):
        return 'number'
    if isinstance(obj, mongoengine.fields.DateTimeField):
        return 'date'
    # if obj.isinstance(mongoengine.fields.StringField):
    # Any other field type (strings included) falls back to a text input.
    return 'text'
def convert_mongo_model(obj: Type[Document]) -> ConvertedDocument:
    """Describe a mongoengine model's fields as form-field dicts.

    Values are left empty; use :func:`convert_mongo_document` to fill them
    from a concrete document instance.
    """
    fields = obj._fields_ordered
    res = []
    for field in fields:
        current_field = obj._fields[field]
        # verbose_name / changeable_by_admin are optional custom attributes
        # on the project's field definitions; fall back when absent.
        try:
            text = current_field.verbose_name
        except AttributeError:
            text = '%NO_VERBOSE_NAME%'
        try:
            fixed = current_field.changeable_by_admin
        except AttributeError:
            fixed = False
        name = current_field.name
        type = convert_HTML_to_mongo_types(current_field)
        opts = None
        if current_field.choices:
            opts = current_field.choices
        value = ''  # model-level description carries no instance value
        res.append(f(text, name, type, opts, value, fixed))
    return res
def convert_mongo_document(obj: Document) -> ConvertedDocument:
    """Describe a document's fields, filling each entry's ``'value'``.

    Starts from the model-level description and copies in the instance's
    field values, normalising non-JSON-friendly types to strings.
    """
    res = convert_mongo_model(obj)
    fields = obj._fields_ordered
    for i in range(len(fields)):
        data = obj[fields[i]]
        if isinstance(data, datetime.datetime):
            data = data.date().isoformat()  # keep only the date part
        if isinstance(data, bson.objectid.ObjectId):
            data = str(data)
        if isinstance(data, Document):
            data = str(data.id)  # referenced documents -> their id string
        res[i]['value'] = data
    return res
|
[
"[email protected]"
] | |
c09c4c872e08f2b035c24a8533dc2d86407835e1
|
ee53b0262007b2f0db0fe15b2ad85f65fafa4e25
|
/Leetcode/1488. Avoid Flood in The City.py
|
8c3fd8f830fe17cfd954caa9f8977d15f440474a
|
[] |
no_license
|
xiaohuanlin/Algorithms
|
bd48caacb08295fc5756acdac609be78e143a760
|
157cbaeeff74130e5105e58a6b4cdf66403a8a6f
|
refs/heads/master
| 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,234 |
py
|
'''
Your country has an infinite number of lakes. Initially, all the lakes are empty, but when it rains over the nth lake, the nth lake becomes full of water. If it rains over a lake that is full of water, there will be a flood. Your goal is to avoid floods in any lake.
Given an integer array rains where:
rains[i] > 0 means there will be rains over the rains[i] lake.
rains[i] == 0 means there are no rains this day and you can choose one lake this day and dry it.
Return an array ans where:
ans.length == rains.length
ans[i] == -1 if rains[i] > 0.
ans[i] is the lake you choose to dry in the ith day if rains[i] == 0.
If there are multiple valid answers return any of them. If it is impossible to avoid flood return an empty array.
Notice that if you chose to dry a full lake, it becomes empty, but if you chose to dry an empty lake, nothing changes.
Example 1:
Input: rains = [1,2,3,4]
Output: [-1,-1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day full lakes are [1,2,3]
After the fourth day full lakes are [1,2,3,4]
There's no day to dry any lake and there is no flood in any lake.
Example 2:
Input: rains = [1,2,0,0,2,1]
Output: [-1,-1,2,1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day, we dry lake 2. Full lakes are [1]
After the fourth day, we dry lake 1. There is no full lakes.
After the fifth day, full lakes are [2].
After the sixth day, full lakes are [1,2].
It is easy that this scenario is flood-free. [-1,-1,1,2,-1,-1] is another acceptable scenario.
Example 3:
Input: rains = [1,2,0,1,2]
Output: []
Explanation: After the second day, full lakes are [1,2]. We have to dry one lake in the third day.
After that, it will rain over lakes [1,2]. It's easy to prove that no matter which lake you choose to dry in the 3rd day, the other one will flood.
Constraints:
1 <= rains.length <= 105
0 <= rains[i] <= 109
'''
import unittest
from typing import *
from bisect import bisect
class Solution:
    def avoidFlood(self, rains: List[int]) -> List[int]:
        """Choose a lake to dry on each rain-free day so no lake floods.

        Greedy strategy: remember the last day each lake filled; when a
        full lake is about to be rained on again, dry it on the earliest
        still-unused dry day that falls *after* it last filled.  Returns
        an empty list when a flood is unavoidable.
        """
        last_filled = {}   # lake id -> index of the day it last filled
        dry_days = []      # indices of unused rain-free days (ascending)
        ans = []
        for day, lake in enumerate(rains):
            if lake == 0:
                dry_days.append(day)
                ans.append(1)  # placeholder; may be rewritten below
                continue
            ans.append(-1)
            if lake in last_filled:
                # First dry day strictly after the lake last filled.
                pos = bisect(dry_days, last_filled[lake])
                if pos == len(dry_days):
                    return []  # no usable dry day -> flood
                ans[dry_days.pop(pos)] = lake
            last_filled[lake] = day
        return ans
class TestSolution(unittest.TestCase):
    """Unit tests for ``Solution.avoidFlood``."""

    def test_case(self):
        # Each example pairs the call arguments with the expected answer.
        examples = (
            (([1,2,0,0,2,1],),[-1,-1,2,1,-1,-1]),
        )
        for first, second in examples:
            self.assert_function(first, second)

    def assert_function(self, first, second):
        # Helper so failures report the offending input/expected pair.
        self.assertEqual(Solution().avoidFlood(*first), second,
                         msg="first: {}; second: {}".format(first, second))
unittest.main()
|
[
"[email protected]"
] | |
d7c897b1fa38a472e0636bfb49694cb78a9a4151
|
5492859d43da5a8e292777c31eace71e0a57dedf
|
/chat/migrations/0021_auto_20190711_2100.py
|
648c9ff6915b6a1a5b5e87052c58dbab41893255
|
[
"MIT"
] |
permissive
|
akindele214/181hub_2
|
93ad21dc6d899b6c56fbe200354b1678bb843705
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
refs/heads/master
| 2022-12-13T01:15:07.925556 | 2020-05-19T09:39:57 | 2020-05-19T09:39:57 | 196,470,605 | 1 | 1 |
MIT
| 2022-12-08T01:22:55 | 2019-07-11T22:04:42 |
Python
|
UTF-8
|
Python
| false | false | 762 |
py
|
# Generated by Django 2.1.5 on 2019-07-11 20:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Point ReportChat's ``chat``/``user`` FKs at explicit related_names."""

    dependencies = [
        ('chat', '0020_reportchat_content'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reportchat',
            name='chat',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_chat', to='chat.Chat'),
        ),
        migrations.AlterField(
            model_name='reportchat',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"[email protected]"
] | |
72c5c560be0c150db2650cd8ddc1d2d5d0b5b6df
|
f4d8faeebbf9b7fe43396c637096a56c01a70060
|
/blog/migrations/0006_auto_20201105_1114.py
|
5f8babf0fc5e5c91344168878c7a39cc28a2de29
|
[] |
no_license
|
eloghin/blog
|
eb44f6d57e88fefacb48111791b9c96fd4883be9
|
3c27a112bb3d51a5a25e901c10a632d4d6251a15
|
refs/heads/main
| 2023-01-07T05:47:59.124104 | 2020-11-05T13:12:48 | 2020-11-05T13:12:48 | 309,698,269 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 524 |
py
|
# Generated by Django 3.1.2 on 2020-11-05 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order comments by creation time and add an optional email field."""

    dependencies = [
        ('blog', '0005_auto_20201021_0920'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='comment',
            options={'ordering': ('date_created',)},
        ),
        migrations.AddField(
            model_name='comment',
            name='email',
            # Nullable so existing comment rows migrate without a default.
            field=models.EmailField(max_length=254, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
90687734a25d313028207d5b66add9b5d039eb1f
|
6ab217b675b0d33dec9d8985efc2de314e3a7a28
|
/menus/controllers/restapi/menu_category/urls.py
|
05579b2896e01b722317338f1b06535471c80647
|
[] |
no_license
|
nujkram/dream_cream_pastries
|
3547928af859ebbb93f8d6ff64d02796d8c61a0c
|
c6a764f4f2c16191661ee6747dc0daa896eae5ec
|
refs/heads/master
| 2023-06-20T20:20:21.001373 | 2021-07-29T00:55:49 | 2021-07-29T00:55:49 | 375,721,861 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,264 |
py
|
from django.urls import path
from dream_cream_pastries_project.urls import (
URL_READ_ONLY,
URL_DETAIL,
URL_CREATE,
URL_UPDATE,
URL_DELETE
)
from .api import(
ApiPublicMenuCategoryListDetail,
ApiPrivateMenuCategoryViewSet
)
VERSION = 'v1'  # API version prefix used by every route below

urlpatterns = [
    # public (unauthenticated, read-only)
    path(
        f'{VERSION}/public/list',
        ApiPublicMenuCategoryListDetail.as_view(URL_READ_ONLY),
        name='api_public_menu_category_list_detail'
    ),

    # private (authenticated CRUD endpoints)
    path(
        f'{VERSION}/private/list',
        ApiPrivateMenuCategoryViewSet.as_view(URL_READ_ONLY),
        name='api_private_menu_category_list_detail'
    ),
    path(
        f'{VERSION}/private/create',
        ApiPrivateMenuCategoryViewSet.as_view(URL_CREATE),
        name='api_private_menu_category_create'
    ),
    path(
        f'{VERSION}/private/<pk>/update',
        ApiPrivateMenuCategoryViewSet.as_view(URL_UPDATE),
        name='api_private_menu_category_update'
    ),
    path(
        f'{VERSION}/private/<pk>/delete',
        ApiPrivateMenuCategoryViewSet.as_view(URL_DELETE),
        name='api_private_menu_category_delete'
    ),
]
"""
Add to urls.py urlpatterns:
path('menu_category/api/', include('menus.controllers.restapi.menu_category.urls'))
|
[
"[email protected]"
] | |
0646e9fd57a5a8ba9198885afcbdf59f25a09de9
|
027635467005c93a5b5406030b6f8852368e6390
|
/question1_highest_average.py
|
a26682a8d9d01efbdae4eb073f9c60eec3e52feb
|
[] |
no_license
|
Shadyaobuya/Opibus-Assessment
|
0472a05e4c78b28cc5779d1a2a78c29cabb1ba04
|
8675e82a1c64d864eb4f85604d7843670a3f8078
|
refs/heads/master
| 2023-08-22T16:14:57.912494 | 2021-10-16T11:40:14 | 2021-10-16T11:40:14 | 417,516,375 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,073 |
py
|
#This is a program that takes in a list of discharge rates and returns the highest average
def find_highest_average(discharge):
    """Return the highest average of any two adjacent discharge rates.

    :param discharge: sequence of numeric discharge rates (len >= 2).
    :return: max over all adjacent pairs of ``(a + b) / 2``.
    :raises ValueError: if fewer than two rates are given (the original
        implementation crashed with an IndexError in that case).
    """
    if len(discharge) < 2:
        raise ValueError("need at least two discharge rates")
    # max() over the adjacent-pair sums replaces the original manual
    # append-then-scan accumulation loop.
    highest_sum = max(discharge[i] + discharge[i + 1]
                      for i in range(len(discharge) - 1))
    return highest_sum / 2
if __name__=='__main__':
    # Smoke tests; expected outputs noted inline.
    print(find_highest_average([2, 3, 4, 1, 5])) #test case 1 output 3.5: [3,4]
    print(find_highest_average([2, 3, 4, 8, 1, 5])) #test case 2 output:6.0 [4,8]
    print(find_highest_average([6,1,7,3,9,6])) #test case 3 output:7.5: [9,6]
|
[
"[email protected]"
] | |
1a54da2add1bd9577ec9109d3620de423fa16e30
|
d31d744f62c09cb298022f42bcaf9de03ad9791c
|
/federated/tensorflow_federated/__init__.py
|
7153a5a59599a5d2457dc00818688f32f3380d26
|
[] |
no_license
|
yuhuofei/TensorFlow-1
|
b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0
|
36eb6994d36674604973a06159e73187087f51c6
|
refs/heads/master
| 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,090 |
py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The TensorFlow Federated library."""
import sys
from tensorflow_federated.version import __version__ # pylint: disable=g-bad-import-order
from tensorflow_federated.python import aggregators
from tensorflow_federated.python import learning
from tensorflow_federated.python import simulation
from tensorflow_federated.python.core import backends
from tensorflow_federated.python.core import framework
from tensorflow_federated.python.core import templates
from tensorflow_federated.python.core import test
from tensorflow_federated.python.core import utils
from tensorflow_federated.python.core.api.computation_base import Computation
from tensorflow_federated.python.core.api.computation_types import at_clients as type_at_clients
from tensorflow_federated.python.core.api.computation_types import at_server as type_at_server
from tensorflow_federated.python.core.api.computation_types import FederatedType
from tensorflow_federated.python.core.api.computation_types import FunctionType
from tensorflow_federated.python.core.api.computation_types import SequenceType
from tensorflow_federated.python.core.api.computation_types import StructType
from tensorflow_federated.python.core.api.computation_types import StructWithPythonType
from tensorflow_federated.python.core.api.computation_types import TensorType
from tensorflow_federated.python.core.api.computation_types import to_type
from tensorflow_federated.python.core.api.computation_types import Type
from tensorflow_federated.python.core.api.computations import check_returns_type
from tensorflow_federated.python.core.api.computations import federated_computation
from tensorflow_federated.python.core.api.computations import tf_computation
from tensorflow_federated.python.core.api.intrinsics import federated_aggregate
from tensorflow_federated.python.core.api.intrinsics import federated_apply
from tensorflow_federated.python.core.api.intrinsics import federated_broadcast
from tensorflow_federated.python.core.api.intrinsics import federated_collect
from tensorflow_federated.python.core.api.intrinsics import federated_eval
from tensorflow_federated.python.core.api.intrinsics import federated_map
from tensorflow_federated.python.core.api.intrinsics import federated_mean
from tensorflow_federated.python.core.api.intrinsics import federated_reduce
from tensorflow_federated.python.core.api.intrinsics import federated_secure_sum
from tensorflow_federated.python.core.api.intrinsics import federated_sum
from tensorflow_federated.python.core.api.intrinsics import federated_value
from tensorflow_federated.python.core.api.intrinsics import federated_zip
from tensorflow_federated.python.core.api.intrinsics import sequence_map
from tensorflow_federated.python.core.api.intrinsics import sequence_reduce
from tensorflow_federated.python.core.api.intrinsics import sequence_sum
from tensorflow_federated.python.core.api.placements import CLIENTS
from tensorflow_federated.python.core.api.placements import SERVER
from tensorflow_federated.python.core.api.typed_object import TypedObject
from tensorflow_federated.python.core.api.value_base import Value
from tensorflow_federated.python.core.api.values import to_value
# Guard against unsupported interpreters.  Comparing the version tuple
# directly fixes the original `[0] < 3 or [1] < 6` check, which would
# wrongly reject any future major version whose minor number is < 6
# (e.g. a hypothetical 4.0).
if sys.version_info < (3, 6):
  raise Exception('TFF only supports Python versions 3.6 or later.')

# Initialize a default execution context. This is implicitly executed the
# first time a module in the `core` package is imported.
backends.native.set_local_execution_context()
|
[
"[email protected]"
] | |
abada1167457df8faaf71d0f85057c37fcd5b748
|
929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c
|
/src/subject/Tree.py
|
3ebe30494ae72e4da3574a67bea453247420b88b
|
[] |
no_license
|
1325052669/leetcode
|
fe7571a9201f4ef54089c2e078810dad11205b14
|
dca40686c6a280bd394feb8e6e78d40eecf854b9
|
refs/heads/master
| 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,284 |
py
|
class TreeNode:
    """A binary-tree node holding a value and left/right child links."""

    def __init__(self,val):
        self.val = val
        self.left =None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Tree:
    """Binary-tree traversals implemented in several styles.

    Each of pre-, in- and post-order is offered as a recursive visitor
    and a divide-and-conquer variant (concatenating sub-results); pre-
    and in-order additionally have explicit-stack iterative versions.
    All methods return a list of node values and leave the tree intact.
    """

    def __init__(self):
        pass

    def pre_order_traverse(self, root):
        """Root -> left -> right, via a recursive visitor closure."""
        if not root:
            return []
        out = []

        def visit(node):
            if not node:
                return
            out.append(node.val)
            visit(node.left)
            visit(node.right)

        visit(root)
        return out

    def pre_order_iterative(self, root):
        """Pre-order with an explicit stack; right child pushed first."""
        if not root:
            return []
        out, pending = [], [root]
        while pending:
            node = pending.pop()
            out.append(node.val)
            if node.right:
                pending.append(node.right)
            if node.left:
                pending.append(node.left)
        return out

    def pre_order_divide_conquer(self, node):
        """Pre-order as [root] + left-subtree result + right-subtree result."""
        if not node:
            return []
        return ([node.val]
                + self.pre_order_divide_conquer(node.left)
                + self.pre_order_divide_conquer(node.right))

    def in_order_traverse(self, root):
        """Left -> root -> right, via a recursive visitor closure."""
        if not root:
            return []
        out = []

        def visit(node):
            if not node:
                return
            visit(node.left)
            out.append(node.val)
            visit(node.right)

        visit(root)
        return out

    def in_order_iterative(self, root):
        """In-order with an explicit stack: run left, pop, then go right."""
        if not root:
            return []
        out, pending = [], []
        cursor = root
        while pending or cursor:
            if cursor:
                pending.append(cursor)
                cursor = cursor.left
            else:
                node = pending.pop()
                out.append(node.val)
                cursor = node.right
        return out

    def in_order_divide_conqur(self, root):
        """In-order as left result + [root] + right result."""
        if not root:
            return []
        return (self.in_order_divide_conqur(root.left)
                + [root.val]
                + self.in_order_divide_conqur(root.right))

    def post_order_traverse(self, root):
        """Left -> right -> root, via a recursive visitor closure."""
        if not root:
            return []
        out = []

        def visit(node):
            if not node:
                return
            visit(node.left)
            visit(node.right)
            out.append(node.val)

        visit(root)
        return out

    def post_order_divide_conqur(self, node):
        """Post-order as left result + right result + [root]."""
        if not node:
            return []
        return (self.post_order_divide_conqur(node.left)
                + self.post_order_divide_conqur(node.right)
                + [node.val])
def main():
    """Build a small complete binary tree and print post-order traversals."""
    # Complete tree:      1
    #                   /   \
    #                  2     3
    #                 / \   / \
    #                4   5 6   7
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    root.right.left = TreeNode(6)
    root.right.right = TreeNode(7)
    # print(Tree().pre_order_traverse(root))
    # print(Tree().pre_order_iterative(root))
    # print(Tree().pre_order_divide_conquer(root))
    # print(Tree().in_order_traverse(root))
    # print(Tree().in_order_iterative(root))
    # print(Tree().in_order_divide_conqur(root))
    print(Tree().post_order_traverse(root))
    print(Tree().post_order_divide_conqur(root))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
eb96064b42e96778d4d8b0bdffaf9669ba512f73
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2178/60692/285974.py
|
d49a0e281bfcaae701b49245a014c2d5ce39431b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 816 |
py
|
from collections import defaultdict
'''
n = int(input())
list1 = input().split(" ")
count = 0
s1 = ''
res = []
dic1 = defaultdict(int)
for i in range(n):
count += i + 1
if i == 0:
s1 = list1[i]
else:
s1 += list1[i]
if list1[i] == list1[i - 1]:
dic1[list1[i]] += 1
if dic1[list1[i]] > 1:
count += (dic1[list1[i]] - 1) * dic1[list1[i]] // 2
count -= dic1[list1[i]] * (dic1[list1[i]] + 1) // 2
elif s1[0:i].count(list1[i]) and s1.index(list1[i]) != i - 1:
count -= 1
j = i - 1
t = s1[j:]
while s1[0:j].count(t):
count -= 1
j -= 1
t = s1[j:]
res.append(count)
for j in res:
print(j)
'''
# Read the count line (consumed but otherwise unused), then echo the next
# input line.  Fixed: `int(input)` passed the builtin function object to
# int() and raised TypeError -- input must be *called*.
n = int(input())
print(input())
|
[
"[email protected]"
] | |
5a68d169b1831d85bb68d490f987e3d2d2cbac5a
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil979.py
|
e80fc8be8c4791851baaa5a6a9e04a24ad913cfd
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,043 |
py
|
# qubit number=5
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
    """Build the fixed 5-qubit benchmark circuit.

    The gate sequence is auto-generated; the trailing ``# number=N``
    comments preserve the generator's gate indices.
    """
    prog = Program()  # circuit begin

    prog += H(0)  # number=3
    prog += H(1)  # number=4
    prog += H(2)  # number=5
    prog += H(3)  # number=6
    prog += RX(-0.1602212253330796,1)  # number=36
    prog += H(4)  # number=21
    prog += H(0)  # number=1
    prog += H(1)  # number=2
    prog += H(2)  # number=7
    prog += H(3)  # number=8
    prog += X(0)  # number=9
    prog += CNOT(0,1)  # number=28
    prog += H(4)  # number=31
    prog += X(1)  # number=29
    prog += CNOT(0,1)  # number=30
    prog += CNOT(0,2)  # number=22
    prog += CNOT(0,2)  # number=25
    prog += X(2)  # number=26
    prog += CNOT(0,2)  # number=27
    prog += CNOT(0,2)  # number=24
    prog += X(3)  # number=12
    prog += X(0)  # number=13
    prog += X(1)  # number=14
    prog += X(2)  # number=15
    prog += X(3)  # number=16
    prog += H(0)  # number=17
    prog += H(1)  # number=18
    prog += H(2)  # number=19
    prog += H(3)  # number=20
    prog += H(0)  # number=37
    prog += CZ(1,0)  # number=38
    prog += H(0)  # number=39
    prog += Z(1)  # number=34
    prog += H(0)  # number=40
    prog += CZ(1,0)  # number=41
    prog += H(0)  # number=42
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count how many times each bitstring occurs.

    (Name kept as-is -- typo included -- for call-site compatibility.)

    :param bitstrings: iterable of hashable measurement results.
    :return: dict mapping each bitstring to its occurrence count.
    """
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Build the circuit, run 1024 shots on a 5-qubit QVM, and write the
    # bitstring frequency summary to a CSV file.
    prog = make_circuit()
    qvm = get_qc('5q-qvm')

    results = qvm.run_and_measure(prog,1024)
    # run_and_measure returns per-qubit columns; transpose into per-shot
    # rows, then join each row's bits into a single string like '01011'.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil979.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
becca70bff7e7bf31f995812875dc8047fd6f021
|
177455bdf1fece221eef93b50f16253d342aa6a6
|
/alerta/api/v2/views.py
|
b3627e1aa8dedf3635fb8cf886a08f541b70a809
|
[
"Apache-2.0"
] |
permissive
|
ntoll/alerta
|
c42630d91bf16cb649b43b69ae798abe60f39ed6
|
8122526b1791a0ff0d1aa26061129892b7e86f00
|
refs/heads/master
| 2021-01-18T05:18:30.062671 | 2013-03-03T23:17:10 | 2013-03-03T23:17:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,637 |
py
|
from flask import jsonify, request, current_app
from functools import wraps
from alerta.api.v2 import app, mongo
import datetime
# TODO(nsatterl): put these constants somewhere appropriate
MAX_HISTORY = -10 # 10 most recent
# TODO(nsatterl): use @before_request and @after_request to attach a unique request id
@app.before_first_request
def before_request():
    """One-time startup hook; config loading is still a stub."""
    # print "load config file with warning message"
    pass
# TODO(nsatterl): fix JSON-P
def jsonp(func):
    """Wraps JSONified output for JSONP requests."""
    @wraps(func)
    def decorated_function(*args, **kwargs):
        # A 'callback' query arg switches the response to JSONP: the JSON
        # body from the wrapped view is wrapped in a call to the named
        # callback and served as JavaScript.
        callback = request.args.get('callback', False)
        if callback:
            data = str(func(*args, **kwargs).data)
            content = str(callback) + '(' + data + ')'
            mimetype = 'application/javascript'
            return current_app.response_class(content, mimetype=mimetype)
        else:
            # No callback: return the plain JSON response unchanged.
            return func(*args, **kwargs)
    return decorated_function
@app.route('/alerta/api/v2/alerts/alert/<alertid>')
def get_alert(alertid):
    """Look up a single alert by id and return it as JSON."""
    alert = mongo.db.alerts.find_one({'_id': alertid})

    if alert:
        fix_id(alert)  # expose Mongo's '_id' as 'id'
        return jsonify(response={'alert': alert, 'status': 'ok', 'total': 1})
    else:
        # TODO(nsatterl): include error message as 'message': 'not found' etc.
        return jsonify(response={"alert": None, "status": "error", "message": "not found", "total": 0})
@app.route('/alerta/api/v2/alerts')
def get_alerts():
    """Parse alert-list query parameters and echo them as JSON.

    NOTE(review): this endpoint looks unfinished -- `query`, `sort_by` and
    `alert_limit` are built but never used; the final jsonify only echoes
    the parsed options.  Left as-is pending the real Mongo query.
    """
    hide_details = request.args.get('hide-alert-details', False, bool)
    hide_alert_repeats = request.args.getlist('hide-alert-repeats')

    # TODO(nsatterl): support comma-separated fields eg. fields=event,summary
    fields = dict((k, 1) for k in request.args.getlist('fields'))
    # NOTE: if filtering on fields still always include severity and status in response
    if fields:
        fields['severity'] = 1
        fields['status'] = 1

    if request.args.get('hide-alert-history', False, bool):
        fields['history'] = 0
    else:
        # Fixed: MongoDB's projection operator is '$slice', not 'slice'.
        fields['history'] = {'$slice': MAX_HISTORY}

    alert_limit = request.args.get('limit', 0, int)  # NOTE(review): unused

    query = dict()
    query_time = datetime.datetime.utcnow()
    from_date = request.args.get('from-date')
    if from_date:
        from_date = datetime.datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%S.%fZ')
        # Fixed: `pytz` was referenced here but never imported, so any
        # request with from-date raised NameError.  The stdlib UTC tzinfo
        # is equivalent for marking these timestamps as UTC.
        from_date = from_date.replace(tzinfo=datetime.timezone.utc)
        to_date = query_time.replace(tzinfo=datetime.timezone.utc)
        query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}

    sort_by = list()
    for s in request.args.getlist('sort-by'):
        if s in ['createTime', 'receiveTime', 'lastReceiveTime']:
            sort_by.append((s, -1))  # sort by newest first
        else:
            sort_by.append((s, 1))  # alpha-numeric sort
    if not sort_by:
        sort_by.append(('lastReceiveTime', -1))

    return jsonify(details=hide_details, repeats=hide_alert_repeats, fields=fields)
@app.route('/alerta/api/v1/alerts/alert.json', methods=['POST', 'PUT'])
def create_alert():
    """Create a new alert (not yet implemented).

    Fixed: the route declares no URL variables, so the view must not take
    an ``alertid`` argument -- Flask would raise a TypeError when
    dispatching to the old signature.
    """
    pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['POST', 'PUT'])
def modify_alert(alertid):
    """Modify an existing alert (not yet implemented)."""
    pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>/tag', methods=['POST', 'PUT'])
def tag_alert(alertid):
    """Attach a tag to an alert (not yet implemented)."""
    pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['DELETE'])
def delete_alert(alertid):
    """Delete an alert (not yet implemented)."""
    pass
@app.route('/alerta/api/v2/resources')
def get_resources():
    """List alert resources (not yet implemented).

    Fixed: the route declares no URL variables, so the view must not take
    an ``alertid`` argument -- Flask would raise a TypeError when
    dispatching to the old signature.
    """
    pass
def fix_id(alert):
    """Rename a Mongo document's ``'_id'`` key to ``'id'``, in place.

    Dicts without an ``'_id'`` key are left untouched.  The (mutated)
    input dict is also returned for call-chaining convenience.
    """
    if '_id' in alert:
        alert['id'] = alert.pop('_id')
    return alert
|
[
"[email protected]"
] | |
49ef83378fcd0ea9e5514661358c72f05e5b41ae
|
d37bac0cca5a3fce2eaeded5ab8262f3ec215b85
|
/backend/home/migrations/0002_load_initial_data.py
|
7de06fbaafa1d57c815551632a91d73c08613ed1
|
[] |
no_license
|
crowdbotics-apps/m-18-nov-dev-15260
|
52ada15c3d64dc0ba8fdc83a0887e830268ff02c
|
4e951ccfe3ab16025f995ef8fea500522e0470e0
|
refs/heads/master
| 2023-01-16T06:03:20.219329 | 2020-11-18T06:47:21 | 2020-11-18T06:47:21 | 313,847,239 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,290 |
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed home.CustomText with the application's title."""
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "m 18 nov"
    CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    """Seed home.HomePage with the default landing-page HTML."""
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">m 18 nov</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Create or update the default django.contrib.sites Site (id=1)."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "m-18-nov-dev-15260.botics.co"

    site_params = {
        "name": "m 18 nov",
    }
    # Only set the domain when a custom one is configured.
    if custom_domain:
        site_params["domain"] = custom_domain

    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Load initial CustomText/HomePage/Site data for the home app."""

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"[email protected]"
] | |
a20ffd93c0dcbfea4dfc93f1a9c4a64f1c8d25aa
|
36de14c6b188886df6a284ee9ce4a464a5ded433
|
/Solutions/0838/0838.py
|
ca1246e69387c77941ed2610ee370d69c953d1e0
|
[] |
no_license
|
washing1127/LeetCode
|
0dca0f3caa5fddd72b299e6e8f59b5f2bf76ddd8
|
b910ddf32c7e727373449266c9e3167c21485167
|
refs/heads/main
| 2023-03-04T23:46:40.617866 | 2023-02-21T03:00:04 | 2023-02-21T03:00:04 | 319,191,720 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,323 |
py
|
# -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/2/21 11:16
# File: 0838.py
# Desc:
class Solution:
def pushDominoes(self, dominoes: str) -> str:
l = list(dominoes)
status_c = l[0]
status_id = 0
for i in range(1,len(l)):
c = l[i]
if c == '.': continue
elif c == 'L':
if status_c == 'R': # 之前和当前相对,向中间靠拢
idl = status_id; idr = i
while idl < idr:
l[idl] = 'R'
l[idr] = 'L'
idl += 1
idr -= 1
status_id = i
status_c = 'L'
else: # 当前向左,之前为空或向左,中间全向左
for idx in range(status_id,i): l[idx] = 'L'
status_id = i
else:
if status_c == 'R': # 之前向右,当前向右,中间全向右
for idx in range(status_id,i): l[idx] = 'R'
status_id = i
else: # 之前向左或为空,当前向右,中间不变
status_c = 'R'
status_id = i
if l[-1] == '.' and status_c == 'R':
|
[
"[email protected]"
] | |
af5e890ed0bb583636307a1cf2b0d3b8d7d1c779
|
6b66e499e7c2c6246c114029b83ae6ed3a4daa27
|
/barista/kinematicplots_Bu.py
|
01e0803f97f8df228c08c070882104a36adfa5fd
|
[] |
no_license
|
DryRun/boffea
|
d837723eee13650306ede501a6e9fe1c5a9c610b
|
433fdb92f3b60b6f140c0a0a3b2761d812b7044e
|
refs/heads/master
| 2023-09-01T17:41:04.451388 | 2023-08-24T21:55:18 | 2023-08-24T21:55:18 | 232,651,843 | 1 | 0 | null | 2022-06-22T04:50:57 | 2020-01-08T20:24:05 |
Python
|
UTF-8
|
Python
| false | false | 6,047 |
py
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from coffea import hist, util
from pprint import pprint
import glob
import mplhep
plt.style.use(mplhep.style.ROOT)
# NOTE(review): tight_layout() is called before any figure exists, so it
# only acts on an implicit empty figure -- confirm it is needed here.
plt.tight_layout()
from brazil.aguapreta import *
figure_directory = "/home/dryu/BFrag/data/kinematic/"
# Input histogram files: data is a set of per-run coffea files; MC is one
# efficiency-histogram file per B species.
input_files = {
    "data": glob.glob("/home/dryu/BFrag/data/histograms/Run*coffea"),
    "Bu": ["/home/dryu/BFrag/boffea/barista/Bu2KJpsi2KMuMu/MCEfficiencyHistograms.coffea"],
    "Bd": ["/home/dryu/BFrag/boffea/barista/Bd2KsJpsi2KPiMuMu/MCEfficiencyHistograms.coffea"],
    "Bs": ["/home/dryu/BFrag/boffea/barista/Bs2PhiJpsi2KKMuMu/MCEfficiencyHistograms.coffea"],
}
# Load every file, prune heavy per-candidate payloads, and merge files of
# the same sample into a single object.
coffea = {}
for what in input_files.keys():
    for f in input_files[what]:
        coffea_tmp = util.load(f)
        # Delete Bcands trees (and cutflows): only histograms are plotted here.
        for key in list(coffea_tmp.keys()):
            if "Bcands" in key or "cutflow" in key:
                del coffea_tmp[key]
        # For data, combine subjobs
        #if what == "data":
        #    subjobs = [x.name for x in coffea_tmp["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers()]
        #    print(subjobs)
        #    for key in list(coffea_tmp.keys()):
        #        if type(coffea_tmp[key]).__name__ == "Hist":
        #            if "dataset" in [x.name for x in coffea_tmp[key].axes()]:
        #                print("DEBUG : Attempting to group axes.")
        #                print("DEBUG : Input identifiers = ")
        #                print(coffea_tmp[key].axis("dataset").identifiers())
        #                print("DEBUG : attempt to group")
        #                print(subjobs)
        #                coffea_tmp[key] = coffea_tmp[key].group("dataset",
        #                    hist.Cat("dataset", "Primary dataset"),
        #                    {"Run2018": subjobs})
        # Persistify
        if not what in coffea:
            coffea[what] = coffea_tmp
        else:
            # NOTE(review): relies on the loaded object exposing .add()
            # (coffea accumulator semantics); a plain dict would fail here.
            coffea[what].add(coffea_tmp)
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axes())
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers())
# plot_index maps B species -> plot name -> the keyword arguments that
# plot() expects: pre-sliced/rebinned MC and data histograms plus axis
# cosmetics (limits, scales, labels).
plot_index = {
    "Bu": {
        "fit_pt":{
            "hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
                        .integrate("fit_mass")\
                        .integrate("fit_absy", slice(0., 2.25))\
                        .rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
            "hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .integrate("fit_mass")\
                        .integrate("fit_absy", slice(0., 2.25))\
                        .rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
            "xlim": [0., 50.],
            "xscale": "linear",
            "xlabel": r"$p_{T}$ [GeV]",
            "ylim": "auto",
            "yscale": "log",
            "ylabel": "Events",
        },
        "fit_absy":{
            "hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
                        .integrate("fit_mass")\
                        .integrate("fit_pt", slice(0., 30.))\
                        .rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
            "hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .integrate("fit_mass")\
                        .integrate("fit_pt", slice(0., 30.))\
                        .rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
            "xlim": [0., 3.0],
            "xscale": "linear",
            "xlabel": r"$|y|$",
            "ylim": "auto",
            "yscale": "log",
            "ylabel": "Events",
        },
        "fit_mass":{
            "hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
                        .integrate("fit_absy", slice(0., 2.25))\
                        .integrate("fit_pt", slice(0., 30.)),
            "hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
                        .integrate("fit_absy", slice(0., 2.25))\
                        .integrate("fit_pt", slice(0., 30.)),
            "xlim": [5.05, 5.5],
            "xscale": "linear",
            "xlabel": r"Fitted $B_{u}$ mass [GeV]",
            "ylim": "auto",
            "yscale": "log",
            "ylabel": "Events",
        }
    }
}
# NOTE(review): re-assigns figure_directory without the trailing slash used
# at the top of the file; plot() joins with "/" so both values work.
figure_directory = "/home/dryu/BFrag/data/kinematic"
def plot(hist_mc=None, hist_data=None, xlim=[], xscale="", xlabel="", ylim=[], yscale="", ylabel="", data_selection="", mc_selection="", savetag=""):
    """Draw a data/MC comparison for one kinematic variable.

    Top panel: the combined distribution overlaid by dataset; bottom panel:
    the Run2018/MC ratio.  The figure is saved to
    {figure_directory}/{savetag}.png.

    Args:
        hist_mc / hist_data: coffea histograms still carrying a "selection"
            axis and a "dataset" axis.
        xlim, xscale, xlabel, ylim, yscale, ylabel: axis cosmetics.
        data_selection / mc_selection: "selection"-axis bin to integrate out
            of the data / MC histogram respectively.
        savetag: basename of the output PNG.
    """
    import copy  # local import: `copy` is not imported at the top of this file

    hist_mc = hist_mc.integrate("selection", mc_selection)
    print(hist_data.axis("selection").identifiers())
    hist_data = hist_data.integrate("selection", data_selection)
    # NOTE(review): the original intended to normalize MC to data here (it
    # computed hist_data.values().sum(), which fails on the returned dict
    # and was never used); no normalization is applied.
    hist_all = copy.deepcopy(hist_data).add(hist_mc)
    # Two stacked panels: distribution on top, data/MC ratio below.  (The
    # original asked for subplots(1, 1), which returns a single Axes object
    # and breaks every ax[0]/ax[1] access below.)
    fig, ax = plt.subplots(2, 1, figsize=(10, 12))
    hist.plot1d(hist_all, overlay="dataset", ax=ax[0])
    ax[0].set_xlim(xlim)
    ax[0].set_xscale(xscale)
    ax[0].set_xlabel(xlabel)
    # NOTE(review): plot_index passes ylim="auto"; verify Axes.set_ylim
    # accepts that value in the matplotlib version in use.
    ax[0].set_ylim(ylim)
    ax[0].set_yscale(yscale)
    ax[0].set_ylabel(ylabel)
    hist.plotratio(
        num=hist_all.integrate("dataset", "Run2018"),
        den=hist_all.integrate("dataset", "Bu2KJpsi2KMuMu_probefilter"),
        unc='num',
        ax=ax[1])
    ax[1].set_xlim(xlim)
    ax[1].set_xscale(xscale)
    ax[1].set_xlabel(xlabel)
    ax[1].set_ylabel("Data / MC")
    fig.savefig(f"{figure_directory}/{savetag}.png")
if __name__ == "__main__":
    # Compare trigger-matched reco MC against triggered data for every Bu
    # kinematic variable defined in plot_index.
    mc_selection = "recomatch_HLT_Mu9_IP5"
    data_selection = "recotrig_HLT_Mu9_IP5"
    for btype in ["Bu"]:
        for plot_name, metadata in plot_index[btype].items():
            plot(**metadata, savetag=f"{plot_name}_reco", mc_selection=mc_selection, data_selection=data_selection)
|
[
"[email protected]"
] | |
88c304f224ab60062582abbfa1146a651e1233e6
|
f21814f3b4c8217e830af48b427de0b24dc398d4
|
/missing_value_count_and_percent.py
|
aed47ea11574bbab9b091a7ff7b5448c8d28d997
|
[] |
no_license
|
CaraFJ/Utility
|
2d1dbc3f09c33d9d92bf1e602f1a01b0f3ba656e
|
f032e6b376d65a05fe9d25fca31794c1302ec7ed
|
refs/heads/master
| 2021-09-08T16:47:26.173366 | 2021-09-08T04:51:04 | 2021-09-08T04:52:05 | 248,438,511 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 496 |
py
|
def missing_value_count_and_percent(df):
    """Return the number and percent of missing values for each column.

    Args:
        df (DataFrame): a dataframe with many columns.

    Returns:
        DataFrame: indexed by the input columns, with `num_missing_values`
        (count of NaN/None cells) and `pct_missing_values` (fraction of
        missing cells, rounded to 4 decimal places).
    """
    import pandas as pd  # local import: the original module has no imports
    # isnull().sum() counts missing cells per column; isnull().mean() is the
    # missing fraction.  (The original had a stray ')' -- a SyntaxError.)
    return pd.concat(
        {
            'num_missing_values': df.isnull().sum(),
            'pct_missing_values': df.isnull().mean().round(4),
        },
        axis=1,
    )
|
[
"[email protected]"
] | |
27b94c9d7849b71176bca1cb1da910235230ce4d
|
c087e0bbeeac080335240c05255bd682cfea100e
|
/remap_reads_consensus.py
|
f0decb7ac4e13e3cab2add1986e43a77371c997a
|
[] |
no_license
|
ifiddes/notch2nl_10x
|
f537481da544ec5e3c62a2899b713b4cb68e7285
|
35cfd95b0e7563bad0c5d2354fd7be526bc3a39d
|
refs/heads/master
| 2021-01-10T10:18:59.098115 | 2016-03-24T17:43:03 | 2016-03-24T17:43:03 | 50,366,711 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,718 |
py
|
"""
Run the traditional WGS-SUN based pipeline on 10x data to compare to the results
"""
import pysam
import sys
import vcf
import string
import itertools
import numpy as np
import argparse
import tempfile
import os
import subprocess
from pyfasta import Fasta
from operator import itemgetter
from itertools import groupby
from collections import Counter, defaultdict
sys.path.append("/hive/users/ifiddes/pycbio")
from pycbio.sys.procOps import runProc, callProc
from pycbio.sys.fileOps import tmpFileGet
from pycbio.sys.mathOps import format_ratio
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
def parse_args():
    """Parse command-line options for the consensus-remapping pipeline."""
    ap = argparse.ArgumentParser()
    ap.add_argument('inBam', help='(10x) bamfile to remap')
    ap.add_argument('outPdf', help='path to write plot to')
    ap.add_argument('--outBam', default=None, help='path to write consensus aligned bam to')
    ap.add_argument('--consensusVcf',
                    default='/hive/users/ifiddes/notch2nl_suns/Notch2NL_SUN_UniqueIndels_ConsensusRef.vcf.gz')
    ap.add_argument('--consensusRef',
                    default='/hive/users/ifiddes/notch2nl_suns/notch2_aligned_consensus.fasta')
    return ap.parse_args()
# (chrom, start, stop, name) for NOTCH2 and each NOTCH2NL paralog.
# Assumed to match the assembly of the input BAM -- TODO confirm.
regions = [['chr1', 119990189, 120163923, 'Notch2'],
           ['chr1', 146149601, 146329308, 'Notch2NL-A'],
           ['chr1', 148597945, 148786127, 'Notch2NL-B'],
           ['chr1', 149349476, 149477855, 'Notch2NL-C'],
           ['chr1', 120706154, 120801963, 'Notch2NL-D']]
def extract_reads(bam, offset=50000):
    """Extract reads overlapping the Notch2(NL) regions from `bam`.

    Pipes samtools view -> bamshuf -> bam2fq so mate pairs end up adjacent
    in the output (interleaved FASTQ); returns the FASTQ path.

    Args:
        bam: input (10x) BAM path.
        offset: padding in bp added on both sides of each region.
    """
    tmp_reads = tmpFileGet(suffix='reads.fq')
    tmp_shuf = tmpFileGet()
    region_strs = ['{}:{}-{}'.format(chrom, start - offset, stop + offset) for chrom, start, stop, para in regions]
    view_cmd = ['samtools', 'view', '-b', bam]
    view_cmd.extend(region_strs)
    cmd = [view_cmd,
           ['samtools', 'bamshuf', '-Ou', '-', tmp_shuf],
           ['samtools', 'bam2fq', '-']]
    # NOTE(review): the opened handle tmp_paired_h is unused; runProc is
    # given the *path* via stdout= -- confirm runProc accepts a path here.
    with open(tmp_reads, 'w') as tmp_paired_h:
        runProc(cmd, stdout=tmp_reads)
    return tmp_reads
def remap_reads(tmp_reads, index, out_bam):
    """bwa-mem the interleaved FASTQ against `index`; sort and index the BAM.

    Args:
        tmp_reads: interleaved FASTQ path ('-p' tells bwa mem pairs are adjacent).
        index: bwa index prefix (the consensus reference).
        out_bam: output path for the sorted BAM (an index is created next to it).
    """
    sort_tmp = tmpFileGet()
    cmd = [['bwa', 'mem', '-p', index, tmp_reads],
           ['samtools', 'view', '-b', '-'],
           ['samtools', 'sort', '-T', sort_tmp, '-O', 'bam', '-']]
    with open(out_bam, 'w') as f_h:
        runProc(cmd, stdout=f_h)
    cmd = ['samtools', 'index', out_bam]
    runProc(cmd)
def build_remapped_bam(in_bam, consensus_ref, out_bam):
    """Extract Notch2(NL) reads from `in_bam`, realign them to the consensus
    reference at `out_bam`, and remove the intermediate FASTQ."""
    fastq_path = extract_reads(in_bam)
    remap_reads(fastq_path, consensus_ref, out_bam)
    os.remove(fastq_path)
def pileup(out_bam, vcf_path):
    """Measure per-SUN read support in `out_bam`.

    For every SNV in the SUN VCF, runs `samtools mpileup` at that single
    position and, for each paralog (VCF sample) whose base uniquely
    identifies it at that site, records the fraction of piled-up reads
    carrying that base.

    Returns:
        defaultdict: sample name -> list of [position, weighted fraction].

    NOTE(review): Python 2 only (dict.itervalues()); the open() handle
    passed to vcf.Reader is never explicitly closed.
    """
    bases = {"A", "T", "G", "C", "a", "t", "g", "c"}
    vcf_handle = vcf.Reader(open(vcf_path))
    wgs_results = defaultdict(list)
    for vcf_rec in vcf_handle:
        # Only SNVs: indel SUNs are skipped.
        if vcf_rec.is_indel:
            continue
        pos_str = "{0}:{1}-{1}".format(vcf_rec.CHROM, vcf_rec.POS)
        cmd = ['samtools', 'mpileup', '-r', pos_str, out_bam]
        mpileup_rec = callProc(cmd).split()
        # Field 4 of mpileup output is the read-base string at this position.
        pile_up_result = Counter(x.upper() for x in mpileup_rec[4] if x in bases)
        sample_dict = {s.sample: s.gt_bases for s in vcf_rec.samples}
        for s in vcf_rec.samples:
            # Keep only bases unique to exactly one paralog (true SUNs here).
            if len([x for x in sample_dict.itervalues() if x == s.gt_bases]) != 1:
                continue
            if s.gt_bases is None:
                continue
            c = 1.0 * pile_up_result[s.gt_bases] / len(mpileup_rec[4])
            # Scale by the fraction of paralogs genotyped at this site.
            c *= 1.0 * len([x for x in sample_dict.itervalues() if x is not None]) / len(sample_dict)
            wgs_results[s.sample].append([vcf_rec.POS, c])
    return wgs_results
def plot_results(wgs_results, out_pdf, aln_size):
    """Plot per-paralog SUN read support along the consensus alignment.

    One panel per paralog; regular lines are SUN read fractions, and SUNs
    with zero supporting reads are flagged with short (0.02-high) lines in
    a second color.
    """
    paralogs = ['Notch2', 'Notch2NL-A', 'Notch2NL-B', 'Notch2NL-C', 'Notch2NL-D']
    fig, plots = plt.subplots(5, sharey=True, sharex=True)
    plt.yticks((0, 0.1, 0.2, 0.3, 0.4))
    plt.ylim((0, 0.4))
    # x ticks every 10 kb up to (roughly) the alignment length.
    xticks = range(0, int(round(aln_size / 10000.0) * 10000.0), 10000)
    plt.xticks(xticks, rotation='vertical')
    plt.xlim((0, aln_size))
    plt.xlabel("Alignment position")
    for i, (p, para) in enumerate(zip(plots, paralogs)):
        p.set_title(para)
        wgs = wgs_results[para]
        xvals, yvals = zip(*wgs)
        p.vlines(xvals, np.zeros(len(xvals)), yvals, color=sns.color_palette()[0], alpha=0.7, linewidth=0.8)
        # mark the zeros
        zero_wgs = [[x, y + 0.02] for x, y in wgs if y == 0]
        if len(zero_wgs) > 0:
            z_xvals, z_yvals = zip(*zero_wgs)
            p.vlines(z_xvals, np.zeros(len(z_xvals)), z_yvals, color=sns.color_palette()[2], alpha=0.7, linewidth=0.8)
    plt.tight_layout(pad=2.5, h_pad=0.25)
    # Proxy artists for the legend (the vlines themselves carry no labels).
    zero_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[2])
    reg_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[0])
    fig.legend(handles=(reg_line, zero_line), labels=["WGS SUN Fraction", "WGS Missing SUN"], loc="upper right")
    fig.text(0.01, 0.5, 'SUN fraction of reads', va='center', rotation='vertical')
    plt.savefig(out_pdf, format="pdf")
    plt.close()
def get_aln_size(consensus_ref):
    """Return the length of the single sequence in the consensus FASTA.

    NOTE(review): f.keys()[0] assumes keys() returns an indexable sequence
    (pyfasta / Python 2 behaviour) -- confirm under the runtime in use.
    """
    f = Fasta(consensus_ref)
    assert len(f) == 1
    return len(f[f.keys()[0]])
def main():
    """Remap reads to the consensus, pile up SUNs, and write the plot."""
    args = parse_args()
    # Use a throwaway BAM path unless the caller asked to keep the BAM.
    if args.outBam is None:
        out_bam = tmpFileGet(suffix='merged.sorted.bam')
    else:
        out_bam = args.outBam
    build_remapped_bam(args.inBam, args.consensusRef, out_bam)
    wgs_results = pileup(out_bam, args.consensusVcf)
    aln_size = get_aln_size(args.consensusRef)
    plot_results(wgs_results, args.outPdf, aln_size)
    # Only clean up BAMs we created ourselves.
    if args.outBam is None:
        os.remove(out_bam)
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f59db1371af75f94b82190561a99278bcd02b079
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/balancer_active_window.py
|
aeea9247f02f3b36a9f8fd0019a8e52731f28dcd
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 3,518 |
py
|
# coding: utf-8
import pprint
import re
import six
class BalancerActiveWindow:
    """Time window during which the DDS balancer is active.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'start_time': 'str',
        'stop_time': 'str'
    }

    attribute_map = {
        'start_time': 'start_time',
        'stop_time': 'stop_time'
    }

    def __init__(self, start_time=None, stop_time=None):
        """Initialize the window; values are routed through the property setters."""
        self._start_time = None
        self._stop_time = None
        self.discriminator = None
        self.start_time = start_time
        self.stop_time = stop_time

    @property
    def start_time(self):
        """str: start of the active window (活动时间窗开始时间)."""
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        self._start_time = start_time

    @property
    def stop_time(self):
        """str: end of the active window (活动时间窗结束时间)."""
        return self._stop_time

    @stop_time.setter
    def stop_time(self, stop_time):
        self._stop_time = stop_time

    def to_dict(self):
        """Serialize the model's declared attributes into a plain dict."""
        def _convert(value):
            # Recursively unwrap nested models inside lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, (list, dict)) or hasattr(value, "to_dict"):
                result[attr] = _convert(value)
            elif attr in self.sensitive_list:
                # Plain sensitive values are masked, matching SDK convention.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two windows are equal when all instance state matches."""
        if not isinstance(other, BalancerActiveWindow):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"[email protected]"
] | |
07cfd1607796d3ca94ad028d3b8c573a0d32cc3b
|
4f998e9798b5d72a508a62013d8179e58d94b8bb
|
/home/migrations/0001_load_initial_data.py
|
592ecd278b5ad39a6095474d97880b4060026301
|
[] |
no_license
|
crowdbotics-apps/testcerabc-27781
|
72437420dc97964cfd2c882f723f6e8dc4177fe8
|
a58dc42415d0c2c7a523a8b9566f3a64b20a6164
|
refs/heads/master
| 2023-05-12T14:34:46.264425 | 2021-06-06T18:47:08 | 2021-06-06T18:47:08 | 374,438,770 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    """Create or update the default django.contrib.sites Site record (pk=1)."""
    site_model = apps.get_model("sites", "Site")
    domain = "testcerabc-27781.botics.co"
    defaults = {"name": "testcerabc"}
    # The domain is only set when a custom domain is configured.
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Seed the sites framework with this app's Site row.

    No reverse callable is passed to RunPython, so this migration is
    irreversible.
    """
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"[email protected]"
] | |
b3b8eb91fa66a2775490954f8c3ff2b4d06a219f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_critics.py
|
daa20c16b7ffcfd31864b2f9e82bd272a677bdae
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 238 |
py
|
from xai.brain.wordbase.nouns._critic import _CRITIC
#calss header
class _CRITICS(_CRITIC):
    """Plural word-base entry for the noun "critic"; behaviour comes from _CRITIC."""

    def __init__(self):
        super().__init__()
        self.name = "CRITICS"
        self.specie = 'nouns'
        self.basic = "critic"
        self.jsondata = {}
|
[
"[email protected]"
] | |
91b1725adfaa4f3636377b6571089cf7925ad856
|
05e634a232574f676434dfa8e4183f3d0a1a4bc9
|
/tutorials/pp-series/HRNet-Keypoint/lib/metrics/json_results.py
|
9e0ceea69b6c0e57ed0f0224ad12a02078870de0
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/models
|
67ac00d93c5255ac64a9d80ae5be2e8927e47cee
|
8042c21b690ffc0162095e749a41b94dd38732da
|
refs/heads/release/2.4
| 2023-09-04T15:23:59.543625 | 2023-07-20T11:54:16 | 2023-07-20T11:54:16 | 88,868,842 | 7,633 | 3,597 |
Apache-2.0
| 2023-09-05T23:23:54 | 2017-04-20T13:30:15 |
Python
|
UTF-8
|
Python
| false | false | 5,121 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import numpy as np
__all__ = [
'get_det_res', 'get_det_poly_res', 'get_seg_res', 'get_solov2_segm_res',
'get_keypoint_res'
]
def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
    """Convert raw detection output to COCO-style bbox result dicts.

    Args:
        bboxes: flat array of detections, one row per detection:
            [label, score, xmin, ymin, xmax, ymax].
        bbox_nums: number of detections belonging to each image, in order.
        image_id: per-image ids, shape (num_images, 1).
        label_to_cat_id_map: model label index -> COCO category id.
        bias: added to width/height (use 1 for inclusive pixel coordinates).

    Returns:
        list of dicts with image_id / category_id / bbox (xywh) / score.
        Detections with a negative label are dropped.
    """
    results = []
    offset = 0
    for img_idx, n_det in enumerate(bbox_nums):
        img_id = int(image_id[img_idx][0])
        for det in bboxes[offset:offset + n_det]:
            label, score, xmin, ymin, xmax, ymax = det.tolist()
            if int(label) < 0:
                continue
            results.append({
                'image_id': img_id,
                'category_id': label_to_cat_id_map[int(label)],
                'bbox': [xmin, ymin, xmax - xmin + bias, ymax - ymin + bias],
                'score': score,
            })
        offset += n_det
    return results
def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
    """Convert rotated-box detections (4-point polygons) to COCO-style dicts.

    Args:
        bboxes: flat array of detections, one row per detection:
            [label, score, x1, y1, x2, y2, x3, y3, x4, y4].
        bbox_nums: number of detections belonging to each image, in order.
        image_id: per-image ids, shape (num_images, 1).
        label_to_cat_id_map: model label index -> COCO category id.
        bias: unused; kept for signature parity with get_det_res.

    Returns:
        list of dicts; 'bbox' holds the 8 polygon coordinates.
    """
    results = []
    offset = 0
    for img_idx, n_det in enumerate(bbox_nums):
        img_id = int(image_id[img_idx][0])
        for det in bboxes[offset:offset + n_det]:
            values = det.tolist()
            label = int(values[0])
            if label < 0:
                continue
            results.append({
                'image_id': img_id,
                'category_id': label_to_cat_id_map[label],
                'bbox': values[2:10],
                'score': values[1],
            })
        offset += n_det
    return results
def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
    """Convert instance masks to COCO-style RLE segmentation results.

    Args:
        masks: flat stack of HxW instance masks.
        bboxes: matching detections; column 0 is the label, column 1 the score.
        mask_nums: number of masks belonging to each image, in order.
        image_id: per-image ids, shape (num_images, 1).
        label_to_cat_id_map: model label index -> COCO category id.

    Returns:
        list of dicts with image_id / category_id / RLE segmentation / score.
        Instances labelled -1 are dropped.
    """
    import pycocotools.mask as mask_util
    results = []
    k = 0
    for img_idx, n_det in enumerate(mask_nums):
        img_id = int(image_id[img_idx][0])
        for _ in range(n_det):
            mask = masks[k].astype(np.uint8)
            label = int(bboxes[k][0])
            score = float(bboxes[k][1])
            k += 1
            if label == -1:
                continue
            # pycocotools expects a Fortran-ordered HxWx1 uint8 array.
            rle = mask_util.encode(
                np.array(
                    mask[:, :, None], order="F", dtype="uint8"))[0]
            # On Python 3 the RLE counts come back as bytes; JSON needs str.
            if six.PY3 and 'counts' in rle:
                rle['counts'] = rle['counts'].decode("utf8")
            results.append({
                'image_id': img_id,
                'category_id': label_to_cat_id_map[label],
                'segmentation': rle,
                'score': score,
            })
    return results
def get_solov2_segm_res(results, image_id, num_id_to_cat_id_map):
    """Convert SOLOv2 output to COCO-style RLE segmentation results.

    Args:
        results: dict with 'segm' (instance masks), 'cate_label'
            (class ids) and 'cate_score' (confidences) for one batch.
        image_id: per-image ids; only image_id[0][0] is used (single image).
        num_id_to_cat_id_map: model class id -> COCO category id.

    Returns:
        list of result dicts, or None when there are no instances.

    NOTE(review): results['segm'] is dereferenced before the `segms is None`
    check, so a None segm would raise earlier; and the loop runs over
    range(lengths - 1), which drops the last instance -- confirm whether
    either is intentional.
    """
    import pycocotools.mask as mask_util
    segm_res = []
    # for each batch
    segms = results['segm'].astype(np.uint8)
    clsid_labels = results['cate_label']
    clsid_scores = results['cate_score']
    lengths = segms.shape[0]
    im_id = int(image_id[0][0])
    if lengths == 0 or segms is None:
        return None
    # for each sample
    for i in range(lengths - 1):
        clsid = int(clsid_labels[i])
        catid = num_id_to_cat_id_map[clsid]
        score = float(clsid_scores[i])
        mask = segms[i]
        segm = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
        segm['counts'] = segm['counts'].decode('utf8')
        coco_res = {
            'image_id': im_id,
            'category_id': catid,
            'segmentation': segm,
            'score': score
        }
        segm_res.append(coco_res)
    return segm_res
def get_keypoint_res(results, im_id):
    """Convert keypoint predictions to COCO-style annotation dicts.

    Args:
        results: dict whose 'keypoint' entry holds, per image, a
            (kpts, scores) pair: kpts has shape (num_person, K, 3)
            with (x, y, confidence) triples, scores one value per person.
        im_id: per-image ids, shape (num_images, 1).

    Returns:
        list of dicts with image_id, category_id, flattened keypoints,
        score, and an area/bbox derived from the keypoint extent.
    """
    anns = []
    preds = results['keypoint']
    for idx in range(im_id.shape[0]):
        image_id = im_id[idx].item()
        kpts, scores = preds[idx]
        for kpt, score in zip(kpts, scores):
            flat = kpt.flatten()
            xs = flat[0::3]
            ys = flat[1::3]
            x0 = np.min(xs).item()
            x1 = np.max(xs).item()
            y0 = np.min(ys).item()
            y1 = np.max(ys).item()
            anns.append({
                'image_id': image_id,
                'category_id': 1,  # single-category (hard-coded upstream)
                'keypoints': flat.tolist(),
                'score': float(score),
                'area': (x1 - x0) * (y1 - y0),
                'bbox': [x0, y0, x1 - x0, y1 - y0],
            })
    return anns
|
[
"[email protected]"
] | |
c22c2d6937f2f8e7d0605c8690d553ce6add5b2e
|
2aac13d0048f12ac877af92a93f73c4ef1311d6e
|
/mrchunks/process.py
|
49a1ec417764052f224f2e231e044e3ae6be2ef8
|
[] |
no_license
|
victorpoluceno/mrchunks
|
18250e2bf0be375de48e01b2a42976285d556e85
|
8328ed3d836144ccc563b135d78f59e50ff4104b
|
refs/heads/master
| 2021-01-15T22:18:39.091832 | 2015-11-22T23:00:53 | 2015-11-22T23:00:53 | 32,928,863 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,760 |
py
|
import zmq
from mrchunks.concurrent import Engine, switch
from mrchunks.mailbox import Mailbox
from mrchunks.serializer import decode, encode
class Server:
    """ZeroMQ front door: receives envelopes over TCP and forwards each
    one to the destination process's per-pid IPC socket."""
    def __init__(self):
        self._context = zmq.Context()
    def __call__(self, *args, **kwargs):
        # Runs as a task inside the Engine: bind, then serve forever.
        self.listen(kwargs['address'])
        while True:
            envelop = self.get()
            self.send(envelop)
    def get(self):
        """Cooperatively wait for a request, ack it, and return the decoded envelope."""
        while True:
            # 100 ms poll so we can yield (switch) to other tasks in between.
            socks = dict(self._poller.poll(100))
            if socks:
                if socks.get(self._socket) != zmq.POLLIN:
                    switch()
                    continue
                data = self._socket.recv()
                # FIXME may be we need to ack after sendo ipc socket
                self._socket.send(b"OK+")
                break
            else:
                switch()
        return decode(data)
    def send(self, envelop):
        """Forward an envelope to the recipient's IPC socket (keyed by pid).

        NOTE(review): a fresh REQ socket is created per message and never
        closed -- confirm this does not leak sockets under load.
        """
        sender, recipient, message = envelop
        _, _, p = recipient
        socket = self._context.socket(zmq.REQ)
        socket.connect("ipc:///tmp/%d" % (p,))
        socket.send(encode(envelop), zmq.NOBLOCK)
    def listen(self, address):
        """Bind a REP socket on (host, port) and register it with a poller."""
        self._socket = self._context.socket(zmq.REP)
        address, port = address
        self._socket.bind("tcp://*:%s" % (port,))
        self._poller = zmq.Poller()
        self._poller.register(self._socket, zmq.POLLIN)
class Arbiter:
    """Owns the worker Engine, allocates process ids, and runs the Server."""

    def __init__(self, address, number_of_workers=1):
        self._next_pid = 0
        self._address = address
        self._engine = Engine(number_of_workers)
        self._listen()

    def _get_next_pid(self):
        """Return a fresh pid tuple: address + (monotonic counter,)."""
        pid = self._address + (self._next_pid,)
        self._next_pid += 1
        return pid

    def _listen(self):
        """Schedule the message Server on the engine at our address."""
        self._engine.apply(Server(), address=self._address)

    def spawn(self, start, *args, **kwargs):
        """Wrap `start` in a Process, schedule it, and return its pid."""
        new_pid = self._get_next_pid()
        self._engine.apply(Process(new_pid, start), *args, **kwargs)
        return new_pid

    def run(self, forever=True):
        """Run the engine (blocking while `forever` is true)."""
        self._engine.run(forever)
def get_arbiter(*args, **kwargs):
    """Factory kept for API symmetry: construct and return an Arbiter."""
    return Arbiter(*args, **kwargs)
class Process(object):
    """A lightweight actor: a pid, a user entry point, and a mailbox."""
    def __init__(self, pid, start):
        self.pid = pid
        self._start = start
    def __call__(self, *args, **kwargs):
        # Invoked inside an Engine worker: set up the mailbox first, then
        # hand control to the user entry point with this Process as arg 0.
        self._mailbox = Mailbox()
        self._mailbox.run(self.pid)
        self._start(self, *args, **kwargs)
    def send(self, recipient, message):
        """Deliver `message` to another process id via the mailbox."""
        print('Sending message: {} from: {} to: {}'.format(message, self.pid,
                                                           recipient))
        self._mailbox.send(recipient, message)
    def receive(self):
        """Block until an envelope arrives in the mailbox and return it."""
        print('Receiving...')
        envelop = self._mailbox.receive()
        return envelop
|
[
"[email protected]"
] | |
530ae96e854fca34aa8899b13ba869d5d6b1f658
|
019fd2c29b8239d7b0a3906cfbdddfd440362417
|
/asset/google/cloud/asset_v1beta1/gapic/asset_service_client_config.py
|
340e89de38b2510f4f5c219239170706bfdfdc83
|
[
"Apache-2.0"
] |
permissive
|
tswast/google-cloud-python
|
1334d26cdb994293f307d889251d7daef5fcb826
|
d897d56bce03d1fda98b79afb08264e51d46c421
|
refs/heads/master
| 2021-06-10T17:40:06.968584 | 2020-01-11T17:41:29 | 2020-01-11T17:41:29 | 58,775,221 | 1 | 1 |
Apache-2.0
| 2019-04-10T17:09:46 | 2016-05-13T22:06:37 |
Python
|
UTF-8
|
Python
| false | false | 1,179 |
py
|
# gapic client configuration for google.cloud.asset.v1beta1.AssetService:
# classifies each RPC's retry behaviour and supplies the shared
# exponential-backoff / timeout parameters.
config = {
    "interfaces": {
        "google.cloud.asset.v1beta1.AssetService": {
            # Status codes that trigger a retry for "idempotent" methods;
            # "non_idempotent" methods are never retried.
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            # Backoff: 100 ms initial delay, x1.3 growth, 60 s cap;
            # per-RPC timeout 20 s; 600 s total deadline.
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000,
                }
            },
            "methods": {
                "ExportAssets": {
                    "timeout_millis": 600000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default",
                },
                "BatchGetAssetsHistory": {
                    "timeout_millis": 600000,
                    "retry_codes_name": "idempotent",
                    "retry_params_name": "default",
                },
            },
        }
    }
}
|
[
"[email protected]"
] | |
91c46ed6861438fb001bf94fe1fa600cd41ad2c9
|
423f9cbe3b39e431b7eca2ba6ad15b2fd70ef56b
|
/EditDistance.py
|
d1cb6dc2cf6a49df28a308e6019e0e55bb7329c4
|
[] |
no_license
|
SerChirag/Dynamic-Programming
|
8f7e6f23fd76c8d99fb8eb23b4324e1eb8e5b790
|
672bf3cb726cea302ce72ad7183d7f684b2788f0
|
refs/heads/master
| 2021-10-10T17:29:16.346125 | 2019-01-14T18:00:59 | 2019-01-14T18:00:59 | 115,982,525 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 480 |
py
|
def edit(str1, str2):
    """Return the length of the longest common subsequence of str1 and str2.

    NOTE(review): despite the file/function name, this dynamic program is
    the LCS recurrence, not Levenshtein edit distance.  The existing
    behaviour (LCS length) is preserved for callers.

    Args:
        str1, str2: input strings (either may be empty).

    Returns:
        int: length of the longest common subsequence.
    """
    # table[i][j] = LCS length of str1[:i] and str2[:j].
    # (The original shadowed the function name with this local and kept an
    # unused `count` variable; both cleaned up.)
    table = [[0 for _ in range(len(str2) + 1)] for _ in range(len(str1) + 1)]
    for i in range(1, len(str1) + 1):
        for j in range(1, len(str2) + 1):
            if str1[i - 1] == str2[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i][j - 1], table[i - 1][j])
    return table[len(str1)][len(str2)]
# Demo inputs: the LCS of these strings is "ADH", length 3.
str1 = "ABCDGH"
str2 = "AEDFHR"
# Parenthesized call works on both Python 2 and Python 3; the original
# bare `print` statement is a SyntaxError under Python 3.
print(edit(str1, str2))
|
[
"[email protected]"
] | |
9947c254c93bcc92b396bff46d6f0321e70fe555
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adverbs/_bleakly.py
|
28b608df7c1220eda261a778a410c245be7da9ea
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
#calss header
class _BLEAKLY():
def __init__(self,):
self.name = "BLEAKLY"
self.definitions = [u'in a way that suggests a lack of hope: ', u'in a way that is cold and empty: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
79f2687bc8f4a9add7c0fbbba2af25d1ce45be2a
|
4fcb2e797ba83b310fe05461d48f02931ea5a427
|
/2017/day-19/solution.py
|
b22d5be009cad3f1c42f831b1e093a846f34a4d9
|
[] |
no_license
|
BrentChesny/AdventOfCode
|
5a642d081505563f7518c5244bb814e9e4dfc5de
|
dad5224961539149bed5757bbae0ccc35a3a293d
|
refs/heads/master
| 2022-12-11T19:51:22.138655 | 2022-12-04T21:46:29 | 2022-12-04T21:46:29 | 47,266,210 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,463 |
py
|
def parse_input():
    """Read the routing diagram as a grid of characters (newlines stripped,
    leading spaces preserved)."""
    with open('input.txt') as handle:
        return [list(row.strip('\n')) for row in handle.readlines()]
def solve_part_one():
    """Follow the path from the top-row '|' and collect letters along it."""
    grid = parse_input()
    row, col = 0, grid[0].index('|')
    d_row, d_col = 1, 0
    letters = ''
    # Walk until the next cell is blank (the path ends).
    while grid[row + d_row][col + d_col] != ' ':
        row, col = row + d_row, col + d_col
        cell = grid[row][col]
        if cell.isupper():
            letters += cell
        if cell == '+':
            d_row, d_col = find_new_direction(grid, (row, col), (d_row, d_col))
    return letters
def solve_part_two():
    """Count the total steps along the path (starting cell included)."""
    grid = parse_input()
    row, col = 0, grid[0].index('|')
    d_row, d_col = 1, 0
    steps = 1
    while grid[row + d_row][col + d_col] != ' ':
        steps += 1
        row, col = row + d_row, col + d_col
        if grid[row][col] == '+':
            d_row, d_col = find_new_direction(grid, (row, col), (d_row, d_col))
    return steps
def find_new_direction(grid, pos, old_dir):
    """At a '+' corner, return the perpendicular direction whose neighbor
    cell carries track ('|' vertically, '-' horizontally), never reversing
    the previous direction.  Returns None when no turn is available."""
    row, col = pos
    up, down, left, right = (-1, 0), (1, 0), (0, -1), (0, 1)
    if grid[row - 1][col] == '|' and old_dir != down:
        return up
    if grid[row + 1][col] == '|' and old_dir != up:
        return down
    if grid[row][col - 1] == '-' and old_dir != right:
        return left
    if grid[row][col + 1] == '-' and old_dir != left:
        return right
def main():
    """Entry point: print the answers to both puzzle parts.

    The original used Python 2 `print 'x', y` statements, which are a
    SyntaxError on Python 3; print() calls work on both (on Python 2 they
    print a tuple, with slightly different formatting).
    """
    print('Part one:', solve_part_one())
    print('Part two:', solve_part_two())

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
128d601cae05a0f318b0a90ac3ac53d97636fa48
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/home/models_20201026174905.py
|
36accded34a74330fba536c865386a721c5957a3
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,730 |
py
|
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from streams import blocks
class HomePage(Page):
    """Site home page with a hero banner (lead text, CTA button, background
    image) and a StreamField body.  Editor-facing help_text is in Polish."""
    # Subtitle shown under the banner title.
    lead_text = models.CharField(
        max_length = 140,
        blank = True,
        help_text = 'Podtytuł pod tytułem banera'
    )
    # Optional internal page the banner button links to.
    button = models.ForeignKey(
        'wagtailcore.Page',
        blank = True,
        null = True,
        related_name = '+',
        help_text = 'Wybierz opcjonalną stronę, do której chcesz utworzyć łącze',
        on_delete = models.SET_NULL,
    )
    # Button label; defaults to "Czytaj więcej" ("Read more").
    button_text = models.CharField(
        max_length = 50,
        default = 'Czytaj więcej',
        blank = False,
        help_text = 'Przycisk tekstowy'
    )
    # Banner background image; blank=False but null=True so the column is
    # nullable for SET_NULL while the admin form still requires a value.
    banner_background_image = models.ForeignKey(
        'wagtailimages.Image',
        blank = False,
        null =True,
        related_name = '+',
        help_text = 'Obraz tła baneru',
        on_delete = models.SET_NULL,
    )
    # Flexible page body built from the project's custom stream blocks.
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        # NOTE(review): target_model='' looks incomplete -- SnippetChooserBlock
        # normally needs a real model label; confirm before relying on it.
        ('testimonial', SnippetChooserBlock(target_model='')),
    ], null=True, blank=True)
    # Admin edit panels, in display order.
    content_panels = Page.content_panels + [
        FieldPanel('lead_text'),
        PageChooserPanel('button'),
        FieldPanel('button_text'),
        ImageChooserPanel('banner_background_image'),
        StreamFieldPanel('body'),
    ]
|
[
"[email protected]"
] | |
0f3a08eb19415e6839f084ef6b5fd54d9bb6cee3
|
6019b48f027b1f62de8474a834f52157fc8faf2c
|
/src/ch3/cv2io/negaposi.py
|
7aa3463cf05aee3a2932641dbca8b3d908f3f44e
|
[] |
no_license
|
kujirahand/book-mlearn-gyomu
|
d540aebf96af84d5c271fa11f31bf18417c16f34
|
b1d5f04a69777fb3896b28144ecb18d49a744c25
|
refs/heads/master
| 2023-07-04T01:14:39.673001 | 2023-04-05T13:27:53 | 2023-04-05T13:27:53 | 135,913,708 | 127 | 113 | null | 2020-08-10T23:16:30 | 2018-06-03T14:56:59 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 212 |
py
|
import matplotlib.pyplot as plt
import cv2
# 画像を読み込む
img = cv2.imread("test.jpg")
# ネガポジ反転
img = 255 - img
# 画像を表示
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
|
[
"[email protected]"
] | |
7d7f982e88dc574bb2ed1b7b4f0f6c36f495a5a7
|
ece0d321e48f182832252b23db1df0c21b78f20c
|
/engine/2.80/scripts/addons/archipack/presets/archipack_stair/l_wood_over_concrete.py
|
d4fc1344a54ccd723bdcb01aad8a5764c427b8b4
|
[
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause"
] |
permissive
|
byteinc/Phasor
|
47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
refs/heads/master
| 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 |
Unlicense
| 2022-10-21T07:02:37 | 2019-03-15T00:58:08 |
Python
|
UTF-8
|
Python
| false | false | 5,867 |
py
|
import bpy
d = bpy.context.active_object.data.archipack_stair[0]
d.steps_type = 'CLOSED'
d.handrail_slice_right = True
d.total_angle = 6.2831854820251465
d.user_defined_subs_enable = True
d.string_z = 0.30000001192092896
d.nose_z = 0.029999999329447746
d.user_defined_subs = ''
d.idmat_step_side = '3'
d.handrail_x = 0.03999999910593033
d.right_post = True
d.left_post = True
d.width = 1.5
d.subs_offset_x = 0.0
d.rail_mat.clear()
item_sub_1 = d.rail_mat.add()
item_sub_1.name = ''
item_sub_1.index = '4'
d.step_depth = 0.30000001192092896
d.rail_z = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.right_subs = False
d.left_panel = True
d.idmat_handrail = '3'
d.da = 1.5707963705062866
d.post_alt = 0.0
d.left_subs = False
d.n_parts = 3
d.user_defined_post_enable = True
d.handrail_slice_left = True
d.handrail_profil = 'SQUARE'
d.handrail_expand = False
d.panel_alt = 0.25
d.post_expand = False
d.subs_z = 1.0
d.rail_alt = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
d.panel_dist = 0.05000000074505806
d.panel_expand = False
d.x_offset = 0.0
d.subs_expand = False
d.idmat_post = '4'
d.left_string = False
d.string_alt = -0.03999999910593033
d.handrail_y = 0.03999999910593033
d.radius = 1.0
d.string_expand = False
d.post_z = 1.0
d.idmat_top = '3'
d.idmat_bottom = '1'
d.parts.clear()
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (0.0, 0.0, 1.4040000438690186)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (0.0, 4.0, 1.4040000438690186)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 4.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 4.0, 1.944000005722046)
item_sub_2.prop1_name = 'da'
item_sub_2.p2 = (0.0, 1.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'RADIUS'
item_sub_2.p1 = (1.0, 0.0, 0.0)
item_sub_2.prop2_name = 'radius'
item_sub_2.type_key = 'ARC_ANGLE_RADIUS'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'C_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 5.0, 2.700000047683716)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (-3.0, 5.0, 2.700000047683716)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
d.subs_bottom = 'STEP'
d.user_defined_post = ''
d.panel_offset_x = 0.0
d.idmat_side = '1'
d.right_string = False
d.idmat_raise = '1'
d.left_rail = False
d.parts_expand = False
d.panel_z = 0.6000000238418579
d.bottom_z = 0.029999999329447746
d.z_mode = 'STANDARD'
d.panel_x = 0.009999999776482582
d.post_x = 0.03999999910593033
d.presets = 'STAIR_L'
d.steps_expand = True
d.subs_x = 0.019999999552965164
d.subs_spacing = 0.10000000149011612
d.left_handrail = True
d.handrail_offset = 0.0
d.right_rail = False
d.idmat_panel = '5'
d.post_offset_x = 0.019999999552965164
d.idmat_step_front = '3'
d.rail_n = 1
d.string_offset = 0.0
d.subs_y = 0.019999999552965164
d.handrail_alt = 1.0
d.post_corners = False
d.rail_expand = False
d.rail_offset = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
d.rail_x = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.left_shape = 'RECTANGLE'
d.nose_y = 0.019999999552965164
d.nose_type = 'STRAIGHT'
d.handrail_extend = 0.10000000149011612
d.idmat_string = '3'
d.post_y = 0.03999999910593033
d.subs_alt = 0.0
d.right_handrail = True
d.idmats_expand = False
d.right_shape = 'RECTANGLE'
d.idmat_subs = '4'
d.handrail_radius = 0.019999999552965164
d.right_panel = True
d.post_spacing = 1.0
d.string_x = 0.019999999552965164
d.height = 2.700000047683716
|
[
"[email protected]"
] | |
c3f1f40c430acf8791af7d15a9c634c03815ed76
|
3b7b6648b72910046b6a227db30f71aeee2cba9c
|
/2021-03-08-SimpleRNN/StockReturnPredictionWithLSTM.py
|
f0ad401fbb2df1790b2b25eb955c0d967a9b1a7c
|
[] |
no_license
|
ken2190/deep-learning-study
|
f2abeb1cd302e405a15bbb52188ae44ffb414e2f
|
f2998be89d0c931176f158ae5f48ca562786e171
|
refs/heads/main
| 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 788 |
py
|
from tensorflow.keras.layers import Input, SimpleRNN, GRU, Dropout, LSTM, Dense, Flatten, Softmax
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.preprocessing import LabelBinarizer, StandardScaler
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
df = pd.read_csv('https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv')
series = df["close"].values.reshape(-1, 1)
scalar = StandardScaler()
scalar.fit(series[:len(series) // 2])
series = scalar.transform(series).flatten()
df["prevClose"] = df["close"].shift(1)
df["Return"] = (df["close"] - df["prevClose"])/df["prevClose"]
df["Return"].hist()
u = np.array([1, 2])
v = np.array([3, 4])
|
[
"[email protected]"
] | |
14e14add80032e04c6e82d148372cd9e1ea89a4a
|
dbe7731552d8e6d1e63cc0f2e27d3810cc61f350
|
/hyper_paras/hp_a2c.py
|
1265b7c895a914b699bf58d2d2719a54eb9e5c15
|
[] |
no_license
|
ZhangRui111/rl_breakout_tf
|
6bb3f57f2b1d52f196323916393234e8abb990ac
|
04f259cd3c32eaffbad87fe1035b0f87c96127b0
|
refs/heads/master
| 2020-04-08T19:24:16.018734 | 2018-12-18T02:42:56 | 2018-12-18T02:42:56 | 159,653,713 | 1 | 1 | null | 2018-12-18T02:42:57 | 2018-11-29T11:12:04 |
Python
|
UTF-8
|
Python
| false | false | 356 |
py
|
from hyper_paras.base_hyper_paras import BaseHyperparameters
class Hyperparameters(BaseHyperparameters):
    """Hyper-parameter bundle for the A2C agent (extends the shared base set)."""

    def __init__(self):
        super().__init__()
        self.model = 'A2C'  # identifies the algorithm this config belongs to
        self.MAX_EPISODES = 50001  # 50001 : 500
        # Actor uses a smaller step size than the critic -- a common A2C setup.
        self.LEARNING_RATE_ACTOR = 0.00005
        self.LEARNING_RATE_CRITIC = 0.0001
        self.DISCOUNT_FACTOR = 0.9  # reward discount factor (gamma)
|
[
"[email protected]"
] | |
7e11fd6bffade16b50990049c688e90b29754bf0
|
282769509af68245596dc73de42f552cfd73cd21
|
/autoindex/watcher.py
|
d560ceaf60985c133ac610de4bc2a6e3972819c7
|
[] |
no_license
|
brutasse-archive/autoindex
|
1130173d22c1d996a7cb38fcd59b51d07c0b8068
|
cc5cfc414325aff133c684257e8c2bfdc9aaa672
|
refs/heads/master
| 2021-01-19T14:34:18.472167 | 2012-07-17T21:31:27 | 2012-07-17T21:31:27 | 5,048,409 | 15 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,877 |
py
|
import logging
import os
import signal
from pip.download import is_archive_file
from pyinotify import WatchManager, Notifier, ProcessEvent, EventsCodes
from .indexer import index
logger = logging.getLogger(__name__)
class IndexProcess(ProcessEvent):
    """pyinotify event handler that queues package directories for reindexing.

    Collects the parent directory of every created/modified/deleted archive
    file and (re)arms a 5-second SIGALRM timer, so bursts of filesystem
    events are coalesced into a single indexing pass (handler installed in
    ``watch`` below).
    """

    def __init__(self, wm, mask):
        # wm: pyinotify WatchManager; mask: event mask for new sub-watches.
        self.wm = wm
        self.mask = mask
        self.queue = set()  # directories awaiting (re)indexing; set dedupes

    def update_watch(self, directory):
        # Watch newly created sub-directories so recursive coverage persists.
        self.wm.add_watch(directory, mask=self.mask)

    def process_IN_CREATE(self, event):
        logger.debug("Created {0}".format(event.pathname))
        if os.path.isdir(event.pathname):
            self.update_watch(event.pathname)
        else:
            self.index_alarm(event)

    def process_IN_MODIFY(self, event):
        logger.debug("Modified {0}".format(event.pathname))
        self.index_alarm(event)

    def process_IN_DELETE(self, event):
        logger.debug("Deleted {0}".format(event.pathname))
        self.index_alarm(event)

    def index_alarm(self, event):
        # Only archive files (per pip's is_archive_file) trigger indexing.
        if is_archive_file(event.pathname):
            logger.debug("Queuing indexing")
            self.queue.add(os.path.dirname(event.pathname))
            # One-shot 5 s timer; re-armed on every event so rapid bursts
            # batch into a single drain of the queue.
            signal.setitimer(signal.ITIMER_REAL, 5)
def watch(directory):
    """Watch *directory* recursively and reindex when archive files change.

    Installs a SIGALRM handler that drains the queue built by
    ``IndexProcess``, then blocks forever in the pyinotify notifier loop.
    """
    logger.info("Watching {0}".format(directory))
    flags = EventsCodes.ALL_FLAGS
    mask = flags['IN_CREATE'] | flags['IN_MODIFY'] | flags['IN_DELETE']
    wm = WatchManager()
    wm.add_watch(directory, mask, rec=True)
    process = IndexProcess(wm, mask)
    notifier = Notifier(wm, process)

    def update_index(*args):
        # SIGALRM handler: index each queued directory, one at a time.
        while process.queue:
            # This is slightly sub-optimal, would be better to pop all
            # elements at once but this operation needs to be atomic.
            dist_dir = process.queue.pop()
            index(directory, only=[dist_dir])

    signal.signal(signal.SIGALRM, update_index)
    notifier.loop()
|
[
"[email protected]"
] | |
e1a6d1b6a7f2d662c54225f864327197af261dea
|
2b6fa34dac030ec1f2918b1377956bf791219d22
|
/leetcode/medium/unique-paths.py
|
ec4b4d43fdfd54d17af687e347baacf85881da50
|
[
"MIT"
] |
permissive
|
rainzhop/cumulus-tank
|
aa13fb8f14c27893838a67d2eb69fdd2ac3d6450
|
09ebc7858ea53630e30606945adfea856a80faa3
|
refs/heads/master
| 2020-06-06T23:24:37.498966 | 2020-01-06T09:52:16 | 2020-01-06T09:52:16 | 192,874,778 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 911 |
py
|
# https://leetcode.com/problems/unique-paths/
#
# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time.
# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
# S * * * * * *
# * * * * * * *
# * * * * * * F
#
# Above is a 3 x 7 grid. How many possible unique paths are there?
#
# Note: m and n will be at most 100.
class Solution(object):
    """Count unique monotone (right/down only) paths on an m x n grid."""

    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int

        Recursive count with memoization. The original read ``self.path``
        without ever initializing it, so every call raised AttributeError;
        the memo dict is now created lazily on first use.
        """
        if not hasattr(self, 'path'):
            self.path = {}  # (m, n) -> number of paths
        if (m, n) in self.path:
            return self.path[(m, n)]
        if m == 1 or n == 1:
            # A single row/column admits exactly one path.
            result = 1
        else:
            # Last step arrives either from above or from the left.
            result = self.uniquePaths(m - 1, n) + self.uniquePaths(m, n - 1)
        self.path[(m, n)] = result
        return result
if __name__ == '__main__':
    # Demo entry point. The original used the Python 2 print statement
    # (``print s.uniquePaths(3,3)``), a SyntaxError under Python 3.
    s = Solution()
    print(s.uniquePaths(3, 3))
|
[
"[email protected]"
] | |
4729a3d9e08865cacd04820127685a2d0a867ff4
|
aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8
|
/nagios/check_raster.py
|
92fd2d22b4429549a4571011b62d3ee9c259b62b
|
[
"MIT"
] |
permissive
|
jamayfieldjr/iem
|
e0d496311d82790ad518c600c2fcffe44e834da1
|
275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a
|
refs/heads/master
| 2020-08-07T11:55:56.256857 | 2019-10-04T04:22:36 | 2019-10-04T04:22:36 | 213,439,554 | 1 | 0 |
MIT
| 2019-10-07T17:01:20 | 2019-10-07T17:01:20 | null |
UTF-8
|
Python
| false | false | 782 |
py
|
"""Check a raster file and count the number of non-zero values."""
from __future__ import print_function
import sys
from osgeo import gdal
import numpy
def main():
    """Go Main Go.

    Read the NTP composite raster, count non-zero cells, and print a
    Nagios-style status line. Returns the Nagios exit status
    (0=OK, 1=WARNING, 2=CRITICAL).
    """
    dataset = gdal.Open('/home/ldm/data/gis/images/4326/USCOMP/ntp_0.png')
    grid = dataset.ReadAsArray()
    # Number of cells with a non-zero value vs. total cell count.
    nonzero = numpy.sum(numpy.where(grid > 0, 1, 0))
    total = grid.shape[0] * grid.shape[1]
    if nonzero > 1000:
        print('OK - %s/%s|count=%s;100;500;1000' % (nonzero, total, nonzero))
        return 0
    if nonzero > 500:
        print('WARNING - %s/%s|count=%s;100;500;1000' % (nonzero, total, nonzero))
        return 1
    print('CRITICAL - %s/%s|count=%s;100;500;1000' % (nonzero, total, nonzero))
    return 2
if __name__ == '__main__':
sys.exit(main())
|
[
"[email protected]"
] | |
53241e5667493e3b22a78779f524d5b575342228
|
2fb755e1d23267495345d1a94f4b79a1356657e7
|
/black_box_tests/mapper_example.py
|
45461118a2357b70b83703ecf1eaf2fdcd10696d
|
[
"MIT"
] |
permissive
|
daringer/lollygag
|
66bc86c7bea7943fd713cd5e463d911552b4d979
|
27da172cfa769ef7b850de517f778059068badca
|
refs/heads/master
| 2021-05-16T03:24:15.691274 | 2017-10-11T12:45:45 | 2017-10-11T12:45:45 | 105,471,520 | 0 | 0 | null | 2017-10-01T20:19:20 | 2017-10-01T20:19:20 | null |
UTF-8
|
Python
| false | false | 849 |
py
|
#!/usr/bin/python
from lollygag import run
from lollygag.services import Services
from lollygag.dependency_injection.inject import Inject
from lollygag.core.crawlers.mapper_crawler import MapperCrawler
import json
def on_finish(log_service, crawler):
    """Build a crawl-finished callback that dumps the site map to JSON.

    The returned callback serializes ``crawler.make_map()`` (pretty-printed)
    into ``result.json`` in the current working directory.
    """
    def callback(*args):
        log_service.important("-------------Yeah boiiii, done-----------------")
        result = crawler.make_map()
        result = json.dumps(result, indent=4)
        with open("result.json", "w+") as f:
            f.write(result)
        log_service.important("------------Done processing the tree-----------")
    return callback
def main():
    # Use the mapping crawler implementation for this run.
    Services.crawler_factory = MapperCrawler
    # NOTE(review): ``crawler`` is never defined in this scope -- the next
    # line raises NameError at runtime. It presumably should be an instance
    # obtained via Services.crawler_factory; confirm against the lollygag API.
    crawler.on_finish(on_finish(Services.log_service(), crawler))
    # NOTE(review): on_finish() is declared with two parameters
    # (log_service, crawler) but is called here with one -- TypeError;
    # verify the intended signature before relying on this entry point.
    run(subscribe={'on_finish': on_finish(Services.log_service())})
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
43dde8d0256d76c729723e64d08000466a23902b
|
d3055f3eedfdb124084f092c0f4540aa82a0f04d
|
/texture_tool/describe.py
|
62e6307f6e97bb0cf9de3478bdc4598cdf08df36
|
[] |
no_license
|
podgorskiy/texture-tool
|
a90ec9adee2c8d19b21cdf42b714d8d4917c9612
|
f8973871ee2ce72b4d4756796276b07be06e42dd
|
refs/heads/master
| 2022-04-17T13:36:05.448525 | 2020-04-08T18:03:36 | 2020-04-08T18:03:36 | 253,153,161 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,430 |
py
|
# Copyright 2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import texture_tool
def describe(self):
    """Return a multi-line, human-readable dump of a PVRTexture's state.

    Lists every non-callable, non-dunder attribute, followed by a few
    selected accessor results, wrapped in angle brackets.
    """
    assert isinstance(self, texture_tool.PVRTexture)
    attr_names = [name for name in dir(self)
                  if not callable(getattr(self, name)) and not name.startswith("__")]
    parts = []
    for name in attr_names:
        parts.append('\t' + name + ': ' + str(getattr(self, name)))
    parts.append('\t' + 'Flipped X: ' + str(self.get_orientation(texture_tool.Axis.x)))
    parts.append('\t' + 'Flipped Y: ' + str(self.get_orientation(texture_tool.Axis.y)))
    parts.append('\t' + 'Width: ' + str(self.get_width()))
    parts.append('\t' + 'Height: ' + str(self.get_height()))
    parts.append('\t' + 'Depth: ' + str(self.get_depth()))
    parts.append('\t' + 'dtype: ' + str(self.dtype))
    return '<' + '\n' + '\n'.join(parts) + '\n' + '>'
|
[
"[email protected]"
] | |
b29df2eab12bee0ea732b5953df4904701e18f95
|
c34380b64145b4ce26df9b27c34139d08de27515
|
/highest_scoring_word.py
|
d6718e1ecce87a61b07dea1aab9b93f1d03c0fe1
|
[] |
no_license
|
codeandrew/python-algorithms
|
531bc1574700cb7d822904f1e1ead9a596a85d29
|
c71b0941f14825fcaa3fbb1429365ca1f28a3018
|
refs/heads/master
| 2023-04-28T23:56:01.283434 | 2023-04-05T03:06:22 | 2023-04-05T03:06:22 | 169,078,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 965 |
py
|
"""
Given a string of words, you need to find the highest scoring word.
Each letter of a word scores points according to its position in the alphabet: a = 1, b = 2, c = 3 etc.
You need to return the highest scoring word as a string.
If two words score the same, return the word that appears earliest in the original string.
All letters will be lowercase and all inputs will be valid.
"""
def high(x):
    """Return the highest-scoring word in *x* (a=1 .. z=26).

    On a tie, the word appearing earliest in the string wins.
    """
    words = x.strip(" ").split()
    per_word = []
    for word in words:
        letter_values = []
        per_word.append(letter_values)
        for ch in word:
            letter_values.append(ord(ch) - 96)
    totals = [sum(values) for values in per_word]
    # list.index returns the first occurrence, so ties favor earlier words.
    return words[totals.index(max(totals))]
"""
Other Options
"""
def high(x):
    """Return the highest-scoring word in *x* (a=1 .. z=26); ties -> earliest."""
    tokens = x.split(' ')
    totals = []
    for token in tokens:
        totals.append(sum(ord(ch) - 96 for ch in token))
    # First index of the maximum keeps the earliest word on ties.
    return tokens[totals.index(max(totals))]
def high(words):
    """Return the word with the greatest letter score (a=1 .. z=26).

    Scoring is case-insensitive; ties favor the earliest word.
    """
    def score(word):
        return sum(ord(ch) - ord('a') + 1 for ch in word.lower())
    return max(words.split(), key=score)
|
[
"[email protected]"
] | |
75dc35285e4cc28b0a0071cdf2c074aa2ea6f960
|
37fd103f6b0de68512e3cb6098d0abb9220f5a7d
|
/Python from scratch/014spectogram_waveform.py
|
fb3fad05fc153b831ee682fc1949eb029c556f40
|
[] |
no_license
|
FlyingMedusa/PythonELTIT
|
720d48089738b7e629cad888f0032df3a4ccea2c
|
36ab01fc9d42337e3c76c59c383d7b1a6142f9b9
|
refs/heads/master
| 2020-09-11T18:17:17.825390 | 2020-04-21T16:38:03 | 2020-04-21T16:38:03 | 222,150,066 | 0 | 0 | null | 2020-04-21T16:38:04 | 2019-11-16T19:37:33 |
Python
|
UTF-8
|
Python
| false | false | 562 |
py
|
from scipy.io import wavfile
import matplotlib.pyplot as pyplot
sampling_frequency, signal_data = wavfile.read('sample_for_task_013.wav')
# duration = len(signal_data)/ sampling_frequency
pyplot.subplot(311) # three rows, one col,1st plot
pyplot.specgram(signal_data, Fs = sampling_frequency)
pyplot.title('Some spectogram')
pyplot.xlabel('duration (s)')
pyplot.ylabel('Frequency (Hz)')
pyplot.subplot(313) # three rows, one col,3rd plot
pyplot.plot(signal_data)
pyplot.title('Some waveform')
pyplot.xlabel('duration')
pyplot.ylabel('intensity')
pyplot.show()
|
[
"[email protected]"
] | |
1088e21e565a1e3657d113b966546a1b0eb98ac8
|
5679731cee36c537615d285ed72810f4c6b17380
|
/167_TwoSumII_InputArrayIsSorted.py
|
4ea08c7abe24681955be0a656cf106fb19e4146e
|
[] |
no_license
|
manofmountain/LeetCode
|
6b76105190a9b62df65a7b56b6def4120498b9fa
|
718f688b3d316e8c10ef680d9c21ecd518d062f8
|
refs/heads/master
| 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 565 |
py
|
##43.90%
class Solution(object):
    """Two Sum II: find the 1-based indices of the pair summing to target
    in an ascending-sorted list, using the two-pointer technique."""

    def twoSum(self, numbers, target):
        """
        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        if len(numbers) < 2:
            return []
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            current = numbers[lo] + numbers[hi]
            if current == target:
                # Problem statement uses 1-based indices.
                return [lo + 1, hi + 1]
            if current < target:
                lo += 1
            else:
                hi -= 1
        return []
|
[
"[email protected]"
] | |
ce63be621dd2fa160d3e9198752579ac7e8f9b18
|
364b36d699d0a6b5ddeb43ecc6f1123fde4eb051
|
/_downloads_1ed/fig_fft_text_example.py
|
78f8d57d71630eb3e61ff1ec81dc25ae5256806e
|
[] |
no_license
|
astroML/astroml.github.com
|
eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca
|
70f96d04dfabcd5528978b69c217d3a9a8bc370b
|
refs/heads/master
| 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 |
HTML
|
UTF-8
|
Python
| false | false | 2,376 |
py
|
"""
Example of a Fourier Transform
------------------------------
Figure E.1
An example of approximating the continuous Fourier transform of a function
using the fast Fourier transform.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from astroML.fourier import FT_continuous, sinegauss, sinegauss_FT
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Choose parameters for the wavelet
N = 10000
t0 = 5
f0 = 2
Q = 2
#------------------------------------------------------------
# Compute the wavelet on a grid of times
Dt = 0.01
t = t0 + Dt * (np.arange(N) - N / 2)
h = sinegauss(t, t0, f0, Q)
#------------------------------------------------------------
# Approximate the continuous Fourier Transform
f, H = FT_continuous(t, h)
rms_err = np.sqrt(np.mean(abs(H - sinegauss_FT(f, t0, f0, Q)) ** 2))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
# plot the wavelet
ax = fig.add_subplot(211)
ax.plot(t, h.real, '-', c='black', label='$Re[h]$', lw=1)
ax.plot(t, h.imag, ':', c='black', label='$Im[h]$', lw=1)
ax.legend()
ax.set_xlim(2, 8)
ax.set_ylim(-1.2, 1.2)
ax.set_xlabel('$t$')
ax.set_ylabel('$h(t)$')
# plot the Fourier transform
ax = fig.add_subplot(212)
ax.plot(f, H.real, '-', c='black', label='$Re[H]$', lw=1)
ax.plot(f, H.imag, ':', c='black', label='$Im[H]$', lw=1)
ax.text(0.55, 1.5, "RMS Error = %.2g" % rms_err)
ax.legend()
ax.set_xlim(0.5, 3.5)
ax.set_ylim(-1.9, 1.9)
ax.set_xlabel('$f$')
ax.set_ylabel('$H(f)$')
plt.show()
|
[
"[email protected]"
] | |
cfbb540e6dfba1237f2ee80097afe65bc324da40
|
177df2b442866474377498a8b85f3d58410d0193
|
/create_glidein_tarball.py
|
45b070a821e819c0b6f139301c0d4fe04e8cab66
|
[] |
no_license
|
briedel/pyglidein
|
6c19f2d310bd15a85df50eb384e8d2f186aaff50
|
835c458e4f7f0dc0dcf785120da31ffa9425f0bd
|
refs/heads/master
| 2020-12-11T03:35:27.540075 | 2017-03-24T14:28:47 | 2017-03-24T14:28:47 | 49,531,789 | 0 | 0 | null | 2016-03-18T17:26:32 | 2016-01-12T22:02:49 |
Python
|
UTF-8
|
Python
| false | false | 7,707 |
py
|
"""
Create a glidein tarball by downloading the source, building it, then
copying what is needed into the tarball.
"""
import sys
import os
import shutil
import subprocess
import tarfile
import tempfile
if sys.version_info[0] < 3 and sys.version_info[1] < 7:
raise Exception('requires python 2.7+')
def libuuid_download(version='1.0.3'):
url = 'http://downloads.sourceforge.net/project/libuuid/libuuid-'+version+'.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'libuuid-'+version+'.tar.gz'])
return 'libuuid-'+version
def libuuid_build():
"""Build uuid statically"""
dirname = libuuid_download()
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['--enable-static',
'--disable-shared',
'--prefix',os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['./configure']+options)
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def cvmfs_download():
url = 'https://github.com/cvmfs/cvmfs/archive/libcvmfs-stable.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'libcvmfs-stable.tar.gz'])
return 'cvmfs-libcvmfs-stable'
def cvmfs_build():
libuuid = libuuid_build()
dirname = cvmfs_download()
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['-Wno-dev',
'-DINSTALL_MOUNT_SCRIPTS=OFF',
'-DBUILD_SERVER=OFF',
'-DBUILD_CVMFS=OFF',
'-DBUILD_LIBCVMFS=ON',
'-DINSTALL_BASH_COMPLETION=OFF',
'-DUUID_LIBRARY:FILE='+os.path.join(libuuid,'lib','libuuid.a'),
'-DUUID_INCLUDE_DIR:PATH='+os.path.join(libuuid,'include'),
'-DCMAKE_INSTALL_PREFIX='+os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['cmake']+options)
subprocess.check_call(['make','libpacparser'])
os.chdir('cvmfs')
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def parrot_download(version):
url = 'http://ccl.cse.nd.edu/software/files/cctools-'+version+'-source.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'cctools-'+version+'-source.tar.gz'])
return 'cctools-'+version+'-source'
def parrot_build(version='6.0.14'):
cvmfs = cvmfs_build()
dirname = parrot_download(version)
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['--without-system-sand',
'--without-system-allpairs',
'--without-system-wavefront',
'--without-system-makeflow',
# '--without-system-ftp-lite',
# '--without-system-chirp',
'--without-system-umbrella',
'--without-system-resource_monitor',
'--without-system-doc',
'--with-cvmfs-path',cvmfs,
'--prefix',os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['./configure']+options)
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def condor_download(version):
version = version.replace('.','_')
url = 'https://github.com/htcondor/htcondor/archive/V'+version+'.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'V'+version+'.tar.gz'])
return 'htcondor-'+version
def condor_build(version='8.6.1'):
dirname = condor_download(version)
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = [
'-DHAVE_BACKFILL=OFF',
'-DHAVE_BOINC=OFF',
'-DHAVE_HIBERNATION=OFF',
'-DHAVE_KBDD=OFF',
'-DWANT_GLEXEC=OFF',
'-DWANT_FULL_DEPLOYMENT=OFF',
'-DWITH_BOINC=OFF',
'-DWITH_BOSCO=OFF',
'-DWITH_CAMPUSFACTORY=OFF',
'-DWITH_BLAHP=OFF',
'-DWITH_CURL=OFF',
'-DWITH_COREDUMPER=OFF',
'-DWITH_CREAM=OFF',
'-DWITH_GANGLIA=OFF',
'-DWITH_GLOBUS=OFF',
'-DWITH_GSOAP=OFF',
'-DWITH_LIBDELTACLOUD=OFF',
'-DWITH_LIBVIRT=OFF',
'-DWITH_PYTHON_BINDINGS=OFF',
'-DWITH_UNICOREGAHP=OFF',
'-DWITH_VOMS=OFF',
]
if version > '8.5.2':
options.append('-DWITH_KRB5=OFF')
subprocess.check_call(['cmake','-DCMAKE_INSTALL_PREFIX:PATH='+os.getcwd()+'/release_dir']
+options+['.'])
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--template-dir',dest='template',default='glidein_template',
help='Location of template directory')
parser.add_option('--htcondor-version',dest='condor',default=None,
help='HTCondor version to use')
parser.add_option('--parrot-version',dest='parrot',default=None,
help='Parrot (cctools) version to use')
parser.add_option('-o','--output',dest='output',default='glidein.tar.gz',
help='output tarball name')
(options, args) = parser.parse_args()
if not options.template:
raise Exception('need a template directory')
options.template = os.path.abspath(options.template)
curdir = os.getcwd()
d = tempfile.mkdtemp(dir=os.getcwd())
tarfile_name = os.path.abspath(os.path.expandvars(os.path.expanduser(options.output)))
try:
os.chdir(d)
parrot_opts = {}
if options.parrot:
parrot_opts['version'] = options.parrot
parrot_path = parrot_build(**parrot_opts)
condor_opts = {}
if options.condor:
condor_opts['version'] = options.condor
condor_path = condor_build(**condor_opts)
with tarfile.open(tarfile_name,'w:gz') as tar:
for f in os.listdir(options.template):
tar.add(os.path.join(options.template,f),arcname=f)
tar.add('.',arcname='glideinExec',recursive=False)
for f in os.listdir(condor_path):
tar.add(os.path.join(condor_path,f),arcname=os.path.join('glideinExec',f))
tar.add(os.path.join(parrot_path,'bin','parrot_run'),arcname=os.path.join('GLIDEIN_PARROT','parrot_run'))
tar.add(os.path.join(parrot_path,'lib','libparrot_helper.so'),arcname=os.path.join('GLIDEIN_PARROT','libparrot_helper.so'))
finally:
os.chdir(curdir)
shutil.rmtree(d)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f1fdec782a19b71a749c643458ec9d0408978d66
|
053221e1d90b365f68701dbd5b6466f30d1f6fd7
|
/Day4/vd2.py
|
d2624b1ae91bd834e7c6b6d1c9a499d95af8c68b
|
[] |
no_license
|
pytutorial/py2011E
|
eceb4d563cc807294b08b818edadd521ed8da488
|
306437369b0bfe55a2fa827b098283856242e731
|
refs/heads/main
| 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
# vd2.py
# Read a person's full name from input and print the family name,
# middle name(s) and given name separately.
ho_ten = input('Họ và tên:')
# Split on whitespace: first token = family name, last token = given name,
# everything in between = middle name(s).
items = ho_ten.split()
ho = items[0]    # family name
ten = items[-1]  # given name (equals ho when only one token is entered)
ten_dem = ''     # middle names, rebuilt with a trailing space per part
for i in range(1, len(items)-1):
    ten_dem += items[i] + ' '
print('Họ: ', ho)
print('Tên đệm:', ten_dem)
print('Tên: ', ten)
|
[
"[email protected]"
] | |
6859b7420def17cbc91c49bd229e6028b100e87d
|
bf3a87fd7725ad4e7e85492509f3e5aa68709fd0
|
/chat/.history/Cliente_20191106204840.py
|
8b51d56c2ef6c7a8b2f56ce7b17b3a47b7f38cdd
|
[] |
no_license
|
slalbertojesus/merixo-grpc
|
f468b4f6349b4367ad6064f175cef7c3e49d829f
|
182569a89cad605fd81b095861fd58390729c720
|
refs/heads/master
| 2020-09-04T21:39:53.488701 | 2019-12-25T02:07:24 | 2019-12-25T02:07:24 | 219,899,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 825 |
py
|
import grpc
import uuid
import chat_pb2 as structure
import chat_pb2_grpc as grpc_chat
from Usuario import Usuario
class Cliente():
    """gRPC chat client: subscribes a hard-coded user to the chat service."""

    def IniciarCliente(self):
        """Open a channel to the local chat server and subscribe a new user.

        Prints the generated user id and the server's confirmation.
        """
        user_id = uuid.uuid1()
        print(user_id)
        channel = grpc.insecure_channel('localhost:50051')
        conn = grpc_chat.ChatAdminStub(channel)
        # Build a protobuf message *instance*. The original mutated class
        # attributes of structure.Usuario and passed the class itself as the
        # request, which is not a valid protobuf message object.
        request = structure.Usuario(id=user_id.hex, usuario="Choco", activo=True)
        confirmacion = conn.Subscribirse(request)
        print(confirmacion)
if __name__ == '__main__':
cliente = Cliente()
cliente.IniciarCliente()
|
[
"[email protected]"
] | |
405a1959f9d4f85a7a2f446f5fc40e3adc4d2834
|
f89cd667200844f019dbf2c93798e7fee96b89e2
|
/dynamic-programming/exercises/ugly-numbers.py
|
ab24762e2184774dfc0008339825acefc4170efc
|
[] |
no_license
|
radomirbrkovic/algorithms
|
575f4540c7aab2daf3e55d0df99030e440ee2060
|
621d0f82e0e4cd253afc0e07772a201b019f7889
|
refs/heads/master
| 2023-07-15T23:59:29.725946 | 2021-09-01T19:47:08 | 2021-09-01T19:47:08 | 250,455,390 | 0 | 0 | null | 2021-09-01T19:47:09 | 2020-03-27T06:12:52 |
Python
|
UTF-8
|
Python
| false | false | 596 |
py
|
# Ugly Numbers https://www.geeksforgeeks.org/ugly-numbers/
def maxDivide(a, b):
    """Divide *a* by *b* as long as it divides evenly; return the remainder factor.

    Uses floor division so the computation stays in exact integer arithmetic;
    the original used true division (``/``), which turns ``a`` into a float
    and risks precision loss for large inputs under Python 3.
    """
    while a % b == 0:
        a //= b
    return a
def isUgly(no):
    """Return 1 if *no* has no prime factors other than 2, 3 and 5, else 0."""
    # Strip out each allowed prime factor completely.
    for prime in (2, 3, 5):
        no = maxDivide(no, prime)
    # Anything left over means a disallowed prime factor was present.
    return 1 if no == 1 else 0
# Function to get the nth ugly number
def getNthUglyNo(n):
    """Return the n-th ugly number (1-indexed; 1 counts as the first)."""
    candidate = 1
    found = 1  # 1 itself is the first ugly number
    # Walk the integers, counting ugly ones until we reach the n-th.
    while found < n:
        candidate += 1
        if isUgly(candidate):
            found += 1
    return candidate
print("150th ugly number is ", getNthUglyNo(150))
|
[
"[email protected]"
] | |
31cdbe882af4808f510d60c5303fc71448bad50f
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2016/Symbol.keyFor.spec
|
4405fe814db693ef8c40840d1d430431bc104824
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 |
NOASSERTION
| 2022-02-27T11:05:26 | 2021-07-08T07:53:21 |
Python
|
UTF-8
|
Python
| false | false | 396 |
spec
|
1. If Type(_sym_) is not Symbol, throw a *TypeError* exception.
1. For each element _e_ of the GlobalSymbolRegistry List (see <emu-xref href="#sec-symbol.for"></emu-xref>),
1. If SameValue(_e_.[[Symbol]], _sym_) is *true*, return _e_.[[Key]].
1. Assert: GlobalSymbolRegistry does not currently contain an entry for _sym_.
1. Return *undefined*.
|
[
"[email protected]"
] | |
b27373bc38eff28a67ebaad6b5aa01a01e97f5e3
|
a884039e1a8b0ab516b80c2186e0e3bad28d5147
|
/Livros/Livro-Desenvolvimento web com Flask/Capitulo02/Nível 02/exemplo07a.py
|
0129fa2065636c4e62560194d3ba20d2e016d1d8
|
[
"MIT"
] |
permissive
|
ramonvaleriano/python-
|
6e744e8bcd58d07f05cd31d42a5092e58091e9f0
|
ada70918e945e8f2d3b59555e9ccc35cf0178dbd
|
refs/heads/main
| 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
# Program: exemplo07a.py
# Author: Ramon R. Valeriano
# Description: Programa do Capítulo 2, para melhorar a fixação
# Developed: 02/03/2020 - 16:29
from flask import Flask, make_response
app = Flask(__name__)


@app.route('/')
def index():
    """Serve the index page, attaching an 'answer' cookie to the response."""
    response = make_response('<h1>Este documento esta sendo carregado em um cookie.</h1>')
    response.set_cookie('answer', '42')
    return response


if __name__ == '__main__':
    # Start the development server only when run directly, not on import —
    # the original called app.run() unconditionally at module level.
    app.run()
|
[
"[email protected]"
] | |
eb14ad9cc026342ecb88f0372c9d46218bb7bf1c
|
584db1be8b6bdedaa56d186692ad72da5ee07164
|
/patron/cells/weights/__init__.py
|
d83f31e1ab2fdb889a4e774c5b82817b6dad2c51
|
[
"Apache-2.0"
] |
permissive
|
casbin/openstack-patron
|
66006f57725cf1c3d735cd5529d3459fd77384c8
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
refs/heads/master
| 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,286 |
py
|
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from patron import weights
class WeightedCell(weights.WeighedObject):
    """A cell paired with the weight the scheduler assigned to it."""

    def __repr__(self):
        return "WeightedCell [cell: {}, weight: {}]".format(
            self.obj.name, self.weight)
class BaseCellWeigher(weights.BaseWeigher):
    """Base class for cell weighers; concrete weighers subclass this."""
    pass
class CellWeightHandler(weights.BaseWeightHandler):
    """Weight handler that wraps weighed cells in WeightedCell objects."""

    object_class = WeightedCell

    def __init__(self):
        # Restrict this handler to plugins derived from BaseCellWeigher.
        super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
    """Return a list of weight plugin classes found in this directory."""
    handler = CellWeightHandler()
    return handler.get_all_classes()
|
[
"[email protected]"
] | |
d999acb14a4258c765255569ad0349f26990ecdc
|
38bf7e24a2150983f482a6749dc661ed4c4a4439
|
/docs/source/conf.py
|
914308cfb0a62a3b79401f3a79e53ff0e90b1f3c
|
[] |
no_license
|
guoweikuang/flask_v2ex
|
15b6247d979146ada57fe2e6dd7c93f7708297ff
|
d84c14b1d90be78e634677dee332a63bca69c7fc
|
refs/heads/master
| 2022-12-17T19:36:57.945884 | 2019-10-23T13:25:44 | 2019-10-23T13:25:44 | 116,472,843 | 20 | 5 | null | 2022-11-22T02:08:35 | 2018-01-06T10:09:07 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,776 |
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'V2Ex'
copyright = '2018, guoweikuang'
author = 'guoweikuang'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'v1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Use the Read the Docs theme. (The generated default assignment of
# 'alabaster' was dead code — immediately overwritten below.)
import sphinx_rtd_theme

html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'V2Exdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'V2Ex.tex', 'V2Ex Documentation',
'guoweikuang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'v2ex', 'V2Ex Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'V2Ex', 'V2Ex Documentation',
author, 'V2Ex', 'One line description of project.',
'Miscellaneous'),
]
|
[
"[email protected]"
] | |
9bf3d2c051c29082aa33cfeceab377e3427f85ff
|
05abb78c60a69422ae3e00a542bbd4573faf8174
|
/python-para-zumbis/lista2/exercicio1.py
|
9918795b5836c2bd55e4644ea40ede511eb2e42b
|
[] |
no_license
|
xuting1108/Programas-de-estudo
|
72b812d52f5b130a95103c38dbe9e471dc5aa6f9
|
01fe21097055d69c2115cff3da2199429e87dead
|
refs/heads/master
| 2022-10-20T17:06:14.517643 | 2019-04-08T11:16:12 | 2019-04-08T11:16:12 | 179,678,721 | 0 | 1 | null | 2022-10-09T13:13:57 | 2019-04-05T12:38:23 |
Python
|
UTF-8
|
Python
| false | false | 600 |
py
|
# Faça um Programa que peça os três lados de um triângulo. O programa deverá informar se os valores podem ser um triângulo.
# Indique, caso os lados formem um triângulo, se o mesmo é: equilátero, isósceles ou escaleno.
# Read the three side lengths.
lado_a = float(input('informe a medida do lado a: '))
lado_b = float(input('informe a medida do lado b: '))
lado_c = float(input('informe a medida do lado c: '))

# The header comment requires checking whether the sides can form a
# triangle at all: each side must be shorter than the sum of the others.
if (lado_a + lado_b <= lado_c or lado_a + lado_c <= lado_b
        or lado_b + lado_c <= lado_a):
    print('os valores não formam um triângulo')
elif lado_a == lado_b == lado_c:
    print('o triangulo é equilátero')
elif lado_a == lado_b or lado_a == lado_c or lado_b == lado_c:
    print('o triangulo é isósceles')
else:
    print('o triangulo é escaleno')
|
[
"[email protected]"
] | |
b9bbeafefaafd8ff7661334198c1365cd73e36d1
|
f73bcada5ab8432d2af07b5cb7fd7a38109d3e3a
|
/.history/parser_20201108170616.py
|
c0517d7feb1c60b713329f35cfcf547572ddba48
|
[] |
no_license
|
mariajbp/gedcomparser
|
837bf4ae5628a81e535d233c7c35313c6d86d78c
|
6fc55899e5a82c4071991ab94a344b64c014b84d
|
refs/heads/master
| 2023-01-23T09:01:27.459597 | 2020-11-19T23:58:53 | 2020-11-19T23:58:53 | 310,900,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,247 |
py
|
#!/usr/bin/python3
#python3 parser.py input/bible.gedcom > test.txt
import sys
from re import *
filename = sys.argv[1].split('/')[1]
assetPath = "assets"
indPath = "individuals"
famPath = "families"
cssPath = "assets/gedcom.css"
def createFamily(fk, fi):
    """Write an HTML page for family ``fk``; ``fi`` is its parsed record dict."""
    with open('assets/families/' + fk + '.html', 'w') as page:
        page.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
        page.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
        page.write('<h1> Código da familia: ' + fk + '</h1>')
        # Echo the record content to stdout (original behaviour kept).
        for chave, valor in fi.items():
            print(chave)
            print(valor)
def createIndex(fam, indi):
    """Write the top-level index page linking every family and individual."""
    with open("assets/index.html", 'w') as page:
        page.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="index.css"></head>\n')
        page.write('<h1> Ficheiro: ' + filename + '</h1>')
        page.write('<div class="row"><div class="column"><h2>Familias</h2>')
        for fkey in fam:
            page.write('<li> <a href=\"' + famPath + '/' + fkey + '.html\">' + fkey + '</a></li>\n')
        page.write('</ul> </div>')
        page.write('<div class="column"><h2>Individuos</h2>')
        for ikey in indi:
            page.write('<li> <a href=\"' + indPath + '/' + ikey + '.html\">' + ikey + '</a></li>\n')
        page.write('</ul></div></div>')
BG = {}  # individual id -> {'name': ..., 'fams': [...]}


def procIndi(s, i):
    """Parse one INDI record ``i`` and register it in BG under id ``s``."""
    record = {}
    name_match = search(r'\bNAME\s+(.*)', i)
    if name_match:
        record['name'] = name_match.group(1)
    # Every FAMS cross-reference this individual belongs to (may be empty).
    record['fams'] = findall(r'\bFAMS\s+@(.*)@', i)
    BG[s] = record
BF = {}  # family id -> {'husb': ..., 'wife': ..., 'child': [...]}


def procFam(f, i):
    """Parse one FAM record ``i`` and register it in BF under id ``f``."""
    record = {}
    husband = search(r'\bHUSB\s+@(.*)@', i)
    if husband:
        record['husb'] = husband.group(1)
    wife = search(r'\bWIFE\s+@(.*)@', i)
    if wife:
        record['wife'] = wife.group(1)
    # All CHIL cross-references (may be empty).
    record['child'] = findall(r'\bCHIL\s+@(.*)@', i)
    BF[f] = record
def process(t):
    """Split a GEDCOM dump into level-0 records and dispatch INDI/FAM ones."""
    for record in split(r'\n0', t):
        indi_id = search(r'@(I\d+)@ *INDI', record)  # individual record?
        fam_id = search(r'@(F\d+)@ *FAM', record)    # family record?
        if indi_id:
            procIndi(indi_id.group(1), record)
        if fam_id:
            procFam(fam_id.group(1), record)
# Entry point: read the GEDCOM file named on the command line, build the
# in-memory family (BF) and individual (BG) maps, then emit the HTML pages.
with open(sys.argv[1], 'r') as f :
    gedcom = f.read()
    process(gedcom)
    createIndex(BF.keys(), BG.keys())
    # One page per parsed family.
    for k,v in BF.items():
        createFamily(k,v)
|
[
"[email protected]"
] | |
4a5d3fe945019ad4717eef5286af1768dc05b083
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_vicarage.py
|
3cfe8615692b2c6a7f3f67bc930f9033fcdd2e06
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
#calss header
class _VICARAGE():
def __init__(self,):
self.name = "VICARAGE"
self.definitions = [u'the house in which a vicar lives']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
4c9ec16df1b6a85b34a767c4e8a4d46e53d950f7
|
82256eb259bf5fa75a8f15500a6b5a1306a07034
|
/addintegers3.py
|
f6d586ed9a6c80d2002f3850a12e20180a03404d
|
[] |
no_license
|
dennisnderitu254/Andela-Exercises
|
1c0d2c309b6ea113a4d812e313ded867f6dea9a4
|
edb17f0ed867a4436478a8d9bf5690a749155781
|
refs/heads/master
| 2021-05-05T13:38:06.658363 | 2017-10-31T14:35:38 | 2017-10-31T14:35:38 | 105,002,996 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
# Exhibiting functional composition
# NOTE(review): raw_input means this targets Python 2; there, print(...)
# with commas outputs a parenthesized tuple, e.g. "(1, '+', 2, '=', 3)".
# Confirm the intended interpreter before relying on the output format.
num1 = int(raw_input('Please enter an integer value:'))
num2 = int(raw_input('Please enter another integer value:'))
print(num1, '+', num2, '=', num1 + num2)
|
[
"[email protected]"
] | |
a972d8916751e7929616031a929acb51c7a7b956
|
3e2447737acc8e6bef6728b1a8e5f1d5e6db2968
|
/opennem/pipelines/wem/balancing_summary.py
|
fd431bcabfb48da0aacae2723bb3de03f7e58e17
|
[
"MIT"
] |
permissive
|
gaslitbytech/opennem
|
5a5197003662725ccd2f82d790cdb1495a975a07
|
deec3e2079db9d9d84171010fd0c239170d1e7ce
|
refs/heads/master
| 2023-07-23T14:08:28.949054 | 2020-10-09T03:53:20 | 2020-10-09T03:53:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,148 |
py
|
import csv
import logging
from sqlalchemy.dialects.postgresql import insert
from opennem.db.models.opennem import BalancingSummary
from opennem.pipelines import DatabaseStoreBase
from opennem.schema.network import NetworkWEM
from opennem.utils.dates import parse_date
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class WemStoreBalancingSummary(DatabaseStoreBase):
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
csvreader = csv.DictReader(item["content"].split("\n"))
records_to_store = []
for record in csvreader:
trading_interval = parse_date(
record["Trading Interval"], dayfirst=True, network=NetworkWEM
)
if not trading_interval:
continue
records_to_store.append(
{
"network_id": "WEM",
"network_region": "WEM",
"trading_interval": trading_interval,
"forecast_load": record["Load Forecast (MW)"],
"generation_scheduled": record[
"Scheduled Generation (MW)"
],
"generation_non_scheduled": record[
"Non-Scheduled Generation (MW)"
],
"generation_total": record["Total Generation (MW)"],
"price": record["Final Price ($/MWh)"],
}
)
stmt = insert(BalancingSummary).values(records_to_store)
stmt.bind = self.engine
stmt = stmt.on_conflict_do_update(
constraint="balancing_summary_pkey",
set_={
"price": stmt.excluded.price,
"generation_total": stmt.excluded.generation_total,
},
)
try:
r = s.execute(stmt)
s.commit()
except Exception as e:
logger.error("Error inserting records")
logger.error(e)
finally:
s.close()
return len(records_to_store)
|
[
"[email protected]"
] | |
c54cbc847e347a11beaa33ad2bd3cb4e97c48277
|
28cd350c10e5fe3542f2913e1833f5725aa56fd5
|
/prepare_VehicleID.py
|
17adc3f1c349e6a19d4ae965ba534f591054547c
|
[
"MIT"
] |
permissive
|
layumi/Person_reID_baseline_pytorch
|
dffeb79f25f2fe1b83646746bbb295f2df36bad4
|
4dae9cdf42f71c72a44a64fb23bfc470c501085f
|
refs/heads/master
| 2023-09-03T14:34:04.082508 | 2023-08-17T04:12:26 | 2023-08-17T04:12:26 | 115,712,649 | 4,042 | 1,132 |
MIT
| 2023-06-19T08:29:17 | 2017-12-29T10:22:41 |
Python
|
UTF-8
|
Python
| false | false | 2,992 |
py
|
import os
from shutil import copyfile
def copy_file(s, t):
for root, dirs, files in os.walk(s):
for name in files:
copyfile(root+'/'+name,t+'/'+name)
# You only need to change this line to your dataset download path
download_path = './data/VehicleID_V1.0/'
if not os.path.isdir(download_path):
print('please change the download_path')
#---------------------------------------
#train_all
train_path = download_path + '/image'
train_save_path = download_path + '/pytorch/train_test'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
fname = './data/VehicleID_V1.0/attribute/img2vid.txt'
with open(fname) as fp:
for i, line in enumerate(fp):
name, label = line.split(' ')
name = name + '.jpg'
ID = int(label)
src_path = train_path + '/' + name
dst_path = train_save_path + '/p%d'%ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
print(src_path, dst_path)
copyfile( src_path, dst_path+'/'+name)
#---------------------------------------
#train
train_list = []
train_only_save_path = download_path + '/pytorch/train'
if not os.path.isdir(train_only_save_path):
os.mkdir(train_only_save_path)
with open(download_path+'train_test_split/train_list.txt', 'r') as f:
for name in f:
name = name.replace('\n','')
train_ID = name.split(' ')
train_ID = int(train_ID[1])
if not train_ID in train_list:
train_list.append(train_ID)
print(len(train_list))
for ID in train_list:
os.system('rsync -r %s/p%d %s'%( train_save_path, ID, train_only_save_path))
#---------------------------------------
#val800
for num in [800,1600,2400]:
val_list = []
query_save_path = download_path + '/pytorch/query%d'%num
gallery_save_path = download_path + '/pytorch/gallery%d'%num
if not os.path.isdir(query_save_path):
os.mkdir(query_save_path)
os.mkdir(gallery_save_path)
with open(download_path+'train_test_split/test_list_%d.txt'%num, 'r') as f:
for name in f:
name = name.replace('\n','')
val_ID = name.split(' ')
val_name = val_ID[0] + '.jpg'
val_ID = int(val_ID[1])
src_path = train_path + '/' + val_name
if val_ID not in val_list:
val_list.append(val_ID)
dst_path = gallery_save_path + '/p%d'%val_ID #For VehicleID QueryNumber > Gallery
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
else:
dst_path = query_save_path + '/p%d'%val_ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
|
[
"[email protected]"
] | |
16ee84d5d1b6441baaf6dbf58d95f65b16fd49cb
|
e1b3816615cce62ebe2b6c59b0eb3fbd3693d73b
|
/solutions/167-two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py
|
60d0a04a154052849aad48a3e763a43ca3bebcba
|
[] |
no_license
|
fagan2888/leetcode-6
|
1fb18979ffacb82d5db77988b38ecd7371b428b9
|
14176f1752e2bb94dec51bd90dfd412896ed84de
|
refs/heads/master
| 2022-01-10T03:27:51.388066 | 2019-06-15T14:13:48 | 2019-06-15T14:13:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,107 |
py
|
# -*- coding:utf-8 -*-
# Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
#
# The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
#
# Note:
#
#
# Your returned answers (both index1 and index2) are not zero-based.
# You may assume that each input would have exactly one solution and you may not use the same element twice.
#
#
# Example:
#
#
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
#
class Solution(object):
    def twoSum(self, numbers, target):
        """Return 1-based indices [i1, i2] (i1 < i2) whose values sum to target.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        if len(numbers) <= 1:
            return None
        # Map each still-needed complement to the 1-based index that wants it.
        wanted = {}
        for idx, value in enumerate(numbers):
            if value in wanted:
                return [wanted[value], idx + 1]
            wanted[target - value] = idx + 1
|
[
"[email protected]"
] | |
8852e9dcd8cde183a336da575c9de3ddf255095c
|
15a2a8c612545e61dab18a5d0673b1cef95a9638
|
/Part/神龙天女.py
|
a09f4df92188101b24dd402950e0a0ce29b7c469
|
[] |
no_license
|
YICHENG-LAI/DNFCalculating
|
6fa10b692580dad119446307508a3bf32ff46d1a
|
426375e4e0034e435a8f38974ce81323c8ea7f9c
|
refs/heads/master
| 2022-11-17T00:18:06.650791 | 2020-07-05T07:28:50 | 2020-07-05T07:28:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,360 |
py
|
from PublicReference.base import *
class 神龙天女主动技能(主动技能):
    """Active-skill base for this subclass: effective CD includes the
    rosary weapon modifier on top of the recovery stat."""

    def 等效CD(self, 武器类型):
        return round(self.CD / self.恢复 * 1.05, 1)
        # rosary (念珠) cooldown factor: 1.05
class 神龙天女技能0(神龙天女主动技能):
名称 = '罪业加身'
所在等级 = 10
等级上限 = 60
基础等级 = 48
基础 = 2014 - 204.4468
成长 = 204.4468
CD = 6.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能1(神龙天女主动技能):
名称 = '唤雷符'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1721 - 174.644
成长 = 174.644
CD = 5.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能2(神龙天女主动技能):
名称 = '念珠连射'
备注 = '(TP为基础精通)'
所在等级 = 15
等级上限 = 1
基础等级 = 1
基础 = 9195.58 / 9.362
成长 = 0
CD = 1.0
TP成长 = 0.1
TP上限 = 5
class 神龙天女技能3(神龙天女主动技能):
名称 = '木槵子经'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1602 - 163.6
成长 = 163.6
CD = 4.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能4(神龙天女主动技能):
名称 = '束灵符'
所在等级 = 20
等级上限 = 60
基础等级 = 43
基础 = 2052 - 208.214
成长 = 208.214
CD = 7.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能5(神龙天女主动技能):
名称 = '驱邪咒'
所在等级 = 25
等级上限 = 60
基础等级 = 41
基础 = 5100 - 519
成长 = 519
CD = 12.0
TP上限 = 5
TP倍率 = [1, 1.125, 1.228, 1.330, 1.433, 1.535]
def 等效百分比(self, 武器类型):
if self.等级 == 0:
return 0
else:
return int((self.基础 + self.成长 * self.等级)* self.TP倍率[self.TP等级] * self.倍率)
class 神龙天女技能6(被动技能):
名称 = '祈雨祭'
所在等级 = 25
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能7(被动技能):
名称 = '神术强化'
所在等级 = 30
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 <= 10:
return round(1.05 + 0.015 * self.等级, 5)
else:
return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能8(神龙天女主动技能):
名称 = '和合之玉'
所在等级 = 30
等级上限 = 60
基础等级 = 38
基础 = 5233 - 531.108
成长 = 531.108
CD = 15.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能9(神龙天女主动技能):
名称 = '聚魂吸星符'
所在等级 = 35
等级上限 = 60
基础等级 = 36
基础 = 6004 - 609.629
成长 = 609.629
CD = 15.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.14
self.CD *= 0.95
class 神龙天女技能10(神龙天女主动技能):
名称 = '龙魂之怒'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 8116 - 823.406
成长 = 823.406
CD = 20.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能11(神龙天女主动技能):
名称 = '百八念珠'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 13060 - 1326.25
成长 = 1326.25
CD = 25.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.18
self.CD *= 0.83
class 神龙天女技能12(神龙天女主动技能):
名称 = '不动珠箔阵'
所在等级 = 45
等级上限 = 60
基础等级 = 31
基础 = 16138 - 1635.567
成长 = 1635.567
CD = 45.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.09
self.CD *= 0.9
class 神龙天女技能13(神龙天女主动技能):
名称 = '神龙如意珠'
备注 = '(1次)'
是否主动 = 0
所在等级 = 48
等级上限 = 40
基础等级 = 20
基础 = 526 - 83.947
成长 = 83.947
CD = 0.5
关联技能 = ['所有']
def 等效CD(self, 武器类型):
return 0.5
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.115 + 0.015 * self.等级, 5)
class 神龙天女技能14(神龙天女主动技能):
名称 = '神谕:神龙雷雨祭'
所在等级 = 50
等级上限 = 40
基础等级 = 12
基础 = 45113 - 10407
成长 = 10407
CD = 140
class 神龙天女技能15(神龙天女主动技能):
名称 = '因果业火符'
所在等级 = 60
等级上限 = 40
基础等级 = 23
基础 = 13346 - 1354.864
成长 = 1354.864
CD = 30.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能16(神龙天女主动技能):
名称 = '夺命大念阵'
所在等级 = 70
等级上限 = 40
基础等级 = 18
基础 = 24291 - 2464.235
成长 = 2464.235
CD = 50.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能17(被动技能):
名称 = '龙神之力'
所在等级 = 75
等级上限 = 40
基础等级 = 11
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.23 + 0.02 * self.等级, 5)
class 神龙天女技能18(神龙天女主动技能):
名称 = '退魔阴阳符'
所在等级 = 75
等级上限 = 40
基础等级 = 16
基础 = 42399 - 4303.067
成长 = 4303.067
CD = 40.0
class 神龙天女技能19(神龙天女主动技能):
名称 = '天坠阴阳玉'
所在等级 = 80
等级上限 = 40
基础等级 = 13
基础 = 40585 - 4117.917
成长 = 4117.917
CD = 45.0
class 神龙天女技能20(神龙天女主动技能):
名称 = '龙威如狱·龙恩如海'
所在等级 = 85
等级上限 = 40
基础等级 = 5
基础 = 92783 - 21518
成长 = 21518
CD = 180.0
class 神龙天女技能21(被动技能):
名称 = '卓越之力'
所在等级 = 95
等级上限 = 40
基础等级 = 4
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.18 + 0.02 * self.等级, 5)
class 神龙天女技能22(被动技能):
名称 = '超卓之心'
所在等级 = 95
等级上限 = 11
基础等级 = 1
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.045 + 0.005 * self.等级, 5)
class 神龙天女技能23(被动技能):
名称 = '觉醒之抉择'
所在等级 = 100
等级上限 = 40
基础等级 = 2
关联技能 = ['无']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.10 + 0.05 * self.等级, 5)
class 神龙天女技能24(被动技能):
名称 = '基础精通'
所在等级 = 1
等级上限 = 200
基础等级 = 100
关联技能 = ['念珠连射']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(0.463 + 0.089 * self.等级, 5)
# Collect instances of 神龙天女技能0..N in numeric order. Looking the class
# up in globals() replaces the original exec()-in-a-loop guarded by a bare
# except, which would also silently swallow errors raised inside a skill's
# __init__ and truncate the list.
神龙天女技能列表 = []
_技能序号 = 0
while True:
    _技能类 = globals().get('神龙天女技能' + str(_技能序号))
    if _技能类 is None:
        break
    神龙天女技能列表.append(_技能类())
    _技能序号 += 1
神龙天女技能序号 = dict()
for i in range(len(神龙天女技能列表)):
神龙天女技能序号[神龙天女技能列表[i].名称] = i
神龙天女一觉序号 = 0
神龙天女二觉序号 = 0
神龙天女三觉序号 = 0
for i in 神龙天女技能列表:
if i.所在等级 == 50:
神龙天女一觉序号 = 神龙天女技能序号[i.名称]
if i.所在等级 == 85:
神龙天女二觉序号 = 神龙天女技能序号[i.名称]
if i.所在等级 == 100:
神龙天女三觉序号 = 神龙天女技能序号[i.名称]
神龙天女护石选项 = ['无']
for i in 神龙天女技能列表:
if i.是否有伤害 == 1 and i.是否有护石 == 1:
神龙天女护石选项.append(i.名称)
神龙天女符文选项 = ['无']
for i in 神龙天女技能列表:
if i.所在等级 >= 20 and i.所在等级 <= 80 and i.所在等级 != 50 and i.是否有伤害 == 1:
神龙天女符文选项.append(i.名称)
class 神龙天女角色属性(角色属性):
    """Character attribute sheet for the 神龙天女 subclass."""
    职业名称 = '神龙天女'
    武器选项 = ['念珠']
    # damage type options: '物理百分比' (physical %), '魔法百分比' (magic %),
    # '物理固伤' (physical fixed), '魔法固伤' (magic fixed)
    伤害类型选择 = ['魔法百分比']
    # default damage type
    伤害类型 = '魔法百分比'
    防具类型 = '布甲'
    防具精通属性 = ['智力']
    主BUFF = 2.08
    # base stats (awakening included)
    基础力量 = 793.0
    基础智力 = 952.0
    # values after party/system support buffs are applied
    力量 = 基础力量
    智力 = 基础智力
    # character base + awakening
    物理攻击力 = 65.0
    魔法攻击力 = 65.0
    独立攻击力 = 1045.0
    火属性强化 = 13
    冰属性强化 = 13
    光属性强化 = 13
    暗属性强化 = 13
    远古记忆 = 0
    def __init__(self):
        # Deep copies so per-instance edits never mutate the module-level lists.
        self.技能栏= deepcopy(神龙天女技能列表)
        self.技能序号= deepcopy(神龙天女技能序号)
class 神龙天女(角色窗口):
    """UI window wiring the 神龙天女 attribute sheets and skill metadata."""

    def 窗口属性输入(self):
        # Baseline plus two independent attribute copies (A/B) for comparing builds.
        self.初始属性 = 神龙天女角色属性()
        self.角色属性A = 神龙天女角色属性()
        self.角色属性B = 神龙天女角色属性()
        # Awakening skill indices (50 / 85 / 100) computed at module load.
        self.一觉序号 = 神龙天女一觉序号
        self.二觉序号 = 神龙天女二觉序号
        self.三觉序号 = 神龙天女三觉序号
        # Copies so UI edits never mutate the shared module-level option lists.
        self.护石选项 = deepcopy(神龙天女护石选项)
        self.符文选项 = deepcopy(神龙天女符文选项)
|
[
"[email protected]"
] | |
d33b2b4cab54b838414fd70c755f3bcd6fb1580f
|
5d34d74965504c363dc294c1ba97a46393759995
|
/channels/tech_weekly_radar/app.py
|
c5371872c92041105e68e3e47f6e22824e230e65
|
[
"MIT"
] |
permissive
|
Nalorokk/reddit2telegram
|
7f898b7d17771e9de98c7f176a5a1d071f6d47d9
|
28bfc1271f40b219ee7a34e8338fa93f0d44cbd2
|
refs/heads/master
| 2020-03-18T08:29:33.946768 | 2018-05-23T04:25:52 | 2018-05-23T04:25:52 | 134,513,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,893 |
py
|
#encoding:utf-8
import csv
import importlib
import random
import datetime
import pymongo
import yaml
from utils import SupplyResult
subreddit = 'all'
t_channel = '@r_channels'
def get_active_period(r2t, channel_name):
    """Return how many whole days of stats exist for *channel_name*."""
    query = {'channel': channel_name.lower()}
    # Oldest and newest stat timestamps bracket the channel's active span.
    oldest = r2t.stats.find(query).sort([('ts', pymongo.ASCENDING)]).limit(1).next()['ts']
    newest = r2t.stats.find(query).sort([('ts', pymongo.DESCENDING)]).limit(1).next()['ts']
    return (newest - oldest).days
def get_newly_active(r2t, channels_list):
    """Return the channels whose stats history spans a month (31 days) or less."""
    return [channel for channel in channels_list
            if get_active_period(r2t, channel) <= 31]
def get_top_growers_for_last_week(r2t, channels_list):
    """Return up to 3 channel names with the largest member growth (>= 10) this week.

    Growth is the difference between the newest members_cnt stat and the
    oldest one recorded within the past 7 days.
    """
    top_growers = dict()
    now = datetime.datetime.now()
    for channel in channels_list:
        # Reset per channel: the original left these unset for channels with
        # no members_cnt record, raising NameError on the first such channel
        # and silently reusing the previous channel's counts afterwards.
        week_ago_members_cnt = None
        current_members_cnt = None
        week_ago_cursor = r2t.stats.find({
            'channel': channel.lower(),
            'ts': {'$gte': now - datetime.timedelta(days=7)}
        }).sort([('ts', pymongo.ASCENDING)]).limit(100)
        for stat_record in week_ago_cursor:
            if 'members_cnt' in stat_record:
                week_ago_members_cnt = stat_record['members_cnt']
                break
        current_cursor = r2t.stats.find({'channel': channel.lower()}).sort([('ts', pymongo.DESCENDING)]).limit(100)
        for stat_record in current_cursor:
            if 'members_cnt' in stat_record:
                current_members_cnt = stat_record['members_cnt']
                break
        if week_ago_members_cnt is None or current_members_cnt is None:
            continue  # not enough data to compute growth for this channel
        grow = current_members_cnt - week_ago_members_cnt
        if grow >= 10:
            top_growers[channel] = grow
    return sorted(top_growers, key=top_growers.get, reverse=True)[:3]
def send_post(submission, r2t):
    """Compose and send the weekly channels digest.

    *submission* is ignored: this app broadcasts aggregate stats about all
    registered channels instead of reposting reddit content. Returns
    SupplyResult.STOP_THIS_SUPPLY because there is nothing more to supply.
    """
    config_filename = 'configs/prod.yml'
    with open(config_filename) as config_file:
        # safe_load avoids arbitrary-object construction and keeps working on
        # PyYAML >= 6, where yaml.load() without an explicit Loader fails.
        config = yaml.safe_load(config_file)
    # Collect every public channel registered in the cron file.
    channels_list = list()
    with open(config['cron_file']) as tsv_file:
        tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
        for row in tsv_reader:
            submodule_name = row['submodule_name']
            submodule = importlib.import_module('channels.{}.app'.format(submodule_name))
            channel_name = submodule.t_channel
            if ('@' in channel_name) and (channel_name not in ['@r_channels_test', '@r_channels']):
                channels_list.append(channel_name)
    newly_active = get_newly_active(r2t, channels_list)
    text_to_send = '<b>Weekend news</b>\n\n'
    if len(newly_active) > 0:
        text_to_send += '🎉 Welcome to newly active channels: {channels_list}. 🎈🎈\n\n'.format(channels_list=', '.join(newly_active))
    # One random channel gets the "channel of the week" spotlight.
    text_to_send += '🏆 Channel of the week: {channel_name}. Join and enjoy!\n\n'.format(channel_name=random.choice(channels_list))
    top_growers = get_top_growers_for_last_week(r2t, channels_list)
    if len(top_growers) > 0:
        text_to_send += '🔥 Hottest channels of the week: {channels}.\n\n'.format(channels=', '.join(top_growers))
    # Full channel list in a fresh random order each week.
    list_of_channels = ['{n}. {channel}'.format(n=str(i + 1).zfill(2), channel=channel)
                        for i, channel in enumerate(random.sample(channels_list, k=len(channels_list)))]
    text_to_send += '⬇️ All active channels:\n{list_of_channels}\n\n'.format(list_of_channels='\n'.join(list_of_channels))
    text_to_send += '🙋\nQ: How can I help?\nA: Promote your favorite channels!\n\n'
    text_to_send += 'Q: How to make similar channels?\nA: Ask here or use manual at https://github.com/Fillll/reddit2telegram.\n\n'
    text_to_send += 'Q: Where to donate?\nA: http://bit.ly/r2t_donate'
    r2t.send_text(text_to_send, parse_mode='HTML')
    # It's not a proper supply, so just stop.
    return SupplyResult.STOP_THIS_SUPPLY
|
[
"[email protected]"
] | |
7a0a3a277b02addb1e326d10fb728c20339483e7
|
d9a11615b57624a47e4719222ffd346eedbbabc1
|
/tests/test_flow.py
|
cbb43664378e920dfe878bdfd884a44676142e9b
|
[] |
no_license
|
mattjegan/pyzerem
|
79461659521bf98551d8b54e74861a0609db29e3
|
d3fe9fb54454b14747cc1d238961a93b854aee46
|
refs/heads/master
| 2021-04-28T21:12:13.909647 | 2018-02-19T11:13:54 | 2018-02-19T11:13:54 | 121,944,907 | 0 | 0 | null | 2018-02-18T11:19:44 | 2018-02-18T11:19:44 | null |
UTF-8
|
Python
| false | false | 1,568 |
py
|
from zerem import Flow, Slot, process
class TestFlow(object):
    """Unit tests for Flow: slot registration, process registration, and
    attribute-set-triggered process dispatch."""

    def test_slots_register(self):
        """
        Tests that slot is added to the flows available slots
        """
        class MyFlow(Flow):
            slot = Slot()

        m = MyFlow()
        # Slots are recorded by name in __flow_available with a 0 marker.
        assert getattr(m, '__flow_available') == {
            'slot': 0,
        }

    def test_processes_register(self):
        """
        Test that the process is added to the flows processes/watchers
        """
        class MyFlow(Flow):
            @process
            def step1(self):
                pass

        m = MyFlow()
        # Watchers pair the process's argument names with its bound method.
        assert getattr(m, '__flow_watchers') == [
            (['self'], m.step1),
        ]

    def test_setattr_triggers_methods(self):
        """
        Tests that setting a slot triggers appropriate processes
        """
        class MyFlow(Flow):
            slot = Slot()
            triggered = False

            @process
            def step1(self, slot):
                self.triggered = True

        m = MyFlow()
        # Assigning the slot should fire step1, which takes (self, slot).
        m.slot = 'test_value'
        assert m.triggered is True

    def test_setattr_does_not_trigger_when_wrong_args(self):
        """
        Tests that setting a slot does not trigger processes it shouldn't
        """
        class MyFlow(Flow):
            slot = Slot()
            triggered = False

            @process
            def step1(self, slot, nonexistant):
                self.triggered = True

        m = MyFlow()
        # step1 also requires 'nonexistant', which never gets a value,
        # so setting 'slot' alone must not fire it.
        m.slot = 'test_value'
        assert m.triggered is False
|
[
"[email protected]"
] | |
d1d0508de70a0ada37a1c3e68468cb649846a73f
|
9a423dfb84041a926970e10afad93f15619a34d8
|
/backend/google_helpers/utils.py
|
cc3592f7ee47ae2ca715dbd7623e04aa1cc1fb21
|
[] |
no_license
|
Babalwa01/Tilde
|
3c2d6295b3d5e8a0cce1331f657ad835688a4db5
|
8eaffeb2c6b78aec4f0d6b5f573106e0a705ae53
|
refs/heads/master
| 2023-05-28T23:06:49.205259 | 2021-05-18T08:41:14 | 2021-05-18T08:41:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,651 |
py
|
import logging
import pandas as pd
from functools import lru_cache
import re
from timezone_helpers import timestamp_str_to_tz_aware_datetime
from google_helpers.constants import TIMESTAMP_FORMAT, TIMEZONE_NAME
def timestamp_to_datetime(timestamp):
return timestamp_str_to_tz_aware_datetime(
timestamp=timestamp, zone_name=TIMEZONE_NAME, dt_format=TIMESTAMP_FORMAT
)
def fetch_sheet(sheet: str = None, url: str = None):
print(f"Fetching sheet: {sheet} {url}")
service = authorize()
if sheet:
book = service.open(sheet)
elif url:
book = service.open_by_url(url)
logging.info(f"fetched sheet {sheet}")
sheet = book.sheet1 # choose the first sheet
return pd.DataFrame(sheet.get_all_records())
def authorize():
import json
from oauth2client.client import SignedJwtAssertionCredentials
import gspread
import os
# insert name of json service account key
SCOPE = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
SECRETS_FILE = os.getenv("GOOGLE_SHEETS_CREDENTIALS_FILE")
if not SECRETS_FILE:
raise Exception(
"Missing environmental variable: GOOGLE_SHEETS_CREDENTIALS_FILE"
)
# Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html
# Load in the secret JSON key in working directory (must be a service account)
json_key = json.load(open(SECRETS_FILE))
# Authenticate using the signed key
credentials = SignedJwtAssertionCredentials(
json_key["client_email"], json_key["private_key"], SCOPE
)
ret = gspread.authorize(credentials)
return ret
# def date_from_args(date): # Not tz aware
# if type(date) is datetime.datetime:
# return date.date()
# for dt_format in [
# "%m/%d/%Y %H:%M:%S",
# "%m/%d/%Y %H:%M",
# "%m/%d/%Y",
# "%d/%m/%Y",
# "%d/%m/%Y %H:%M",
# "%d/%m/%Y %H:%M:%S",
# "%Y/%m/%d %H:%M:%S",
# ]:
# try:
# return datetime.datetime.strptime(date, dt_format).date()
# except ValueError:
# pass
# raise Exception(f"date '{date}' not allowed")
# def timestamp_to_date(timestamp): # Not tz aware
# return timestamp_to_datetime(timestamp).date()
def clean_project_url_part(df, source_col, dest_col):
def mapper(row):
found = re.match(".*(projects/.*$)", str(row[source_col]))
if found:
return found.groups()[0]
return ""
df[dest_col] = df.apply(mapper, axis=1)
df = df[df[source_col].str.contains("projects/")]
return df
|
[
"[email protected]"
] | |
25f36707799253f370eeb2ff989176d7430e52ac
|
0c84cc9a2c06594e01835a617a7d5866f9db68a4
|
/importing-example/example_2/use_animals.py
|
a55375cf73a3a4e3806d644da20f0e1ba7b9f72f
|
[] |
no_license
|
01-Jacky/Python-Things
|
a508ac4161c0f836fb793bd07e8c69ff0f3d6e1d
|
5153a27cdf9dc17ec3344c2774674c7f92156cf6
|
refs/heads/master
| 2021-03-19T16:59:50.000741 | 2018-04-04T23:48:46 | 2018-04-04T23:48:46 | 100,906,338 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 314 |
py
|
# Import classes from your brand new package
from package.Animals import Mammals
from package.Animals import Birds
# Create an object of Mammals class & call a method of it
myMammal = Mammals()
myMammal.printMembers()
# Create an object of Birds class & call a method of it
myBird = Birds()
myBird.printMembers()
|
[
"[email protected]"
] | |
043f39369ddb5869a0f589beb403b66748b3f3a0
|
ceedf463269728f0257030671917f9fc979c720a
|
/popula.py
|
8a2b370eed09596f677c02302927860324171dcd
|
[] |
no_license
|
weltonvaz/Zumbis
|
4a8bc213b2d7380b0ef4f3672c6a36b45f3f5c0a
|
da760e9f258c03660a2eae1439190ce36dee716d
|
refs/heads/master
| 2021-01-19T08:33:58.430648 | 2015-04-17T11:59:11 | 2015-04-17T11:59:11 | 32,888,135 | 0 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Problema de crescimento populacional
# Desenvolvido por Evaldo Junior (InFog)
# http://evaldojunior.com.br/blog
popA, popB, anos = 80000, 200000, 0
cresA, cresB = 0.03, 0.015 # Crescimentos de 3% e 1,5% ao ano
while (popA < popB):
anos += 1
popA = popA + (popA * cresA)
popB = popB + (popB * cresB)
print("Após %i anos o país A ultrapassou o país B em número de habitantes." % anos)
print("País A: %.0f" % popA)
print("País B: %.0f" % popB)
|
[
"[email protected]"
] | |
65a01fd1f09658838b02901d836cc99d3fe44dd1
|
ed37a985a7411fb3b8f29282a81f1d823f8f4afc
|
/pascal_triangle/implementations/cython/base.py
|
5289433918abcc9fb01106fd869644cc623a41fb
|
[] |
no_license
|
dmugtasimov/pascal_triangle
|
5b310451582f6fc2ddc74f316259c6ec9fc4ec4b
|
875deac43300a42560f0433a92e5f1e0475bb754
|
refs/heads/master
| 2021-06-16T10:55:11.338999 | 2017-04-11T17:20:54 | 2017-04-11T17:20:54 | 35,548,062 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
from pascal_triangle.implementations.base import PascalTriangleBase
class CyPascalTriangleBase(PascalTriangleBase):
language = 'Cython'
|
[
"[email protected]"
] | |
5aabfdaa690e6d5f51e29d29af16c5f7bbebe551
|
f9c7969c8649c484f2460fb245a3d5bd6870fa5a
|
/ch07/exercises/exercise 50.py
|
57914cc3e70dcbd399eceb03ac689bf9eefd314c
|
[] |
no_license
|
Pshypher/tpocup
|
78cf97d51259bfea944dc205b9644bb1ae4ab367
|
b05b05728713637b1976a8203c2c97dbbfbb6a94
|
refs/heads/master
| 2022-05-18T13:11:31.417205 | 2020-01-07T13:50:06 | 2020-01-07T13:50:06 | 260,133,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 792 |
py
|
# Program written to transfer the elements of a list to another
# list whilst rearranging the order in which the elements appear
# Unless stated otherwise, variables are assumed to be of type int
def transform(list1, list2, r1, r2):
"""Removes items from list1 in the slice r1:r2, appends them onto list2
in reverse order; Returns the resulting list."""
slice_lst = list1[r1:r2] # r1 < r2
slice_lst.reverse() # reverse the order of the slice
list2.extend(slice_lst) # add the elements sliced from list1
# now reversed to list2
return list2
# Test that the function above works as expected
list1 = [1,2,3,4,5,6,7,8,9]
list2 = [100,200]
transform(list1, list2, 4, 7)
print(list2) # displays [100,200,7,6,5]
|
[
"[email protected]"
] | |
6a71ee61962bf5aaad4affa272e4d5ea139738fa
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_106/run_cfg.py
|
9695dd7ce6e979864c86ead25607ebeee3e6d533
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,495 |
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_572.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_573.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_574.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_575.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_576.root')
)
|
[
"[email protected]"
] | |
1a0e532b26b8e1f4e25a0bdf0c0d61114323d61c
|
e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1
|
/String/test_q014_longest_common_prefix.py
|
5c11b0ca85d14ca6dca237e3305afcd9f12663cf
|
[] |
no_license
|
sevenhe716/LeetCode
|
41d2ef18f5cb317858c9b69d00bcccb743cbdf48
|
4a1747b6497305f3821612d9c358a6795b1690da
|
refs/heads/master
| 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 660 |
py
|
import unittest
from String.q014_longest_common_prefix import SolutionF
class TestLongestCommonPrefix(unittest.TestCase):
"""Test q014_longest_common_prefix.py"""
def test_longest_common_prefix(self):
s = SolutionF()
self.assertEqual('fl', s.longestCommonPrefix(["flower", "flow", "flight"]))
self.assertEqual('', s.longestCommonPrefix(["flower", "flow", ""]))
self.assertEqual('f', s.longestCommonPrefix(["flower", "flow", "f"]))
self.assertEqual('', s.longestCommonPrefix(["dog", "racecar", "car"]))
self.assertEqual('', s.longestCommonPrefix([]))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.