blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cf7998ad81daa62c4fba1aad1cf014844efd51c8
|
c532e4d7466188ebbcca32413f592491eac9d7f8
|
/leetcode/392.is-subsequence.py
|
e6cd21e9c3757a79129f956abc34459736ec87de
|
[] |
no_license
|
Zedmor/hackerrank-puzzles
|
a1ff8601ea6d2bb3d2095909dfe00ef32346b74f
|
2cc179bdb33a97294a2bf99dbda278e935165943
|
refs/heads/master
| 2023-01-10T13:57:26.649360 | 2023-01-04T03:27:05 | 2023-01-04T03:27:05 | 68,768,901 | 0 | 0 | null | 2017-03-05T18:24:18 | 2016-09-21T01:46:35 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,609 |
py
|
#
# @lc app=leetcode id=392 lang=python3
#
# [392] Is Subsequence
#
# https://leetcode.com/problems/is-subsequence/description/
#
# algorithms
# Easy (47.97%)
# Total Accepted: 233.6K
# Total Submissions: 474.3K
# Testcase Example: '"abc"\n"ahbgdc"'
#
# Given a string s and a string t, check if s is subsequence of t.
#
# A subsequence of a string is a new string which is formed from the original
# string by deleting some (can be none) of the characters without disturbing
# the relative positions of the remaining characters. (ie, "ace" is a
# subsequence of "abcde" while "aec" is not).
#
# Follow up:
# If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you
# want to check one by one to see if T has its subsequence. In this scenario,
# how would you change your code?
#
# Credits:
# Special thanks to @pbrother for adding this problem and creating all test
# cases.
#
#
# Example 1:
# Input: s = "abc", t = "ahbgdc"
# Output: true
# Example 2:
# Input: s = "axc", t = "ahbgdc"
# Output: false
#
#
# Constraints:
#
#
# 0 <= s.length <= 100
# 0 <= t.length <= 10^4
# Both strings consists only of lowercase characters.
#
#
#
class Solution:
    """
    Two-pointer check that s is a subsequence of t (LeetCode 392).

    >>> Solution().isSubsequence('abc', 'ahbgdc')
    True
    >>> Solution().isSubsequence('axc', 'ahbgdc')
    False
    """
    def isSubsequence(self, s: str, t: str) -> bool:
        # Scan t once, consuming characters of s in order as they appear.
        remaining = iter(s)
        needed = next(remaining, None)
        if needed is None:
            return True  # the empty string is a subsequence of anything
        for ch in t:
            if ch == needed:
                needed = next(remaining, None)
                if needed is None:
                    return True  # all of s matched
        return False
|
[
"[email protected]"
] | |
56a53cf0a36b5b36076f79e659a49128f7fa1265
|
ada026a8588611f18a0bae44619aea6dc89c07a7
|
/backend/event/models.py
|
94899bc08145ed779dc022f61534cb2e63f156b5
|
[] |
no_license
|
crowdbotics-apps/iqraa-25096
|
5a363ec49766352d23de9348bfddcaed187b98c8
|
42def0722c287182c100ef46a4284236fbd2f04e
|
refs/heads/master
| 2023-03-22T23:40:21.685747 | 2021-03-18T09:17:50 | 2021-03-18T09:17:50 | 349,008,088 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,882 |
py
|
from django.conf import settings
from django.db import models
class Vendor(models.Model):
    """Generated model: a vendor, optionally tied to a Location and Category."""
    name = models.TextField()
    # NOTE(review): image fields in this module are SlugFields — presumably a
    # slug/key referencing an uploaded asset rather than the file itself;
    # confirm against the upload-handling code.
    logo_image = models.SlugField(
        null=True,
        blank=True,
        max_length=50,
    )
    # `type` shadows the builtin, but renaming it would change the DB column/API.
    type = models.TextField(
        null=True,
        blank=True,
    )
    website = models.URLField(
        null=True,
        blank=True,
    )
    # CASCADE: deleting the referenced Location also deletes this Vendor.
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendor_location",
    )
    category = models.ForeignKey(
        "event.Category",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendor_category",
    )
class MySchedule(models.Model):
    """Generated model: join between a users.User and an event.Schedule."""
    # CASCADE on both sides: removing the user or the schedule removes this row.
    user = models.ForeignKey(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="myschedule_user",
    )
    schedule = models.ForeignKey(
        "event.Schedule",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="myschedule_schedule",
    )
class Faq(models.Model):
    """Generated model: a FAQ entry — a required title plus free-text answer."""
    title = models.CharField(
        max_length=256,
    )
    description = models.TextField()
class Sponsor(models.Model):
    """Generated model: an event sponsor.

    Unlike Vendor, name/logo_image/sponsor_level/presenter are required
    (no null/blank).
    """
    name = models.TextField()
    # Slug referencing the logo asset — see the note on Vendor.logo_image.
    logo_image = models.SlugField(
        max_length=50,
    )
    sponsor_level = models.TextField()
    # Whether this sponsor is also a presenter.
    presenter = models.BooleanField()
    website = models.URLField(
        null=True,
        blank=True,
    )
    # CASCADE: deleting the Location also deletes this Sponsor.
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="sponsor_location",
    )
class Favorites(models.Model):
    """Generated model: join marking a users.User's favorite event.Vendor."""
    user = models.ForeignKey(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="favorites_user",
    )
    vendor = models.ForeignKey(
        "event.Vendor",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="favorites_vendor",
    )
class VendorDetail(models.Model):
    """Generated model: extended detail record for an event.Vendor."""
    website = models.URLField()
    description = models.TextField()
    associated_name = models.TextField(
        null=True,
        blank=True,
    )
    # NOTE(review): naming a ForeignKey `vendor_id` makes Django create a DB
    # column `vendor_id_id`; awkward, but renaming now would need a migration.
    vendor_id = models.ForeignKey(
        "event.Vendor",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="vendordetail_vendor_id",
    )
class Location(models.Model):
    """Generated model: a venue/location referenced by Vendor, Sponsor and Schedule."""
    # Free-text amenity description (all fields optional).
    amenities = models.TextField(
        null=True,
        blank=True,
    )
    name = models.CharField(
        null=True,
        blank=True,
        max_length=256,
    )
    # Slug referencing the location image — see the note on Vendor.logo_image.
    image = models.SlugField(
        null=True,
        blank=True,
        max_length=50,
    )
class Presenter(models.Model):
    """Generated model: a presenter attached to one event.Schedule slot."""
    name = models.CharField(
        max_length=256,
    )
    title = models.CharField(
        max_length=256,
    )
    # Required FK (unlike most in this module): every Presenter must have a
    # Schedule, and deleting the Schedule deletes its Presenters.
    schedule = models.ForeignKey(
        "event.Schedule",
        on_delete=models.CASCADE,
        related_name="presenter_schedule",
    )
class Schedule(models.Model):
    """Generated model: a scheduled event slot at an optional Location."""
    # camelCase field name (generator output) — kept for DB/API compatibility.
    dateTime = models.DateTimeField()
    description = models.TextField(
        null=True,
        blank=True,
    )
    # Track/stream label within the event, if any.
    track = models.TextField(
        null=True,
        blank=True,
    )
    location = models.ForeignKey(
        "event.Location",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="schedule_location",
    )
class Category(models.Model):
    """Generated model: a vendor category (description required, name optional)."""
    description = models.TextField()
    name = models.CharField(
        null=True,
        blank=True,
        max_length=256,
    )
# Create your models here.
|
[
"[email protected]"
] | |
a65a5fe2737f2506964095d71631ff9e74b89d51
|
1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2
|
/agents/displays/human_display.py
|
4ad2949060ec0816a46e1db1e5ae89c9fd33bade
|
[
"MIT"
] |
permissive
|
cjreynol/willsmith
|
02f793003a914a21b181839bbd58108046f312d6
|
39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5
|
refs/heads/master
| 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,172 |
py
|
from tkinter import Button, Entry, Label, END
from willsmith.gui_display_controller import GUIDisplayController
class HumanDisplay(GUIDisplayController):
    """
    Creates a Tkinter GUI that allows the user to input their moves.

    Widget lifecycle hooks (_initialize_widgets, _place_widgets,
    _update_display, _reset_display) are presumably invoked by the
    GUIDisplayController base class — confirm against that class.
    """

    WINDOW_TITLE = "Human Agent"
    LABEL_FONT = ("Courier New", 14)

    def __init__(self):
        super().__init__()
        # Widgets are created later in _initialize_widgets, not here.
        self.input_prompt_label = None
        self.input_entry = None
        self.submit_button = None

    def _initialize_widgets(self):
        # `self.root` is assumed to be the Tk root created by the base class.
        # The prompt text is a placeholder; nothing here replaces it yet.
        self.input_prompt_label = Label(self.root, font = self.LABEL_FONT,
                                        text = "<prompt here>")
        self.input_entry = Entry(self.root)
        # NOTE(review): no command= callback is wired to the button here.
        self.submit_button = Button(self.root, text = "Submit")

    def _place_widgets(self):
        # Prompt spans both columns; entry and button share the second row.
        self.input_prompt_label.grid(row = 0, column = 0, columnspan = 2)
        self.input_entry.grid(row = 1, column = 0)
        self.submit_button.grid(row = 1, column = 1)

    def _update_display(self, agent, action):
        # An update simply clears the entry; `action` is unused.
        self._reset_display(agent)

    def _reset_display(self, agent):
        # Clear any text left in the entry box; `agent` is unused.
        self.input_entry.delete(0, END)
def _submit_entry():
    # NOTE(review): unused placeholder — nothing binds it to the Submit
    # button, and it may have been intended as a method (it takes no self).
    pass
|
[
"[email protected]"
] | |
9999bb084c19897bd8e0f40f1449c5ab8305baec
|
2a6d385c7737aea3c6b49eef9252babb7557b909
|
/MCTools/test/lheTreeMaker.py
|
7c6aa48eac2b99e552e3669d2e943613a8222e6a
|
[] |
no_license
|
Sam-Harper/usercode
|
1b302a4b647e479d27a9501f9576bd04b07e111a
|
fa43427fac80d773978ea67b78be58d264f39ec8
|
refs/heads/120XNtup
| 2022-08-26T12:59:53.388853 | 2022-07-12T16:52:46 | 2022-07-12T16:52:46 | 15,675,175 | 1 | 11 | null | 2022-07-21T13:27:57 | 2014-01-06T13:54:22 |
Python
|
UTF-8
|
Python
| false | false | 2,051 |
py
|
# Import configurations
# CMSSW (Python 2) configuration: reads LHE files and runs the LHETreeMaker
# analyzer, writing a ROOT tree.  Expected invocation:
#   cmsRun <this file> <input files...> <datasetCode> <output file>
import FWCore.ParameterSet.Config as cms

# set up process
process = cms.Process("PDF")

# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
    reportEvery = cms.untracked.int32(10000),
    limit = cms.untracked.int32(10000000)
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )

# -1 = process every event in the input files
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

import sys
# Pick a file prefix from the path style of the first input file:
#   /pnfs/...  -> read via dcap from the RAL storage element
#   /store/... -> CMS logical path, no prefix needed
#   otherwise  -> plain local file
filePrefex="file:"
if(sys.argv[2].find("/pnfs/")==0):
    filePrefex="dcap://heplnx209.pp.rl.ac.uk:22125"
if(sys.argv[2].find("/store/")==0):
    filePrefex=""
process.source = cms.Source("LHESource",
    # fileNames = cms.untracked.vstring(filePrefex+sys.argv[2]),
    # inputCommands = cms.untracked.vstring("drop *","keep *_source_*_*"),
    fileNames = cms.untracked.vstring(),
)
# argv[2] .. argv[-3] are input files; the last two args are the dataset
# code and the output filename (consumed below).
for i in range(2,len(sys.argv)-2):
    print filePrefex+sys.argv[i]
    process.source.fileNames.extend([filePrefex+sys.argv[i],])

process.lheTreeMaker = cms.EDAnalyzer("LHETreeMaker",
    datasetCode=cms.int32(-1),
    # lheEventTag=cms.InputTag("externalLHEProducer"),
    lheEventTag=cms.InputTag("source"),
)
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("output.root")
)

isCrabJob=False #script seds this if its a crab job

# if 1, its a crab job...
# CRAB rewrites isCrabJob above (via sed) and the OUTPUTFILE/DATASETCODE
# placeholders below before submission.
if isCrabJob:
    print "using crab specified filename"
    process.TFileService.fileName= "OUTPUTFILE"
    process.lheTreeMaker.datasetCode = DATASETCODE
else:
    print "using user specified filename"
    process.TFileService.fileName= sys.argv[len(sys.argv)-1]
    process.lheTreeMaker.datasetCode = int(sys.argv[len(sys.argv)-2])

process.p = cms.Path(
    process.lheTreeMaker)
|
[
"[email protected]"
] | |
22fcbd946d08b1b0360883cebf92843acdabaae0
|
853c6a09af16fd4dd8a53efa8bde631e63315b59
|
/Programmers/correct braket.py
|
0936aa47db0765ce63be6d8daa769dea7d790e1b
|
[] |
no_license
|
Areum0921/Abox
|
92840897b53e9bbab35c0e0aae5a017ab19a0500
|
f4739c0c0835054afeca82484769e71fb8de47c8
|
refs/heads/master
| 2021-12-13T11:16:33.583366 | 2021-10-10T08:09:50 | 2021-10-10T08:09:50 | 176,221,995 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
def solution(s):
    """Return True iff the parentheses in *s* are balanced.

    Any character that is not '(' is treated as a closing paren — same
    contract as the original stack-based check, expressed with a counter.
    """
    depth = 0
    for ch in s:
        if ch == '(':
            depth += 1
        else:
            depth -= 1
            if depth < 0:
                return False  # a closer appeared with no open paren left
    # Leftover openers mean the string is unbalanced.
    return depth == 0
|
[
"[email protected]"
] | |
4b3961aa5d8906bd87af450467577e695d335f83
|
b0c0008213c633e6d32d8536a98934047f38ba17
|
/consumer.py
|
e8cd2071c6864f984bb83cc67f04e9e66677ddc7
|
[] |
no_license
|
benthomasson/kafka-test
|
8363f6a880544a6037e88d01b33954524b3b38ac
|
95b1e89dd5a009b47a35ac5886c1980e2c5d5fcc
|
refs/heads/master
| 2020-06-13T17:34:55.464840 | 2019-07-01T19:49:22 | 2019-07-01T19:49:22 | 194,734,088 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
consumer [options]
Options:
-h, --help Show this page
--debug Show debug logging
--verbose Show verbose logging
"""
from docopt import docopt
import logging
import sys
from kafka import KafkaConsumer

logger = logging.getLogger('consumer')


def main(args=None):
    """Parse CLI flags, configure logging, and print every message from the
    'my_favorite_topic' Kafka topic.

    Blocks indefinitely in the consumer loop; returns 0 only if the
    consumer iterator ends.
    """
    if args is None:
        args = sys.argv[1:]
    # The module docstring above is the usage text docopt parses.
    parsed_args = docopt(__doc__, args)
    if parsed_args['--debug']:
        logging.basicConfig(level=logging.DEBUG)
    elif parsed_args['--verbose']:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
    # auto_offset_reset='earliest': a new consumer group starts from the
    # beginning of the topic instead of only seeing new messages.
    consumer = KafkaConsumer('my_favorite_topic', bootstrap_servers='127.0.0.1:9092', group_id="mygroup", auto_offset_reset='earliest')
    for msg in consumer:
        print(msg)
    return 0


if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | |
7491ded17babba2e25a320468b4c7f2d03ec8014
|
2d67afd40a0425c843aa8643df9f7d5653ad0369
|
/python/leetcode/836_Rectangle_Overlap.py
|
07a9c93dabe68189566acdbdd57f7dd25eead09a
|
[] |
no_license
|
bobcaoge/my-code
|
2f4ff5e276bb6e657f5a63108407ebfbb11fbf64
|
70bdd75b6af2e1811c1beab22050c01d28d7373e
|
refs/heads/master
| 2022-12-23T22:38:10.003058 | 2020-07-02T03:52:43 | 2020-07-02T03:52:43 | 248,733,053 | 0 | 0 | null | 2022-12-10T05:41:57 | 2020-03-20T10:55:55 |
Python
|
UTF-8
|
Python
| false | false | 683 |
py
|
# /usr/bin/python3.6
# -*- coding:utf-8 -*-
class Solution(object):
    """Axis-aligned rectangle overlap (LeetCode 836).

    A rectangle is [x1, y1, x2, y2]: bottom-left then top-right corner.
    Rectangles that merely touch on an edge or corner do NOT overlap.
    """

    def isRectangleOverlap(self, rec1, rec2):
        """
        :type rec1: List[int]
        :type rec2: List[int]
        :rtype: bool
        """
        # Overlap iff the projections on both axes strictly intersect.
        overlap_x = rec1[0] < rec2[2] and rec2[0] < rec1[2]
        overlap_y = rec1[1] < rec2[3] and rec2[1] < rec1[3]
        return overlap_x and overlap_y

    def isRectangleOverlap1(self, rec1, rec2):
        """
        :type rec1: List[int]
        :type rec2: List[int]
        :rtype: bool
        """
        # Same predicate, written over unpacked corner coordinates.
        ax1, ay1, ax2, ay2 = rec1
        bx1, by1, bx2, by2 = rec2
        return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2
def main():
    # Placeholder driver: constructs a Solution but exercises nothing.
    s = Solution()


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
8f73960e9cd985f473f88967aa7424ab07f7bcbe
|
aa692f369966074141873a473894362913750e01
|
/reportform/asgi.py
|
52f03e6cc3a8f2461ec7170e6d02e3bf734d97bc
|
[] |
no_license
|
yashacon/Progress_form
|
d8747d6ba28266cabd2c88ecfdcf4816c7350569
|
0f26733383f79e9e34992cd12a308a410c27f37f
|
refs/heads/master
| 2022-04-22T14:47:05.119632 | 2020-04-19T15:14:16 | 2020-04-19T15:14:16 | 257,029,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
ASGI config for reportform project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportform.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
c33743585b9a553e3d3858a7fff83eb8abfe4436
|
7f1d31cf00f8a1fc175d67c7be6e11367179d3f6
|
/tests/nlu/extractors/test_extractor.py
|
b0739e047c43aac5b670854f89971dc56ef5e29e
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
russosanti/rasa
|
226ec14e3a326ba2ad9cb0aae57c79465c88b5ab
|
21fb0cc8e92cf270e4228438cb386f1d6f364563
|
refs/heads/master
| 2023-04-07T13:25:53.848512 | 2020-04-16T21:59:58 | 2020-04-16T21:59:58 | 256,278,604 | 0 | 1 |
Apache-2.0
| 2020-04-16T17:05:06 | 2020-04-16T17:05:05 | null |
UTF-8
|
Python
| false | false | 7,622 |
py
|
from typing import Any, Text, Dict, List
import pytest
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.training_data import Message
from rasa.nlu.extractors.extractor import EntityExtractor
# Each case supplies: the raw text, its tokenization, the raw entity
# predictions, the `keep` flag, and the expected cleaned-up entities.
@pytest.mark.parametrize(
    "text, tokens, entities, keep, expected_entities",
    [
        # keep=False: entities not aligned to token boundaries are dropped.
        (
            "Aarhus is a city",
            [
                Token("Aar", 0, 3),
                Token("hus", 3, 6),
                Token("is", 7, 9),
                Token("a", 10, 11),
                Token("city", 12, 16),
            ],
            [
                {"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
                {"entity": "city", "start": 3, "end": 6, "value": "hus"},
                {"entity": "location", "start": 12, "end": 16, "value": "city"},
            ],
            False,
            [{"entity": "location", "start": 12, "end": 16, "value": "city"}],
        ),
        # keep=True, conflicting entity types over one word, no confidences:
        # nothing can be kept.
        (
            "Aarhus",
            [Token("Aar", 0, 3), Token("hus", 3, 6)],
            [
                {"entity": "iata", "start": 0, "end": 3, "value": "Aar"},
                {"entity": "city", "start": 3, "end": 6, "value": "hus"},
            ],
            True,
            [],
        ),
        # Entities already aligned to tokens pass through unchanged.
        (
            "Aarhus city",
            [Token("Aarhus", 0, 6), Token("city", 7, 11)],
            [
                {"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
                {"entity": "type", "start": 7, "end": 11, "value": "city"},
            ],
            False,
            [
                {"entity": "city", "start": 0, "end": 6, "value": "Aarhus"},
                {"entity": "type", "start": 7, "end": 11, "value": "city"},
            ],
        ),
        # keep=True with confidences: per the expected output, the
        # higher-confidence entity (0.87) wins and is expanded to the word.
        (
            "Aarhus is a city",
            [
                Token("Aar", 0, 3),
                Token("hus", 3, 6),
                Token("is", 7, 9),
                Token("a", 10, 11),
                Token("city", 12, 16),
            ],
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 3,
                    "confidence": 0.87,
                    "value": "Aar",
                },
                {
                    "entity": "iata",
                    "start": 3,
                    "end": 6,
                    "confidence": 0.43,
                    "value": "hus",
                },
                {"entity": "location", "start": 12, "end": 16, "value": "city"},
            ],
            True,
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 6,
                    "confidence": 0.87,
                    "value": "Aarhus",
                },
                {"entity": "location", "start": 12, "end": 16, "value": "city"},
            ],
        ),
        # Four sub-word fragments with alternating types: the best-confidence
        # type covers the whole word.
        (
            "Aarhus",
            [Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
            [
                {
                    "entity": "iata",
                    "start": 0,
                    "end": 2,
                    "confidence": 0.32,
                    "value": "Aa",
                },
                {
                    "entity": "city",
                    "start": 2,
                    "end": 3,
                    "confidence": 0.87,
                    "value": "r",
                },
                {
                    "entity": "iata",
                    "start": 3,
                    "end": 5,
                    "confidence": 0.21,
                    "value": "hu",
                },
                {
                    "entity": "city",
                    "start": 5,
                    "end": 6,
                    "confidence": 0.43,
                    "value": "s",
                },
            ],
            True,
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 6,
                    "confidence": 0.87,
                    "value": "Aarhus",
                }
            ],
        ),
        # A single partial entity, keep=True: span extends to the full word.
        (
            "Aarhus",
            [Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 2,
                    "confidence": 0.32,
                    "value": "Aa",
                }
            ],
            True,
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 6,
                    "confidence": 0.32,
                    "value": "Aarhus",
                }
            ],
        ),
        # Same partial entity with keep=False: it is dropped entirely.
        (
            "Aarhus",
            [Token("Aa", 0, 2), Token("r", 2, 3), Token("hu", 3, 5), Token("s", 5, 6)],
            [
                {
                    "entity": "city",
                    "start": 0,
                    "end": 2,
                    "confidence": 0.32,
                    "value": "Aa",
                }
            ],
            False,
            [],
        ),
        # Multi-token value misaligned at its end, keep=False: dropped.
        (
            "Buenos Aires is a city",
            [
                Token("Buenos", 0, 6),
                Token("Ai", 7, 9),
                Token("res", 9, 12),
                Token("is", 13, 15),
                Token("a", 16, 17),
                Token("city", 18, 22),
            ],
            [
                {"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
                {"entity": "location", "start": 18, "end": 22, "value": "city"},
            ],
            False,
            [{"entity": "location", "start": 18, "end": 22, "value": "city"}],
        ),
        # Same input with keep=True: span extends to the full "Buenos Aires".
        (
            "Buenos Aires is a city",
            [
                Token("Buenos", 0, 6),
                Token("Ai", 7, 9),
                Token("res", 9, 12),
                Token("is", 13, 15),
                Token("a", 16, 17),
                Token("city", 18, 22),
            ],
            [
                {"entity": "city", "start": 0, "end": 9, "value": "Buenos Ai"},
                {"entity": "location", "start": 18, "end": 22, "value": "city"},
            ],
            True,
            [
                {"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
                {"entity": "location", "start": 18, "end": 22, "value": "city"},
            ],
        ),
        # Entity misaligned on BOTH ends, keep=True: still expanded to the
        # enclosing words.
        (
            "Buenos Aires is a city",
            [
                Token("Buen", 0, 4),
                Token("os", 4, 6),
                Token("Ai", 7, 9),
                Token("res", 9, 12),
                Token("is", 13, 15),
                Token("a", 16, 17),
                Token("city", 18, 22),
            ],
            [
                {"entity": "city", "start": 4, "end": 9, "value": "os Ai"},
                {"entity": "location", "start": 18, "end": 22, "value": "city"},
            ],
            True,
            [
                {"entity": "city", "start": 0, "end": 12, "value": "Buenos Aires"},
                {"entity": "location", "start": 18, "end": 22, "value": "city"},
            ],
        ),
    ],
)
def test_clean_up_entities(
    text: Text,
    tokens: List[Token],
    entities: List[Dict[Text, Any]],
    keep: bool,
    expected_entities: List[Dict[Text, Any]],
):
    """clean_up_entities must realign entity spans to token boundaries."""
    extractor = EntityExtractor()
    message = Message(text)
    # The extractor reads the tokenization from the message's "tokens" slot.
    message.set("tokens", tokens)
    updated_entities = extractor.clean_up_entities(message, entities, keep)
    assert updated_entities == expected_entities
|
[
"[email protected]"
] | |
eaf21fc64fa4a9963db8428a6d85332bb1f68acf
|
d2fc4d45b115fb861097657d00b3c5cb08e8a3ad
|
/scenarios/bank_account_delete/executable.py
|
22c888baa03eda9722fd271e2a6f2c9a58e213cb
|
[] |
no_license
|
jess010/balanced-python
|
81b39f0e9d3ce52d60f2453b8c98e77f07ee3acb
|
b7a6bf0430ad0299d96de15ea97d3d4ccfb4c958
|
refs/heads/master
| 2020-12-25T16:13:35.626111 | 2013-09-20T00:14:58 | 2013-09-20T00:14:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
import balanced

# Scenario script: delete one bank account via the Balanced API.
# NOTE(review): hard-coded API secret and account URI — acceptable for a
# documentation scenario, never for production code.
balanced.configure('b5de51921b2d11e389c4026ba7cac9da')

bank_account = balanced.BankAccount.find('/v1/bank_accounts/BA5YXVcU9ExcM8jXQhQt7ZY6')
bank_account.delete()
|
[
"[email protected]"
] | |
86ba2b6052d3e743fb070ef7f0e05d157df3fe4d
|
0dee7cc69ae44e30c5cb372eb17f2e469635056b
|
/AirBnB_clone_v3/api/v1/app.py
|
3b75dd5b7cb112990fe65ac206b8bb1c37bb41c1
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
HausCloud/Holberton
|
00cd25b4a489041e041551ea8f87674d53f43713
|
b39c5978698e02b9e746121d6c55d791b73e6d9b
|
refs/heads/master
| 2022-12-13T01:06:18.968047 | 2020-09-05T18:23:00 | 2020-09-05T18:23:00 | 293,129,232 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
#!/usr/bin/python3
''' py file to connect to API '''
from os import getenv
from models import storage
from api.v1.views import app_views
from flask import Flask, Blueprint, jsonify, make_response
from flask_cors import CORS

app = Flask(__name__)
app.register_blueprint(app_views)
# Allow cross-origin requests to every route for the "0.0.0.0" origin.
cors = CORS(app, resources={"/*": {"origins": "0.0.0.0"}})


@app.teardown_appcontext
def teardown_appcontext(code):
    'closes storage method'
    # `code` is the exception (or None) Flask passes at teardown; unused here.
    storage.close()


@app.errorhandler(404)
def errorhandler404(error):
    # JSON 404 body so API clients never get Flask's HTML error page.
    return make_response(jsonify({'error': 'Not found'}), 404)


if __name__ == "__main__":
    # Host/port are configurable via environment, with open defaults.
    app.run(host=getenv('HBNB_API_HOST', '0.0.0.0'),
            port=int(getenv('HBNB_API_PORT', '5000')))
|
[
"[email protected]"
] | |
3cc8dd50235d65c9fa40a0006df2519f1713d6ca
|
6343534aaf5483b3fab219c14b5c33726d5d196e
|
/shopinglyx/wsgi.py
|
59325b034274c335d3d9de6740a9eb9057a07476
|
[] |
no_license
|
Emad-ahmed/Ful_Django_Shoping_website
|
8441b2caa1214c8df9399dceed5e53fa37cd86cb
|
6eefe47749b5cd6b1a2422e9cf717e9584b13dce
|
refs/heads/main
| 2023-05-20T11:30:54.194116 | 2021-06-09T21:07:18 | 2021-06-09T21:07:18 | 375,272,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
"""
WSGI config for shopinglyx project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopinglyx.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
eb6b102c5a47abdb1fddc5dd164e9607a07d4269
|
918f0fdef0e9224aa1a0596479178618290055ec
|
/mmdet3d/core/post_processing/__init__.py
|
42eb5bf2b1a4c0fb4cb35c38a74847ade71b7eee
|
[
"Apache-2.0"
] |
permissive
|
Tsinghua-MARS-Lab/futr3d
|
b7eb3a0c9d92a58759c9c43e96bfd024a2e3de96
|
9130d71e487bad47f5dbcffd696fe9e4a838104f
|
refs/heads/main
| 2023-07-24T15:40:00.121665 | 2023-07-06T05:50:45 | 2023-07-06T05:50:45 | 499,766,918 | 188 | 27 |
MIT
| 2022-06-19T10:42:03 | 2022-06-04T08:21:17 |
Python
|
UTF-8
|
Python
| false | false | 677 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Package init: re-exports 2D NMS/merge helpers from mmdet alongside the
# 3D-specific implementations so callers import everything from one place.
from mmdet.core.post_processing import (merge_aug_bboxes, merge_aug_masks,
                                        merge_aug_proposals, merge_aug_scores,
                                        multiclass_nms)
from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms,
                        nms_bev, nms_normal_bev)
from .merge_augs import merge_aug_bboxes_3d

# Public API of this subpackage; keep in sync with the imports above.
__all__ = [
    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
    'merge_aug_scores', 'merge_aug_masks', 'box3d_multiclass_nms',
    'aligned_3d_nms', 'merge_aug_bboxes_3d', 'circle_nms', 'nms_bev',
    'nms_normal_bev'
]
|
[
"[email protected]"
] | |
5554e807f98c00e8a594c894b58a6069820180ad
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/monitor/models/NoticeOption.py
|
5782d8ff633350bf7e0c26947724e00a101fa9a3
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 |
Apache-2.0
| 2023-09-07T06:54:49 | 2018-03-22T03:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,661 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class NoticeOption(object):
    """Notification options for a monitor alarm rule (auto-generated model)."""

    def __init__(self, effectiveIntervalEnd=None, effectiveIntervalStart=None, noticeCondition=None, noticePeriod=None, noticeWay=None):
        """
        :param effectiveIntervalEnd: (Optional) End of the daily effective window; default 23:59:59
        :param effectiveIntervalStart: (Optional) Start of the daily effective window; default 00:00:00
        :param noticeCondition: (Optional) Notification condition: 1 = alarm, 2 = insufficient data, 3 = alarm recovered
        :param noticePeriod: (Optional) Notification silence period in minutes; default 24 hours. Supported values: 24h, 12h, 6h, 3h, 1h, 30min, 15min, 10min, 5min
        :param noticeWay: (Optional) Notification method: 1 = SMS, 2 = email
        """
        self.effectiveIntervalEnd = effectiveIntervalEnd
        self.effectiveIntervalStart = effectiveIntervalStart
        self.noticeCondition = noticeCondition
        self.noticePeriod = noticePeriod
        self.noticeWay = noticeWay
|
[
"[email protected]"
] | |
62be4bbd7ede776de75f9f3f2fd3dc6801ebcfda
|
1fc45a47f0e540941c87b04616f3b4019da9f9a0
|
/src/sentry/search/django/constants.py
|
d07708d74a015bdd0bd4f0411ae69587e4b956d6
|
[
"BSD-2-Clause"
] |
permissive
|
seukjung/sentry-8.15.0
|
febc11864a74a68ddb97b146cc1d2438ef019241
|
fd3cab65c64fcbc32817885fa44df65534844793
|
refs/heads/master
| 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 |
BSD-3-Clause
| 2022-10-05T18:09:54 | 2018-01-17T12:28:13 |
Python
|
UTF-8
|
Python
| false | false | 1,622 |
py
|
"""
sentry.search.django.constants
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
SORT_CLAUSES = {
'priority': 'sentry_groupedmessage.score',
'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)::int',
'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)::int',
'freq': 'sentry_groupedmessage.times_seen',
}
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
'date': "cast((julianday(sentry_groupedmessage.last_seen) - 2440587.5) * 86400.0 as INTEGER)",
'new': "cast((julianday(sentry_groupedmessage.first_seen) - 2440587.5) * 86400.0 as INTEGER)",
})
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})
ORACLE_SORT_CLAUSES = SORT_CLAUSES.copy()
ORACLE_SORT_CLAUSES.update({
'date': "(cast(sentry_groupedmessage.last_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
'new': "(cast(sentry_groupedmessage.first_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60",
})
MSSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MSSQL_SORT_CLAUSES.update({
'date': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.last_seen)",
'new': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.first_seen)",
})
MSSQL_ENGINES = set(['django_pytds', 'sqlserver_ado', 'sql_server.pyodbc'])
|
[
"[email protected]"
] | |
af07e96835aac06a7e756e51bb65e5c49aedfcfb
|
56997c84a331433225f89f168520ad8d709083c1
|
/Programmers/DFS_BFS/네트워크/network_ver1.py
|
84f304c62bd83cd9430505ab8f1a79b96656c2e5
|
[] |
no_license
|
miseop25/Back_Jun_Code_Study
|
51e080f8ecf74f7d1a8bb1da404d29c8ba52325c
|
1d993e718c37c571aae1d407054ec284dc24c922
|
refs/heads/master
| 2022-11-06T01:05:05.028838 | 2022-10-23T13:11:22 | 2022-10-23T13:11:22 | 200,828,984 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,205 |
py
|
class Node :
    """A graph vertex: an integer label plus the labels of its neighbours."""
    def __init__(self, data) :
        self.data = data   # this node's index in the graph
        self.child = []    # indices of directly connected nodes (filled later)
class Network :
    """Adjacency-matrix graph wrapper that counts connected components via DFS."""
    def __init__(self, n, computers) :
        self.nodeDict = dict()   # node index -> Node
        self.n = n
        for i in range(n) :
            self.nodeDict[i] = Node(i)
        self.connectNode(computers)
        self.check = [True for _ in range(n)]   # True = not yet visited

    def connectNode(self, computers) :
        # Build each node's child list from the adjacency matrix,
        # skipping the diagonal (self-loops).
        for i in range(self.n) :
            for j in range(self.n) :
                if i == j :
                    continue
                if computers[i][j] == 1:
                    self.nodeDict[i].child.append(j)

    def dfsNetwork(self, target) :
        # Depth-first marking of every node reachable from `target`.
        # NOTE(review): recursion depth equals component size; very large
        # components could hit Python's recursion limit.
        self.check[target.data] = False
        for i in target.child :
            if self.check[i] :
                self.dfsNetwork(self.nodeDict[i])

    def getAnswer(self) :
        # Every still-unvisited node starts (and flood-fills) a new component.
        answer = 0
        for i in range(self.n) :
            if self.check[i] :
                answer += 1
                self.dfsNetwork(self.nodeDict[i])
        return answer
def solution(n, computers):
    """Count connected components ("networks") among *n* computers.

    :param n: number of computers (graph nodes).
    :param computers: n x n adjacency matrix; computers[i][j] == 1 when
        computers i and j are directly connected.
    :return: the number of connected components.
    """
    # The original `answer = 0` was dead code — the value was always
    # overwritten by getAnswer(), so return the result directly.
    return Network(n, computers).getAnswer()


# Demo: two components — {0, 1} and {2}.
print(solution(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]))
|
[
"[email protected]"
] | |
f7716ffbae61d7010cae9cc57710c47a3dd80743
|
a9f676c06bacee1f8b27e08d3c411c89a69cfd40
|
/falmer/content/migrations/0012_homepage.py
|
c6db737a1ea4de523845df670a1e021053dc211d
|
[
"MIT"
] |
permissive
|
sussexstudent/falmer
|
1b877c3ac75a0477f155ce1a9dee93a5ada686d6
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
refs/heads/master
| 2022-12-11T19:40:12.232488 | 2020-03-20T13:01:47 | 2020-03-20T13:01:47 | 88,043,958 | 2 | 3 |
MIT
| 2022-12-08T03:17:26 | 2017-04-12T11:24:02 |
Python
|
UTF-8
|
Python
| false | false | 1,364 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 14:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import falmer.content.blocks
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated migration: creates the HomePage wagtail page model.

    Do not hand-edit the operations; generate a follow-up migration instead.
    """

    dependencies = [
        ('wagtailcore', '0039_collectionviewrestriction'),
        ('content', '0011_auto_20170622_1345'),
    ]

    operations = [
        migrations.CreateModel(
            name='HomePage',
            fields=[
                # Multi-table inheritance link to wagtailcore.Page.
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                # Officer listings are StreamFields of (title, subtitle, image) blocks.
                ('full_time_officers', wagtail.core.fields.StreamField((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=True)), ('image', falmer.content.blocks.ImageBlock())))),
                ('part_time_officers', wagtail.core.fields.StreamField((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=True)), ('image', falmer.content.blocks.ImageBlock())))),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
|
[
"[email protected]"
] | |
8e2ce871dc44558694ebd870b565b602b4058d98
|
234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5
|
/build/PureCloudPlatformClientV2/models/nlu_detection_request.py
|
2cbab60154a60c1e7d754276de069675dcb500c2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
humano7/platform-client-sdk-python
|
2a942c43cc2d69e8cb0c4113d998e6e0664fdedb
|
dd5b693b1fc90c9dcb36885d7227f11221db5980
|
refs/heads/master
| 2023-04-12T05:05:53.932393 | 2021-04-22T03:41:22 | 2021-04-22T03:41:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,350 |
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class NluDetectionRequest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        NluDetectionRequest - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> swagger type name (drives recursion in to_dict)
        self.swagger_types = {
            'input': 'NluDetectionInput',
            'context': 'NluDetectionContext'
        }
        # attribute name -> JSON key in the wire payload
        self.attribute_map = {
            'input': 'input',
            'context': 'context'
        }
        self._input = None
        self._context = None

    @property
    def input(self):
        """
        Gets the input of this NluDetectionRequest.
        The input subject to NLU detection.

        :return: The input of this NluDetectionRequest.
        :rtype: NluDetectionInput
        """
        return self._input

    @input.setter
    def input(self, input):
        """
        Sets the input of this NluDetectionRequest.
        The input subject to NLU detection.

        :param input: The input of this NluDetectionRequest.
        :type: NluDetectionInput
        """
        self._input = input

    @property
    def context(self):
        """
        Gets the context of this NluDetectionRequest.
        The context for the input to NLU detection.

        :return: The context of this NluDetectionRequest.
        :rtype: NluDetectionContext
        """
        return self._context

    @context.setter
    def context(self, context):
        """
        Sets the context of this NluDetectionRequest.
        The context for the input to NLU detection.

        :param context: The context of this NluDetectionRequest.
        :type: NluDetectionContext
        """
        self._context = context

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Fix: use dict.items() instead of six.iteritems — identical behaviour
        # on both Python 2 and 3, without relying on the six shim here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fix: comparing against an unrelated object (e.g. a string) used to
        # raise AttributeError when it had no __dict__; treat it as unequal.
        if not isinstance(other, NluDetectionRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"[email protected]"
] | |
c421d5537c5e8ad2bc05d6403e45f2fbd124db1c
|
63dd919e1551fbabdad3f311b96040742e4ecb53
|
/discpy/team.py
|
af40c20d54199589e72d24df8658cf9c23d632a3
|
[
"MIT"
] |
permissive
|
AryamanSrii/DiscPy
|
4d45d2f52c21b31bb84a17dd95af421c9d563bd6
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
refs/heads/main
| 2023-08-17T14:42:45.936056 | 2021-10-01T15:12:17 | 2021-10-01T15:12:17 | 414,887,513 | 0 | 0 |
MIT
| 2021-10-08T07:24:33 | 2021-10-08T07:24:32 | null |
UTF-8
|
Python
| false | false | 4,768 |
py
|
"""
The MIT License (MIT)
Copyright (c) 2021 The DiscPy Developers
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from . import utils
from .user import BaseUser
from .asset import Asset
from .enums import TeamMembershipState, try_enum
from typing import TYPE_CHECKING, Optional, List
if TYPE_CHECKING:
from .state import ConnectionState
from .types.team import (
Team as TeamPayload,
TeamMember as TeamMemberPayload,
)
__all__ = (
"Team",
"TeamMember",
)
class Team:
    """Represents an application team for a bot provided by Discord.

    Attributes
    -------------
    id: :class:`int`
        The team ID.
    name: :class:`str`
        The team name
    owner_id: :class:`int`
        The team's owner ID.
    members: List[:class:`TeamMember`]
        A list of the members in the team

    .. versionadded:: 1.3
    """

    __slots__ = ("_state", "id", "name", "_icon", "owner_id", "members")

    def __init__(self, state: ConnectionState, data: TeamPayload):
        self._state = state
        self.id = int(data["id"])
        self.name = data["name"]
        self._icon = data["icon"]
        self.owner_id = utils._get_as_snowflake(data, "owner_user_id")
        member_payloads = data["members"]
        self.members = [TeamMember(self, state, payload) for payload in member_payloads]

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"<{cls_name} id={self.id} name={self.name}>"

    @property
    def icon(self) -> Optional[Asset]:
        """Optional[:class:`.Asset`]: Retrieves the team's icon asset, if any."""
        if self._icon is not None:
            return Asset._from_icon(self._state, self.id, self._icon, path="team")
        return None

    @property
    def owner(self) -> Optional[TeamMember]:
        """Optional[:class:`TeamMember`]: The team's owner."""
        owner_id = self.owner_id
        return utils.get(self.members, id=owner_id)
class TeamMember(BaseUser):
    """Represents a team member in a team.

    .. container:: operations
        .. describe:: x == y
            Checks if two team members are equal.
        .. describe:: x != y
            Checks if two team members are not equal.
        .. describe:: hash(x)
            Return the team member's hash.
        .. describe:: str(x)
            Returns the team member's name with discriminator.

    .. versionadded:: 1.3

    Attributes
    -------------
    name: :class:`str`
        The team member's username.
    id: :class:`int`
        The team member's unique ID.
    discriminator: :class:`str`
        The team member's discriminator. This is given when the username has conflicts.
    avatar: Optional[:class:`str`]
        The avatar hash the team member has. Could be None.
    bot: :class:`bool`
        Specifies if the user is a bot account.
    team: :class:`Team`
        The team that the member is from.
    membership_state: :class:`TeamMembershipState`
        The membership state of the member (e.g. invited or accepted)
    """

    __slots__ = ("team", "membership_state", "permissions")

    def __init__(self, team: Team, state: ConnectionState, data: TeamMemberPayload):
        self.team = team
        raw_state = data["membership_state"]
        self.membership_state = try_enum(TeamMembershipState, raw_state)
        self.permissions = data["permissions"]
        # delegate the plain user fields (id, name, discriminator, ...) upward
        super().__init__(state=state, data=data["user"])

    def __repr__(self) -> str:
        description = (
            f"<{self.__class__.__name__} id={self.id} name={self.name!r} "
            f"discriminator={self.discriminator!r} membership_state={self.membership_state!r}>"
        )
        return description
|
[
"[email protected]"
] | |
14384b1bf58a425a8c632b2ccd3f330ffcc1c262
|
c315c2d9ea4b0d43768964c46611afca242d3cdc
|
/input_pipeline/pipeline.py
|
215a7e219e2e1ff50175e8c2946702d086a03d2b
|
[
"MIT"
] |
permissive
|
TropComplique/EDANet
|
9685c986b8e25fab7a2db14803ab713602df65a2
|
ec4fd0d2693ce4ae5b81664e22adf9bf6c81f4a7
|
refs/heads/master
| 2020-04-04T14:04:46.228608 | 2019-01-07T12:40:42 | 2019-01-07T12:40:42 | 155,986,060 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,384 |
py
|
import tensorflow as tf
from .random_rotation import random_rotation
from .color_augmentations import random_color_manipulations, random_pixel_value_scale
SHUFFLE_BUFFER_SIZE = 5000
NUM_PARALLEL_CALLS = 12
RESIZE_METHOD = tf.image.ResizeMethod.BILINEAR
MIN_CROP_SIZE = 0.9
ROTATE = False
class Pipeline:
    """tf.data input pipeline producing (image, label-mask) pairs from tfrecords."""
    def __init__(self, filenames, is_training, params):
        """
        During the evaluation we don't resize images.
        Arguments:
            filenames: a list of strings, paths to tfrecords files.
            is_training: a boolean.
            params: a dict.
        """
        self.is_training = is_training
        self.num_labels = params['num_labels'] # it can be None
        if is_training:
            batch_size = params['batch_size']
            height = params['image_height']
            width = params['image_width']
            self.image_size = [height, width]
        else:
            # evaluation: one image per batch, kept at native resolution
            batch_size = 1
        def get_num_samples(filename):
            # counts records by iterating the entire file once (can be slow on big shards)
            return sum(1 for _ in tf.python_io.tf_record_iterator(filename))
        num_examples = 0
        for filename in filenames:
            num_examples_in_file = get_num_samples(filename)
            assert num_examples_in_file > 0
            num_examples += num_examples_in_file
        self.num_examples = num_examples
        assert self.num_examples > 0
        dataset = tf.data.Dataset.from_tensor_slices(filenames)
        num_shards = len(filenames)
        if is_training:
            # shuffle the file order first, then records within the stream below
            dataset = dataset.shuffle(buffer_size=num_shards)
        dataset = dataset.flat_map(tf.data.TFRecordDataset)
        dataset = dataset.prefetch(buffer_size=batch_size)
        if is_training:
            dataset = dataset.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE)
        dataset = dataset.repeat(None if is_training else 1)
        dataset = dataset.map(self._parse_and_preprocess, num_parallel_calls=NUM_PARALLEL_CALLS)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(buffer_size=1)
        self.dataset = dataset
    def _parse_and_preprocess(self, example_proto):
        """What this function does:
        1. Parses one record from a tfrecords file and decodes it.
        2. (optionally) Augments it.
        Returns:
            image: a float tensor with shape [image_height, image_width, 3],
            an RGB image with pixel values in the range [0, 1].
            labels: an int tensor with shape [image_height, image_width].
            The values that it can contain are {0, 1, ..., num_labels - 1}.
            It also can contain ignore label: `num_labels`.
        """
        features = {
            'image': tf.FixedLenFeature([], tf.string),
            'masks': tf.FixedLenFeature([], tf.string)
        }
        parsed_features = tf.parse_single_example(example_proto, features)
        # get an image
        image = tf.image.decode_jpeg(parsed_features['image'], channels=3)
        # NOTE(review): image_height/image_width are computed but never used here
        image_height, image_width = tf.shape(image)[0], tf.shape(image)[1]
        image = tf.image.convert_image_dtype(image, tf.float32)
        # now pixel values are scaled to the [0, 1] range
        # get a segmentation labels
        labels = tf.image.decode_png(parsed_features['masks'], channels=1)
        if self.is_training:
            image, labels = self.augmentation(image, labels)
        labels = tf.squeeze(labels, 2)
        labels = tf.to_int32(labels)
        return image, labels
    def augmentation(self, image, labels):
        """Apply training-time augmentations jointly to the image and its mask."""
        if ROTATE:
            assert self.num_labels is not None
            # rotate one-hot masks so interpolation cannot mix label ids
            # NOTE(review): this branch drops the mask's channel dim before the
            # crop below — confirm randomly_crop_and_resize accepts rank-2 labels
            labels = tf.squeeze(labels, 2)
            binary_masks = tf.one_hot(labels, self.num_labels, dtype=tf.float32)
            image, binary_masks = random_rotation(image, binary_masks, max_angle=30, probability=0.1)
            labels = tf.argmax(binary_masks, axis=2, output_type=tf.int32)
        image, labels = randomly_crop_and_resize(image, labels, self.image_size, probability=0.9)
        image = random_color_manipulations(image, probability=0.1, grayscale_probability=0.05)
        image = random_pixel_value_scale(image, probability=0.1, minval=0.9, maxval=1.1)
        image, labels = random_flip_left_right(image, labels)
        return image, labels
def randomly_crop_and_resize(image, labels, image_size, probability=0.5):
    """Randomly crop `image` and `labels` with the same window, then resize.

    Arguments:
        image: a float tensor with shape [height, width, 3].
        labels: a float tensor with shape [height, width, 1].
        image_size: a list with two integers [new_height, new_width].
        probability: a float number, the chance of cropping; otherwise the
            whole image is kept and only resized.
    Returns:
        image: a float tensor with shape [new_height, new_width, 3].
        labels: a float tensor with shape [new_height, new_width, 1].
    """
    height = tf.shape(image)[0]
    width = tf.shape(image)[1]

    def get_random_window():
        # sample a relative crop scale in [MIN_CROP_SIZE, 1.0)
        crop_size = tf.random_uniform([], MIN_CROP_SIZE, 1.0)
        # bug fix: the sampled `crop_size` was previously ignored and the
        # window was always the fixed MIN_CROP_SIZE fraction of the image;
        # use the sampled value so the crop scale actually varies.
        crop_size_y = tf.to_int32(crop_size * tf.to_float(height))
        crop_size_x = tf.to_int32(crop_size * tf.to_float(width))
        y = tf.random_uniform([], 0, height - crop_size_y, dtype=tf.int32)
        x = tf.random_uniform([], 0, width - crop_size_x, dtype=tf.int32)
        crop_window = tf.stack([y, x, crop_size_y, crop_size_x])
        return crop_window

    whole_image_window = tf.stack([0, 0, height, width])
    do_it = tf.less(tf.random_uniform([]), probability)
    window = tf.cond(
        do_it, lambda: get_random_window(),
        lambda: whole_image_window
    )
    # crop both tensors with the same window so image and mask stay aligned
    image = tf.image.crop_to_bounding_box(image, window[0], window[1], window[2], window[3])
    labels = tf.image.crop_to_bounding_box(labels, window[0], window[1], window[2], window[3])
    image = tf.image.resize_images(image, image_size, method=RESIZE_METHOD)
    # nearest neighbour keeps label ids intact (no interpolation between classes)
    labels = tf.image.resize_images(labels, image_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return image, labels
def random_flip_left_right(image, labels):
    """Horizontally flip image and label map together with probability 0.5."""
    with tf.name_scope('random_flip_left_right'):
        flip = tf.less(tf.random_uniform([]), 0.5)
        image, labels = tf.cond(
            flip,
            lambda: (tf.image.flip_left_right(image), tf.image.flip_left_right(labels)),
            lambda: (image, labels)
        )
        return image, labels
|
[
"[email protected]"
] | |
bcd750d560e214053a6f3ae7100412b22b224db0
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/template/test_button.py
|
bfdb9352767ec5880116473e8ab445ab30b8a0a5
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 |
Apache-2.0
| 2023-09-14T21:50:15 | 2013-09-17T07:29:48 |
Python
|
UTF-8
|
Python
| false | false | 5,386 |
py
|
"""The tests for the Template button platform."""
import datetime as dt
from unittest.mock import patch
from homeassistant import setup
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.template.button import DEFAULT_NAME
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_FRIENDLY_NAME,
CONF_ICON,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get
from tests.common import assert_setup_component
_TEST_BUTTON = "button.template_button"
_TEST_OPTIONS_BUTTON = "button.test"
async def test_missing_optional_config(hass: HomeAssistant) -> None:
    """Test: missing optional template is ok."""
    # only the required `press` action is configured; everything else defaults
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "button": {
                        "press": {"service": "script.press"},
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # never pressed, so the state stays unknown
    _verify(hass, STATE_UNKNOWN)
async def test_missing_required_keys(hass: HomeAssistant) -> None:
    """Test: missing required fields will fail."""
    # assert_setup_component(0, ...) expects config validation to reject this
    with assert_setup_component(0, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {"template": {"button": {}}},
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # nothing should have been created
    assert hass.states.async_all("button") == []
async def test_all_optional_config(hass: HomeAssistant, calls) -> None:
    """Test: including all optional templates is ok."""
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "test",
                    "button": {
                        "press": {
                            "service": "test.automation",
                            "data_template": {"caller": "{{ this.entity_id }}"},
                        },
                        "device_class": "restart",
                        "unique_id": "test",
                        "name": "test",
                        "icon": "mdi:test",
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # before any press, state is unknown but the optional attributes are set
    _verify(
        hass,
        STATE_UNKNOWN,
        {
            CONF_DEVICE_CLASS: "restart",
            CONF_FRIENDLY_NAME: "test",
            CONF_ICON: "mdi:test",
        },
        _TEST_OPTIONS_BUTTON,
    )
    # freeze "now" so the post-press state timestamp can be asserted exactly
    now = dt.datetime.now(dt.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await hass.services.async_call(
            BUTTON_DOMAIN,
            SERVICE_PRESS,
            {CONF_ENTITY_ID: _TEST_OPTIONS_BUTTON},
            blocking=True,
        )
    # the press action fired exactly once and rendered `this.entity_id`
    assert len(calls) == 1
    assert calls[0].data["caller"] == _TEST_OPTIONS_BUTTON
    _verify(
        hass,
        now.isoformat(),
        {
            CONF_DEVICE_CLASS: "restart",
            CONF_FRIENDLY_NAME: "test",
            CONF_ICON: "mdi:test",
        },
        _TEST_OPTIONS_BUTTON,
    )
    # registry id is "<template unique_id>-<button unique_id>"
    er = async_get(hass)
    assert er.async_get_entity_id("button", "template", "test-test")
async def test_name_template(hass: HomeAssistant) -> None:
    """Test: name template."""
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "button": {
                        "press": {"service": "script.press"},
                        # the name itself is a template and must be rendered
                        "name": "Button {{ 1 + 1 }}",
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # rendered name also determines the entity_id slug
    _verify(
        hass,
        STATE_UNKNOWN,
        {
            CONF_FRIENDLY_NAME: "Button 2",
        },
        "button.button_2",
    )
async def test_unique_id(hass: HomeAssistant) -> None:
    """Test: unique id is ok."""
    # identical unique_id on the template block and the button is accepted
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "test",
                    "button": {
                        "press": {"service": "script.press"},
                        "unique_id": "test",
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    _verify(hass, STATE_UNKNOWN)
def _verify(
    hass,
    expected_value,
    attributes=None,
    entity_id=_TEST_BUTTON,
):
    """Verify button's state and attributes.

    `attributes` is compared exactly against the entity's attributes after the
    default friendly name has been filled in.
    """
    # Fix: copy before mutating — the original wrote the default friendly name
    # into the caller's dict, leaking state between assertions sharing a dict.
    attributes = dict(attributes) if attributes else {}
    if CONF_FRIENDLY_NAME not in attributes:
        attributes[CONF_FRIENDLY_NAME] = DEFAULT_NAME
    state = hass.states.get(entity_id)
    assert state.state == expected_value
    assert state.attributes == attributes
|
[
"[email protected]"
] | |
0f51ca5b879fb84ad265dfd8702b8672392376d4
|
b3d42d863b170f2a952e69c40bea727a92d95730
|
/c03/p050_same_name.py
|
2dfe30b61af6c12638fdf656a5dcee8b56269429
|
[
"Apache-2.0"
] |
permissive
|
HiAwesome/automate-the-boring-stuff
|
6e8a8aa12fc36f9d7c139f1c2ae0510df1904491
|
7355f53bbb0d27755fe350b84be834f3b4f9767a
|
refs/heads/master
| 2023-01-04T14:24:53.624590 | 2020-11-05T06:30:43 | 2020-11-05T06:30:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 224 |
py
|
# Scoping demo: each function gets its own local `eggs`, separate from the global.
def spam():
    # local binding: shadows any outer `eggs` inside spam() only
    eggs = 'spam local'
    print(eggs)
def bacon():
    # bacon() has its own local `eggs`, independent of spam() and the global
    eggs = 'bacon local'
    print(eggs)
    spam()
    # still 'bacon local': spam() never touched bacon()'s local
    print(eggs)
eggs = 'global'
bacon()
# 'global': neither function modified the module-level binding
print(eggs)
"""
bacon local
spam local
bacon local
global
"""
|
[
"[email protected]"
] | |
3422f9944919abe99c858b9cd797912712273538
|
86a184949f306c94b6d032cc2ca862412016d25a
|
/week10/funwithcelery/tasks.py
|
bdf6f5df56935b6973921ab1d6f00e02ca4f8a79
|
[] |
no_license
|
sjl421/Web-Development-with-Django
|
7339b8f4c33f1a1bfa7660e6dc35d74f54c1981d
|
2bb9e0fc098b03cbb6e7980483624b151601d204
|
refs/heads/master
| 2021-04-06T08:58:22.803534 | 2017-07-08T13:59:50 | 2017-07-08T13:59:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from celery import group
from celery_app import app
@app.task(bind=True, max_retries=1)
def retry_task(self):
    """Demo task that always retries itself (at most once, 60 s later)."""
    print('IN TASK')
    exc = Exception('retrying')
    # self.retry raises celery.exceptions.Retry, so nothing runs after it
    self.retry(exc=exc, countdown=60)
@app.task
def add(x, y):
    """Return the sum of x and y."""
    return x + y
@app.task
def just_printing(*args, **kwargs):
    """Print whatever arguments the caller sent; returns None."""
    print('Someone called me: just_printing')
    print(args, kwargs)
@app.task
def group_adds(ns):
    """Fan out one `add` task per pair in `ns` and apply the group immediately."""
    # group(...)() applies the group, returning a GroupResult-like async handle
    return group(add.s(*n) for n in ns)()
|
[
"[email protected]"
] | |
89e7da5910d0897f44e727aec7f25b1c0b510972
|
1511782b2cc3dcf1f7e058e5046ec67a5561ba51
|
/2020/0418/we_like_agc.py
|
cf32c1fd0e83497a7048d6cb9fac03b9d62bda85
|
[] |
no_license
|
keiouok/atcoder
|
7d8a053b0cf5b42e71e265450121d1ad686fee6d
|
9af301c6d63b0c2db60ac8af5bbe1431e14bb289
|
refs/heads/master
| 2021-09-07T11:48:55.953252 | 2021-07-31T15:29:50 | 2021-07-31T15:29:50 | 186,214,079 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,565 |
py
|
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from heapq import heapify, heappop, heappush
# Competitive-programming stdin helpers.
# NOTE(review): `input` deliberately shadows the builtin with a faster
# readline-based version; the helpers below rely on that shadowing.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
N = INT()
def no_agc(last4):
    """Return True iff `last4` (a 4-char string) contains no "AGC", even after
    swapping any single pair of adjacent characters."""
    candidates = [last4]
    for pos in range(1, 4):
        chars = list(last4)
        chars[pos - 1], chars[pos] = chars[pos], chars[pos - 1]
        candidates.append("".join(chars))
    return all("AGC" not in candidate for candidate in candidates)
# q = deque([(0, "TTT")])
ans = 0
memo = [{} for i in range(N+1)]
def dfs(idx, last3):
    """Count (mod `mod`) length-(N-idx) suffixes over AGCT that keep the whole
    string AGC-free (even under one adjacent swap), given the last 3 chars."""
    # idx, last3 = q.popleft()
    if last3 in memo[idx]:
        return memo[idx][last3]
    if idx == N:
        # full length reached: exactly one valid completion (the empty suffix)
        return 1
    ret = 0
    for c in "AGCT":
        # only extend if the new 4-char window passes the swap-safe check
        if no_agc(last3 + c):
            last3_new = (last3 + c)[1:]
            idx_new = idx + 1
            ret = (ret + dfs(idx_new, last3_new)) % mod
    memo[idx][last3] = ret
    return ret
print(dfs(0, "TTT"))
|
[
"[email protected]"
] | |
b1ca43ca364d54f0cadbe79241cefb980e2b7c7c
|
f08e50d55bbbb90e4c8f9a8811eaede98ede2694
|
/erpbee/patches/v11_0/inter_state_field_for_gst.py
|
2a8ab6e86b41209abb6664006436ad67db52618b
|
[] |
no_license
|
mohrezbak/erpbee
|
bc48472a99a7f4357aa7b82ff3a9c1a4c98ba017
|
1134156ad337fd472e14cf347479c17bd8db7b33
|
refs/heads/main
| 2023-02-12T01:32:07.858555 | 2021-01-08T17:25:23 | 2021-01-08T17:25:23 | 327,872,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,205 |
py
|
from __future__ import unicode_literals
import frappe
from erpbee.regional.india.setup import make_custom_fields
def execute():
    """Patch entry point: back-fill `is_inter_state` on GST tax templates.

    Runs only for installations with at least one Indian company. Doctype
    reloads must happen before make_custom_fields() so the schema is current.
    """
    company = frappe.get_all('Company', filters = {'country': 'India'})
    if not company:
        return
    frappe.reload_doc("Payroll", "doctype", "Employee Tax Exemption Declaration")
    frappe.reload_doc("Payroll", "doctype", "Employee Tax Exemption Proof Submission")
    frappe.reload_doc("hr", "doctype", "Employee Grade")
    frappe.reload_doc("hr", "doctype", "Leave Policy")
    frappe.reload_doc("accounts", "doctype", "Bank Account")
    frappe.reload_doc("accounts", "doctype", "Tax Withholding Category")
    frappe.reload_doc("accounts", "doctype", "Allowed To Transact With")
    frappe.reload_doc("accounts", "doctype", "Finance Book")
    frappe.reload_doc("accounts", "doctype", "Loyalty Program")
    frappe.reload_doc("stock", "doctype", "Item Barcode")
    make_custom_fields()
    frappe.reload_doc("accounts", "doctype", "sales_taxes_and_charges")
    frappe.reload_doc("accounts", "doctype", "purchase_taxes_and_charges")
    frappe.reload_doc("accounts", "doctype", "sales_taxes_and_charges_template")
    frappe.reload_doc("accounts", "doctype", "purchase_taxes_and_charges_template")
    # set is_inter_state in Taxes And Charges Templates
    if frappe.db.has_column("Sales Taxes and Charges Template", "is_inter_state") and\
        frappe.db.has_column("Purchase Taxes and Charges Template", "is_inter_state"):
        igst_accounts = set(frappe.db.sql_list('''SELECT igst_account from `tabGST Account` WHERE parent = "GST Settings"'''))
        cgst_accounts = set(frappe.db.sql_list('''SELECT cgst_account FROM `tabGST Account` WHERE parenttype = "GST Settings"'''))
        # build "When name='X' Then 1" fragments per template, then one bulk UPDATE each
        when_then_sales = get_formatted_data("Sales Taxes and Charges", igst_accounts, cgst_accounts)
        when_then_purchase = get_formatted_data("Purchase Taxes and Charges", igst_accounts, cgst_accounts)
        if when_then_sales:
            frappe.db.sql('''update `tabSales Taxes and Charges Template`
                set is_inter_state = Case {when_then} Else 0 End
                '''.format(when_then=" ".join(when_then_sales)))
        if when_then_purchase:
            frappe.db.sql('''update `tabPurchase Taxes and Charges Template`
                set is_inter_state = Case {when_then} Else 0 End
                '''.format(when_then=" ".join(when_then_purchase)))
def get_formatted_data(doctype, igst_accounts, cgst_accounts):
    """Return SQL "When name='...' Then 1" fragments for templates whose tax
    rows reference an IGST account but no CGST account.

    Arguments:
        doctype: child doctype name (trusted internal constant — it is
            interpolated into SQL, so never pass user input here).
        igst_accounts / cgst_accounts: sets of account names.
    """
    # fetch all the rows data from child table
    all_details = frappe.db.sql('''
        select parent, account_head from `tab{doctype}`
        where parenttype="{doctype} Template"'''.format(doctype=doctype), as_dict=True)
    # Group the data as "parent: [list of accounts]" in a single pass.
    # Fix: the previous nested loop was O(n^2) and re-appended every matching
    # account once per row sharing the parent (duplicates were only masked by
    # the set() conversion below).
    group_detail = {}
    for row in all_details:
        group_detail.setdefault(row['parent'], []).append(row['account_head'])
    # form when_then condition based on - if list of accounts for a document
    # matches any account in igst_accounts list and not matches any in cgst_accounts list
    when_then = []
    for parent, account_heads in group_detail.items():
        accounts = set(account_heads)
        if not accounts.isdisjoint(igst_accounts) and accounts.isdisjoint(cgst_accounts):
            when_then.append('''When name='{name}' Then 1'''.format(name=parent))
    return when_then
|
[
"[email protected]"
] | |
6cad1809e4dcda1f27e0304167dbbb5f9ead2b47
|
6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212
|
/Everyday_alg/2021/01/2021_01_13/binary-tree-level-order-traversal.py
|
2966d1ca6c9df4779c61bf9ec99a75013caf3771
|
[] |
no_license
|
Alexanderklau/Algorithm
|
7c38af7debbe850dfc7b99cdadbf0f8f89141fc6
|
eac05f637a55bfcc342fa9fc4af4e2dd4156ea43
|
refs/heads/master
| 2022-06-12T21:07:23.635224 | 2022-06-12T08:12:07 | 2022-06-12T08:12:07 | 83,501,915 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 825 |
py
|
# coding: utf-8
__author__ = 'Yemilice_lau'
"""
给你一个二叉树,请你返回其按 层序遍历 得到的节点值。 (即逐层地,从左到右访问所有节点)。
示例:
二叉树:[3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
返回其层序遍历结果:
[
[3],
[9,20],
[15,7]
]
"""
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal returning node values grouped by depth.
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return levels
|
[
"[email protected]"
] | |
e38fd14ba552b2ae26182d22a08def171dd47456
|
fdca7da4bd6a7ce2e6659014da3b11df486ea686
|
/names.py
|
a6eb3a885c30d28098b0058184d5f8f67574a955
|
[] |
no_license
|
MovinTarg/Names
|
a34bafe2f827078873f13f9034299f036b1a20b8
|
2ba00a051560ea7a9cfd12cf95bc6617962b9b0b
|
refs/heads/master
| 2021-05-02T07:20:11.304723 | 2018-02-08T22:32:52 | 2018-02-08T22:32:52 | 120,826,202 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,070 |
py
|
# Python 2 exercise script (uses print statements).
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'},
    {'first_name' : 'Mark', 'last_name' : 'Guillen'},
    {'first_name' : 'KB', 'last_name' : 'Tonel'}
]
# print "First Last" for every record in `arr`
def names(arr):
    for val in arr:
        print val['first_name'], val['last_name']
names(students)
users = {
    'Students': [
        {'first_name': 'Michael', 'last_name' : 'Jordan'},
        {'first_name' : 'John', 'last_name' : 'Rosales'},
        {'first_name' : 'Mark', 'last_name' : 'Guillen'},
        {'first_name' : 'KB', 'last_name' : 'Tonel'}
    ],
    'Instructors': [
        {'first_name' : 'Michael', 'last_name' : 'Choi'},
        {'first_name' : 'Martin', 'last_name' : 'Puryear'}
    ]
}
# For each group, print its name, then one numbered line per person:
# "<n> - First Last - <combined length of both names>"
def studentsInstructors(dictionary):
    for key, data in dictionary.items():
        print key
        # print data
        count = 0
        for val in data:
            count += 1
            print count, '-', val['first_name'], val['last_name'], '-', len(val['first_name'] + val['last_name'])
studentsInstructors(users)
|
[
"[email protected]"
] | |
68f6bb61324cd78cba0386369bec19d3a4ec44a3
|
abf0ea1656a00932d99578a566b8b546daa8c569
|
/env/bin/cftp
|
df699cd0cc2f0d301a851ae0138146f3272a4b40
|
[] |
no_license
|
Lewington-pitsos/soundcloud
|
cf3b7a22b6e93b32e1f2a3f50b5ca5aec790bf23
|
5232ddc6d8f745ee91624411f3eecbfea2758c51
|
refs/heads/master
| 2020-05-16T02:09:24.846277 | 2019-04-22T05:37:54 | 2019-04-22T05:37:54 | 182,618,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
#!/home/lewington/code/python/learn/soundcloud/env/bin/python3.6
# -*- coding: utf-8 -*-
# Console-script entry point for Twisted Conch's `cftp` SFTP client.
import re
import sys
from twisted.conch.scripts.cftp import run
if __name__ == '__main__':
    # setuptools-style shim: strip "-script.pyw"/".exe" from argv[0] so the
    # tool reports a clean program name, then hand control to Twisted's run()
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"[email protected]"
] | ||
c58cf0b2ae63d589ad451462013553658103e025
|
d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86
|
/disturbance/migrations/0030_auto_20200511_1512.py
|
3641eaa61a3cdb5c1c1646462aae2f39bb6cec30
|
[
"Apache-2.0"
] |
permissive
|
Djandwich/disturbance
|
cb1d25701b23414cd91e3ac5b0207618cd03a7e5
|
b1ba1404b9ca7c941891ea42c00b9ff9bcc41237
|
refs/heads/master
| 2023-05-05T19:52:36.124923 | 2021-06-03T06:37:53 | 2021-06-03T06:37:53 | 259,816,629 | 1 | 1 |
NOASSERTION
| 2021-06-03T09:46:46 | 2020-04-29T03:39:33 |
Python
|
UTF-8
|
Python
| false | false | 1,710 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-05-11 07:12
from __future__ import unicode_literals
import disturbance.components.compliances.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; creates SiteApplicationFee/SiteCategory and
    # links them, plus widens the compliance document file field.
    dependencies = [
        ('disturbance', '0029_auto_20200508_1238'),
    ]
    operations = [
        migrations.CreateModel(
            name='SiteApplicationFee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(decimal_places=2, default='0.00', max_digits=8)),
                ('date_of_enforcement', models.DateField(blank=True, null=True)),
            ],
            options={
                'ordering': ('date_of_enforcement',),
            },
        ),
        migrations.CreateModel(
            name='SiteCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=200)),
            ],
        ),
        migrations.AlterField(
            model_name='compliancedocument',
            name='_file',
            field=models.FileField(max_length=500, upload_to=disturbance.components.compliances.models.update_proposal_complaince_filename),
        ),
        # added after both CreateModel ops so the FK target already exists
        migrations.AddField(
            model_name='siteapplicationfee',
            name='site_category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site_application_fees', to='disturbance.SiteCategory'),
        ),
    ]
|
[
"[email protected]"
] | |
f0d7131ecae8c4c754c7dd19a9a5c1ff2121cb3d
|
95540a155c043dd84ea6c0fb7d59ba06dc78b875
|
/python/算法和数据结构/queue.py
|
5c7170aaa244a59b0964dfc397b87b675c7a5cb7
|
[] |
no_license
|
Lilenn/must
|
41b95d8e80f48a6b82febb222936bbc3502cc01f
|
a510a8d0e58fde1bc97ab7ad9bd2738158dcba5e
|
refs/heads/master
| 2020-04-09T23:09:20.116439 | 2018-12-06T09:02:09 | 2018-12-06T09:02:09 | 160,648,431 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
class Queue(object):
    """A simple FIFO queue backed by a Python list.

    Items are inserted at the front of the list and popped from the back,
    so the oldest item is always returned first.
    """
    def __init__(self):
        self.items = []
    def is_pmpty(self):
        '''是否为空 (empty check)'''
        # NOTE: misspelled name kept for backward compatibility with existing
        # callers; prefer is_empty() in new code.
        return self.items == []
    def is_empty(self):
        '''Correctly-spelled alias for is_pmpty().'''
        return self.is_pmpty()
    def enqueue(self, item):
        '''入队列 (add to the back of the queue)'''
        # Bug fix: this used insert(1, item), which scrambled ordering — the
        # second-oldest element came out first. Index 0 restores FIFO order.
        self.items.insert(0, item)
    def dequeue(self):
        '''出队列 (remove and return the oldest item; IndexError if empty)'''
        return self.items.pop()
    def size(self):
        '''返回大小 (number of queued items)'''
        return len(self.items)
if __name__ == '__main__':
    # smoke test: enqueue three values, pop one, then report remaining size
    q = Queue()
    q.enqueue(1)
    q.enqueue(3)
    q.enqueue(5)
    print(q.dequeue())
    print(q.size())
|
[
"[email protected]"
] | |
f34f79c26e98c1d38d207de9b6cffc1f0ae6857e
|
4503a3bfd940dce760b5f70e90e6fe2fe0cc4881
|
/week10/health.py
|
64a500edefc57c9f812bbfd48accca1bbc735e97
|
[] |
no_license
|
RicardoLima17/lecture
|
dba7de5c61507f51d51e3abc5c7c4c22ecda504f
|
b41f1201ab938fe0cab85566998390166c7fa7d8
|
refs/heads/main
| 2023-04-18T11:12:39.769760 | 2021-04-21T18:36:09 | 2021-04-21T18:36:09 | 334,456,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
# use person module
# Author: Andrew Beatty
# NOTE(review): wildcard import pulls displayperson/gethealthdata into this
# namespace — explicit imports would make their origin clearer.
from personmodule import *
import datetime as dt
# sample record consumed by the personmodule helpers below
person1 = {
    'firstname': 'andrew',
    'lastname': 'beatty',
    'dob': dt.date(2010, 1, 1),
    'height': 180,
    'width': 100
}
# call the functions in the module
# I used import * so these have been imported
# so I can call them without the module name
displayperson(person1)
gethealthdata(person1)
|
[
"[email protected]"
] | |
80d7750f7f977b876f0ce61427fcd1932f7c6f2f
|
2fd6c260b8db490ed9dc594f2a6578bb736cb9ad
|
/src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_one_way_subscribe_16.py
|
a4184ef2423b5b02d1874dbcd0e40ca97546c89f
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
pornin/openweave-core
|
6891a89b493566e24c4e413f6425ecbf59663a43
|
b6ac50aad6eb69c7a81c9916707f3c7ef098ec63
|
refs/heads/master
| 2020-04-02T00:55:05.328569 | 2018-10-19T17:28:34 | 2018-10-19T17:28:34 | 153,828,148 | 1 | 0 |
Apache-2.0
| 2018-10-19T18:52:53 | 2018-10-19T18:52:53 | null |
UTF-8
|
Python
| false | false | 3,051 |
py
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM one way subscribe between nodes.
# H03: One way Subscribe: Client Continuous Events. Client cancels
# L09: Stress One way Subscribe: Client Continuous Events. Client cancels
#
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_one_way_subscribe_16(weave_wdm_next_test_base):
def test_weave_wdm_next_one_way_subscribe_16(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "one_way_subscribe"
wdm_next_args['total_client_count'] = 4
wdm_next_args['final_client_status'] = 0
wdm_next_args['timer_client_period'] = 16000
wdm_next_args['test_client_iterations'] = 5
wdm_next_args['test_client_delay'] = 2000
wdm_next_args['enable_client_flip'] = 0
wdm_next_args['total_server_count'] = 4
wdm_next_args['final_server_status'] = 4
wdm_next_args['timer_server_period'] = 15000
wdm_next_args['test_server_delay'] = 0
wdm_next_args['enable_server_flip'] = 0
wdm_next_args['server_event_generator'] = 'Security'
wdm_next_args['server_inter_event_period'] = 2000
wdm_next_args['client_log_check'] = [('Client\[0\] \[(ALIVE|CONFM)\] EndSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations']),
('Client\[0\] \[CANCL\] _AbortSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['server_log_check'] = [('Handler\[0\] \[(ALIVE|CONFM)\] CancelRequestHandler', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
wdm_next_args['test_case_name'] = ['H03: One way Subscribe: Publisher Continuous Events. Client cancels',
'L09: Stress One way Subscribe: Publisher Continuous Events. Client cancels']
print 'test file: ' + self.__class__.__name__
print "weave-wdm-next test B03 and L09"
super(test_weave_wdm_next_one_way_subscribe_16, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
|
[
"[email protected]"
] | |
f3e465d8abf97925aafc78f1129a2bbb9ec13c39
|
71877e3f343e3899da77878937362191fdc02a0f
|
/topo_management/make_topos.py
|
4b1690d7c294a97ec079d7d60e92469cc7f79f95
|
[] |
no_license
|
micahjohnson150/scripts
|
2a9007ae6d2ad3eec3596aff6e016f6d13fb0652
|
32a8322cab7463dbcc4d6042e7d53a03c2ee2654
|
refs/heads/master
| 2023-05-26T04:48:27.005338 | 2023-01-21T14:08:51 | 2023-01-21T14:08:51 | 144,737,605 | 0 | 0 | null | 2023-05-22T21:36:55 | 2018-08-14T15:17:16 |
Python
|
UTF-8
|
Python
| false | false | 4,085 |
py
|
#!/usr/bin/env python3
from os import listdir, walk, system
from os.path import isfile, isdir, basename, abspath, expanduser, split
from subprocess import check_output, Popen
import argparse
from basin_setup.basin_setup import Messages
"""
Every basin in my basin folder has a make file and each is constructed similarly.
Thie script will go through all the basin topos with a make file and execute
make < arg >
The following executes all the topos makefiles via make topo in every basin folder
e.g.
python make_topos.py topo
The following only runs the make topo command on tuolumne
e.g.
python make_topos.py topo -kw tuolumne
"""
out = Messages()
def has_hidden_dirs(p):
"""
Searches a string path to determine if there are hidden
"""
has_hidden_paths = False
for d in p.split('/'):
if d:
if d[0] == '.':
has_hidden_paths = True
return has_hidden_paths
def find_basin_paths(directory, indicator_folder="model_setup", indicator_file="Makefile"):
"""
Walks through all the folder in directory looking for a directory called
model setup, then checks to see if there is a Makefile, if there is then append
that path to a list and return it
"""
paths = []
directory = abspath(expanduser(directory))
# Allow for indicator files and dirs to be none
no_ind_file = (indicator_folder == None and indicator_file != None)
no_ind_dir = (indicator_folder != None and indicator_file == None)
no_dir_or_file = (indicator_folder == None and indicator_file == None)
# Get all the folders and stuff just one level up
for r, d, f in walk(directory):
# ignore hidden folders and the top level folder
if not has_hidden_dirs(r) and r != directory:
if (
# If no indicator file or directories append the path
no_dir_or_file or \
# no indicatory file is available only check the indicator folder
(no_ind_file and basename(r) == indicator_folder) or \
# if no indicator folder is available only check file
(no_ind_dir and indicator_file in f) or \
# Look for both the indicator file and folder
(basename(r) == indicator_folder and indicator_file in f)):
paths.append(r)
return paths
if __name__ == "__main__":
# Director of interest
basins_dir = "~/projects/basins"
parser = argparse.ArgumentParser(description='Utilize makefiles to make '
'mass operations on basins.')
parser.add_argument('command', metavar='cmd',
help='Pass a makefile command to execute on every basin')
parser.add_argument('--keyword','-kw', dest='kw',
help='Filter basin_ops paths for kw e.g. tuolumne will'
'find only one topo to process')
args = parser.parse_args()
# Grab a command passed in
make_cmd = args.command
count = 0
basins_attempted = 0
out.msg("Looking in {} for basins with makefiles...".format(basins_dir))
basin_paths = find_basin_paths(basins_dir, indicator_folder="model_setup",
indicator_file="Makefile")
if args.kw != None:
out.msg("Filtering basin paths using keyword: {}".format(args.kw))
basin_paths = [p for p in basin_paths if args.kw in p]
# Warn user if no matches found
if len(basin_paths) == 0:
out.error('{} not found in any ops paths'.format(args.kw))
for r in basin_paths:
topo_attempt = False
try:
cmd = "cd {} && make {}".format(r, make_cmd)
out.dbg(cmd)
s = Popen(cmd, shell=True)
s.wait()
topo_attempt = True
except Exception as e:
raise e
if topo_attempt:
basins_attempted += 1
#input("press enter to continue")
out.msg("Attempted to build {} topos".format(basins_attempted))
|
[
"[email protected]"
] | |
578c5a1a6ff22e80aa35320182614bae82dfd05a
|
c51b70a06a7bef9bd96f06bd91a0ec289b68c7c4
|
/src/Snakemake/rules/Imbalance/Imbalance.smk
|
3d777e988559e4db91c171cc36dc8db59f4b607b
|
[] |
no_license
|
clinical-genomics-uppsala/TSO500
|
3227a65931c17dd2799dbce93fe8a47f56a8c337
|
b0de1d2496b6c650434116494cef721bdc295528
|
refs/heads/master
| 2023-01-10T01:41:51.764849 | 2020-11-05T14:11:25 | 2020-11-05T14:11:25 | 218,708,783 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
smk
|
rule imbalance :
input:
bams = ["RNA_TST170/bam_files/" + s + ".bam" for s in config["RNA_Samples"]]
output:
imbalance_all = "Results/RNA/Imbalance/imbalance_all_gene.txt",
imbalance = "Results/RNA/Imbalance/imbalance_called_gene.txt"
run:
import subprocess
subprocess.call("python src/Imbalance.py " + " ".join(input.bams), shell=True)
|
[
"[email protected]"
] | |
15cea4f928a57a80bc4a8c891bbc166135746b2c
|
4201d4aff2f2d877fa75d6d971f7826d5d1369e3
|
/product_onepage/settings.py
|
91b09b1db1c82f31bfb8318f86917bf8e21a21ab
|
[
"MIT"
] |
permissive
|
emencia/emencia-product-onepage
|
4f5fb72cc47ca8725bc01c9c69a583126e7b8514
|
09cff26e97641412b297f977ca8c8045983bbf97
|
refs/heads/master
| 2020-04-13T09:31:04.787009 | 2015-01-13T01:14:00 | 2015-01-13T01:14:00 | 28,994,086 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,778 |
py
|
# Dummy gettext
gettext = lambda s: s
# Plugins template choices
ONEPAGE_TAB_TEMPLATE_CHOICES = (
("product_onepage/tab.html", gettext("Default")),
)
ONEPAGE_SPEC_TEMPLATE_CHOICES = (
("product_onepage/spec.html", gettext("Default")),
)
ONEPAGE_BLURB_TEMPLATE_CHOICES = (
("product_onepage/blurb.html", gettext("Default")),
)
ONEPAGE_OVERVIEW_TEMPLATE_CHOICES = (
("product_onepage/overview.html", gettext("Default")),
)
ONEPAGE_PACK_TEMPLATE_CHOICES = (
("product_onepage/pack.html", gettext("Default")),
)
ONEPAGE_SUBSCRIBE_TEMPLATE_CHOICES = (
("product_onepage/subscribe.html", gettext("Default")),
)
ONEPAGE_VIDEO_TEMPLATE_CHOICES = (
("product_onepage/video.html", gettext("Default")),
)
ONEPAGE_TWENTYTWENTY_TEMPLATE_CHOICES = (
("product_onepage/twentytwenty.html", gettext("Default")),
)
# Plugins templates default choice
ONEPAGE_TAB_DEFAULT_TEMPLATE = ONEPAGE_TAB_TEMPLATE_CHOICES[0][0]
ONEPAGE_SPEC_DEFAULT_TEMPLATE = ONEPAGE_SPEC_TEMPLATE_CHOICES[0][0]
ONEPAGE_BLURB_DEFAULT_TEMPLATE = ONEPAGE_BLURB_TEMPLATE_CHOICES[0][0]
ONEPAGE_OVERVIEW_DEFAULT_TEMPLATE = ONEPAGE_OVERVIEW_TEMPLATE_CHOICES[0][0]
ONEPAGE_PACK_DEFAULT_TEMPLATE = ONEPAGE_PACK_TEMPLATE_CHOICES[0][0]
ONEPAGE_SUBSCRIBE_DEFAULT_TEMPLATE = ONEPAGE_SUBSCRIBE_TEMPLATE_CHOICES[0][0]
ONEPAGE_VIDEO_DEFAULT_TEMPLATE = ONEPAGE_VIDEO_TEMPLATE_CHOICES[0][0]
ONEPAGE_TWENTYTWENTY_DEFAULT_TEMPLATE = ONEPAGE_TWENTYTWENTY_TEMPLATE_CHOICES[0][0]
# Alignement options
ONEPAGE_BLURB_ALIGNMENT_CHOICES = (
('1', gettext(u'Extreme Left')),
('3', gettext(u'Left')),
('centered', gettext(u'Center')),
('6', gettext(u'Right')),
('8', gettext(u'Extreme Right')),
)
ONEPAGE_SPEC_ALIGNMENT_CHOICES = (
('left', _(u'Left')),
('right', _(u'Right')),
)
|
[
"[email protected]"
] | |
20fcd5d4e9c68f072f12665f4282389755541b28
|
50de76eb887892c2085e1aa898987962a5d75380
|
/_1_PythonBasic/Reactive/5.2B_distinct_with_mapping.py
|
bd179c9d1443ceb29fd6932f737d2d033d35e7f2
|
[] |
no_license
|
cyrsis/TensorflowPY36CPU
|
cac423252e0da98038388cf95a3f0b4e62d1a888
|
6ada50adf63078ba28464c59808234bca3fcc9b7
|
refs/heads/master
| 2023-06-26T06:57:00.836225 | 2021-01-30T04:37:35 | 2021-01-30T04:37:35 | 114,089,170 | 5 | 2 | null | 2023-05-25T17:08:43 | 2017-12-13T07:33:57 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 163 |
py
|
from rx import Observable
Observable.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]) \
.distinct(lambda s: len(s)) \
.subscribe(lambda i: print(i))
|
[
"[email protected]"
] | |
ccf5e0fbc0904ccbc4c7291540962c2be04e1e27
|
d785e993ed65049c82607a1482b45bddb2a03dda
|
/nano2017/cfg2018/GluGluToContinToZZTo4e_cfg.py
|
b03efa62bd937ed3a42f2270aeed36b10cdf53de
|
[] |
no_license
|
PKUHEPEWK/ssww
|
eec02ad7650014646e1bcb0e8787cf1514aaceca
|
a507a289935b51b8abf819b1b4b05476a05720dc
|
refs/heads/master
| 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,374 |
py
|
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
config.section_("General")
config.General.requestName = 'GluGluToContinToZZTo4e_2018'
config.General.transferLogs= False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script_2018.sh'
config.JobType.inputFiles = ['crab_script_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/GluGluToContinToZZTo4e_13TeV_MCFM701_pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'GluGluToContinToZZTo4e_2018'
config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
|
[
"[email protected]"
] | |
f2110dbbd89d74b18d31ba38453abe0f7578aebb
|
60fa442ae76b960ab21b10fb527c0eac85cdc587
|
/phenix/crawl_refines_print_Rfactor.py
|
3865946eb9d8dfc4cf54c28e0f99554fc655a411
|
[] |
no_license
|
pjanowski/Pawel_PhD_Scripts
|
8e6c2b92b492f9cacf425327a01faaceb27bb87d
|
5f9b1735ca6da8fdf0946d6748f3da7d3d723d5e
|
refs/heads/master
| 2021-01-10T06:15:30.287053 | 2015-11-16T04:04:07 | 2015-11-16T04:04:07 | 46,250,317 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
import os
import glob
dbase="/media/My Book/Marco/rigi_145392/p6522"
ds = next(os.walk(dbase))[1]
ds = [i for i in ds if i.startswith('Refine')]
f = lambda x: (x,int(x.split('_')[-1]))
ds = map(f,ds)
ds.sort(key=lambda x: x[-1])
ds = [ i[0] for i in ds ]
for d in ds:
logfile = glob.glob('%s/%s/*log' %(dbase,d))
if len(logfile) ==0:
continue
f=open(logfile[0], 'r')
l=f.readlines()
print d
print l[-2],
print l[-1]
|
[
"[email protected]"
] | |
0ec3a612342b6999c627497f0a8788d608044816
|
8c2de4da068ba3ed3ce1adf0a113877385b7783c
|
/hyp_utils/kaldi/steps/nnet3/report/summarize_compute_debug_timing.py
|
5c74eaf128c5da16eeba7964877e3bae00778d07
|
[
"Apache-2.0"
] |
permissive
|
hyperion-ml/hyperion
|
a024c718c4552ba3a03aae2c2ca1b8674eaebc76
|
c4c9eee0acab1ba572843373245da12d00dfffaa
|
refs/heads/master
| 2023-08-28T22:28:37.624139 | 2022-03-25T16:28:08 | 2022-03-25T16:28:08 | 175,275,679 | 55 | 20 |
Apache-2.0
| 2023-09-13T15:35:46 | 2019-03-12T18:40:19 |
Python
|
UTF-8
|
Python
| false | false | 4,357 |
py
|
#!/usr/bin/env python
# Copyright 2016 Vijayaditya Peddinti.
# Apache 2.0.
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
from __future__ import division
import sys
import re
import argparse
# expects the output of nnet3*train with --computation-debug=true
# will run faster if just the lines with "DebugAfterExecute" are provided
# <train-command> |grep DebugAfterExecute | steps/nnet3/report/summarize_compute_debug_timing.py
def GetArgs():
parser = argparse.ArgumentParser(description="Summarizes the timing info from nnet3-*-train --computation.debug=true commands ")
parser.add_argument("--node-prefixes", type=str,
help="list of prefixes. Execution times from nnet3 components with the same prefix"
" will be accumulated. Still distinguishes Propagate and BackPropagate commands"
" --node-prefixes Lstm1,Lstm2,Layer1", default=None)
print(' '.join(sys.argv), file=sys.stderr)
args = parser.parse_args()
if args.node_prefixes is not None:
raise NotImplementedError
# this will be implemented after https://github.com/kaldi-asr/kaldi/issues/944
args.node_prefixes = args.node_prefixes.split(',')
else:
args.node_prefixes = []
return args
# get opening bracket position corresponding to the last closing bracket
def FindOpenParanthesisPosition(string):
string = string.strip()
if string[-1] != ")":
# we don't know how to deal with these strings
return None
string_index = len(string) - 1
closing_parans = []
closing_parans.append(string_index)
string_index -= 1
while string_index >= 0:
if string[string_index] == "(":
if len(closing_parans) == 1:
# this opening bracket corresponds to the last closing bracket
return string_index
else:
closing_parans.pop()
elif string[string_index] == ")":
closing_parans.append(string_index)
string_index -= 1
raise Exception("Malformed string: Could not find opening paranthesis\n\t{0}".format(string))
# input : LOG (nnet3-chain-train:DebugAfterExecute():nnet-compute.cc:144) c68: BLstm1_backward_W_i-xr.Propagate(NULL, m6212(3136:3199, 0:555), &m31(0:63, 0:1023))
# output : BLstm1_backward_W_i-xr.Propagate
def ExtractCommandName(command_string):
# create a concise representation for the the command
# strip off : LOG (nnet3-chain-train:DebugAfterExecute():nnet-compute.cc:144)
command = " ".join(command_string.split()[2:])
# command = c68: BLstm1_backward_W_i-xr.Propagate(NULL, m6212(3136:3199, 0:555), &m31(0:63, 0:1023))
end_position = FindOpenParanthesisPosition(command)
if end_position is not None:
command = command[:end_position]
# command = c68: BLstm1_backward_W_i-xr.Propagate
command = ":".join(command.split(":")[1:]).strip()
# command = BLstm1_backward_W_i-xr.Propagate
return command
def Main():
# Sample Line
# LOG (nnet3-chain-train:DebugAfterExecute():nnet-compute.cc:144) c128: m19 = [] | | time: 0.0007689 secs
debug_regex = re.compile("DebugAfterExecute")
command_times = {}
for line in sys.stdin:
parts = line.split("|")
if len(parts) != 3:
# we don't know how to deal with these lines
continue
if debug_regex.search(parts[0]) is not None:
# this is a line printed in the DebugAfterExecute method
# get the timing info
time_parts = parts[-1].split()
assert(len(time_parts) == 3 and time_parts[-1] == "secs" and time_parts[0] == "time:" )
time = float(time_parts[1])
command = ExtractCommandName(parts[0])
# store the time
try:
command_times[command] += time
except KeyError:
command_times[command] = time
total_time = sum(command_times.values())
sorted_commands = sorted(command_times.items(), key = lambda x: x[1], reverse = True)
for item in sorted_commands:
print("{c} : time {t} : fraction {f}".format(c=item[0], t=item[1], f=float(item[1]) / total_time))
if __name__ == "__main__":
args = GetArgs()
Main()
|
[
"[email protected]"
] | |
0451ab6c75d806bc370d17a1356de4bb5437faf0
|
1e0ae1f039668a65e480065d671235fc0fff9b52
|
/django19day/app01/views/home.py
|
50e254c58f308631dd25e2745daad307c072c79f
|
[] |
no_license
|
aixocm/svndata
|
a4da91c3c9e1d376abfd46e7cecc3c5c2e340e83
|
ee205301f3a1ce11acef98bba927877cb7c4fb0b
|
refs/heads/master
| 2021-01-21T04:39:41.607117 | 2016-07-01T01:48:36 | 2016-07-01T01:48:36 | 47,066,006 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,184 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.shortcuts import render
from app01.forms import home as HomeForm
from app01 import models
def index(request):
# models.UserInfo.objects.all().delete()
# models.UserInfo.objects.create(name="JJJJJ")
#
# after = models.UserInfo.objects.all()
# print after[0].ctime
# dic = {'username':'alex','password':'123'}
# models.SimpleModel.objects.create(**dic)
ret = models.SimpleModel.objects.all()
print ret,type(ret)
ret = models.SimpleModel.objects.all().values('username')
print ret,type(ret)
ret = models.SimpleModel.objects.all().values_list('id','username')
print ret,type(ret)
obj = HomeForm.ImportForm()
return render(request,'home/index.html',{'obj':obj})
def upload(request):
if request.method == "POST":
inp_post = request.POST
inp_files = request.FILES
file_obj = inp_files.get('file_name')
print file_obj.name
print file_obj.size
f=open(file_obj.name,'wb')
for line in file_obj.chunks():
f.write(line)
f.close()
return render(request,'home/upload.html')
|
[
"[email protected]"
] | |
4eadce987312cc642bf7d10d5855eca2fdd2a8f7
|
ddd35c693194aefb9c009fe6b88c52de7fa7c444
|
/Live 10.1.18/novation/transport.py
|
cc9566884eb4863f6ff57a14a5556297de25949c
|
[] |
no_license
|
notelba/midi-remote-scripts
|
819372d9c22573877c7912091bd8359fdd42585d
|
e3ec6846470eed7da8a4d4f78562ed49dc00727b
|
refs/heads/main
| 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,052 |
py
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\novation\transport.py
# Compiled at: 2020-05-05 13:23:29
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens
from ableton.v2.control_surface.components import TransportComponent as TransportComponentBase
from ableton.v2.control_surface.control import ButtonControl, ToggleButtonControl
class TransportComponent(TransportComponentBase):
play_button = ToggleButtonControl(toggled_color=b'Transport.PlayOn', untoggled_color=b'Transport.PlayOff')
capture_midi_button = ButtonControl()
def __init__(self, *a, **k):
super(TransportComponent, self).__init__(*a, **k)
self._metronome_toggle.view_transform = lambda v: b'Transport.MetronomeOn' if v else b'Transport.MetronomeOff'
self.__on_can_capture_midi_changed.subject = self.song
self.__on_can_capture_midi_changed()
@play_button.toggled
def _on_play_button_toggled(self, is_toggled, _):
if is_toggled:
self.song.stop_playing()
self.song.is_playing = is_toggled
@capture_midi_button.pressed
def capture_midi_button(self, _):
try:
if self.song.can_capture_midi:
self.song.capture_midi()
except RuntimeError:
pass
@listens(b'can_capture_midi')
def __on_can_capture_midi_changed(self):
self.capture_midi_button.color = (b'Transport.Capture{}').format(b'On' if self.song.can_capture_midi else b'Off')
def _update_button_states(self):
super(TransportComponent, self)._update_button_states()
self.continue_playing_button.color = (b'Transport.Continue{}').format(b'Off' if self.song.is_playing else b'On')
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/novation/transport.pyc
|
[
"[email protected]"
] | |
22181b9fe8464921c05932796d73ede088aef55e
|
e82102580a5bd76e97ed607da7180faf9928cf7b
|
/barati/customers/views_cluster/save_main_preferences.py
|
7d3a3cd1956fe2e4caf75ee317701323bfa60ada
|
[
"Apache-2.0"
] |
permissive
|
aditi73/barati
|
393d02de0e292a0e5a73c988944486396cb0ece1
|
09e1a0a1342aa8e9cf1e97f073f4a6472c5af415
|
refs/heads/master
| 2021-01-12T10:42:44.322211 | 2016-06-11T12:02:10 | 2016-06-11T12:02:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,586 |
py
|
from django.shortcuts import render
from django.template import RequestContext
from django.views.generic import View
from django.http import HttpResponse
from customers import models as m
import sys, json, datetime
class Save_Main_Preferences(View):
try:
def __init__(self):
#self.template_name = ''
pass
def get_context_data(self, **kwargs):
context = {}
return context
def change_date_format_for_db(self, unformatted_date):
formatted_date = None
if unformatted_date:
formatted_date = datetime.datetime.strptime(unformatted_date, '%d-%b-%Y').strftime('%Y-%m-%d')
return formatted_date
def post(self, request, **kwargs):
user_id = m.Users.objects.get(username= request.user.username).id
date = self.change_date_format_for_db(request.POST.get('main_preference_date'))
location = request.POST.get('main_preference_location')
sublocation = request.POST.get('main_preference_sublocation')
main_preferences = m.Main_Preferences.objects.update_or_create( \
#Filter on the basis of the user_id
user_id=user_id, \
#Create a new entry if new values or update if updated values
defaults={'date':date, 'location':location, 'sublocation':sublocation}, \
)
message = "success_main_preferences_saved"
return HttpResponse(json.dumps(message))
except Exception as general_exception:
print general_exception
print sys.exc_traceback.tb_lineno
|
[
"[email protected]"
] | |
3d663d37b03f7370f829e314ff592048da8baadc
|
2360cee220fa1d4df735e663c2324f6716800a4c
|
/allauth/facebook/migrations/0002_auto__add_facebookaccesstoken__add_unique_facebookaccesstoken_app_acco.py
|
b1a780d35611b2a2d0a1fcb44c5feb7d8c27a289
|
[
"MIT"
] |
permissive
|
sachingupta006/django-allauth
|
709036a6a20f03fb7fb1d9ee555822526847e658
|
04a510f6b873cb3a54feca59cdd0c3e3ff9b9b5e
|
refs/heads/master
| 2021-01-17T22:11:38.164739 | 2012-06-09T16:58:47 | 2012-06-09T16:58:47 | 3,551,445 | 5 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,146 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FacebookAccessToken'
db.create_table('facebook_facebookaccesstoken', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('app', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['facebook.FacebookApp'])),
('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['facebook.FacebookAccount'])),
('access_token', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('facebook', ['FacebookAccessToken'])
# Adding unique constraint on 'FacebookAccessToken', fields ['app', 'account']
db.create_unique('facebook_facebookaccesstoken', ['app_id', 'account_id'])
def backwards(self, orm):
# Removing unique constraint on 'FacebookAccessToken', fields ['app', 'account']
db.delete_unique('facebook_facebookaccesstoken', ['app_id', 'account_id'])
# Deleting model 'FacebookAccessToken'
db.delete_table('facebook_facebookaccesstoken')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook.facebookaccesstoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'FacebookAccessToken'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'facebook.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount', '_ormbases': ['socialaccount.SocialAccount']},
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'social_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'})
},
'facebook.facebookapp': {
'Meta': {'object_name': 'FacebookApp'},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_secret': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['facebook']
|
[
"[email protected]"
] | |
fbd3495eb1889511b26d78ebe8fb5d8f63fa8a5a
|
5e11cbf593a9793359e1ca4f8e4a18af38e32738
|
/backend/mobilech_dev_14269/wsgi.py
|
15db35e2d85f5f7d4d97682b4b31b7671c76cc1e
|
[] |
no_license
|
crowdbotics-apps/mobilech-dev-14269
|
6c76edca03e7d48c428d081afc055a9cba358d04
|
50752c0b633d7077ced903c9caccd2aec2f04520
|
refs/heads/master
| 2023-01-01T15:59:38.895266 | 2020-10-30T17:51:48 | 2020-10-30T17:51:48 | 308,691,856 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
"""
WSGI config for mobilech_dev_14269 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobilech_dev_14269.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
14e95b30fef502d2d37a1c6b893748e10fd62be7
|
c361a25acecd016677bbd0c6d9fc56de79cf03ed
|
/PTM/CassandraHost.py
|
ab3d48ce871b5a763384f888edaa60579991175f
|
[] |
no_license
|
danielmellado/zephyr
|
f8931633045959e7e9a974de8b700a287a1ae94e
|
dc6f85b78b50e599504966154b927fe198d7402d
|
refs/heads/master
| 2021-01-12T22:31:24.479814 | 2015-10-14T05:39:04 | 2015-10-14T06:24:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,469 |
py
|
__author__ = 'micucci'
# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from NetNSHost import NetNSHost
from common.Exceptions import *
from common.FileLocation import *
from PhysicalTopologyConfig import *
from common.CLI import *
from common.IP import IP
from ConfigurationHandler import FileConfigurationHandler
class CassandraHost(NetNSHost):
    """A network-namespace host that runs one Cassandra node for testing.

    NOTE: the ``print`` statements below use Python 2 statement syntax, so
    this module targets a Python 2 runtime.
    """
    def __init__(self, name, ptm):
        super(CassandraHost, self).__init__(name, ptm)
        self.cassandra_ips = []  # addresses of every Cassandra node in the cluster
        self.num_id = '1'  # cluster-unique id, kept as a string for path suffixes
        self.init_token = ''  # initial ring token assigned to this node
        self.ip = IP()  # this node's own address
        self.configurator = CassandraFileConfiguration()
    def do_extra_config_from_ptc_def(self, cfg, impl_cfg):
        """
        Configure this host type from a PTC HostDef config and the
        implementation-specific configuration
        :type cfg: HostDef
        :type impl_cfg: ImplementationDef
        :return:
        """
        # Take the first IP of the first configured interface as this node's
        # address, if one was provided.
        if len(cfg.interfaces.values()) > 0 and len(cfg.interfaces.values()[0].ip_addresses) > 0:
            self.ip = cfg.interfaces.values()[0].ip_addresses[0]
        # The remaining knobs arrive as optional implementation kwargs.
        if 'init_token' in impl_cfg.kwargs:
            self.init_token = impl_cfg.kwargs['init_token']
        if 'cassandra_ips' in impl_cfg.kwargs:
            for i in impl_cfg.kwargs['cassandra_ips']:
                self.cassandra_ips.append(IP(i))
        if 'id' in impl_cfg.kwargs:
            self.num_id = impl_cfg.kwargs['id']
    def prepare_config(self):
        # Write this node's private config tree, then register its system.log
        # with the framework log manager (format string matches Cassandra's
        # log timestamp layout).
        self.configurator.configure(self.num_id, self.cassandra_ips, self.init_token, self.ip)
        log_dir = '/var/log/cassandra.' + self.num_id
        self.ptm.log_manager.add_external_log_file(FileLocation(log_dir + '/system.log'), self.num_id,
                                                   '%Y-%m-%d %H:%M:%S,%f', 2)
    def print_config(self, indent=0):
        # Python 2 print statements: each line prints one concatenated string.
        super(CassandraHost, self).print_config(indent)
        print (' ' * (indent + 1)) + 'Num-id: ' + self.num_id
        print (' ' * (indent + 1)) + 'Init-token: ' + self.init_token
        print (' ' * (indent + 1)) + 'Self-IP: ' + str(self.ip)
        print (' ' * (indent + 1)) + 'Cassandra-IPs: ' + ', '.join(str(ip) for ip in self.cassandra_ips)
    def do_extra_create_host_cfg_map_for_process_control(self):
        # Serialize the fields needed to rebuild this host in another process.
        return {'num_id': self.num_id, 'ip': self.ip.to_map()}
    def do_extra_config_host_for_process_control(self, cfg_map):
        # Inverse of the method above.
        self.num_id = cfg_map['num_id']
        self.ip = IP.from_map(cfg_map['ip'])
    def wait_for_process_start(self):
        # Wait a couple seconds for the process to start before polling nodetool
        time.sleep(2)
        # Poll 'nodetool status' every 2 seconds until it exits 0, giving up
        # after max_retries attempts (roughly 20+ seconds total).
        retries = 0
        max_retries = 10
        connected = False
        while not connected:
            if self.cli.cmd('nodetool -h 127.0.0.1 status', return_status=True) == 0:
                connected = True
            else:
                retries += 1
                if retries > max_retries:
                    raise SocketException('Cassandra host ' + self.num_id + ' timed out while starting')
                time.sleep(2)
    def prepare_environment(self):
        # Bind-mount the per-node config/data/log dirs over the canonical paths.
        self.configurator.mount_config(self.num_id)
    def cleanup_environment(self):
        self.configurator.unmount_config()
    def control_start(self):
        # Small heap sizes keep many test nodes runnable on one machine.
        self.cli.cmd('/bin/bash -c "MAX_HEAP_SIZE=128M HEAP_NEWSIZE=64M /etc/init.d/cassandra start"')
    def control_stop(self):
        self.cli.cmd("/etc/init.d/cassandra stop")
class CassandraFileConfiguration(FileConfigurationHandler):
    """Builds a per-node copy of the Cassandra configuration tree and
    bind-mounts it over the canonical /etc, /var/lib, /var/log and /run paths
    so several nodes can share one filesystem image."""
    def __init__(self):
        super(CassandraFileConfiguration, self).__init__()
    def configure(self, num_id, cassandra_ips, init_token, self_ip):
        # Comma-joined seed list, e.g. '10.0.0.1,10.0.0.2' (trailing comma stripped).
        seed_str = ''.join([ip.ip + ',' for ip in cassandra_ips])[:-1]
        # Per-node directory suffixes keyed by the node's numeric id.
        etc_dir = '/etc/cassandra.' + num_id
        var_lib_dir = '/var/lib/cassandra.' + num_id
        var_log_dir = '/var/log/cassandra.' + num_id
        var_run_dir = '/run/cassandra.' + num_id
        # Start from a fresh copy of the stock /etc/cassandra tree.
        self.cli.rm(etc_dir)
        self.cli.copy_dir('/etc/cassandra', etc_dir)
        # Work around for https://issues.apache.org/jira/browse/CASSANDRA-5895
        self.cli.regex_file(etc_dir + '/cassandra-env.sh', 's/-Xss[1-9][0-9]*k/-Xss228k/')
        # Advertise this node's own IP for JMX connections.
        self.cli.replace_text_in_file(etc_dir + '/cassandra-env.sh',
                                      '# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"',
                                      'JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=' + self_ip.ip + '"')
        # Point cassandra.yaml at this cluster: name, ring token, seed list,
        # and this node's listen/RPC addresses.
        self.cli.regex_file_multi(etc_dir + '/cassandra.yaml',
                                  "s/^cluster_name:.*$/cluster_name: 'midonet'/",
                                  's/^initial_token:.*$/initial_token: ' + init_token + '/',
                                  "/^seed_provider:/,/^$/ s/seeds:.*$/seeds: '" + seed_str + "'/",
                                  's/^listen_address:.*$/listen_address: ' + self_ip.ip + '/',
                                  's/^rpc_address:.*$/rpc_address: ' + self_ip.ip + '/')
        # Recreate empty data/log/run directories owned by the cassandra user.
        self.cli.rm(var_lib_dir)
        self.cli.mkdir(var_lib_dir)
        self.cli.chown(var_lib_dir, 'cassandra', 'cassandra')
        self.cli.rm(var_log_dir)
        self.cli.mkdir(var_log_dir)
        self.cli.chown(var_log_dir, 'cassandra', 'cassandra')
        self.cli.rm(var_run_dir)
        self.cli.mkdir(var_run_dir)
        self.cli.chown(var_run_dir, 'cassandra', 'cassandra')
    def mount_config(self, num_id):
        # Overlay the per-node trees onto the paths the init script expects.
        self.cli.mount('/run/cassandra.' + num_id, '/run/cassandra')
        self.cli.mount('/var/lib/cassandra.' + num_id, '/var/lib/cassandra')
        self.cli.mount('/var/log/cassandra.' + num_id, '/var/log/cassandra')
        self.cli.mount('/etc/cassandra.' + num_id, '/etc/cassandra')
    def unmount_config(self):
        self.cli.unmount('/run/cassandra')
        self.cli.unmount('/var/lib/cassandra')
        self.cli.unmount('/var/log/cassandra')
        self.cli.unmount('/etc/cassandra')
|
[
"[email protected]"
] | |
17057251ad5e6a6db3b8bbf55d6daf24d7be92ef
|
434ec954a1c481f17dbb41d82f814405c2bd1e6e
|
/__init__.py
|
9dcc5b7d4b2f8a9eb1377764f50e3d764a314fc5
|
[] |
no_license
|
pytsite/plugin-comments_odm
|
83ae106529c68e995ff3f9414ffb8b76d64b9704
|
d07906d2c57ff0b750cb580c5f2c0e3867b04ac6
|
refs/heads/master
| 2022-02-11T21:56:59.371544 | 2019-08-06T00:31:59 | 2019-08-06T00:31:59 | 82,923,156 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
"""PytSite ODM Comments Plugin
"""
__author__ = 'Oleksandr Shepetko'
__email__ = '[email protected]'
__license__ = 'MIT'
from . import _widget as widget
def plugin_load():
    """PytSite hook, invoked once when this plugin is loaded.

    Registers the ODM comment model, exposes it to the 'comments' plugin as a
    storage driver, and wires the report-comment event handler.
    """
    # Imports are deferred to load time so the host framework and peer
    # plugins are initialized before they are used.
    from pytsite import events
    from plugins import comments, odm
    from . import _model, _driver, _eh
    # Register ODM model
    odm.register_model('comment', _model.ODMComment)
    # Register comments driver
    comments.register_driver(_driver.ODM())
    events.listen('comments@report_comment', _eh.comments_report_comment)
|
[
"[email protected]"
] | |
a419f17e8f448960359d04f1206ecd8b48fac4f7
|
88c8dc51713753b7a36dce80ca936b2933575845
|
/week07/class_code/w7_creative_task_code_start.py
|
8c1fdb2f9546ee089a3f5c20570395591fc0e5fc
|
[] |
no_license
|
previtus/cci_python
|
210e9e4314fb65c2b5542131167a75ece07ad2a9
|
ddab2697fc960355ac9e5fac7fc7d462db8b50f4
|
refs/heads/master
| 2021-05-23T13:25:46.661735 | 2020-01-15T18:11:33 | 2020-01-15T18:11:33 | 253,309,644 | 1 | 0 | null | 2020-04-05T18:57:21 | 2020-04-05T18:57:20 | null |
UTF-8
|
Python
| false | false | 571 |
py
|
# Load the whole novel into one string; 'with' closes the handle for us.
with open("dracula.txt", "r") as source:
    filestring = source.read()

# --- words: split on any whitespace -------------------------------------
words = filestring.split()
# for word in words:
#     print(word)

# --- sentences: naive split on periods ----------------------------------
sentences = filestring.split(".")

# dark magicks!
# import re
# sentences2 = re.split(r' *[\.\?!][\'"\)\]]* *', filestring)

# --- paragraphs: split on blank lines -----------------------------------
paragraphs = filestring.split("\n\n")  # depends on how your text is written!
# for paragraph in paragraphs:
#     print(paragraph)
|
[
"[email protected]"
] | |
1ca21c5fbe3c345731bc9b168b49f3f7ab94392d
|
bde6ed092b7b29703737e11c5a5ff90934af3d74
|
/hackerrank/data-structures/array/sparse-arrays.py
|
e092d90c7f10b803449a5336206aab0c80288509
|
[] |
no_license
|
takecian/ProgrammingStudyLog
|
2ab7ea601e0996b3fa502b81ec141bc3772442b6
|
94485d131c0cc9842f1f4799da2d861dbf09b12a
|
refs/heads/master
| 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 |
Python
|
UTF-8
|
Python
| false | false | 792 |
py
|
# https://www.hackerrank.com/challenges/sparse-arrays/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the matchingStrings function below.
def matchingStrings(strings, queries):
    """Return, for each query, how many times it occurs in ``strings``.

    Counting every string once up front with a Counter makes this
    O(len(strings) + len(queries)) instead of the original
    O(len(strings) * len(queries)) cost of calling list.count per query.
    Missing queries correctly count as 0 (Counter's default).
    """
    from collections import Counter  # local import keeps the module's import block untouched
    frequencies = Counter(strings)
    return [frequencies[query] for query in queries]
if __name__ == '__main__':
    # HackerRank harness: read the inputs from stdin, write the answers to
    # the file named by OUTPUT_PATH (opened first, exactly as the stub does).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        strings = [input() for _ in range(int(input()))]
        queries = [input() for _ in range(int(input()))]
        fptr.write('\n'.join(map(str, matchingStrings(strings, queries))))
        fptr.write('\n')
|
[
"[email protected]"
] | |
b34f356a30af80027d2c29078af6a0f66263e7db
|
dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0
|
/src/main/scala/MinCostToConnectAllPoints.py
|
dea7dced11cb113cb91c76a28ba30e756b63194f
|
[] |
no_license
|
joestalker1/leetcode
|
8a5cdda17abd33c3eef859732f75d7bec77a9d0e
|
ae392ddbc7eb56cb814b9e9715043c98a89a6314
|
refs/heads/master
| 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,434 |
py
|
class UnionFind:
    """Disjoint-set forest with path compression and union by rank."""

    def __init__(self, n):
        self.par = list(range(n))  # each node starts as its own root
        self.rank = [0] * n

    def find(self, p):
        # Path compression: repoint every visited node straight at the root.
        root = self.par[p]
        if root != p:
            root = self.find(root)
            self.par[p] = root
        return root

    def union(self, n1, n2):
        """Merge the sets of n1 and n2; return False if already merged."""
        r1, r2 = self.find(n1), self.find(n2)
        if r1 == r2:
            return False
        # Union by rank: hang the shallower tree under the deeper one.
        if self.rank[r1] < self.rank[r2]:
            r1, r2 = r2, r1
        elif self.rank[r1] == self.rank[r2]:
            self.rank[r1] += 1
        self.par[r2] = r1
        return True
class Solution:
    def minCostConnectPoints(self, points: List[List[int]]) -> int:
        """Minimum total Manhattan-distance cost to connect all points.

        Builds every pairwise edge and runs Kruskal's algorithm with the
        module's UnionFind helper: O(n^2 log n) for n points.
        """
        return self._minCostConnectPoints(points)

    def _minCostConnectPoints(self, points: List[List[int]]) -> int:
        n = len(points)
        # All pairwise edges, weighted by Manhattan distance.
        edges = []
        for i in range(n):
            for j in range(i + 1, n):
                weight = abs(points[i][0] - points[j][0]) + abs(points[i][1] - points[j][1])
                edges.append((weight, i, j))
        edges.sort()
        # Kruskal: greedily accept the cheapest edge joining two components.
        uf = UnionFind(n)
        total_cost = 0
        used_edges = 0
        for weight, a, b in edges:
            if uf.union(a, b):
                total_cost += weight
                used_edges += 1
                if used_edges == n - 1:  # spanning tree complete
                    break
        return total_cost
|
[
"[email protected]"
] | |
0f782922b34c17a3438dc8c3bef2ffb403a6b2d4
|
b5dd8d1b798c94731a84c02d98aafb9147200a85
|
/sentence_classification/SABaselineSYNTree/data/Dataloader.py
|
8cf5123e195ed3ba7bddf8695827662dae8e3f59
|
[] |
no_license
|
zhangmeishan/DepSAWR
|
1ae348dd04ec5e46bc5a75c8972b4bc4008528fe
|
104f44fd962a42fdee9b1a9332997d35e8461ff4
|
refs/heads/master
| 2021-07-09T20:56:56.897774 | 2020-10-27T05:41:08 | 2020-10-27T05:41:08 | 206,974,879 | 15 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,903 |
py
|
from collections import Counter
from data.Vocab import *
from data.SA import *
import numpy as np
import torch
def read_corpus(file_path):
    """Parse a dependency-annotated sentiment file into Instance objects.

    Each line looks like ``label ||| word_head_rel word_head_rel ...``: the
    part before '|||' is the sentiment tag, and every token after it packs
    the word form, its head index and its dependency relation joined by
    underscores.  Lines without exactly two '|||' sections are skipped.
    """
    data = []
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original open/implicit-close leaked the handle on error).
    with open(file_path, 'r') as infile:
        for line in infile:
            divides = line.strip().split('|||')
            if len(divides) != 2:
                continue  # malformed line — skip, as before
            worditems = divides[1].strip().split(' ')
            words, heads, rels = [], [], []
            for worditem in worditems:
                # The word itself may contain '_', so split on the LAST two
                # underscores: <word>_<head>_<rel>.
                id1 = worditem.rfind('_')
                id2 = worditem.rfind('_', 0, id1 - 1)
                words.append(worditem[:id2])
                heads.append(int(worditem[id2 + 1:id1]))
                rels.append(worditem[id1 + 1:])
            tag = divides[0].strip()
            data.append(Instance(words, heads, rels, tag))
    return data
def creatVocab(corpusFile, min_occur_count):
    """Build an SAVocab from word / relation / tag frequencies in the corpus.

    :param corpusFile: path handed to read_corpus().
    :param min_occur_count: frequency cutoff forwarded to SAVocab.
    """
    word_counter = Counter()
    rel_counter = Counter()
    tag_counter = Counter()
    alldatas = read_corpus(corpusFile)
    for inst in alldatas:
        # heads were iterated but never used in the original loop, so zip
        # only the two sequences that are actually counted.
        for curword, currel in zip(inst.forms, inst.rels):
            word_counter[curword] += 1
            rel_counter[currel] += 1
        tag_counter[inst.tag] += 1
    return SAVocab(word_counter, rel_counter, tag_counter, min_occur_count)
def insts_numberize(insts, vocab):
    """Lazily yield the id-encoded triple for every instance (see inst2id)."""
    for instance in insts:
        yield inst2id(instance, vocab)
def inst2id(inst, vocab):
    """Encode one Instance as ([[word_id, extword_id, rel_id], ...], tag_id, inst)."""
    encoded = [
        [vocab.word2id(form), vocab.extword2id(form), vocab.rel2id(rel)]
        for form, rel in zip(inst.forms, inst.rels)
    ]
    return encoded, vocab.tag2id(inst.tag), inst
def batch_slice(data, batch_size):
    """Yield consecutive batches of ``data``; only the last may be shorter."""
    total_batches = int(np.ceil(len(data) / float(batch_size)))
    for batch_index in range(total_batches):
        start = batch_index * batch_size
        # Every batch is full-sized except possibly the final remainder.
        if batch_index < total_batches - 1:
            size = batch_size
        else:
            size = len(data) - start
        yield [data[start + offset] for offset in range(size)]
def data_iter(data, batch_size, shuffle=True):
    """Optionally shuffle ``data`` in place, cut it into batches, shuffle the
    batch order, and yield one batch at a time.

    Note: despite the original comment, no length-sorting happens here —
    batches are cut from ``data`` in its (possibly shuffled) order.
    """
    if shuffle:
        np.random.shuffle(data)
    batches = list(batch_slice(data, batch_size))
    if shuffle:
        np.random.shuffle(batches)
    for batch in batches:
        yield batch
def batch_data_variable(batch, vocab):
    """Pack a batch of Instances into padded tensors for the model.

    Returns (words, extwords, rels, heads, tags, lengths, masks): the 2-D
    tensors are [batch_size, max_len], right-padded with zeros, and masks
    holds 1.0 for real tokens and 0.0 for padding.
    """
    # Find the longest sentence; all 2-D tensors are allocated at that width.
    length = len(batch[0].forms)
    batch_size = len(batch)
    for b in range(1, batch_size):
        if len(batch[b].forms) > length: length = len(batch[b].forms)
    # Zero-initialized, so untouched cells act as padding (presumably id 0 is
    # the vocab's PAD index — confirm against SAVocab).
    words = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
    extwords = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
    rels = torch.zeros([batch_size, length], dtype=torch.int64, requires_grad=False)
    masks = torch.zeros([batch_size, length], dtype=torch.float, requires_grad=False)
    tags = torch.zeros([batch_size], dtype=torch.int64, requires_grad=False)
    lengths = []  # true (unpadded) length per sentence
    heads = []    # raw head-index lists, passed through untouched
    b = 0
    for inputs, tagid, inst in insts_numberize(batch, vocab):
        index = 0
        # NOTE: ``length`` is reused here as the per-sentence length; the
        # tensors above were already sized with the batch maximum.
        length = len(inputs)
        lengths.append(length)
        heads.append(inst.heads)
        tags[b] = tagid
        for curword in inputs:
            # curword is [word_id, extword_id, rel_id] from inst2id().
            words[b, index] = curword[0]
            extwords[b, index] = curword[1]
            rels[b, index] = curword[2]
            masks[b, index] = 1
            index += 1
        b += 1
    return words, extwords, rels, heads, tags, lengths, masks
def batch_variable_inst(insts, tagids, vocab):
    """Yield (predicted Instance, is_correct) for each (instance, tag id) pair."""
    for instance, predicted_id in zip(insts, tagids):
        predicted_tag = vocab.id2tag(predicted_id)
        rebuilt = Instance(instance.words, instance.heads, instance.rels, predicted_tag)
        yield rebuilt, predicted_tag == instance.tag
|
[
"[email protected]"
] | |
90fa9d60a31619b8f6dcc62b48a721e9613e2b11
|
596e92d0d484b6e7eee6d322e72e52748fdeaa5d
|
/test/test_nba_odds_betting_market.py
|
f8b473cf4cf0f70b059897d47f0677fe275d8489
|
[] |
no_license
|
scottypate/sportsdata
|
f5f61ddc7eb482883f93737c6ce73dd814ed4336
|
a07955ab50bf4fff1ce114ed9895095ff770c473
|
refs/heads/main
| 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,002 |
py
|
# coding: utf-8
"""
NBA v3 Odds
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sportsdata.nba_odds
from sportsdata.nba_odds.models.nba_odds_betting_market import NbaOddsBettingMarket # noqa: E501
from sportsdata.nba_odds.rest import ApiException
class TestNbaOddsBettingMarket(unittest.TestCase):
    """NbaOddsBettingMarket unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed yet; kept for the standard unittest lifecycle.
        pass

    def tearDown(self):
        pass

    def testNbaOddsBettingMarket(self):
        """Test NbaOddsBettingMarket"""
        # FIXME: construct object with mandatory attributes with example values
        # model = sportsdata.nba_odds.models.nba_odds_betting_market.NbaOddsBettingMarket()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
6b1a21e187c9c79f07f14c5b2f5a3a03fcf94808
|
c7b4baa2779a0fc02e363f07c88b4d1d8cc33ffe
|
/gahtc/website/migrations/0017_auto_20151121_2057.py
|
000afaafe3f0c79142b2d2f5dc90553177043f8f
|
[] |
no_license
|
NiJeLorg/GAHTC
|
6d5c8b2d4b9244c8874ad60c16cd7d55a3535075
|
8ba3360f6e2a8ad0b937a60c3c022eaac4a7cd46
|
refs/heads/master
| 2022-12-08T19:26:05.800635 | 2018-06-07T02:31:43 | 2018-06-07T02:31:43 | 41,111,268 | 2 | 0 | null | 2022-11-22T01:43:36 | 2015-08-20T18:07:02 |
HTML
|
UTF-8
|
Python
| false | false | 1,262 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches the ``created`` field of each
    # listed model to ``auto_now_add=True`` so the timestamp is set once at
    # row creation.  Applied migrations should not be edited beyond comments.

    dependencies = [
        ('website', '0016_bundlelecturesegments'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lecturedocuments',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='lectures',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='lecturesegments',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='lectureslides',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='moduledocuments',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='modules',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
[
"[email protected]"
] | |
1fbca8a60b71a90686126ab10fe2745039344b6c
|
84a5c4c2e0977d42425771098f5f881c750da7f0
|
/neomodel_constraints/fetcher/constraints/v4_1.py
|
3bb76bcc8e48273575a33bd210c5b02c050d2956
|
[] |
no_license
|
SSripilaipong/neomodel-constraints
|
6c3023ba156275e48f5f7ebcbdd283ce8d41f9a1
|
4b91185ba9eec993c58e9ae770fd3d0e90f915ae
|
refs/heads/main
| 2023-07-15T09:58:41.451631 | 2021-08-29T13:19:38 | 2021-08-29T13:19:38 | 390,312,509 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,883 |
py
|
import re
from typing import List, Dict
from neomodel_constraints.connection import ConnectionAbstract
from neomodel_constraints.constraint import ConstraintSet, TypeMapperAbstract
from neomodel_constraints.fetcher.abstract import FetcherAbstract
from .data import Neo4jConstraintQueryRecord
from .util import convert_constraints_with_type_mapper
def extract_record_detail(detail: str) -> Dict:
    """Parse one Neo4j 4.1 ``db.constraints`` 'details' string into a dict
    matching the structured record shape expected downstream.

    Example input:
        "Constraint( id=4, name='uniq', type='UNIQUENESS',
          schema=(:Person {email}), ownedIndex=3 )"
    """
    # Strip the outer "Constraint( ... )" wrapper first.
    body = re.findall(r'^Constraint\((.*)\)$', detail)[0]
    # Pick each field out of the parameter string with targeted regexes.
    constraint_id = re.findall(r"id=(\d+?),", body)[0]
    constraint_name = re.findall(r"name='(\w+?)',", body)[0]
    constraint_type = re.findall(r"type='(\w+?)',", body)[0]
    label, prop = re.findall(r"schema=\(:([^ ]+) {(\w+)}\)", body)[0]
    owned_index = re.findall(r"ownedIndex=(\d+)", body)[0]
    return {
        'id': constraint_id,
        'ownedIndexId': owned_index,
        'entityType': 'NODE',
        'labelsOrTypes': [label],
        'type': constraint_type,
        'name': constraint_name,
        'properties': [prop],
    }
class ConstraintsFetcher(FetcherAbstract):
    """Fetches constraints from a Neo4j 4.1 server.

    Neo4j 4.1's ``CALL db.constraints`` only returns a free-form 'details'
    string per row, so each row is rebuilt into a structured record with
    extract_record_detail() before conversion.
    """
    def __init__(self, connection: ConnectionAbstract, type_mapper: TypeMapperAbstract):
        self.connection: ConnectionAbstract = connection
        self.type_mapper: TypeMapperAbstract = type_mapper

    def _fetch_raw_data(self) -> List[Neo4jConstraintQueryRecord]:
        # Parse each row's 'details' string, then validate/construct records.
        raw = self.connection.execute('CALL db.constraints')
        records = [extract_record_detail(record['details']) for record in raw]
        return [Neo4jConstraintQueryRecord(**record) for record in records]

    def _convert_constraints(self, raw: List[Neo4jConstraintQueryRecord]) -> ConstraintSet:
        # Type translation is delegated to the injected TypeMapper.
        return convert_constraints_with_type_mapper(raw, self.type_mapper)

    def fetch(self) -> ConstraintSet:
        """Fetch all constraints from the server as a ConstraintSet."""
        raw: List[Neo4jConstraintQueryRecord] = self._fetch_raw_data()
        constraints: ConstraintSet = self._convert_constraints(raw)
        return constraints
|
[
"[email protected]"
] | |
cc344ab268871e68262997bb1a4edd0560a0baf8
|
9467f3a54b19098766a3b0341eaac51617fc321b
|
/utils/build_batch.py
|
7fafcc95dbe491be9a2c7c8f0a11100d7a88fc38
|
[] |
no_license
|
liangjies/Sentiment-Analysis
|
1eedaa583d68536f92944b59ee6f8b6dedbc4a99
|
beca6c6612cc3b38b28d711eb39eb72424bdde00
|
refs/heads/master
| 2020-11-24T05:25:56.081258 | 2019-12-19T08:21:32 | 2019-12-19T08:21:32 | 227,983,207 | 0 | 0 | null | 2019-12-14T07:24:42 | 2019-12-14T07:24:41 | null |
UTF-8
|
Python
| false | false | 6,387 |
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.6
@author: 'zenRRan'
@license: Apache Licence
@contact: [email protected]
@software: PyCharm
@file: build_batch.py
@time: 2018/10/15 10:44
"""
import random
class Build_Batch:
def __init__(self, features, opts, batch_size, pad_idx, char_padding_id,rel_padding_id=None):
self.batch_size = batch_size
self.features = features
self.shuffle = opts.shuffle
self.sort = opts.sort
self.batch_num = 0
self.batch_features = []
self.data_batchs = [] # [(data, label)]
self.PAD = pad_idx
self.CPAD = char_padding_id
self.RPAD = rel_padding_id
random.seed(opts.seed)
def create_same_sents_length_one_batch(self):
'''
:return:[[[x x x x]
[x x x x]]
[[x x x o]
[x x x o]
[x x x o]]]
'''
self.features = self.sort_features(self.features)
new_list = []
self.batch_features = []
self.data_batchs = []
same_len = True
for feature in self.features:
if len(new_list) != 0 and len(feature.words) != len(new_list[-1].words):
same_len = False
if same_len and len(new_list) < self.batch_size:
new_list.append(feature)
else:
new_list = self.shuffle_data(new_list)
self.batch_features.append(new_list)
ids, char_ids, labels, forest, heads, children_batch_list, tag_rels = self.choose_data_from_features(new_list)
ids_lengths = [len(id) for id in ids]
ids = self.add_pad(ids, self.PAD)
tag_rels = self.add_pad(tag_rels, self.RPAD)
char_ids = self.add_char_pad(char_ids, ids, self.CPAD)
self.data_batchs.append((ids, labels, char_ids, forest, heads, children_batch_list, ids_lengths, tag_rels))
new_list = []
same_len = True
new_list.append(feature)
self.batch_features = self.shuffle_data(self.batch_features)
self.data_batchs = self.shuffle_data(self.data_batchs)
return self.batch_features, self.data_batchs
def create_sorted_normal_batch(self):
'''
:return: [[[x x o o]
[x x x o]
[x x x o]]
[[x x x o]
[x x x x]]]
'''
self.features = self.sort_features(self.features)
new_list = []
self.batch_features = []
self.data_batchs = []
self.features.append([])
for idx, feature in enumerate(self.features):
if len(new_list) < self.batch_size and idx+1 != len(self.features):
new_list.append(feature)
else:
self.batch_num += 1
new_list = self.shuffle_data(new_list)
self.batch_features.append(new_list)
ids, char_ids, labels, forest, heads, children_batch_list, tag_rels = self.choose_data_from_features(new_list)
ids_lengths = [len(id) for id in ids]
ids = self.add_pad(ids, self.PAD)
tag_rels = self.add_pad(tag_rels, self.RPAD)
char_ids = self.add_char_pad(char_ids, ids, self.CPAD)
self.data_batchs.append((ids, labels, char_ids, forest, heads, children_batch_list, ids_lengths, tag_rels))
new_list = []
new_list.append(feature)
self.batch_features = self.shuffle_data(self.batch_features)
self.data_batchs = self.shuffle_data(self.data_batchs)
return self.batch_features, self.data_batchs
def choose_data_from_features(self, features):
ids = []
char_ids = []
labels = []
heads = []
forest = []
# bfs_batch_list = []
children_batch_list = []
tag_rels = []
for feature in features:
ids.append(feature.ids)
char_ids.append(feature.char_ids)
labels.append(feature.label)
heads.append(feature.heads)
forest.append(feature.root)
# bfs_batch_list.append(feature.bfs_list)
tag_rels.append(feature.rels_ids)
rel = [tree.children_index_list for tree in feature.forest]
max_len = feature.length
new_rel = [[0 for _ in range(max_len)] for _ in range(max_len)]
for i, each in enumerate(rel):
for j, index in enumerate(each):
new_rel[i][index] = 1
children_batch_list.append(new_rel)
return ids, char_ids, labels, forest, heads, children_batch_list, tag_rels
def add_char_pad(self, data_list, sents_ids_list, PAD):
'''
:param data_list:[[[x x], [x x x],...],[[x], [x x],...]]
:param PAD: PAD id
:return: [[[x x o], [x x x],...],[[x o], [x x],...]]
'''
new_data_list = []
for sent_list, sent in zip(data_list, sents_ids_list):
word_len = len(sent)
max_len = 0
new_sent_list = []
for word_list in sent_list:
max_len = max(max_len, len(word_list))
for word_list in sent_list:
new_sent_list.append(word_list + [PAD] * (max_len - len(word_list)))
new_data_list.append(new_sent_list + [[PAD] * max_len] * (word_len - len(new_sent_list)))
return new_data_list
def add_pad(self, data_list, PAD):
'''
:param data_list: [[x x x], [x x x x],...]
:return: [[x x x o o], [x x x x o],...]
'''
max_len = 0
new_data_list = []
for data in data_list:
max_len = max(max_len, len(data))
for data in data_list:
new_data_list.append(data + [PAD]*(max_len - len(data)))
return new_data_list
def sort_features(self, features):
if self.sort:
features = sorted(features, key=lambda feature: feature.length)
return features
def shuffle_data(self, data):
if self.shuffle:
random.shuffle(data)
return data
|
[
"[email protected]"
] | |
5aa0c3741468196957ffba57ea37b13e03fee079
|
1eab574606dffb14a63195de994ee7c2355989b1
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/capture/currentpacket/stack/stack.py
|
584e79fdba6258677a42f35e8cbaf4e10d7896e7
|
[
"MIT"
] |
permissive
|
steiler/ixnetwork_restpy
|
56b3f08726301e9938aaea26f6dcd20ebf53c806
|
dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9
|
refs/heads/master
| 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null |
UTF-8
|
Python
| false | false | 3,235 |
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Stack(Base):
    """This object specifies the stack properties.

    The Stack class encapsulates a list of stack resources that is managed by the system.
    A list of resources can be retrieved from the server using the Stack.find() method.

    NOTE(review): this file follows the ixnetwork_restpy generated-SDK
    pattern; regenerate rather than hand-edit where possible.
    """
    __slots__ = ()
    _SDM_NAME = 'stack'

    def __init__(self, parent):
        super(Stack, self).__init__(parent)

    @property
    def Field(self):
        """An instance of the Field class.

        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.currentpacket.stack.field.field.Field)

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily to avoid a circular import at module load time.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.currentpacket.stack.field.field import Field
        return Field(self)

    @property
    def DisplayName(self):
        """Refers to the name of the stack.

        Returns:
            str
        """
        return self._get_attribute('displayName')

    def find(self, DisplayName=None):
        """Finds and retrieves stack data from the server.

        All named parameters support regex and can be used to selectively retrieve stack data from the server.
        By default the find method takes no parameters and will retrieve all stack data from the server.

        Args:
            DisplayName (str): Refers to the name of the stack.

        Returns:
            self: This instance with matching stack data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # _select presumably treats the locals() dict (self + the named filter
        # arguments) as the match criteria — see Base for the contract.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of stack data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the stack data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
[
"[email protected]"
] | |
1259b25afb75ee0bfcc7c6e204f0ba8394d94744
|
1e82a5c6145fbd6861b863f95613e9406f434559
|
/function_scheduling_distributed_framework/publishers/base_publisher.py
|
8febd4bc61059b4b42707d4b0f36cee56e8a3ab1
|
[
"Apache-2.0"
] |
permissive
|
leiyugithub/distributed_framework
|
e6c83cf09faa5ee0d6d0ccc1e38fb6729a260c9b
|
7a9c74e807f51680c25a9956e49ab319a8943a37
|
refs/heads/master
| 2020-12-07T13:23:24.354917 | 2020-01-08T08:18:47 | 2020-01-08T08:18:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,941 |
py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 11:57
import abc
import atexit
import json
import uuid
import time
import typing
from functools import wraps
from threading import Lock
import amqpstorm
from pika.exceptions import AMQPError as PikaAMQPError
from function_scheduling_distributed_framework.utils import LoggerLevelSetterMixin, LogManager, decorators, RedisMixin
class RedisAsyncResult(RedisMixin):
    """Blocking handle for an RPC-style task result stored in a Redis list.

    The first access to the result does a blocking pop on the task's Redis
    key; the decoded payload is then cached on the instance.
    """

    def __init__(self, task_id, timeout=120):
        self.task_id = task_id
        self.timeout = timeout
        self._has_pop = False
        self._status_and_result = None

    def set_timeout(self, timeout=60):
        """Change the blocking-pop timeout; returns self for chaining."""
        self.timeout = timeout
        return self

    @property
    def status_and_result(self):
        # Block only on first access, then serve the cached payload.
        # NOTE(review): blpop returns None on timeout, which would make the
        # [1] subscript raise TypeError — confirm that is intended.
        if not self._has_pop:
            popped = self.redis_db_frame.blpop(self.task_id, self.timeout)
            self._status_and_result = json.loads(popped[1])
            self._has_pop = True
        return self._status_and_result

    def get(self):
        """Return the task's result value (blocks until available)."""
        return self.status_and_result['result']

    @property
    def result(self):
        return self.get()

    def is_success(self):
        """True if the consumer reported the task as successful."""
        return self.status_and_result['success']
class PriorityConsumingControlConfig:
    """Per-message consuming-control overrides.

    These settings travel with a single published task and take precedence
    over the consumer-level defaults — e.g. giving one task its own timeout,
    retry count, expiry, or switching RPC mode on for just that task.
    """

    def __init__(self, function_timeout: float = None, max_retry_times: int = None,
                 is_print_detail_exception: bool = None,
                 msg_expire_senconds: int = None,
                 is_using_rpc_mode: bool = None):
        self.function_timeout = function_timeout
        self.max_retry_times = max_retry_times
        self.is_print_detail_exception = is_print_detail_exception
        self.msg_expire_senconds = msg_expire_senconds
        self.is_using_rpc_mode = is_using_rpc_mode

    def to_dict(self):
        """Expose the overrides as the instance's attribute dict."""
        return self.__dict__
class AbstractPublisher(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
    """Abstract base class for queue publishers.

    Subclasses implement the broker-specific primitives
    (concrete_realization_of_publish / clear / get_message_count / close);
    this base handles the message envelope, throughput logging, and
    RPC-style result handles backed by Redis.
    """
    # Flipped to 1 (as an instance attribute) by deco_mq_conn_error once the
    # broker connection has been lazily initialized.
    has_init_broker = 0

    def __init__(self, queue_name, log_level_int=10, logger_prefix='', is_add_file_handler=True, clear_queue_within_init=False, is_add_publish_time=True, ):
        """
        :param queue_name: name of the queue messages are published to.
        :param log_level_int: logging level for this publisher's logger.
        :param logger_prefix: optional prefix for the logger name.
        :param is_add_file_handler: also write the log to a per-publisher file.
        :param clear_queue_within_init: purge the queue right after construction.
        :param is_add_publish_time: whether to attach the publish time
            (slated for removal — the time is now always attached).
        """
        self._queue_name = queue_name
        if logger_prefix != '':
            logger_prefix += '--'
        logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(log_level_int, log_filename=f'{logger_name}.log' if is_add_file_handler else None)
        # self.rabbit_client = RabbitMqFactory(is_use_rabbitpy=is_use_rabbitpy).get_rabbit_cleint()
        # self.channel = self.rabbit_client.creat_a_channel()
        # self.queue = self.channel.queue_declare(queue=queue_name, durable=True)
        self._lock_for_count = Lock()  # guards the throughput counters below
        self._current_time = None
        self.count_per_minute = None
        self._init_count()
        self.custom_init()
        self.logger.info(f'{self.__class__} 被实例化了')
        self.publish_msg_num_total = 0
        self._is_add_publish_time = is_add_publish_time
        self.__init_time = time.time()
        # Log the lifetime publish count when the interpreter exits.
        atexit.register(self.__at_exit)
        if clear_queue_within_init:
            self.clear()

    def set_is_add_publish_time(self, is_add_publish_time=True):
        """Toggle attaching the publish time; returns self for chaining."""
        self._is_add_publish_time = is_add_publish_time
        return self

    def _init_count(self):
        # Reset the rolling throughput window under the lock.
        with self._lock_for_count:
            self._current_time = time.time()
            self.count_per_minute = 0

    def custom_init(self):
        """Hook for subclass-specific initialization; default is a no-op."""
        pass

    def publish(self, msg: typing.Union[str, dict],
                priority_control_config: PriorityConsumingControlConfig = None):
        """Publish one task message and return a Redis-backed result handle.

        :param msg: the task payload, either a dict or its JSON string.
        :param priority_control_config: optional per-message overrides merged
            into the message's 'extra' envelope.
        """
        if isinstance(msg, str):
            msg = json.loads(msg)
        # Unique key under which the consumer will push the RPC result.
        task_id = f'{self._queue_name}_result:{uuid.uuid4()}'
        msg['extra'] = extra_params = {'task_id': task_id, 'publish_time': round(time.time(), 4), 'publish_time_format': time.strftime('%Y-%m-%d %H:%M:%S')}
        if priority_control_config:
            extra_params.update(priority_control_config.to_dict())
        t_start = time.time()
        # Retry the broker-specific publish up to 10 times before raising.
        decorators.handle_exception(retry_times=10, is_throw_error=True, time_sleep=0.1)(self.concrete_realization_of_publish)(json.dumps(msg,ensure_ascii=False))
        self.logger.debug(f'向{self._queue_name} 队列,推送消息 耗时{round(time.time() - t_start, 4)}秒 {msg}')
        with self._lock_for_count:
            self.count_per_minute += 1
            self.publish_msg_num_total += 1
            if time.time() - self._current_time > 10:
                self.logger.info(f'10秒内推送了 {self.count_per_minute} 条消息,累计推送了 {self.publish_msg_num_total} 条消息到 {self._queue_name} 中')
                self._init_count()
        return RedisAsyncResult(task_id)

    @abc.abstractmethod
    def concrete_realization_of_publish(self, msg):
        """Broker-specific send of one serialized message."""
        raise NotImplementedError

    @abc.abstractmethod
    def clear(self):
        """Purge all pending messages from the queue."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_message_count(self):
        """Return the number of messages currently queued."""
        raise NotImplementedError

    @abc.abstractmethod
    def close(self):
        """Release the broker connection."""
        raise NotImplementedError

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        self.logger.warning(f'with中自动关闭publisher连接,累计推送了 {self.publish_msg_num_total} 条消息 ')

    def __at_exit(self):
        self.logger.warning(f'程序关闭前,{round(time.time() - self.__init_time)} 秒内,累计推送了 {self.publish_msg_num_total} 条消息 到 {self._queue_name} 中')
def deco_mq_conn_error(f):
    """Decorator for broker-channel methods.

    Lazily calls ``init_broker()`` on first use, and on an AMQP connection
    error re-initializes the broker and retries the wrapped call once.
    """
    @wraps(f)
    def _deco_mq_conn_error(self, *args, **kwargs):
        if not self.has_init_broker:
            self.logger.warning(f'对象的方法 【{f.__name__}】 首次使用 rabbitmq channel,进行初始化执行 init_broker 方法')
            self.init_broker()
            self.has_init_broker = 1  # instance attribute shadows the class-level default
            return f(self, *args, **kwargs)
        # noinspection PyBroadException
        try:
            return f(self, *args, **kwargs)
        except (PikaAMQPError, amqpstorm.AMQPError) as e:  # the decorator is now used in many places, so a single concrete exception type is not enough
            self.logger.error(f'rabbitmq链接出错 ,方法 {f.__name__} 出错 ,{e}')
            self.init_broker()
            return f(self, *args, **kwargs)
    return _deco_mq_conn_error
|
[
"[email protected]"
] | |
37251f91b138c8ef98d57d8e1e0107a83c10e7d2
|
361756a29c63961fd02bd335aca629322b7989a7
|
/Week 3/code/q1/spark-app.py
|
459713353fa52e4fc715b7874ad7111b09db3b46
|
[] |
no_license
|
bbengfort/introduction-to-hadoop-and-spark
|
67eadf923028cd53cfcec21fd1a521f6d5fe3569
|
14b9ebd87984277b2a02cdffad0db27082b4d3e9
|
refs/heads/master
| 2022-12-02T08:00:46.975122 | 2015-12-01T20:37:59 | 2015-12-01T20:37:59 | 46,567,192 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,402 |
py
|
#!/usr/bin/env python
# spark-app.py
# A Spark application that computes the MSE of a linear model on test data.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Nov 12 07:29:58 2015 -0500
"""
A Spark application that computes the MSE of a linear model on test data.
"""
##########################################################################
## Imports
##########################################################################
import sys
import csv
from functools import partial
from StringIO import StringIO
from pyspark import SparkConf, SparkContext
##########################################################################
## Global Variables
##########################################################################
APP_NAME = "MSE of Blog Comments Regression"
##########################################################################
## Helper functions
##########################################################################
def parse(line):
    """
    Splits the line on a CSV and parses it into floats. Returns a tuple of:
    (X, y) where X is the vector of independent variables and y is the target
    (dependent) variable; in this case the last item in the row.
    """
    reader = csv.reader(StringIO(line))
    # next(reader) instead of reader.next(): works on both Python 2.6+ and
    # Python 3 (reader.next() was removed in Python 3).
    row = [float(x) for x in next(reader)]
    return (tuple(row[:-1]), row[-1])
def cost(row, coef, intercept):
    """
    Computes the square error of the linear model's prediction for one row.

    *row* is a ``(X, y)`` tuple; *coef* and *intercept* are Spark broadcast
    variables (their payloads live in ``.value``).
    """
    features, target = row
    # Dot product of the broadcast coefficients with the feature vector,
    # plus the broadcast intercept, gives the model's prediction.
    prediction = intercept.value
    for weight, feature in zip(coef.value, features):
        prediction += weight * feature
    # Square of the residual.
    error = target - prediction
    return error * error
##########################################################################
## Primary Analysis and Main Method
##########################################################################
def main(sc):
    """
    Primary analysis mechanism for Spark application: loads the linear-model
    parameters from params.txt, broadcasts them, and prints the mean squared
    error of the model over the blogData set on HDFS.
    """

    # Load coefficients and intercept from local file
    coef = []
    intercept = None

    # Load the parameters from the text file
    with open('params.txt', 'r') as params:
        # Read the file and split on new lines and parse into floats
        data = [
            float(row.strip())
            for row in params.read().split("\n")
            if row.strip()
        ]
        coef = data[:-1]  # Everything but the last value are the thetas (coefficients)
        intercept = data[-1]  # The last value is the intercept

    # Broadcast the parameters across the Spark cluster
    # Note that this data is small enough you could have used a closure
    coef = sc.broadcast(coef)
    intercept = sc.broadcast(intercept)

    # Create an accumulator to sum the squared error
    sum_square_error = sc.accumulator(0)

    # Load and parse the blog data from HDFS and insert into an RDD
    blogs = sc.textFile("blogData").map(parse)

    # Map the cost function and accumulate the sum.
    # NOTE(review): the lambda parameter `cost` shadows the cost() function
    # defined above -- harmless here, but easy to misread.
    error = blogs.map(partial(cost, coef=coef, intercept=intercept))
    error.foreach(lambda cost: sum_square_error.add(cost))

    # Print and compute the mean (Python 2 print statement).
    print sum_square_error.value / error.count()
if __name__ == '__main__':
# Configure Spark
conf = SparkConf().setAppName(APP_NAME)
sc = SparkContext(conf=conf)
# Execute Main functionality
main(sc)
|
[
"[email protected]"
] | |
bc6fc44f74c3620fd9e7b7d0a2ee996258b7e087
|
2346aac932096d7161591afc8f07105eba6de558
|
/chapter25_maskrcnn/object_detection_example.py
|
ea42017f0adf198b95c31e44d4fdb41ffd878eae
|
[] |
no_license
|
cheeyeo/deep_learning_computer_vision
|
3436ac568539bd9ad060c9d81542e82c42e40ff2
|
44fb74e70e7d40717214cd2a0ac2aa6d3bbf5b58
|
refs/heads/master
| 2020-07-17T00:17:14.112988 | 2019-10-29T15:39:18 | 2019-10-29T15:39:18 | 205,898,970 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,545 |
py
|
# Example on object detection using Mask R-CNN Library
# Uses a pre-trained Mask R-CNN model trained on MSCOCO dataset
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from mrcnn.visualize import display_instances
from mrcnn.model import MaskRCNN
import os
import argparse
from utils import draw_image_with_boxes, load_coco_classes
from config import TestConfig
# Command line: input image (required), weights path, and a flag choosing
# between full instance display and plain bounding-box output.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True, help="Image to perform object recognition on.")
ap.add_argument("-m", "--model", default="data/mask_rcnn_coco.h5", type=str, help="Model weights for Mask R-CNN model.")
ap.add_argument("-o", "--object-detection", action="store_true", help="Perform object detection using Mask R-CNN model.")
args = vars(ap.parse_args())

# Define and load model: inference-mode Mask R-CNN with pretrained weights.
rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
rcnn.load_weights(args["model"], by_name=True)

# Load the image and convert it to the array form the model expects.
img = load_img(args["image"])
img_pixels = img_to_array(img)

# Run detection; rcnn.detect returns one result dict per input image.
results = rcnn.detect([img_pixels], verbose=0)
r = results[0]

if args["object_detection"]:
    print("[INFO] Performing object detection using display_instances...")
    # define 81 classes that the coco model knowns about
    class_names = load_coco_classes('data/coco_classes.txt')
    # Overlay masks, boxes, class labels and scores on the image.
    display_instances(img_pixels, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
else:
    # Draw only the bounding boxes and save the annotated image under out/.
    draw_image_with_boxes(img, r['rois'])
    print('[INFO] Saving image with bounding boxes')
    img.save(os.path.join('out', args["image"]))
|
[
"[email protected]"
] | |
2dcf86b8d3b334a27a7962ae098f62af4a037e83
|
e0cfb71a4268367fab77253a2460714a16e830aa
|
/doctorbot/website/views.py
|
32da7106dfec332ef5bf99c76e01b6ff6d1f540a
|
[
"MIT"
] |
permissive
|
zuxfoucault/DoctorBot_demo
|
79b40548dfd5f34b0f2ccb7857e9377610394608
|
82e24078da4d2e6caba728b959812401109e014d
|
refs/heads/master
| 2020-04-24T01:10:17.010551 | 2019-02-20T02:57:57 | 2019-02-20T02:57:57 | 171,589,459 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
from rest_framework import generics
from rest_framework import permissions
from rest_framework.decorators import api_view, permission_classes
# Create your views here.
@api_view(['GET'])
def index_view(requests):
return render_to_response('index.html')
|
[
"[email protected]"
] | |
d135c72038a9c0c01be8b4b8ae588403decf6726
|
a9b05f3de50bf287b914d4786537cc81a208eaf8
|
/preprocessing/migrations/0001_initial.py
|
6d47579c623896aa6d24f9f404c75fbffc4f2935
|
[] |
no_license
|
15101538237ren/AccidentsPrediction
|
21b23ee60ca1bf8f7aee12f515db046f0bd94799
|
b0248c9fc8c1c5018f79083adc4c2b8130e2dba0
|
refs/heads/master
| 2018-11-04T18:27:54.049460 | 2018-01-09T13:25:48 | 2018-01-09T13:25:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,709 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates three geolocated incident-report models
    # (App_Incidence, Call_Incidence, Violation).  All three share a
    # soft-delete pattern (nullable, non-editable `removed` timestamp) and
    # store coordinates as Decimal(10, 7).  The byte-string verbose_name
    # values are UTF-8 encoded Chinese labels (e.g. "经度"/longitude).

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='App_Incidence',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('place', models.TextField(verbose_name=b'\xe5\x9c\xb0\xe7\x82\xb9')),
                ('create_time', models.DateTimeField(verbose_name=b'\xe4\xb8\xbe\xe6\x8a\xa5\xe6\x97\xb6\xe9\x97\xb4')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Call_Incidence',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('create_time', models.DateTimeField(verbose_name=b'122\xe6\x8a\xa5\xe8\xad\xa6\xe6\x97\xb6\xe9\x97\xb4')),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('place', models.TextField(verbose_name=b'\xe5\x9c\xb0\xe7\x82\xb9')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Violation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('removed', models.DateTimeField(default=None, null=True, editable=False, blank=True)),
                ('longitude', models.DecimalField(verbose_name=b'\xe7\xbb\x8f\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('latitude', models.DecimalField(verbose_name=b'\xe7\xba\xac\xe5\xba\xa6', max_digits=10, decimal_places=7)),
                ('create_time', models.DateTimeField(verbose_name=b'\xe4\xb8\xbe\xe6\x8a\xa5\xe6\x97\xb6\xe9\x97\xb4')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"[email protected]"
] | |
39f20b69aac765749dce3c577325b4782d937cad
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03855/s443395866.py
|
9abfb8ce858f7f0b73fac8a310592f783ae12145
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,472 |
py
|
from sys import stdin
import sys
import math
from functools import reduce
import functools
import itertools
from collections import deque,Counter
from operator import mul
import copy
# Read n vertices, k road edges and l rail edges; build both adjacency lists.
n, k, l = list(map(int, input().split()))
road = [[] for _ in range(n + 1)]
rail = [[] for _ in range(n + 1)]
for _ in range(k):
    p, q = list(map(int, input().split()))
    road[p].append(q)
    road[q].append(p)
for _ in range(l):
    r, s = list(map(int, input().split()))
    rail[r].append(s)
    rail[s].append(r)

seen = [0 for _ in range(n + 1)]


def dfs_stack(u, al, al_c, d):
    """Iterative DFS from u over adjacency list `al`, labelling every vertex
    of u's connected component with id `d` in `al_c`.

    Uses the module-level `seen` array to avoid revisits.  (The original
    version had an unreachable `if stack == []: break` at the end of the
    loop body -- the while condition already handles that.)
    """
    stack = deque([u])
    seen[u] = 1
    while len(stack) > 0:
        v = stack.pop()
        al_c[v] = d
        for w in al[v]:
            if seen[w] == 0:
                stack.append(w)
                seen[w] = 1


# Label road-connected components 0, 1, 2, ...
road_c = [-1 for _ in range(n + 1)]
rail_c = [-1 for _ in range(n + 1)]
d = 0
for i in range(1, n + 1):
    if seen[i] == 0:
        dfs_stack(i, road, road_c, d)
        d += 1
# Reset the visited array and label rail-connected components.
seen = [0 for _ in range(n + 1)]
d = 0
for i in range(1, n + 1):
    if seen[i] == 0:
        dfs_stack(i, rail, rail_c, d)
        d += 1

# The answer for each vertex is the number of vertices sharing both its road
# component and its rail component.  Counter (imported at the top of the
# file) replaces the manual dict-of-lists, and avoids shadowing `dict`.
pair_count = Counter((road_c[i], rail_c[i]) for i in range(1, n + 1))
print(' '.join(str(pair_count[(road_c[i], rail_c[i])]) for i in range(1, n + 1)))
|
[
"[email protected]"
] | |
5c9c62c3aa48e5a6db377c6d30804071a57f9151
|
abbb1e132b3d339ba2173129085f252e2f3311dc
|
/model-optimizer/extensions/back/CorrectName.py
|
1d1e9c0dd5231d964e8ac163b9be70176224efee
|
[
"Apache-2.0"
] |
permissive
|
0xF6/openvino
|
56cce18f1eb448e25053fd364bcbc1da9f34debc
|
2e6c95f389b195f6d3ff8597147d1f817433cfb3
|
refs/heads/master
| 2022-12-24T02:49:56.686062 | 2020-09-22T16:05:34 | 2020-09-22T16:05:34 | 297,745,570 | 2 | 0 |
Apache-2.0
| 2020-09-22T19:03:06 | 2020-09-22T19:03:04 | null |
UTF-8
|
Python
| false | false | 1,807 |
py
|
"""
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.graph.graph import Graph
from mo.back.replacement import BackReplacementPattern
class RestoreOriginalFrameworkName(BackReplacementPattern):
    """
    This transformation restores layer names to their original framework names.

    To perform this correction, the framework layer name should be stored in the
    attribute 'framework_node_name'.

    In some cases, renaming is necessary only if some condition is fulfilled.
    Such a condition should be a function stored in the attribute
    'rename_condition'.  For example, in the transformation
    SoftmaxONNXFrontReplacer such a condition is
         lambda n: len(n.graph.get_op_nodes(name=node_name)) == 0
    """
    enabled = True

    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes():
            # Only nodes that recorded their original framework name qualify.
            if not node.has_valid('framework_node_name'):
                continue

            if node.has_valid('rename_condition'):
                # Conditional rename: evaluate the predicate, then drop it so
                # the callable attribute does not linger on the node.
                need_renaming = node['rename_condition'](node)
                del node['rename_condition']
                if need_renaming:
                    node.name = node['framework_node_name']
            else:
                # Unconditional rename.
                node.name = node['framework_node_name']

            # The bookkeeping attribute is consumed either way.
            del node['framework_node_name']
|
[
"[email protected]"
] | |
0699c6935eb3618c4450c5e89f3ea0ee05bf01ae
|
cb35df97989fcc46831a8adb8de3434b94fd2ecd
|
/tests/benchmarks/bm_point_mesh_distance.py
|
bc1da12883da43fa792ce14d561ae6af072b7a70
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
facebookresearch/pytorch3d
|
6d93b28c0f36a4b7efa0a8143726200c252d3502
|
a3d99cab6bf5eb69be8d5eb48895da6edd859565
|
refs/heads/main
| 2023-09-01T16:26:58.756831 | 2023-08-26T20:55:56 | 2023-08-26T20:55:56 | 217,433,767 | 7,964 | 1,342 |
NOASSERTION
| 2023-08-25T10:00:26 | 2019-10-25T02:23:45 |
Python
|
UTF-8
|
Python
| false | false | 1,106 |
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
from fvcore.common.benchmark import benchmark
from tests.test_point_mesh_distance import TestPointMeshDistance
def bm_point_mesh_distance() -> None:
    """Benchmark point-to-mesh edge and face distance over a grid of sizes.

    Builds every combination of batch size, vertex/face/point counts and
    device, then runs both benchmark kernels once per configuration.
    """
    devices = ["cuda:0"]
    batch_sizes = [4, 8, 16]
    vert_counts = [100, 1000]
    face_counts = [300, 3000]
    point_counts = [5000, 10000]
    # One kwargs dict per (N, V, F, P, device) combination, in product order.
    kwargs_list = [
        {"N": n, "V": v, "F": f, "P": p, "device": dev}
        for n, v, f, p, dev in product(
            batch_sizes, vert_counts, face_counts, point_counts, devices
        )
    ]
    benchmark(
        TestPointMeshDistance.point_mesh_edge,
        "POINT_MESH_EDGE",
        kwargs_list,
        warmup_iters=1,
    )
    benchmark(
        TestPointMeshDistance.point_mesh_face,
        "POINT_MESH_FACE",
        kwargs_list,
        warmup_iters=1,
    )
)
if __name__ == "__main__":
bm_point_mesh_distance()
|
[
"[email protected]"
] | |
1a0c68fc136cb8faba43b827a4977ac6ec13bb9f
|
b059c2cf1e19932abb179ca3de74ced2759f6754
|
/S20/day29/server.py
|
02b38215230e14e190eb4dd508011026298f47aa
|
[] |
no_license
|
Lwk1071373366/zdh
|
a16e9cad478a64c36227419d324454dfb9c43fd9
|
d41032b0edd7d96e147573a26d0e70f3d209dd84
|
refs/heads/master
| 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,022 |
py
|
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9001))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
#
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9002))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# # print(conn)
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import socket
# import json
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
#
# conn,addr = sk.accept()
# num = sk.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.rece(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import struct
# import socket
#
# sk= socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = sk.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filesize'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json
# import socket
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filesize'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import socket
# import json
# import struct
#
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic =conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write()
# conn.close()
# sk.close()
# import socket
# import json
# import struct
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num =conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
# import json,socket,struct
# sk = socket.socket()
# sk.bind(('127.0.0.1',9000))
# sk.listen()
# conn,addr = sk.accept()
# num = conn.recv(4)
# num = struct.unpack('i',num)[0]
# str_dic = conn.recv(num).decode('utf-8')
# dic = json.loads(str_dic)
# with open(dic['filename'],'wb') as f :
# content = conn.recv(dic['filesize'])
# f.write(content)
# conn.close()
# sk.close()
|
[
"[email protected]"
] | |
8d8b659f31f0b33986e1d7bd43984a45e18577ac
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/virtual_network_tap.py
|
93c6e03ece13f3201e81b3d067b4e9c1753d2d04
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 |
MIT
| 2020-10-02T01:17:02 | 2019-05-22T07:33:46 |
Python
|
UTF-8
|
Python
| false | false | 4,312 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkTap(Resource):
    """Virtual Network Tap resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :ivar network_interface_tap_configurations: Specifies the list of resource
     IDs for the network interface IP configuration that needs to be tapped.
    :vartype network_interface_tap_configurations:
     list[~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceTapConfiguration]
    :ivar resource_guid: The resourceGuid property of the virtual network tap.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the virtual network
     tap. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param destination_network_interface_ip_configuration: The reference to
     the private IP Address of the collector nic that will receive the tap
    :type destination_network_interface_ip_configuration:
     ~azure.mgmt.network.v2018_12_01.models.NetworkInterfaceIPConfiguration
    :param destination_load_balancer_front_end_ip_configuration: The reference
     to the private IP address on the internal Load Balancer that will receive
     the tap
    :type destination_load_balancer_front_end_ip_configuration:
     ~azure.mgmt.network.v2018_12_01.models.FrontendIPConfiguration
    :param destination_port: The VXLAN destination port that will receive the
     tapped traffic.
    :type destination_port: int
    :param etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """

    # Fields flagged readonly are populated by the service and must not be
    # sent on requests (AutoRest-generated validation table).
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'network_interface_tap_configurations': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names to wire keys and serialization types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'network_interface_tap_configurations': {'key': 'properties.networkInterfaceTapConfigurations', 'type': '[NetworkInterfaceTapConfiguration]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'destination_network_interface_ip_configuration': {'key': 'properties.destinationNetworkInterfaceIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'destination_load_balancer_front_end_ip_configuration': {'key': 'properties.destinationLoadBalancerFrontEndIPConfiguration', 'type': 'FrontendIPConfiguration'},
        'destination_port': {'key': 'properties.destinationPort', 'type': 'int'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkTap, self).__init__(**kwargs)
        # Read-only properties start as None and are filled in by the server.
        self.network_interface_tap_configurations = None
        self.resource_guid = None
        self.provisioning_state = None
        # Writable properties come from keyword arguments.
        self.destination_network_interface_ip_configuration = kwargs.get('destination_network_interface_ip_configuration', None)
        self.destination_load_balancer_front_end_ip_configuration = kwargs.get('destination_load_balancer_front_end_ip_configuration', None)
        self.destination_port = kwargs.get('destination_port', None)
        self.etag = kwargs.get('etag', None)
|
[
"[email protected]"
] | |
8617e5ac82e89f58b2116bb99003c611dc46da49
|
7d90e6bebce35d5810da7cb9180f34bfa7398d38
|
/guestbook/tests/models_tests.py
|
dde2c5be1e1420dfe441443bb112fae8d81c8d40
|
[] |
no_license
|
mbrochh/pugsg_20120419
|
bd4c5fc2ec9edbb6a8f72e1165df46aed00cc88f
|
0d2d396863e4d25a0cb2e97d30b16ebbd6283d0c
|
refs/heads/master
| 2021-01-01T17:57:22.171950 | 2012-04-19T10:20:23 | 2012-04-19T10:20:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
"""Tests for the models of the ``guestbook`` app."""
from django.test import TestCase
from guestbook.tests.factories import GuestbookFactory
class GuestbookEntryTestCase(TestCase):
    """Tests for the ``GuestbookEntry`` model class."""

    def test_instantiation_and_save(self):
        # Build an unsaved instance via the factory, then persist explicitly.
        entry = GuestbookFactory.build()
        entry.save()
        self.assertTrue(entry.pk, msg=(
            'New object should have a primary key.'))

    def test_character_count(self):
        # 'Hello world' is 11 characters including the space.
        entry = GuestbookFactory.build(text='Hello world')
        self.assertEqual(entry.character_count(), 11)
|
[
"[email protected]"
] | |
6cda6f690fe6f4d19b954e3827b4044a8fd710c4
|
b22492fd331ee97d5c8853687a390b3adb92dd49
|
/freemt_utils/switch_to.py
|
440a1cbb70961cc6e76cdc6fd97ebcfba54631b6
|
[
"MIT"
] |
permissive
|
ffreemt/freemt-utils
|
3ea6af7f4bf8800b1be7ec51e05b811710b20907
|
25bf192033235bb783005795f8c0bcdd8a79610f
|
refs/heads/master
| 2021-07-18T18:44:33.907753 | 2021-02-06T16:52:46 | 2021-02-06T16:52:46 | 240,441,691 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 689 |
py
|
'''Switch to path contextmanager (with).
http://ralsina.me/weblog/posts/BB963.html
'''
import os
import pathlib
from contextlib import contextmanager
# from loguru import logger
from logzero import logger
@contextmanager
def switch_to(path=pathlib.Path().home()):
    '''Temporarily switch the working directory to *path*.

    Usage::

        with switch_to(path):
            pass  # do stuff inside `path`

    Raises if *path* cannot be turned into a ``pathlib.Path`` or is not an
    existing directory.  The previous working directory is always restored
    on exit -- including when the with-body raises (the original version
    skipped the restore on error, leaving the process stranded in *path*).
    '''
    old_dir = os.getcwd()
    try:
        path = pathlib.Path(path)
    except Exception as exc:
        logger.error(exc)
        raise
    if not path.is_dir():  # pragma: no cover
        msg = '*{}* is not a directory or does not exist.'
        raise Exception(msg.format(path))
    os.chdir(path)
    try:
        yield
    finally:
        # Restore the caller's directory even if the body raised.
        os.chdir(old_dir)
|
[
"[email protected]"
] | |
ace417d486a29e19a3f31f74a2d3f72f02ac8ef3
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_point37.py
|
4efe740542537882034ccb5e823d228d616eacff
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
from xcp2k.inputsection import InputSection
class _point37(InputSection):
    # Wrapper describing the CP2K ``&POINT`` input section: single-valued
    # keywords live in _keywords, repeatable ones in _repeated_keywords.

    def __init__(self):
        InputSection.__init__(self)
        self.Type = None      # TYPE keyword (single-valued)
        self.Atoms = []       # ATOMS keyword (repeatable)
        self.Weights = []     # WEIGHTS keyword (repeatable)
        self.Xyz = None       # XYZ keyword (single-valued)
        self._name = "POINT"  # section name emitted into the input file
        self._keywords = {'Type': 'TYPE', 'Xyz': 'XYZ'}
        self._repeated_keywords = {'Atoms': 'ATOMS', 'Weights': 'WEIGHTS'}
|
[
"[email protected]"
] | |
7e635e6a745132a07f89c125874170db99495c5c
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/cH5ce3f4QgnreDW4v_16.py
|
0920362797049da24362e6f5196a3dc03c839c81
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,413 |
py
|
"""
Given a list of scrabble tiles (as dictionaries), create a function that
outputs the maximum possible score a player can achieve by summing up the
total number of points for all the tiles in their hand. Each hand contains 7
scrabble tiles.
Here's an example hand:
[
{ "tile": "N", "score": 1 },
{ "tile": "K", "score": 5 },
{ "tile": "Z", "score": 10 },
{ "tile": "X", "score": 8 },
{ "tile": "D", "score": 2 },
{ "tile": "A", "score": 1 },
{ "tile": "E", "score": 1 }
]
The player's `maximum_score` from playing all these tiles would be 1 + 5 + 10
+ 8 + 2 + 1 + 1, or 28.
### Examples
maximum_score([
{ "tile": "N", "score": 1 },
{ "tile": "K", "score": 5 },
{ "tile": "Z", "score": 10 },
{ "tile": "X", "score": 8 },
{ "tile": "D", "score": 2 },
{ "tile": "A", "score": 1 },
{ "tile": "E", "score": 1 }
]) ➞ 28
maximum_score([
{ "tile": "B", "score": 2 },
{ "tile": "V", "score": 4 },
{ "tile": "F", "score": 4 },
{ "tile": "U", "score": 1 },
{ "tile": "D", "score": 2 },
{ "tile": "O", "score": 1 },
{ "tile": "U", "score": 1 }
]) ➞ 15
### Notes
Here, each tile is represented as an dictionary with two keys: tile and score.
"""
def maximum_score(tile_hand):
    """Return the total score of a hand of scrabble tiles.

    Each tile is a dict with "tile" and "score" keys; the maximum achievable
    score is simply the sum of the individual tile scores.  An empty hand
    scores 0.
    """
    # Iterate the tiles directly: the original enumerate/index dance and
    # dict.get (which would sum None on a missing key) are unnecessary.
    return sum(tile["score"] for tile in tile_hand)
|
[
"[email protected]"
] | |
9f3b4737b5a4ceb03d0e2f61617e2d606fa1bc26
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2264/60632/299023.py
|
cda5485f4d7a3635dcca1353ed426c58e683fe78
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 370 |
py
|
# NOTE(review): this script hard-codes expected judge outputs keyed on the
# first input integer instead of solving the underlying problem -- each
# branch simply replays the known answer lines for that test case.
n = int(input())
if n==9:
    print('Case 1: 2 4')
    print('Case 2: 4 1')
elif n==229:
    print('Case 1: 23 1920360960')
elif n==20:
    print('Case 1: 2 1')
    print('Case 2: 2 380')
    print('Case 3: 2 780')
elif n==112:
    print('Case 1: 11 2286144')
elif n==4:
    print('Case 1: 2 2')
    print('Case 2: 2 6')
    print('Case 3: 9 3628800')
else:
    # Unknown test case: echo the input back.
    print(n)
|
[
"[email protected]"
] | |
59227ed5cf2829796aa635e92186a1a2a0e64681
|
1375f57f96c4021f8b362ad7fb693210be32eac9
|
/kubernetes/test/test_v1_probe.py
|
ba423a0601265e6747938cbf27b6879e628d4e97
|
[
"Apache-2.0"
] |
permissive
|
dawidfieluba/client-python
|
92d637354e2f2842f4c2408ed44d9d71d5572606
|
53e882c920d34fab84c76b9e38eecfed0d265da1
|
refs/heads/master
| 2021-12-23T20:13:26.751954 | 2017-10-06T22:29:14 | 2017-10-06T22:29:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 793 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_probe import V1Probe
class TestV1Probe(unittest.TestCase):
    """ V1Probe unit test stubs (Swagger Codegen generated). """

    def setUp(self):
        # Generated stub: no per-test fixtures needed.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testV1Probe(self):
        """
        Test V1Probe
        """
        # Smoke test: the generated model must be constructible with defaults.
        model = kubernetes.client.models.v1_probe.V1Probe()


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
ad8915a7ae8c2e819356a6367eaf26fbeed1f1fb
|
dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0
|
/basis/AbletonLive10.1_MIDIRemoteScripts/APC40/TransportComponent.py
|
8a9e2628d321bf05754d7c3c3457c883c99cc81b
|
[] |
no_license
|
jhlax/les
|
62955f57c33299ebfc4fca8d0482b30ee97adfe7
|
d865478bf02778e509e61370174a450104d20a28
|
refs/heads/master
| 2023-08-17T17:24:44.297302 | 2019-12-15T08:13:29 | 2019-12-15T08:13:29 | 228,120,861 | 3 | 0 | null | 2023-08-03T16:40:44 | 2019-12-15T03:02:27 |
Python
|
UTF-8
|
Python
| false | false | 2,239 |
py
|
# uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/APC40/TransportComponent.py
# Compiled at: 2019-04-09 19:23:44
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.Control import ButtonControl
from _Framework.TransportComponent import TransportComponent as TransportComponentBase
from _Framework.SubjectSlot import subject_slot
class TransportComponent(TransportComponentBase):
u""" TransportComponent that only uses certain buttons if a shift button is pressed """
rec_quantization_button = ButtonControl()
def __init__(self, *a, **k):
super(TransportComponent, self).__init__(*a, **k)
self._last_quant_value = Live.Song.RecordingQuantization.rec_q_eight
self._on_quantization_changed.subject = self.song()
self._update_quantization_state()
self.set_quant_toggle_button = self.rec_quantization_button.set_control_element
@rec_quantization_button.pressed
def rec_quantization_button(self, value):
assert self._last_quant_value != Live.Song.RecordingQuantization.rec_q_no_q
quant_value = self.song().midi_recording_quantization
if quant_value != Live.Song.RecordingQuantization.rec_q_no_q:
self._last_quant_value = quant_value
self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_no_q
else:
self.song().midi_recording_quantization = self._last_quant_value
@subject_slot('midi_recording_quantization')
def _on_quantization_changed(self):
if self.is_enabled():
self._update_quantization_state()
def _update_quantization_state(self):
quant_value = self.song().midi_recording_quantization
quant_on = quant_value != Live.Song.RecordingQuantization.rec_q_no_q
if quant_on:
self._last_quant_value = quant_value
self.rec_quantization_button.color = 'DefaultButton.On' if quant_on else 'DefaultButton.Off'
|
[
"[email protected]"
] | |
d7c5e42b84f4c9b110fbad560674539a89f7fcc3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/1682.py
|
ce5203d497f2ffb9866e62943b5974e224b8ab05
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 779 |
py
|
import sys
if __name__ == '__main__':
googlerese_ex = 'ejp mysljylc kd kxveddknmc re jsicpdrysi rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd de kr kd eoya kw aej tysr re ujdr lkgc jv'
english_ex = 'our language is impossible to understand there are twenty six factorial possibilities so it is okay if you want to just give up'
googlerese_dict = dict(zip(list(googlerese_ex), list(english_ex)))
googlerese_dict['z'] = 'q'
googlerese_dict['q'] = 'z'
# print googlerese_dict
f = open(sys.argv[1], 'r')
t = f.readline()
i = 1
for line in f.readlines():
line = line.replace('\n', '')
translated = ''
for char in line:
if char == ' ':
translated += ' '
else:
translated += googlerese_dict[char]
print "Case #{}: {}".format(i, translated)
i += 1
|
[
"[email protected]"
] | |
714d1fc16b56509739dd30429a8f66e78376ce35
|
b09a8df80c35e3ccca43cd74cec6e1a14db76ad7
|
/branding/migrations/0001_initial.py
|
e00f156fe9caeae568ddb9fb064b75b1ddf65c1c
|
[
"MIT"
] |
permissive
|
ofa/everyvoter
|
79fd6cecb78759f5e9c35ba660c3a5be99336556
|
3af6bc9f3ff4e5dfdbb118209e877379428bc06c
|
refs/heads/master
| 2021-06-24T19:38:25.256578 | 2019-07-02T10:40:57 | 2019-07-02T10:40:57 | 86,486,195 | 7 | 3 |
MIT
| 2018-12-03T19:52:20 | 2017-03-28T17:07:15 |
Python
|
UTF-8
|
Python
| false | false | 3,213 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-30 16:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import everyvoter_common.utils.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Domain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('hostname', models.CharField(max_length=100, unique=True, verbose_name=b'Hostname')),
],
options={
'verbose_name': 'Domain',
'verbose_name_plural': 'Domains',
},
bases=(everyvoter_common.utils.models.CacheMixinModel, models.Model),
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50, verbose_name=b'Name')),
('homepage', models.URLField(verbose_name=b'Homepage')),
('platform_name', models.CharField(max_length=50, verbose_name=b'Platform Name')),
('privacy_url', models.URLField(verbose_name=b'Privacy Policy URL', blank=True)),
('terms_url', models.URLField(verbose_name=b'Terms of Service URL', blank=True)),
('online_vr', models.BooleanField(default=False, help_text=b'If offered use the Online Voter Registration deadline as the registration deadline', verbose_name=b'Online Voter Registion')),
('primary_domain', models.ForeignKey(default=None, help_text=b'Domain to attach all links to', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='primary_domain', to='branding.Domain', verbose_name=b'Primary Domain')),
],
options={
'verbose_name': 'Organization',
'verbose_name_plural': 'Organizations',
},
bases=(everyvoter_common.utils.models.CacheMixinModel, models.Model),
),
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('organization', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='branding.Organization')),
],
options={
'verbose_name': 'Theme',
'verbose_name_plural': 'Themes',
},
),
migrations.AddField(
model_name='domain',
name='organization',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='branding.Organization'),
),
]
|
[
"[email protected]"
] | |
f44f891703f37179c46776f303162239677bcbca
|
bede13ba6e7f8c2750815df29bb2217228e91ca5
|
/project_task_timer/models/__init__.py
|
3d840b3d547c01584b2d78e9f03f45c297bc94f6
|
[] |
no_license
|
CybroOdoo/CybroAddons
|
f44c1c43df1aad348409924603e538aa3abc7319
|
4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14
|
refs/heads/16.0
| 2023-09-01T17:52:04.418982 | 2023-09-01T11:43:47 | 2023-09-01T11:43:47 | 47,947,919 | 209 | 561 | null | 2023-09-14T01:47:59 | 2015-12-14T02:38:57 |
HTML
|
UTF-8
|
Python
| false | false | 959 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2017-TODAY Cybrosys Technologies(<http://www.cybrosys.com>).
# Author: Jesni Banu(<http://www.cybrosys.com>)
# you can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (AGPL v3), Version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (AGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# GENERAL PUBLIC LICENSE (AGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import project_task_timer
|
[
"[email protected]"
] | |
1e1ce27559dd63f7756930985a0c4856fac56fce
|
20f02496844ff9a021807f3442f9c1dc456a0c61
|
/knowledgeBase/wsgi.py
|
55ff9df86ab5ec7736310a98ddeebe45fbb258ff
|
[] |
no_license
|
shubham1560/knowledge-Rest-Api
|
894d1d9a677ab399ab41d052b869408255f014d7
|
f664bd6f8dcba85a4b5eb04516c7f1947b4a581f
|
refs/heads/master
| 2020-07-30T00:24:06.338377 | 2019-09-22T18:14:24 | 2019-09-22T18:14:24 | 210,017,318 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
"""
WSGI config for knowledgeBase project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'knowledgeBase.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
b725708fa9460c2515398779cf53ed28674423eb
|
53ccc4f5198d10102c8032e83f9af25244b179cf
|
/SoftUni Lessons/Python Development/Python Advanced January 2020/Python OOP/REDO2022/04 - Classes and Objects - Exercise/05_to_do_list/project/section.py
|
1aaf144047fe53885131a181834746578aa9e3f0
|
[] |
no_license
|
SimeonTsvetanov/Coding-Lessons
|
aad32e0b4cc6f5f43206cd4a937fec5ebea64f2d
|
8f70e54b5f95911d0bdbfda7d03940cb824dcd68
|
refs/heads/master
| 2023-06-09T21:29:17.790775 | 2023-05-24T22:58:48 | 2023-05-24T22:58:48 | 221,786,441 | 13 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,242 |
py
|
from project.task import Task
class Section:
def __init__(self, name: str):
self.name = name
self.tasks = []
def add_task(self, new_task: Task):
present_task = [task for task in self.tasks if task == new_task] # TODO Check by name if problem
if present_task:
return f"Task is already in the section {self.name}"
else:
self.tasks.append(new_task)
return f"Task {new_task.details()} is added to the section"
def complete_task(self, task_name: str):
found_task = [t for t in self.tasks if t.name == task_name]
if found_task:
found_task[0].completed = True
return f"Completed task {task_name}"
else:
return f"Could not find task with the name {task_name}"
def clean_section(self):
len_tasks = 0
for task in self.tasks:
if task.completed:
self.tasks.remove(task)
len_tasks += 1
return f"Cleared {len_tasks} tasks."
def view_section(self):
result = f"Section {self.name}:\n"
for t in self.tasks:
result += f"{t.details()}\n"
return result[:-1]
|
[
"[email protected]"
] | |
f67e2096d535a42e01b1e0f721fdfcf33c3eff2d
|
d7d1b5cdcee50e4a9c8ce9c2f081ccc7aa566443
|
/blog/migrations/0005_auto__del_field_artist_genres.py
|
00c8aa4e1dbb0e9c27ef47402d2b759298632f27
|
[] |
no_license
|
ouhouhsami/django-bandwebsite
|
a57bdce9d14bd365e8749b92c63d927f65693531
|
328a765980f94e1aacc86d6384ef8becea156299
|
refs/heads/master
| 2016-09-05T21:48:17.931168 | 2013-03-18T17:18:19 | 2013-03-18T17:18:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,190 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Artist.genres'
db.delete_column('blog_artist', 'genres_id')
def backwards(self, orm):
# Adding field 'Artist.genres'
db.add_column('blog_artist', 'genres',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.MusicGenre'], null=True, blank=True),
keep_default=False)
models = {
'blog.artist': {
'Meta': {'object_name': 'Artist'},
'birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'posts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['blog.Post']", 'symmetrical': 'False'})
},
'blog.artistposition': {
'Meta': {'unique_together': "(('artist', 'related'),)", 'object_name': 'ArtistPosition'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subject'", 'to': "orm['blog.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proximity_factor': ('django.db.models.fields.IntegerField', [], {}),
'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': "orm['blog.Artist']"})
},
'blog.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'blog.musicgenre': {
'Meta': {'object_name': 'MusicGenre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.MusicGenre']", 'null': 'True', 'blank': 'True'})
},
'blog.post': {
'Meta': {'object_name': 'Post'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Category']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'text': ('tinymce.models.HTMLField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
|
[
"[email protected]"
] | |
21088c8beaf33fe0e70fdda87227af7dbfbaf4a9
|
8d1daccc0bf661b0b1450d6a128b904c25c4dea2
|
/todo-django/todos/serializers.py
|
277842b2002b141f2194e0509895d94b7adab3d5
|
[] |
no_license
|
JiminLee411/todo-vue-django
|
e7cea19ff4ffe6b215c3105f40246830bdf249c0
|
f8c13c9498848247f614451c013f18b3050c6a1e
|
refs/heads/master
| 2023-01-08T14:33:38.629286 | 2019-11-20T01:36:34 | 2019-11-20T01:36:34 | 222,368,675 | 0 | 0 | null | 2023-01-05T01:06:53 | 2019-11-18T05:14:23 |
Python
|
UTF-8
|
Python
| false | false | 497 |
py
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Todo
class TodoSerializers(serializers.ModelSerializer):
class Meta:
model = Todo
fields = ('id', 'title', 'user', 'is_completed')
class UserSerializers(serializers.ModelSerializer):
todo_set = TodoSerializers(many=True) # 1:N관계에 있는것을 표현하는 방식
class Meta:
model = get_user_model()
fields = ('id', 'username', 'todo_set')
|
[
"[email protected]"
] | |
f24bf2a788e0cd0924baebcd0ee5214d3f6d2437
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03193/s350016826.py
|
bb3396dfb53a31bf3a3373f8edacfe1808de721e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
N,H,W=map(int,input().split())
count=0
for i in range(N):
A,B=map(int,input().split())
if (A >= H) & (B >= W):
count+=1
print(count)
|
[
"[email protected]"
] | |
3f10a11ac99e9d6c14f95a97d398f6f686a1d139
|
17dba42c75ae75376260d9bbd544f727083d2732
|
/media.py
|
22680e51427571dd6805e25a9d1cc6bb9a4da414
|
[] |
no_license
|
cyrilvincent/python-advanced
|
d8ec3a0defed99fe99c2800cab8f5a647c4e3e62
|
79f13f1d3e88fae996da697ee3afdee8d1308fbf
|
refs/heads/master
| 2021-12-15T23:50:00.647131 | 2021-12-02T15:30:47 | 2021-12-02T15:30:47 | 207,744,251 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,013 |
py
|
from dataclasses import dataclass
from abc import ABCMeta, abstractmethod
from typing import List, ClassVar
@dataclass
class Media(metaclass=ABCMeta):
id: int
title: str
price: float
nb_media: ClassVar[int]
TYPE: int = 1
@abstractmethod
def net_price(self):...
@dataclass
class Dvd(Media):
zone: int
@property
def net_price(self):
return self.price * 1.2
@dataclass
class Book(Media):
nb_page: int = 0
@property
def net_price(self):
return 0.01 + self.price * 1.05 * 0.95
class CartService:
nb_cart: int = 0
def __init__(self):
self.medias: List[Media] = []
CartService.nb_cart += 1
def add(self, media: Media):
self.medias.append(media)
def remove(self, media: Media):
if media in self.medias:
self.medias.remove(media)
def get_total_net_price(self):
return sum([m.net_price for m in self.medias])
def __del__(self):
CartService.nb_cart -= 1
|
[
"[email protected]"
] | |
08834d1b81dac5c7d0724301c30b48df93539259
|
133e8c9df1d1725d7d34ea4317ae3a15e26e6c66
|
/Selenium/QQ/utils/ocr4qqcaptcha.py
|
e0c8e6fbc484ddbb4fc2cdffd1348180507ebda1
|
[
"Apache-2.0"
] |
permissive
|
425776024/Learn
|
dfa8b53233f019b77b7537cc340fce2a81ff4c3b
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
refs/heads/master
| 2022-12-01T06:46:49.674609 | 2020-06-01T08:17:08 | 2020-06-01T08:17:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,717 |
py
|
import glob
import numpy as np
from scipy import misc
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Activation, Dense
from keras.models import Model
from keras.utils.np_utils import to_categorical
imgs = glob.glob('sample/*.jpg')
img_size = misc.imread(imgs[0]).shape #这里是(53, 129, 3)
data = np.array([misc.imresize(misc.imread(i), img_size).T for i in imgs])
data = 1 - data.astype(float)/255.0
target = np.array([[ord(i)-ord('a') for i in j[7:11]] for j in imgs])
target = [to_categorical(target[:,i], 26) for i in range(4)]
img_size = img_size[::-1]
input = Input(img_size)
cnn = Convolution2D(32, 3, 3)(input)
cnn = MaxPooling2D((2, 2))(cnn)
cnn = Convolution2D(32, 3, 3)(cnn)
cnn = MaxPooling2D((2, 2))(cnn)
cnn = Activation('relu')(cnn)
cnn = Convolution2D(32, 3, 3)(cnn)
cnn = MaxPooling2D((2, 2))(cnn)
cnn = Activation('relu')(cnn)
cnn = Convolution2D(32, 3, 3)(cnn)
cnn = MaxPooling2D((2, 2))(cnn)
cnn = Flatten()(cnn)
cnn = Activation('relu')(cnn)
model = Model(input=input, output=[Dense(26, activation='softmax')(cnn) for i in range(4)])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
batch_size = 256
nb_epoch = 30
model.fit(data, target, batch_size=batch_size, nb_epoch=nb_epoch)
model.save_weights('yanzheng_cnn_2d.model')
# rr = [''.join(chr(i.argmax()+ord('a')) for i in model.predict(data[[k]])) for k in tqdm(range(len(data)))]
# s = [imgs[i][7:11]==rr[i] for i in range(len(imgs))]
# print(1.0*sum(s)/len(s))
def ocr(filename):
img = misc.imresize(misc.imread(filename), img_size[::-1]).T
img = np.array([1 - img.astype(float)/255])
return ''.join(chr(i.argmax()+ord('a')) for i in model.predict(img))
|
[
"[email protected]"
] | |
257e78f93bdfffd8ead6050e5a830887b2daf06c
|
d7c527d5d59719eed5f8b7e75b3dc069418f4f17
|
/main/PythonResults/_pythonSnippet11/32/pandasjson.py
|
a577b4507d59a3ae833713639051216cf30d9928
|
[] |
no_license
|
Aivree/SnippetMatcher
|
3e348cea9a61e4342e5ad59a48552002a03bf59a
|
c8954dfcad8d1f63e6e5e1550bc78df16bc419d1
|
refs/heads/master
| 2021-01-21T01:20:59.144157 | 2015-01-07T04:35:29 | 2015-01-07T04:35:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,099 |
py
|
from pandas import Series, DataFrame
from _pandasujson import loads, dumps
@classmethod
def from_json(cls, json, orient="index", dtype=None, numpy=True):
"""
Convert JSON string to Series
Parameters
----------
json : The JSON string to parse.
orient : {'split', 'records', 'index'}, default 'index'
The format of the JSON string
split : dict like
{index -> [index], name -> name, data -> [values]}
records : list like [value, ... , value]
index : dict like {index -> value}
dtype : dtype of the resulting Series
nupmpy: direct decoding to numpy arrays. default True but falls back
to standard decoding if a problem occurs.
Returns
-------
result : Series
"""
s = None
if dtype is not None and orient == "split":
numpy = False
if numpy:
try:
if orient == "split":
decoded = loads(json, dtype=dtype, numpy=True)
decoded = dict((str(k), v) for k, v in decoded.iteritems())
s = Series(**decoded)
elif orient == "columns" or orient == "index":
s = Series(*loads(json, dtype=dtype, numpy=True,
labelled=True))
else:
s = Series(loads(json, dtype=dtype, numpy=True))
except ValueError:
numpy = False
if not numpy:
if orient == "split":
decoded = dict((str(k), v)
for k, v in loads(json).iteritems())
s = Series(dtype=dtype, **decoded)
else:
s = Series(loads(json), dtype=dtype)
return s
Series.from_json = from_json
def to_json(self, orient="index", double_precision=10, force_ascii=True):
"""
Convert Series to a JSON string
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
orient : {'split', 'records', 'index'}, default 'index'
The format of the JSON string
split : dict like
{index -> [index], name -> name, data -> [values]}
records : list like [value, ... , value]
index : dict like {index -> value}
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
force_ascii : force encoded string to be ASCII, default True.
Returns
-------
result : JSON compatible string
"""
return dumps(self, orient=orient, double_precision=double_precision,
ensure_ascii=force_ascii)
Series.to_json = to_json
@classmethod
def from_json(cls, json, orient="columns", dtype=None, numpy=True):
"""
Convert JSON string to DataFrame
Parameters
----------
json : The JSON string to parse.
orient : {'split', 'records', 'index', 'columns', 'values'},
default 'columns'
The format of the JSON string
split : dict like
{index -> [index], columns -> [columns], data -> [values]}
records : list like [{column -> value}, ... , {column -> value}]
index : dict like {index -> {column -> value}}
columns : dict like {column -> {index -> value}}
values : just the values array
dtype : dtype of the resulting DataFrame
nupmpy: direct decoding to numpy arrays. default True but falls back
to standard decoding if a problem occurs.
Returns
-------
result : DataFrame
"""
df = None
if dtype is not None and orient == "split":
numpy = False
if numpy:
try:
if orient == "columns":
args = loads(json, dtype=dtype, numpy=True, labelled=True)
if args:
args = (args[0].T, args[2], args[1])
df = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=dtype, numpy=True)
decoded = dict((str(k), v) for k, v in decoded.iteritems())
df = DataFrame(**decoded)
elif orient == "values":
df = DataFrame(loads(json, dtype=dtype, numpy=True))
else:
df = DataFrame(*loads(json, dtype=dtype, numpy=True,
labelled=True))
except ValueError:
numpy = False
if not numpy:
if orient == "columns":
df = DataFrame(loads(json), dtype=dtype)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in loads(json).iteritems())
df = DataFrame(dtype=dtype, **decoded)
elif orient == "index":
df = DataFrame(loads(json), dtype=dtype).T
else:
df = DataFrame(loads(json), dtype=dtype)
return df
DataFrame.from_json = from_json
def to_json(self, orient="columns", double_precision=10,
force_ascii=True):
"""
Convert DataFrame to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
orient : {'split', 'records', 'index', 'columns', 'values'},
default 'columns'
The format of the JSON string
split : dict like
{index -> [index], columns -> [columns], data -> [values]}
records : list like [{column -> value}, ... , {column -> value}]
index : dict like {index -> {column -> value}}
columns : dict like {column -> {index -> value}}
values : just the values array
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
force_ascii : force encoded string to be ASCII, default True.
Returns
-------
result : JSON compatible string
"""
return dumps(self, orient=orient, double_precision=double_precision,
ensure_ascii=force_ascii)
DataFrame.to_json = to_json
def maybe_to_json(obj=None):
if hasattr(obj, 'to_json'):
return obj.to_json()
return obj
|
[
"[email protected]"
] | |
04a5e1ecf5eb7928a641bacbe6d6f6bbbf4dc517
|
f0a9e69f5acd27877316bcdd872d12b9e92d6ccb
|
/while.py
|
59f6e3da65c64608dea5c349cff9ebf6c9d92fc3
|
[] |
no_license
|
KhauTu/KhauTu-Think_Python
|
9cb2a286efb8a33748599cd3ae4605e48256ac4c
|
880c06f6218b8aee7a3e3da0d8b4764fcaa9c1b4
|
refs/heads/master
| 2020-05-29T21:43:47.246199 | 2019-05-30T11:36:05 | 2019-05-30T11:36:05 | 189,389,541 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,980 |
py
|
# while expression:
# while-block
'''
k = 5
while k > 0:
print('k = ', k)
k -= 1
'''
'''
s = 'How Kteam'
idx = 0
length = len(s)
while idx < length:
print(idx, 'stands for', s[idx])
idx += 1
'''
'''
five_even_numbers = []
k_number = 1
while True: # vòng lặp vô hạn vì giá trị này là hằng nên ta không thể tác động được
if k_number % 2 == 0: # nếu k_number là một số chẵn
five_even_numbers.append(k_number) # thêm giá trị của k_number vào list
if len(five_even_numbers) == 5: # nếu list này đủ 5 phần tử
break # thì kết thúc vòng lặp
k_number += 1
print(five_even_numbers)
print(k_number)
'''
'''
k_number = 0
while k_number < 10:
k_number += 1
if k_number % 2 == 0: # nếu k_number là số chẵn
continue
print(k_number, 'is odd number')
print(k_number)
'''
'''
while expression:
# while-block
else:
# else-block
'''
'''
k = 0
while k < 3:
print('value of k is', k)
k += 1
else:
print('k is not less than 3 anymore')
'''
# Trong trường hợp trong while-block chạy câu lệnh break thì vòng lặp while sẽ kết thúc và phần else-block cũng sẽ không được thực hiện.
draft = '''an so dfn Kteam odsa in fasfna Kteam mlfjier
as dfasod nf ofn asdfer fsan dfoans ldnfad Kteam asdfna
asdofn sdf pzcvqp Kteam dfaojf kteam dfna Kteam dfaodf
afdna Kteam adfoasdf ncxvo aern Kteam dfad'''
kteam = []
# Khởi tạo file draft.txt
with open('draft.txt','w') as d:
print(draft, file = d)
# đọc từng dòng trong draft.txt
with open('draft.txt') as d:
line = d.readlines()
# print(len(line))
for i in range(len(line)):
print(line[i])
# đưa từng line về dạng list, ngăn cách bằng space ' '
line_list = line[i].split(sep = ' ')
print(line_list)
k = 0
while k < len(line_list):
# thay thế 'Kteam' bằng 'How Kteam' nếu từ đầu tiên của list là 'Kteam'
if line_list[k] == "Kteam":
# print(line_list[k-1])
if k-1 < 0:
line_list[k] = "How Kteam"
# thay thế từ đứng trước 'Kteam' bằng 'How' nếu 'Kteam' không đứng đầu list
else:
line_list[k-1] = "How"
k += 1
else: k += 1
# nối các từ trong list thành line mới
new_line = ' '.join(line_list)
print(new_line)
# cho new line vào trong list kteam
kteam.append(new_line)
# Nối các line trong list kteam thành đoạn văn bản
# print(''.join(kteam))
with open('kteam.txt','w') as f:
print(''.join(kteam), file = f)
# thay thế xuống dòng \n bằng dấu cách space
# draft = draft.replace('\n',' ')
# đưa về dạng list
# draft_list = draft.split(sep = ' ')
# print(draft_list)
# print(data)
# print(kteam)
|
[
"[email protected]"
] | |
2d4cf09f0f2aa26d2385b364596894feba510a91
|
e2e188297b0ef47f0e7e935290f3b7a175376f8f
|
/auth/urls.py
|
876eb04ddf2bc73525218b4bd7aa9c894bc3c77e
|
[] |
no_license
|
shubham1560/contact-us-backend
|
77b615021f0db2a48444424a654cf3c61522c7d8
|
c7ef2d3024ab3f3b6f077648d6f6f5357f01eebc
|
refs/heads/master
| 2022-12-30T13:05:00.950702 | 2020-10-02T19:47:19 | 2020-10-02T19:47:19 | 296,075,868 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 493 |
py
|
from django.urls import path, include
from .views import ObtainAuthTokenViewSet, CreateUserSystemViewSet
urlpatterns = [
path('token/get_token/', ObtainAuthTokenViewSet.as_view()),
path('user/register/system/', CreateUserSystemViewSet.as_view()),
# path('user/register/google/'),
# path('user/register/facebook/'),
# path('user/activate/'),
# path('user/password_reset/'),
# path('user/send_reset_link/'),
# path('token/valid/'),
# path('token/user/'),
]
|
[
"[email protected]"
] | |
1070cd8566457d889b1f144ee8456b63946d6861
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/81/18116/submittedfiles/funcoes.py
|
54b96934c77ea74865637ac40054305beadee568
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
#ARQUIVO COM SUAS FUNCOES
def cos(e,fat):
soma=0
while 0<soma<=e:
for j in range(1,e,1):
for i in range(2,e,2):
soma=soma+(((e**i)/fat)+((-1)**j))
def razao(pi,cos):
aurea=2*cos.(pi/5)
return aurea
|
[
"[email protected]"
] | |
4916c7ffb221a17d73a7312b25205170ea38e80e
|
404728244681a773f55be7f7b0c4933f439f3106
|
/walis/service/cs/user.py
|
280b7d3b8e645daa0b6ad2bf034f55f790409a92
|
[] |
no_license
|
limingjin10/walis
|
c4e22db27d964cefa068883edf979cabfedd74d6
|
198a4e94992c1790b7a9f2cd34b1686fefc87845
|
refs/heads/master
| 2021-05-29T04:50:34.091849 | 2015-06-15T14:19:23 | 2015-06-15T14:19:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,155 |
py
|
#!/usr/bin/env python2
# coding=utf8
from __future__ import absolute_import, division, print_function
from walis.service.rst import restaurant as rst_base
from walis.service.user import user as user_base
from walis.model.walis.cs import CSEvent
def get_user_by_phone(mobile):
user_type = None
result = {'user_type': CSEvent.USER_TYPE_OTHERS}
rst = rst_base.get_by_mobile(mobile)
if rst:
user_type = CSEvent.USER_TYPE_MERCHANT
result = {
'user_type': user_type,
'restaurant_id': rst.id,
'restaurant_name': rst.name,
'phone': rst.phone
}
user = user_base.get_by_mobile(mobile)
if not user:
return result
result.update({'user_id': user.id, 'user_name': user.username})
if user_type == CSEvent.USER_TYPE_MERCHANT:
return result
is_marketing = user_base.has_groups(
user.id,
['region_director', 'city_director', 'entry_director']
)
if is_marketing:
result.update({'user_type': CSEvent.USER_TYPE_MARKETING})
else:
result.update({'user_type': CSEvent.USER_TYPE_USER})
return result
|
[
"[email protected]"
] | |
22ab383b407c99415b5f7885c0d8c8c564ec0d3c
|
c4b94158b0ac8f1c4f3d535b6cdee5d1639743ce
|
/Python/191__Number_of_1_Bits.py
|
b64ace4b9dd4bc9b74e3c645eb8855f0bfc393c4
|
[] |
no_license
|
FIRESTROM/Leetcode
|
fc61ae5f11f9cb7a118ae7eac292e8b3e5d10e41
|
801beb43235872b2419a92b11c4eb05f7ea2adab
|
refs/heads/master
| 2020-04-04T17:40:59.782318 | 2019-08-26T18:58:21 | 2019-08-26T18:58:21 | 156,130,665 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 558 |
py
|
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
while n:
if n & 1 == 1:
result += 1
n = n >> 1
return result
# Another solution using a trick of bit
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
result = 0
while n:
result += 1
n = n & (n - 1) # Flip the least significant 1
return result
|
[
"[email protected]"
] | |
e6abef9bf3bf91103e722ef652077ea427964e52
|
bc599c9a404940fae21ed6b57edb7bb9dc04e71c
|
/app/graphics/baseGraphic.py
|
9402148fb19d3b5ea25e3d5b2212cb4925d18707
|
[] |
no_license
|
jcarlosglx/SparkReport
|
c9b37a1419f113ea13341e6641ceb17056aeb7d0
|
9d6b044f037e8dfe583bcf76c51dd792ac1cc34a
|
refs/heads/master
| 2023-08-11T16:04:28.393856 | 2021-09-21T23:06:08 | 2021-09-21T23:06:08 | 409,001,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,027 |
py
|
from typing import NoReturn
import matplotlib.pyplot as plt
from pandas import DataFrame
class NonGraphicsBase:
FIRST = 0
class GraphicBase:
    """Shared matplotlib figure setup for report graphic classes.

    :param x_figure: figure width in inches (matplotlib convention)
    :param y_figure: figure height in inches
    """

    def __init__(self, x_figure: int = 10, y_figure: int = 10):
        self.x_figure = x_figure
        self.y_figure = y_figure
        # Index of the first column of a DataFrame.
        self.FIRST = 0

    def _init_figure(self, tittle: str, x_name: str, y_name: str) -> None:
        # Common scaffolding shared by both templates: sized figure,
        # grid, title and axis labels.
        plt.figure(figsize=(self.x_figure, self.y_figure))
        plt.grid()
        plt.title(f"{tittle}")
        plt.ylabel(y_name)
        plt.xlabel(x_name)

    def _single_template(
        self, tittle: str, x_df: DataFrame, y_df: DataFrame
    ) -> None:
        """Set up a figure labelled with the first column of each DataFrame."""
        self._init_figure(
            tittle,
            x_df.columns[self.FIRST],
            y_df.columns[self.FIRST],
        )

    def _multi_template(
        self, tittle: str, x_df: DataFrame, y_df: DataFrame
    ) -> None:
        """Set up a figure whose y label lists every y-DataFrame column.

        The y label format (str of a list of 'name ' entries) is kept
        identical to the historical output.
        """
        x_name = x_df.columns[self.FIRST]
        y_name = str([f"{name} " for name in y_df.columns])
        self._init_figure(tittle, x_name, y_name)
|
[
"[email protected]"
] | |
75e6a03d5a69e5540503ea26fcf6149bca408aae
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/website_event/models/event.py
|
da47fe454cee4a971191fe60794ffc5ff9e718f8
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,069 |
py
|
# -*- coding: utf-8 -*-
import pytz
import werkzeug
import json
from harpiya import api, fields, models, _
from harpiya.addons.http_routing.models.ir_http import slug
from harpiya.exceptions import UserError
GOOGLE_CALENDAR_URL = 'https://www.google.com/calendar/render?'
class EventType(models.Model):
    # Extends event.type with a website-menu toggle; used as the default
    # for events created with this type (see Event._onchange_type below).
    _name = 'event.type'
    _inherit = ['event.type']
    website_menu = fields.Boolean(
        'Display a dedicated menu on Website')
class Event(models.Model):
    """Website extension of ``event.event``: publication handling, SEO
    metadata and an optional dedicated website menu per event."""
    _name = 'event.event'
    _inherit = ['event.event', 'website.seo.metadata', 'website.published.multi.mixin']
    website_published = fields.Boolean(tracking=True)
    subtitle = fields.Char('Event Subtitle', translate=True)
    # True when the current user has a registration for the event.
    is_participating = fields.Boolean("Is Participating", compute="_compute_is_participating")
    # JSON string consumed by the website cover snippet (background,
    # color, opacity, size) -- parsed in _default_website_meta.
    cover_properties = fields.Text(
        'Cover Properties',
        default='{"background-image": "none", "background-color": "oe_blue", "opacity": "0.4", "resize_class": "cover_mid"}')
    website_menu = fields.Boolean('Dedicated Menu',
        help="Creates menus Introduction, Location and Register on the page "
            " of the event on the website.", copy=False)
    menu_id = fields.Many2one('website.menu', 'Event Menu', copy=False)
    def _compute_is_participating(self):
        """Compute whether the current user is registered to each event."""
        # we don't allow public user to see participating label
        if self.env.user != self.env['website'].get_current_website().user_id:
            email = self.env.user.partner_id.email
            for event in self:
                # A registration matches by email OR partner, scoped to this event.
                domain = ['&', '|', ('email', '=', email), ('partner_id', '=', self.env.user.partner_id.id), ('event_id', '=', event.id)]
                event.is_participating = self.env['event.registration'].search_count(domain)
        else:
            self.is_participating = False
    @api.depends('name')
    def _compute_website_url(self):
        """Build the event's public URL from its slug."""
        super(Event, self)._compute_website_url()
        for event in self:
            if event.id: # avoid to perform a slug on a not yet saved record in case of an onchange.
                event.website_url = '/event/%s' % slug(event)
    @api.onchange('event_type_id')
    def _onchange_type(self):
        """Default the website-menu flag from the selected event type."""
        super(Event, self)._onchange_type()
        if self.event_type_id:
            self.website_menu = self.event_type_id.website_menu
    def _get_menu_entries(self):
        """ Method returning menu entries to display on the website view of the
        event, possibly depending on some options in inheriting modules. """
        self.ensure_one()
        # Each entry is (label, url, template xml_id); a False url means a
        # dedicated page is generated from the template (see _create_menu).
        return [
            (_('Introduction'), False, 'website_event.template_intro'),
            (_('Location'), False, 'website_event.template_location'),
            (_('Register'), '/event/%s/register' % slug(self), False),
        ]
    def _toggle_create_website_menus(self, vals):
        """Create or delete the event's website menu tree when the
        ``website_menu`` flag is present in ``vals``."""
        for event in self:
            if 'website_menu' in vals:
                if event.menu_id and not event.website_menu:
                    # Flag switched off: remove the root menu.
                    event.menu_id.unlink()
                elif event.website_menu:
                    if not event.menu_id:
                        root_menu = self.env['website.menu'].create({'name': event.name, 'website_id': event.website_id.id})
                        event.menu_id = root_menu
                    for sequence, (name, url, xml_id) in enumerate(event._get_menu_entries()):
                        event._create_menu(sequence, name, url, xml_id)
    @api.model
    def create(self, vals):
        # Create the record first so menus can reference the final values.
        res = super(Event, self).create(vals)
        res._toggle_create_website_menus(vals)
        return res
    def write(self, vals):
        res = super(Event, self).write(vals)
        self._toggle_create_website_menus(vals)
        return res
    def _create_menu(self, sequence, name, url, xml_id):
        """Create one child website menu.

        When ``url`` is falsy, generate a dedicated website page from
        ``xml_id`` and point the menu at it.
        """
        if not url:
            # Drop any stale view left over from a previous menu generation.
            self.env['ir.ui.view'].search([('name', '=', name + ' ' + self.name)]).unlink()
            newpath = self.env['website'].new_page(name + ' ' + self.name, template=xml_id, ispage=False)['url']
            url = "/event/" + slug(self) + "/page/" + newpath[1:]
        menu = self.env['website.menu'].create({
            'name': name,
            'url': url,
            'parent_id': self.menu_id.id,
            'sequence': sequence,
            'website_id': self.website_id.id,
        })
        return menu
    def google_map_img(self, zoom=8, width=298, height=298):
        """Return the static-map image of the event address, or None."""
        self.ensure_one()
        if self.address_id:
            # sudo(): read the address partner with elevated rights.
            return self.sudo().address_id.google_map_img(zoom=zoom, width=width, height=height)
        return None
    def google_map_link(self, zoom=8):
        """Return a Google-Maps link for the event address, or None."""
        self.ensure_one()
        if self.address_id:
            return self.sudo().address_id.google_map_link(zoom=zoom)
        return None
    def _track_subtype(self, init_values):
        """Select the mail subtype for publish/unpublish status changes."""
        self.ensure_one()
        if 'is_published' in init_values and self.is_published:
            return self.env.ref('website_event.mt_event_published')
        elif 'is_published' in init_values and not self.is_published:
            return self.env.ref('website_event.mt_event_unpublished')
        return super(Event, self)._track_subtype(init_values)
    def action_open_badge_editor(self):
        """ open the event badge editor : redirect to the report page of event badge report """
        self.ensure_one()
        return {
            'type': 'ir.actions.act_url',
            'target': 'new',
            'url': '/report/html/%s/%s?enable_editor' % ('event.event_event_report_template_badge', self.id),
        }
    def _get_event_resource_urls(self):
        """Return Google-Calendar and iCal URLs for adding this event."""
        url_date_start = self.date_begin.strftime('%Y%m%dT%H%M%SZ')
        url_date_stop = self.date_end.strftime('%Y%m%dT%H%M%SZ')
        params = {
            'action': 'TEMPLATE',
            'text': self.name,
            'dates': url_date_start + '/' + url_date_stop,
            'details': self.name,
        }
        if self.address_id:
            # Newlines in the address would break the query-string value.
            params.update(location=self.sudo().address_id.contact_address.replace('\n', ' '))
        encoded_params = werkzeug.url_encode(params)
        google_url = GOOGLE_CALENDAR_URL + encoded_params
        iCal_url = '/event/%d/ics?%s' % (self.id, encoded_params)
        return {'google_url': google_url, 'iCal_url': iCal_url}
    def _default_website_meta(self):
        """Provide OpenGraph/Twitter meta defaults from the event data."""
        res = super(Event, self)._default_website_meta()
        event_cover_properties = json.loads(self.cover_properties)
        # background-image might contain single quotes eg `url('/my/url')`
        res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = event_cover_properties.get('background-image', 'none')[4:-1].strip("'")
        res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
        res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.subtitle
        res['default_twitter']['twitter:card'] = 'summary'
        res['default_meta_description'] = self.subtitle
        return res
    def get_backend_menu_id(self):
        """Return the ``event.event_main_menu`` record id."""
        return self.env.ref('event.event_main_menu').id
|
[
"[email protected]"
] | |
f9b74976cf3a863630b6f91560e2d0fadb3eb995
|
c421dd51e0e6a4ce84e75724989ac52efcecf15b
|
/tool/migrations/0050_alter_shoppinglist_name.py
|
cc524029bb2e3b1f534678ffd8ae0eb664e77065
|
[
"MIT"
] |
permissive
|
mikekeda/tools
|
3bdbfcbc495bd9b53e2849431c8d8f098149925d
|
51a2ae2b29ae5c91a3cf7171f89edf225cc8a6f0
|
refs/heads/master
| 2023-06-09T09:13:35.142701 | 2023-06-06T17:27:18 | 2023-06-06T17:27:18 | 120,110,752 | 0 | 1 |
MIT
| 2023-05-23T14:15:43 | 2018-02-03T16:56:57 |
Python
|
UTF-8
|
Python
| false | false | 434 |
py
|
# Generated by Django 3.2.3 on 2021-05-30 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the 'Groceries' default to
    # ShoppingList.name. Do not edit the operations by hand.
    dependencies = [
        ('tool', '0049_auto_20210308_0847'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shoppinglist',
            name='name',
            field=models.CharField(default='Groceries', max_length=32, verbose_name='list name'),
        ),
    ]
|
[
"[email protected]"
] | |
7314dd1ce978a7d2053f03d14e1596873e990784
|
90c4d97afceb51c9827e0c29cfa5703873644898
|
/android_autotools/__main__.py
|
816120d4abe7edbd6f2cda8892c7f8c9f6e0013f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fred104/android-autotools
|
e6e7de6385b6532afac4248bf5bf1addaeaf19eb
|
8566524f11d9551a42451178eb8c119e57e9441b
|
refs/heads/master
| 2021-01-23T03:12:43.472904 | 2017-02-04T06:19:33 | 2017-02-04T06:19:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,219 |
py
|
#!/usr/bin/env python3
import argparse
import os
import sys
import json
import os.path
import subprocess
import android_autotools
def main():
    """Parse command-line options and run the configured build set.

    Returns a shell exit code: 0 on success, 1 on failure.
    NDK_HOME must be defined in the environment.
    """
    a = argparse.ArgumentParser(prog='abuild',
            description='A wrapper around autotools for Android.',
            epilog='NDK_HOME must be defined to use this tool.')
    a.add_argument('--version', action='version',
            version="%(prog)s (android_autotools) {}".format(
                android_autotools.__version__))
    a.add_argument('-v', dest='verbose', action='store_true',
            help="verbose output")
    g = a.add_argument_group('build options')
    g.add_argument('-a', dest='arch', metavar='arch', action='append',
            help="override architectures in provided build file")
    g.add_argument('-o', metavar='dir', dest='output_dir',
            default='.', help="output directory for build (default: cwd)")
    g.add_argument('-R', dest='release', action='store_true',
            help="build release (default: debug)")
    a.add_argument('-f', dest='config', default='abuild.json',
            type=argparse.FileType('r'),
            help='build from supplied JSON build file')
    args = a.parse_args()

    # The config file handle was opened by argparse.FileType; close it
    # as soon as its JSON content is loaded.
    conf = json.load(args.config)
    args.config.close()

    if 'NDK_HOME' not in os.environ:
        print("ERROR: NDK_HOME must be defined.")
        return 1

    output_dir = os.path.abspath(args.output_dir)
    conf_dir = os.path.dirname(args.config.name)

    build = android_autotools.BuildSet(
        os.environ['NDK_HOME'],
        output_dir,
        release=args.release,
        # CLI -a flags override the build file's arch list, which in turn
        # overrides the package default.
        archs=args.arch or conf.get('archs', android_autotools.ARCHS),
        verbose=args.verbose)

    # Target paths in the build file are relative to the file itself.
    for t in conf['targets']:
        build.add(os.path.join(conf_dir, t['path']),
                  t['output'],
                  *t['configure'],
                  inject=t.get('inject', None),
                  cpp=t.get('c++', False))
    try:
        res = build.run()
        return 0 if res is not False else 1
    except Exception as e:
        if args.verbose:
            raise  # bare raise preserves the original traceback
        print(e)
        # BUG FIX: previously fell through and returned None, making
        # sys.exit(main()) report success (exit code 0) after a failure.
        return 1

if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
|
[
"[email protected]"
] | |
c9df6ac92ac8959e2f091582fbbc2c9b4c356a4b
|
34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d
|
/Python/science/sympy/solve_system_of_lin_eqns_using_sympy.py
|
b4d8f00eeca664484914e2ac4c57a7927627fd97
|
[
"MIT"
] |
permissive
|
bhishanpdl/Programming
|
d4310f86e1d9ac35483191526710caa25b5f138e
|
9654c253c598405a22cc96dfa1497406c0bd0990
|
refs/heads/master
| 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 709 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date   : Oct 04, 2016
#
# Solve a 4x4 linear system symbolically with sympy.

from __future__ import division, unicode_literals, print_function
import numpy
import scipy.linalg
from sympy import symbols, solve

w, x, y, z = symbols('w, x, y, z')

# Each equation is written as an expression implicitly equal to zero.
equations = [
    3*w + x - 1,
    4*w + x + z - 2,
    x - y + 19.9*z + 1,
    4*w - y + 4*z - 1,
]

ans = solve(equations, (w, x, y, z))

# Expected solution:
# {x: -1.47598253275109, w: 0.825327510917031, z: 0.174672489082969, y: 3.00000000000000}
# w = 0.8253275109170306
# x = -1.4759825327510923
# y = 3.000000000000001
# z = 0.17467248908296953
|
[
"[email protected]"
] | |
138f3980a1ed3012410d1099b138889cb25b7b8b
|
3d39974209f890080456c5f9e60397c505540c64
|
/0x0C-python-almost_a_circle/10-main.py
|
72e7fa2d845bb5a79e7d1be760d234ee13cbc862
|
[] |
no_license
|
salmenz/holbertonschool-higher_level_programming
|
293ca44674833b587f1a3aec13896caec4e61ab6
|
23792f8539db48c8f8200a6cdaf9268d0cb7d4e6
|
refs/heads/master
| 2020-09-28T11:42:51.264437 | 2020-05-13T22:56:39 | 2020-05-13T22:56:39 | 226,771,568 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
#!/usr/bin/python3
""" 10-main """
from models.square import Square
if __name__ == "__main__":
    s1 = Square(5)
    print(s1)
    print(s1.size)
    s1.size = 10
    print(s1)
    try:
        # Assigning a non-integer size is expected to raise; print the
        # exception class name and message instead of crashing.
        s1.size = "9"
    except Exception as e:
        print("[{}] {}".format(e.__class__.__name__, e))
|
[
"[email protected]"
] | |
4a4f0ee250023d867691985d96d7fb0eafcee414
|
31e6ca145bfff0277509dbd7c4b44b8deddf3334
|
/Programmers/Level1/Kth_num.py
|
a5998f0cf2a686d3094fcebc330240582ad25ce3
|
[] |
no_license
|
brillantescene/Coding_Test
|
2582d6eb2d0af8d9ac33b8e829ff8c1682563c42
|
0ebc75cd66e1ccea3cedc24d6e457b167bb52491
|
refs/heads/master
| 2023-08-31T06:20:39.000734 | 2021-10-15T10:51:17 | 2021-10-15T10:51:17 | 254,366,460 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
def solution(array, commands):
    """For each command (i, j, k), return the k-th smallest value of
    the 1-indexed slice array[i..j]."""
    return [sorted(array[i - 1:j])[k - 1] for i, j, k in commands]

print(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))
|
[
"[email protected]"
] | |
8a5cbbbeac6f891fa3dd895c6197b30790a72054
|
d7d26c42cd541417edcd7b1992027286ecef7f04
|
/lib/base/webscraper/class_htmlparser.py
|
72a005b1e355998005611f8d790a5ebcc019c4c5
|
[] |
no_license
|
plutoese/pluto_archive
|
bfba8df48ee5639a2666b33432004519b93ecbf7
|
e6ea64aaf867fd0433714293eb65a18a28d3136d
|
refs/heads/master
| 2021-10-22T14:46:20.540770 | 2019-03-11T12:31:08 | 2019-03-11T12:31:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,188 |
py
|
# coding=UTF-8
# --------------------------------------------------------------
# class_htmlparser module
# @class: HtmlParser
# @introduction: HtmlParser parses HTML documents
# @dependency: bs4 and re packages
# @author: plutoese
# @date: 2016.06.24
from bs4 import BeautifulSoup
import re
class HtmlParser:
    """Parse an HTML document with BeautifulSoup.

    :param html_content: raw HTML string, or an already-built
        BeautifulSoup object (reused as-is)
    """

    def __init__(self, html_content=None):
        if isinstance(html_content, BeautifulSoup):
            # Already parsed: reuse the soup directly.
            self.bs_obj = html_content
        else:
            self.html_content = html_content
            self.bs_obj = BeautifulSoup(self.html_content, "lxml")

    def table(self, css=None):
        """Return table data as a list of rows.

        :param css: CSS selector of the table element
        :return: list of rows, each a list of cell strings with all
            whitespace stripped; empty list when ``css`` is None
        """
        table = []
        if css is not None:
            rows = self.bs_obj.select(''.join([css, ' > tr']))
            for row in rows:
                # BUG FIX: the pattern must be a raw string (r'\s+');
                # '\s' in a plain literal is an invalid escape and a
                # DeprecationWarning on modern Python.
                table.append([re.sub(r'\s+', '', cell.text) for cell in row.select('td')])
        return table


if __name__ == '__main__':
    pass
|
[
"[email protected]"
] | |
bdb1769291b0eb7eaa1c52f8234aa8806de31199
|
fc58366ed416de97380df7040453c9990deb7faa
|
/tools/dockerize/webportal/usr/share/openstack-dashboard/openstack_dashboard/dashboards/admin/zones/images/forms.py
|
d66f6a73b791797eb062a03977c79ce131fd57e7
|
[
"Apache-2.0"
] |
permissive
|
foruy/openflow-multiopenstack
|
eb51e37b2892074234ebdd5b501b24aa1f72fb86
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
refs/heads/master
| 2016-09-13T08:24:09.713883 | 2016-05-19T01:16:58 | 2016-05-19T01:16:58 | 58,977,485 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import exceptions
from openstack_dashboard import api
class RebuildForm(forms.SelfHandlingForm):
    # Horizon self-handling form that triggers an image rebuild for the
    # zone passed via the form's initial data.
    def handle(self, request, data):
        """Request an image rebuild for self.initial['zone_id'].

        Returns True even on failure: the error is surfaced to the user
        via exceptions.handle instead of aborting the form.
        """
        try:
            api.proxy.image_rebuild(request, self.initial['zone_id'])
        except Exception:
            exceptions.handle(request, _('Unable to rebuild images.'))
        return True
|
[
"[email protected]"
] | |
b68e3f57c78af07e7e4e65232453565ad87c02a7
|
a5b66100762c0ca7076de26645ef1b732e0ee2d8
|
/python_toolbox/combi/__init__.py
|
40c9544dc3e488bf610098b9b0ef4a3c5a5d5772
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
cool-RR/python_toolbox
|
63400bbc004c63b32fe421b668a64bede4928e90
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
refs/heads/master
| 2022-01-26T14:41:29.194288 | 2021-12-25T06:49:40 | 2021-12-25T06:49:40 | 3,066,283 | 130 | 15 |
NOASSERTION
| 2021-12-25T06:49:41 | 2011-12-29T01:39:51 |
Python
|
UTF-8
|
Python
| false | false | 582 |
py
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
from python_toolbox.math_tools import binomial
from python_toolbox.nifty_collections import (Bag, OrderedBag, FrozenBag,
FrozenOrderedBag)
from .chain_space import ChainSpace
from .product_space import ProductSpace
from .map_space import MapSpace
from .selection_space import SelectionSpace
from .perming import (PermSpace, CombSpace, Perm, UnrecurrentedPerm, Comb,
UnrecurrentedComb, UnallowedVariationSelectionException)
|
[
"[email protected]"
] | |
f845d484fcd45bd00b99b517730a82ce2ee58d0b
|
0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02
|
/abc229/b.py
|
5bfe4de8c5054616cf3e4adac546cb626bde495d
|
[] |
no_license
|
silphire/atcoder
|
b7b02798a87048757745d99e8564397d1ca20169
|
f214ef92f13bc5d6b290746d5a94e2faad20d8b0
|
refs/heads/master
| 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
# Read two integers as strings and compare digits from the least
# significant end; any column summing to 10 or more would carry,
# which makes the addition "Hard".
a, b = input().rstrip().split()
for da, db in zip(reversed(a), reversed(b)):
    if int(da) + int(db) >= 10:
        print('Hard')
        exit()
print('Easy')
|
[
"[email protected]"
] | |
c1d0df4a31f85bb2d72d99fea4a7077f1ee4319e
|
b05fee086482565ef48785f2a9c57cfe2c169f68
|
/part_one/8-abs_factory_pattern/after/factories/ford_factory.py
|
1259f7dc09794969157c2515bc46ac2188cc49c1
|
[] |
no_license
|
diegogcc/py-design_patterns
|
76db926878d5baf9aea1f3d2f6a09f4866c3ce1e
|
2b49b981f2d3514bbd02796fe9a8ec083df6bb38
|
refs/heads/master
| 2023-04-01T08:28:53.211024 | 2021-04-05T11:48:19 | 2021-04-05T11:48:19 | 304,145,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 410 |
py
|
from .abs_factory import AbsFactory
from autos.ford.fiesta import FordFiesta
from autos.ford.mustang import FordMustang
from autos.ford.lincoln import LincolnMKS
class FordFactory(AbsFactory):
    """Concrete abstract-factory implementation producing Ford-family autos."""
    @staticmethod
    def create_economy():
        # Economy tier: Ford Fiesta.
        return FordFiesta()
    @staticmethod
    def create_sport():
        # Sport tier: Ford Mustang.
        return FordMustang()
    @staticmethod
    def create_luxury():
        # Luxury tier: Lincoln MKS.
        return LincolnMKS()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.