| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 3-616) | content_id (string, 40-40) | detected_licenses (list, 0-112 items) | license_type (string, 2 classes) | repo_name (string, 5-115) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, 3 to 10.2M) | authors (list, 1-1 items) | author_id (string, 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f774788f282a41dee2b12ddc84f99a3129f478b | d1a8d250cf6e3e61f90b5d122e389d8488a9ff8c | /Travel/urls.py | 92e0a1b5647b5b2c19167ecdbde5cdc9f7fe538a | [] | no_license | dusty-g/Travel | 89501d156a3ea86f4478f7bb41e1f968c7087bd3 | 5ca6061884e7630a0b0365adfa640da3ce1a6c37 | refs/heads/master | 2021-01-20T02:23:18.066638 | 2017-04-30T19:00:56 | 2017-04-30T19:00:56 | 89,401,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py |
"""Travel URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^', include('apps.users_app.urls', namespace='users')),
url(r'^travels/', include('apps.trips_app.urls', namespace='travels')),
url(r'^destination/', include('apps.destinations_app.urls', namespace='destinations')),
]
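# Note: this URLconf targets Django 1.10, where url() and include() come from
# django.conf.urls. On Django >= 2.0 the same routing would use re_path() from
# django.urls; a minimal sketch, assuming each included URLconf also declares
# an app_name for its namespace:
#
#     from django.urls import re_path, include
#
#     urlpatterns = [
#         re_path(r'^', include('apps.users_app.urls', namespace='users')),
#         re_path(r'^travels/', include('apps.trips_app.urls', namespace='travels')),
#         re_path(r'^destination/', include('apps.destinations_app.urls', namespace='destinations')),
#     ]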
| ["[email protected]"] | |
523c27cef991c4da732221247a1e6f5b62c76c04 | 6e1569e876ad1094efdaa013a5311cfc79c56da5 | /src/create_ls_corpus.py | d6371f39b348700adf67ea26191253f7cd0576c7 | ["MIT"] | permissive | mbencherif/forced-alignment | 83895cb76435e36a380585a2128beb6d9feef9f7 | 39fa08e78d4fc2a85fda81b31a320e1b0d2578f0 | refs/heads/master | 2022-02-17T11:10:37.083002 | 2019-07-17T07:37:55 | 2019-07-17T07:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,327 | py |
# Create ReadyLingua Corpus
import argparse
import logging
import math
import os
import re
import sys
from os import makedirs, walk
from os.path import exists, splitext, join
from pathlib import Path
from librosa.output import write_wav
from pydub.utils import mediainfo
from tqdm import tqdm
from constants import CORPUS_ROOT, CORPUS_RAW_ROOT
from corpus.corpus import LibriSpeechCorpus
from corpus.corpus_entry import CorpusEntry
from corpus.corpus_segment import Speech, Pause, UnalignedSpeech
from util.audio_util import crop_to_segments, seconds_to_frame, read_audio
from util.corpus_util import save_corpus, find_file_by_suffix
from util.log_util import log_setup, create_args_str
logfile = 'create_ls_corpus.log'
log_setup(filename=logfile)
log = logging.getLogger(__name__)
# -------------------------------------------------------------
# CLI arguments
# -------------------------------------------------------------
parser = argparse.ArgumentParser(description="""Create LibriSpeech corpus from raw files""")
parser.add_argument('-f', '--file', help='Dummy argument for Jupyter Notebook compatibility')
parser.add_argument('-s', '--source_root', default=CORPUS_RAW_ROOT,
                    help=f'(optional) source root directory (default: {CORPUS_RAW_ROOT})')
parser.add_argument('-t', '--target_root', default=CORPUS_ROOT,
help=f'(optional) target root directory (default: {CORPUS_ROOT})')
parser.add_argument('-m', '--max_entries', type=int, default=None,
help='(optional) maximum number of corpus entries to process. Default=None=\'all\'')
parser.add_argument('-o', '--overwrite', default=False, action='store_true',
                    help='(optional) overwrite existing audio data if already present. If set to true this will '
                         'convert, resample and crop the audio data to a 16kHz mono WAV file, which will prolong the '
                         'corpus creation process considerably. If set to false, the conversion of audio data will be '
                         'skipped if the file is already present in the target directory and the corpus will only be '
                         'updated with the most recent corpus entries. Default=False')
args = parser.parse_args()
# -------------------------------------------------------------
# Other values
# -------------------------------------------------------------
books_pattern = re.compile(r'(?P<book_id>\d+)'
                           r'\s*\|\s*'
                           r'(?P<book_title>.*?)'
                           r'\s*(\|\s*|\n)')
speakers_pattern = re.compile(r'(?P<speaker_id>\d+)'
                              r'\s*\|\s*'
                              r'(?P<sex>[MF])'
                              r'\s*\|\s*'
                              r'(?P<subset>.*?)'
                              r'\s*\|\s*'
                              r'(?P<minutes>\d[\d.]*)'
                              r'\s*\|\s*'
                              r'(?P<speaker_name>.*)')
chapters_pattern = re.compile(r'(?P<chapter_id>\d+)'
                              r'\s*\|\s*'
                              r'(?P<reader_id>\d+)'
                              r'\s*\|\s*'
                              r'(?P<minutes>\d[\d.]*)'
                              r'\s*\|\s*'
                              r'(?P<subset>.*?)'
                              r'\s*\|\s*'
                              r'(?P<project_id>\d+)'
                              r'\s*\|\s*'
                              r'(?P<book_id>\d+)'
                              r'\s*\|\s*'
                              r'(?P<chapter_title>.*)'
                              r'\s*\|\s*'
                              r'(?P<project_title>.*)')
segment_pattern = re.compile(r'(?P<segment_id>.*)\s(?P<segment_start>.*)\s(?P<segment_end>.*)\n')
non_ascii_pattern = re.compile(r'[^\x00-\x7F]+')
punctuation_pattern = re.compile(r'[^\w\s]')
whitespace_pattern = re.compile(r'\s+')
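# Illustrative parse of a chapters line (values invented for the example, not
# taken from the real LibriSpeech CHAPTERS.TXT):
#
#     >>> line = '131 | 30 | 7.38 | train-clean-360 | 1222 | 2217 | Chapter 01 | Some Project'
#     >>> m = chapters_pattern.search(line)
#     >>> m.group('chapter_id'), m.group('reader_id'), m.group('minutes')
#     ('131', '30', '7.38')
#     >>> m.group('chapter_title'), m.group('project_title')
#     ('Chapter 01', 'Some Project')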
def main():
print(create_args_str(args))
source_root = join(args.source_root, 'librispeech-raw')
target_root = join(args.target_root, 'librispeech-corpus')
print(f'Processing files from {source_root} and saving them in {target_root}')
corpus, corpus_file = create_corpus(source_root, target_root, args.max_entries)
print(f'Done! Corpus with {len(corpus)} entries saved to {corpus_file}')
def create_corpus(source_root, target_root, max_entries=None):
if not exists(source_root):
print(f"ERROR: Source root {source_root} does not exist!")
        exit(1)
if not exists(target_root):
makedirs(target_root)
return create_librispeech_corpus(source_root=source_root, target_root=target_root, max_entries=max_entries)
def create_librispeech_corpus(source_root, target_root, max_entries):
audio_root = join(source_root, 'audio')
book_info, chapter_info, speaker_info = collect_corpus_info(audio_root)
print('loading book texts')
books_root = join(source_root, 'books')
books = collect_book_texts(books_root)
print('creating corpus entries')
corpus_entries = []
directories = [root for root, subdirs, files in walk(audio_root) if not subdirs]
progress = tqdm(directories, total=min(len(directories), max_entries or math.inf), file=sys.stderr, unit='entries')
for raw_path in progress:
if max_entries and len(corpus_entries) >= max_entries:
break
progress.set_description(f'{raw_path:{100}}')
parms = collect_corpus_entry_parms(raw_path, book_info, chapter_info, speaker_info)
        segments_file, transcript_file, mp3_file = collect_corpus_entry_files(raw_path, parms)
        if not segments_file or not transcript_file or not mp3_file:
            log.warning(f'Skipping directory (not all files found): {raw_path}')
            continue
        segments_file = join(raw_path, segments_file)
        transcript_file = join(raw_path, transcript_file)
book_id = parms['book_id']
book_text = books[book_id] if book_id in books else ''
if not book_text:
            log.warning('No book text found. Processing directory, but speech pauses might be wrong.')
segments, full_transcript = create_segments(segments_file, transcript_file, book_text)
# Convert, resample and crop audio
audio_file = join(raw_path, mp3_file)
target_audio_path = join(target_root, splitext(mp3_file)[0] + ".wav")
if not exists(target_audio_path) or args.overwrite:
audio, rate = read_audio(audio_file, resample_rate=16000, to_mono=True)
audio, rate, segments = crop_to_segments(audio, rate, segments)
write_wav(target_audio_path, audio, rate)
parms['media_info'] = mediainfo(target_audio_path)
# Create corpus entry
corpus_entry = CorpusEntry(target_audio_path, segments, full_transcript=full_transcript, raw_path=raw_path,
parms=parms)
corpus_entries.append(corpus_entry)
corpus = LibriSpeechCorpus(corpus_entries, target_root)
corpus_file = save_corpus(corpus, target_root)
return corpus, corpus_file
def collect_corpus_info(directory):
# books
books_file = find_file_by_suffix(directory, 'BOOKS.TXT')
books_file = join(directory, books_file)
books = collect_books(books_file)
# chapters
chapters_file = find_file_by_suffix(directory, 'CHAPTERS.TXT')
chapters_file = join(directory, chapters_file)
chapters = collect_chapters(chapters_file)
# speakers
speakers_file = find_file_by_suffix(directory, 'SPEAKERS.TXT')
speakers_file = join(directory, speakers_file)
speakers = collect_speakers(speakers_file)
return books, chapters, speakers
def collect_info(file, pattern):
with open(file) as f:
for line in (line for line in f.readlines() if not line.startswith(';')):
results = re.search(pattern, line)
if results:
yield results
def collect_books(books_file):
books = {}
for result in collect_info(books_file, books_pattern):
book_id = result.group('book_id') if result.group('book_id') else 'unknown'
book_title = result.group('book_title') if result.group('book_title') else 'unknown'
books[book_id] = book_title
books['unknown'] = 'unknown'
return books
def collect_chapters(chapters_file):
chapters = {}
for result in collect_info(chapters_file, chapters_pattern):
chapter_id = result.group('chapter_id')
chapter = {
'reader_id': result.group('reader_id'),
'length': float(result.group('minutes')),
'subset': result.group('subset'),
'project_id': result.group('project_id'),
'book_id': result.group('book_id'),
'chapter_title': result.group('chapter_title'),
'project_title': result.group('project_title')
}
chapters[chapter_id] = chapter
chapters['unknown'] = 'unknown'
return chapters
def collect_speakers(speakers_file):
speakers = {}
for result in collect_info(speakers_file, speakers_pattern):
speaker_id = result.group('speaker_id')
speaker = {
'sex': result.group('sex'),
'subset': result.group('subset'),
'length': float(result.group('minutes')),
'name': result.group('speaker_name'),
}
speakers[speaker_id] = speaker
speakers['unknown'] = 'unknown'
return speakers
def collect_book_texts(books_root):
book_texts = {}
for root, files in tqdm([(root, files) for root, subdirs, files in walk(books_root)
if not subdirs and len(files) == 1], unit='books'):
book_path = join(root, files[0])
encoding = 'latin-1' if 'ascii' in book_path else 'utf-8' # use latin-1 for ascii files because of encoding problems
book_id = root.split(os.sep)[-1]
book_text = Path(book_path).read_text(encoding=encoding)
book_texts[book_id] = book_text
return book_texts
def collect_corpus_entry_parms(directory, book_info, chapter_info, speaker_info):
files_pattern = re.compile("[\\\/]mp3[\\\/](?P<speaker_id>\d*)[\\\/](?P<chapter_id>\d*)")
result = re.search(files_pattern, directory)
if result:
speaker_id = result.group('speaker_id')
chapter_id = result.group('chapter_id')
chapter = chapter_info[chapter_id] if chapter_id in chapter_info else {'chapter_title': 'unknown',
'book_id': 'unknown',
'subset': 'unknown'}
        speaker = speaker_info.get(speaker_id, {'name': 'unknown'})
        book_id = chapter['book_id']
        book_title = book_info[book_id] if book_id in book_info else chapter.get('project_title', 'unknown')
chapter_title = chapter['chapter_title']
subset = chapter['subset']
speaker_name = speaker['name']
return {'name': book_title,
'id': chapter_id,
'chapter_title': chapter_title,
'language': 'en',
'book_id': book_id,
'speaker_id': speaker_id,
'chapter_id': chapter_id,
'speaker_name': speaker_name,
'subset': subset}
def collect_corpus_entry_files(directory, parms):
speaker_id = parms['speaker_id']
chapter_id = parms['chapter_id']
segments_file = find_file_by_suffix(directory, f'{speaker_id}-{chapter_id}.seg.txt')
transcript_file = find_file_by_suffix(directory, f'{speaker_id}-{chapter_id}.trans.txt')
mp3_file = find_file_by_suffix(directory, f'{chapter_id}.mp3')
return segments_file, transcript_file, mp3_file
def normalize_text(text):
text = text.upper()
text = text.replace('-', ' ')
text = re.sub(non_ascii_pattern, '', text)
text = re.sub(punctuation_pattern, '', text)
text = re.sub(whitespace_pattern, ' ', text)
return text
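# Illustrative trace of normalize_text, following the substitutions above
# (uppercase, hyphens to spaces, drop non-ASCII, drop punctuation, collapse
# whitespace); note the non-ASCII 'ö' is removed outright:
#
#     >>> normalize_text('Hello, wörld-wide!')
#     'HELLO WRLD WIDE'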
def find_text_between(prev_text, next_text, book_text):
prev_text = normalize_text(prev_text)
next_text = normalize_text(next_text)
if prev_text in book_text and next_text in book_text:
        # find the occurrences of prev_text and next_text which are closest to each other
prev_indices = [(m.start(), m.end()) for m in re.finditer(prev_text, book_text)]
min_distance = math.inf
start = end = 0
for prev_start, prev_end in prev_indices:
next_indices = [(m.start(), m.end()) for m in re.finditer(next_text, book_text) if m.start() > prev_end]
for next_start, next_end in next_indices:
distance = next_start - prev_end
if distance < min_distance:
min_distance = distance
start = prev_end + 1
end = next_start - 1
between_text = book_text[start:end]
return between_text
return None
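# Illustrative example; assumes book_text was already passed through
# normalize_text, as create_segments below does. The closest pairing of the
# two snippets is found and the text between them is returned:
#
#     >>> find_text_between('HELLO', 'WORLD', 'SAY HELLO BIG WORLD NOW')
#     'BIG'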
def create_segments(segments_file, transcript_file, book_text):
book_text = normalize_text(book_text)
full_transcript = ''
segment_texts = {}
with open(transcript_file, 'r') as f_transcript:
for line in f_transcript.readlines():
segment_id, segment_text = line.split(' ', 1)
segment_texts[segment_id] = segment_text.replace('\n', '')
segments = []
with open(segments_file, 'r') as f_segments:
lines = f_segments.readlines()
for i, line in enumerate(lines):
segment_id, next_start, next_end = parse_segment_line(line)
segment_text = segment_texts[segment_id] if segment_id in segment_texts else ''
full_transcript += segment_text + '\n'
# add pause or missing speech segment between speeches (if there is one)
if i > 0:
prev_id, prev_start, prev_end = parse_segment_line(lines[i - 1])
prev_text = segment_texts[prev_id] if prev_id in segment_texts else None
between_text = find_text_between(prev_text, segment_text, book_text)
between_start = prev_end + 1
between_end = next_start - 1
if between_end - between_start > 0:
if between_text:
full_transcript += between_text + '\n'
between_segment = UnalignedSpeech(start_frame=between_start, end_frame=between_end,
transcript=between_text)
else:
between_segment = Pause(start_frame=between_start, end_frame=between_end)
segments.append(between_segment)
speech = Speech(start_frame=next_start, end_frame=next_end, transcript=segment_text)
segments.append(speech)
return segments, full_transcript
def parse_segment_line(line):
result = re.search(segment_pattern, line)
if result:
segment_id = result.group('segment_id')
segment_start = seconds_to_frame(result.group('segment_start'))
segment_end = seconds_to_frame(result.group('segment_end'))
return segment_id, segment_start, segment_end
return None, None, None
if __name__ == '__main__':
main()
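# Typical invocation, sketched under the assumption that CORPUS_RAW_ROOT and
# CORPUS_ROOT in constants.py point at valid directories:
#
#     python create_ls_corpus.py -s /path/to/raw -t /path/to/corpus -m 10
#
# i.e. read raw data from <source_root>/librispeech-raw and save at most 10
# corpus entries to <target_root>/librispeech-corpus.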
| ["[email protected]"] | |
f1c2edcac27a6ce135430dbd9554ede1eecf2db1 | caeec99c6a0e7d0dd625891c5adacd23ff311892 | /trunk/config.py.sample | 797bfa19df19a436c7f0371bef0150aa14ea9801 | [] | no_license | BGCX067/faccbk-svn-to-git | fad0e44a3ce675d390751a4ff4cc8afbe9a4ebe8 | 82a5a801a9a2e19a2a72cbbdce0324a42ad699a4 | refs/heads/master | 2016-09-01T08:53:29.582184 | 2015-12-28T14:36:39 | 2015-12-28T14:36:39 | 48,699,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | sample |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Aug 15, 2010
@author: Wang Yuanyi
'''
# Please change the following 2 rows to your family members' Google accounts
Admin = '@gmail.com'
Users = ['@gmail.com','@gmail.com']
TEST = False
from wiwikai.faccbk import TransPurposeCategory, TransAccount, Payee, \
trans_type_expense, trans_type_income, trans_account_type_credit_card, \
trans_account_type_debit_card
import os
server_software = os.environ['SERVER_SOFTWARE']
DEVELOPMENT = False
if server_software.startswith('Development'):
DEVELOPMENT = True
TEST = True
if DEVELOPMENT == True:
Admin = '[email protected]'
Users = ['[email protected]']
if TEST:
def insert_trans_purpose_category(ptitle, ptrans_type):
transTargetCtg = TransPurposeCategory(title = ptitle, trans_type = ptrans_type )
transTargetCtg.put()
def insert_trans_account(plastnumber, ptrans_account_type, pbank_name, pstatement_date, ppayment_due_date):
creditCard = TransAccount(last4number = plastnumber, type=ptrans_account_type, bank_name = pbank_name, statement_date = pstatement_date, payment_due_date =ppayment_due_date )
creditCard.put()
def insert_payee(payee_title):
payee = Payee(title = payee_title)
payee.put()
if TransPurposeCategory.all().count() == 0:
insert_trans_purpose_category(u"家庭食物支出", trans_type_expense)
insert_trans_purpose_category(u"工资收入", trans_type_income)
if TransAccount.all().count() == 0:
insert_trans_account('8888', trans_account_type_credit_card, 'ICBC', 20, 8)
insert_trans_account('7777', trans_account_type_debit_card, 'JBC', 25, 15)
if Payee.all().count() == 0:
        insert_payee(u'孩子')  # "child"
        insert_payee(u'老婆')  # "wife"
        insert_payee(u'自己')  # "self"
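# Usage note (an assumption based on the '.sample' suffix and on the model
# classes above, which follow the Google App Engine datastore API): copy this
# file to config.py alongside it and fill in the real Admin/Users Google
# accounts before deploying.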
| ["[email protected]"] | |
a5557e1e7860c8f253f595c34e566932589397fe | 9d5ae8cc5f53f5aee7247be69142d9118769d395 | /105. Construct Binary Tree from Preorder and Inorder Traversal.py | 07ece7f58fcc51506392b5c3a8ab71150a8ac29c | [] | no_license | BITMystery/leetcode-journey | d4c93319bb555a7e47e62b8b974a2f77578bc760 | 616939d1599b5a135747b0c4dd1f989974835f40 | refs/heads/master | 2020-05-24T08:15:30.207996 | 2017-10-21T06:33:17 | 2017-10-21T06:33:17 | 84,839,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
if len(preorder) == 1:
return TreeNode(preorder[0])
root = TreeNode(preorder[0])
i = inorder.index(preorder[0])
left_inorder = inorder[:i]
right_inorder = inorder[i + 1:]
left_preorder = preorder[1: i + 1]
right_preorder = preorder[i + 1:]
root.left = self.buildTree(left_preorder, left_inorder)
root.right = self.buildTree(right_preorder, right_inorder)
return root
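# Illustrative usage with the classic LeetCode 105 example:
#   preorder = [3, 9, 20, 15, 7], inorder = [9, 3, 15, 20, 7]
# The first preorder value (3) is the root; its index in inorder splits the
# list into the left subtree [9] and the right subtree [15, 20, 7]:
#         3
#        / \
#       9  20
#          / \
#         15  7
# Note: inorder.index() inside the recursion makes this O(n^2) worst case; a
# value-to-index hash map built once would reduce it to O(n).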
| ["[email protected]"] | |
59b2cce8a4944dee1270584b23cafa10f4126df0 | eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6 | /ccpnmr2.4/python/ccpnmr/analysis/popups/LinkPeakLists.py | 070d735f76a146f4c371726cf9dff58a46e43c2b | [] | no_license | edbrooksbank/ccpnmr2.4 | cfecb0896dcf8978d796e6327f7e05a3f233a921 | f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c | refs/heads/master | 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 | HTML | UTF-8 | Python | false | false | 51,456 | py |
"""
======================COPYRIGHT/LICENSE START==========================
LinkPeakLists.py: Part of the CcpNmr Analysis program
Copyright (C) 2003-2010 Wayne Boucher and Tim Stevens (University of Cambridge)
=======================================================================
The CCPN license can be found in ../../../../license/CCPN.license.
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- email: [email protected]
- contact the authors: [email protected], [email protected]
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
import cPickle
from ccpnmr.analysis.core.AssignmentAdvanced import pickAssignSpecFromRoot, assignSpecNonRootResonances
from ccpnmr.analysis.popups.BasePopup import BasePopup
from ccpnmr.analysis.core.ExperimentBasic import getOnebondExpDimRefs, getPrimaryDataDimRef, getSeqAssignRefExperiments
from ccpnmr.analysis.core.MarkBasic import createPeakMark
from ccpnmr.analysis.core.WindowBasic import getWindowPaneName, getSpectrumViews, toggleSpectrum
from ccpnmr.analysis.core.WindowBasic import findOrthogonalWindows, getPeakWindowPosition
from ccpnmr.analysis.core.WindowBasic import getPeakDimAxisMapping
from ccpnmr.analysis.core.Util import getAnalysisDataDim
from memops.gui.ButtonList import ButtonList, UtilityButtonList
from memops.gui.CheckButton import CheckButton
from memops.gui.IntEntry import IntEntry
from memops.gui.FloatEntry import FloatEntry
from memops.gui.Frame import Frame
from memops.gui.Label import Label
from memops.gui.LabelDivider import LabelDivider
from memops.gui.MessageReporter import showWarning, showOkCancel
from memops.gui.ProgressBar import ProgressBar
from memops.gui.PulldownList import PulldownList
from memops.gui.ScrolledMatrix import ScrolledMatrix
from memops.gui.TabbedFrame import TabbedFrame
# Shrink non-essential bits of window
# Equalise window sizes
# Equalise window ranges
# 3D roots
defaultTolerances = {'1H':0.05,'13C':0.20,'15N':0.3}
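# Default per-isotope assignment tolerances; units assumed to be ppm, matching
# the root-dimension tolerance entries in the "Tolerances" tab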
def testPopup(argServer):
popup = LinkPeakListsPopup(argServer.parent)
popup.open()
class LinkPeakListsPopup(BasePopup):
"""
**Pick and Assign Peaks Based on Locations in Root Spectra**
This popup window is designed to help in the early stages of peak picking and
resonance assignment. The main idea is that certain "root" spectra, typically
an HNCO or 15N HSQC, with initialised peak lists are used as a basis for
locating and assigning peaks in other spectra that overlap in the relevant
dimensions. For example you may use the amide peak positions in a 15N HSQC
spectrum to pick peaks in restricted regions of a 3D HNCACB spectrum and copy
amide resonance assignments to the amide dimensions of the 3D spectrum where
they overlap with the HSQC peaks.
Often the root spectrum, that is the source of peak position and assignment
information, will only be assigned in an anonymous way; the peaks will have
spin system and resonance numbers (e.g. "{7}[4][5]") and not link to any
particular residues or atoms. For this tool it doesn't matter whether the root
peaks carry full assignments or not, all that is important is that there are
some peaks to specify locations and some form of assignment to copy. The
easiest way to setup initial assignments on a root spectrum, after picking
some peaks, is to use the `Initialise Root Resonances`_ option.
In normal operation the user chooses a peak list to act as the source of
the root assignments and positions, and maybe a spectrum window to view those
peaks in. Next the assignable spectra that are to be the target for peak
picking and/or assignment are chosen. Firstly, the user adds and removes
spectrum windows to the "Target Windows" list, the spectra to be operated on
are then selected from those that are visible in these windows. By selecting
different kinds of window different kinds of spectra may be operated on, so
for example the user could both work with HCN spectra and HHN spectra with the
same HSQC root. This tool is largely visual and it is important to be able to
see the locations that are bing considered (e.g. amide), hence the user is
only presented with assignable spectra that can potentially be seen. The
"Assignable Spectra" section is filled with the spectra for the target
windows that may be assigned, and the user double-clicks in the "Active?"
column to set whether the individual spectra should be operated on or not (for
peak picking and assignment).
Once windows and spectra are set up it is advisable to consider the "Tolerances"
tab that controls how wide a search region is used to pick and assign peaks
relative to the positions of the reference, root peaks. The user can also
set up exclusions to avoid picking peaks near the water signal (e.g. in 15N
HSQC-NOESY) or homonuclear diagonal.
The last "Link Peaks" tab is the one that remains active while the user is
actually running the peak picking and assignment functions. Here, the main
table lists all of the peaks in the root peak list that are used for position
and assignment references. Clicking on a row in this table, assuming the
relevant navigations are checked above, will cause the location of any root
and target spectrum windows to move in order to show the root (e.g. amide)
location for the selected row. Even if peaks are not peaked or assigned this
tool may be used for efficient coordination in window. Typically clicking on
an HSQC peak will present an amide position in 3D target windows, locating the
X and Z axes, so that the user can see the column of peaks that may be
picked/assigned.
For a single selected root location, the user may transfer assignments or pick
peaks *and* transfer assignments, by clicking the appropriate buttons in the
"Pick & Assign Functions" section. This is one-by-one way of working is the
safest because the user is presented will the spectra for each location and
will be able to view the result of the peak picking and assignment. The
equivalent "Process-all Functions" will work though all root locations in the
table picking and assigning peaks en masse, according to the set tolerances,
in a quick but less controlled manner.
The "Assign Non-root Resonances" options are present so that you can give a
starting assignment to the assigned spectra in the dimension that does not
match the root location. For example, you could add 13C resonances to an
HNcoCA spectrum or 1H resonances to the indirect 1H dimension of a
15N HSQC-TOCSY. These "non-root" resonance numbers will all be new and unique,
thus this operation should only be used for types of experiment where there is
one peak for each non-root resonance. For example HNcoCA has one peak per CA
resonance but HNCA usually has two, so the function is only recommended for the
former.
**Caveats & Tips**
This tool can be operated in two slightly different ways, according to the
preference of the user and the quality of the spectra. The "Process-all"
functions can be used to start with, given fairly strict tolerances, but the
user should then go through each root position checking for and tidying up
mistakes (picking noise & artefacts for example). Alternatively the root
locations could be picked and assigned one-by-one so the user can spot
problems as they occur.
Although the same tolerances, set via the "Tolerances" tab, are used for both
peak picking and for resonance assignment some peaks that can be picked may
not be assigned with the same settings. Whereas the peak picking is done
relative to the root location, the assignment, in common with the rest of
Analysis, is relative to the current chemical shift average and thus may
differ from the root location. Widening the tolerances a little, or increasing
the chemical shift weighting of the root spectrum can allow assignments to be
made if they were previously out of bounds.
.. _`Initialise Root Resonances`: InitRootAssignmentsPopup.html
"""
def __init__(self, parent, *args, **kw):
self.waiting = False
self.windowPane = None
self.windowPanes = []
self.rootPeak = None
self.peakList = None
self.rootPane = None
self.guiParent = parent
self.targetPeakLists = []
self.dimMappings = {}
self.selDimMapping = {}
self.nonRootDict = {}
self.marks = []
self.project = parent.project
BasePopup.__init__(self, parent=parent, title='Assignment : Pick & Assign From Roots', **kw)
def open(self):
BasePopup.open(self)
self.updateRootPeakList()
def close(self):
self.setAppDataOptions()
BasePopup.close(self)
def body(self, guiFrame):
self.geometry('500x600')
guiFrame.expandGrid(0,0)
self.progressBar = ProgressBar(self,text = '', progress = 0, total = 100, title='Progress')
self.progressBar.close()
row = 0
tipTexts = ['Selection of source or "root" for assignments, the target spectra and which spectrum windows to navigate within',
'Settings that relate to assignment and peak picking tolerances when going from "root" positions to related spectra',
                'The main assignment & peak picking functions; using a table of the "root" peaks that are the source of peak (e.g. amide) positions and assignments.']
options = ['Windows & Spectra','Tolerances','Link Peaks']
tabbedFrame = TabbedFrame(guiFrame, options=options,
grid=(row,0), tipTexts=tipTexts)
frameA, frameB, frameC = tabbedFrame.frames
frameA.expandGrid(5,1)
frameB.expandGrid(3,0)
frameC.expandGrid(2,0)
# Windows & Spectra
frame = Frame(frameA, grid=(0,0), gridSpan=(1,2), sticky='ew')
frame.grid_columnconfigure(3, weight=1)
label = Label(frame, text='Root Peak List:', grid=(0,0))
tipText = 'Selects which peak list is considered as the "root"; the positions and assignments that will be used to pick/assign related spectra'
self.peakListPulldown = PulldownList(frame, callback=self.changeRootPeakList,
grid=(0,1), tipText=tipText)
label = Label(frame, text='Root Window:', grid=(0,2))
tipText = 'Selects which spectrum window is used to navigate to the location of root peak selected in the "Link Peaks" table'
self.rootPanePulldown = PulldownList(frame, callback=self.changeRootWindow,
grid=(0,3), tipText=tipText)
div = LabelDivider(frameA, text='Target Windows',
grid=(1,0), gridSpan=(1,2))
tipTexts = ['Remove the selected target window from consideration, so that it is no longer used for navigation and assignment',
'Add the window selected in the adjacent pulldown list as a source of assignable spectra and a target for navigation']
texts = ['Remove Target Window','Add Target Window:']
commands = [self.removeWindow,self.addWindow]
self.windowButtons = ButtonList(frameA, texts=texts, tipTexts=tipTexts,
commands=commands, grid=(2,0))
tipText = 'Selects a spectrum window, from those not already selected, that may be used for assignment/navigation'
self.windowPulldown = PulldownList(frameA, callback=None,
grid=(2,1), tipText=tipText)
tipTexts = ['The serial number of the spectrum window',
'The name of the spectrum window used for navigation and providing assignable spectra',
'When there are multiple options, states which axes of the window (X, Y, Z...) correspond to the directly bound "root" dimensions']
headingList = ['#','Name','Selected\nMapping']
self.mappingPulldown = PulldownList(self, callback=self.setMapping)
editWidgets = [None, None, self.mappingPulldown]
editGetCallbacks = [None, None, self.getMapping]
editSetCallbacks = [None, None, self.setMapping]
self.windowMatrix = ScrolledMatrix(frameA, headingList=headingList,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
callback=self.selectWindow,
grid=(3,0), gridSpan=(1,2),
tipTexts=tipTexts)
div = LabelDivider(frameA, text='Assignable Spectra', grid=(4,0), gridSpan=(1,2))
tipTexts = ['The "experiment:spectrum" name for the spectrum which may be used for peak picking & assignment; must be present in the above windows',
'Whether the spectrum is considered active for the processes of peak picking and/or assignment; inactive ones will not be affected',
'Whether spectrum dimensions that do not map to the root (typically non-amide) may be assigned; common for HNCO, HNH-TOCSY etc.',
'The full CCPN experiment type of the spectrum; should be something with an obvious relation to the root peak list',
'The kinds of isotope (or otherwise) present on the dimensions of the spectrum']
headingList = ['Spectrum','Active?','Assign\nNon-root dim?',
'Experiment\nType','Dimensions']
editWidgets = [None, None, None, None, None,]
editGetCallbacks = [None, self.togglePeakList, self.toggleNonRootAssign, None, None,]
editSetCallbacks = [None, None, None, None, None,]
self.spectrumMatrix = ScrolledMatrix(frameA, headingList=headingList,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets, callback=None,
grid=(5,0), gridSpan=(1,2),
tipTexts=tipTexts)
# Tolerances
#div = LabelDivider(frameB, text='Tolerances')
#div.grid(row=0, column=0, sticky='nsew')
frame = Frame(frameB, grid=(1,0), sticky='nsew')
frame.grid_columnconfigure(5, weight=1)
self.tolLabel1 = Label(frame, text='Root Dim 1:', grid=(0,0))
tipText = 'Sets the upper limit to the dimension 1 assignment tolerance, within which peaks may be picked and assignments made, relative to the root location'
self.tolEntry1 = FloatEntry(frame, returnCallback=self.setDataDimTolerances,
width=8, grid=(0,1), tipText=tipText)
self.tolEntry1.bind('<Leave>', self.setDataDimTolerances, '+')
self.tolLabel2 = Label(frame, text='Root Dim 2:', grid=(1,0))
tipText = 'Sets the upper limit to the dimension 2 assignment tolerance, within which peaks may be picked and assignments made, relative to the root location'
self.tolEntry2 = FloatEntry(frame, returnCallback=self.setDataDimTolerances,
width=8, grid=(1,1), tipText=tipText)
self.tolEntry2.bind('<Leave>', self.setDataDimTolerances, '+')
label = Label(frame, text='Min Water:', grid=(2,0))
tipText = 'Sets the lower bound to the 1H ppm exclusion zone, usually representing a water signal, within which no peaks will be picked or assigned'
self.minWaterEntry = FloatEntry(frame, width=8, text=4.95,
grid=(2,1), tipText=tipText)
label = Label(frame, text='Max Water:', grid=(3,0))
tipText = 'Sets the upper bound to the 1H ppm exclusion zone, usually representing a water signal, within which no peaks will be picked or assigned'
self.maxWaterEntry = FloatEntry(frame, width=8, text=4.95,
grid=(3,1), tipText=tipText)
label = Label(frame, text='Max Num Marks:', grid=(4,0))
tipText = 'When using multi-dimensional cross marks to indicate peak positions, sets how many marks persist from subsequent peak navigations'
self.marksEntry = IntEntry(frame, width=8, text=1,
grid=(4,1), tipText=tipText)
self.marksEntry.grid(row=4, column=1, sticky='w')
label = Label(frame, text='1H Diagonal:', grid=(5,0))
tipText = 'Sets the width of the exclusion zone around the 1H-1H homonuclear diagonal, within which no peaks will be picked or assigned '
self.diagEntry = FloatEntry(frame, width=8, text=0.25,
grid=(5,1), tipText=tipText)
# Peaks
div = LabelDivider(frameC, text='Root Peaks', grid=(0,0))
frame = Frame(frameC, grid=(1,0), sticky='ew')
frame.expandGrid(1,3)
#label = Label(frame, text='Find Spin System:')
#label.grid(row=1, column=0, sticky='nw')
label = Label(frame, text='Navigate to root', grid=(0,0))
tipText = 'Sets whether clicking in the root peak table will cause the selected root spectrum window to display the selected peak'
self.followRootSelect = CheckButton(frame, callback=None, tipText=tipText,
grid=(0,1), selected=True)
label = Label(frame, text='Navigate to targets', grid=(0,2))
tipText = 'Sets whether clicking in the root peak table will cause the selected navigation window views to move to the selected root position'
self.followTargetSelect = CheckButton(frame, callback=None, tipText=tipText,
grid=(0,3), selected=True)
label = Label(frame, text='Set Non-root Atom Types', grid=(0,4))
    tipText = 'Sets whether, for appropriate experiments like HNCO, HNCA, HNHA etc., non-root assignments set the resonance atom type'
self.assignTypeSelect = CheckButton(frame, callback=None, tipText=tipText,
grid=(0,5), selected=False)
tipTexts = ['The serial number of the peak in the root peak list',
'The assignment of the root peak in the first root dimension, that maps to the (typically) higher dimensionality assignment spectra',
'The assignment of the root peak in the second root dimension, that maps to the (typically) higher dimensionality assignment spectra',
'The location of the root peak in the first root dimension; the basis for peak picking and assignment zones',
'The location of the root peak in the second root dimension; the basis for peak picking and assignment zones']
self.rootPeakTipTexts = tipTexts
headingList = ['#','Assign F1','Assign F2','Shift F1','Shift F2']
self.rootPeakMatrix = ScrolledMatrix(frameC, headingList=headingList,
callback=self.selectRootPeak,
grid=(2,0), tipTexts=tipTexts)
    tipTexts = ['If a peak is selected, select the next peak in the table and navigate to that peak\'s root location',
                'If a peak is selected, select the previous peak in the table and navigate to that peak\'s root location']
texts = ['Next Root','Previous Root']
commands = [self.nextTarget,self.prevTarget]
self.prevNextButtons = ButtonList(frameC, texts=texts, tipTexts=tipTexts,
commands=commands, grid=(3,0))
div = LabelDivider(frameC, text='Pick & Assign Functions', grid=(4,0))
tipTexts = ['Using the selected root peak as the source of assignment, spread assignments to the active spectra, within stated tolerances',
'Using the selected root peak as the source of assignment and centre of peak pick zone, pick and assign peaks in the active spectra',
'For the spectrum positions that match the selected root peak, add new (separate) non-root resonances to peaks (where missing)']
texts = ['Assign\nRoot Resonances',
'Pick & Assign\nRoot Resonances',
'Assign Non-root\nResonances']
commands = [self.assignTarget,
self.pickAssignTarget,
self.assignNonRootTarget]
self.rootPeakButtons = ButtonList(frameC, texts=texts, tipTexts=tipTexts,
commands=commands, grid=(5,0))
div = LabelDivider(frameC, text='Process-all Functions', grid=(6,0))
tipTexts = ['Using all peaks in the root peak list as an assignment source, spread assignments to the active spectra, within stated tolerances',
'Using all peaks in the root peak list as assignment sources and peak pick locations, pick and assign peaks in the active spectra',
'For the spectrum positions that match the all root peaks, add new (separate) non-root resonances to peaks (where missing)']
texts = ['Assign All\nRoot Resonances',
'Pick All & Assign\nRoot Resonances',
'Assign All Non-root\n Resonances']
commands = [self.assignAllTarget,
self.pickAssignAllTarget,
self.assignNonRootAllTarget]
self.bottomButtons = ButtonList(frameC, commands=commands, texts=texts,
grid=(7,0), sticky='ew', tipTexts=tipTexts)
buttons = UtilityButtonList(tabbedFrame.sideFrame, helpUrl=self.help_url,
grid=(0,0), sticky='e')
self.getAppDataOptions()
self.updateRootPeakList()
self.updateRootWindow()
self.updateWindowPulldown()
self.administerNotifiers(self.registerNotify)
def administerNotifiers(self, notifyFunc):
for func in ('__init__', 'delete','setName'):
for clazz in ('ccp.nmr.Nmr.DataSource', 'ccp.nmr.Nmr.Experiment'):
notifyFunc(self.updateSpectra, clazz, func)
notifyFunc(self.updateWindowPaneAfter, 'ccpnmr.Analysis.SpectrumWindowPane', func)
notifyFunc(self.updateWindowsAfter, 'ccpnmr.Analysis.SpectrumWindow', 'setName')
notifyFunc(self.updateSpectra, 'ccpnmr.Analysis.SpectrumWindowView', 'setIsPosVisible')
notifyFunc(self.updateSpectra, 'ccpnmr.Analysis.SpectrumWindowView', 'setIsNegVisible')
for func in ('__init__', 'delete',):
notifyFunc(self.updateAfter, 'ccp.nmr.Nmr.Peak', func)
notifyFunc(self.updateSpectra, 'ccp.nmr.Nmr.PeakList', func)
for func in ('setAnnotation','setPosition','setNumAliasing'):
notifyFunc(self.updateAfter, 'ccp.nmr.Nmr.PeakDim', func)
def toggleNonRootAssign(self, peakList):
boolean = self.nonRootDict.get(self.getPeakListId(peakList), False)
self.nonRootDict[self.getPeakListId(peakList)] = not boolean
self.updateSpectra()
def setMapping(self, null):
mapping = self.mappingPulldown.getObject()
if self.selDimMapping.get(self.windowPane) != mapping:
self.selDimMapping[self.windowPane] = mapping
self.updateWindows()
def getMapping(self, pane):
index = -1
mapping = self.selDimMapping.get(pane)
mappings = self.dimMappings.get(pane) or []
if mapping and (mapping in mappings):
index = mappings.index(mapping)
self.mappingPulldown.setup(mappings, mappings, index)
def updateButtons(self):
if self.windowPane:
self.windowButtons.buttons[0].enable()
else:
self.windowButtons.buttons[0].disable()
if self.peakList and self.peakList.peaks and self.targetPeakLists:
self.bottomButtons.buttons[0].enable()
self.bottomButtons.buttons[1].enable()
self.bottomButtons.buttons[2].enable()
else:
self.bottomButtons.buttons[0].disable()
self.bottomButtons.buttons[1].disable()
self.bottomButtons.buttons[2].disable()
if self.rootPeak:
self.prevNextButtons.buttons[0].enable()
self.prevNextButtons.buttons[1].enable()
else:
self.prevNextButtons.buttons[0].disable()
self.prevNextButtons.buttons[1].disable()
if self.rootPeak and self.targetPeakLists:
self.rootPeakButtons.buttons[0].enable()
self.rootPeakButtons.buttons[1].enable()
self.rootPeakButtons.buttons[2].enable()
else:
self.rootPeakButtons.buttons[0].disable()
self.rootPeakButtons.buttons[1].disable()
self.rootPeakButtons.buttons[2].disable()
def getRootPeakLists(self):
project = self.project
peakLists = []
for experiment in self.nmrProject.experiments:
isRoot = False
for expTransfer in experiment.expTransfers:
if expTransfer.transferType in ('onebond','CP'):
expDimRefs = expTransfer.sortedExpDimRefs()
if '1H' in expDimRefs[0].isotopeCodes:
if '1H' not in expDimRefs[1].isotopeCodes:
isRoot = True
break
else:
isRoot = True
break
if isRoot:
for spectrum in experiment.dataSources:
if len(spectrum.dataDims) != 2:
if experiment.refExperiment:
if experiment.refExperiment.name not in ('H[N[CO]]','H[N[co[CA]]]'):
continue
else:
continue
for peakList in spectrum.peakLists:
data = (experiment.name,spectrum.name,peakList.serial)
peakLists.append(['%s:%s:%d' % data, peakList])
peakLists.sort()
return peakLists
def getRootDataDims(self):
dims = []
if self.peakList:
spectrum = self.peakList.dataSource
experiment = spectrum.experiment
expDimRefPairs = []
for expDimRef0, expDimRef1 in getOnebondExpDimRefs(experiment):
if '1H' in expDimRef0.isotopeCodes:
expDimRefPairs.append( (expDimRef0, expDimRef1) )
elif '1H' in expDimRef1.isotopeCodes:
expDimRefPairs.append( (expDimRef0, expDimRef1) )
if not expDimRefPairs:
expDimRefPairs = getOnebondExpDimRefs(experiment)
for expDimRef0, expDimRef1 in expDimRefPairs:
dataDim0 = spectrum.findFirstDataDim(expDim=expDimRef0.expDim)
dataDim1 = spectrum.findFirstDataDim(expDim=expDimRef1.expDim)
if dataDim0 and dataDim1:
dims = [(dataDim0.dim, dataDim0), (dataDim1.dim, dataDim1)]
dims.sort()
dims = [x[1] for x in dims]
break # First onebond pair only
return dims
def getTargetPeakLists(self):
peakLists = set()
for windowPane in self.windowPanes:
for view in windowPane.spectrumWindowViews:
spectrum = view.analysisSpectrum.dataSource
freqDims = [dd for dd in spectrum.dataDims if dd.className == 'FreqDataDim']
if len(freqDims) >= 2:
peakList = spectrum.activePeakList or spectrum.findFirstPeakList()
if peakList:
peakLists.add(peakList)
return list(peakLists)
def getRootWindows(self):
panes = set()
if self.peakList:
views = getSpectrumViews(self.peakList.dataSource)
for view in views:
panes.add( view.spectrumWindowPane )
winData = [[getWindowPaneName(p), p] for p in panes]
winData.sort()
return winData
def getTargetWindows(self):
self.dimMappings = {}
if self.rootPane:
windowZplanes = findOrthogonalWindows(self.rootPane, [], minDims=2)
for (planeName0, pane, positions) in windowZplanes:
planeName = planeName0.split(' in ')[0]
if self.dimMappings.get(pane) is None:
self.dimMappings[pane] = []
if self.selDimMapping.get(pane) is None:
self.selDimMapping[pane] = planeName
self.dimMappings[pane].append(planeName)
for pane in self.selDimMapping:
avail = self.dimMappings.get(pane, [])
if self.selDimMapping[pane] not in avail:
if avail:
self.selDimMapping[pane] = avail[0]
else:
self.selDimMapping[pane] = None
winData = [[getWindowPaneName(p), p] for p in self.dimMappings.keys()]
winData.sort()
return winData
def changeRootPeakList(self, peakList):
if peakList is not self.peakList:
self.setAppDataOptions()
self.peakList = peakList
self.rootPeak = None
self.updateRootPeakList()
self.updateRootWindow()
self.updateAfter()
def changeRootWindow(self, rootPane):
if rootPane is not self.rootPane:
self.rootPane = rootPane
self.updateWindowPulldown()
def navigateToRoot(self):
if self.rootPeak:
maxMarks = self.marksEntry.get()
rootPane = self.rootPanePulldown.getObject()
if rootPane:
while len(self.marks) >= maxMarks:
if self.marks:
oldMark = self.marks.pop(0)
if not oldMark.isDeleted:
oldMark.delete()
else:
break
if maxMarks > 0:
mark = createPeakMark(self.rootPeak, lineWidth=2.0, remove=False)
self.marks.append(mark)
if self.followRootSelect.get():
windowFrame = rootPane.getWindowFrame()
windowFrame.gotoPeak(self.rootPeak)
def navigateToTarget(self):
if self.rootPeak and self.rootPane and self.followTargetSelect.get():
position = getPeakWindowPosition(self.rootPeak, self.rootPane,
useDefault=False)
# Blot out non-root positions from HNCO, HNcoCA
if len(self.rootPeak.peakDims) > 2:
dimMapping = getPeakDimAxisMapping(self.rootPeak, self.rootPane)
dataDims = self.getRootDataDims()
axisPanels = self.rootPane.sortedAxisPanels()
for i, axisPanel in enumerate(axisPanels):
peakDim = dimMapping.get(axisPanel.label)
if peakDim.dataDim not in dataDims:
position[i] = None
windowZplanes = findOrthogonalWindows(self.rootPane, [position,], minDims=2)
for (planeName0, windowPane, positions) in windowZplanes:
planeName = planeName0.split(' in ')[0]
if (windowPane in self.windowPanes) and \
(planeName == self.selDimMapping.get(windowPane)):
windowFrame = windowPane.getWindowFrame()
windowFrame.gotoPosition(position=positions[0])
def getWaterExclusionRegion(self):
waterMinPpm=self.minWaterEntry.get() or 4.95
waterMaxPpm=self.maxWaterEntry.get() or 4.95
waterExclusion = [waterMinPpm, waterMaxPpm]
waterExclusion.sort()
return waterExclusion
def pickAssignTarget(self):
if self.rootPeak and self.targetPeakLists:
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
diagTolerance=self.diagEntry.get() or 0.0
waterExclusion = self.getWaterExclusionRegion()
for peakList in self.targetPeakLists:
pickAssignSpecFromRoot([self.rootPeak,], peakList,
tolerances=tolerances,
progressBar=None,
diagTolerance=diagTolerance,
waterExclusion=waterExclusion)
def pickAssignAllTarget(self):
if showOkCancel('Confirm','Pick and assign all targets?', parent=self):
if self.peakList and self.targetPeakLists:
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
diagTolerance=self.diagEntry.get() or 0.0
waterExclusion = self.getWaterExclusionRegion()
for peakList in self.targetPeakLists:
spectrum = peakList.dataSource
experiment = spectrum.experiment
self.progressBar.setText('Working on %s:%s:%s' % (experiment.name,spectrum.name,peakList.serial))
pickAssignSpecFromRoot(self.peakList.peaks, peakList,
tolerances=tolerances,
progressBar=self.progressBar,
diagTolerance=diagTolerance,
waterExclusion=waterExclusion)
def assignTarget(self):
if self.rootPeak and self.targetPeakLists:
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
diagTolerance=self.diagEntry.get() or 0.0
waterExclusion = self.getWaterExclusionRegion()
for peakList in self.targetPeakLists:
pickAssignSpecFromRoot([self.rootPeak,], peakList, pickNew=False,
tolerances=tolerances, progressBar=None,
diagTolerance=diagTolerance,
waterExclusion=waterExclusion)
def assignAllTarget(self):
if showOkCancel('Confirm','Assign root resonances to all targets?', parent=self):
if self.peakList and self.targetPeakLists:
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
diagTolerance=self.diagEntry.get() or 0.0
waterExclusion = self.getWaterExclusionRegion()
for peakList in self.targetPeakLists:
spectrum = peakList.dataSource
experiment = spectrum.experiment
self.progressBar.setText('Working on %s:%s:%s' % (experiment.name,spectrum.name,peakList.serial))
pickAssignSpecFromRoot(self.peakList.peaks, peakList, pickNew=False,
tolerances=tolerances, progressBar=self.progressBar,
diagTolerance=diagTolerance,
waterExclusion=waterExclusion)
def nextTarget(self):
if self.rootPeak:
peaks = self.rootPeakMatrix.objectList
index = peaks.index(self.rootPeak)
peak = peaks[(index+1) % len(peaks)]
self.rootPeakMatrix.selectObject(peak)
def prevTarget(self):
if self.rootPeak:
peaks = self.rootPeakMatrix.objectList
index = peaks.index(self.rootPeak)
peak = peaks[(index-1) % len(peaks)]
self.rootPeakMatrix.selectObject(peak)
def assignNonRootTarget(self):
if self.rootPeak and self.targetPeakLists:
found = False
for peakList in self.targetPeakLists:
if self.nonRootDict.get(self.getPeakListId(peakList)):
found = True
break
if not found:
showWarning('Warning','No spectra are selected for non-root dim assignment.', parent=self)
return
refExps, refExpsCO = getSeqAssignRefExperiments(self.project)
assignType = self.assignTypeSelect.get()
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
for peakList in self.targetPeakLists:
if self.nonRootDict.get(self.getPeakListId(peakList)):
assignSpecNonRootResonances([self.rootPeak,], peakList,
diagTolerance=self.diagEntry.get() or 0.3,
waterMinPpm=self.minWaterEntry.get() or 4.95,
waterMaxPpm=self.maxWaterEntry.get() or 4.95,
tolerances=tolerances, progressBar=None,
assignType=assignType, refExpsCO=refExpsCO)
def assignNonRootAllTarget(self):
if showOkCancel('Confirm','Assign non-root resonances to all targets?', parent=self):
if self.peakList and self.targetPeakLists:
found = False
for peakList in self.targetPeakLists:
if self.nonRootDict.get(self.getPeakListId(peakList)):
found = True
break
if not found:
showWarning('Warning','No spectra are selected for non-root dim assignment.', parent=self)
return
refExps, refExpsCO = getSeqAssignRefExperiments(self.project)
assignType = self.assignTypeSelect.get()
tolerances = [self.tolEntry1.get() or 0.1, self.tolEntry2.get() or 0.1]
for peakList in self.targetPeakLists:
if self.nonRootDict.get(self.getPeakListId(peakList)):
spectrum = peakList.dataSource
experiment = spectrum.experiment
self.progressBar.setText('Working on %s:%s:%s' % (experiment.name,spectrum.name,peakList.serial))
assignSpecNonRootResonances(self.peakList.peaks, peakList,
diagTolerance=self.diagEntry.get() or 0.3,
waterMinPpm=self.minWaterEntry.get() or 4.95,
waterMaxPpm=self.maxWaterEntry.get() or 4.95,
tolerances=tolerances, progressBar=self.progressBar,
assignType=assignType, refExpsCO=refExpsCO)
def removeWindow(self):
windowPane = self.windowPane
if windowPane in self.windowPanes:
self.windowPanes.remove(windowPane)
self.windowPane = None
self.updateWindows(windowPane)
def addWindow(self):
if self.peakList:
windowPane = self.windowPulldown.getObject()
if windowPane and (windowPane not in self.windowPanes):
self.windowPanes.append(windowPane)
self.updateWindows(windowPane)
def selectRootPeak(self, obj, row, col):
if obj:
self.rootPeak = obj
self.updateButtons()
self.navigateToRoot() # Always done for marker
if self.followTargetSelect.get():
self.navigateToTarget()
def selectWindow(self, obj, row, col):
self.windowPane = obj
self.updateButtons()
def updateWindowPulldown(self):
windowData = self.getTargetWindows()
index = 0
names = []
windowPanes = []
if windowData:
names = [x[0] for x in windowData]
windowPanes = [x[1] for x in windowData]
newWindows = []
for windowPane in self.windowPanes:
if windowPane in windowPanes:
newWindows.append(windowPane)
# Remove already present windows from 'to-add' list
ii = windowPanes.index(windowPane)
del names[ii]
del windowPanes[ii]
if newWindows != self.windowPanes:
self.windowPanes = newWindows
self.updateWindows()
self.windowPulldown.setup(names, windowPanes, index)
def updateRootWindow(self):
windowData = self.getRootWindows()
index = -1
names = []
rootPane = self.rootPane
if windowData:
names = [x[0] for x in windowData]
panes = [x[1] for x in windowData]
if rootPane not in panes:
rootPane = panes[0]
index = panes.index(rootPane)
else:
rootPane = None
panes = []
if rootPane is not self.rootPane:
self.rootPane = rootPane
self.updateWindowPulldown()
self.rootPanePulldown.setup(names, panes, index)
def getAppDataOptions(self):
project = self.project
analysisProject = self.analysisProject
app = project.application
data = analysisProject.linkPeakListsData
if data:
try:
options = cPickle.loads(data)
except Exception:
options = {}
else:
options = {}
if options.get('peakList'):
s1,s2,s3 = options['peakList']
experiment = self.nmrProject.findFirstExperiment(serial=s1)
if experiment:
spectrum = experiment.findFirstDataSource(serial=s2)
if spectrum:
self.peakList = spectrum.findFirstPeakList(serial=s3)
self.rootPeak = None
if options.get('rootWindow'):
serials = options['rootWindow']
self.rootPane = None
if type(serials) == type(1):
window = analysisProject.findFirstSpectrumWindow(serial=serials)
if window:
self.rootPane = window.findFirstSpectrumWindowPane()
else:
serial1, serial2 = serials
window = analysisProject.findFirstSpectrumWindow(serial=serial1)
if window:
self.rootPane = window.findFirstSpectrumWindowPane(serial=serial2)
if options.get('windowPanes'):
for serial1, serial2 in options['windowPanes']:
window = analysisProject.findFirstSpectrumWindow(serial=serial1)
if window:
windowPane = window.findFirstSpectrumWindowPane(serial=serial2)
if windowPane:
self.windowPanes.append(windowPane)
elif options.get('windows'):
self.windowPanes = []
for serial in options['windows']:
window = analysisProject.findFirstSpectrumWindow(serial=serial)
if window:
self.windowPanes.append(window.findFirstSpectrumWindowPane())
self.diagEntry.set(options.get('diagTol',0.3))
self.minWaterEntry.set(options.get('minWater',4.95))
self.maxWaterEntry.set(options.get('maxWater',4.95))
self.followRootSelect.set(options.get('followRoot',True))
self.followTargetSelect.set(options.get('followTarget',True))
self.assignTypeSelect.set(options.get('assignType',False))
mappings = options.get('selDimMapping', [])
self.selDimMapping = {}
if type(mappings) is type({}):
for pane in self.windowPanes:
mapping = mappings.get(pane.spectrumWindow.serial)
avail = self.dimMappings.get(pane, [])
if mapping not in avail:
if avail:
mapping = avail[0]
else:
mapping = None
self.selDimMapping[pane] = mapping
else:
for i, pane in enumerate(self.windowPanes):
self.selDimMapping[pane] = mappings[i]
self.nonRootDict = options.get('nonRootDict', {})
def setAppDataOptions(self):
project = self.project
options = {}
options['diagTol'] = self.diagEntry.get()
options['minWater'] = self.minWaterEntry.get()
options['maxWater'] = self.maxWaterEntry.get()
options['followRoot'] = self.followRootSelect.get()
options['followTarget'] = self.followTargetSelect.get()
options['assignType'] = self.assignTypeSelect.get()
options['windowPanes'] = [(p.spectrumWindow.serial, p.serial) for p in self.windowPanes]
options['selDimMapping']= [self.selDimMapping[w] for w in self.windowPanes]
options['nonRootDict'] = self.nonRootDict
if self.rootPane:
options['rootWindow'] = (self.rootPane.spectrumWindow.serial,
self.rootPane.serial)
if self.peakList:
options['peakList'] = self.getPeakListId(self.peakList)
try:
data = cPickle.dumps(options)
self.analysisProject.linkPeakListsData=data
    except Exception:
pass
def setDataDimTolerances(self, *opt):
dataDim1, dataDim2 = self.getRootDataDims()
analysisDataDim1 = getAnalysisDataDim(dataDim1)
analysisDataDim2 = getAnalysisDataDim(dataDim2)
tol1 = analysisDataDim1.assignTolerance
tol2 = analysisDataDim2.assignTolerance
analysisDataDim1.assignTolerance = self.tolEntry1.get() or tol1
analysisDataDim2.assignTolerance = self.tolEntry2.get() or tol2
def updateRootPeakList(self):
peakListData = self.getRootPeakLists()
index = -1
names = []
peakList = self.peakList
if peakListData:
names = [x[0] for x in peakListData]
peakLists = [x[1] for x in peakListData]
if peakList not in peakLists:
peakList = peakLists[0]
index = peakLists.index(peakList)
else:
peakList = None
peakLists = []
if peakList is not self.peakList:
self.peakList = peakList
self.rootPeak = None
self.updateRootWindow()
if self.peakList:
spectrum = self.peakList.dataSource
dataDim1, dataDim2 = self.getRootDataDims()
analysisDataDim1 = getAnalysisDataDim(dataDim1)
analysisDataDim2 = getAnalysisDataDim(dataDim2)
dataDims = self.peakList.dataSource.sortedDataDims()
isotopeStr1 = ','.join(getPrimaryDataDimRef(dataDim1).expDimRef.isotopeCodes)
isotopeStr2 = ','.join(getPrimaryDataDimRef(dataDim2).expDimRef.isotopeCodes)
self.tolEntry1.set( analysisDataDim1.assignTolerance )
self.tolLabel1.set('Root %s Dim %d' % (isotopeStr1,dataDim1.dim) )
self.tolEntry2.set( analysisDataDim2.assignTolerance )
self.tolLabel2.set('Root %s Dim %d' % (isotopeStr2,dataDim2.dim) )
self.updateAfter()
self.peakListPulldown.setup(names, peakLists, index)
def getPeakListId(self, peakList):
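# A peak list is identified by the serials of its experiment, spectrum and peak list.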
spectrum = peakList.dataSource
return (spectrum.experiment.serial,spectrum.serial,peakList.serial)
def getIsActive(self, peakList):
for windowPane in self.windowPanes:
for view in windowPane.spectrumWindowViews:
if view.analysisSpectrum.dataSource is peakList.dataSource:
if view.isPosVisible or view.isNegVisible:
return True
return False
def togglePeakList(self, peakList):
if peakList:
views = set([])
for windowPane in self.windowPanes:
for view in windowPane.spectrumWindowViews:
if view.analysisSpectrum.dataSource is peakList.dataSource:
views.add(view)
onViews = []
offViews = []
for view in views:
if view.isPosVisible or view.isNegVisible:
onViews.append(view)
else:
offViews.append(view)
if onViews:
for view in onViews:
toggleSpectrum(view.spectrumWindowPane.spectrumWindow,
spectrum=view.analysisSpectrum.dataSource)
elif offViews:
for view in offViews:
toggleSpectrum(view.spectrumWindowPane.spectrumWindow,
spectrum=view.analysisSpectrum.dataSource)
def updateSpectra(self, object=None):
if object:
self.updateRootPeakList()
self.targetPeakLists = []
peakLists = self.getTargetPeakLists()
activeColors = ['#B0FFB0','#B0FFB0',None,None,None]
inactiveColors = ['#FFB0B0','#FFB0B0',None,None,None]
textMatrix = []
objectList = []
colorMatrix = []
for peakList in peakLists:
spectrum = peakList.dataSource
experiment = spectrum.experiment
refExpName = '-'
if experiment.refExperiment:
refExpName = experiment.refExperiment.name
isotopes = []
for dataDim in spectrum.dataDims:
isotopeDict = {}
for expDimRef in dataDim.expDim.expDimRefs:
for isotope in expDimRef.isotopeCodes:
isotopeDict[isotope] = None
isotopes.append( ','.join( isotopeDict.keys() ) )
yesNo = 'No'
if self.nonRootDict.get(self.getPeakListId(peakList)):
yesNo = 'Yes'
if self.getIsActive(peakList):
isActive = 'Yes'
colorMatrix.append(activeColors)
self.targetPeakLists.append(peakList)
else:
isActive = 'No'
colorMatrix.append(inactiveColors)
datum = ['%s:%s' % (experiment.name,spectrum.name),
isActive,
yesNo,
refExpName,
' '.join(isotopes)]
textMatrix.append(datum)
objectList.append(peakList)
self.spectrumMatrix.update(textMatrix=textMatrix,
colorMatrix=colorMatrix,
objectList=objectList)
self.setAppDataOptions()
def updateWindowsAfter(self, window=None):
self.after_idle(self.updateWindows) # Axis panel notifiers need a bit of time
def updateWindowPaneAfter(self, windowPane=None):
if windowPane:
self.after_idle(lambda :self.updateWindows(windowPane))
def updateWindows(self, windowPane=None):
if windowPane:
self.updateRootWindow()
self.updateWindowPulldown()
if windowPane.isDeleted and (windowPane in self.windowPanes):
if self.windowPane is windowPane:
self.windowPane = None
self.windowPanes.remove(windowPane)
# '#','Name','Axes','Selected mapping'
textMatrix = []
objectList = []
for windowPane in self.windowPanes:
datum = []
datum.append(windowPane.spectrumWindow.serial)
datum.append(getWindowPaneName(windowPane))
#datum.append( ' '.join([axisPanel.axisType.name for axisPanel in windowPane.axisPanels]) )
mapping = self.selDimMapping.get(windowPane)
avail = self.dimMappings.get(windowPane)
if avail and (mapping not in avail):
self.selDimMapping[windowPane] = avail[0]
datum.append(self.selDimMapping[windowPane])
textMatrix.append(datum)
objectList.append(windowPane)
self.updateSpectra()
self.windowMatrix.update(textMatrix=textMatrix,
objectList=objectList)
def updateAfter(self, obj=None):
if obj:
if obj.className == 'Peak':
peak = obj
else: # must be peakDim
peak = obj.peak
if peak.peakList is not self.peakList:
return
if self.waiting:
return
else:
self.waiting = True
self.after_idle(self.update)
def update(self):
textMatrix = []
objectList = []
headingList = ['#','Assign F1','Assign F2','Shift F1','Shift F2']
if self.peakList:
dataDim1, dataDim2 = self.getRootDataDims()
i = 1
for dataDim in (dataDim1, dataDim2):
dim = dataDim.dim
headingList[i] = 'Assign F%d' % (dim)
headingList[i+2] = 'Shift F%d' % (dim)
i += 1
for peak in self.peakList.peaks:
peakDim0 = peak.findFirstPeakDim(dataDim=dataDim1)
peakDim1 = peak.findFirstPeakDim(dataDim=dataDim2)
datum = []
datum.append(peak.serial)
datum.append(peakDim0.annotation or '-')
datum.append(peakDim1.annotation or '-')
datum.append(peakDim0.value)
datum.append(peakDim1.value)
textMatrix.append(datum)
objectList.append(peak)
self.rootPeakMatrix.update(textMatrix=textMatrix,
objectList=objectList,
headingList=headingList,
tipTexts=self.rootPeakTipTexts)
self.updateButtons()
self.setAppDataOptions()
self.waiting = False
def destroy(self):
self.administerNotifiers(self.unregisterNotify)
self.setAppDataOptions()
BasePopup.destroy(self)
|
[
"[email protected]"
] | |
861203770fe40eabf96cb4818d736ba3918bcd4f
|
46f2834ae92da9e17463def0c635f75bf05886a1
|
/abc/abc122/A/main.py
|
73450f43c55c708928dfc5e028569620a5730e24
|
[] |
no_license
|
replu/atcoder
|
bf3da10c937c955ca1bc3fa33b8f24c74d2d6c50
|
a6183d03355058bccc2b89db5e07b7f72598fea3
|
refs/heads/master
| 2023-03-30T15:03:47.879783 | 2021-03-28T17:08:19 | 2021-03-28T17:08:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
#!/usr/bin/env python3
import sys
def solve(b: str):
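# NOTE: the solution body was never filled in; this is the bare generated stub.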
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
b = next(tokens) # type: str
solve(b)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
566fef1988f54e052d1f14803548c6240db965c9
|
376b6933872b9110765154094d2c77713da2c853
|
/rnaseq/5clusterall/cap3Wrapper.py
|
b5254bbb0bf980bfe85607197f3869afde7b2059
|
[] |
no_license
|
markphuong/geographus-genome
|
46b037e7789641895f1a99b8bf6dee3418887600
|
a0ff439fbc0c350279359a51321e40e7778f5170
|
refs/heads/master
| 2020-03-19T07:21:29.297458 | 2018-06-05T04:15:18 | 2018-06-05T04:15:18 | 136,107,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,684 |
py
|
#!/usr/bin/env python
#this runs CAP3 assembly on the transcript FASTA, merges contigs and singlets, then clusters them with cd-hit-est
import os
import sys
import argparse
import multiprocessing
def get_args(): #arguments needed to give to this script
parser = argparse.ArgumentParser(description="run CAP3 assembly and cd-hit-est clustering")
#forces required argument to let it run
required = parser.add_argument_group("required arguments")
# required.add_argument("--map", help="textfile with ID that relate to read files you want to concatenate. for ex., index1, index2, index3 (with new lines after each thing)", required=True) #A map file with the sample ID and the fasta file it goes to
return parser.parse_args()
def concat(element):
variables = dict(
index = element)
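# The commands below run one line at a time via os.system:
#   1) CAP3 assembles the transcript FASTA
#   2) contigs and singlets are merged into a single FASTA
#   3) cd-hit-est collapses near-identical sequences at 99% identity
#   4) the outputs are copied to the shared RESULTS directory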
commands = """
/home/phuong/CAP3/cap3 geographus.transcripts.fa > all.cap3.out
cat geographus.transcripts.fa.cap.contigs geographus.transcripts.fa.cap.singlets > geographus.transcripts.cap3.fasta
cd-hit-est -i geographus.transcripts.cap3.fasta -o geographus.transcripts.clustered.fasta -c 0.99 > geographus.cd-hit-est
cp -p * /pylon5/bi4s86p/phuong/geographus.genome/rnaseq/5clusterall/RESULTS
""".format(**variables)
cmd_list = commands.split("\n")
for cmd in cmd_list:
os.system(cmd)
concat('tes')
#def main():
# args = get_args()
#Make a list with the indexes you want to process
# mylist = []
# with open(args.map) as rfile:
# for line in rfile:
# line = line.strip()
# mylist.append(line)
#start the multiprocessing
# pool = multiprocessing.Pool(10)
# pool.map(concat, mylist)#run the function with the arguments
#if __name__ == "__main__": #run main over multiple processors
# main()
|
[
"[email protected]"
] | |
9402a70a7e7b3c955d989eb346cb982d1328408e
|
d51c0aeddb864973ec1171e99e8f174ad622d965
|
/baselines/baselines/deepq/experiments/train_cartpole.py
|
a767a8d208ad52f4e5f63f24ce59ea74005c806b
|
[
"MIT"
] |
permissive
|
Kiwoo/HVHRL
|
c42af09faec716e727a6fb4a82171412e66abad9
|
b883c6a36655e2d348114e320b953f12dc799fd4
|
refs/heads/master
| 2021-01-25T14:04:11.848972 | 2018-04-26T08:20:41 | 2018-04-26T08:20:41 | 123,648,363 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 683 |
py
|
import gym
from baselines import deepq
def callback(lcl, _glb):
# stop training once the mean reward over the last 100 episodes reaches 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
def main():
env = gym.make("CartPole-v0")  # deepq needs a discrete action space; the callback threshold and model filename target CartPole
model = deepq.models.mlp([256,256])
act = deepq.learn(
env,
q_func=model,
lr=1e-4,
max_timesteps=400000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to cartpole_model.pkl")
act.save("cartpole_model.pkl")
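# The saved policy can be restored later (sketch, mirroring the baselines
# enjoy scripts): act = deepq.load("cartpole_model.pkl")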
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
07fcc6f0dc5bb166815d7aeb242fa74b24447294
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_14133.py
|
3143a7ae2947795b0e135c4a710171911771d8dc
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
# How to get a single value from a pymongo query of a mongodb in python?
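# Assuming `bla` holds a single document, e.g. bla = collection.find_one({'_id': doc_id}):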
myValues[i] = bla['value']
|
[
"[email protected]"
] | |
1c8cad55af6edfc57241f881d4848b14031864af
|
34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d
|
/Python/plotting/plotting1/panda_module/remove_whitespace/a.py
|
4a80e59c6a1bd9d3df8994693660f2b22cde87a1
|
[
"MIT"
] |
permissive
|
bhishanpdl/Programming
|
d4310f86e1d9ac35483191526710caa25b5f138e
|
9654c253c598405a22cc96dfa1497406c0bd0990
|
refs/heads/master
| 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,682 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : Apr 04, 2016
# Ref : http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
# Imports
import matplotlib.pyplot as plt
import pandas as pd
# Read the data into a pandas DataFrame.
gender_degree_data = pd.read_csv("http://www.randalolson.com/wp-content/uploads/percent-bachelors-degrees-women-usa.csv")
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# You typically want your plot to be ~1.33x wider than tall. This plot is a rare
# exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.ylim(0, 90)
plt.xlim(1968, 2014)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(range(0, 91, 10), [str(x) + "%" for x in range(0, 91, 10)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(10, 91, 10):
plt.plot(range(1968, 2012), [y] * len(range(1968, 2012)), "--", lw=0.5, color="black", alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom=False, top=False,
labelbottom=True, left=False, right=False, labelleft=True)  # booleans, not "on"/"off" strings, on current matplotlib
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education', 'Psychology',
'Foreign Languages', 'English', 'Communications\nand Journalism',
'Art and Performance', 'Biology', 'Agriculture',
'Social Sciences and History', 'Business', 'Math and Statistics',
'Architecture', 'Physical Sciences', 'Computer Science',
'Engineering']
for rank, column in enumerate(majors):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
plt.plot(gender_degree_data.Year.values,
gender_degree_data[column.replace("\n", " ")].values,
lw=2.5, color=tableau20[rank])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
y_pos = gender_degree_data[column.replace("\n", " ")].values[-1] - 0.5
if column == "Foreign Languages":
y_pos += 0.5
elif column == "English":
y_pos -= 0.5
elif column == "Communications\nand Journalism":
y_pos += 0.75
elif column == "Art and Performance":
y_pos -= 0.25
elif column == "Agriculture":
y_pos += 1.25
elif column == "Social Sciences and History":
y_pos += 0.25
elif column == "Business":
y_pos -= 0.75
elif column == "Math and Statistics":
y_pos += 0.75
elif column == "Architecture":
y_pos -= 0.75
elif column == "Computer Science":
y_pos += 0.75
elif column == "Engineering":
y_pos -= 0.25
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(2011.5, y_pos, column, fontsize=14, color=tableau20[rank])
# matplotlib's title() call centers the title on the plot, but not the graph,
# so I used the text() call to customize where the title goes.
# Make the title big enough so it spans the entire plot, but don't make it
# so big that it requires two lines to show.
# Note that if the title is descriptive enough, it is unnecessary to include
# axis labels; they are self-evident, in this plot's case.
plt.text(1995, 93, "Percentage of Bachelor's degrees conferred to women in the U.S.A."
", by major (1970-2012)", fontsize=17, ha="center")
# Always include your data source(s) and copyright notice! And for your
# data sources, tell your viewers exactly where the data came from,
# preferably with a direct link to the data. Just telling your viewers
# that you used data from the "U.S. Census Bureau" is completely useless:
# the U.S. Census Bureau provides all kinds of data, so how are your
# viewers supposed to know which data set you used?
plt.text(1966, -8, "Data source: nces.ed.gov/programs/digest/2013menu_tables.asp"
"\nAuthor: Randy Olson (randalolson.com / @randal_olson)"
"\nNote: Some majors are missing because the historical data "
"is not available for them", fontsize=10)
# Finally, save the figure as a PNG.
# You can also save it as a PDF, JPEG, etc.
# Just change the file extension in this call.
# bbox_inches="tight" removes all the extra whitespace on the edges of your plot.
plt.savefig("percent-bachelors-degrees-women-usa.png", bbox_inches="tight")
|
[
"[email protected]"
] | |
23399b0e88453cfbeee0e62dc3945c91eccbc848
|
a5103b7d5066138ac1a9aabc273361491a5031cd
|
/daily/8/studyOfFace/MTCNN/prepare/rnet_data.py
|
4422d2f2f0d46f62676e9f48737a81c052c43e2e
|
[] |
no_license
|
mckjzhangxk/deepAI
|
0fa2f261c7899b850a4ec432b5a387e8c5f13e83
|
24e60f24b6e442db22507adddd6bf3e2c343c013
|
refs/heads/master
| 2022-12-13T18:00:12.839041 | 2021-06-18T03:01:10 | 2021-06-18T03:01:10 | 144,862,423 | 1 | 1 | null | 2022-12-07T23:31:01 | 2018-08-15T14:19:10 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 105 |
py
|
from prepare.hard_example import gen_hard_example
if __name__ == '__main__':
gen_hard_example('RNet')
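# 'RNet' selects the second stage of the MTCNN cascade; its hard examples are
# typically mined from the detections of the preceding PNet stage.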
|
[
"[email protected]"
] | |
a60f0f53fe90dec0b1d503bc141dbfb3c2c22314
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p00000/s458844379.py
|
6d83338830ad39cbf525c602db21b70c0c6451d0
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 90 |
py
|
for i in range(9):
for j in range(9):
print("%dx%d=%d" % (i+1, j+1, (i+1)*(j+1)))
|
[
"[email protected]"
] | |
529e0aa15f413725f9b4e3a6945022c0fd4e7083
|
c91d029b59f4e6090a523bf571b3094e09852258
|
/src/logistica/migrations/0004_auto_20170711_1738.py
|
57bb29d599c643d69a5cac048dfd62e473e192cd
|
[
"MIT"
] |
permissive
|
anselmobd/fo2
|
d51b63ebae2541b00af79448ede76b02638c41f0
|
8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab
|
refs/heads/master
| 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 |
MIT
| 2023-04-21T21:50:46 | 2017-05-30T17:04:27 |
Python
|
UTF-8
|
Python
| false | false | 898 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 20:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logistica', '0003_auto_20170710_1212'),
]
operations = [
migrations.AddField(
model_name='notafiscal',
name='dest_cnpj',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='CNPJ'),
),
migrations.AddField(
model_name='notafiscal',
name='dest_nome',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Destinatário'),
),
migrations.AddField(
model_name='notafiscal',
name='natu_venda',
field=models.BooleanField(default=False, verbose_name='venda'),
),
]
|
[
"[email protected]"
] | |
4a6518abbc5f98a6b738dbfb78048ecc912f48c8
|
f336bcdc1eeab553e0d3d1de2ca6da64cd7f27bc
|
/kline/mail.py
|
d612df174c3dcde334dfc541671e8a871f20655b
|
[] |
no_license
|
tonylibing/stockpractice
|
04568c017a96815e3796c895e74f11fa128d3ffe
|
039e144b3a4cc00e400338174b31fa277df55517
|
refs/heads/main
| 2023-09-05T03:53:02.565539 | 2021-10-30T22:08:16 | 2021-10-30T22:08:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,716 |
py
|
# coding:utf-8
# 1000-yuan live-trading practice program
# Email sending module
# Reference: https://zhuanlan.zhihu.com/p/24180606
# Avoid hardcoding the username and password; see https://blog.csdn.net/lantian_123/article/details/101518724
# Create a .env file in the source directory that defines USERNAME and PASSWORD
import smtplib
from email.mime.text import MIMEText
from dotenv import load_dotenv
import os
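# Example .env contents (placeholder values for illustration):
#   USERNAME=myname
#   PASSWORD=mypassword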
# Send an email
def sentMail(title, content):
# Load the username and password
load_dotenv()
username = os.getenv("USERNAME")
password = os.getenv("PASSWORD")
senderAddress = username+"@163.com"
# Server connection details
mail_host = "smtp.163.com"
# Username
mail_user = username
# Password
mail_pass = password
# Sender address
sender = senderAddress
# Recipient addresses
receivers = [senderAddress]
# Set up the message
# Message body
message = MIMEText(content, 'plain', 'utf-8')
# Subject line
message['Subject'] = title
# Sender header
message['From'] = sender
# Recipient header
message['To'] = receivers[0]
# Log in and send the email
try:
smtpObj = smtplib.SMTP_SSL(mail_host, 465)
# Connect to the server
# smtpObj.connect(mail_host, 465)
# Log in to the server
smtpObj.login(mail_user,mail_pass)
# Send
smtpObj.sendmail(
sender,receivers,message.as_string())
# Quit
smtpObj.quit()
print('Send succeeded')
except smtplib.SMTPException as e:
print('Send failed', e)  # print the error
if __name__ == "__main__":
sentMail("Test", "This is another test email sent from Python.")
|
[
"[email protected]"
] | |
689c9e0171cd5a49c7db0431676dafac55e3f56c
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/dn4 - krajevne funkcije/M-17237-2540.py
|
6f6a7e522553764587c45140bc09e8f3a722ee3e
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,487 |
py
|
def koordinate(ime, kraji):
for name, x, y in kraji:
if name == ime:
return (x, y)
return None  # the place was not found
import math
def razdalja_koordinat(x1, y1, x2, y2):
return math.sqrt(math.pow(x2-x1, 2) + math.pow(y2-y1, 2))
def razdalja(ime1, ime2, kraji):
x_1, y_1 = koordinate(ime1, kraji)
x_2, y_2 =koordinate(ime2, kraji)
return razdalja_koordinat(x_1, y_1, x_2, y_2)
def v_dometu(ime, domet, kraji):
x_1, y_1 = koordinate(ime, kraji)
mesta = []
for name, x_2, y_2 in kraji:
razdalja = razdalja_koordinat(x_1, y_1, x_2, y_2)
if razdalja <= domet and name != ime:
mesta.append(name)
return mesta
def najbolj_oddaljeni(ime, imena, kraji):
x_1, y_1 = koordinate(ime, kraji)
naj_oddaljeni = 0
naj_oddaljeni_ime = None
for name in imena:
x_2, y_2 = koordinate(name, kraji)
razdalja = razdalja_koordinat(x_1, y_1, x_2, y_2)
if naj_oddaljeni < razdalja:
naj_oddaljeni = razdalja
naj_oddaljeni_ime = name
return naj_oddaljeni_ime
def zalijemo(ime, domet, kraji):
naj_razdalja = 0
naj_ime = None
x_1, y_1 = koordinate(ime, kraji)
for name, x_2, y_2 in kraji:
razdalja = razdalja_koordinat(x_1, y_1, x_2, y_2)
if razdalja < domet:
if naj_razdalja < razdalja:
naj_razdalja = razdalja
naj_ime = name
return naj_ime
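# Quick check with illustrative data:
#   kraji = [('A', 0, 0), ('B', 3, 4), ('C', 10, 0)]
#   razdalja('A', 'B', kraji)  -> 5.0
#   v_dometu('A', 6, kraji)    -> ['B']
#   zalijemo('A', 6, kraji)    -> 'B'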
|
[
"[email protected]"
] | |
204c66a406901687afa7adb378d50f6169acde6a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2429/60776/289149.py
|
8b653e07054c352ba83431c6436e14577e5b51d9
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
cases = int(input())  # number of test cases
for k in range(0, cases):
input()  # sequence length, not needed
b = input().split(' ')
result = 0
for i in range(0, len(b)):
b[i] = int(b[i])
# largest rise: the best difference between a later maximum and the current element
for i in range(0, len(b)):
if result < max(b[i:len(b)]) - b[i]:
result = max(b[i:len(b)]) - b[i]
print(result)
|
[
"[email protected]"
] | |
8a7e16854616347b5368b07913fd92aefa97e9d2
|
d10c5d3603e027a8fd37115be05e62634ec0f0a5
|
/08_Statistical-Thinking-in-Python-1/08_ex_3-10.py
|
06c4213345f1eafa6d736dac7e4763b791cd08d6
|
[] |
no_license
|
stacygo/2021-01_UCD-SCinDAE-EXS
|
820049125b18b38ada49ffc2036eab33431d5740
|
027dc2d2878314fc8c9b2796f0c2e4c781c6668d
|
refs/heads/master
| 2023-04-29T01:44:36.942448 | 2021-05-23T15:29:28 | 2021-05-23T15:29:28 | 335,356,448 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 483 |
py
|
# Exercise 3-10: Sampling out of the Binomial distribution
import numpy as np
import matplotlib.pyplot as plt
from functions import ecdf
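# `ecdf` is a small course helper; a typical definition (an assumption --
# the local module may differ) is:
#   def ecdf(data):
#       x = np.sort(data)
#       y = np.arange(1, len(x) + 1) / len(x)
#       return x, y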
np.random.seed(42)
# Take 10,000 samples out of the binomial distribution: n_defaults
n_defaults = np.random.binomial(100, 0.05, size=10000)
# Compute CDF: x, y
x, y = ecdf(n_defaults)
# Plot the CDF with axis labels
plt.plot(x, y, marker='.', linestyle='none')
plt.xlabel('number of defaults')
plt.ylabel('CDF')
# Show the plot
plt.show()
|
[
"[email protected]"
] | |
8a37254576bc719509e3ff8e4e2ec21a1bace1e1
|
0fc6ff5eb90ced71a3927b0e326481d40b020e66
|
/validate/__init__.py
|
78fd72485db94da66de27612181fbd5d36b3e316
|
[] |
no_license
|
chairco/lazy_email_validate
|
155c8f8020a13a7c527c7d56f42014f5d75e9fdb
|
169ed825894c21a0a04841d1e82b3a2b3a7df802
|
refs/heads/master
| 2021-01-21T14:19:40.946601 | 2017-06-26T07:38:53 | 2017-06-26T07:38:53 | 95,269,879 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
# -*- coding: utf-8 -*-
__version__ = '0.0.1'
__author__ = 'chairco'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017, chairco.'
|
[
"[email protected]"
] | |
6f78e598047b741dda5d12407c289837488f0a87
|
51496986e7fd12ba8c96b08d6394591f24472d59
|
/code/toscaparser/nodetemplate.py
|
c05ac0c60779d78167115cdb5cb3d39d641a2d58
|
[
"Apache-2.0"
] |
permissive
|
superfluidity/RDCL3D
|
3ba33051e19b362f9084d35037f881c332576dae
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
refs/heads/master
| 2020-04-06T04:13:50.615021 | 2019-01-06T14:56:51 | 2019-01-06T14:56:51 | 83,023,039 | 8 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,090 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidPropertyValueError
from toscaparser.common.exception import MissingRequiredFieldError
from toscaparser.common.exception import TypeMismatchError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.common.exception import ValidationError
from toscaparser.dataentity import DataEntity
from toscaparser.elements.interfaces import CONFIGURE
from toscaparser.elements.interfaces import CONFIGURE_SHORTNAME
from toscaparser.elements.interfaces import INTERFACE_DEF_RESERVED_WORDS
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.elements.interfaces import LIFECYCLE
from toscaparser.elements.interfaces import LIFECYCLE_SHORTNAME
from toscaparser.elements.relationshiptype import RelationshipType
from toscaparser.entity_template import EntityTemplate
from toscaparser.relationship_template import RelationshipTemplate
from toscaparser.utils.gettextutils import _
log = logging.getLogger('tosca')
class NodeTemplate(EntityTemplate):
'''Node template from a Tosca profile.'''
def __init__(self, name, node_templates, custom_def=None,
available_rel_tpls=None, available_rel_types=None):
super(NodeTemplate, self).__init__(name, node_templates[name],
'node_type',
custom_def)
self.templates = node_templates
self._validate_fields(node_templates[name])
self.custom_def = custom_def
self.related = {}
self.relationship_tpl = []
self.available_rel_tpls = available_rel_tpls
self.available_rel_types = available_rel_types
self._relationships = {}
self.sub_mapping_tosca_template = None
@property
def relationships(self):
if not self._relationships:
requires = self.requirements
if requires and isinstance(requires, list):
for r in requires:
for r1, value in r.items():
explicit = self._get_explicit_relationship(r, value)
if explicit:
for key, value in explicit.items():
self._relationships[key] = value
return self._relationships
def _get_explicit_relationship(self, req, value):
"""Handle explicit relationship
For example,
- req:
node: DBMS
relationship: tosca.relationships.HostedOn
"""
explicit_relation = {}
node = value.get('node') if isinstance(value, dict) else value
if node:
# TODO(spzala) implement look up once Glance meta data is available
# to find a matching TOSCA node using the TOSCA types
msg = _('Lookup by TOSCA types is not supported. '
'Requirement for "%s" cannot be fulfilled.') % self.name
if (node in list(self.type_definition.TOSCA_DEF.keys())
or node in self.custom_def):
ExceptionCollector.appendException(NotImplementedError(msg))
return
if node not in self.templates:
ExceptionCollector.appendException(
KeyError(_('Node template "%s" was not found.') % node))
return
related_tpl = NodeTemplate(node, self.templates, self.custom_def)
relationship = value.get('relationship') \
if isinstance(value, dict) else None
# check if it's type has relationship defined
if not relationship:
parent_reqs = self.type_definition.get_all_requirements()
if parent_reqs is None:
ExceptionCollector.appendException(
ValidationError(message='parent_req is ' +
str(parent_reqs)))
else:
for key in req.keys():
for req_dict in parent_reqs:
if key in req_dict.keys():
relationship = (req_dict.get(key).
get('relationship'))
break
if relationship:
found_relationship_tpl = False
# apply available relationship templates if found
if self.available_rel_tpls:
for tpl in self.available_rel_tpls:
if tpl.name == relationship:
rtype = RelationshipType(tpl.type, None,
self.custom_def)
explicit_relation[rtype] = related_tpl
tpl.target = related_tpl
tpl.source = self
self.relationship_tpl.append(tpl)
found_relationship_tpl = True
# create relationship template object.
rel_prfx = self.type_definition.RELATIONSHIP_PREFIX
if not found_relationship_tpl:
if isinstance(relationship, dict):
relationship = relationship.get('type')
if relationship:
if self.available_rel_types and \
relationship in self.available_rel_types.keys():
pass
elif not relationship.startswith(rel_prfx):
relationship = rel_prfx + relationship
else:
ExceptionCollector.appendException(
MissingRequiredFieldError(
what=_('"relationship" used in template '
'"%s"') % related_tpl.name,
required=self.TYPE))
for rtype in self.type_definition.relationship.keys():
if rtype.type == relationship:
explicit_relation[rtype] = related_tpl
related_tpl._add_relationship_template(req,
rtype.type,
self)
elif self.available_rel_types:
if relationship in self.available_rel_types.keys():
rel_type_def = self.available_rel_types.\
get(relationship)
if 'derived_from' in rel_type_def:
super_type = \
rel_type_def.get('derived_from')
if not super_type.startswith(rel_prfx):
super_type = rel_prfx + super_type
if rtype.type == super_type:
explicit_relation[rtype] = related_tpl
related_tpl.\
_add_relationship_template(
req, rtype.type, self)
return explicit_relation
def _add_relationship_template(self, requirement, rtype, source):
req = requirement.copy()
req['type'] = rtype
tpl = RelationshipTemplate(req, rtype, self.custom_def, self, source)
self.relationship_tpl.append(tpl)
def get_relationship_template(self):
return self.relationship_tpl
def _add_next(self, nodetpl, relationship):
self.related[nodetpl] = relationship
@property
def related_nodes(self):
if not self.related:
for relation, node in self.type_definition.relationship.items():
for tpl in self.templates:
if tpl == node.type:
self.related[NodeTemplate(tpl)] = relation
return self.related.keys()
def validate(self, tosca_tpl=None):
self._validate_capabilities()
self._validate_requirements()
self._validate_properties(self.entity_tpl, self.type_definition)
self._validate_interfaces()
for prop in self.get_properties_objects():
prop.validate()
def _validate_requirements(self):
type_requires = self.type_definition.get_all_requirements()
allowed_reqs = ["template"]
if type_requires:
for treq in type_requires:
for key, value in treq.items():
allowed_reqs.append(key)
if isinstance(value, dict):
for key in value:
allowed_reqs.append(key)
requires = self.type_definition.get_value(self.REQUIREMENTS,
self.entity_tpl)
if requires:
if not isinstance(requires, list):
ExceptionCollector.appendException(
TypeMismatchError(
what='"requirements" of template "%s"' % self.name,
type='list'))
else:
for req in requires:
for r1, value in req.items():
if isinstance(value, dict):
self._validate_requirements_keys(value)
self._validate_requirements_properties(value)
allowed_reqs.append(r1)
self._common_validate_field(req, allowed_reqs,
'requirements')
def _validate_requirements_properties(self, requirements):
# TODO(anyone): Only occurrences property of the requirements is
# validated here. Validation of other requirement properties are being
# validated in different files. Better to keep all the requirements
# properties validation here.
for key, value in requirements.items():
if key == 'occurrences':
self._validate_occurrences(value)
break
def _validate_occurrences(self, occurrences):
DataEntity.validate_datatype('list', occurrences)
for value in occurrences:
DataEntity.validate_datatype('integer', value)
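# Illustrative values: [0, 2] and [1, 1] pass; [2, 1], [0, 0] and [1] are rejected.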
if len(occurrences) != 2 or not (0 <= occurrences[0] <= occurrences[1]) \
or occurrences[1] == 0:
ExceptionCollector.appendException(
InvalidPropertyValueError(what=(occurrences)))
def _validate_requirements_keys(self, requirement):
for key in requirement.keys():
if key not in self.REQUIREMENTS_SECTION:
ExceptionCollector.appendException(
UnknownFieldError(
what='"requirements" of template "%s"' % self.name,
field=key))
def _validate_interfaces(self):
ifaces = self.type_definition.get_value(self.INTERFACES,
self.entity_tpl)
if ifaces:
for name, value in ifaces.items():
if name in (LIFECYCLE, LIFECYCLE_SHORTNAME):
self._common_validate_field(
value, InterfacesDef.
interfaces_node_lifecycle_operations,
'interfaces')
elif name in (CONFIGURE, CONFIGURE_SHORTNAME):
self._common_validate_field(
value, InterfacesDef.
interfaces_relationship_configure_operations,
'interfaces')
elif name in self.type_definition.interfaces.keys():
self._common_validate_field(
value,
self._collect_custom_iface_operations(name),
'interfaces')
else:
ExceptionCollector.appendException(
UnknownFieldError(
what='"interfaces" of template "%s"' %
self.name, field=name))
def _collect_custom_iface_operations(self, name):
allowed_operations = []
nodetype_iface_def = self.type_definition.interfaces[name]
allowed_operations.extend(nodetype_iface_def.keys())
if 'type' in nodetype_iface_def:
iface_type = nodetype_iface_def['type']
if iface_type in self.type_definition.custom_def:
iface_type_def = self.type_definition.custom_def[iface_type]
else:
iface_type_def = self.type_definition.TOSCA_DEF[iface_type]
allowed_operations.extend(iface_type_def.keys())
allowed_operations = [op for op in allowed_operations if
op not in INTERFACE_DEF_RESERVED_WORDS]
return allowed_operations
def _validate_fields(self, nodetemplate):
for name in nodetemplate.keys():
if name not in self.SECTIONS and name not in self.SPECIAL_SECTIONS:
ExceptionCollector.appendException(
UnknownFieldError(what='Node template "%s"' % self.name,
field=name))
|
[
"[email protected]"
] | |
e4ff02c8eee5d437306988c001615f0dfcb35b4a
|
8799cbe3a261fea3ff05af2fba7e3eade40b57f5
|
/SocialMedia/home/migrations/0001_initial.py
|
c845d53ec77a6f58857930c9c7677535d3ca89ee
|
[] |
no_license
|
Anoop-Suresh/Training
|
83b5759db0d2113bb90731b243a1dd2d5be5992f
|
e6f4dd8a77fec058917dd25c424a1f3afc7df236
|
refs/heads/master
| 2022-11-30T08:18:21.432284 | 2019-10-13T03:48:15 | 2019-10-13T03:48:15 | 190,737,085 | 0 | 0 | null | 2022-11-22T04:17:20 | 2019-06-07T12:05:47 |
Python
|
UTF-8
|
Python
| false | false | 982 |
py
|
# Generated by Django 2.2.4 on 2019-08-09 06:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=250)),
('email', models.EmailField(blank=True, max_length=254)),
('phone', models.IntegerField(default=0, null=True)),
('image', models.FileField(blank=True, upload_to='media/images/')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
81d5962d85aef36a06189c0aeba78b1165d363b6
|
989eea1d9110972ec6b2f4cedcc1759c4859a7c0
|
/RemoteClientMain.py
|
861c279cb9ebcc314dbc586755a41b093f7b0190
|
[] |
no_license
|
writefaruq/EpuckDistributedClient
|
9d09a58ad95e6905912f93f285d520e890d0a489
|
499a2a633654dbe20b183a7ee2d35151d2075aff
|
refs/heads/master
| 2020-05-31T17:16:32.110668 | 2010-05-27T09:06:24 | 2010-05-27T09:06:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,915 |
py
|
#!/usr/bin/python
from multiprocessing.managers import BaseManager
from multiprocessing import *
import time
import sys
import logging, logging.config, logging.handlers
logging.config.fileConfig("\
/home/newport-ril/centralized-expt/EpuckDistributedClient/logging-remote.conf")
logger = logging.getLogger("EpcLogger")
from EpuckDistributedClient.data_manager import *
from EpuckDistributedClient.device_controller_remote import *
from RILCommonModules.RILSetup import *
class RemoteManager(BaseManager):
pass
RemoteManager.register('get_target')
def main():
logging.debug("--- Start EPC---")
device_controller.start()
time.sleep(2)
try:
device_controller.join()
except (KeyboardInterrupt, SystemExit):
logging.debug("--- End EPC---")
print "User requested exit..ClientMain shutting down now"
sys.exit(0)
if __name__ == '__main__':
# parse robot id
numargs = len(sys.argv) - 1
if numargs > 1 or numargs < 1:
print "usage:" + sys.argv[0] + " <robot id >"
sys.exit(1)
else:
robotid = int(sys.argv[1])
DATA_MGR_PORT = EXPT_SERVER_PORT_BASE + robotid
# connect to server's data manager
mgr = RemoteManager(address=(EXPT_SERVER_IP, DATA_MGR_PORT), authkey="123")
mgr.connect()
datamgr = mgr.get_target()
myid = datamgr.GetRobotID()
if int(myid) != robotid:
print "robot id: " + str(robotid) + "and DataMgr port: " +\
str(DATA_MGR_PORT) + "mismatch -- check both are started..."
sys.exit(1)
# setup processes
device_controller = Process(\
target=controller_main,\
name="DeviceController",
args=(datamgr,))
#print tgt.GetRobotPose()
#print tgt.GetTaskInfo()
##print tgt.IsRobotPoseAvailable()
##tgt.SetSelectedTaskStarted()
#print tgt.IsSelectedTaskAvailable()
#print tgt.GetSelectedTask()
main()
|
[
"[email protected]"
] | |
94bf3ead889d4cee21f396fdba7754ee15bf3365
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/devtools/artifactregistry/v1beta2/google-cloud-artifactregistry-v1beta2-py/google/cloud/artifactregistry_v1beta2/services/artifact_registry/transports/grpc_asyncio.py
|
4dd3cffa0b1b6ca8d8086ed31823925f2610d587
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 36,076 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.artifactregistry_v1beta2.types import file
from google.cloud.artifactregistry_v1beta2.types import package
from google.cloud.artifactregistry_v1beta2.types import repository
from google.cloud.artifactregistry_v1beta2.types import repository as gda_repository
from google.cloud.artifactregistry_v1beta2.types import tag
from google.cloud.artifactregistry_v1beta2.types import tag as gda_tag
from google.cloud.artifactregistry_v1beta2.types import version
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import ArtifactRegistryTransport, DEFAULT_CLIENT_INFO
from .grpc import ArtifactRegistryGrpcTransport
class ArtifactRegistryGrpcAsyncIOTransport(ArtifactRegistryTransport):
"""gRPC AsyncIO backend transport for ArtifactRegistry.
The Artifact Registry API service.
Artifact Registry is an artifact management system for storing
artifacts from different package management systems.
The resources managed by this API are:
- Repositories, which group packages and their data.
- Packages, which group versions and their tags.
- Versions, which are specific forms of a package.
- Tags, which represent alternative names for versions.
- Files, which contain content and are optionally associated with a
Package or Version.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'artifactregistry.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs
)
def __init__(self, *,
host: str = 'artifactregistry.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_repositories(self) -> Callable[
[repository.ListRepositoriesRequest],
Awaitable[repository.ListRepositoriesResponse]]:
r"""Return a callable for the list repositories method over gRPC.
Lists repositories.
Returns:
Callable[[~.ListRepositoriesRequest],
Awaitable[~.ListRepositoriesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_repositories' not in self._stubs:
self._stubs['list_repositories'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ListRepositories',
request_serializer=repository.ListRepositoriesRequest.serialize,
response_deserializer=repository.ListRepositoriesResponse.deserialize,
)
return self._stubs['list_repositories']
@property
def get_repository(self) -> Callable[
[repository.GetRepositoryRequest],
Awaitable[repository.Repository]]:
r"""Return a callable for the get repository method over gRPC.
Gets a repository.
Returns:
Callable[[~.GetRepositoryRequest],
Awaitable[~.Repository]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_repository' not in self._stubs:
self._stubs['get_repository'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetRepository',
request_serializer=repository.GetRepositoryRequest.serialize,
response_deserializer=repository.Repository.deserialize,
)
return self._stubs['get_repository']
@property
def create_repository(self) -> Callable[
[gda_repository.CreateRepositoryRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create repository method over gRPC.
Creates a repository. The returned Operation will
finish once the repository has been created. Its
response will be the created Repository.
Returns:
Callable[[~.CreateRepositoryRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_repository' not in self._stubs:
self._stubs['create_repository'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/CreateRepository',
request_serializer=gda_repository.CreateRepositoryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_repository']
@property
def update_repository(self) -> Callable[
[gda_repository.UpdateRepositoryRequest],
Awaitable[gda_repository.Repository]]:
r"""Return a callable for the update repository method over gRPC.
Updates a repository.
Returns:
Callable[[~.UpdateRepositoryRequest],
Awaitable[~.Repository]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_repository' not in self._stubs:
self._stubs['update_repository'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/UpdateRepository',
request_serializer=gda_repository.UpdateRepositoryRequest.serialize,
response_deserializer=gda_repository.Repository.deserialize,
)
return self._stubs['update_repository']
@property
def delete_repository(self) -> Callable[
[repository.DeleteRepositoryRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete repository method over gRPC.
Deletes a repository and all of its contents. The
returned Operation will finish once the repository has
been deleted. It will not have any Operation metadata
and will return a google.protobuf.Empty response.
Returns:
Callable[[~.DeleteRepositoryRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_repository' not in self._stubs:
self._stubs['delete_repository'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/DeleteRepository',
request_serializer=repository.DeleteRepositoryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_repository']
@property
def list_packages(self) -> Callable[
[package.ListPackagesRequest],
Awaitable[package.ListPackagesResponse]]:
r"""Return a callable for the list packages method over gRPC.
Lists packages.
Returns:
Callable[[~.ListPackagesRequest],
Awaitable[~.ListPackagesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_packages' not in self._stubs:
self._stubs['list_packages'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ListPackages',
request_serializer=package.ListPackagesRequest.serialize,
response_deserializer=package.ListPackagesResponse.deserialize,
)
return self._stubs['list_packages']
@property
def get_package(self) -> Callable[
[package.GetPackageRequest],
Awaitable[package.Package]]:
r"""Return a callable for the get package method over gRPC.
Gets a package.
Returns:
Callable[[~.GetPackageRequest],
Awaitable[~.Package]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_package' not in self._stubs:
self._stubs['get_package'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetPackage',
request_serializer=package.GetPackageRequest.serialize,
response_deserializer=package.Package.deserialize,
)
return self._stubs['get_package']
@property
def delete_package(self) -> Callable[
[package.DeletePackageRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete package method over gRPC.
Deletes a package and all of its versions and tags.
The returned operation will complete once the package
has been deleted.
Returns:
Callable[[~.DeletePackageRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_package' not in self._stubs:
self._stubs['delete_package'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/DeletePackage',
request_serializer=package.DeletePackageRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_package']
@property
def list_versions(self) -> Callable[
[version.ListVersionsRequest],
Awaitable[version.ListVersionsResponse]]:
r"""Return a callable for the list versions method over gRPC.
Lists versions.
Returns:
Callable[[~.ListVersionsRequest],
Awaitable[~.ListVersionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_versions' not in self._stubs:
self._stubs['list_versions'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ListVersions',
request_serializer=version.ListVersionsRequest.serialize,
response_deserializer=version.ListVersionsResponse.deserialize,
)
return self._stubs['list_versions']
@property
def get_version(self) -> Callable[
[version.GetVersionRequest],
Awaitable[version.Version]]:
r"""Return a callable for the get version method over gRPC.
        Gets a version.
Returns:
Callable[[~.GetVersionRequest],
Awaitable[~.Version]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_version' not in self._stubs:
self._stubs['get_version'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetVersion',
request_serializer=version.GetVersionRequest.serialize,
response_deserializer=version.Version.deserialize,
)
return self._stubs['get_version']
@property
def delete_version(self) -> Callable[
[version.DeleteVersionRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete version method over gRPC.
Deletes a version and all of its content. The
returned operation will complete once the version has
been deleted.
Returns:
Callable[[~.DeleteVersionRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_version' not in self._stubs:
self._stubs['delete_version'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/DeleteVersion',
request_serializer=version.DeleteVersionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_version']
@property
def list_files(self) -> Callable[
[file.ListFilesRequest],
Awaitable[file.ListFilesResponse]]:
r"""Return a callable for the list files method over gRPC.
Lists files.
Returns:
Callable[[~.ListFilesRequest],
Awaitable[~.ListFilesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_files' not in self._stubs:
self._stubs['list_files'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ListFiles',
request_serializer=file.ListFilesRequest.serialize,
response_deserializer=file.ListFilesResponse.deserialize,
)
return self._stubs['list_files']
@property
def get_file(self) -> Callable[
[file.GetFileRequest],
Awaitable[file.File]]:
r"""Return a callable for the get file method over gRPC.
Gets a file.
Returns:
Callable[[~.GetFileRequest],
Awaitable[~.File]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_file' not in self._stubs:
self._stubs['get_file'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetFile',
request_serializer=file.GetFileRequest.serialize,
response_deserializer=file.File.deserialize,
)
return self._stubs['get_file']
@property
def list_tags(self) -> Callable[
[tag.ListTagsRequest],
Awaitable[tag.ListTagsResponse]]:
r"""Return a callable for the list tags method over gRPC.
Lists tags.
Returns:
Callable[[~.ListTagsRequest],
Awaitable[~.ListTagsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_tags' not in self._stubs:
self._stubs['list_tags'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/ListTags',
request_serializer=tag.ListTagsRequest.serialize,
response_deserializer=tag.ListTagsResponse.deserialize,
)
return self._stubs['list_tags']
@property
def get_tag(self) -> Callable[
[tag.GetTagRequest],
Awaitable[tag.Tag]]:
r"""Return a callable for the get tag method over gRPC.
Gets a tag.
Returns:
Callable[[~.GetTagRequest],
Awaitable[~.Tag]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_tag' not in self._stubs:
self._stubs['get_tag'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetTag',
request_serializer=tag.GetTagRequest.serialize,
response_deserializer=tag.Tag.deserialize,
)
return self._stubs['get_tag']
@property
def create_tag(self) -> Callable[
[gda_tag.CreateTagRequest],
Awaitable[gda_tag.Tag]]:
r"""Return a callable for the create tag method over gRPC.
Creates a tag.
Returns:
Callable[[~.CreateTagRequest],
Awaitable[~.Tag]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_tag' not in self._stubs:
self._stubs['create_tag'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/CreateTag',
request_serializer=gda_tag.CreateTagRequest.serialize,
response_deserializer=gda_tag.Tag.deserialize,
)
return self._stubs['create_tag']
@property
def update_tag(self) -> Callable[
[gda_tag.UpdateTagRequest],
Awaitable[gda_tag.Tag]]:
r"""Return a callable for the update tag method over gRPC.
Updates a tag.
Returns:
Callable[[~.UpdateTagRequest],
Awaitable[~.Tag]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_tag' not in self._stubs:
self._stubs['update_tag'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/UpdateTag',
request_serializer=gda_tag.UpdateTagRequest.serialize,
response_deserializer=gda_tag.Tag.deserialize,
)
return self._stubs['update_tag']
@property
def delete_tag(self) -> Callable[
[tag.DeleteTagRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete tag method over gRPC.
Deletes a tag.
Returns:
Callable[[~.DeleteTagRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_tag' not in self._stubs:
self._stubs['delete_tag'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/DeleteTag',
request_serializer=tag.DeleteTagRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_tag']
@property
def set_iam_policy(self) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Updates the IAM policy for a given resource.
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_iam_policy' not in self._stubs:
self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/SetIamPolicy',
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['set_iam_policy']
@property
def get_iam_policy(self) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM policy for a given resource.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_iam_policy' not in self._stubs:
self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/GetIamPolicy',
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['get_iam_policy']
@property
def test_iam_permissions(self) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests if the caller has a list of permissions on a
resource.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'test_iam_permissions' not in self._stubs:
self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary(
'/google.devtools.artifactregistry.v1beta2.ArtifactRegistry/TestIamPermissions',
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs['test_iam_permissions']
__all__ = (
'ArtifactRegistryGrpcAsyncIOTransport',
)
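# A hypothetical usage note (added; not part of the generated file): this
# transport is normally constructed indirectly by the async client, e.g.
#
#   client = ArtifactRegistryAsyncClient(transport="grpc_asyncio")
#
# rather than by instantiating ArtifactRegistryGrpcAsyncIOTransport directly;
# the client name here is an assumption based on the service name.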
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
b04ec232dad42146af4dce5ba79fdd3343c9be31
|
fc29ccdcf9983a54ae2bbcba3c994a77282ae52e
|
/Leetcode_By_Topic/bfs_pq-407.py
|
5dc3ac844da50106fb2abfa7e638275b9d16f6e8
|
[] |
no_license
|
linnndachen/coding-practice
|
d0267b197d9789ab4bcfc9eec5fb09b14c24f882
|
5e77c3d7a0632882d16dd064f0aad2667237ef37
|
refs/heads/master
| 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,142 |
py
|
# The reason we use a priority queue (min-heap) is that we always
# process the cell with the smallest boundary height first.
import heapq
from typing import List
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
m, n, = len(heightMap), len(heightMap[0])
heap, trapped = [], 0
for i in range(m):
for j in range(n):
if i in {0, m - 1} or j in {0, n - 1}:
# enque the edges
heapq.heappush(heap, (heightMap[i][j], i, j))
heightMap[i][j] = -1
while heap:
# started with the lowest height
h, i, j = heapq.heappop(heap)
for x, y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1)):
# looped through the enclosed area
if 0 < x < m-1 and 0 < y < n-1 and heightMap[x][y] != -1:
# if there's a difference, add the area
trapped += max(h - heightMap[x][y], 0)
# increase the minimum height if needed
heapq.heappush(heap, (max(heightMap[x][y], h), x, y))
heightMap[x][y] = -1
return trapped
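# A minimal usage sketch (added; not part of the original file). The grid is
# the standard LeetCode 407 example, which traps 4 units of water:
#
#   grid = [[1, 4, 3, 1, 3, 2],
#           [3, 2, 1, 3, 2, 4],
#           [2, 3, 3, 2, 3, 1]]
#   print(Solution().trapRainWater(grid))  # -> 4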
|
[
"[email protected]"
] | |
54b29fef17ebaf22001c0689a06c72dc1f828954
|
a861d6c0963c403775d63e4b4147249d06f55097
|
/webapps/py/badObjectNames.py
|
ff030ebec2155413a7fda983b3082e7c0feb436f
|
[] |
no_license
|
JulianJaffe/Tools
|
87e2ad973f6bbc5d76583658e6765f0742058c90
|
9e84dccc4e271da5c2c1ab307b17779601143117
|
refs/heads/master
| 2020-03-29T13:03:03.056238 | 2014-10-15T03:27:17 | 2014-10-15T03:27:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,846 |
py
|
#!/usr/bin/env /usr/bin/python
import os
import sys
# for log
import csv
import codecs
import ConfigParser
import time, datetime
import httplib, urllib2
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import re
from lxml import etree
import locale
locale.setlocale(locale.LC_ALL, 'en_US')
# the only other module: isolate postgres calls and connection
import badObjectNamesDB
elapsedtime = time.time()
# #################################### Collection Stats web app #######################################
def doGetNamesOverSixtyCharsLong(config):
unixruntime = time.time()
isoruntime = datetime.datetime.fromtimestamp(int(unixruntime)).strftime('%Y-%m-%d %H:%M:%S')
badobjectnames = badObjectNamesDB.getnamesoversixtycharslong(config)
print """<h2 text-align="center">There are """ + str(len(badobjectnames)) + " objects with names over 60 characters long:</h2><i>(Note: full names are shown, so any truncation is a pre-CSpace artifact)</i><br/>"
print """<table><tr><th>Museum No.</th><th width="500px">Object name</th></tr>"""
    for objectnumber, objectname, description, objectcsid in badobjectnames:
        objecturl = "http://pahma.cspace.berkeley.edu:8180/collectionspace/ui/pahma/html/cataloging.html?csid=" + str(objectcsid)
        print '''<tr><td><a href="''' + str(objecturl) + '''" target="_blank">''' + str(objectnumber) + "</a></td><td>" + str(objectname) + "</td></tr>"
print "</table>"
# ###############################
def starthtml(form,config):
if config == False:
print selectWebapp()
sys.exit(0)
logo = config.get('info','logo')
schemacolor1 = config.get('info','schemacolor1')
serverlabel = config.get('info','serverlabel')
serverlabelcolor = config.get('info','serverlabelcolor')
apptitle = config.get('info','apptitle')
updateType = config.get('info','updatetype')
location1 = str(form.getvalue("lo.location1")) if form.getvalue("lo.location1") else ''
location2 = str(form.getvalue("lo.location2")) if form.getvalue("lo.location2") else ''
num2ret = str(form.getvalue('num2ret')) if str(form.getvalue('num2ret')).isdigit() else '50'
button = '''<input id="actionbutton" class="save" type="submit" value="Refresh" name="action">'''
otherfields = ''' '''
divsize = '''<div id="sidiv" style="position:relative; width:1000px; height:750px; color:#CC0000; ">'''
return '''Content-type: text/html; charset=utf-8
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>''' + apptitle + ' : ' + serverlabel + '''</title>
<style type="text/css">
body { margin:10px 10px 0px 10px; font-family: Arial, Helvetica, sans-serif; }
table { width: 100%; }
td { cell-padding: 3px; }
.stattitle { font-weight: normal; text-align:right; }
.statvalue { font-weight: bold; text-align:left; }
.statvaluecenter { font-weight: bold; text-align:center; }
th { text-align: left ;color: #666666; font-size: 16px; font-weight: bold; cell-padding: 3px;}
h1 { font-size:32px; float:left; padding:10px; margin:0px; border-bottom: none; }
h2 { font-size:28px; padding:5px; margin:0px; border-bottom: none; text-align:center; }
h3 { font-size:12px; float:left; color:white; background:black; }
p { padding:10px 10px 10px 10px; }
button { font-size: 150%; width:85px; text-align: center; text-transform: uppercase;}
.objtitle { font-size:28px; float:left; padding:2px; margin:0px; border-bottom: thin dotted #aaaaaa; color: #000000; }
.objsubtitle { font-size:28px; float:left; padding:2px; margin:0px; border-bottom: thin dotted #aaaaaa; font-style: italic; color: #999999; }
.cell { line-height: 1.0; text-indent: 2px; color: #666666; font-size: 16px;}
.enumerate { background-color: green; font-size:20px; color: #FFFFFF; font-weight:bold; vertical-align: middle; text-align: center; }
img#logo { float:left; height:50px; padding:10px 10px 10px 10px;}
.locations { color: #000000; background-color: #FFFFFF; font-weight: bold; font-size: 18px; }
.ncell { line-height: 1.0; cell-padding: 2px; font-size: 16px;}
.objname { font-weight: bold; font-size: 16px; font-style: italic; width:200px; }
.objno { font-weight: bold; font-size: 16px; font-style: italic; width:160px; }
.ui-tabs .ui-tabs-panel { padding: 0px; min-height:120px; }
.rdo { text-align: center; }
.save { background-color: BurlyWood; font-size:20px; color: #000000; font-weight:bold; vertical-align: middle; text-align: center; }
.shortinput { font-weight: bold; width:150px; }
.subheader { background-color: ''' + schemacolor1 + '''; color: #FFFFFF; font-size: 24px; font-weight: bold; }
.veryshortinput { width:60px; }
.xspan { color: #000000; background-color: #FFFFFF; font-weight: bold; font-size: 12px; }
</style>
<style type="text/css">
/*<![CDATA[*/
@import "../css/jquery-ui-1.8.22.custom.css";
/*]]>*/
</style>
<script type="text/javascript" src="../js/jquery-1.7.2.min.js"></script>
<script type="text/javascript" src="../js/jquery-ui-1.8.22.custom.min.js"></script>
<script type="text/javascript" src="../js/provision.js"></script>
<style>
.ui-autocomplete-loading { background: white url('../images/ui-anim_basic_16x16.gif') right center no-repeat; }
</style>
<script type="text/javascript">
function formSubmit(location)
{
console.log(location);
document.getElementById('lo.location1').value = location
document.getElementById('lo.location2').value = location
//document.getElementById('num2ret').value = 1
//document.getElementById('actionbutton').value = "Next"
document.forms['sysinv'].submit();
}
</script>
</head>
<body>
<form id="sysinv" enctype="multipart/form-data" method="post">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tbody><tr><td width="3%"> </td><td align="center">''' + divsize + '''
<table width="100%">
<tbody>
<tr>
<td style="width: 400px; color: #000000; font-size: 32px; font-weight: bold;">''' + apptitle + '''</td>
<td><span style="color:''' + serverlabelcolor + ''';">''' + serverlabel + '''</td>
<th style="text-align:right;"><img height="60px" src="''' + logo + '''"></th>
</tr>
<tr><td colspan="3"><hr/></td></tr>
<tr>
<td colspan="3">
<table>
<tr><td><table>
''' + otherfields + '''
</table>
</td><td style="width:3%"/>
<td style="width:120px;text-align:center;">''' + button + '''</td>
</tr>
</table></td></tr>
<tr><td colspan="3"><div id="status"><hr/></div></td></tr>
</tbody>
</table>'''
# ###############################
def endhtml(config,elapsedtime):
#user = form.getvalue('user')
count = form.getvalue('count')
connect_string = config.get('connect','connect_string')
return '''
<table width="100%">
<tbody>
<tr><td colspan="5"><hr></td></tr>
<tr>
<td width="180px" class="xspan">''' + time.strftime("%b %d %Y %H:%M:%S", time.localtime()) + '''</td>
<td width="120px" class="cell">elapsed time: </td>
<td class="xspan">''' + ('%8.2f' % elapsedtime) + ''' seconds</td>
<td style="text-align: right;" class="cell">powered by </td>
<td style="text-align: right;width: 170;" class="cell"><img src="http://collectionspace.org/sites/all/themes/CStheme/images/CSpaceLogo.png" height="30px"></td>
</tr>
</tbody>
</table>
</div>
</td><td width="3%"> </td></tr>
</tbody></table>
</form>
<script>
$(document).ready(function () {
$('[name]').map(function() {
var elementID = $(this).attr('name');
if (elementID.indexOf('.') == 2) {
console.log(elementID);
$(this).autocomplete({
source: function(request, response) {
$.ajax({
url: "../cgi-bin/autosuggest.py?connect_string=''' + connect_string + '''",
dataType: "json",
data: {
q : request.term,
elementID : elementID
},
success: function(data) {
response(data);
}
});
},
minLength: 2,
});
}
});
});
</script>
</body></html>
'''
# ###############################
if __name__ == "__main__":
#fileName = 'badObjectNamesV321.cfg'
fileName = 'badObjectNamesDev.cfg'
config = ConfigParser.RawConfigParser()
config.read(fileName)
form = cgi.FieldStorage()
print starthtml(form,config)
doGetNamesOverSixtyCharsLong(config)
elapsedtime = time.time() - elapsedtime
print endhtml(config,elapsedtime)
|
[
"[email protected]"
] | |
00f59bbf612888b96061b5e1941b039693ae8bd6
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_integration_runtime_connection_infos_operations.py
|
025f88c15bb2e51084fa176d23b21e8d83390cf2
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 5,248 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IntegrationRuntimeConnectionInfosOperations:
"""IntegrationRuntimeConnectionInfosOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
workspace_name: str,
integration_runtime_name: str,
**kwargs
) -> "_models.IntegrationRuntimeConnectionInfo":
"""Get integration runtime connection info.
Get connection info for an integration runtime.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param integration_runtime_name: Integration runtime name.
:type integration_runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationRuntimeConnectionInfo, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.IntegrationRuntimeConnectionInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IntegrationRuntimeConnectionInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'integrationRuntimeName': self._serialize.url("integration_runtime_name", integration_runtime_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IntegrationRuntimeConnectionInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/integrationRuntimes/{integrationRuntimeName}/getConnectionInfo'} # type: ignore
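    # A hypothetical usage sketch (added; the client construction is an
    # assumption, not shown in this file): this operation group is normally
    # reached through the async management client, roughly:
    #
    #   async with SynapseManagementClient(credential, subscription_id) as client:
    #       info = await client.integration_runtime_connection_infos.get(
    #           resource_group_name="rg", workspace_name="ws",
    #           integration_runtime_name="ir")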
|
[
"[email protected]"
] | |
6aa9adbe021700f4492779c1e1475e5eae105d7b
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/hackerEarth/practice/dataStructures/advancedDataStructures/segmentTrees/chemicalReaction.py
|
abb01081b288bab7983f9f021a980fb0f4895a1c
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 |
MIT
| 2022-03-06T11:07:18 | 2016-06-15T06:57:19 |
Python
|
UTF-8
|
Python
| false | false | 4,202 |
py
|
# Chemical Reaction
#######################################################################################################################
#
# Ani and his favourite Chemistry teacher Lissa were performing an experiment in the Chemistry lab. The experiment
# involves an N step chemical reaction. An N step chemical reaction requires N different reactants from the
# periodic table. (Don't know about the Periodic Table? You can still try solving this problem.) N elements
# are stacked in a bottom-up manner with their reacting times. Look at the given figure.
# [figure omitted]
#
# Lissa is very good at performing experiments (of course, as she is a chemistry teacher), so she is doing the
# actual job alone. Ani is there only to lend her a helping hand. After every step, Lissa ordered Ani to put the
# kth element from the stack (counting starts from the bottom) into the ongoing chemical reaction and record the
# expected time taken by the chemical reaction to be accomplished.
# The Expected Time of a chemical reaction is defined as the maximum of the reacting times of all the reactants
# present in the chemical reaction at that instant of time.
# Consider a 6 step chemical reaction with the same set of reactants given above. Let the order of elements
# given by Lissa to Ani follow this list.
# Note that the list contains N-1 elements only.
#
# 2 2 1 2 2
#
# Step 1: Ani puts the second element from the bottom, i.e. titanium, into the chemical reaction and records
# the expected time as 799.
# New stack configuration :: [figure omitted]
# Step 2: Ani puts the second element from the bottom, i.e. barium, into the chemical reaction and records
# the expected time as 799.
# New stack configuration :: [figure omitted]
# Step 3: Ani puts the first element from the bottom, i.e. zinc, into the chemical reaction and records
# the expected time as 999.
# New stack configuration :: [figure omitted]
# Step 4: Ani puts the second element from the bottom, i.e. potassium, into the chemical reaction and records
# the expected time as 999.
# New stack configuration :: [figure omitted]
# Step 5: Ani puts the second element from the bottom, i.e. sodium, into the chemical reaction and records
# the expected time as 999.
# New stack configuration :: [figure omitted]
# As there is only one element left on the stack in the end, Ani puts that element into the reaction without
# asking his teacher (he is over-smart, actually). While doing this, he dropped some chemical on the record
# he had taken. This made Miss Lissa very angry, and she decided to punish him. Ani does not want to be punished
# by his favourite teacher. So, can you save him from being punished? Can you generate the same record for him?
#
# Input:
# The first line of input contains a single integer T denoting the number of experiments to be performed.
# The next 4*T lines contain the description of each experiment; each experiment's description consists of 4 lines.
# The first line of a description contains a single integer N denoting the order of the reaction (number of reactants).
# The next line contains N space-separated strings, i.e. the names of the reactants. The next line
# contains N integers denoting the reacting time of each element. The last line contains N-1 integers
# denoting the ordered list of elements given by Lissa to Ani.
#
# Output:
# For each experiment, the output consists of N lines, where the ith line contains one string (the name of the
# element added in the ith step) and the expected time of the chemical reaction after the ith step.
#
# Constraints:
# 1 <= T <= 10
# 1 <= N <= 5*10^5
# Element names are composed of only lower-case letters
# 1 <= |Reactant's name| <= 10
# 0 <= Reacting time <= 10^9
# sum of all N over all the test cases is 5*10^9
#
# NOTE:
# Prefer to use Printf / Scanf instead of cin / cout (very large input set).
#
# SAMPLE INPUT
# 1
# 6
# zinc titanium barium lithium potassium sodium
# 999 799 600 140 7 100
# 2 2 1 2 2
#
# SAMPLE OUTPUT
# titanium 799
# barium 799
# zinc 999
# potassium 999
# sodium 999
# lithium 999
#
#######################################################################################################################
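# A hedged solution sketch (added; not part of the original file). The task
# reduces to repeatedly extracting the k-th remaining element (counting from
# the bottom) and printing its name with the running maximum of reacting
# times. A Fenwick tree (BIT) over stack positions answers the k-th order
# statistic in O(log N); all identifiers below are illustrative assumptions.
import sys
def solve():
    data = sys.stdin.read().split()
    pos = 0
    t = int(data[pos]); pos += 1
    out = []
    for _ in range(t):
        n = int(data[pos]); pos += 1
        names = data[pos:pos + n]; pos += n
        times = [int(x) for x in data[pos:pos + n]]; pos += n
        order = [int(x) for x in data[pos:pos + n - 1]]; pos += n - 1
        bit = [0] * (n + 1)                # Fenwick tree: counts of remaining elements
        for i in range(1, n + 1):          # O(n) build: every position starts present
            bit[i] += 1
            j = i + (i & -i)
            if j <= n:
                bit[j] += bit[i]
        def kth(k):                        # smallest position whose prefix sum equals k
            idx, step = 0, 1 << n.bit_length()
            while step:
                nxt = idx + step
                if nxt <= n and bit[nxt] < k:
                    idx = nxt
                    k -= bit[nxt]
                step >>= 1
            return idx + 1
        def remove(i):                     # mark position i as consumed
            while i <= n:
                bit[i] -= 1
                i += i & -i
        expected = 0
        for k in order + [1]:              # the last survivor is the 1st remaining element
            p = kth(k)
            remove(p)
            expected = max(expected, times[p - 1])
            out.append('%s %d' % (names[p - 1], expected))
    sys.stdout.write('\n'.join(out) + '\n')
if __name__ == '__main__':
    solve()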
|
[
"[email protected]"
] | |
a09ca7216adbbeef3f5c000eb167936a1f1ed25e
|
49f61714a6f78d984fd2194d6064d84e891bc5b7
|
/2019-1/230/users/4041/codes/1683_2471.py
|
825a290d780306bb8586be7773cb306f47c9bd76
|
[] |
no_license
|
psbarros/Variaveis3
|
b5c4e1517e7d94a846ee03791d25d5821a1c651c
|
3dcf6f810709ce03c78335acf9533e008a2ae125
|
refs/heads/master
| 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 659 |
py
|
idade = int(input("insira sua idade: "))
imc = float(input("insira sua imc: "))
if idade <= 0 or idade > 130 or imc <= 0:
    # validation must run first; the original tested "idade<=0 and idade>130",
    # which can never be true, so invalid input was never reported
    print("Entradas:", idade, "anos e IMC", imc)
    print("Dados invalidos")
elif idade < 45 and imc < 22.0:
    print("Entradas:", idade, "anos e IMC", imc)
    risco = "Baixo"
    print("Risco:", risco)
elif idade < 45 and imc >= 22.0:
    print("Entradas:", idade, "anos e IMC", imc)
    risco = "Medio"
    print("Risco:", risco)
elif idade >= 45 and imc < 22.0:
    print("Entradas:", idade, "anos e IMC", imc)
    risco = "Medio"
    print("Risco:", risco)
else:
    print("Entradas:", idade, "anos e IMC", imc)
    risco = "Alto"
    print("Risco:", risco)
|
[
"[email protected]"
] | |
e3e4c8b3377b575c08d598c785954c535352ffad
|
55a273347cb103fe2b2704cb9653956956d0dd34
|
/code/tmp_rtrip/test/mp_preload.py
|
e346c5f11d3c41466fad2388a00794a035b708d4
|
[
"MIT"
] |
permissive
|
emilyemorehouse/ast-and-me
|
4af1bc74fc967ea69ac1aed92664f6428acabe6a
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
refs/heads/master
| 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 |
MIT
| 2022-11-04T11:36:43 | 2017-12-21T18:27:19 |
Python
|
UTF-8
|
Python
| false | false | 318 |
py
|
import multiprocessing
multiprocessing.Lock()
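# (Comment added for clarity) Creating a Lock at module import time is the
# point of this test: the module is imported both normally and inside the
# forkserver process via set_forkserver_preload() below, so module-level
# synchronization primitives must be constructible in both contexts.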
def f():
print('ok')
if __name__ == '__main__':
ctx = multiprocessing.get_context('forkserver')
modname = 'test.mp_preload'
__import__(modname)
ctx.set_forkserver_preload([modname])
proc = ctx.Process(target=f)
proc.start()
proc.join()
|
[
"[email protected]"
] | |
1169a6fcb6d341f8e55bde090138a5d6601bc2de
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_315/ch28_2020_09_14_19_50_22_038006.py
|
ea52166c4ae4e1d2336c316884e0055e2f33adfe
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 78 |
py
|
i = 0
x = 0
# The geometric series 1 + 1/2 + 1/4 + ... converges to 2, so a condition
# such as "i <= 100" can never become false and the loop would never end.
# Bounding the number of terms instead (an assumption about the intended
# exercise) guarantees termination; the printed sum approaches 2.
while x <= 100:
    i = i + (1/2)**x
    x = x + 1
print(i)
|
[
"[email protected]"
] | |
f510490adc4d4b3e88b0b150cfd42fd22a80f11f
|
0afc497dafc54da5fe3e88aea7f7244f43767259
|
/Vents/migrations/0003_auto_20170522_1321.py
|
c2c0e77eeb4e557c7139a8037990a1f9016d62d2
|
[] |
no_license
|
matheo97/Multitenant-Django-app
|
816da2f37c73487f82ecd26f9c52132609558d53
|
555bc4b737a52e4446f4de78e21c22a2206336fb
|
refs/heads/master
| 2020-04-26T15:56:40.550895 | 2019-03-06T13:30:35 | 2019-03-06T13:30:35 | 173,662,386 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 838 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-05-22 18:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Vents', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ventaregistrada',
name='direccion',
field=models.CharField(default='', max_length=100, null=True),
),
migrations.AddField(
model_name='ventaregistrada',
name='nombre',
field=models.CharField(default='', max_length=100, null=True),
),
migrations.AddField(
model_name='ventaregistrada',
name='tarjeta',
field=models.CharField(default='', max_length=100, null=True),
),
]
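# Standard Django workflow for applying this migration (added note; not part
# of the generated file):
#
#   python manage.py migrate Vents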
|
[
"[email protected]"
] | |
8b241edc84d3eb6f5cbb94d5e472a9326adcf879
|
05fb218bf06c525bf48d9c7133e4e3c659ab444c
|
/exchangelib/version.py
|
d8bbf5f90872148b6aac7b12584bc4125985b36d
|
[
"BSD-2-Clause"
] |
permissive
|
alexdelin/exchangelib
|
b2f326c60b4fcbbdf84d7cfc5078d53f7f003cc8
|
06e3237ab05b876b5542f5cc3d5a6ecc7a9094d1
|
refs/heads/master
| 2021-01-23T07:51:05.752612 | 2017-09-08T08:06:43 | 2017-09-08T08:06:43 | 86,459,174 | 0 | 0 | null | 2017-09-11T15:50:26 | 2017-03-28T12:47:08 |
Python
|
UTF-8
|
Python
| false | false | 12,638 |
py
|
# coding=utf-8
from __future__ import unicode_literals
import logging
from xml.etree.ElementTree import ParseError
import requests.sessions
from future.utils import python_2_unicode_compatible
from six import text_type
from .errors import TransportError, ErrorInvalidSchemaVersionForMailboxVersion
from .transport import TNS, SOAPNS, get_auth_instance
from .util import is_xml, to_xml
log = logging.getLogger(__name__)
# Legend for dict:
# Key: shortname
# Values: (EWS API version ID, full name)
# 'shortname' comes from types.xsd and is the official version of the server, corresponding to the version numbers
# supplied in SOAP headers. 'API version' is the version name supplied in the RequestServerVersion element in SOAP
# headers and describes the EWS API version the server implements. Valid values for this element are described here:
# http://msdn.microsoft.com/en-us/library/bb891876(v=exchg.150).aspx
VERSIONS = {
'Exchange2007': ('Exchange2007', 'Microsoft Exchange Server 2007'),
'Exchange2007_SP1': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP1'),
'Exchange2007_SP2': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP2'),
'Exchange2007_SP3': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP3'),
'Exchange2010': ('Exchange2010', 'Microsoft Exchange Server 2010'),
'Exchange2010_SP1': ('Exchange2010_SP1', 'Microsoft Exchange Server 2010 SP1'),
'Exchange2010_SP2': ('Exchange2010_SP2', 'Microsoft Exchange Server 2010 SP2'),
'Exchange2010_SP3': ('Exchange2010_SP2', 'Microsoft Exchange Server 2010 SP3'),
'Exchange2013': ('Exchange2013', 'Microsoft Exchange Server 2013'),
'Exchange2013_SP1': ('Exchange2013_SP1', 'Microsoft Exchange Server 2013 SP1'),
'Exchange2015': ('Exchange2015', 'Microsoft Exchange Server 2015'),
'Exchange2015_SP1': ('Exchange2015_SP1', 'Microsoft Exchange Server 2015 SP1'),
'Exchange2016': ('Exchange2016', 'Microsoft Exchange Server 2016'),
}
# Build a list of unique API versions, used when guessing API version supported by the server. Use reverse order so we
# get the newest API version supported by the server.
API_VERSIONS = sorted({v[0] for v in VERSIONS.values()}, reverse=True)
@python_2_unicode_compatible
class Build(object):
"""
Holds methods for working with build numbers
"""
# List of build numbers here: https://technet.microsoft.com/en-gb/library/hh135098(v=exchg.150).aspx
API_VERSION_MAP = {
8: {
0: 'Exchange2007',
1: 'Exchange2007_SP1',
2: 'Exchange2007_SP1',
3: 'Exchange2007_SP1',
},
14: {
0: 'Exchange2010',
1: 'Exchange2010_SP1',
2: 'Exchange2010_SP2',
3: 'Exchange2010_SP2',
},
15: {
0: 'Exchange2013', # Minor builds starting from 847 are Exchange2013_SP1, see api_version()
1: 'Exchange2016',
20: 'Exchange2016', # This is Office365. See issue #221
},
}
__slots__ = ('major_version', 'minor_version', 'major_build', 'minor_build')
def __init__(self, major_version, minor_version, major_build=0, minor_build=0):
self.major_version = major_version
self.minor_version = minor_version
self.major_build = major_build
self.minor_build = minor_build
if major_version < 8:
raise ValueError("Exchange major versions below 8 don't support EWS (%s)", text_type(self))
@classmethod
def from_xml(cls, elem):
xml_elems_map = {
'major_version': 'MajorVersion',
'minor_version': 'MinorVersion',
'major_build': 'MajorBuildNumber',
'minor_build': 'MinorBuildNumber',
}
kwargs = {}
for k, xml_elem in xml_elems_map.items():
v = elem.get(xml_elem)
if v is None:
                raise ValueError('Missing %s in ServerVersionInfo element' % xml_elem)
kwargs[k] = int(v) # Also raises ValueError
return cls(**kwargs)
def api_version(self):
if self.major_version == 15 and self.minor_version == 0 and self.major_build >= 847:
return 'Exchange2013_SP1'
try:
return self.API_VERSION_MAP[self.major_version][self.minor_version]
except KeyError:
raise ValueError('API version for build %s is unknown' % self)
def __cmp__(self, other):
# __cmp__ is not a magic method in Python3. We'll just use it here to implement comparison operators
c = (self.major_version > other.major_version) - (self.major_version < other.major_version)
if c != 0:
return c
c = (self.minor_version > other.minor_version) - (self.minor_version < other.minor_version)
if c != 0:
return c
c = (self.major_build > other.major_build) - (self.major_build < other.major_build)
if c != 0:
return c
return (self.minor_build > other.minor_build) - (self.minor_build < other.minor_build)
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __str__(self):
return '%s.%s.%s.%s' % (self.major_version, self.minor_version, self.major_build, self.minor_build)
def __repr__(self):
return self.__class__.__name__ \
+ repr((self.major_version, self.minor_version, self.major_build, self.minor_build))
# Helpers for comparison operations elsewhere in this package
EXCHANGE_2007 = Build(8, 0)
EXCHANGE_2010 = Build(14, 0)
EXCHANGE_2010_SP2 = Build(14, 2)
EXCHANGE_2013 = Build(15, 0)
EXCHANGE_2016 = Build(15, 1)
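# Worked example (added for clarity, not in the original): Build comparison
# follows the version tuple, so EXCHANGE_2013 < EXCHANGE_2016 holds, and
# Build(15, 0, 847).api_version() == 'Exchange2013_SP1' because minor builds
# from 847 onwards mark SP1 (see api_version above).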
@python_2_unicode_compatible
class Version(object):
"""
Holds information about the server version
"""
__slots__ = ('build', 'api_version')
def __init__(self, build, api_version=None):
self.build = build
self.api_version = api_version
if self.build is not None and self.api_version is None:
self.api_version = build.api_version()
@property
def fullname(self):
return VERSIONS[self.api_version][1]
@classmethod
def guess(cls, protocol):
"""
Tries to ask the server which version it has. We haven't set up an Account object yet, so we generate requests
by hand. We only need a response header containing a ServerVersionInfo element.
The types.xsd document contains a 'shortname' value that we can use as a key for VERSIONS to get the API version
        that we need in SOAP headers to generate valid requests. Unfortunately, the Exchange server may be misconfigured
to either block access to types.xsd or serve up a wrong version of the document. Therefore, we only use
'shortname' as a hint, but trust the SOAP version returned in response headers.
To get API version and build numbers from the server, we need to send a valid SOAP request. We can't do that
without a valid API version. To solve this chicken-and-egg problem, we try all possible API versions that this
package supports, until we get a valid response. If we managed to get a 'shortname' previously, we try the
corresponding API version first.
"""
log.debug('Asking server for version info')
# We can't use a session object from the protocol pool for docs because sessions are created with service auth.
auth = get_auth_instance(credentials=protocol.credentials, auth_type=protocol.docs_auth_type)
try:
shortname = cls._get_shortname_from_docs(auth=auth, types_url=protocol.types_url)
log.debug('Shortname according to %s: %s', protocol.types_url, shortname)
except (TransportError, ParseError) as e:
log.info(text_type(e))
shortname = None
api_version = VERSIONS[shortname][0] if shortname else None
return cls._guess_version_from_service(protocol=protocol, hint=api_version)
@staticmethod
def _get_shortname_from_docs(auth, types_url):
# Get the server version from types.xsd. We can't necessarily use the service auth type since it may not be the
# same as the auth type for docs.
log.debug('Getting %s with auth type %s', types_url, auth.__class__.__name__)
# Some servers send an empty response if we send 'Connection': 'close' header
from .protocol import BaseProtocol
with requests.sessions.Session() as s:
s.mount('http://', adapter=BaseProtocol.get_adapter())
s.mount('https://', adapter=BaseProtocol.get_adapter())
r = s.get(url=types_url, auth=auth, allow_redirects=False, stream=False)
log.debug('Request headers: %s', r.request.headers)
log.debug('Response code: %s', r.status_code)
log.debug('Response headers: %s', r.headers)
if r.status_code != 200:
raise TransportError('Unexpected HTTP status %s when getting %s (%s)' % (r.status_code, types_url, r.text))
if not is_xml(r.text):
raise TransportError('Unexpected result when getting %s. Maybe this is not an EWS server?%s' % (
types_url,
'\n\n%s[...]' % r.text[:200] if len(r.text) > 200 else '\n\n%s' % r.text if r.text else '',
))
return to_xml(r.text).get('version')
@classmethod
def _guess_version_from_service(cls, protocol, hint=None):
# The protocol doesn't have a version yet, so add one with our hint, or default to latest supported version.
# Use ResolveNames as a minimal request to the server to test if the version is correct. If not, ResolveNames
# will try to guess the version automatically.
from .services import ResolveNames
protocol.version = Version(build=None, api_version=hint or API_VERSIONS[-1])
try:
ResolveNames(protocol=protocol).call(unresolved_entries=[protocol.credentials.username])
return protocol.version
except ErrorInvalidSchemaVersionForMailboxVersion:
raise TransportError('Unable to guess version')
@staticmethod
def _is_invalid_version_string(s):
# Check if a version string is bogus
return s.startswith('V2_') or s[:6] in ('V2015_', 'V2016_', 'V2017_')
@classmethod
def from_response(cls, requested_api_version, response):
try:
header = to_xml(response).find('{%s}Header' % SOAPNS)
if header is None:
raise ParseError()
except ParseError:
raise TransportError('Unknown XML response (%s)' % response)
info = header.find('{%s}ServerVersionInfo' % TNS)
if info is None:
raise TransportError('No ServerVersionInfo in response: %s' % response)
try:
build = Build.from_xml(elem=info)
except ValueError:
raise TransportError('Bad ServerVersionInfo in response: %s' % response)
# Not all Exchange servers send the Version element
api_version_from_server = info.get('Version') or build.api_version()
if api_version_from_server != requested_api_version:
if cls._is_invalid_version_string(api_version_from_server):
                # For unknown reasons, Office 365 may respond with an API version string that is invalid in a request.
# Detect these so we can fallback to a valid version string.
log.info('API version "%s" worked but server reports version "%s". Using "%s"', requested_api_version,
api_version_from_server, requested_api_version)
api_version_from_server = requested_api_version
else:
# Work around a bug in Exchange that reports a bogus API version in the XML response. Trust server
# response except 'V2_nn' or 'V201[5,6]_nn_mm' which is bogus
log.info('API version "%s" worked but server reports version "%s". Using "%s"', requested_api_version,
api_version_from_server, api_version_from_server)
return cls(build, api_version_from_server)
def __repr__(self):
return self.__class__.__name__ + repr((self.build, self.api_version))
def __str__(self):
return 'Build=%s, API=%s, Fullname=%s' % (self.build, self.api_version, self.fullname)
|
[
"[email protected]"
] | |
3ba9446a9bf04ecf5c8fc5716de850d07d603a73
|
0796e7c0ce6c9e1d4dc820873d3c1ff15804b312
|
/test.py
|
cdc0c3cb8f51040a1a4b1e6eb52cae36ca43a34b
|
[] |
no_license
|
parwisenlared/GAKeras
|
8ef9c3ab1af7a93cbe3bfc95e9f5b072b54aac29
|
b5ad2e3a9aa6e4b774a97b5add2606d0406c3804
|
refs/heads/master
| 2022-11-16T02:12:29.350678 | 2018-11-07T12:39:20 | 2018-11-07T12:39:20 | 278,607,991 | 0 | 0 | null | 2020-07-10T10:39:34 | 2020-07-10T10:39:33 | null |
UTF-8
|
Python
| false | false | 2,694 |
py
|
import unittest
from convindividual import ConvIndividual
from mutation import MutationConv
from crossover import CrossoverConv
from fitness import Fitness
from config import Config
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
class IndividualTest(unittest.TestCase):
def xtest_net(self):
input_shape = (28,28,1)
model = Sequential()
model.add(MaxPooling2D(pool_size=(3,3), input_shape = input_shape))
print("----->", model.layers[-1].output_shape)
model.add(MaxPooling2D(pool_size=(3,3)))
print("----->", model.layers[-1].output_shape)
model.add(MaxPooling2D(pool_size=(3,3)))
print("----->", model.layers[-1].output_shape)
if model.layers[-1].output_shape[1] >= 2 and model.layers[-1].output_shape[2] >= 2:
model.add(MaxPooling2D(pool_size=(2,2)))
print("----->", model.layers[-1].output_shape)
model.add(Flatten())
#model.add(Convolution2D(20, 5, 5, border_mode='same'))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(Flatten())
model.summary()
def test_create_individual(self):
Config.input_shape = (28,28,1)
Config.noutputs = 10
for i in range(1000):
print("--------------------- start {} -------------------".format(i))
ind = ConvIndividual()
ind.randomInit()
print(ind)
net = ind.createNetwork()
net.summary()
def xtest_evaluate(self):
ind = ConvIndividual()
ind.randomInit()
print(ind)
fit = Fitness("data/mnist2d.train")
print("evaluating")
print( fit.evaluate(ind) )
def xtest_mutation(self):
print(" *** test mutation *** ")
Config.input_shape = (28,28,1)
Config.noutputs = 10
ind = ConvIndividual()
ind.randomInit()
print(ind)
mut = MutationConv()
mut.mutate(ind)
print(ind)
def xtest_crossover(self):
print(" *** test crossover *** ")
Config.input_shape = (28,28,1)
Config.noutputs = 10
ind1 = ConvIndividual()
ind1.randomInit()
print(ind1)
ind2 = ConvIndividual()
ind2.randomInit()
print(ind2)
cross = CrossoverConv()
off1, off2 = cross.cxOnePoint(ind1, ind2)
print(off1)
print(off2)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
8f7c292fb090ecefffa1b0e549a821774baa67a5
|
215a7b5d0fcbfd85a3770a981fa2031f733e98db
|
/week8/133clone_graph.py
|
03ccc307d099c643e4bcc7768ef16b4e90e7e38a
|
[] |
no_license
|
shivani-aradhya/Leetcode-DSA
|
7b571de15ef216a5a17f91dbfc895bd69ce8789e
|
c73e3ed8112454167d381bd497665021e36d1257
|
refs/heads/main
| 2023-07-09T10:59:38.987614 | 2021-08-18T05:50:44 | 2021-08-18T05:50:44 | 334,694,883 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
class UndirectedGraphNode(object):
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution(object):
def cloneGraph(self, node):
if node is None:
return None
cloned_node = UndirectedGraphNode(node.label)
cloned, queue = {node:cloned_node}, [node]
while queue:
current = queue.pop()
for neighbor in current.neighbors:
if neighbor not in cloned:
queue.append(neighbor)
cloned_neighbor = UndirectedGraphNode(neighbor.label)
cloned[neighbor] = cloned_neighbor
cloned[current].neighbors.append(cloned[neighbor])
return cloned[node]
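# A minimal usage sketch (added; names are illustrative, not from the original
# file). Note that queue.pop() removes from the end, so the traversal is
# depth-first, which is equally valid for cloning:
#
#   a, b = UndirectedGraphNode(1), UndirectedGraphNode(2)
#   a.neighbors.append(b); b.neighbors.append(a)
#   clone = Solution().cloneGraph(a)
#   assert clone is not a and clone.label == 1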
|
[
"[email protected]"
] | |
0efc77aecb76941ad9ba6674deec524dec657bec
|
f0adca7cac7fb12cdb89e7e821559fe2603bf4bc
|
/src/199/recipe_199_02.py
|
f56c8c403955dc27daf67afa541b742e89b2df67
|
[] |
no_license
|
eriamavro/python-recipe-src
|
dccfa06bc56fcc713f8da9e466f04d07c1f961f0
|
d14f3e4cd885515e9a9a7b8e3f064609c8e50fad
|
refs/heads/master
| 2023-02-13T02:08:44.531621 | 2021-01-14T12:03:05 | 2021-01-14T12:03:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
from datetime import datetime, date, time, timedelta
# Create a date object for 2021/12/22
d1 = date(2021, 12, 22)
# Create a datetime object for 2021/12/22 12:00:30
dt1 = datetime(2021, 12, 22, 12, 00, 30)
# Create a timedelta of 100 days
delta = timedelta(days=100)
# Compute the date 100 days earlier
d2 = d1 - delta
dt2 = dt1 - delta
# Print the results
print(d2)
print(dt2)
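# Expected output (computed by hand as a sanity check): 100 days before
# 2021-12-22 is 2021-09-13, so the two prints show:
# 2021-09-13
# 2021-09-13 12:00:30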
|
[
"[email protected]"
] | |
0be81145ca411d6fbbbfa64f1db51b5ca36595dc
|
066435cd1b48955ab0039c275d706f167ccae5a2
|
/ao2mo/_ao2mo.py
|
2f53dfa31beb8225027add912b91daa2b90bef87
|
[
"BSD-2-Clause"
] |
permissive
|
matk86/pyscf
|
ca4b5c27b9ed6c5fb4120c8471110c087c43600b
|
931bf855591a68c415a9564972a6e216a12b0b36
|
refs/heads/master
| 2020-12-25T10:08:18.334338 | 2016-02-01T06:15:33 | 2016-02-01T06:15:33 | 51,003,165 | 1 | 0 | null | 2016-02-03T14:00:02 | 2016-02-03T14:00:01 | null |
UTF-8
|
Python
| false | false | 11,128 |
py
|
#!/usr/bin/env python
import ctypes
import _ctypes
import numpy
import pyscf.lib
from pyscf.scf import _vhf
libao2mo = pyscf.lib.load_library('libao2mo')
def _fpointer(name):
return ctypes.c_void_p(_ctypes.dlsym(libao2mo._handle, name))
class AO2MOpt(object):
def __init__(self, mol, intor,
prescreen='CVHFnoscreen', qcondname=None):
self._this = ctypes.POINTER(_vhf._CVHFOpt)()
#print self._this.contents, expect ValueError: NULL pointer access
self._intor = _fpointer(intor)
c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
self._cintopt = _vhf.make_cintopt(c_atm, c_bas, c_env, intor)
libao2mo.CVHFinit_optimizer(ctypes.byref(self._this),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
self._this.contents.fprescreen = _fpointer(prescreen)
if prescreen != 'CVHFnoscreen':
# for cint2e_sph, qcondname is 'CVHFsetnr_direct_scf'
fsetqcond = getattr(libao2mo, qcondname)
fsetqcond(self._this,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
def __del__(self):
libao2mo.CINTdel_optimizer(ctypes.byref(self._cintopt))
libao2mo.CVHFdel_optimizer(ctypes.byref(self._this))
# if out is not None, transform AO to MO in-place
def nr_e1fill_(intor, sh_range, atm, bas, env,
aosym='s1', comp=1, ao2mopt=None, out=None):
assert(aosym in ('s4', 's2ij', 's2kl', 's1'))
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
klsh0, klsh1, nkl = sh_range
if '_cart' in intor:
libao2mo.CINTtot_cgto_cart.restype = ctypes.c_int
nao = libao2mo.CINTtot_cgto_cart(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)
cgto_in_shell = _fpointer('CINTcgto_cart')
elif '_sph' in intor:
libao2mo.CINTtot_cgto_spheric.restype = ctypes.c_int
nao = libao2mo.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)
cgto_in_shell = _fpointer('CINTcgto_spheric')
else:
raise NotImplementedError('cint2e spinor AO integrals')
if aosym in ('s4', 's2ij'):
nao_pair = nao * (nao+1) // 2
else:
nao_pair = nao * nao
if out is None:
out = numpy.empty((comp,nkl,nao_pair))
else:
out = numpy.ndarray((comp,nkl,nao_pair), buffer=out)
if ao2mopt is not None:
cao2mopt = ao2mopt._this
cintopt = ao2mopt._cintopt
cintor = ao2mopt._intor
else:
cao2mopt = pyscf.lib.c_null_ptr()
cintor = _fpointer(intor)
cintopt = _vhf.make_cintopt(c_atm, c_bas, c_env, intor)
fdrv = getattr(libao2mo, 'AO2MOnr_e1fill_drv')
fill = _fpointer('AO2MOfill_nr_' + aosym)
fdrv(cintor, cgto_in_shell, fill,
out.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(klsh0), ctypes.c_int(klsh1-klsh0),
ctypes.c_int(nkl), ctypes.c_int(comp),
cintopt, cao2mopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if ao2mopt is None:
libao2mo.CINTdel_optimizer(ctypes.byref(cintopt))
return out
def nr_e1_(eri, mo_coeff, shape, aosym='s1', mosym='s1', out=None):
assert(eri.flags.c_contiguous)
assert(aosym in ('s4', 's2ij', 's2kl', 's1'))
assert(mosym in ('s2', 's1'))
mo_coeff = numpy.asfortranarray(mo_coeff)
nao = mo_coeff.shape[0]
i0, icount, j0, jcount = shape
ij_count = icount * jcount
if aosym in ('s4', 's2ij'):
if mosym == 's2':
fmmm = _fpointer('AO2MOmmm_nr_s2_s2')
assert(icount == jcount)
ij_count = icount * (icount+1) // 2
elif icount <= jcount:
fmmm = _fpointer('AO2MOmmm_nr_s2_iltj')
else:
fmmm = _fpointer('AO2MOmmm_nr_s2_igtj')
else:
if icount <= jcount:
fmmm = _fpointer('AO2MOmmm_nr_s1_iltj')
else:
fmmm = _fpointer('AO2MOmmm_nr_s1_igtj')
nrow = eri.shape[0]
if out is None:
out = numpy.empty((nrow,ij_count))
else:
out = numpy.ndarray((nrow,ij_count), buffer=out)
fdrv = getattr(libao2mo, 'AO2MOnr_e2_drv')
pao_loc = ctypes.POINTER(ctypes.c_void_p)()
c_nbas = ctypes.c_int(0)
ftrans = _fpointer('AO2MOtranse1_nr_' + aosym)
fdrv(ftrans, fmmm,
out.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
mo_coeff.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nrow), ctypes.c_int(nao),
ctypes.c_int(i0), ctypes.c_int(icount),
ctypes.c_int(j0), ctypes.c_int(jcount),
pao_loc, c_nbas)
return out
# if out is not None, transform AO to MO in-place
# ao_loc has nbas+1 elements, last element in ao_loc == nao
def nr_e2_(eri, mo_coeff, shape, aosym='s1', mosym='s1', out=None,
ao_loc=None):
assert(eri.flags.c_contiguous)
assert(aosym in ('s4', 's2ij', 's2kl', 's1'))
assert(mosym in ('s2', 's1'))
mo_coeff = numpy.asfortranarray(mo_coeff)
nao = mo_coeff.shape[0]
k0, kc, l0, lc = shape
kl_count = kc * lc
if aosym in ('s4', 's2kl'):
if mosym == 's2':
fmmm = _fpointer('AO2MOmmm_nr_s2_s2')
assert(kc == lc)
kl_count = kc * (kc+1) // 2
elif kc <= lc:
fmmm = _fpointer('AO2MOmmm_nr_s2_iltj')
else:
fmmm = _fpointer('AO2MOmmm_nr_s2_igtj')
else:
if kc <= lc:
fmmm = _fpointer('AO2MOmmm_nr_s1_iltj')
else:
fmmm = _fpointer('AO2MOmmm_nr_s1_igtj')
nrow = eri.shape[0]
if out is None:
out = numpy.empty((nrow,kl_count))
else:
out = numpy.ndarray((nrow,kl_count), buffer=out)
if ao_loc is None:
pao_loc = ctypes.POINTER(ctypes.c_void_p)()
c_nbas = ctypes.c_int(0)
ftrans = _fpointer('AO2MOtranse2_nr_' + aosym)
else:
ao_loc = numpy.asarray(ao_loc, dtype=numpy.int32)
c_nbas = ctypes.c_int(ao_loc.shape[0]-1)
pao_loc = ao_loc.ctypes.data_as(ctypes.c_void_p)
ftrans = _fpointer('AO2MOsortranse2_nr_' + aosym)
fdrv = getattr(libao2mo, 'AO2MOnr_e2_drv')
fdrv(ftrans, fmmm,
out.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
mo_coeff.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nrow), ctypes.c_int(nao),
ctypes.c_int(k0), ctypes.c_int(kc),
ctypes.c_int(l0), ctypes.c_int(lc),
pao_loc, c_nbas)
return out
# if out is not None, transform AO to MO in-place
def r_e1_(intor, mo_coeff, shape, sh_range, atm, bas, env,
tao, aosym='s1', comp=1, ao2mopt=None, out=None):
assert(aosym in ('s4', 's2ij', 's2kl', 's1', 'a2ij', 'a2kl', 'a4ij',
'a4kl', 'a4'))
mo_coeff = numpy.asfortranarray(mo_coeff)
i0, icount, j0, jcount = shape
ij_count = icount * jcount
c_atm = numpy.asarray(atm, dtype=numpy.int32)
c_bas = numpy.asarray(bas, dtype=numpy.int32)
c_env = numpy.asarray(env)
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
klsh0, klsh1, nkl = sh_range
if icount <= jcount:
fmmm = _fpointer('AO2MOmmm_r_iltj')
else:
fmmm = _fpointer('AO2MOmmm_r_igtj')
if out is None:
out = numpy.empty((comp,nkl,ij_count), dtype=numpy.complex)
else:
        out = numpy.ndarray((comp,nkl,ij_count), dtype=numpy.complex,
buffer=out)
if ao2mopt is not None:
cao2mopt = ao2mopt._this
cintopt = ao2mopt._cintopt
cintor = ao2mopt._intor
else:
cao2mopt = pyscf.lib.c_null_ptr()
cintor = _fpointer(intor)
cintopt = _vhf.make_cintopt(c_atm, c_bas, c_env, intor)
tao = numpy.asarray(tao, dtype=numpy.int32)
fdrv = getattr(libao2mo, 'AO2MOr_e1_drv')
fill = _fpointer('AO2MOfill_r_' + aosym)
ftrans = _fpointer('AO2MOtranse1_r_' + aosym)
fdrv(cintor, fill, ftrans, fmmm,
out.ctypes.data_as(ctypes.c_void_p),
mo_coeff.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(klsh0), ctypes.c_int(klsh1-klsh0),
ctypes.c_int(nkl),
ctypes.c_int(i0), ctypes.c_int(icount),
ctypes.c_int(j0), ctypes.c_int(jcount),
ctypes.c_int(comp), cintopt, cao2mopt,
tao.ctypes.data_as(ctypes.c_void_p),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if ao2mopt is None:
libao2mo.CINTdel_optimizer(ctypes.byref(cintopt))
return out
# if out is not None, transform AO to MO in-place
# ao_loc has nbas+1 elements, last element in ao_loc == nao
def r_e2_(eri, mo_coeff, shape, tao, ao_loc, aosym='s1', out=None):
assert(eri.flags.c_contiguous)
assert(aosym in ('s4', 's2ij', 's2kl', 's1', 'a2ij', 'a2kl', 'a4ij',
'a4kl', 'a4'))
mo_coeff = numpy.asfortranarray(mo_coeff)
nao = mo_coeff.shape[0]
k0, kc, l0, lc = shape
kl_count = kc * lc
if kc <= lc:
fmmm = _fpointer('AO2MOmmm_r_iltj')
else:
fmmm = _fpointer('AO2MOmmm_r_igtj')
nrow = eri.shape[0]
if out is None:
out = numpy.empty((nrow,kl_count), dtype=numpy.complex)
else:
out = numpy.ndarray((nrow,kl_count), dtype=numpy.complex,
buffer=out)
tao = numpy.asarray(tao, dtype=numpy.int32)
ao_loc = numpy.asarray(ao_loc, dtype=numpy.int32)
c_nbas = ctypes.c_int(ao_loc.shape[0]-1)
ftrans = _fpointer('AO2MOsortranse2_r_' + aosym)
fdrv = getattr(libao2mo, 'AO2MOr_e2_drv')
fdrv(ftrans, fmmm,
out.ctypes.data_as(ctypes.c_void_p),
eri.ctypes.data_as(ctypes.c_void_p),
mo_coeff.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nrow), ctypes.c_int(nao),
ctypes.c_int(k0), ctypes.c_int(kc),
ctypes.c_int(l0), ctypes.c_int(lc),
tao.ctypes.data_as(ctypes.c_void_p),
ao_loc.ctypes.data_as(ctypes.c_void_p), c_nbas)
return out
def _get_num_threads():
libao2mo.omp_get_num_threads.restype = ctypes.c_int
nthreads = libao2mo.omp_get_num_threads()
return nthreads
# ij = i * (i+1) / 2 + j
def _extract_pair(ij):
i = int(numpy.sqrt(2*ij+.25) - .5 + 1e-7)
j = ij - i*(i+1)//2
return i,j
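# Sketch of a round-trip check for the triangular packing above: for i >= j,
# ij = i*(i+1)//2 + j, e.g. (i, j) = (3, 2) packs to 8 and unpacks back to
# (3, 2). Illustrative only, not used by the transformation drivers.
def _check_extract_pair(n=10):
    for i in range(n):
        for j in range(i + 1):
            assert _extract_pair(i * (i + 1) // 2 + j) == (i, j)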
|
[
"[email protected]"
] | |
548e197af8cd6dcb51d001c3700a9359ef7afc89
|
5a8214b3a452c574e6c883bf5d90ba58ba87c461
|
/leetcode/434.number-of-segments-in-a-string.py
|
056b6cffd5334e0bade58dda9fd8cc91cc2205e2
|
[] |
no_license
|
phlalx/algorithms
|
69a3c8519687816e3c6333ec12b40659d3e3167f
|
f4da5a5dbda640b9bcbe14cb60a72c422b5d6240
|
refs/heads/master
| 2023-02-03T10:30:30.181735 | 2020-12-26T09:47:38 | 2020-12-26T09:47:38 | 129,254,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
#
# @lc app=leetcode id=434 lang=python3
#
# [434] Number of Segments in a String
#
# https://leetcode.com/problems/number-of-segments-in-a-string/description/
#
# algorithms
# Easy (37.22%)
# Likes: 161
# Dislikes: 619
# Total Accepted: 63.8K
# Total Submissions: 171K
# Testcase Example: '"Hello, my name is John"'
#
# Count the number of segments in a string, where a segment is defined to be a
# contiguous sequence of non-space characters.
#
# Please note that the string does not contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
#
#
#
# @lc code=start
class Solution:
def countSegments(self, s: str) -> int:
res = 0
prev = ' '
for cur in s:
if cur != ' ' and prev == ' ':
res += 1
prev = cur
return res
# @lc code=end
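# Illustrative check, not part of the submitted solution; str.split() with
# no argument collapses runs of spaces, so len(s.split()) is an equivalent
# one-liner.
if __name__ == '__main__':
    assert Solution().countSegments("Hello, my name is John") == 5
    assert Solution().countSegments("   ") == 0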
|
[
"[email protected]"
] | |
b5e58ef918b7f031e9107ff500ff7bba05be784a
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/actrl/rulehitaghist1year.py
|
ea3e4ff07cf4d4dc755d3ea6bba0e0e576787ef6
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 33,656 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RuleHitAgHist1year(Mo):
"""
A class that represents historical aggregated statistics for rule hits in a 1 year sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.actrl.RuleHitAgHist1year", "rule hits")
counter = CounterMeta("revPkts", CounterCategory.COUNTER, "packets", "reverse hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "revPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "revPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "revPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "revPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "revPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "revPktsRate"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
counter = CounterMeta("egrPkts", CounterCategory.COUNTER, "packets", "egress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "egrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "egrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "egrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "egrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "egrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "egrPktsRate"
meta._counters.append(counter)
counter = CounterMeta("ingrPkts", CounterCategory.COUNTER, "packets", "ingress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "ingrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "ingrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "ingrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "ingrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "ingrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "ingrPktsRate"
meta._counters.append(counter)
meta.moClassName = "actrlRuleHitAgHist1year"
meta.rnFormat = "HDactrlRuleHitAg1year-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated rule hits stats in 1 year"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fv.RInfoHolder")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.actrl.RuleHitAgHist")
meta.rnPrefixes = [
('HDactrlRuleHitAg1year-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "egrPktsCum", "egrPktsCum", 7483, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "egress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsCum", prop)
prop = PropMeta("str", "egrPktsPer", "egrPktsPer", 7484, PropCategory.IMPLICIT_PERIODIC)
prop.label = "egress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsPer", prop)
prop = PropMeta("str", "egrPktsRate", "egrPktsRate", 7488, PropCategory.IMPLICIT_RATE)
prop.label = "egress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsRate", prop)
prop = PropMeta("str", "egrPktsSpct", "egrPktsSpct", 7485, PropCategory.IMPLICIT_SUSPECT)
prop.label = "egress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsSpct", prop)
prop = PropMeta("str", "egrPktsThr", "egrPktsThr", 7486, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "egress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("egrPktsThr", prop)
prop = PropMeta("str", "egrPktsTr", "egrPktsTr", 7487, PropCategory.IMPLICIT_TREND)
prop.label = "egress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsTr", prop)
prop = PropMeta("str", "index", "index", 5821, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "ingrPktsCum", "ingrPktsCum", 7544, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsCum", prop)
prop = PropMeta("str", "ingrPktsPer", "ingrPktsPer", 7545, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsPer", prop)
prop = PropMeta("str", "ingrPktsRate", "ingrPktsRate", 7549, PropCategory.IMPLICIT_RATE)
prop.label = "ingress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsRate", prop)
prop = PropMeta("str", "ingrPktsSpct", "ingrPktsSpct", 7546, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsSpct", prop)
prop = PropMeta("str", "ingrPktsThr", "ingrPktsThr", 7547, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("ingrPktsThr", prop)
prop = PropMeta("str", "ingrPktsTr", "ingrPktsTr", 7548, PropCategory.IMPLICIT_TREND)
prop.label = "ingress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsTr", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 24188, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 24189, PropCategory.IMPLICIT_PERIODIC)
prop.label = "hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 24193, PropCategory.IMPLICIT_RATE)
prop.label = "hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 24190, PropCategory.IMPLICIT_SUSPECT)
prop.label = "hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 24191, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 24192, PropCategory.IMPLICIT_TREND)
prop.label = "hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "revPktsCum", "revPktsCum", 24243, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "reverse hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsCum", prop)
prop = PropMeta("str", "revPktsPer", "revPktsPer", 24244, PropCategory.IMPLICIT_PERIODIC)
prop.label = "reverse hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsPer", prop)
prop = PropMeta("str", "revPktsRate", "revPktsRate", 24248, PropCategory.IMPLICIT_RATE)
prop.label = "reverse hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsRate", prop)
prop = PropMeta("str", "revPktsSpct", "revPktsSpct", 24245, PropCategory.IMPLICIT_SUSPECT)
prop.label = "reverse hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsSpct", prop)
prop = PropMeta("str", "revPktsThr", "revPktsThr", 24246, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "reverse hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("revPktsThr", prop)
prop = PropMeta("str", "revPktsTr", "revPktsTr", 24247, PropCategory.IMPLICIT_TREND)
prop.label = "reverse hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsTr", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudIgw", "From fv:Ctx to hcloud:Igw", "cobra.model.hcloud.Igw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudVgw", "From fv:Ctx to hcloud:Vgw", "cobra.model.hcloud.Vgw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToConsVzBrCP", "From cloud ExtEPg to Consumer Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToProvVzBrCP", "From cloud ExtEPg to Provider Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToConsVzBrCP", "From EPg to Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToProvVzBrCP", "From EPg to Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudExtEPg", "From fvCtx (VRF) to cloudExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToFvCtx", "cloud:ExtEPg to fv:Ctx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHCloudEndPoint", "cloud:ExtEPg to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHcloudSecurityGroup", "cloud:ExtEPg to hcloud:SecurityGroup", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHCloudCtx", "From cloud ExtEPg to VPCs hCloudCtx", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudRegion", "From fvCtx (VRF) to CloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudCsr", "From fvCtx (VRF) to hcloudCsr (CSR)", "cobra.model.hcloud.Csr"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToHcloudSecurityGroup", "cloud:EPg to hcloud:SecurityGroup", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToFvCtx", "cloud:EPg to fv:Ctx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudEndPoint", "From fvCtx (VRF) to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToHCloudEndPoint", "cloud:EPg to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudCtx", "From fvCtx (VRF) to hcloudCtx (VPC)", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudCtxProfile", "From fvCtx (VRF) to cloudCtxProfile", "cobra.model.cloud.CtxProfile"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudEPg", "From fvCtx (VRF) to cloud EPg", "cobra.model.cloud.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToHCloudCtx", "From cloud EPg to VPCs hCloudCtx", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("cloudEpgToApp", "cloudEpgToApp", "cobra.model.cloud.App"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToRegion", "Vrf to cloud Region", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ATgToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AEPgToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("MgmtInstPToNode", "External Management Network EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("OoBToNode", "Out-of-band Management EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("InBToNode", "Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("EPgToNwIf", "Interface", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
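# Illustrative query sketch (not generated code; assumes an authenticated
# cobra.mit.access.MoDirectory instance `md` connected to an APIC):
#
#     stats = md.lookupByClass('actrlRuleHitAgHist1year')
#     for mo in stats:
#         print(mo.dn, mo.pktsCum, mo.pktsRate)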
|
[
"[email protected]"
] | |
694353b37809e6c6e79c255f2e73c685974086a1
|
dd65b9bc9475a6cc58817fd45c078e5a6abae241
|
/VISION/FT700/ch11/11-1.py
|
9c19edae1bc84ef389208f2341f5ce56b5c947e7
|
[] |
no_license
|
jumbokh/gcp_class
|
5b68192ab4ad091362d89ad667c64443b3b095bb
|
0a8e2663bfb5b01ce20146da178fa0c9bd7c6625
|
refs/heads/master
| 2021-10-22T09:22:04.634899 | 2021-10-21T12:46:10 | 2021-10-21T12:46:10 | 228,617,096 | 8 | 7 | null | 2021-08-25T15:55:30 | 2019-12-17T12:58:17 |
Python
|
UTF-8
|
Python
| false | false | 465 |
py
|
import cv2
def get_edge(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    blur = cv2.GaussianBlur(gray, (13, 13), 0)    # apply Gaussian blur
    canny = cv2.Canny(blur, 50, 150)              # detect edges
    return canny
#----------------------------------------------#
img = cv2.imread('road.jpg')  # read the image
edge = get_edge(img)
cv2.imshow('Edge', edge)      # display the edge map
cv2.waitKey(0)
cv2.destroyAllWindows()
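# Note on get_edge above: 50 and 150 are the Canny hysteresis thresholds --
# gradients above 150 count as strong edges, and those between 50 and 150
# are kept only when connected to a strong edge. 'road.jpg' is assumed to
# sit next to this script.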
|
[
"[email protected]"
] | |
230fda537b2e0091fc4d861174f5188b3647a1ea
|
0daf6763c960cd898e9bb5612b1314d7e34b8870
|
/sorting/data.py
|
a062769b4b09c3f3f46732a2430c4454d8f6e24c
|
[
"MIT"
] |
permissive
|
evanthebouncy/nnhmm
|
a6ba2a1f0ed2c90a0188de8b5e162351e6668565
|
acd76edaa1b3aa0c03d39f6a30e60d167359c6ad
|
refs/heads/master
| 2021-01-12T02:27:32.814908 | 2017-04-01T05:01:24 | 2017-04-01T05:01:24 | 77,956,435 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,535 |
py
|
import numpy as np
from draw import *
# total number of observations
OBS_SIZE = 20
# length of the field i.e. LxL field
N_BATCH = 50
L = 8
# ------------------------------------------------------------------ helpers
# turn a coordinate to a pair of numpy objects
def vectorize(coords):
retX, retY = np.zeros([L]), np.zeros([L])
retX[coords[0]] = 1.0
retY[coords[1]] = 1.0
return retX, retY
# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
if hasattr(lst1, '__len__') and len(lst1) > 0:
return [len(lst1), show_dim(lst1[0])]
else:
try:
return lst1.get_shape()
except:
try:
return lst1.shape
except:
return type(lst1)
# --------------------------------------------------------------- modelings
# generate the hidden state
def gen_X():
return np.random.permutation(L)
def gen_A(X):
return np.argmin(X), np.argmax(X)
def mk_query(X):
def query(O):
Ox, Oy = O
if X[Ox] < X[Oy]:
return [1.0, 0.0]
return [0.0, 1.0]
return query
def gen_O(X):
query = mk_query(X)
Ox = np.random.randint(0, L)
Oy = np.random.randint(0, L)
O = (Ox, Oy)
return O, query(O)
# data of the form of
# A: the answer we're trying to infer
# obs: the OBS_SIZE number of observations
# divided into obs_x and obs_y
# obs_tfs: the true/false of these observations
# all variables are a list of tensors of dimention [n_batch x ...]
def gen_data(n_batch = N_BATCH):
# Answer
new_ob_x = []
new_ob_y = []
new_ob_tf = []
obs_x = [[] for i in range(OBS_SIZE)]
obs_y = [[] for i in range(OBS_SIZE)]
obs_tfs = [[] for i in range(OBS_SIZE)]
orig_x = []
for bb in range(n_batch):
# generate a hidden variable X
perm = gen_X()
orig_x.append(perm)
new_obb_xy, new_obb_tf = gen_O(perm)
new_obb_x, new_obb_y = vectorize(new_obb_xy)
new_ob_x.append(new_obb_x)
new_ob_y.append(new_obb_y)
new_ob_tf.append(new_obb_tf)
# generate observations for this hidden variable x
for ob_idx in range(OBS_SIZE):
_ob_coord, _ob_lab = gen_O(perm)
_ob_x, _ob_y = vectorize(_ob_coord)
obs_x[ob_idx].append(_ob_x)
obs_y[ob_idx].append(_ob_y)
obs_tfs[ob_idx].append(_ob_lab)
return np.array(obs_x, np.float32),\
np.array(obs_y, np.float32),\
np.array(obs_tfs, np.float32),\
np.array(new_ob_x, np.float32),\
np.array(new_ob_y, np.float32),\
np.array(new_ob_tf, np.float32),\
orig_x
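if __name__ == '__main__':
    # Illustrative shape check (assumes this module's own imports resolve):
    # obs arrays are [OBS_SIZE x n_batch x ...], new_ob arrays are per batch.
    obs_x, obs_y, obs_tfs, new_x, new_y, new_tf, orig = gen_data(n_batch=4)
    print(obs_x.shape, obs_tfs.shape, new_x.shape, new_tf.shape)
    # expected: (20, 4, 8) (20, 4, 2) (4, 8) (4, 2)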
|
[
"[email protected]"
] | |
0c3826235cc92f3219c43f5b9bce8807ac403ebb
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Dsz/PyScripts/Lib/dsz/mca/status/cmd/handles/types.py
|
6a1f4c77e8aa675f29091252ea44fdbd6e6ae2b8
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null |
UTF-8
|
Python
| false | false | 1,171 |
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: types.py
from types import *
MSG_KEY_PARAMS_QUERY = 65536
MSG_KEY_PARAMS_QUERY_PROCESS_ID = 65537
MSG_KEY_PARAMS_QUERY_ALL = 65538
MSG_KEY_PARAMS_QUERY_MEMORY = 65539
MSG_KEY_PARAMS_DUPLICATE = 131072
MSG_KEY_PARAMS_DUPLICATE_PROCESS_ID = 131073
MSG_KEY_PARAMS_DUPLICATE_HANDLE = 131074
MSG_KEY_PARAMS_CLOSE = 196608
MSG_KEY_PARAMS_CLOSE_PROCESS_ID = 196609
MSG_KEY_PARAMS_CLOSE_HANDLE = 196610
MSG_KEY_RESULT_HANDLE = 1114112
MSG_KEY_RESULT_HANDLE_PROCESS_ID = 1114113
MSG_KEY_RESULT_HANDLE_HANDLE = 1114114
MSG_KEY_RESULT_HANDLE_RIGHTS = 1114115
MSG_KEY_RESULT_HANDLE_TYPE = 1114116
MSG_KEY_RESULT_HANDLE_METADATA = 1114117
MSG_KEY_RESULT_DUPLICATE = 1179648
MSG_KEY_RESULT_DUPLICATE_ORIG_PROCESS_ID = 1179649
MSG_KEY_RESULT_DUPLICATE_ORIG_HANDLE = 1179650
MSG_KEY_RESULT_DUPLICATE_NEW_PROCESS_ID = 1179651
MSG_KEY_RESULT_DUPLICATE_NEW_HANDLE = 1179652
MSG_KEY_RESULT_CLOSE = 1245184
MSG_KEY_RESULT_CLOSE_PROCESS_ID = 1245185
MSG_KEY_RESULT_CLOSE_HANDLE = 1245186
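# Observation (not from the decompiled source): each key appears to encode a
# message group in its upper bits with field offsets in the low bits, e.g.
# MSG_KEY_PARAMS_QUERY == 0x10000 with fields 0x10001-0x10003, and
# MSG_KEY_RESULT_HANDLE == 0x110000 with fields 0x110001-0x110005.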
|
[
"[email protected]"
] | |
83c0df6ea2b7386857e988a1a82a8102befb33e4
|
2ce3ef971a6d3e14db6615aa4da747474d87cc5d
|
/练习/python框架/flask_test/flask_demo/flask_test/flask_sql_test.py
|
fe3af6542c189d8a31672803529966213bef0dfc
|
[] |
no_license
|
JarvanIV4/pytest_hogwarts
|
40604245807a4da5dbec2cb189b57d5f76f5ede3
|
37d4bae23c030480620897583f9f5dd69463a60c
|
refs/heads/master
| 2023-01-07T09:56:33.472233 | 2020-11-10T15:06:13 | 2020-11-10T15:06:13 | 304,325,109 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,079 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/02/16
# @Author : Wind
from 练习.python框架.flask_test.flask_demo.flask_SQLalchemy_demo import *
class FlaskSQL:
    def __init__(self):
        db.drop_all()    # drop all tables
        db.create_all()  # create all tables
        global role, user
        role = Role(name='admin')
        db.session.add(role)
        db.session.commit()
        user = User(name='heima', role_id=role.id)
        db.session.add(user)
        db.session.commit()

    def add(self):
        # insert data
        pass
        # role = Role(name='admin')
        # db.session.add(role)
        # db.session.commit()
        #
        # user = User(name='heima', role_id=role.id)
        # db.session.add(user)
        # db.session.commit()

    def update(self):
        # update data
        user.name = 'chengxuyuan'
        db.session.commit()

    def delete(self):
        # delete data
        db.session.delete(user)
        db.session.commit()


if __name__ == '__main__':
    flask = FlaskSQL()
    # flask.add()
    flask.update()
    # flask.delete()
|
[
"[email protected]"
] | |
9885a2d5509ef32b3fa2d55fe3d536064ae6af56
|
7acb2024b2315c08194eb377e7fba9ad358d3192
|
/logic_normal.py
|
963cd0301f8e08b8908fa7e926d665629abb2e3b
|
[] |
no_license
|
wnd2da/webtoon_daum
|
8aab816636c1c50d624059b8e3bc5808994bd7a2
|
6e1dfa383af9441d6981c060dc1adfb2694f7de2
|
refs/heads/master
| 2022-10-03T23:17:32.417497 | 2020-06-11T16:52:24 | 2020-06-11T16:52:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,407 |
py
|
# -*- coding: utf-8 -*-
#########################################################
# python
import os
import datetime
import traceback
import urllib
from datetime import datetime
# third-party
from sqlalchemy import desc
from sqlalchemy import or_, and_, func, not_
import requests
from lxml import html
# sjva common
from framework import app, db, scheduler, path_app_root, celery
from framework.job import Job
from framework.util import Util
# package
from .plugin import logger, package_name
from .model import ModelSetting, ModelItem
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language' : 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'Referer' : ''
}
#########################################################
class LogicNormal(object):
session = requests.Session()
@staticmethod
def scheduler_function():
LogicNormal.scheduler_function_db()
try:
url = 'http://webtoon.daum.net/data/pc/webtoon/list_serialized/%s' % datetime.now().strftime('%A').lower()[0:3]
data = requests.get(url).json()
for item in data['data']:
nickname = item['nickname']
logger.debug('--- %s' % nickname)
toon_data = LogicNormal.analysis(nickname)
#logger.debug(toon_data)
if toon_data['status'] != '200':
continue
if not ModelSetting.get_bool('all_episode_download'):
LogicNormal.add(toon_data['latestEpisode'], toon_data)
else:
for tmp in toon_data['episodes']:
LogicNormal.add(tmp, toon_data)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def add(data, toon_data):
from .logic_queue import LogicQueue
entity = db.session.query(ModelItem).filter_by(episode_id=data['episode_id']).first()
if entity is None and data['price'] == 0:
whitelists = ModelSetting.get_list('whitelist')
if whitelists:
flag = False
for t in whitelists:
if toon_data['title'].replace(' ', '').find(t) != -1:
flag = True
logger.debug('WHITE : %s', toon_data['title'])
break
if flag:
entity = LogicQueue.add_queue(data['episode_id'], data['episode_idx'], data['episode_title'], toon_data['title'], toon_data['nickname'])
return
blacklists = ModelSetting.get_list('blacklist')
if blacklists:
for t in blacklists:
if toon_data['title'].replace(' ', '').find(t) != -1:
logger.debug('BALCK : %s', toon_data['title'])
return
entity = LogicQueue.add_queue(data['episode_id'], data['episode_idx'], data['episode_title'], toon_data['title'], toon_data['nickname'])
@staticmethod
def scheduler_function_db():
entities = db.session.query(ModelItem).filter(ModelItem.status<10).all()
from .logic_queue import LogicQueue
for e in entities:
e.status_kor = u'대기'
entity = LogicQueue.add_queue(e.episode_id, e.episode_idx, e.episode_title, e.toon_title, e.toon_nickname)
@staticmethod
def get_html(url, referer=None, stream=False):
try:
if LogicNormal.session is None:
LogicNormal.session = requests.session()
#logger.debug('get_html :%s', url)
headers['Referer'] = '' if referer is None else referer
page_content = LogicNormal.session.get(url, headers=headers)
data = page_content.content
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return data
@staticmethod
def entity_update(entity):
import plugin
plugin.socketio_callback('queue_one', entity, encoding=False)
@staticmethod
def download(entity):
try:
entity['download_count'] += 1
entity['status'] = 1
            entity['str_status'] = 'waiting'
LogicNormal.entity_update(entity)
url = 'http://webtoon.daum.net/data/pc/webtoon/viewer_images/%s' % (entity['episode_id'])
data = requests.get(url).json()
            entity['str_status'] = 'analyzing'
LogicNormal.entity_update(entity)
dirname = ModelSetting.get('download_path')
if ModelSetting.get_bool('use_title_folder'):
dirname = os.path.join(dirname, Util.change_text_for_use_filename(entity['toon_title']))
#if not os.path.exists(dirname):
# os.makedirs(dirname)
tmp = u'%s %s %s' % (entity['episode_idx'].zfill(3), entity['toon_title'], entity['episode_title'])
dirname = os.path.join(dirname, Util.change_text_for_use_filename(tmp))
if not os.path.exists(dirname):
os.makedirs(dirname)
entity['filename'] = '%s.zip' % dirname
if os.path.exists(entity['filename']):
entity['status'] = 12
                entity['str_status'] = 'file exists'
LogicNormal.entity_update(entity)
else:
                entity['str_status'] = 'downloading'
LogicNormal.entity_update(entity)
count = len(data['data'])
for idx, tmp in enumerate(data['data']):
filename = os.path.join(dirname, str(idx+1).zfill(2) + '.jpg')
image_data = requests.get(tmp['url'], headers=headers, stream=True)
with open(filename, 'wb') as handler:
handler.write(image_data.content)
                    entity['str_status'] = 'downloading %s / %s' % (idx+1, count)
entity['percent'] = int(100.0 * (idx+1) / count)
LogicNormal.entity_update(entity)
Util.makezip(dirname)
entity['status'] = 11
                entity['str_status'] = 'done'
LogicNormal.entity_update(entity)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
entity['status'] = 2
            entity['str_status'] = 'failed'
if entity['download_count'] >= 5:
entity['status'] = 13
                entity['str_status'] = 'retry limit exceeded'
LogicNormal.entity_update(entity)
ModelItem.save_as_dict(entity)
@staticmethod
def analysis(nickname):
ret = {}
try:
url = 'http://webtoon.daum.net/data/pc/webtoon/view/%s' % (nickname)
data = requests.get(url).json()
#logger.debug(data)
ret['status'] = data['result']['status']
if ret['status'] != '200':
ret['ret'] = 'error'
ret['log'] = data['result']['message']
return ret
ret['title'] = data['data']['webtoon']['title']
ret['nickname'] = data['data']['webtoon']['nickname']
ret['id'] = data['data']['webtoon']['id']
ret['image'] = data['data']['webtoon']['pcThumbnailImage']['url']
ret['desc'] = data['data']['webtoon']['introduction']
try:
ret['author'] = data['data']['webtoon']['cp']['name']
except:
ret['author'] = ''
ret['episodes'] = []
for epi in data['data']['webtoon']['webtoonEpisodes']:
try:
entity = {}
entity['episode_id'] = epi['id']
entity['episode_idx'] = epi['episode']
entity['episode_title'] = epi['title']
entity['image'] = epi['thumbnailImage']['url']
entity['price'] = epi['price']
entity['date'] = '%s-%s-%s' % (epi['dateCreated'][:4], epi['dateCreated'][4:6], epi['dateCreated'][6:8])
ret['episodes'].append(entity)
except:
pass
            ret['latestEpisode'] = {
                'episode_id': data['data']['webtoon']['latestWebtoonEpisode']['id'],
                'episode_idx': data['data']['webtoon']['latestWebtoonEpisode']['episode'],
                'episode_title': data['data']['webtoon']['latestWebtoonEpisode']['title'],
                'price': data['data']['webtoon']['latestWebtoonEpisode']['price'],
            }
ret['ret'] = 'success'
LogicNormal.current_data = data
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
ret['ret'] = 'exception'
ret['log'] = str(e)
return ret
|
[
"[email protected]"
] | |
64a9454f5620d0735efc3b811ed47a1cceb58908
|
fc1c1e88a191b47f745625688d33555901fd8e9a
|
/meraki_sdk/models/update_device_switch_port_model.py
|
706fb93e1ed903692e8113465e3b2e2c937d86e3
|
[
"MIT",
"Python-2.0"
] |
permissive
|
RaulCatalano/meraki-python-sdk
|
9161673cfd715d147e0a6ddb556d9c9913e06580
|
9894089eb013318243ae48869cc5130eb37f80c0
|
refs/heads/master
| 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,244 |
py
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdateDeviceSwitchPortModel(object):
"""Implementation of the 'updateDeviceSwitchPort' model.
TODO: type model description here.
Attributes:
name (string): The name of the switch port
tags (string): The tags of the switch port
enabled (bool): The status of the switch port
mtype (string): The type of the switch port ("access" or "trunk")
vlan (int): The VLAN of the switch port. A null value will clear the
value set for trunk ports.
voice_vlan (int): The voice VLAN of the switch port. Only applicable
to access ports.
allowed_vlans (string): The VLANs allowed on the switch port. Only
applicable to trunk ports.
poe_enabled (bool): The PoE status of the switch port
isolation_enabled (bool): The isolation status of the switch port
rstp_enabled (bool): The rapid spanning tree protocol status
stp_guard (string): The state of the STP guard ("disabled", "Root
guard", "BPDU guard", "Loop guard")
access_policy_number (int): The number of the access policy of the
switch port. Only applicable to access ports.
link_negotiation (string): The link speed for the switch port
port_schedule_id (string): The ID of the port schedule. A value of
null will clear the port schedule.
udld (UdldEnum): The action to take when Unidirectional Link is
detected (Alert only, Enforce). Default configuration is Alert
only.
mac_whitelist (list of string): Only devices with MAC addresses
specified in this list will have access to this port. Up to 20 MAC
addresses can be defined. To disable MAC whitelist, set
accessPolicyNumber to null.
sticky_mac_whitelist (list of string): The initial list of MAC
addresses for sticky Mac whitelist. To reset Sticky MAC whitelist,
set accessPolicyNumber to null.
sticky_mac_whitelist_limit (int): The maximum number of MAC addresses
for sticky MAC whitelist.
storm_control_enabled (bool): The storm control status of the switch
port
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"tags":'tags',
"enabled":'enabled',
"mtype":'type',
"vlan":'vlan',
"voice_vlan":'voiceVlan',
"allowed_vlans":'allowedVlans',
"poe_enabled":'poeEnabled',
"isolation_enabled":'isolationEnabled',
"rstp_enabled":'rstpEnabled',
"stp_guard":'stpGuard',
"access_policy_number":'accessPolicyNumber',
"link_negotiation":'linkNegotiation',
"port_schedule_id":'portScheduleId',
"udld":'udld',
"mac_whitelist":'macWhitelist',
"sticky_mac_whitelist":'stickyMacWhitelist',
"sticky_mac_whitelist_limit":'stickyMacWhitelistLimit',
"storm_control_enabled":'stormControlEnabled'
}
def __init__(self,
name=None,
tags=None,
enabled=None,
mtype=None,
vlan=None,
voice_vlan=None,
allowed_vlans=None,
poe_enabled=None,
isolation_enabled=None,
rstp_enabled=None,
stp_guard=None,
access_policy_number=None,
link_negotiation=None,
port_schedule_id=None,
udld=None,
mac_whitelist=None,
sticky_mac_whitelist=None,
sticky_mac_whitelist_limit=None,
storm_control_enabled=None):
"""Constructor for the UpdateDeviceSwitchPortModel class"""
# Initialize members of the class
self.name = name
self.tags = tags
self.enabled = enabled
self.mtype = mtype
self.vlan = vlan
self.voice_vlan = voice_vlan
self.allowed_vlans = allowed_vlans
self.poe_enabled = poe_enabled
self.isolation_enabled = isolation_enabled
self.rstp_enabled = rstp_enabled
self.stp_guard = stp_guard
self.access_policy_number = access_policy_number
self.link_negotiation = link_negotiation
self.port_schedule_id = port_schedule_id
self.udld = udld
self.mac_whitelist = mac_whitelist
self.sticky_mac_whitelist = sticky_mac_whitelist
self.sticky_mac_whitelist_limit = sticky_mac_whitelist_limit
self.storm_control_enabled = storm_control_enabled
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
tags = dictionary.get('tags')
enabled = dictionary.get('enabled')
mtype = dictionary.get('type')
vlan = dictionary.get('vlan')
voice_vlan = dictionary.get('voiceVlan')
allowed_vlans = dictionary.get('allowedVlans')
poe_enabled = dictionary.get('poeEnabled')
isolation_enabled = dictionary.get('isolationEnabled')
rstp_enabled = dictionary.get('rstpEnabled')
stp_guard = dictionary.get('stpGuard')
access_policy_number = dictionary.get('accessPolicyNumber')
link_negotiation = dictionary.get('linkNegotiation')
port_schedule_id = dictionary.get('portScheduleId')
udld = dictionary.get('udld')
mac_whitelist = dictionary.get('macWhitelist')
sticky_mac_whitelist = dictionary.get('stickyMacWhitelist')
sticky_mac_whitelist_limit = dictionary.get('stickyMacWhitelistLimit')
storm_control_enabled = dictionary.get('stormControlEnabled')
# Return an object of this model
return cls(name,
tags,
enabled,
mtype,
vlan,
voice_vlan,
allowed_vlans,
poe_enabled,
isolation_enabled,
rstp_enabled,
stp_guard,
access_policy_number,
link_negotiation,
port_schedule_id,
udld,
mac_whitelist,
sticky_mac_whitelist,
sticky_mac_whitelist_limit,
storm_control_enabled)
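# Illustrative round-trip with hypothetical values (not from the Meraki API
# spec); note the API's 'type' key maps to the 'mtype' attribute:
#
#     payload = {'name': 'Uplink', 'type': 'trunk', 'vlan': 100,
#                'allowedVlans': '1,100-110'}
#     port = UpdateDeviceSwitchPortModel.from_dictionary(payload)
#     assert port.mtype == 'trunk'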
|
[
"[email protected]"
] | |
5c1ab956338dcb9308279fad56bf1b39cb5b5de7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2710/60727/295696.py
|
6fe2ad78e9c68b5c106f342fd469c6299ebeedfb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
arr = list(map(int, input().split()))
n, q = arr[0], arr[1]
arr, res = [], []
for i in range(0, q):
arr.append(input().split())
for i in range(0, q):
t = arr[i]
if t[0] == 'M':
res.append((int(t[1]), int(t[2])))
elif t[0] == 'D':
ans = n + 1
for item in res:
if item[0] <= int(t[1]) and item[1] >= int(t[2]):
ans = min(ans, item[1])
print(ans if ans < n + 1 else -1)
|
[
"[email protected]"
] | |
0429d5df3473aeb93f6ac3d93454a2eb5ff3e162
|
3fac68967637842325cc8242caa7910fc22e759d
|
/challenges/gcd.py
|
cec9be158405f150d5cde1d724e44e0e0bd97302
|
[] |
no_license
|
fox016/learnpython
|
31c947274e025488579f226f7931382aebf9a9d4
|
cd1f8c4d31094ad04932a77ea0e0df88788f5328
|
refs/heads/master
| 2021-01-11T21:07:54.785903 | 2017-04-13T17:18:33 | 2017-04-13T17:18:33 | 79,253,425 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
from math import gcd  # the original called gcd() without importing or defining it

print(gcd(108,48))
print(gcd(9823759873245674572938,23897856))
print(gcd(52129982331677168,3258123895729823))
print(gcd(3742284139568368,3274498622122322))
print(gcd(47156645998656522469911,9100405368161785038053))
print(gcd(1617470750160875729262056498097501089239401390,312143478101221631962853008404780911958480970))
print(gcd(5599495879952728975999543669592960035350709387033509,1080604468061052960280613690623202813839610583462607))
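
# The calls above rely on math.gcd (imported above); a hand-rolled Euclidean
# version -- a sketch of what this challenge likely intended -- looks like:
def gcd_euclid(a, b):
    while b:
        a, b = b, a % b
    return a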
|
[
"[email protected]"
] | |
0736cc76a26fb7b580d93517031eadc500ddf4bf
|
f662a786ca7bcedaec3ff5d488ce28f5c455da93
|
/source/conf.py
|
3280c79eb86f523eaf6210317dfe0d7982b5328a
|
[
"MIT"
] |
permissive
|
AppImageX/standards
|
e4a058df188af948de8025e962cdd0d8c9f967e3
|
cf25225a246b00ab2924e90b8bb464e2be270049
|
refs/heads/master
| 2023-02-10T03:03:14.753311 | 2020-12-27T23:33:12 | 2020-12-28T19:07:54 | 324,832,510 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,414 |
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "AppImageX Standards"
copyright = "2020, The AppImageX Team"
author = "The AppImageX Team"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx_last_updated_by_git",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"display_version": True,
"sticky_navigation": True,
"includehidden": True,
"collapse_navigation": True,
"titles_only": True,
"prev_next_buttons_location": "both",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# try to fetch current Git commit ID from the environment
commit = os.environ.get("GITHUB_SHA", os.environ.get("GIT_COMMIT", None))
# if this is not possible for some reason, try to fetch it via the git command
if not commit:
import subprocess
try:
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode().split()[0]
except subprocess.CalledProcessError:
commit = "<not available>"
# make sure to use a short commit, but don't mangle the "<not available>" sentinel
if commit != "<not available>":
    commit = commit[:7]
html_context = {
"display_github": True,
"github_user": "AppImage",
"github_repo": "docs.appimage.org",
"github_version": "master/source/",
"commit": commit,
}
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# apply some subtle changes to the selected theme via custom CSS file
def setup(app):
app.add_stylesheet("css/custom.css")
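
# Note (an aside, not part of the original config): Sphinx 1.8 renamed
# add_stylesheet() to add_css_file(); the old name still works, with a
# deprecation warning, on the Sphinx 3.x releases this 2020-era config targets.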
|
[
"[email protected]"
] | |
c1278e40a748f85fd99e5abac06ec8c0a87ec483
|
09a8648805c390594be0908da3188f287dedc471
|
/src/practices/github/handle_beautifulsoup_1.py
|
990c8181860ac65fa8e346a7de8af0c82f4db7d2
|
[
"Apache-2.0"
] |
permissive
|
lgonline/mp
|
9d17abbb41ff42fbaf1666059504e2377485c6a9
|
21ef1bfb2feacf6a7abda858c083e0c49878f889
|
refs/heads/master
| 2020-12-29T02:36:32.332387 | 2019-11-16T03:02:02 | 2019-11-16T03:02:02 | 44,308,720 | 1 | 1 | null | 2015-10-20T16:24:08 | 2015-10-15T09:50:46 |
Python
|
UTF-8
|
Python
| false | false | 1,186 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/7 19:10
# @Author : liugang9
# @Email : [email protected]
# @File : handle_beautifulsoup_1.py
# @Software: PyCharm
# @license: Apache Licence
# @contact: [email protected]
"""
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
def useUrllibGetWebpage():
html = urlopen('https://www.baidu.com')
print(html.read())
def useBeautifulSoupGetWebpage():
url = 'https://www.baidu.com'
webpage = urlopen(url)
soup = BeautifulSoup(webpage,'html.parser')
print(soup)
def getBookFromDouban():
url = 'https://www.douban.com/tag/%E5%B0%8F%E8%AF%B4/?focus=book'
soup = BeautifulSoup(urlopen(url),'html.parser')
# book_div = soup.find(attrs={'id':'book'})
books = soup.findAll(attrs={'class':'title'})
# print(type(books))
# for book in books:
# print(book)
for book in books:
clear_books = re.findall(r'>(\S+)<',str(book))
print(clear_books)
# for mybook in clear_books:
# print(mybook)
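
# A sturdier variant (a sketch): BeautifulSoup's get_text() avoids regexing over
# the str() form of each tag.
def get_book_titles(url):
    soup = BeautifulSoup(urlopen(url), 'html.parser')
    return [tag.get_text(strip=True) for tag in soup.findAll(attrs={'class': 'title'})]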
if __name__ == '__main__':
    # useUrllibGetWebpage()
getBookFromDouban()
|
[
"[email protected]"
] | |
20cd6063dfa85464b0fe6949728b420cf2de23ff
|
55c75df9dc3a5479a7524c8e2f6546e113d2d89c
|
/src/gamesbyexample/magichexagon.py
|
39dbfaacc9376ed100c74e20234ba604b1980a43
|
[
"MIT"
] |
permissive
|
spp2/PythonStdioGames
|
4b9bef97fef7dc84fc4c09b2585298cdab865c6c
|
7edc6a07ef816a44579800e773f30217541971fa
|
refs/heads/master
| 2022-10-04T02:14:50.789665 | 2020-06-02T16:03:35 | 2020-06-02T16:03:35 | 268,824,285 | 0 | 0 |
MIT
| 2020-06-02T15:18:41 | 2020-06-02T14:31:53 |
Python
|
UTF-8
|
Python
| false | false | 6,503 |
py
|
"""Magic Hexagon, by Al Sweigart [email protected]
Place numbers in a hexagon so each row adds up to 38.
More info at https://en.wikipedia.org/wiki/Magic_hexagon
More info at https://www.youtube.com/watch?v=ZkVSRwFWjy0
This and other games are available at https://nostarch.com/XX
Tags: large, game, puzzle game, board game"""
__version__ = 0
import sys
# Print the title and instructions:
print('''Magic Hexagon, by Al Sweigart [email protected]
Place the numbers 1 to 19 on spaces A through S such that all 15
horizontal and diagonal rows add up to 38. The unused numbers are
stored in the Z box until you place them.
We'll start the board with 3 and 17 placed.
''')
input('Press Enter to begin...')
# A large, multi-line string that acts as a template for the game board:
# You can copy/paste this from https://pastebin.com/raw/h9ufKzSz
boardTemplate = r"""Sum to 38: {29} {30} {31}
_ / _ / _ /
/ \/ / \/ / \/ {32}
/ \ / \ / \ / +-Space Map-+
| {0} | {1} | {2} |--/-----{19} | A B C |
/ \ / \ / \ / \/ {33} | D E F G |
/ \ / \ / \ / \ / | H I J K L |
| {3} | {4} | {5} | {6} |--/--{20} | M N O P |
/ \ / \ / \ / \ / \/ | Q R S |
/ \ / \ / \ / \ / \ +-----------+
| {7} | {8} | {9} | {10} | {11} |--{21}
\ / \ / \ / \ / \ / +-----Z-----+
\ / \ / \ / \ / \ /\ |{34} {35} {36} {37}|
| {12} | {13} | {14} | {15} |--\--{22} |{38} {39} {40} {41}|
\ / \ / \ / \ / \ |{42} {43} {44} {45}|
\ / \ / \ / \ /\ {24} |{46} {47} {48} {49}|
| {16} | {17} | {18} |--\-----{23} |{50} {51} {52} |
\ / \ / \ / \ +-----------+
\_/\ \_/\ \_/\ {25}
\ \ \
{28} {27} {26}"""
# The hex board starts off with 3 and 17 placed in A and B:
board = {}
for space in 'ABCDEFGHIJKLMNOPQRS':
board[space] = 0 # Set the space to blank (that is, 0).
board['A'] = 3 # Start with 3 in space A.
board['B'] = 17 # Start with 17 in space B.
# The unused numbers box starts with integers 1 to 19, except 3 and 17:
unusedNums = set()
for i in range(1, 20):
unusedNums.add(i)
unusedNums.remove(3)
unusedNums.remove(17)
while True: # Main game loop.
rowSums = {} # The keys are row numbers, value is the row's sum.
# ROW NUMBERING:
# 12 14
# 11 / 13/15
# / / / / /
# A B C-/-/--1
# D E F G-/---2
# H I J K L----3
# M N O P-\---4
# Q R S-\-6--5
# \ \ \ 7
# 10 9 8
# Calculate the sum for each of the 15 rows:
b = board # Syntactic sugar to have a shorter variable name.
rowSums[1] = b['A'] + b['B'] + b['C']
rowSums[2] = b['D'] + b['E'] + b['F'] + b['G']
rowSums[3] = b['H'] + b['I'] + b['J'] + b['K'] + b['L']
rowSums[4] = b['M'] + b['N'] + b['O'] + b['P']
rowSums[5] = b['Q'] + b['R'] + b['S']
rowSums[6] = b['C'] + b['G'] + b['L']
rowSums[7] = b['B'] + b['F'] + b['K'] + b['P']
rowSums[8] = b['A'] + b['E'] + b['J'] + b['O'] + b['S']
rowSums[9] = b['D'] + b['I'] + b['N'] + b['R']
rowSums[10] = b['H'] + b['M'] + b['Q']
rowSums[11] = b['A'] + b['D'] + b['H']
rowSums[12] = b['B'] + b['E'] + b['I'] + b['M']
rowSums[13] = b['C'] + b['F'] + b['J'] + b['N'] + b['Q']
rowSums[14] = b['G'] + b['K'] + b['O'] + b['R']
rowSums[15] = b['L'] + b['P'] + b['S']
# Prepare the arguments to use for the boardTemplate string:
templateArgs = []
# Indexes 0 to 18 of templateArgs are for the numbers 1 to 19:
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == 0:
templateArgs.append(' .')
else:
templateArgs.append(str(board[space]).rjust(2))
# Indexes 19 to 33 of templateArgs are for the row sums:
for rowNumber in range(1, 16):
templateArgs.append(str(rowSums[rowNumber]).rjust(2))
# Indexes 34 to 52 of templateArgs are for the unused numbers box:
for i in range(1, 20):
if i in unusedNums:
templateArgs.append(str(i).rjust(2))
else:
templateArgs.append(' .')
# Display the hex board:
print(boardTemplate.format(*templateArgs))
# Quit the program if all rows add up to 38:
isSolved = True
for i in range(1, 16): # Loop over all 15 rows.
if rowSums[i] != 38:
isSolved = False # Unsolved if at least one row isn't 38.
if isSolved:
print('You\'ve solved the puzzle! Hurray!')
break
# Get the selected space from the user:
while True:
print('Select a space A to S (or Z or QUIT): ')
response = input('> ').upper()
if response == 'QUIT':
print('Thanks for playing!')
sys.exit()
if response in tuple('ABCDEFGHIJKLMNOPQRSZ'):
selectedSpace = response
break
# Get the selected number from the user to put on the selected space:
while True:
print('Enter 1 to 19 for', selectedSpace, '(or "quit"):')
response = input('> ')
if response.lower().startswith('q'):
print('Thanks for playing!')
sys.exit()
if response.isdecimal() and (1 <= int(response) <= 19):
selectedNumber = int(response)
break
if selectedSpace == 'Z':
# Move the number to the unused numbers box:
unusedNums.add(selectedNumber)
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == selectedNumber:
board[space] = 0 # Set this space to blank.
elif selectedNumber in unusedNums:
# Move the number from the unused numbers box to the board:
numberAtOriginalSpace = board[selectedSpace]
board[selectedSpace] = selectedNumber # Put number on board.
unusedNums.remove(selectedNumber)
if numberAtOriginalSpace != 0:
unusedNums.add(numberAtOriginalSpace)
else:
# Since the number must already be on the board, do a swap to
# move it to the selected space:
for space in 'ABCDEFGHIJKLMNOPQRS':
if board[space] == selectedNumber:
spaceOfOriginalNumber = space
numberAtOriginalSpace = board[selectedSpace]
# Swap the two numbers on the board:
board[selectedSpace] = selectedNumber
board[spaceOfOriginalNumber] = numberAtOriginalSpace
|
[
"[email protected]"
] | |
15d5c55905fb974d4fd57d95c5db4742865c48fe
|
75fa11b13ddab8fd987428376f5d9c42dff0ba44
|
/metadata-ingestion/src/datahub/__init__.py
|
3ac3efefc14f064f1ce41e1262d80b9e73fd2735
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] |
permissive
|
RyanHolstien/datahub
|
163d0ff6b4636919ed223ee63a27cba6db2d0156
|
8cf299aeb43fa95afb22fefbc7728117c727f0b3
|
refs/heads/master
| 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 |
Apache-2.0
| 2021-02-16T23:48:05 | 2020-03-11T21:43:58 |
TypeScript
|
UTF-8
|
Python
| false | false | 808 |
py
|
import sys
import warnings
# Published at https://pypi.org/project/acryl-datahub/.
__package_name__ = "acryl-datahub"
__version__ = "0.0.0.dev0"
def is_dev_mode() -> bool:
return __version__.endswith("dev0")
def nice_version_name() -> str:
if is_dev_mode():
return "unavailable (installed in develop mode)"
return __version__
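
# e.g. with the dev default above, nice_version_name() returns
# "unavailable (installed in develop mode)".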
if sys.version_info < (3, 7):
warnings.warn(
"DataHub requires Python 3.7 or newer. "
"Please upgrade your Python version to continue using DataHub.",
FutureWarning,
stacklevel=2,
)
elif sys.version_info < (3, 8):
warnings.warn(
"DataHub will require Python 3.8 or newer soon. "
"Please upgrade your Python version to continue using DataHub.",
FutureWarning,
stacklevel=2,
)
|
[
"[email protected]"
] | |
d29d7ead3beeb69f4e7257874393395a45b514f2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/335/usersdata/281/101497/submittedfiles/matriz1.py
|
d5be8f9b97f00c041645b6fbf32654da27cf0484
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 861 |
py
|
# -*- coding: utf-8 -*-
n=int(input('Enter the number of rows: '))
m=int(input('Enter the number of columns: '))
matriz=[]
for i in range(0,n,1):
linha=[]
for j in range(0,m,1):
        linha.append(int(input('Enter a binary digit [0,1]: ')))
matriz.append(linha)
matrizX=[]
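# The four scans below locate the bounding box of the 1-entries:
# x = last row with a 1, x1 = first row, y = last column, y1 = first column.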
for i in range (0,n,1):
    for j in range (0,m,1):  # was range(0,n,...): wrong bound when the matrix is not square
if matriz[i][j]==1:
x=i
break
for i1 in range (n-1,-1,-1):
for j in range (0,m,1):
if matriz[i1][j]==1:
x1=i1
break
for j in range (0,m,1):
for i in range (0,n,1):
if matriz[i][j]==1:
y=j
break
for j1 in range (m-1,-1,-1):
for i in range (0,n,1):
if matriz [i][j1]==1:
y1=j1
break
for i in range (x1,x+1,1):
for j in range (y1,y+1,1):
matrizX.append(matriz[i][j])
print(matrizX)
|
[
"[email protected]"
] | |
524af3a185a13244b19e8fc20ee82ba5d0d201c2
|
587ca84f28c54892ca1fed2ef14774568c20076d
|
/2013/tp2/tp2ej8.py
|
ea28a71f4cd4dfd3181ac85c14ebd9ef791dd802
|
[] |
no_license
|
guillox/Practica_python
|
4c6132a22387db8017d37347217e11091e05b5c9
|
a446c944ea81668393597ddf478dafb53d942cb1
|
refs/heads/master
| 2021-01-10T20:01:12.653714 | 2015-04-11T20:56:05 | 2015-04-11T20:56:05 | 33,785,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 242 |
py
|
"""Módulos
8.- Escriba cada uno de los incisos del ejercicio 7.- (a, b y c) en módulos diferentes
(conjuntos.py, cola.py, parámetros_variables.py), impórtelos en un nuevo modulo
(principal.py) y utilice sus funciones en éste último"""
|
[
"[email protected]"
] | |
b56ee924a022696c1a076479da11f09329caf595
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/django-1.5/tests/modeltests/model_package/__init__.py
|
c9faaf2ceb4c8ea96b57c3eb766d8bc07ff93b3f
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 108 |
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/modeltests/model_package/__init__.py
|
[
"[email protected]"
] | |
a42072f98db7c8b0cc32e382058507c2e0302a68
|
a303be0a547d717b0deb19b5bdcc75010e131b51
|
/Contests/College Contests/Hacktivate/p5.py
|
2f92ca59a3663d4202e3459033685ae3a656031b
|
[] |
no_license
|
harrypotter0/competitive-programming
|
ff883c4dc5aa8d72f1af589bb654a422e32c8a38
|
82a8497e69212dc62e75af74b0d5a3b390b8aca2
|
refs/heads/master
| 2023-03-23T07:07:14.295053 | 2021-03-17T01:24:45 | 2021-03-17T01:24:45 | 70,964,689 | 16 | 9 | null | 2021-03-17T01:24:49 | 2016-10-15T03:52:53 |
Python
|
UTF-8
|
Python
| false | false | 983 |
py
|
import math
def readInts():
return list(map(int, raw_input().strip().split()))
def readInt():
return int(raw_input())
def readIntsindex0():
    return list(map(lambda x: int(x) - 1, raw_input().split()))
def readStrs():
return raw_input().split()
def readStr():
return raw_input()
a = [[1 for i in xrange(n+1)] for n in xrange(102)]
for i in xrange(1, 101):
for j in xrange(1, i+1):
a[i+1][j] = a[i][j-1] + a[i][j]
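# a[n][k] now holds the binomial coefficient C(n, k) (Pascal's triangle), used
# below in the recurrence for the sums of g-th powers.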
p = 10 ** 9 + 9  # keep the modulus an int: math.pow returns a float and loses precision on big integers
# print(p)
for _ in xrange(int(raw_input())):
n, g = map(int, raw_input().split())
s = 0
ans = [n for _ in xrange(g+1)]
for i in xrange(1, g+1):
ans[i] = (pow(n+1, i+1)) - 1
for j in range(2, i+2):
ans[i] -= ((a[i+1][j]*ans[i+1-j]))
ans[i] = (ans[i]/(i+1))
print (int((ans[g]-1-pow(n,g)) % p))
'''
Sample Input 0
1
4 2
Sample Output 0
13
Explanation 0
(4-1)2 + (4-2)2 = 32 + 22 = 13.
Sample Input 1
1
4 3
Sample Output 1
35
Explanation 1
(4-1)3 + (4-2)3 = 33 + 23 = 35
'''
|
[
"[email protected]"
] | |
fbffdfa4fe0e2cff11731aa4d0a1893418e00bf8
|
70450f0c551adf47b450468e424f4f90bebfb58d
|
/dataclasses/resources/test/loads.py
|
464d663ceca5a9adb63d4de6367de0d4edabe395
|
[
"MIT"
] |
permissive
|
hschwane/offline_production
|
ebd878c5ac45221b0631a78d9e996dea3909bacb
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
refs/heads/master
| 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 |
MIT
| 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | null |
UTF-8
|
Python
| false | false | 67 |
py
|
#!/usr/bin/env python
#
from icecube import icetray, dataclasses
|
[
"[email protected]"
] | |
402302a71ae5831e01eb2d136420e4d4a3044e79
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/94/usersdata/149/50708/submittedfiles/mediaLista.py
|
e1979fe1810c9462bf8a0c5faee5e239056bf973
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,050 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
n1=int(input('enter n1:'))
n2=int(input('enter n2:'))
n3=int(input('enter n3:'))
n4=int(input('enter n4:'))
n5=int(input('enter n5:'))
n6=int(input('enter n6:'))
s1=int(input('enter s1:'))
s2=int(input('enter s2:'))
s3=int(input('enter s3:'))
s4=int(input('enter s4:'))
s5=int(input('enter s5:'))
s6=int(input('enter s6:'))
cont=0
if n1==s1 or n1==s2 or n1==s3 or n1==s4 or n1==s5 or n1==s6:
cont=cont+1
if n2==s1 or n2==s2 or n2==s3 or n2==s4 or n2==s5 or n2==s6:
cont=cont+1
if n3==s1 or n3==s2 or n3==s3 or n3==s4 or n3==s5 or n3==s6:
cont=cont+1
if n4==s1 or n4==s2 or n4==s3 or n4==s4 or n4==s5 or n4==s6:
cont=cont+1
if n5==s1 or n5==s2 or n5==s3 or n5==s4 or n5==s5 or n5==s6:
cont=cont+1
if n6==s1 or n6==s2 or n6==s3 or n6==s4 or n6==s5 or n6==s6:
cont=cont+1
if cont==3:  # the counter variable is cont; "contador" was never defined (NameError)
    print('3n')
elif cont==4:
    print('4n')
elif cont==5:
    print('5n')
elif cont==6:
    print('6n')
elif cont<3:
    print('n')
print('n')
|
[
"[email protected]"
] | |
d9747b4b212e3d0982c69a484b3317dda82cbe5a
|
43598dd1e251b1733ed16981a148834bd9faca9b
|
/draw_util.py
|
6fec3134df7e94dd8e44503a6b59f2ab3d985313
|
[] |
no_license
|
SamL98/PhysicsEngine
|
86e6f38a34d7261c13cc76e78f2702e72c4c0c3b
|
440e9042cc999277bbc1961cbb4b8f2300f28fde
|
refs/heads/master
| 2020-03-23T18:51:45.246905 | 2018-07-22T22:50:14 | 2018-07-22T22:50:14 | 141,936,554 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,287 |
py
|
import numpy as np
"""
Clear the given canvas
:param canvas: numpy array to clear (set to 1)
"""
def clear(canvas):
canvas[0:canvas.shape[0], 0:canvas.shape[1]] = 1
"""
Calculate the Euclidean distance between two points
:param p1: one point
:param p2: the other point
"""
def dist(p1, p2):
return np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
"""
Return whether or not given bounding box is viewable in the canvas
:param bbox: the bounding box defining a shape
:param canvas_shape: the shape of the current canvas
"""
def is_viewable(bbox, canvas_shape):
pos = (int(bbox.origin[0]), int(bbox.origin[1]))
xInView = pos[0]<canvas_shape[0] and pos[0]+bbox.height/2>=0
yInView = pos[1]<canvas_shape[1] and pos[1]+bbox.width/2>=0
return xInView and yInView
"""
Return the bounding box of the part of the shape to draw
because some of the bounding box may be out of view.
:param bbox: the bounding box
:param canvas_shape: the shape of the current canvas
"""
def get_drawing_coords(bbox, canvas_shape):
pos = (int(bbox.origin[0]), int(bbox.origin[1]))
center = (int(bbox.center[0]), int(bbox.center[1]))
t = max(0, pos[0])
b = min(canvas_shape[0], center[0]+bbox.height//2)
l = max(0, pos[1])
r = min(canvas_shape[1], center[1]+bbox.width//2)
return center, t, l, b, r
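
# A minimal smoke-test sketch (an assumption: bbox objects expose origin, center,
# width and height, as the functions above imply):
def _demo():
    from types import SimpleNamespace
    bbox = SimpleNamespace(origin=(5, 5), center=(10, 10), width=10, height=10)
    canvas = np.ones((20, 20))
    if is_viewable(bbox, canvas.shape):
        return get_drawing_coords(bbox, canvas.shape)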
|
[
"[email protected]"
] | |
e315107d95eb655e7fd621bbcc8ec9c87809941f
|
48f73b5b78da81c388d76d685ec47bb6387eefdd
|
/scrapeHackerrankCode/codes/itertools-permutations.py
|
4caa82914dda972ada250151c6f329eb7f86fb9e
|
[] |
no_license
|
abidkhan484/hacerrankScraping
|
ad0ceda6c86d321d98768b169d63ea1ee7ccd861
|
487bbf115117bd5c293298e77f15ae810a50b82d
|
refs/heads/master
| 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
# Accepted
# Python 3
from itertools import permutations
a, d = input().split()
d = int(d)
l = list(permutations(a, d))
le = len(l)
l.sort()
for i in range(le):
for j in range(d):
print(l[i][j], end='')
print()
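
# An equivalent, more idiomatic variant (a sketch; the accepted submission above
# is left unchanged):
def print_permutations(chars, length):
    for p in sorted(permutations(chars, length)):
        print(''.join(p))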
|
[
"[email protected]"
] | |
b7304124b253d7826e8d3a6f2f00285712b9fa5a
|
7ac271f357f4c8f0c23c697b11966259f836880f
|
/app/data/dvdrental/language.py
|
5b8fd9d6e1d6871ba70b081f79a44f05a68f7c4c
|
[] |
no_license
|
cheng93/PythonWeb
|
74a58eadee4ee7d2872a582a907bbf47630df371
|
d5ced8dee1d5ba31778125c5e67169c92acf26a0
|
refs/heads/develop
| 2021-01-19T23:59:11.315871 | 2018-03-04T19:26:18 | 2018-03-04T19:26:18 | 89,063,916 | 0 | 0 | null | 2018-03-04T19:26:19 | 2017-04-22T11:09:14 |
Python
|
UTF-8
|
Python
| false | false | 405 |
py
|
from app.data.dvdrental import Base
from sqlalchemy import Column, DateTime, Integer, String, text
class Language(Base):
__tablename__ = 'language'
language_id = Column(Integer, primary_key=True, server_default=text("nextval('language_language_id_seq'::regclass)"))
name = Column(String(20), nullable=False)
last_update = Column(DateTime, nullable=False, server_default=text("now()"))
|
[
"[email protected]"
] | |
325e68bb2e595b68fa254d9bcb3c01d2c7a19026
|
8a0bbb159a3d6a259a83224b8addc83c9da1986e
|
/lists/tests.py
|
fa5b816ece674b42b152c08c66577ea5f3886bc9
|
[] |
no_license
|
guinslym/tdd-django-tutorial
|
5e976bcfe3a670b0b75c64646881c5a98214848e
|
436d036e01527788e3f7b055f84ed159c822e8b4
|
refs/heads/master
| 2021-01-01T04:50:07.504959 | 2016-05-19T21:31:03 | 2016-05-19T21:31:03 | 59,229,057 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,178 |
py
|
from django.test import TestCase
from django.http import HttpRequest
from django.template.loader import render_to_string
from lists.views import home_page
# Create your tests here.
class HomePageViewTest(TestCase):
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
self.assertIn('<title>To-Do lists</title>', response.content.decode('utf8'))
self.assertTrue(response.content.startswith(b'<html>'))
#print(response.content)
self.assertTrue(response.content.strip().endswith(b'</html>'))
#expected_content = open('lists/templates/home.html').read()
expected_content = render_to_string('home.html')
self.assertEqual(response.content.decode('utf8'), expected_content)
def test_home_page_can_store_post_requests(self):
request = HttpRequest()
request.method = 'POST'
request.POST['item_text'] = 'new item'
response = home_page(request)
expected_content = render_to_string('home.html',
{'new_item_text': 'new item'})
self.assertEqual(response.content.strip().decode('utf8'), expected_content)
|
[
"[email protected]"
] | |
4345a1edf1a7d7ec88ebe84116eca2ce54549f00
|
6d6d012b940718afda0e16e3d993d4d8a25793c0
|
/applications/suscribers/views.py
|
8781a40a6e5c2b2d3fcab0b21217d3ffd61597be
|
[] |
no_license
|
neunapp/ajaxdj
|
e66bad9ffd47664e6c0342439b363f97907f8465
|
7a0a29d61ebe0bf5496b235165aaa3d504320978
|
refs/heads/master
| 2022-09-10T21:26:28.370316 | 2020-06-02T18:05:46 | 2020-06-02T18:05:46 | 268,872,703 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,371 |
py
|
#
from django.shortcuts import render
from django.http import JsonResponse
from django.views.generic import CreateView
from .models import Suscriptor
class SuscriptorCreateView(CreateView):
template_name = "add.html"
model = Suscriptor
fields = ('__all__')
success_url = '.'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["suscripciones"] = Suscriptor.objects.all()
print('**************')
return context
def render_to_response(self, context, **response_kwargs):
""" """
if self.request.is_ajax():
            print('This is an AJAX request*********')
data = list(context["suscripciones"].values())
return JsonResponse({'suscriptores': data})
else:
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
# When this response-handling logic repeats throughout the project, we can use one
# of the best tools that class-based views offer: mixins, which help us trim the
# lines of code even further. Example below, reusing the same view.
class AjaxaResponseMixin(object):
def render_to_response(self, context, **response_kwargs):
""" """
if self.request.is_ajax():
data = list(context['suscripciones'].values())
return JsonResponse({'suscriptores': data})
else:
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
class SuscriptorCreateView2(AjaxaResponseMixin, CreateView):
template_name = "add.html"
model = Suscriptor
fields = ('__all__')
success_url = '.'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["suscripciones"] = Suscriptor.objects.all()
return context
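
# Note: request.is_ajax() was deprecated in Django 3.1 and removed in 4.0; newer
# code checks request.headers.get('x-requested-with') == 'XMLHttpRequest' instead.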
|
[
"[email protected]"
] | |
7c068027cc97b79c1cc967c92cc93dd96840b64e
|
6fa082e3d17a7899c9d27e7da1a8fabc04c7a3d5
|
/tests/test_pkg.py
|
2f2ef5665dce458d025dd9c35be502d00ce035b1
|
[] |
no_license
|
agx/whatmaps
|
06ecd79e4d6090794b6c7d9bce6c9efaa16d12f9
|
e28c99d3fc7c391de039321e87b5ce12eae38572
|
refs/heads/master
| 2023-01-09T12:52:38.546656 | 2022-12-30T12:09:19 | 2022-12-30T12:09:55 | 18,165,729 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,860 |
py
|
# vim: set fileencoding=utf-8 :
# (C) 2014 Guido Günther <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test L{whatmaps.process} config"""
import unittest
from mock import patch
from whatmaps.pkg import Pkg, PkgError
from . import context
class TestPkg(unittest.TestCase):
def setUp(self):
self.tmpdir = context.new_tmpdir(__name__)
def test_abstract(self):
"""Check abstract method signatures"""
self.assertIsNone(Pkg.type)
self.assertIsNone(Pkg.services)
def test_repr(self):
        p = Pkg('apackage')
        self.assertEqual(str(p), "<None Pkg object name:'apackage'>")
def test_list_contents(self):
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = [
b'/package/content',
b'/more/package/content',
]
PopenMock.returncode = 0
result = p._get_contents()
self.assertIn('/package/content', result)
self.assertNotIn('/more/package/content', result)
# We want to check that we don't invoke Popen on
# a second call so let it fail
PopenMock.returncode = 1
result = p._get_contents()
self.assertIn('/package/content', result)
self.assertNotIn('/more/package/content', result)
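
    # Note: communicate() normally returns a (stdout, stderr) tuple; the short
    # lists used in these mocks presumably work because the code under test only
    # reads element [0].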
def test_shared_objects(self):
"""Test that we properly match shared objects"""
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = [b'\n'.join([
b'/lib/foo.so.1',
b'/lib/bar.so',
b'/not/a/shared/object',
b'/not/a/shared/object.soeither',
])]
PopenMock.returncode = 0
result = p.shared_objects
self.assertIn('/lib/foo.so.1', result)
self.assertIn('/lib/bar.so', result)
            self.assertNotIn('/not/a/shared/object', result)
            self.assertNotIn('/not/a/shared/object.soeither', result)
# We want to check that we don't invoke Popen on
# a second call so let it fail.
PopenMock.returncode = 1
            result = p.shared_objects
            self.assertIn('/lib/foo.so.1', result)
            self.assertNotIn('/not/a/shared/object', result)
def test_shared_object_error(self):
"""Test that we raise PkgError"""
with patch('subprocess.Popen') as mock:
p = Pkg('doesnotmatter')
p._list_contents = '/does/not/matter'
PopenMock = mock.return_value
PopenMock.communicate.return_value = ['']
PopenMock.returncode = 1
try:
p.shared_objects
self.fail("PkgError exception not raised")
except PkgError:
pass
except Exception as e:
self.fail("Raised '%s is not PkgError" % e)
def tearDown(self):
context.teardown()
|
[
"[email protected]"
] | |
68236e56506ea6cc90c7d5ffe069d839068af442
|
0524f83be13e4b6cafd304acb002eca7a5d3e0f0
|
/Matplotlib intro/Intro/save_image.py
|
9be325c4df724ca1d6ac114720f5591d40271db3
|
[] |
no_license
|
LesediSekakatlela/python_projects
|
25064f7a7cabd54f6d04aba31cdace6e8af06d63
|
6cca3cbddefa006a17f95486dbaa8a0b7de7fea5
|
refs/heads/main
| 2023-06-06T23:37:59.385280 | 2021-06-20T16:46:11 | 2021-06-20T16:46:11 | 371,608,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0,10,100)
y = x
plt.plot(x, y, label='linear')
plt.legend()
plt.savefig("Figure_1.png")
plt.show()
|
[
"[email protected]"
] | |
39caf781a2fb28054c728bd71be1b09a045da442
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ggH_SF/Full2017_HTXS_Stage1p2_v7/plot_blind.py
|
3b79748cb261b57889418880386c716ea5d393b4
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 |
Python
|
UTF-8
|
Python
| false | false | 3,342 |
py
|
# plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots.
# If not defined, normal plots is used
#
groupPlot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'isSignal' : 0,
'color': 400, # kYellow
'samples' : ['top']
}
groupPlot['WW'] = {
'nameHR' : 'WW',
'isSignal' : 0,
'color': 851, # kAzure -9
'samples' : ['WW', 'ggWW', 'WWewk']
}
groupPlot['Fake'] = {
'nameHR' : 'nonprompt',
'isSignal' : 0,
'color': 921, # kGray + 1
'samples' : ['Fake_mm', 'Fake_ee']
}
groupPlot['DY'] = {
'nameHR' : "DY",
'isSignal' : 0,
'color': 418, # kGreen+2
'samples' : ['DY']
}
groupPlot['VVV'] = {
'nameHR' : 'VVV',
'isSignal' : 0,
'color': 857, # kAzure -3
'samples' : ['VVV']
}
groupPlot['VZ'] = {
'nameHR' : "VZ",
'isSignal' : 0,
'color' : 617, # kViolet + 1
'samples' : ['VZ', 'WZ', 'ZZ']
}
groupPlot['Vg'] = {
'nameHR' : "V#gamma",
'isSignal' : 0,
'color' : 810, # kOrange + 10
'samples' : ['Vg', 'Wg']
}
groupPlot['VgS'] = {
'nameHR' : "V#gamma*",
'isSignal' : 0,
'color' : 409, # kGreen - 9
'samples' : ['VgS_H','VgS_L']
}
print(signals)
groupPlot['Higgs'] = {
'nameHR' : 'Higgs',
'isSignal' : 1,
'color': 632, # kRed
'samples' : signals,
}
#plot = {}
# keys here must match keys in samples.py
#
plot['DY'] = {
'color': 418, # kGreen+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['Fake_mm'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Fake_ee'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'color': 400, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
}
plot['WW'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ggWW'] = {
'color': 850, # kAzure -10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WWewk'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Vg'] = {
'color': 859, # kAzure -1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_H'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VgS_L'] = {
'color' : 617, # kViolet + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VZ'] = {
'color': 858, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VVV'] = {
'color': 857, # kAzure -3
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
# HWW
for signal in signals:
plot[signal] = {
'nameHR' : signal,
'color': 632, # kRed
'isSignal' : 1,
'isData' : 0,
'scale' : 1
}
# data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 1
}
# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
|
[
"[email protected]"
] | |
9085c6988eab04035dae166b01927ae787a7b454
|
b76daa106277ef2f7ab7f6e3278546c6da0bb967
|
/base/sys_argv/code/pathdir.py
|
dd170c13326a1de5d44bc7bd2c02d908e1a062ac
|
[] |
no_license
|
DyLanCao/ipython
|
d071b4659999062106438ec077d27754a711ef92
|
746e070d193de04002d277e5170ddf8b5d9d4d44
|
refs/heads/master
| 2021-06-12T19:31:44.325346 | 2021-02-20T03:17:58 | 2021-02-20T03:17:58 | 142,657,284 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
import sys,os
try:
import parameters,helpAbout,autoUpdate
from Combobox import ComboBox
except ImportError:
from COMTool import parameters,helpAbout,autoUpdate
from COMTool.Combobox import ComboBox
from PyQt5.QtGui import QIcon,QFont,QTextCursor,QPixmap
from PyQt5.QtWidgets import (QApplication, QWidget,QToolTip,QPushButton,QMessageBox,QDesktopWidget,QMainWindow,
QVBoxLayout,QHBoxLayout,QGridLayout,QTextEdit,QLabel,QRadioButton,QCheckBox,
QLineEdit,QGroupBox,QSplitter,QFileDialog)
class MainWindow(QMainWindow):
DataPath = "./"
    def __init__(self,app):
        super(MainWindow, self).__init__()  # initialize the underlying QMainWindow
        pathDirList = sys.argv[0].replace("\\", "/").split("/")
self.DataPath = os.path.abspath("/".join(str(i) for i in pathDirList))
pathDirList.pop()
if not os.path.exists(self.DataPath + "/" + parameters.strDataDirName):
pathDirList.pop()
self.DataPath = os.path.abspath("/".join(str(i) for i in pathDirList))
self.DataPath = (self.DataPath + "/" + parameters.strDataDirName).replace("\\", "/")
def main():
app = QApplication(sys.argv)
mainWindow = MainWindow(app)
print("pathdir:",mainWindow.DataPath)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
58e33bc192fdc4c023cd05a9cc2cfed05f2900d9
|
e055690de408c8e0a6ca97b43912b1482528b98c
|
/src/features.py
|
c97f0c9ed48cdb2b95deea0e4c84f27eee76c013
|
[] |
no_license
|
webclinic017/Dissertation-1
|
88155419cfa6b9d8a1834fadecdadda5c22498db
|
1e118c0697f0785dc2db30e46c26af154b269813
|
refs/heads/master
| 2021-09-08T06:37:21.633294 | 2016-11-29T11:07:02 | 2016-11-29T11:07:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,423 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Dissertation
# File name: features
# Author: Mark Wang
# Date: 13/6/2016
from StockInference.constant import Constants
const = Constants()
features = {
const.PRICE_TYPE: const.STOCK_ADJUSTED_CLOSED,
const.STOCK_PRICE: {const.DATA_PERIOD: 5},
const.STOCK_INDICATOR: [
(const.MACD, {
const.MACD_FAST_PERIOD: 12,
const.MACD_SLOW_PERIOD: 26,
const.MACD_TIME_PERIOD: 9
}),
(const.MACD, {
const.MACD_FAST_PERIOD: 7,
const.MACD_SLOW_PERIOD: 14,
const.MACD_TIME_PERIOD: 9
}),
(const.SMA, 3),
(const.SMA, 13),
(const.SMA, 21),
(const.EMA, 5),
(const.EMA, 13),
(const.EMA, 21),
(const.ROC, 13),
(const.ROC, 21),
(const.RSI, 9),
(const.RSI, 14),
(const.RSI, 21),
],
const.FUNDAMENTAL_ANALYSIS: [
const.US10Y_BOND,
const.US30Y_BOND,
const.FXI,
        # const.IC,
        # const.IA,  # these two bonds are commented out because they are newer (less history available)
const.HSI,
{const.FROM: const.USD, const.TO: const.HKD},
{const.FROM: const.EUR, const.TO: const.HKD},
# {const.FROM: const.AUD, const.TO: const.HKD},
const.ONE_YEAR,
const.HALF_YEAR,
const.OVER_NIGHT,
const.GOLDEN_PRICE,
]
}
|
[
"[email protected]"
] | |
cdd0665697b42f4cc95cda7d1404b2f0b64c2720
|
6a185868a6a41384f44334b23fea9079a2a35ded
|
/Algorithm/04_선형리스트-통합.py
|
02694336285a5e96ba66bece4e669bab44d1cfb7
|
[] |
no_license
|
kimhyeongju/coding_practice
|
ec096e574877a4d21babdc0162d96a9c75ee5686
|
599b3ecf3100622e165abfc54c0ad90a270ccb51
|
refs/heads/master
| 2023-03-25T21:34:34.893277 | 2021-03-19T17:49:54 | 2021-03-19T17:49:54 | 296,621,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,344 |
py
|
# Function and class declarations
def insert_data(position, friend):
katok.append(None)
kLen = len(katok)
for i in range(kLen-1, position, -1):
katok[i] = katok[i-1]
katok[i-1] = None
katok[position] = friend
def delete_data(position):
kLen = len(katok)
katok[position] = None
for i in range(position+1,kLen):
katok[i-1] = katok[i]
katok[i] = None
del(katok[kLen-1])
def add_data(friend):
katok.append(None)
kLen = len(katok)
katok[kLen-1] = friend
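
# Note: these hand-rolled shifts are the point of the exercise; Python's built-in
# list operations are equivalent (a sketch): katok.insert(position, friend) and
# del katok[position].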
# Global variables
katok = []
select = -1 # 1.Add 2.Insert 3.Delete 4.Quit
# Main code
if __name__ == '__main__':
while(select != 4):
        select = int(input('Choose (1.Add 2.Insert 3.Delete 4.Quit) --> '))
if select == 1:
            data = input('Data to add --> ')
add_data(data)
print(katok)
elif select == 2:
            pos = int(input('Position to insert at --> '))
            data = input('Data to add --> ')
insert_data(pos, data)
print(katok)
elif select == 3:
            pos = int(input('Position to delete --> '))
delete_data(pos)
print(katok)
elif select ==4:
exit()
else:
print("잘못 입력")
exit()
|
[
"[email protected]"
] | |
a0548ac68468ff78b08b70032648b02eb729069b
|
6c2865fd7d6a0fb2552826e7cd77ae863ef5bc4a
|
/mwlinks/libs/WikiExtractor.py
|
5802ba96a761ebaa29bac11756c3088287ed1a44
|
[
"MIT"
] |
permissive
|
hunterhector/python-mwlinks
|
0df4440b21ddaf9b1ab3866806155bc2d359b95a
|
6f63d56e79313b8a3c5161a0197a260442829518
|
refs/heads/master
| 2021-01-20T03:13:58.751091 | 2019-09-09T21:13:07 | 2019-09-09T21:13:07 | 89,511,539 | 0 | 0 | null | 2017-04-26T18:01:42 | 2017-04-26T18:01:42 | null |
UTF-8
|
Python
| false | false | 116,538 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 2.75 (March 4, 2017)
# Author: Giuseppe Attardi ([email protected]), University of Pisa
#
# Contributors:
# Antonio Fuschetto ([email protected])
# Leonardo Souza ([email protected])
# Juan Manuel Caicedo ([email protected])
# Humberto Pereira ([email protected])
# Siegfried-A. Gevatter ([email protected])
# Pedro Assis ([email protected])
# Wim Muskee ([email protected])
# Radics Geza ([email protected])
# orangain ([email protected])
# Seth Cleveland ([email protected])
# Bren Barn
#
# =============================================================================
# Copyright (c) 2011-2017. Giuseppe Attardi ([email protected]).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License at <http://www.gnu.org/licenses/> for more details.
#
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" revid="" url="" title="">
...
</doc>
If the program is invoked with the --json flag, then each file will
contain several documents formatted as json ojects, one per line, with
the following structure
{"id": "", "revid": "", "url":"", "title": "", "text": "..."}
Template expansion requires preprocesssng first the whole dump and
collecting template definitions.
"""
from __future__ import unicode_literals, division
import sys
import argparse
import bz2
import codecs
import cgi
import fileinput
import logging
import os.path
import re # TODO use regex when it will be standard
import time
import json
from io import StringIO
from multiprocessing import Queue, Process, Value, cpu_count
from timeit import default_timer
PY2 = sys.version_info[0] == 2
# Python 2.7 compatibiity
if PY2:
from urllib import quote
from htmlentitydefs import name2codepoint
from itertools import izip as zip, izip_longest as zip_longest
range = xrange # Use Python 3 equivalent
chr = unichr # Use Python 3 equivalent
text_type = unicode
class SimpleNamespace(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
return self.__dict__ == other.__dict__
else:
from urllib.parse import quote
from html.entities import name2codepoint
from itertools import zip_longest
from types import SimpleNamespace
text_type = str
# ===========================================================================
# Program version
version = '2.75'
## PARAMS ####################################################################
options = SimpleNamespace(
##
# Defined in <siteinfo>
# We include as default Template, when loading external template file.
knownNamespaces={'Template': 10},
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace='',
templatePrefix='',
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace='',
##
# Recognize only these namespaces in links
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces=['w', 'wiktionary', 'wikt'],
# This is obtained from <siteinfo>
urlbase='',
##
# Filter disambiguation pages
filter_disambig_pages=False,
##
# Drop tables from the article
keep_tables=False,
##
# Whether to preserve links in output
keepLinks=False,
##
# Whether to preserve section titles
keepSections=True,
##
# Whether to preserve lists
keepLists=False,
##
# Whether to output HTML instead of text
toHTML=False,
##
# Whether to write json instead of the xml-like default output format
write_json=False,
##
# Whether to expand templates
expand_templates=True,
##
## Whether to escape doc content
escape_doc=False,
##
# Print the wikipedia article revision
print_revision=False,
##
# Ignore the header and only print the text
ignore_header=True,
##
# Minimum expanded text length required to print document
min_text_length=0,
##
# Keep new lines
keep_new_lines=True,
# Shared objects holding templates, redirects and cache
templates={},
redirects={},
# cache of parser templates
# FIXME: sharing this with a Manager slows down.
templateCache={},
# Elements to ignore/discard
ignored_tag_patterns=[],
discardElements=[
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small',
'sub', 'sup', 'indicator'
],
)
##
# Keys for Template and Module namespaces
templateKeys = set(['10', '828'])
##
# Regex for identifying disambig pages
filter_disambig_page_pattern = re.compile("{{disambig(uation)?(\|[^}]*)?}}")
##
# page filtering logic -- remove templates, undesired xml namespaces, and disambiguation pages
def keepPage(ns, page):
    if ns != '0':  # Article: keep only pages in the main namespace
return False
# remove disambig pages if desired
if options.filter_disambig_pages:
for line in page:
if filter_disambig_page_pattern.match(line):
return False
return True
def get_url(uid):
return "%s?curid=%s" % (options.urlbase, uid)
# =========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF
# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB
# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF
# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"
# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END
# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )
# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment / xmlish-element /
# *wikitext-L3
# ------------------------------------------------------------------------------
selfClosingTags = ('br', 'hr', 'nobr', 'ref', 'references', 'nowiki')
placeholder_tags = {'math': 'formula', 'code': 'codice'}
def normalizeTitle(title):
"""Normalize title"""
# remove leading/trailing whitespace and underscores
title = title.strip(' _')
# replace sequences of whitespace and underscore chars with a single space
title = re.sub(r'[\s_]+', ' ', title)
m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
if m:
prefix = m.group(1)
if m.group(2):
optionalWhitespace = ' '
else:
optionalWhitespace = ''
rest = m.group(3)
ns = normalizeNamespace(prefix)
if ns in options.knownNamespaces:
# If the prefix designates a known namespace, then it might be
# followed by optional whitespace that should be removed to get
# the canonical page name
# (e.g., "Category: Births" should become "Category:Births").
title = ns + ":" + ucfirst(rest)
else:
# No namespace, just capitalize first letter.
# If the part before the colon is not a known namespace, then we
# must not remove the space after the colon (if any), e.g.,
# "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
# However, to get the canonical page name we must contract multiple
# spaces into one, because
# "3001: The_Final_Odyssey" != "3001: The_Final_Odyssey".
title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
else:
# no namespace, just capitalize first letter
title = ucfirst(title)
return title
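
# e.g. normalizeTitle('template:  foo_bar') -> 'Template:Foo bar' (known namespace).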
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
:param text The HTML (or XML) source text.
:return The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
code = m.group(1)
try:
if text[1] == "#": # character reference
if text[2] == "x":
return chr(int(code[1:], 16))
else:
return chr(int(code))
else: # named entity
return chr(name2codepoint[code])
except:
return text # leave as is
return re.sub("&#?(\w+);", fixup, text)
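
# Example (illustrative): unescape('&pi; is &#960;') returns 'π is π'.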
# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)
# Match <nowiki>...</nowiki>
nowiki = re.compile(r'<nowiki>.*?</nowiki>')
def ignoreTag(tag):
left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
options.ignored_tag_patterns.append((left, right))
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match preformatted lines
preformatted = re.compile(r'^ .*?$')
# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots
dots = re.compile(r'\.{4,}')
# ======================================================================
class Template(list):
"""
A Template is a list of TemplateText or TemplateArgs
"""
@classmethod
def parse(cls, body):
tpl = Template()
# we must handle nesting, s.a.
# {{{1|{{PAGENAME}}}
# {{{italics|{{{italic|}}}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
#
start = 0
for s, e in findMatchingBraces(body, 3):
tpl.append(TemplateText(body[start:s]))
tpl.append(TemplateArg(body[s + 3:e - 3]))
start = e
tpl.append(TemplateText(body[start:])) # leftover
return tpl
def subst(self, params, extractor, depth=0):
# We perform parameter substitutions recursively.
# We also limit the maximum number of iterations to avoid too long or
# even endless loops (in case of malformed input).
# :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
#
# Parameter values are assigned to parameters in two (?) passes.
# Therefore a parameter name in a template can depend on the value of
# another parameter of the same template, regardless of the order in
# which they are specified in the template call, for example, using
# Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
# {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
# "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.
# logging.debug('&*ssubst tpl %d %s', extractor.frame.length, '', depth, self)
if depth > extractor.maxParameterRecursionLevels:
extractor.recursion_exceeded_3_errs += 1
return ''
return ''.join([tpl.subst(params, extractor, depth) for tpl in self])
def __str__(self):
return ''.join([text_type(x) for x in self])
class TemplateText(text_type):
"""Fixed text of template"""
def subst(self, params, extractor, depth):
return self
class TemplateArg(object):
"""
parameter to a template.
Has a name and a default value, both of which are Templates.
"""
def __init__(self, parameter):
"""
:param parameter: the parts of a tplarg.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
# 4|{{{{{subst|}}}CURRENTYEAR}}
# any parts in a tplarg after the first (the parameter default) are
# ignored, and an equals sign in the first part is treated as plain text.
# logging.debug('TemplateArg %s', parameter)
parts = splitParts(parameter)
self.name = Template.parse(parts[0])
if len(parts) > 1:
# This parameter has a default value
self.default = Template.parse(parts[1])
else:
self.default = None
def __str__(self):
if self.default:
return '{{{%s|%s}}}' % (self.name, self.default)
else:
return '{{{%s}}}' % self.name
def subst(self, params, extractor, depth):
"""
Substitute value for this argument from dict :param params:
Use :param extractor: to evaluate expressions for name and default.
Limit substitution to the maximun :param depth:.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
paramName = self.name.subst(params, extractor, depth + 1)
paramName = extractor.transform(paramName)
res = ''
if paramName in params:
res = params[paramName] # use parameter value specified in template invocation
elif self.default: # use the default value
defaultValue = self.default.subst(params, extractor, depth + 1)
res = extractor.transform(defaultValue)
# logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
return res
class Frame(object):
def __init__(self, title='', args=[], prev=None):
self.title = title
self.args = args
self.prev = prev
self.depth = prev.depth + 1 if prev else 0
def push(self, title, args):
return Frame(title, args, self)
def pop(self):
return self.prev
def __str__(self):
res = ''
prev = self.prev
while prev:
if res: res += ', '
res += '(%s, %s)' % (prev.title, prev.args)
prev = prev.prev
return '<Frame [' + res + ']>'
# ======================================================================
substWords = 'subst:|safesubst:'
class Extractor(object):
"""
An extraction task on a article.
"""
def __init__(self, id, revid, title, lines):
"""
:param id: id of page.
        :param title: title of the page.
:param lines: a list of lines.
"""
self.id = id
self.revid = revid
self.title = title
if options.keep_new_lines:
self.text = '\n'.join(lines)
else:
self.text = ' '.join(lines)
self.magicWords = MagicWords()
self.frame = Frame()
self.recursion_exceeded_1_errs = 0 # template recursion within expand()
self.recursion_exceeded_2_errs = 0 # template recursion within expandTemplate()
self.recursion_exceeded_3_errs = 0 # parameter recursion
self.template_title_errs = 0
def write_output(self, out, text):
"""
:param out: a memory file
:param text: the text of the page
"""
url = get_url(self.id)
if options.write_json:
json_data = {
'id': self.id,
'url': url,
'title': self.title,
'text': "\n".join(text)
}
if options.print_revision:
json_data['revid'] = self.revid
# We don't use json.dump(data, out) because we want to be
# able to encode the string if the output is sys.stdout
out_str = json.dumps(json_data, ensure_ascii=False)
if out == sys.stdout: # option -a or -o -
out_str = out_str.encode('utf-8')
out.write(out_str)
out.write('\n')
else:
if not options.ignore_header:
if options.print_revision:
header = '<doc id="%s" revid="%s" url="%s" title="%s">\n' % (self.id, self.revid, url, self.title)
else:
header = '<doc id="%s" url="%s" title="%s">\n' % (self.id, url, self.title)
if out == sys.stdout: # option -a or -o -
header = header.encode('utf-8')
out.write(header)
for line in text:
if line.strip() == "":
continue
if out == sys.stdout: # option -a or -o -
line = line.encode('utf-8')
out.write(line)
out.write('\n')
if not options.ignore_header:
footer = "\n</doc>\n"
out.write(footer)
def extract(self, out):
"""
:param out: a memory file.
"""
logging.info('%s\t%s', self.id, self.title)
# Separate header from text with a newline.
if options.toHTML:
title_str = '<h1>' + self.title + '</h1>'
else:
title_str = self.title + '\n'
# https://www.mediawiki.org/wiki/Help:Magic_words
colon = self.title.find(':')
if colon != -1:
ns = self.title[:colon]
pagename = self.title[colon + 1:]
else:
ns = '' # Main
pagename = self.title
self.magicWords['NAMESPACE'] = ns
self.magicWords['NAMESPACENUMBER'] = options.knownNamespaces.get(ns, '0')
self.magicWords['PAGENAME'] = pagename
self.magicWords['FULLPAGENAME'] = self.title
slash = pagename.rfind('/')
if slash != -1:
self.magicWords['BASEPAGENAME'] = pagename[:slash]
self.magicWords['SUBPAGENAME'] = pagename[slash + 1:]
else:
self.magicWords['BASEPAGENAME'] = pagename
self.magicWords['SUBPAGENAME'] = ''
slash = pagename.find('/')
if slash != -1:
self.magicWords['ROOTPAGENAME'] = pagename[:slash]
else:
self.magicWords['ROOTPAGENAME'] = pagename
self.magicWords['CURRENTYEAR'] = time.strftime('%Y')
self.magicWords['CURRENTMONTH'] = time.strftime('%m')
self.magicWords['CURRENTDAY'] = time.strftime('%d')
self.magicWords['CURRENTHOUR'] = time.strftime('%H')
self.magicWords['CURRENTTIME'] = time.strftime('%H:%M:%S')
text = self.text
self.text = '' # save memory
#
# @see https://doc.wikimedia.org/mediawiki-core/master/php/classParser.html
# This does the equivalent of internalParse():
#
# $dom = $this->preprocessToDom( $text, $flag );
# $text = $frame->expand( $dom );
#
text = self.transform(text)
text = self.wiki2text(text)
text = compact(self.clean(text))
text = [title_str] + text
if sum(len(line) for line in text) < options.min_text_length:
return
self.write_output(out, text)
errs = (self.template_title_errs,
self.recursion_exceeded_1_errs,
self.recursion_exceeded_2_errs,
self.recursion_exceeded_3_errs)
if any(errs):
logging.warning("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
self.title, self.id, *errs)
def transform(self, wikitext):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
# look for matching <nowiki>...</nowiki>
res = ''
cur = 0
for m in nowiki.finditer(wikitext, cur):
res += self.transform1(wikitext[cur:m.start()]) + wikitext[m.start():m.end()]
cur = m.end()
# leftover
res += self.transform1(wikitext[cur:])
return res
def transform1(self, text):
"""Transform text not containing <nowiki>"""
if options.expand_templates:
# expand templates
# See: http://www.mediawiki.org/wiki/Help:Templates
return self.expand(text)
else:
# Drop transclusions (template, parser functions)
return dropNested(text, r'{{', r'}}')
def wiki2text(self, text):
#
# final part of internalParse().
#
# $text = $this->doTableStuff( $text );
# $text = preg_replace( '/(^|\n)-----*/', '\\1<hr />', $text );
# $text = $this->doDoubleUnderscore( $text );
# $text = $this->doHeadings( $text );
# $text = $this->replaceInternalLinks( $text );
# $text = $this->doAllQuotes( $text );
# $text = $this->replaceExternalLinks( $text );
# $text = str_replace( self::MARKER_PREFIX . 'NOPARSE', '', $text );
# $text = $this->doMagicLinks( $text );
# $text = $this->formatHeadings( $text, $origText, $isMain );
# Drop tables
# first drop residual templates, or else empty parameter |} might look like end of table.
if not options.keep_tables:
text = dropNested(text, r'{{', r'}}')
text = dropNested(text, r'{\|', r'\|}')
# Handle bold/italic/quote
if options.toHTML:
text = bold_italic.sub(r'<b>\1</b>', text)
text = bold.sub(r'<b>\1</b>', text)
text = italic.sub(r'<i>\1</i>', text)
else:
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
# replace internal links
text = replaceInternalLinks(text)
# replace external links
text = replaceExternalLinks(text)
# drop MagicWords behavioral switches
text = magicWordsRE.sub('', text)
# ############### Process HTML ###############
# turn into HTML, except for the content of <syntaxhighlight>
res = ''
cur = 0
for m in syntaxhighlight.finditer(text):
res += unescape(text[cur:m.start()]) + m.group(1)
cur = m.end()
text = res + unescape(text[cur:])
return text
def clean(self, text):
"""
Removes irrelevant parts from :param: text.
"""
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in options.ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in options.discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
if not options.toHTML:
# Turn into text what is left (&nbsp;) and <syntaxhighlight>
text = unescape(text)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', '«').replace('>>', '»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(r' ([,:\.\)\]»])', r'\1', text)  # drop space before closing punctuation
text = re.sub(r'([\[\(«]) ', r'\1', text)  # drop space after opening punctuation
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
if options.keep_tables:
# the following regular expressions are used to remove the wiki markup characters around table
# structures yet keep the content. The order here is important: we remove certain markup like {| and
# then the HTML attributes such as 'style'. Finally we drop the remaining '|-' that delimits cells.
text = re.sub(r'!(?:\s)?style=\"[a-z]+:(?:\d+)%;\"', r'', text)
text = re.sub(r'!(?:\s)?style="[a-z]+:(?:\d+)%;[a-z]+:(?:#)?(?:[0-9a-z]+)?"', r'', text)
text = text.replace('|-', '')
text = text.replace('|', '')
if options.toHTML:
text = cgi.escape(text)
return text
# ----------------------------------------------------------------------
# Expand templates
maxTemplateRecursionLevels = 30
maxParameterRecursionLevels = 10
# check for template beginning
reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)
def expand(self, wikitext):
"""
:param wikitext: the text to be expanded.
Templates are frequently nested. Occasionally, parsing mistakes may
cause template insertion to enter an infinite loop, for instance when
trying to instantiate Template:Country
{{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}
which is repeatedly trying to insert template 'country_', which is
again resolved to Template:Country. The straightforward solution of
keeping track of templates that were already inserted for the current
article would not work, because the same template may legally be used
more than once, with different parameters in different parts of the
article. Therefore, we limit the number of iterations of nested
template inclusion.
"""
# Test template expansion at:
# https://en.wikipedia.org/wiki/Special:ExpandTemplates
# https://it.wikipedia.org/wiki/Speciale:EspandiTemplate
res = ''
if self.frame.depth >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_1_errs += 1
return res
# logging.debug('%*s<expand', self.frame.depth, '')
cur = 0
# look for matching {{...}}
for s, e in findMatchingBraces(wikitext, 2):
res += wikitext[cur:s] + self.expandTemplate(wikitext[s + 2:e - 2])
cur = e
# leftover
res += wikitext[cur:]
# logging.debug('%*sexpand> %s', self.frame.depth, '', res)
return res
def templateParams(self, parameters):
"""
Build a dictionary mapping positional or named keys to expanded parameters.
:param parameters: the parts[1:] of a template, i.e. all except the title.
"""
templateParams = {}
if not parameters:
return templateParams
# logging.debug('%*s<templateParams: %s', self.frame.length, '', '|'.join(parameters))
# Parameters can be either named or unnamed. In the latter case, their
# name is defined by their ordinal position (1, 2, 3, ...).
unnamedParameterCounter = 0
# It's legal for unnamed parameters to be skipped, in which case they
# will get default values (if available) during actual instantiation.
# That is {{template_name|a||c}} means parameter 1 gets
# the value 'a', parameter 2 value is not defined, and parameter 3 gets
# the value 'c'. This case is correctly handled by function 'split',
# and does not require any special handling.
for param in parameters:
# Spaces before or after a parameter value are normally ignored,
# UNLESS the parameter contains a link (to prevent possible gluing
# the link to the following text after template substitution)
# Parameter values may contain "=" symbols, hence the parameter
# name extends up to the first such symbol.
# It is legal for a parameter to be specified several times, in
# which case the last assignment takes precedence. Example:
# "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
# Therefore, we don't check if the parameter has been assigned a
# value before, because anyway the last assignment should override
# any previous ones.
# FIXME: Don't use DOTALL here since parameters may be tags with
# attributes, e.g. <div class="templatequotecite">
# Parameters may span several lines, like:
# {{Reflist|colwidth=30em|refs=
# <ref name="Goode">Title</ref>
# The '=' might occurr within an HTML attribute:
# "<ref name=value"
# but we stop at first.
m = re.match(' *([^=]*?) *?=(.*)', param, re.DOTALL)
if m:
# This is a named parameter. This case also handles parameter
# assignments like "2=xxx", where the number of an unnamed
# parameter ("2") is specified explicitly - this is handled
# transparently.
parameterName = m.group(1).strip()
parameterValue = m.group(2)
if ']]' not in parameterValue: # if the value does not contain a link, trim whitespace
parameterValue = parameterValue.strip()
templateParams[parameterName] = parameterValue
else:
# this is an unnamed parameter
unnamedParameterCounter += 1
if ']]' not in param: # if the value does not contain a link, trim whitespace
param = param.strip()
templateParams[str(unnamedParameterCounter)] = param
# logging.debug('%*stemplateParams> %s', self.frame.length, '', '|'.join(templateParams.values()))
return templateParams
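# Informal example (illustrative only): positional and named parameters,
# as in {{t|a|b|name=c}}:
#   self.templateParams(['a', 'b', 'name=c'])
#   ->  {'1': 'a', '2': 'b', 'name': 'c'}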
def expandTemplate(self, body):
"""Expands template invocation.
:param body: the parts of a template.
:see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
of the process.
See in particular: Expansion of names and values
http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values
For most parser functions all names and values are expanded,
regardless of what is relevant for the result. The branching functions
(#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.
All names in a template call are expanded, and the titles of the
tplargs in the template body, after which it is determined which
values must be expanded, and for which tplargs in the template body
the first part (default) [sic in the original doc page].
In the case of a tplarg, any parts beyond the first are never
expanded. The possible name and the value of the first part is
expanded if the title does not match a name in the template call.
:see code for braceSubstitution at
https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
"""
# template = "{{" parts "}}"
# Templates and tplargs are decomposed in the same way, with pipes as
# separator, even though eventually any parts in a tplarg after the first
# (the parameter default) are ignored, and an equals sign in the first
# part is treated as plain text.
# Pipes inside inner templates and tplargs, or inside double rectangular
# brackets within the template or tplargs are not taken into account in
# this decomposition.
# The first part is called title, the other parts are simply called parts.
# If a part has one or more equals signs in it, the first equals sign
# determines the division into name = value. Equals signs inside inner
# templates and tplargs, or inside double rectangular brackets within the
# part are not taken into account in this decomposition. Parts without
# equals sign are indexed 1, 2, .., given as attribute in the <name> tag.
if self.frame.depth >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_2_errs += 1
# logging.debug('%*sEXPAND> %s', self.frame.depth, '', body)
return ''
logging.debug('%*sEXPAND %s', self.frame.depth, '', body)
parts = splitParts(body)
# title is the portion before the first |
title = parts[0].strip()
title = self.expand(title)
# SUBST
# Apply the template tag to parameters without
# substituting into them, e.g.
# {{subst:t|a{{{p|q}}}b}} gives the wikitext start-a{{{p|q}}}b-end
# @see https://www.mediawiki.org/wiki/Manual:Substitution#Partial_substitution
subst = False
if re.match(substWords, title, re.IGNORECASE):
title = re.sub(substWords, '', title, 1, re.IGNORECASE)
subst = True
if title in self.magicWords.values:
ret = self.magicWords[title]
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, ret)
return ret
# Parser functions.
# For most parser functions all names and values are expanded,
# regardless of what is relevant for the result. The branching
# functions (#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are
# exceptions: for #if, #iferror, #ifexist, #ifexp, only the part that
# is applicable is expanded; for #ifeq the first and the applicable
# part are expanded; for #switch, expanded are the names up to and
# including the match (or all if there is no match), and the value in
# the case of a match or if there is no match, the default, if any.
# The first argument is everything after the first colon.
# It has been evaluated above.
colon = title.find(':')
if colon > 1:
funct = title[:colon]
parts[0] = title[colon + 1:].strip() # side-effect (parts[0] not used later)
# arguments after first are not evaluated
ret = callParserFunction(funct, parts, self)
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', funct, ret)
return ret
title = fullyQualifiedTemplateTitle(title)
if not title:
self.template_title_errs += 1
return ''
redirected = options.redirects.get(title)
if redirected:
title = redirected
# get the template
if title in options.templateCache:
template = options.templateCache[title]
elif title in options.templates:
template = Template.parse(options.templates[title])
# add it to cache
options.templateCache[title] = template
del options.templates[title]
else:
# The page being included could not be identified
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, '')
return ''
logging.debug('%*sTEMPLATE %s: %s', self.frame.depth, '', title, template)
# tplarg = "{{{" parts "}}}"
# parts = [ title *( "|" part ) ]
# part = ( part-name "=" part-value ) / ( part-value )
# part-name = wikitext-L3
# part-value = wikitext-L3
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment /
# xmlish-element / *wikitext-L3
# A tplarg may contain other parameters as well as templates, e.g.:
# {{{text|{{{quote|{{{1|{{error|Error: No text given}}}}}}}}}}}
# hence no simple RE like this would work:
# '{{{((?:(?!{{{).)*?)}}}'
# We must use full CF parsing.
# the parameter name itself might be computed, e.g.:
# {{{appointe{{#if:{{{appointer14|}}}|r|d}}14|}}}
# Because of the multiple uses of double-brace and triple-brace
# syntax, expressions can sometimes be ambiguous.
# Precedence rules specifed here:
# http://www.mediawiki.org/wiki/Preprocessor_ABNF#Ideal_precedence
# resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
#
# :see: https://en.wikipedia.org/wiki/Help:Template#Handling_parameters
params = parts[1:]
# Order of evaluation.
# Template parameters are fully evaluated before they are passed to the template.
# :see: https://www.mediawiki.org/wiki/Help:Templates#Order_of_evaluation
if not subst:
# Evaluate parameters, since they may contain templates, including
# the symbol "=".
# {{#ifexpr: {{{1}}} = 1 }}
params = [self.transform(p) for p in params]
# build a dict of name-values for the parameter values
params = self.templateParams(params)
# Perform parameter substitution.
# Extend frame before subst, since there may be recursion in default
# parameter value, e.g. {{OTRS|celebrative|date=April 2015}} in article
# 21637542 in enwiki.
self.frame = self.frame.push(title, params)
instantiated = template.subst(params, self)
value = self.transform(instantiated)
self.frame = self.frame.pop()
logging.debug('%*s<EXPAND %s %s', self.frame.depth, '', title, value)
return value
# ----------------------------------------------------------------------
# parameter handling
def splitParts(paramsList):
"""
:param paramsList: the parts of a template or tplarg.
Split template parameters at the top-level separator "|"; within a
parameter, the name is separated from the value by "=".
Template parameters often contain URLs, internal links, text or even
template expressions, since we evaluate templates outside in.
This is required for cases like:
{{#if: {{{1}}} | {{lc:{{{1}}} | "parameter missing"}}
Parameters are separated by "|" symbols. However, we
cannot simply split the string on "|" symbols, since these
also appear inside templates and internal links, e.g.
{{if:|
|{{#if:the president|
|{{#if:|
[[Category:Hatnote templates|A{{PAGENAME}}]]
}}
}}
}}
We split parts at the "|" symbols that are not inside any pair
{{{...}}}, {{...}}, [[...]], {|...|}.
"""
# Must consider '[' as normal in expansion of Template:EMedicine2:
# #ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}
# as part of:
# {{#ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}}} ped/180{{#if: |~}}]
# should handle both tpl arg like:
# 4|{{{{{subst|}}}CURRENTYEAR}}
# and tpl parameters like:
# ||[[Category:People|{{#if:A|A|{{PAGENAME}}}}]]
sep = '|'
parameters = []
cur = 0
for s, e in findMatchingBraces(paramsList):
par = paramsList[cur:s].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
elif not parameters:
parameters = [''] # create first param
# add span to last previous parameter
parameters[-1] += paramsList[s:e]
cur = e
# leftover
par = paramsList[cur:].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
# logging.debug('splitParts %s %s\nparams: %s', sep, paramsList, text_type(parameters))
return parameters
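# Informal example (illustrative only): a "|" inside [[...]] or {{{...}}}
# does not split:
#   splitParts('#if: {{{1}}} | [[a|b]] | c')
#   ->  ['#if: {{{1}}} ', ' [[a|b]] ', ' c']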
def findMatchingBraces(text, ldelim=0):
"""
:param ldelim: number of braces to match. 0 means match [[]], {{}} and {{{}}}.
"""
# Parsing is done with respect to pairs of double braces {{..}} delimiting
# a template, and pairs of triple braces {{{..}}} delimiting a tplarg.
# If double opening braces are followed by triple closing braces or
# conversely, this is taken as delimiting a template, with one left-over
# brace outside it, taken as plain text. For any pattern of braces this
# defines a set of templates and tplargs such that any two are either
# separate or nested (not overlapping).
# Unmatched double rectangular closing brackets can be in a template or
# tplarg, but unmatched double rectangular opening brackets cannot.
# Unmatched double or triple closing braces inside a pair of
# double rectangular brackets are treated as plain text.
# Other formulation: in ambiguity between template or tplarg on one hand,
# and a link on the other hand, the structure with the rightmost opening
# takes precedence, even if this is the opening of a link without any
# closing, so not producing an actual link.
# In the case of more than three opening braces the last three are assumed
# to belong to a tplarg, unless there is no matching triple of closing
# braces, in which case the last two opening braces are assumed to
# belong to a template.
# We must skip individual { like in:
# {{#ifeq: {{padleft:|1|}} | { | | }}
# We must resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|...}}
# {{{!}} {{!}}}
# Handle:
# {{{{{|safesubst:}}}#Invoke:String|replace|{{{1|{{{{{|safesubst:}}}PAGENAME}}}}}|%s+%([^%(]-%)$||plain=false}}
# as well as expressions with stray }:
# {{{link|{{ucfirst:{{{1}}}}}} interchange}}}
if ldelim: # 2-3
reOpen = re.compile('[{]{%d,}' % ldelim) # at least ldelim
reNext = re.compile('[{]{2,}|}{2,}') # at least 2
else:
reOpen = re.compile('{{2,}|\[{2,}')
reNext = re.compile('{{2,}|}{2,}|\[{2,}|]{2,}') # at least 2
cur = 0
while True:
m1 = reOpen.search(text, cur)
if not m1:
return
lmatch = m1.end() - m1.start()
if m1.group()[0] == '{':
stack = [lmatch] # stack of opening braces lengths
else:
stack = [-lmatch] # negative means [
end = m1.end()
while True:
m2 = reNext.search(text, end)
if not m2:
return # unbalanced
end = m2.end()
brac = m2.group()[0]
lmatch = m2.end() - m2.start()
if brac == '{':
stack.append(lmatch)
elif brac == '}':
while stack:
openCount = stack.pop() # opening span
if openCount == 0: # illegal unmatched [[
continue
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray }
break
else:
# put back unmatched
stack.append(openCount - lmatch)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
elif len(stack) == 1 and 0 < stack[0] < ldelim:
# ambiguous {{{{{ }}} }}
# yield m1.start() + stack[0], end
cur = end
break
elif brac == '[': # [[
stack.append(-lmatch)
else: # ]]
while stack and stack[-1] < 0: # matching [[
openCount = -stack.pop()
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray ]
break
else:
# put back unmatched (negative)
stack.append(lmatch - openCount)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
# unmatched ]] are discarded
cur = end
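# Informal example (illustrative only): only the outermost balanced pair
# is yielded:
#   text = 'a {{b {{c}} d}} e'
#   list(findMatchingBraces(text, 2))  ->  [(2, 15)]
#   text[2:15]                         ->  '{{b {{c}} d}}'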
def findBalanced(text, openDelim=['[['], closeDelim=[']]']):
"""
Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: an iterator producing pairs (start, end) of start and end
positions in text containing a balanced expression.
"""
openPat = '|'.join([re.escape(x) for x in openDelim])
# pattern for delimiters expected after each opening delimiter
afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in zip(openDelim, closeDelim)}
stack = []
start = 0
cur = 0
# end = len(text)
startSet = False
startPat = re.compile(openPat)
nextPat = startPat
while True:
next = nextPat.search(text, cur)
if not next:
return
if not startSet:
start = next.start()
startSet = True
delim = next.group(0)
if delim in openDelim:
stack.append(delim)
nextPat = afterPat[delim]
else:
opening = stack.pop()
# assert opening == openDelim[closeDelim.index(next.group(0))]
if stack:
nextPat = afterPat[stack[-1]]
else:
yield start, next.end()
nextPat = startPat
start = next.end()
startSet = False
cur = next.end()
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
def if_empty(*rest):
"""
This implements If_empty from English Wikipedia module:
<title>Module:If empty</title>
<ns>828</ns>
<text>local p = {}
function p.main(frame)
local args = require('Module:Arguments').getArgs(frame, {wrappers = 'Template:If empty', removeBlanks = false})
-- For backwards compatibility reasons, the first 8 parameters can be unset instead of being blank,
-- even though there's really no legitimate use case for this. At some point, this will be removed.
local lowestNil = math.huge
for i = 8,1,-1 do
if args[i] == nil then
args[i] = ''
lowestNil = i
end
end
for k,v in ipairs(args) do
if v ~= '' then
if lowestNil < k then
-- If any uses of this template depend on the behavior above, add them to a tracking category.
-- This is a rather fragile, convoluted, hacky way to do it, but it ensures that this module's output won't be modified
-- by it.
frame:extensionTag('ref', '[[Category:Instances of Template:If_empty missing arguments]]', {group = 'TrackingCategory'})
frame:extensionTag('references', '', {group = 'TrackingCategory'})
end
return v
end
end
end
return p </text>
"""
for arg in rest:
if arg:
return arg
return ''
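# Informal example (illustrative only): the first non-empty argument wins:
#   if_empty('', None, 'x', 'y')  ->  'x'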
# ----------------------------------------------------------------------
# String module emulation
# https://en.wikipedia.org/wiki/Module:String
def functionParams(args, vars):
"""
Build a dictionary of var/value from :param: args.
Parameters can be either named or unnamed. In the latter case, their
name is taken from :param: vars.
"""
params = {}
index = 1
for var in vars:
value = args.get(var)
if value is None:
value = args.get(str(index)) # positional argument
if value is None:
value = ''
else:
index += 1
params[var] = value
return params
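# Informal example (illustrative only): unnamed arguments fill the
# remaining names in order; missing values default to '':
#   functionParams({'1': 'abc', 'i': '2'}, ('s', 'i', 'j'))
#   ->  {'s': 'abc', 'i': '2', 'j': ''}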
def string_sub(args):
params = functionParams(args, ('s', 'i', 'j'))
s = params.get('s', '')
i = int(params.get('i', 1) or 1) # or handles case of '' value
j = int(params.get('j', -1) or -1)
if i > 0: i -= 1 # lua is 1-based
if j < 0: j += 1
if j == 0: j = len(s)
return s[i:j]
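# Informal example (illustrative only): indices follow Lua's 1-based
# convention, so {{#invoke:String|sub|abcdef|2|4}} yields:
#   string_sub({'s': 'abcdef', 'i': '2', 'j': '4'})  ->  'bcd'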
def string_sublength(args):
params = functionParams(args, ('s', 'i', 'len'))
s = params.get('s', '')
i = int(params.get('i', 1) or 1) - 1 # lua is 1-based
len = int(params.get('len', 1) or 1)
return s[i:i + len]
def string_len(args):
params = functionParams(args, ('s',))
s = params.get('s', '')
return len(s)
def string_find(args):
params = functionParams(args, ('source', 'target', 'start', 'plain'))
source = params.get('source', '')
pattern = params.get('target', '')
start = int('0' + params.get('start', 1)) - 1 # lua is 1-based
plain = int('0' + params.get('plain', 1))
if source == '' or pattern == '':
return 0
if plain:
return source.find(pattern, start) + 1 # lua is 1-based
else:
m = re.compile(pattern).search(source, start)
return m.start() + 1 if m else 0  # lua is 1-based; 0 means not found
def string_pos(args):
params = functionParams(args, ('target', 'pos'))
target = params.get('target', '')
pos = int(params.get('pos', 1) or 1)
if pos > 0:
pos -= 1 # The first character has an index value of 1
return target[pos]
def string_replace(args):
params = functionParams(args, ('source', 'pattern', 'replace', 'count', 'plain'))
source = params.get('source', '')
pattern = params.get('pattern', '')
replace = params.get('replace', '')
count = int(params.get('count', 0) or 0)
plain = int(params.get('plain', 1) or 1)
if plain:
if count:
return source.replace(pattern, replace, count)
else:
return source.replace(pattern, replace)
else:
return re.compile(pattern).sub(replace, source, count)
def string_rep(args):
params = functionParams(args, ('source', 'count'))
source = params.get('source', '')
count = int(params.get('count') or '1')  # functionParams yields '' for a missing count
return source * count
# ----------------------------------------------------------------------
# Module:Roman
# http://en.wikipedia.org/w/index.php?title=Module:Roman
# Modulo:Numero_romano
# https://it.wikipedia.org/wiki/Modulo:Numero_romano
def roman_main(args):
"""Convert first arg to roman numeral if <= 5000 else :return: second arg."""
num = int(float(args.get('1')))
# Return a message for numbers too big to be expressed in Roman numerals.
if 0 > num or num >= 5000:
return args.get('2', 'N/A')
def toRoman(n, romanNumeralMap):
"""convert integer to Roman numeral"""
result = ""
for integer, numeral in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
# Find the Roman numerals for numbers 4999 or less.
smallRomans = (
(1000, "M"),
(900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
(90, "XC"), (50, "L"), (40, "XL"), (10, "X"),
(9, "IX"), (5, "V"), (4, "IV"), (1, "I")
)
return toRoman(num, smallRomans)
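# Informal examples (illustrative only):
#   roman_main({'1': '1984'})              ->  'MCMLXXXIV'
#   roman_main({'1': '6000', '2': 'N/A'})  ->  'N/A'   (out of range)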
# ----------------------------------------------------------------------
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
},
'If empty': {
'main': if_empty
},
'String': {
'len': string_len,
'sub': string_sub,
'sublength': string_sublength,
'pos': string_pos,
'find': string_find,
'replace': string_replace,
'rep': string_rep,
},
'Roman': {
'main': roman_main
},
'Numero romano': {
'main': roman_main
}
}
# ----------------------------------------------------------------------
# variables
class MagicWords(object):
"""
One copy in each Extractor.
@see https://doc.wikimedia.org/mediawiki-core/master/php/MagicWord_8php_source.html
"""
names = [
'!',
'currentmonth',
'currentmonth1',
'currentmonthname',
'currentmonthnamegen',
'currentmonthabbrev',
'currentday',
'currentday2',
'currentdayname',
'currentyear',
'currenttime',
'currenthour',
'localmonth',
'localmonth1',
'localmonthname',
'localmonthnamegen',
'localmonthabbrev',
'localday',
'localday2',
'localdayname',
'localyear',
'localtime',
'localhour',
'numberofarticles',
'numberoffiles',
'numberofedits',
'articlepath',
'pageid',
'sitename',
'server',
'servername',
'scriptpath',
'stylepath',
'pagename',
'pagenamee',
'fullpagename',
'fullpagenamee',
'namespace',
'namespacee',
'namespacenumber',
'currentweek',
'currentdow',
'localweek',
'localdow',
'revisionid',
'revisionday',
'revisionday2',
'revisionmonth',
'revisionmonth1',
'revisionyear',
'revisiontimestamp',
'revisionuser',
'revisionsize',
'subpagename',
'subpagenamee',
'talkspace',
'talkspacee',
'subjectspace',
'subjectspacee',
'talkpagename',
'talkpagenamee',
'subjectpagename',
'subjectpagenamee',
'numberofusers',
'numberofactiveusers',
'numberofpages',
'currentversion',
'rootpagename',
'rootpagenamee',
'basepagename',
'basepagenamee',
'currenttimestamp',
'localtimestamp',
'directionmark',
'contentlanguage',
'numberofadmins',
'cascadingsources',
]
def __init__(self):
self.values = {'!': '|'}
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
self.values[name] = value
switches = (
'__NOTOC__',
'__FORCETOC__',
'__TOC__',
'__NEWSECTIONLINK__',
'__NONEWSECTIONLINK__',
'__NOGALLERY__',
'__HIDDENCAT__',
'__NOCONTENTCONVERT__',
'__NOCC__',
'__NOTITLECONVERT__',
'__NOTC__',
'__START__',
'__END__',
'__INDEX__',
'__NOINDEX__',
'__STATICREDIRECT__',
'__DISAMBIG__'
)
magicWordsRE = re.compile('|'.join(MagicWords.switches))
# ----------------------------------------------------------------------
# parser functions utilities
def ucfirst(string):
""":return: a string with just its first character uppercase
We can't use title() since it converts all words.
"""
if string:
return string[0].upper() + string[1:]
else:
return ''
def lcfirst(string):
""":return: a string with its first character lowercase"""
if string:
if len(string) > 1:
return string[0].lower() + string[1:]
else:
return string.lower()
else:
return ''
def fullyQualifiedTemplateTitle(templateTitle):
"""
Determine the namespace of the page being included through the template
mechanism
"""
if templateTitle.startswith(':'):
# Leading colon by itself implies main namespace, so strip this colon
return ucfirst(templateTitle[1:])
else:
m = re.match('([^:]*)(:.*)', templateTitle)
if m:
# colon found but not in the first position - check if it
# designates a known namespace
prefix = normalizeNamespace(m.group(1))
if prefix in options.knownNamespaces:
return prefix + ucfirst(m.group(2))
# The title of the page being included is NOT in the main namespace and
# lacks any other explicit designation of the namespace - therefore, it
# is resolved to the Template namespace (that's the default for the
# template inclusion mechanism).
# This is a defense against pages whose title only contains UTF-8 chars
# that are reduced to an empty string. Right now I can think of one such
# case - <C2><A0> which represents the non-breaking space.
# In this particular case, this page is a redirect to [[Non-breaking
# space]], but having in the system a redirect page with an empty title
# causes numerous problems, so we'll live happier without it.
if templateTitle:
return options.templatePrefix + ucfirst(templateTitle)
else:
return '' # caller may log as error
def normalizeNamespace(ns):
return ucfirst(ns)
# ----------------------------------------------------------------------
# Parser functions
# see http://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
# https://github.com/Wikia/app/blob/dev/extensions/ParserFunctions/ParserFunctions_body.php
class Infix:
"""Infix operators.
The calling sequence for the infix is:
x |op| y
"""
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
ROUND = Infix(lambda x, y: round(x, y))
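# Informal example (illustrative only): sharp_expr() rewrites Lua's
# "round" into this infix form:
#   3.14159 |ROUND| 2  ->  3.14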
from math import floor, ceil, pi, e, trunc, exp, log as ln, sin, cos, tan, asin, acos, atan
def sharp_expr(extr, expr):
"""Tries converting a lua expr into a Python expr."""
try:
expr = extr.expand(expr)
expr = re.sub('(?<![!<>])=', '==', expr) # negative lookbehind
expr = re.sub('mod', '%', expr) # no \b here
expr = re.sub(r'\bdiv\b', '/', expr)  # raw strings: '\b' in a plain string is a backspace
expr = re.sub(r'\bround\b', '|ROUND|', expr)
return text_type(eval(expr))
except:
return '<span class="error">%s</span>' % expr
def sharp_if(extr, testValue, valueIfTrue, valueIfFalse=None, *args):
# In theory, we should evaluate the first argument here,
# but it was evaluated while evaluating part[0] in expandTemplate().
if testValue.strip():
# The {{#if:}} function is an if-then-else construct.
# The applied condition is: "The condition string is non-empty".
valueIfTrue = extr.expand(valueIfTrue.strip()) # eval
if valueIfTrue:
return valueIfTrue
elif valueIfFalse:
return extr.expand(valueIfFalse.strip()) # eval
return ""
def sharp_ifeq(extr, lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
rvalue = rvalue.strip()
if rvalue:
# lvalue is always evaluated
if lvalue.strip() == rvalue:
# The {{#ifeq:}} function is an if-then-else construct. The
# applied condition is "is rvalue equal to lvalue". Note that this
# does only string comparison while MediaWiki implementation also
# supports numerical comparissons.
if valueIfTrue:
return extr.expand(valueIfTrue.strip())
else:
if valueIfFalse:
return extr.expand(valueIfFalse.strip())
return ""
def sharp_iferror(extr, test, then='', Else=None, *args):
if re.match('<(?:strong|span|p|div)\s(?:[^\s>]*\s+)*?class="(?:[^"\s>]*\s+)*?error(?:\s[^">]*)?"', test):
return extr.expand(then.strip())
elif Else is None:
return test.strip()
else:
return extr.expand(Else.strip())
def sharp_switch(extr, primary, *params):
# FIXME: we don't support numeric expressions in primary
# {{#switch: comparison string
# | case1 = result1
# | case2
# | case4 = result2
# | 1 | case5 = result3
# | #default = result4
# }}
primary = primary.strip()
found = False # for fall through cases
default = None
rvalue = None
lvalue = ''
for param in params:
# handle cases like:
# #default = [http://www.perseus.tufts.edu/hopper/text?doc=Perseus...]
pair = param.split('=', 1)
lvalue = extr.expand(pair[0].strip())
rvalue = None
if len(pair) > 1:
# got "="
rvalue = extr.expand(pair[1].strip())
# check for any of multiple values pipe separated
if found or primary in [v.strip() for v in lvalue.split('|')]:
# Found a match, return now
return rvalue
elif lvalue == '#default':
default = rvalue
rvalue = None # avoid defaulting to last case
elif lvalue == primary:
# If the value matches, set a flag and continue
found = True
# Default case
# Check if the last item had no = sign, thus specifying the default case
if rvalue is not None:
return lvalue
elif default is not None:
return default
return ''
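# Informal example (illustrative only), given an Extractor instance extr:
#   sharp_switch(extr, 'b', 'a=1', 'b=2', '#default=9')  ->  '2'
#   sharp_switch(extr, 'z', 'a=1', 'b=2', '#default=9')  ->  '9'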
# Extension Scribunto: https://www.mediawiki.org/wiki/Extension:Scribunto
def sharp_invoke(module, function, args):
functions = modules.get(module)
if functions:
funct = functions.get(function)
if funct:
return text_type(funct(args))
return ''
parserFunctions = {
'#expr': sharp_expr,
'#if': sharp_if,
'#ifeq': sharp_ifeq,
'#iferror': sharp_iferror,
'#ifexpr': lambda *args: '', # not supported
'#ifexist': lambda extr, title, ifex, ifnex: extr.expand(ifnex), # assuming title is not present
'#rel2abs': lambda *args: '', # not supported
'#switch': sharp_switch,
'#language': lambda *args: '', # not supported
'#time': lambda *args: '', # not supported
'#timel': lambda *args: '', # not supported
'#titleparts': lambda *args: '', # not supported
# This function is used in some pages to construct links
# http://meta.wikimedia.org/wiki/Help:URL
'urlencode': lambda extr, string, *rest: quote(string.encode('utf-8')),
'lc': lambda extr, string, *rest: string.lower() if string else '',
'lcfirst': lambda extr, string, *rest: lcfirst(string),
'uc': lambda extr, string, *rest: string.upper() if string else '',
'ucfirst': lambda extr, string, *rest: ucfirst(string),
'int': lambda extr, string, *rest: text_type(int(string)),
}
def callParserFunction(functionName, args, extractor):
"""
Parser functions have similar syntax as templates, except that
the first argument is everything after the first colon.
:return: the result of the invocation, None in case of failure.
:param: args not yet expanded (see branching functions).
https://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
"""
try:
# https://it.wikipedia.org/wiki/Template:Str_endswith has #Invoke
functionName = functionName.lower()
if functionName == '#invoke':
module, fun = args[0].strip(), args[1].strip()
logging.debug('%*s#invoke %s %s %s', extractor.frame.depth, '', module, fun, args[2:])
# special handling of frame
if len(args) == 2:
# find parameters in frame whose title is the one of the original
# template invocation
templateTitle = fullyQualifiedTemplateTitle(module)
if not templateTitle:
logging.warn("Template with empty title")
params = None
frame = extractor.frame
while frame:
if frame.title == templateTitle:
params = frame.args
break
frame = frame.prev
else:
params = [extractor.transform(p) for p in args[2:]] # evaluates them
params = extractor.templateParams(params)
ret = sharp_invoke(module, fun, params)
logging.debug('%*s<#invoke %s %s %s', extractor.frame.depth, '', module, fun, ret)
return ret
if functionName in parserFunctions:
# branching functions use the extractor to selectively evaluate args
return parserFunctions[functionName](extractor, *args)
except:
return "" # FIXME: fix errors
return ""
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expand(text):
# """Expand templates invoking MediaWiki API"""
# text = urllib.urlencode(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ----------------------------------------------------------------------
# Extract Template definition
reNoinclude = re.compile(r'<noinclude>(?:.*?)</noinclude>', re.DOTALL)
reIncludeonly = re.compile(r'<includeonly>|</includeonly>', re.DOTALL)
def define_template(title, page):
"""
Adds a template defined in the :param page:.
@see https://en.wikipedia.org/wiki/Help:Template#Noinclude.2C_includeonly.2C_and_onlyinclude
"""
# title = normalizeTitle(title)
# sanity check (empty template, e.g. Template:Crude Oil Prices))
if not page: return
# check for redirects
m = re.match('#REDIRECT.*?\[\[([^\]]*)]]', page[0], re.IGNORECASE)
if m:
options.redirects[title] = m.group(1) # normalizeTitle(m.group(1))
return
text = unescape(''.join(page))
# We're storing template text for future inclusion, therefore,
# remove all <noinclude> text and keep all <includeonly> text
# (but eliminate <includeonly> tags per se).
# However, if <onlyinclude> ... </onlyinclude> parts are present,
# then only keep them and discard the rest of the template body.
# This is because using <onlyinclude> on a text fragment is
# equivalent to enclosing it in <includeonly> tags **AND**
# enclosing all the rest of the template body in <noinclude> tags.
# remove comments
text = comment.sub('', text)
# eliminate <noinclude> fragments
text = reNoinclude.sub('', text)
# eliminate unterminated <noinclude> elements
text = re.sub(r'<noinclude\s*>.*$', '', text, flags=re.DOTALL)
text = re.sub(r'<noinclude/>', '', text)
onlyincludeAccumulator = ''
for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
onlyincludeAccumulator += m.group(1)
if onlyincludeAccumulator:
text = onlyincludeAccumulator
else:
text = reIncludeonly.sub('', text)
if text:
if title in options.templates:
logging.warning('Redefining: %s', title)
options.templates[title] = text
# ----------------------------------------------------------------------
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
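# Informal example (illustrative only): nested delimiters are dropped as
# one block:
#   dropNested('a {{b {{c}} d}} e', r'{{', r'}}')  ->  'a  e'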
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
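# Informal example (illustrative only):
#   dropSpans([(2, 5), (8, 10)], '0123456789')  ->  '01567'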
# ----------------------------------------------------------------------
# WikiLinks
# May be nested [[File:..|..[[..]]..|..]], [[Category:...]], etc.
# Also: [[Help:IPA for Catalan|[andora]]]
def replaceInternalLinks(text):
"""
Replaces internal links of the form:
[[title |...|label]]trail
with title concatenated with trail, when present, e.g. 's' for plural.
See https://www.mediawiki.org/wiki/Help:Links#Internal_links
"""
# call this after removal of external links, so we need not worry about
# triple closing ]]].
cur = 0
res = ''
for s, e in findBalanced(text):
m = tailRE.match(text, e)
if m:
trail = m.group(0)
end = m.end()
else:
trail = ''
end = e
inner = text[s + 2:e - 2]
# find first |
pipe = inner.find('|')
if pipe < 0:
title = inner
label = title
else:
title = inner[:pipe].rstrip()
# find last |
curp = pipe + 1
for s1, e1 in findBalanced(inner):
last = inner.rfind('|', curp, s1)
if last >= 0:
pipe = last # advance
curp = e1
label = inner[pipe + 1:].strip()
res += text[cur:s] + makeInternalLink(title, label) + trail
cur = end
return res + text[cur:]
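# Informal example (illustrative only), assuming options.keepLinks is off:
# the label is kept and the plural trail is glued on:
#   replaceInternalLinks('See [[phrase|label]]s.')  ->  'See labels.'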
# the official version is a method in class Parser, similar to this:
# def replaceInternalLinks2(text):
# global wgExtraInterlanguageLinkPrefixes
# # the % is needed to support urlencoded titles as well
# tc = Title::legalChars() + '#%'
# # Match a link having the form [[namespace:link|alternate]]trail
# e1 = re.compile("([%s]+)(?:\\|(.+?))?]](.*)" % tc, re.S | re.D)
# # Match cases where there is no "]]", which might still be images
# e1_img = re.compile("([%s]+)\\|(.*)" % tc, re.S | re.D)
# holders = LinkHolderArray(self)
# # split the entire text string on occurrences of [[
# iterBrackets = re.compile('[[').finditer(text)
# m in iterBrackets.next()
# # get the first element (all text up to first [[)
# s = text[:m.start()]
# cur = m.end()
# line = s
# useLinkPrefixExtension = self.getTargetLanguage().linkPrefixExtension()
# e2 = None
# if useLinkPrefixExtension:
# # Match the end of a line for a word that is not followed by whitespace,
# # e.g. in the case of "The Arab al[[Razi]]", "al" will be matched
# global wgContLang
# charset = wgContLang.linkPrefixCharset()
# e2 = re.compile("((?>.*[^charset]|))(.+)", re.S | re.D | re.U)
# if self.mTitle is None:
# raise MWException(__METHOD__ + ": \self.mTitle is null\n")
# nottalk = not self.mTitle.isTalkPage()
# if useLinkPrefixExtension:
# m = e2.match(s)
# if m:
# first_prefix = m.group(2)
# else:
# first_prefix = false
# else:
# prefix = ''
# useSubpages = self.areSubpagesAllowed()
# for m in iterBrackets:
# line = text[cur:m.start()]
# cur = m.end()
# # TODO: Check for excessive memory usage
# if useLinkPrefixExtension:
# m = e2.match(e2)
# if m:
# prefix = m.group(2)
# s = m.group(1)
# else:
# prefix = ''
# # first link
# if first_prefix:
# prefix = first_prefix
# first_prefix = False
# might_be_img = False
# m = e1.match(line)
# if m: # page with normal label or alt
# label = m.group(2)
# # If we get a ] at the beginning of m.group(3) that means we have a link that is something like:
# # [[Image:Foo.jpg|[http://example.com desc]]] <- having three ] in a row fucks up,
# # the real problem is with the e1 regex
# # See bug 1300.
# #
# # Still some problems for cases where the ] is meant to be outside punctuation,
# # and no image is in sight. See bug 2095.
# #
# if label and m.group(3)[0] == ']' and '[' in label:
# label += ']' # so that replaceExternalLinks(label) works later
# m.group(3) = m.group(3)[1:]
# # fix up urlencoded title texts
# if '%' in m.group(1):
# # Should anchors '#' also be rejected?
# m.group(1) = str_replace(array('<', '>'), array('<', '>'), rawurldecode(m.group(1)))
# trail = m.group(3)
# else:
# m = e1_img.match(line):
# if m:
# # Invalid, but might be an image with a link in its caption
# might_be_img = true
# label = m.group(2)
# if '%' in m.group(1):
# m.group(1) = rawurldecode(m.group(1))
# trail = ""
# else: # Invalid form; output directly
# s += prefix + '[[' + line
# continue
# origLink = m.group(1)
# # Dont allow internal links to pages containing
# # PROTO: where PROTO is a valid URL protocol these
# # should be external links.
# if (preg_match('/^(?i:' + self.mUrlProtocols + ')/', origLink)) {
# s += prefix + '[[' + line
# continue
# }
# # Make subpage if necessary
# if useSubpages:
# link = self.maybeDoSubpageLink(origLink, label)
# else:
# link = origLink
# noforce = origLink[0] != ':'
# if not noforce:
# # Strip off leading ':'
# link = link[1:]
# nt = Title::newFromText(self.mStripState.unstripNoWiki(link))
# if nt is None:
# s += prefix + '[[' + line
# continue
# ns = nt.getNamespace()
# iw = nt.getInterwiki()
# if might_be_img { # if this is actually an invalid link
# if (ns == NS_FILE and noforce) { # but might be an image
# found = False
# while True:
# # look at the next 'line' to see if we can close it there
# next_line = iterBrakets.next()
# if not next_line:
# break
# m = explode(']]', next_line, 3)
# if m.lastindex == 3:
# # the first ]] closes the inner link, the second the image
# found = True
# label += "[[%s]]%s" % (m.group(0), m.group(1))
# trail = m.group(2)
# break
# elif m.lastindex == 2:
# # if there is exactly one ]] that is fine, we will keep looking
# label += "[[{m[0]}]]{m.group(1)}"
# else:
# # if next_line is invalid too, we need look no further
# label += '[[' + next_line
# break
# if not found:
# # we couldnt find the end of this imageLink, so output it raw
# # but dont ignore what might be perfectly normal links in the text we ve examined
# holders.merge(self.replaceInternalLinks2(label))
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# } else: # it is not an image, so output it raw
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# }
# wasblank = (text == '')
# if wasblank:
# text = link
# else:
# # Bug 4598 madness. Handle the quotes only if they come from the alternate part
# # [[Lista d''e paise d''o munno]] . <a href="...">Lista d''e paise d''o munno</a>
# # [[Criticism of Harry Potter|Criticism of ''Harry Potter'']]
# # . <a href="Criticism of Harry Potter">Criticism of <i>Harry Potter</i></a>
# text = self.doQuotes(text)
# # Link not escaped by : , create the various objects
# if noforce and not nt.wasLocalInterwiki():
# # Interwikis
# if iw and mOptions.getInterwikiMagic() and nottalk and (
# Language::fetchLanguageName(iw, None, 'mw') or
# in_array(iw, wgExtraInterlanguageLinkPrefixes)):
# # Bug 24502: filter duplicates
# if iw not in mLangLinkLanguages:
# self.mLangLinkLanguages[iw] = True
# self.mOutput.addLanguageLink(nt.getFullText())
# s = rstrip(s + prefix)
# s += strip(trail, "\n") == '' ? '': prefix + trail
# continue
# if ns == NS_FILE:
# if not wfIsBadImage(nt.getDBkey(), self.mTitle):
# if wasblank:
# # if no parameters were passed, text
# # becomes something like "File:Foo.png",
# # which we dont want to pass on to the
# # image generator
# text = ''
# else:
# # recursively parse links inside the image caption
# # actually, this will parse them in any other parameters, too,
# # but it might be hard to fix that, and it doesnt matter ATM
# text = self.replaceExternalLinks(text)
# holders.merge(self.replaceInternalLinks2(text))
# # cloak any absolute URLs inside the image markup, so replaceExternalLinks() wont touch them
# s += prefix + self.armorLinks(
# self.makeImage(nt, text, holders)) + trail
# else:
# s += prefix + trail
# continue
# if ns == NS_CATEGORY:
# s = rstrip(s + "\n") # bug 87
# if wasblank:
# sortkey = self.getDefaultSort()
# else:
# sortkey = text
# sortkey = Sanitizer::decodeCharReferences(sortkey)
# sortkey = str_replace("\n", '', sortkey)
# sortkey = self.getConverterLanguage().convertCategoryKey(sortkey)
# self.mOutput.addCategory(nt.getDBkey(), sortkey)
# s += strip(prefix + trail, "\n") == '' ? '' : prefix + trail
# continue
# }
# }
# # Self-link checking. For some languages, variants of the title are checked in
# # LinkHolderArray::doVariants() to allow batching the existence checks necessary
# # for linking to a different variant.
# if ns != NS_SPECIAL and nt.equals(self.mTitle) and !nt.hasFragment():
# s += prefix + Linker::makeSelfLinkObj(nt, text, '', trail)
# continue
# # NS_MEDIA is a pseudo-namespace for linking directly to a file
# # @todo FIXME: Should do batch file existence checks, see comment below
# if ns == NS_MEDIA:
# # Give extensions a chance to select the file revision for us
# options = []
# descQuery = False
# Hooks::run('BeforeParserFetchFileAndTitle',
# [this, nt, &options, &descQuery])
# # Fetch and register the file (file title may be different via hooks)
# file, nt = self.fetchFileAndTitle(nt, options)
# # Cloak with NOPARSE to avoid replacement in replaceExternalLinks
# s += prefix + self.armorLinks(
# Linker::makeMediaLinkFile(nt, file, text)) + trail
# continue
# # Some titles, such as valid special pages or files in foreign repos, should
# # be shown as bluelinks even though they are not included in the page table
# #
# # @todo FIXME: isAlwaysKnown() can be expensive for file links; we should really do
# # batch file existence checks for NS_FILE and NS_MEDIA
# if iw == '' and nt.isAlwaysKnown():
# self.mOutput.addLink(nt)
# s += self.makeKnownLinkHolder(nt, text, array(), trail, prefix)
# else:
# # Links will be added to the output link list after checking
# s += holders.makeHolder(nt, text, array(), trail, prefix)
# }
# return holders
def makeInternalLink(title, label):
colon = title.find(':')
if colon > 0 and title[:colon] not in options.acceptedNamespaces:
return ''
if colon == 0:
# drop also :File:
colon2 = title.find(':', colon + 1)
if colon2 > 1 and title[colon + 1:colon2] not in options.acceptedNamespaces:
return ''
if options.keepLinks:
return '<a href="%s">%s</a>' % (quote(title.encode('utf-8')), label)
else:
return label
# ----------------------------------------------------------------------
# External links
# from: https://doc.wikimedia.org/mediawiki-core/master/php/DefaultSettings_8php_source.html
wgUrlProtocols = [
'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://',
'https://', 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:',
'nntp://', 'redis://', 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://',
'svn://', 'tel:', 'telnet://', 'urn:', 'worldwind://', 'xmpp:', '//'
]
# from: https://doc.wikimedia.org/mediawiki-core/master/php/Parser_8php_source.html
# Constants needed for external link processing
# Everything except bracket, space, or control characters
# \p{Zs} is unicode 'separator, space' category. It covers the space 0x20
# as well as U+3000 (IDEOGRAPHIC SPACE), for bug 19052
EXT_LINK_URL_CLASS = r'[^][<>"\x00-\x20\x7F\s]'
ANCHOR_CLASS = r'[^][\x00-\x08\x0a-\x1F]'
ExtLinkBracketedRegex = re.compile(
r'\[((' + '|'.join(wgUrlProtocols) + ')' + EXT_LINK_URL_CLASS + r'+)' +
r'\s*((?:' + ANCHOR_CLASS + r'|\[\[' + ANCHOR_CLASS + r'+\]\])' + r'*?)\]',
re.S | re.U | re.I)  # re.I replaces the inline (?i), which newer Python requires at pattern start
# A simpler alternative:
# ExtLinkBracketedRegex = re.compile(r'\[(.*?)\](?!])')
EXT_IMAGE_REGEX = re.compile(
r"""^(http://|https://)([^][<>"\x00-\x20\x7F\s]+)
/([A-Za-z0-9_.,~%\-+&;#*?!=()@\x80-\xFF]+)\.(gif|png|jpg|jpeg)$""",
re.X | re.S | re.U | re.I)  # re.I replaces the inline (?i)
def replaceExternalLinks(text):
"""
https://www.mediawiki.org/wiki/Help:Links#External_links
[URL anchor text]
"""
s = ''
cur = 0
for m in ExtLinkBracketedRegex.finditer(text):
s += text[cur:m.start()]
cur = m.end()
url = m.group(1)
label = m.group(3)
# # The characters '<' and '>' (which were escaped by
# # removeHTMLtags()) should not be included in
# # URLs, per RFC 2396.
# m2 = re.search('&(lt|gt);', url)
# if m2:
# link = url[m2.end():] + ' ' + link
# url = url[0:m2.end()]
# If the link text is an image URL, replace it with an <img> tag
# This happened by accident in the original parser, but some people used it extensively
m = EXT_IMAGE_REGEX.match(label)
if m:
label = makeExternalImage(label)
# Use the encoded URL
# This means that users can paste URLs directly into the text
# Funny characters like ö aren't valid in URLs anyway
# This was changed in August 2004
s += makeExternalLink(url, label) # + trail
return s + text[cur:]
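# Informal example (illustrative only), assuming options.keepLinks is off:
#   replaceExternalLinks('Visit [http://example.com Example] now')
#   ->  'Visit Example now'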
def makeExternalLink(url, anchor):
"""Function applied to wikiLinks"""
if options.keepLinks:
return '<a href="%s">%s</a>' % (quote(url.encode('utf-8')), anchor)
else:
return anchor
def makeExternalImage(url, alt=''):
if options.keepLinks:
return '<img src="%s" alt="%s">' % (url, alt)
else:
return alt
# ----------------------------------------------------------------------
# match tail after wikilink
tailRE = re.compile(r'\w+')
syntaxhighlight = re.compile('<syntaxhighlight .*?>(.*?)</syntaxhighlight>', re.DOTALL)
# skip level 1, it is page name level
section = re.compile(r'(==+)\s*(.*?)\s*\1')
listOpen = {'*': '<ul>', '#': '<ol>', ';': '<dl>', ':': '<dl>'}
listClose = {'*': '</ul>', '#': '</ol>', ';': '</dl>', ':': '</dl>'}
listItem = {'*': '<li>%s</li>', '#': '<li>%s</li>', ';': '<dt>%s</dt>',
':': '<dd>%s</dd>'}
def compact(text):
"""Deal with headers, lists, empty sections, residuals of tables.
:param text: the text to compact; rendered as HTML when options.toHTML is set.
"""
page = [] # list of paragraph
headers = {} # Headers for unfilled sections
emptySection = False # empty sections are discarded
listLevel = [] # nesting of lists
listCount = [] # item count per list level (always the same length as listLevel)
for line in text.split('\n'):
if not line: # collapse empty lines
# if there is an opening list, close it if we see an empty line
if len(listLevel):
page.append(line)
if options.toHTML:
for c in reversed(listLevel):
page.append(listClose[c])
listLevel = []
listCount = []
emptySection = False
elif page and page[-1]:
page.append('')
continue
# Handle section titles
m = section.match(line)
if m:
title = m.group(2)
lev = len(m.group(1)) # header level
if options.toHTML:
page.append("<h%d>%s</h%d>" % (lev, title, lev))
if title and title[-1] not in '!?':
title += '.' # terminate sentence.
headers[lev] = title
# drop previous headers
for i in list(headers.keys()):
if i > lev:
del headers[i]
emptySection = True
listLevel = []
listCount = []
continue
# Handle page title
elif line.startswith('++'):
title = line[2:-2]
if title:
if title[-1] not in '!?':
title += '.'
page.append(title)
# handle indents
elif line[0] == ':':
# page.append(line.lstrip(':*#;'))
continue
# handle lists
elif line[0] in '*#;:':
i = 0
# c: current level char
# n: next level char
for c, n in zip_longest(listLevel, line, fillvalue=''):
if not n or n not in '*#;:': # shorter or different
if c:
if options.toHTML:
page.append(listClose[c])
listLevel = listLevel[:-1]
listCount = listCount[:-1]
continue
else:
break
# n != ''
if c != n and (not c or (c not in ';:' and n not in ';:')):
if c:
# close level
if options.toHTML:
page.append(listClose[c])
listLevel = listLevel[:-1]
listCount = listCount[:-1]
listLevel += n
listCount.append(0)
if options.toHTML:
page.append(listOpen[n])
i += 1
n = line[i - 1] # last list char
line = line[i:].strip()
if line: # FIXME: n is '"'
if options.keepLists:
if options.keepSections:
# emit open sections
items = sorted(headers.items())
for _, v in items:
page.append(v)
headers.clear()
# use item count for #-lines
listCount[i - 1] += 1
bullet = '%d. ' % listCount[i - 1] if n == '#' else '- '
page.append('{0:{1}s}'.format(bullet, len(listLevel)) + line)
elif options.toHTML:
page.append(listItem[n] % line)
elif len(listLevel):
if options.toHTML:
for c in reversed(listLevel):
page.append(listClose[c])
listLevel = []
listCount = []
page.append(line)
# Drop residuals of lists
elif line[0] in '{|' or line[-1] == '}':
continue
# Drop irrelevant lines
elif (line[0] == '(' and line[-1] == ')') or line.strip('.-') == '':
continue
elif len(headers):
if options.keepSections:
items = sorted(headers.items())
for i, v in items:
page.append(v)
headers.clear()
page.append(line) # first line
emptySection = False
elif not emptySection:
# Drop preformatted
if line[0] != ' ': # dangerous
page.append(line)
return page
def handle_unicode(entity):
numeric_code = int(entity[2:-1])
if numeric_code >= 0x10000: return ''
return chr(numeric_code)
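# Illustrative check: handle_unicode('&#65;') takes entity[2:-1] == '65' and
# returns chr(65) == 'A'; code points at or above 0x10000 (outside the BMP)
# are dropped as ''.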
# ------------------------------------------------------------------------------
# Output
class NextFile(object):
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def __next__(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
next = __next__
def _dirname(self):
char1 = self.dir_index % 26
char2 = self.dir_index // 26 % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
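    # Illustrative layout: dir_index 0 maps to 'AA', 25 to 'AZ', 26 to 'BA';
    # within each directory, files run wiki_00 .. wiki_99 before rolling over.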
class OutputSplitter(object):
"""
File-like object, that splits output to multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
:para compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(next(self.nextFile))
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(next(self.nextFile))
def write(self, data):
self.reserve(len(data))
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'wb')
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*?>(?:([^<]*)(<.*?>)?)?')
# 1 2 3 4
keyRE = re.compile(r'key="(\d*)"')
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
options.templatePrefix = options.templateNamespace + ':'
options.modulePrefix = options.moduleNamespace + ':'
if output_file:
output = codecs.open(output_file, 'wb', 'utf-8')
for page_count, page_data in enumerate(pages_from(file)):
id, revid, title, ns, page = page_data
if not output_file and (not options.templateNamespace or
not options.moduleNamespace): # do not know it yet
# reconstruct templateNamespace and moduleNamespace from the first title
if ns in templateKeys:
colon = title.find(':')
if colon > 1:
if ns == '10':
options.templateNamespace = title[:colon]
options.templatePrefix = title[:colon + 1]
elif ns == '828':
options.moduleNamespace = title[:colon]
options.modulePrefix = title[:colon + 1]
if ns in templateKeys:
text = ''.join(page)
define_template(title, text)
# save templates and modules to file
if output_file:
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>%s</ns>\n' % ns)
output.write(' <id>%s</id>\n' % id)
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
if page_count and page_count % 100000 == 0:
logging.info("Preprocessed %d pages", page_count)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", len(options.templates), output_file)
def pages_from(input):
"""
Scans input extracting pages.
:return: (id, revid, title, namespace key, page), page is a list of lines.
"""
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = None
ns = '0'
last_id = None
revid = None
inText = False
redirect = False
title = None
for line in input:
if not isinstance(line, text_type): line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'id' and id:
revid = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'ns':
ns = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
if m.lastindex == 3 and line[m.start(3) - 2] == '/': # self closing
# <text xml:space="preserve" />
continue
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if id != last_id and not redirect:
yield (id, revid, title, ns, page)
last_id = id
ns = '0'
id = None
revid = None
title = None
page = []
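# Sketch of intended use (hypothetical dump handle, not part of this script):
# for id, revid, title, ns, page in pages_from(open('dump.xml')):
#     print(title, len(page))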
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
if input_file == '-':
input = sys.stdin
else:
input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
# collect siteinfo
for line in input:
# When an input file is .bz2 or .gz, line can be a bytes even in Python 3.
if not isinstance(line, text_type): line = line.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
options.urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
mk = keyRE.search(line)
if mk:
nsid = mk.group(1)
else:
nsid = ''
options.knownNamespaces[m.group(3)] = nsid
if re.search('key="10"', line):
options.templateNamespace = m.group(3)
options.templatePrefix = options.templateNamespace + ':'
elif re.search('key="828"', line):
options.moduleNamespace = m.group(3)
options.modulePrefix = options.moduleNamespace + ':'
elif tag == '/siteinfo':
break
if options.expand_templates:
# preprocess
template_load_start = default_timer()
if template_file:
if os.path.exists(template_file):
logging.info("Loading template definitions from: %s", template_file)
# can't use with here:
file = fileinput.FileInput(template_file,
openhook=fileinput.hook_compressed)
load_templates(file)
file.close()
else:
if input_file == '-':
# can't scan then reset stdin; must error w/ suggestion to specify template_file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
load_templates(input, template_file)
input.close()
input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", len(options.templates), template_load_elapsed)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
# - a reduce process collects the results, sort them and print them.
process_count = max(1, process_count)
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
if out_file == '-':
out_file = None
worker_count = process_count
# load balancing
max_spool_length = 10000
spool_length = Value('i', 0, lock=False)
# reduce job that sorts and prints output
reduce = Process(target=reduce_process,
args=(options, output_queue, spool_length,
out_file, file_size, file_compress))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", worker_count)
workers = []
for i in range(worker_count):
extractor = Process(target=extract_process,
args=(options, i, jobs_queue, output_queue))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# Mapper process
page_num = 0
for page_data in pages_from(input):
id, revid, title, ns, page = page_data
if keepPage(ns, page):
# slow down
delay = 0
if spool_length.value > max_spool_length:
# reduce to 10%
while spool_length.value > max_spool_length / 10:
time.sleep(10)
delay += 10
if delay:
logging.info('Delay %ds', delay)
job = (id, revid, title, page, page_num)
jobs_queue.put(job) # goes to any available extract_process
page_num += 1
page = None # free memory
input.close()
# signal termination
for _ in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
extract_duration = default_timer() - extract_start
extract_rate = page_num / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, page_num, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(opts, i, jobs_queue, output_queue):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param i: process id.
:param jobs_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
"""
global options
options = opts
createLogger(options.quiet, options.debug)
out = StringIO() # memory buffer
while True:
job = jobs_queue.get() # job is (id, title, page, page_num)
if job:
id, revid, title, page, page_num = job
try:
e = Extractor(*job[:4]) # (id, revid, title, page)
page = None # free memory
e.extract(out)
text = out.getvalue()
except:
text = ''
logging.exception('Processing page: %s %s', id, title)
output_queue.put((page_num, text))
out.truncate(0)
out.seek(0)
else:
logging.debug('Quit extractor')
break
out.close()
report_period = 10000 # progress report period
def reduce_process(opts, output_queue, spool_length,
out_file=None, file_size=0, file_compress=True):
"""Pull finished article text, write series of files (or stdout)
:param opts: global parameters.
:param output_queue: text to be output.
:param spool_length: spool length.
:param out_file: filename where to print.
:param file_size: max file size.
:param file_compress: whether to compress output.
"""
global options
options = opts
createLogger(options.quiet, options.debug)
if out_file:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
else:
output = sys.stdout if PY2 else sys.stdout.buffer
if file_compress:
            logging.warning("writing to stdout, so no output compression (use an external tool)")
interval_start = default_timer()
# FIXME: use a heap
spool = {} # collected pages
next_page = 0 # sequence numbering of page
while True:
if next_page in spool:
output.write(spool.pop(next_page).encode('utf-8'))
next_page += 1
# tell mapper our load:
spool_length.value = len(spool)
# progress report
if next_page % report_period == 0:
interval_rate = report_period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_page, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
page_num, text = pair
spool[page_num] = text
# tell mapper our load:
spool_length.value = len(spool)
# FIXME: if an extractor dies, process stalls; the other processes
# continue to produce pairs, filling up memory.
if len(spool) > 200:
logging.debug('Collected %d, waiting: %d, %d', len(spool),
next_page, next_page == page_num)
if output != sys.stdout:
output.close()
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdout)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s)",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupO.add_argument("--json", action="store_true",
help="write output in json format instead of the default one")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-s", "--sections", action="store_true",
help="preserve sections")
groupP.add_argument("--lists", action="store_true",
help="preserve lists")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces in links")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
groupP.add_argument("-r", "--revision", action="store_true", default=options.print_revision,
help="Include the document revision id (default=%(default)s)")
groupP.add_argument("-r", "--ignore-header", action="store_true", default=options.ignore_header,
help="Skip writing down the headers (default=%(default)s)")
groupP.add_argument("--min_text_length", type=int, default=options.min_text_length,
help="Minimum expanded text length required to write document (default=%(default)s)")
groupP.add_argument("--filter_disambig_pages", action="store_true", default=options.filter_disambig_pages,
help="Remove pages from output that contain disabmiguation markup (default=%(default)s)")
groupP.add_argument("-it", "--ignored_tags", default="", metavar="abbr,b,big",
help="comma separated list of tags that will be dropped, keeping their content")
groupP.add_argument("-de", "--discard_elements", default="", metavar="gallery,timeline,noinclude",
help="comma separated list of elements that will be removed from the article text")
groupP.add_argument("--keep_tables", action="store_true", default=options.keep_tables,
help="Preserve tables in the output article text (default=%(default)s)")
default_process_count = max(1, cpu_count() - 1)
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + version,
help="print program version")
args = parser.parse_args()
options.keepLinks = args.links
options.keepSections = args.sections
options.keepLists = args.lists
options.toHTML = args.html
options.write_json = args.json
options.print_revision = args.revision
    options.ignore_header = args.ignore_header
options.min_text_length = args.min_text_length
if args.html:
options.keepLinks = True
options.expand_templates = args.no_templates
options.filter_disambig_pages = args.filter_disambig_pages
options.keep_tables = args.keep_tables
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
file_size = int(args.bytes[:-1]) * 1024 ** power
if file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
options.acceptedNamespaces = set(args.namespaces.split(','))
# ignoredTags and discardElemets have default values already supplied, if passed in the defaults are overwritten
if args.ignored_tags:
ignoredTags = set(args.ignored_tags.split(','))
else:
ignoredTags = [
'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'em',
'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd',
'p', 'plaintext', 's', 'span', 'strike', 'strong',
'tt', 'u', 'var'
]
# 'a' tag is handled separately
for tag in ignoredTags:
ignoreTag(tag)
if args.discard_elements:
options.discardElements = set(args.discard_elements.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
options.quiet = args.quiet
options.debug = args.debug
createLogger(options.quiet, options.debug)
input_file = args.input
if not options.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
# manager = Manager()
# templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
file = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
for page_data in pages_from(file):
id, revid, title, ns, page = page_data
Extractor(id, revid, title, page).extract(sys.stdout)
file.close()
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes)
def createLogger(quiet, debug):
logger = logging.getLogger()
if not quiet:
logger.setLevel(logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
99c97cdca3213bb11daae25a275b3ee3295970fb
|
91ff566b975b7c0a21d280aa192953622600afae
|
/deploy_flask/lib/python3.6/sre_compile.py
|
bf7daffa83bae38ede58e7a155ec050c16368dab
|
[] |
no_license
|
mattg317/Deploying_with_Flask
|
93e1f19f093e02b886f02e01c30075fbbbafb6e1
|
881e694f74bd2f170ed8f638ffb97811d19e3969
|
refs/heads/master
| 2021-09-04T12:50:36.147008 | 2018-01-18T21:41:18 | 2018-01-18T21:41:18 | 107,602,674 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 63 |
py
|
/Users/matthewgiordanella/anaconda/lib/python3.6/sre_compile.py
|
[
"[email protected]"
] | |
824ca23d6252d8aaf3b2c8307a5abfaebb3ea24f
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/streamtube/hoverlabel/font/_family.py
|
335e3ef0531d4b5f69975fdfd5c2903dd7c3d021
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 |
MIT
| 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null |
UTF-8
|
Python
| false | false | 531 |
py
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='family',
parent_name='streamtube.hoverlabel.font',
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=True,
edit_type='none',
no_blank=True,
role='style',
strict=True,
**kwargs
)
|
[
"[email protected]"
] | |
fced2509d1f5b132439534f2f9d67b73070b25b2
|
399fb29d8525b6d7ac298783675d0d56e37bcac7
|
/python/ray/autoscaler/aws/tests/download_ssh_key.py
|
b3eb8b0a3c313b5155af259b1f0175c4494d6a84
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
AmeerHajAli/ray
|
40c9aebe0da59e9bcd70303d981bfe6b65007991
|
1ffd032f5f793d8817217a040f0f636f9372cd56
|
refs/heads/master
| 2023-03-28T10:50:09.186561 | 2023-03-24T23:08:08 | 2023-03-24T23:08:08 | 175,129,851 | 1 | 0 |
Apache-2.0
| 2019-03-12T03:39:16 | 2019-03-12T03:39:14 | null |
UTF-8
|
Python
| false | false | 512 |
py
|
import os
import boto3
# Create a Boto3 client to interact with S3
s3_client = boto3.client("s3", region_name="us-west-2")
# Set the name of the S3 bucket and the key to download
bucket_name = "oss-release-test-ssh-keys"
key_name = "ray-autoscaler_59_us-west-2.pem"
# Download the key from the S3 bucket to a local file
local_key_path = os.path.expanduser(f"~/.ssh/{key_name}")
s3_client.download_file(bucket_name, key_name, local_key_path)
# Set permissions on the key file
os.chmod(local_key_path, 0o400)
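# 0o400 makes the key owner-read-only; ssh refuses private keys with looser permissions.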
|
[
"[email protected]"
] | |
ad0feb58c540da01689ddae3f367adda20b6db35
|
99589f73e394567a3656ccf287003ae89ad5c83e
|
/MDustbinButton.py
|
e0a146c06f2553d193119d1a96c0aa258f164385
|
[] |
no_license
|
YanruMu-sunding/pyqtwidget
|
f0c9f33a7d040ace979f9b050ebde8a96fceae2e
|
85aaac30e7e63a9494e8bc022c49bf5a6a01e251
|
refs/heads/master
| 2022-03-01T11:06:00.386250 | 2015-01-23T09:42:21 | 2015-01-23T09:42:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,292 |
py
|
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2013.09
# Email : [email protected]
###################################################################
try:
from PySide.QtCore import *
from PySide.QtGui import *
except ImportError:
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4.QtCore import *
from PyQt4.QtGui import *
'''
Class Name: MDustbinButton
Type : QToolButton
Public Method:
void setSize(int)
void setData(int)
int data()
Public Signal:
void sigClicked(int)
'''
class MDustbinButton(QToolButton):
def __init__(self, data = 0, parent = None):
super(MDustbinButton, self).__init__(parent)
self.setData(data)
self.setToolTip('Remove')
self.setAutoRaise(True)
self.setIcon(QIcon('./images/dustbin.png'))
self.connect(self, SIGNAL('clicked()'), self.slotEmitDelete)
def slotEmitDelete(self):
self.emit(SIGNAL('sigClicked(int)'), self.stateData)
def setData(self, data):
self.stateData = data
def data(self):
return self.stateData
def setSize(self, w):
self.setFixedSize(QSize(w, w))
self.setIconSize(QSize(w-1, w-1))
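# Hypothetical usage with the old-style signal API used above (not from the repo):
# btn = MDustbinButton(data=row_index)
# btn.connect(btn, SIGNAL('sigClicked(int)'), lambda idx: table.removeRow(idx))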
|
[
"[email protected]"
] | |
b32bbeb3daea896285047e64886179a4f2fef98c
|
702b8b109bf4b2235de7442a46fbf22288a5ff24
|
/forums/Algorithms.py
|
49fd08441901f3e4daa6dd53394388e1f289ecc2
|
[] |
no_license
|
jakhar1996/neuron-task
|
3eac554088c6b5ae81c9c9b532e8537086875347
|
5dd62fcb03acd81a31c73ad2354dccbfecfe7b4c
|
refs/heads/master
| 2021-01-19T18:53:43.901904 | 2016-10-10T20:11:44 | 2016-10-10T20:11:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,187 |
py
|
'''
Algorithms for calculating score of Posts, Comments and Voting
reference :
1. https://github.com/reddit/reddit/blob/master/r2/r2/lib/db/_sorts.pyx
2. https://medium.com/hacking-and-gonzo/how-reddit-ranking-algorithms-work-ef111e33d0d9
'''
from math import *
import datetime
epoch = datetime.datetime(1970,1,1)
def sign(a):
if a == 0 : return 0
return a/abs(a)
def voteCount(p,n):
#Used in initial implementation for vote counting
p = int(p)
n = int(n)
return abs(p)*(n-p) + (1-abs(p))*n
def epoch_seconds(date):
td = date-epoch
return td.days * 86400 + td.seconds + (float(td.microseconds)/1000000)
def hot(score,date):
#Algorithm for sorting featured posts
order = log(max(abs(score),1),10)
sign = 1 if score > 0 else -1 if score < 0 else 0
seconds = epoch_seconds(date) - 1134028003
a = round(sign * order + seconds/45000,7)
return a
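# Illustrative use (hypothetical post objects): sorting descending by hot()
# front-loads recent, well-voted posts, since every ~45000 s of age must be
# offset by a tenfold score increase to keep the same rank.
# posts.sort(key=lambda p: hot(p.score, p.created_at), reverse=True)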
def zeero(a):
a.score = 0
a.vote_count = 0
return a
def confidence(ups,downs):
'''
Algorithm for sorting comments
'''
n = ups + downs
if n == 0:
return 0
z = 1.281551565545
p = float(ups) / n
left = p + 1/(2*n)*z*z
right = z*sqrt(p*(1-p)/n + z*z/(4*n*n))
under = 1 + 1/n*z*z
return (left - right) / under
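# confidence() is the lower bound of the Wilson score interval with
# z = 1.2816 (the 90th percentile of the standard normal), so a comment at
# 9 ups / 1 down can outrank one at 90 ups / 50 downs.
# Hypothetical usage:
# comments.sort(key=lambda c: confidence(c.ups, c.downs), reverse=True)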
|
[
"[email protected]"
] | |
8545d13ebaeacd4925ad752290a60f46c5c1659f
|
c0c536f619292d8cac6bc0d340fa855ce6d21310
|
/torch/_inductor/fx_passes/post_grad.py
|
2acd2c3e0ba7366f96866a7a5ae485fbbace8a04
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
manuelmusngi/pytorch
|
32915e01515f1ba81cdd20340dbae5dbfe6809e6
|
bab21d20ebf45a5dc620b48791bb526f664445a5
|
refs/heads/main
| 2023-07-01T23:21:47.013111 | 2023-06-19T19:34:42 | 2023-06-20T09:24:21 | 339,895,372 | 2 | 0 |
NOASSERTION
| 2021-02-18T00:46:11 | 2021-02-18T00:46:11 | null |
UTF-8
|
Python
| false | false | 11,764 |
py
|
import functools
import itertools
import logging
import operator
import torch
import torch._inductor as inductor
from .. import config, ir, pattern_matcher
from ..lowering import lowerings as L
from ..pattern_matcher import (
_return_true,
Arg,
CallFunction,
filter_nodes,
get_arg_value,
Ignored,
init_once_fakemode,
KeywordArg,
ListOf,
Match,
MULTIPLE,
PatternMatcherPass,
register_graph_pattern,
stable_topological_sort,
)
from ..virtualized import V
log = logging.getLogger(__name__)
aten = torch.ops.aten
prims = torch.ops.prims
# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
PatternMatcherPass(),
PatternMatcherPass(),
PatternMatcherPass(),
]
def post_grad_passes(gm: torch.fx.GraphModule, locality_reorder: bool):
"""
Passes that run on after grad. This is called once on the forwards
graph and once on the backwards graph.
The IR here has been normalized and functionalized.
"""
if config.dce:
# has some issues with mutation in inference mode
gm.graph.eliminate_dead_code()
if locality_reorder:
reorder_for_locality(gm.graph)
if config.pattern_matcher:
lazy_init()
for patterns in pass_patterns:
patterns.apply(gm.graph)
stable_topological_sort(gm.graph)
gm.recompile()
gm.graph.lint()
@init_once_fakemode
def lazy_init():
if torch._C._has_mkldnn:
from .mkldnn_fusion import _mkldnn_fusion_init
_mkldnn_fusion_init()
from .quantization import register_quantization_lowerings
register_quantization_lowerings()
def reorder_for_locality(graph: torch.fx.Graph):
def visit(other_node):
if (
other_node.op == "call_function"
and other_node.target != operator.getitem
and all((n in seen_nodes) for n in other_node.users)
):
# move node's producers right before it
node.prepend(other_node)
seen_nodes = set()
# only reorder nodes before the first copy_ in the graph.
# copy_ will appear at the end of functionalized graphs when there is mutation on inputs,
    # and this reordering doesn't work well with mutation
first_copy = next(
(
node
for node in graph.nodes
if node.op == "call_function"
and node.target == torch.ops.aten.copy_.default
),
None,
)
past_mutating_epilogue = True if first_copy is None else False
for node in reversed(graph.nodes):
seen_nodes.add(node)
if not past_mutating_epilogue:
past_mutating_epilogue = node is first_copy
continue
torch.fx.map_arg((node.args, node.kwargs), visit)
def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1):
"""
Register an aten to inductor IR replacement pattern
"""
return pattern_matcher.register_lowering_pattern(
pattern, extra_check, pass_dict=pass_patterns[pass_number]
)
################################################################################
# Actual patterns below this point.
# Priority of patterns is:
# - later output nodes first
# - order patterns are defined in
################################################################################
@register_lowering_pattern(
CallFunction(
aten.add,
CallFunction(aten.mm, Arg(), Arg()),
CallFunction(aten.mm, Arg(), Arg()),
)
)
def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4):
return inductor.kernel.mm_plus_mm.tuned_mm_plus_mm(mat1, mat2, mat3, mat4)
@register_graph_pattern(
CallFunction(
aten.cumsum.default,
CallFunction(
torch.ops.aten.full.default,
[Arg(), Arg()],
1,
dtype=KeywordArg("dtype"),
layout=Ignored(),
device=KeywordArg("device"),
pin_memory=False,
_users=MULTIPLE,
),
1,
_users=MULTIPLE,
),
pass_dict=pass_patterns[1],
)
def pointless_cumsum_replacement(match: Match, size0, size1, device, dtype):
"""Based on a pattern in OPTForCausalLM"""
def repl(size0, size1):
return torch.arange(1, size1 + 1, device=device, dtype=dtype).expand(
size0, size1
)
# only replace the output node, not all nodes
match.nodes = [match.output_node()]
with V.fake_mode:
match.replace_by_example(repl, [size0, size1])
def shape_of_mm(a, b):
m, _ = a.get_size()
_, n = b.get_size()
return [m, n]
@register_lowering_pattern(
CallFunction(aten.cat, ListOf(CallFunction(aten.mm, Arg(), Arg())), Arg()),
)
def cat_mm(match, inputs, dim):
return cat_tuned_op(match, inputs, dim, op=L[aten.mm], shape_of=shape_of_mm)
@register_lowering_pattern(
CallFunction(
aten.cat, ListOf(CallFunction(aten.addmm, Arg(), Arg(), Arg())), Arg()
),
)
def cat_addmm(match, inputs, dim):
def shape_of(bias, a, b):
m, _ = a.get_size()
_, n = b.get_size()
return [m, n]
return cat_tuned_op(match, inputs, dim, op=L[aten.addmm], shape_of=shape_of)
def cat_tuned_op(match, inputs, dim, *, op, shape_of):
"""
Memory planning to remove cat. We can't use the stock memory
planner since autotuning matmuls needs to know the output layout.
"""
if len(inputs) == 1:
return op(*inputs[0])
# TODO(jansel): rewrite this as a bmm?
if dim < 0:
dim += len(shape_of(*inputs[0]))
assert dim in (0, 1)
notdim = 1 - dim
new_size = None
offsets_start = []
offsets_end = []
# compute output sizes
for i in range(len(inputs)):
shape = shape_of(*inputs[i])
if new_size is None:
new_size = shape
else:
new_size[notdim] = V.graph.sizevars.guard_equals(
shape[notdim], new_size[notdim]
)
new_size[dim] += shape[dim]
offsets_start.append(new_size[dim] - shape[dim])
offsets_end.append(new_size[dim])
dtype = functools.reduce(
torch.promote_types, [x.get_dtype() for x in itertools.chain(*inputs)]
)
device = inputs[0][0].get_device()
kernel = ir.ConcatKernel(
name=None,
layout=ir.FixedLayout(device, dtype, new_size),
inputs=[],
)
kernel_tensor = ir.TensorBox.create(kernel)
for i in range(len(inputs)):
dst = ir.SliceView.create(kernel_tensor, dim, offsets_start[i], offsets_end[i])
src = op(*inputs[i], layout=dst.get_layout()).data.data
assert isinstance(src, (ir.ExternKernelOut, ir.TemplateBuffer))
src.layout = ir.AliasedLayout(dst)
kernel.inputs.append(src)
kernel.name = V.graph.register_buffer(kernel)
kernel.inputs = ir.ConcatKernel.unwrap_storage(kernel.inputs)
return kernel_tensor
_cat_1 = CallFunction(aten.cat, Arg(), 1, _users=2)
@register_lowering_pattern(
CallFunction(
aten.cat,
[
_cat_1,
CallFunction(
aten.slice,
CallFunction(aten.slice, _cat_1, 0, 0, 9223372036854775807),
1,
0,
KeywordArg("size"),
),
],
1,
)
)
def cat_slice_cat(match, cat_input, size, dim=1):
"""
This is an example of a more complex pattern where cat_1 is used
multiple times inside the pattern. We fold 2 calls to cat into one.
Matches:
cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1)
slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807)
slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19)
cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1)
Rewrite to:
slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19)
cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)
"""
first, *rest = cat_input
# Optimization is optional, because we can just not fold the cat
# size should be within first.get_size()[dim] such that the optimization is valid.
# For negative `end`, we currently fallback to not optimizing.
if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):
# fold 2 cats into 1 cat
return L[aten.cat](
[
first,
*rest,
L[aten.slice](first, dim, 0, size),
],
dim,
)
else:
# don't expect to hit this case, just fall back
tmp = L[aten.cat](cat_input, dim)
return L[aten.cat](
[
tmp,
L[aten.slice](tmp, dim, 0, size),
],
dim,
)
@register_lowering_pattern(
CallFunction(
aten.add,
CallFunction(aten.mm, Arg(), Arg()),
KeywordArg("inp"),
),
pass_number=2,
)
@register_lowering_pattern(
CallFunction(
aten.add,
KeywordArg("inp"),
CallFunction(aten.mm, Arg(), Arg()),
),
pass_number=2,
)
def addmm(match, mat1, mat2, inp):
if isinstance(inp, ir.TensorBox):
inp_shape = inp.get_size()
matched = len(inp_shape) <= 2
mm_shape = shape_of_mm(mat1, mat2)
for i, m in zip(inp_shape, mm_shape):
matched &= i == 1 or i == m
else: # inp is a Number
matched = False
if matched:
return L[aten.addmm](inp, mat1, mat2)
else:
return L[aten.add](inp, L[aten.mm](mat1, mat2))
def is_valid_splitwithsizes_cat(match):
split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
cat_nodes = filter_nodes(match.nodes, aten.cat)
get_item_nodes = filter_nodes(match.nodes, operator.getitem)
if len(split_nodes) != 1 or len(cat_nodes) != 1:
return False
split_node, cat_node = split_nodes[0], cat_nodes[0]
# The dim of split and cat should match for passthrough
if get_arg_value(split_node, 2, "dim") != get_arg_value(cat_node, 1, "dim"):
return False
get_item_args = {
get_arg_value(get_item_node, 1) for get_item_node in get_item_nodes
}
assert None not in get_item_args
split_sizes = get_arg_value(split_node, 1, "split_sizes")
# All parts of split should be included in the cat
if get_item_args != set(range(len(split_sizes))):
return False
    # The get_item indices must appear in the same order in which cat_node consumes them.
# For example, if the split_node like split_with_sizes(input, [2, 2, 3], 1),
# the cat node should be like cat([get_item(0), get_item(1), get_item(2)], 1).
cat_items_args_order = [
get_arg_value(item_node, 1) for item_node in get_arg_value(cat_node, 0)
]
if cat_items_args_order != list(range(len(split_sizes))):
return False
return True
@register_lowering_pattern(
CallFunction(
aten.cat,
ListOf(
CallFunction(
operator.getitem,
CallFunction(
aten.split_with_sizes,
KeywordArg("input_"),
Ignored(),
Ignored(),
_users=MULTIPLE,
),
Ignored(),
),
),
Ignored(),
),
pass_number=2,
extra_check=is_valid_splitwithsizes_cat,
)
def splitwithsizes_cat_replace(match, input_):
return input_
def view_to_reshape(gm):
"""
Replace view ops in the GraphModule to reshape ops.
"""
for nd in gm.graph.nodes:
if nd.target == torch.ops.aten.view.default:
nd.target = torch.ops.aten.reshape.default
|
[
"[email protected]"
] | |
5eebbda08df3e41b17377b37668d53d24995eef6
|
42a0760a051935b2e765d57c445235221a28f49e
|
/509_Fibonacci_Number.py
|
ddfee4f254fd8b67428fa44385ed8f01eb920e51
|
[] |
no_license
|
Th3Lourde/l33tcode
|
3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3
|
eb6b11f97a022b66716cb3890cc56c58f62e8aa4
|
refs/heads/master
| 2022-12-22T19:05:04.384645 | 2022-12-18T19:38:46 | 2022-12-18T19:38:46 | 232,450,369 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 622 |
py
|
class Solution:
def fib_1(self, N):
if N == 0:
return 0
if N == 1 or N == 2:
return 1
        elif N >= 2:
            return self.fib_1(N-1) + self.fib_1(N-2)
def fib(self, N, mem):
        if mem[N] is not None:
result = mem[N]
elif N == 1 or N == 2:
result = 1
elif N >= 2:
result = self.fib(N-1, mem) + self.fib(N-2, mem)
mem[N] = result
return result
if __name__ == '__main__':
s = Solution()
n = 4
mem = [None] * (n+1)
# print(mem)
# print(s.fib(4, mem))
# print(mem)
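    # Expected with the memoized version: s.fib(4, mem) == 3, leaving
    # mem == [None, 1, 1, 2, 3] for n = 4.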
|
[
"[email protected]"
] | |
be2a3ce025842cdf70bc7ed4d624f599c4bc2b10
|
3a570384a3fa9c4c7979d33b182556e1c637e9eb
|
/anw/Packages/anw/gui/shipdesignvalue.py
|
5941d1747f3f0ddb36cfb27a1a4f3886faa3754a
|
[] |
no_license
|
colshag/ANW
|
56a028af5042db92b5ead641dc542fcb4533344e
|
46948d8d18a0639185dd4ffcffde126914991553
|
refs/heads/master
| 2020-03-27T00:22:49.409109 | 2018-10-27T06:37:04 | 2018-10-27T06:37:04 | 145,618,125 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,087 |
py
|
# ---------------------------------------------------------------------------
# Cosmica - All rights reserved by NeuroJump Trademark 2018
# shipdesignvalue.py
# Written by Chris Lewis
# ---------------------------------------------------------------------------
# This uses scroll mouse up/down or plus and minus buttons to
# Add/Remove Weapons and Components for a selected Ship Design
# ---------------------------------------------------------------------------
import string
from anw.func import globals, funcs
from anw.gui import textonscreen, scrollvalue, weapondirection
class ShipDesignValue(scrollvalue.ScrollValue):
"""Add or remove ship design weapons and components"""
def __init__(self, path, id, myParent):
self.weapondirection = None
self.isWeapon = 0
self.isDrone = 0
self.myDroneDesign = None
self.myParent = myParent
self.mode = myParent.mode
self.game = myParent.game
self.id = self.getID(id)
self.myShipDesign = myParent.myShipDesign
self.myData = self.getMyData()
self.sim = None
self.textName = None
self.textDescription = None
self.textCR = None
self.textAL = None
self.textEC = None
self.textIA = None
scrollvalue.ScrollValue.__init__(self, path, x=0.42, y=-0.72, name='design')
self.allKeys = ['A','S','D','Z','X','C','V']
self.disableButtonIgnore = ['S', 'Z', 'X', 'C', 'V']
self.scrollFactor = 1
self.selectedQuad = ''
self.myTitle.setText('Add or Remove Selected item from Quadrant:')
self.createMyInfo()
if self.isWeapon == 1:
self.createWeaponDirection()
def setMyDroneDesign(self, droneID):
"""Set the DroneDesign"""
self.myDroneDesign = self.mode.game.droneDesignObjects[droneID]
def getID(self, id):
"""id will be prefixed with a W to denote a weapon, D for a drone, instead of ship component"""
if id[0] == 'W':
self.isWeapon = 1
return id[1:]
elif id[0] == 'D':
self.isWeapon = 1
self.isDrone = 1
self.setMyDroneDesign(id[1:])
self.myParent.createDroneInfo(self.myDroneDesign)
return self.myDroneDesign.getMyLauncherID()
else:
self.isWeapon = 0
return id
def createWeaponDirection(self):
"""Display Weapon Direction Buttons"""
if self.weapondirection == None:
self.weapondirection = weapondirection.WeaponDirection(self.path, x=0.42, y=-0.9)
self.myWidgets.append(self.weapondirection)
def getMyData(self):
"""MyData is either a componentData object or weaponData object"""
if self.isWeapon == 1:
return self.myParent.mode.game.weapondata[self.id]
else:
return self.myParent.mode.game.componentdata[self.id]
def createButtons(self):
"""Create all Buttons"""
for key in ['Z','X','C','V']:
buttonPosition = ((self.posInitX+self.x*.10),0,(self.posInitY+self.y*.10))
self.createButton(key, buttonPosition)
self.x += 1
self.x = 0
self.y = 1
for key in ['A','S','D']:
buttonPosition = ((self.posInitX+self.x*.10),0,(self.posInitY+self.y*.10))
self.createButton(key, buttonPosition)
self.x += 1
def pressZ(self):
"""Press Fore Quad"""
self.enableLastButton('Z')
self.disableButton('Z')
self.selectedQuad = 'fore'
self.setMin()
self.setMax()
def pressX(self):
"""Press Aft Quad"""
self.enableLastButton('X')
self.disableButton('X')
self.selectedQuad = 'aft'
self.setMin()
self.setMax()
def pressC(self):
"""Press Port Quad"""
self.enableLastButton('C')
self.disableButton('C')
self.selectedQuad = 'port'
self.setMin()
self.setMax()
def pressV(self):
"""Press Star Quad"""
self.enableLastButton('V')
self.disableButton('V')
self.selectedQuad = 'star'
self.setMin()
self.setMax()
def createMyInfo(self):
"""Create Info based on id given"""
self.setCurrentValue(0)
self.writeName()
self.createSim()
self.writeDescription()
self.writeCost()
def setMin(self):
"""Min is based on number of that component or weapon type in quad"""
try:
myQuad = self.myShipDesign.quads[self.selectedQuad]
count = 0
if self.isWeapon == 1:
for weaponID, myWeapon in myQuad.weapons.iteritems():
if myWeapon.type == self.myData.id:
count += 1
else:
for componentID, myComponent in myQuad.components.iteritems():
if myComponent.type == self.myData.id:
count += 1
self.setMinValue(-count)
except:
self.setMinValue(0)
def setMax(self):
"""Max is based on remaining component slots and weapon size in comps"""
try:
num = self.myShipDesign.myShipHull.componentNum
num = num-self.myShipDesign.quads[self.selectedQuad].currentComps
if self.isWeapon == 1:
num = num/self.myData.numComps
self.setMaxValue(num)
except:
self.setMaxValue(0)
def writeName(self):
"""Create Name"""
if self.textName == None:
self.textName = textonscreen.TextOnScreen(self.path, self.myData.name,
scale=0.04, font=5, parent=aspect2d)
self.textName.writeTextToScreen(self.posInitX+0.36, 0, self.posInitY+0.31, 12)
self.textName.setCardColor(globals.colors['guiblue3'], 0.2, 0.2, 7, 0.2)
self.myWidgets.append(self.textName)
else:
self.textName.myText.setText(self.myData.name)
def createSim(self):
"""Create myData Sim Picture"""
if len(self.myData.abr) == 4:
name = 'ammo'
else:
name = string.lower(self.myData.abr[1:])
if self.sim == None:
self.sim = loader.loadModelCopy('%s/plane' % self.path)
self.sim.setScale(0.08)
self.sim.reparentTo(aspect2d)
self.sim.setTransparency(1)
tex = loader.loadTexture('%s/%s.png' % (self.path, name))
self.sim.setTexture(tex, 0)
self.sim.setPos(self.posInitX+0.41, 0, self.posInitY+0.17)
self.myWidgets.append(self.sim)
else:
tex = loader.loadTexture('%s/%s.png' % (self.path, name))
self.sim.setTexture(tex, 0)
def writeDescription(self):
"""Create Description"""
if self.textDescription == None:
self.textDescription = textonscreen.TextOnScreen(self.path, self.myData.description,
scale=0.03, font=5, parent=aspect2d)
self.textDescription.writeTextToScreen(self.posInitX+0.36, 0, self.posInitY+0.09, 20)
self.textDescription.setCardColor(globals.colors['guiblue3'], 0.2, 0.2, 0.2, 0.2)
self.myWidgets.append(self.textDescription)
else:
self.textDescription.myText.setText(self.myData.description)
def writeCost(self):
"""Create Cost"""
self.writeCRCost()
self.writeALCost()
self.writeECCost()
self.writeIACost()
def writeCRCost(self):
value = '%d' % (self.myData.costCR)
if self.textCR == None:
self.textCR = textonscreen.TextOnScreen(self.path, value,
scale=0.03, font=5, parent=aspect2d)
self.textCR.writeTextToScreen(self.posInitX+0.47, 0, self.posInitY+0.2, 10)
self.textCR.setColor(globals.colors['guigreen'])
self.myWidgets.append(self.textCR)
else:
self.textCR.myText.setText(value)
def writeALCost(self):
value = '%d' % (self.myData.costAL)
if self.textAL == None:
self.textAL = textonscreen.TextOnScreen(self.path, value,
scale=0.03, font=5, parent=aspect2d)
self.textAL.writeTextToScreen(self.posInitX+0.47, 0, self.posInitY+0.2-0.02, 10)
self.textAL.setColor(globals.colors['guiblue1'])
self.myWidgets.append(self.textAL)
else:
self.textAL.myText.setText(value)
def writeECCost(self):
value = '%d' % (self.myData.costEC)
if self.textEC == None:
self.textEC = textonscreen.TextOnScreen(self.path, value,
scale=0.03, font=5, parent=aspect2d)
self.textEC.writeTextToScreen(self.posInitX+0.47, 0, self.posInitY+0.2-0.04, 10)
self.textEC.setColor(globals.colors['guiyellow'])
self.myWidgets.append(self.textEC)
else:
self.textEC.myText.setText(value)
def writeIACost(self):
value = '%d' % (self.myData.costIA)
if self.textIA == None:
self.textIA = textonscreen.TextOnScreen(self.path, value,
scale=0.03, font=5, parent=aspect2d)
self.textIA.writeTextToScreen(self.posInitX+0.47, 0, self.posInitY+0.2-0.06, 10)
self.textIA.setColor(globals.colors['guired'])
self.myWidgets.append(self.textIA)
else:
self.textIA.myText.setText(value)
def pressS(self):
"""Submit value"""
myQuad = self.myShipDesign.quads[self.selectedQuad]
if self.myDroneDesign == None:
droneID = ''
else:
droneID = self.myDroneDesign.id
if self.isWeapon == 1:
if self.currentValue < 0:
self.myParent.removeWeapons(myQuad, self.myData.id, -self.currentValue)
else:
self.myParent.addWeapons(myQuad, self.myData.id, self.currentValue, self.weapondirection.direction, droneID)
else:
if self.currentValue < 0:
self.myParent.removeComponents(myQuad, self.myData.id, -self.currentValue)
else:
self.myParent.addComponents(myQuad, self.myData.id, self.currentValue)
self.disableButton('S')
def ignoreShortcuts(self):
"""Ignore all keyboard shortcuts created"""
self.ignoreAll()
if self.weapondirection != None:
self.weapondirection.ignoreShortcuts()
def setShortcuts(self):
"""Set all keyboard shortcuts"""
for key in self.allKeys:
self.setAcceptOnButton(key)
if self.weapondirection != None:
self.weapondirection.setShortcuts()
|
[
"[email protected]"
] | |
c808cacd4d12136e61ae70443e057d83bfff00a2
|
ae85cd400fa71296867c9e55297affa2d3679b5d
|
/hashmaps/count_pairs.py
|
975bf328e09c9882d3f5eb6ca6976298bde1852a
|
[] |
no_license
|
Psycadelik/sifu
|
a1e751aa4e97cd56431cdf8704304b82943db37c
|
72965f694f7a44aa8711d11934b216d5ccf9d280
|
refs/heads/master
| 2023-04-12T17:07:00.677702 | 2021-05-07T07:30:14 | 2021-05-07T07:30:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,077 |
py
|
"""
Count of index pairs with equal elements in an array
Given an array of n elements.
The task is to count the total number of index pairs (i, j) with i < j such that arr[i] = arr[j]
Examples :
Input : arr[] = {1, 1, 2} -> 1
As arr[0] = arr[1], the pair of indices is (0, 1)
Input : arr[] = {1, 1, 1} -> 3
As arr[0] = arr[1] = arr[2], the pairs of indices are (0, 1),
(0, 2) and (1, 2)
Input : arr[] = {1, 2, 3} -> 0
"""
def countPairs(arr):
n = len(arr)
mapping = {}
ans = 0
# Finding frequency of each number.
for num in arr:
mapping[num] = mapping.get(num, 0) + 1
# Calculating pairs of each value.
for k, v in mapping.items():
ans += (v * (v - 1)) // 2
return ans
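# Worked example: in [1, 1, 1, 3, 3, 4, 1] the value 1 occurs 4 times
# (4*3//2 = 6 pairs) and 3 occurs twice (1 pair), so the answer is 7.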
def countPairs3(arr):
def no_of_repeats(n):
if n < 2:
return 0
return n-1 + no_of_repeats(n-1)
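    # no_of_repeats(n) unrolls to (n-1) + (n-2) + ... + 1 == n*(n-1)//2,
    # the same closed form countPairs uses above.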
freqs = [arr.count(i) for i in list(set(arr))]
res = sum([no_of_repeats(i) for i in freqs])
return res
arr = [1, 1, 2]
arr1 = [1, 1, 1, 3, 3, 4, 1]
# print(countPairs(arr))
print(countPairs(arr1))
print(countPairs3(arr1))
|
[
"[email protected]"
] | |
f58deb9eda7f55877a7c900911bed9917c40e100
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03945/s876347603.py
|
59b6606399e20fd6e84e359c750c5ad89b645501
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
def myAnswer(S:str) -> int:
ans = 0
pre = S[0]
for s in S[1:]:
if (pre != s):
pre = s
ans += 1
return ans
def modelAnswer():
return
def main():
S = input()
print(myAnswer(S))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c2c901b79b059b6cb996273d372de8316f4ccc9d
|
6fe990986efe7f06f8a6eafd01fd4eb042c7f8b3
|
/portfolio/urls.py
|
0c2f3ffe78cf6dcf42512e15b6327e5e52b8623a
|
[] |
no_license
|
StillsSma/portfolio
|
e436c82b2fc5639d38b88375b85e73c40bfd9985
|
856047f9ea38d812dda630815e7a7bf6cf63c798
|
refs/heads/master
| 2021-01-13T04:12:05.110398 | 2017-04-12T22:26:05 | 2017-04-12T22:26:05 | 77,696,082 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 355 |
py
|
from django.conf.urls import url
from django.contrib import admin
from app.views import IndexView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', IndexView.as_view(), name="index_view"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"[email protected]"
] | |
bb991325cedd1e15fba6feb2e464a75c9a1125ab
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/list_get_element_oob_3-29.py
|
3a004c5cabf6adc94926f46ff1b288c44de5a398
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 38 |
py
|
x:[int] = None
x = []
print($Var[0])
|
[
"[email protected]"
] | |
8b3c268cb508e5b629740a4b9b56f8e830592352
|
48b79c2d813cc89c227e36b83a5508acdf9657bd
|
/udemy/ecommerce/cfehome/forms.py
|
c3565e95ab0f4222b594c4f78ad177017e9035d3
|
[] |
no_license
|
felipe-basina/python
|
b1c8b980ac201241f06e79c0f5f05ee2528b9960
|
bb739f9b57b1947010f831826fd7c65b2a3b85cf
|
refs/heads/master
| 2022-09-23T20:59:34.080521 | 2020-12-19T16:37:21 | 2020-12-19T16:37:21 | 101,874,996 | 1 | 0 | null | 2022-09-16T17:42:00 | 2017-08-30T11:49:08 |
Python
|
UTF-8
|
Python
| false | false | 1,212 |
py
|
from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={
"class": "form-control",
"id": "form_full_name",
"placeholder": "Your full name"}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
"class": "form-control",
"id": "form_email",
"placeholder": "Your email"}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={
"class": "form-control",
"id": "form_content",
"placeholder": "Your content"}
)
)
def clean_email(self):
email = self.cleaned_data.get("email")
if not "gmail.com" in email:
raise forms.ValidationError("Email has to be gmail.com")
return email
|
[
"[email protected]"
] | |
7493e37819689409b4590fc5a592b68f51559ce8
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_each346.py
|
52567e9a128698e79a0ca91ffd3ecde84ab173fd
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
from xcp2k.inputsection import InputSection
class _each346(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Md': 'MD', 'Bsse': 'BSSE', 'Powell_opt': 'POWELL_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Just_energy': 'JUST_ENERGY', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Shell_opt': 'SHELL_OPT', 'Cell_opt': 'CELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Tddft_scf': 'TDDFT_SCF', 'Pint': 'PINT'}
|
[
"[email protected]"
] | |
90888ddfffffef2ccb8da651c3573ecdba4d388a
|
93c6cdca36f79e7ccb4e100c048fa4d44ed3d937
|
/day06/day06-04.py
|
f66c94a45471d3bc70c823de6493a93a1b97f9fb
|
[] |
no_license
|
parkwisdom/Python-Study-step3
|
eab66f0afd10ebdaadb167dddec245ab6115d859
|
4c51b6d9959f93e52e8896d9c404c10c64bc8ea8
|
refs/heads/master
| 2020-04-03T13:51:33.369220 | 2018-10-30T00:55:02 | 2018-10-30T00:55:02 | 155,301,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,988 |
py
|
from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
def drawSheet(cList) :
global cellList
if cellList != None :
for row in cellList:
for col in row:
col.destroy()
rowNum = len(cList)
colNum = len(cList[0])
cellList = []
    # Build an empty sheet
for i in range(0, rowNum):
tmpList = []
for k in range(0, colNum):
            ent = Entry(window)  # tkinter Entry has no 'text' option; it starts empty
tmpList.append(ent)
ent.grid(row=i, column=k)
cellList.append(tmpList)
    # Fill the sheet with the list values (= put a value into each entry)
for i in range(0, rowNum):
for k in range(0, colNum):
cellList[i][k].insert(0, cList[i][k])
def openCSV() :
global csvList
csvList = []
input_file = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
filereader = open(input_file, 'r', newline='')
header = filereader.readline()
    header = header.strip() # strip leading/trailing whitespace
header_list = header.split(',')
csvList.append(header_list)
    for row in filereader: # iterate over every remaining row
row = row.strip()
row_list = row.split(',')
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
def saveCSV() :
global csvList
if csvList == [] :
return
saveFp = asksaveasfile(parent=window, mode='w', defaultextension='.csv',
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
filewriter = open(saveFp.name, 'w', newline='')
for row_list in csvList :
row_str = ','.join(map(str, row_list))
filewriter.writelines(row_str + '\n')
filewriter.close()
def csvData01() :
    global csvList
    csvList = []
input_file = "d:\\pydata\\csv\\supplier_data.csv"
filereader = open(input_file, 'r', newline='')
header = filereader.readline()
    header = header.strip() # strip leading/trailing whitespace
header_list = header.split(',')
# part Number, Purchase Date
idx1 = 0
for h in header_list:
if h.strip().upper() == 'part Number'.strip().upper():
break
idx1 += 1
idx2 = 0
for h in header_list:
if h.strip().upper() == 'Purchase Date'.strip().upper():
break
idx2 += 1
if idx1 > idx2:
idx1, idx2 = idx2, idx1
del (header_list[idx2])
del (header_list[idx1])
csvList.append(header_list)
    for row in filereader: # iterate over every remaining row
row = row.strip()
row_list = row.split(',')
del (row_list[idx2])
del (row_list[idx1])
if row_list[0] == 'Supplier Y':
continue
cost = float(row_list[2][1:])
cost *= 1.5
cost = int(cost / 100) * 100
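        # e.g. '$500.00' -> 750.0 after the 1.5x markup -> 700 after truncating
        # down to the hundred below, written back as '$700.00'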
cost_str = "${0:.2f}".format(cost)
row_list[2] = cost_str
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
def csvData02():
global csvList
csvList=[]
import csv
    input_file = 'd:\\pydata\\csv\\supplier_data.csv'
filereader= open(input_file,'r',newline='')
csvReader = csv.reader(filereader)
header_list = next(csvReader)
csvList.append(header_list)
for row_list in csvReader:
csvList.append(row_list)
drawSheet(csvList)
filereader.close()
pass
## Global variables ##
csvList, cellList = [], []
## Main code ##
window = Tk()
mainMenu = Menu(window)
window.config(menu=mainMenu)
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='CSV 열기', command=openCSV)
fileMenu.add_command(label='CSV 저장', command=saveCSV)
csvMenu = Menu(mainMenu)
mainMenu.add_cascade(label='CSV 데이터 분석', menu=csvMenu)
csvMenu.add_command(label='특정 열,행 제거', command=csvData01)
csvMenu.add_command(label='특정 열,행 제거', command=csvData02)
window.mainloop()
|
[
"[email protected]"
] | |
d6468a91d9c1cd79c7e894a0df202c4743dc1841
|
13edd8f1bc3b86fd881f85fbeafe94811392d7fc
|
/seventh_module/CRM/38.CRM开发之公户基本管理/luffy_crm/web/views/public_customer.py
|
9a98ecccb0e156c8735277ebf26e985092295e90
|
[] |
no_license
|
ryan-yang-2049/oldboy_python_study
|
f4c90c9d8aac499e1d810a797ab368217f664bb1
|
6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f
|
refs/heads/master
| 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 |
HTML
|
UTF-8
|
Python
| false | false | 712 |
py
|
# -*- coding: utf-8 -*-
"""
__title__ = 'public_customer.py'
__author__ = 'yangyang'
__mtime__ = '2019-02-21'
"""
from stark.service.v1 import StarkHandler,StarkModelForm,get_choice_text,get_m2m_text
from web import models
class PublicCustomerModelForm(StarkModelForm):
class Meta:
model =models.Customer
exclude = ['consultant',]
class PublicCustomerHandler(StarkHandler):
list_display = ['name','qq',get_choice_text('状态','status'),get_choice_text('性别','gender'),
get_m2m_text('咨询的课程','course')]
def get_queryset(self, request, *args, **kwargs):
return self.model_class.objects.filter(consultant__isnull=True)
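    # "Public" (shared-pool) customers are exactly those with no consultant assigned.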
model_form_class = PublicCustomerModelForm
|
[
"[email protected]"
] | |
5ac4b682d2dfdc3e080813ab4a70bff3d7d7351e
|
7933d55de7e2d3a9e78a372fa76f064f5ed5eb6f
|
/maths/questions/antiderivative.py
|
56e2c6794eb07b71afc2e8541f40caf39c5e0963
|
[] |
no_license
|
o2edu/MathsExams
|
4921f6683e1d6d96aa834d5b01f30bd66522887d
|
8e2c0aeba6bbad52103c420747ead1dad6380408
|
refs/heads/master
| 2021-05-29T17:42:32.442055 | 2014-05-31T10:18:12 | 2014-05-31T10:18:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,603 |
py
|
import sympy
import random
from .. import all_functions, not_named_yet
from ..latex import solutions
from ..symbols import x0
from . import relationships
@relationships.root
class Antiderivative(relationships.QuestionPart):
"""
Question description
====================
Take a simple expression and find its antiderivative.
Real-life instances
===================
2009 2a: y = 1 / (1 - 2x) [4 lines] [2 marks]
2010 2a: y = cos(2x + 1) [2 lines] [1 mark]
2011 2a: y = 1 / (3x - 4) [5 lines] [1 mark]
2012 2: y = 1 / (2x - 1)^3 [4 lines] [2 marks]
there is no correlation between function type and marks assigned, so we have to choose between
the historic 1 or 2 marks this question has been assigned
"""
def __init__(self):
self.num_marks = random.randint(1, 2)
self._qp = {}
self._qp['function_type'] = random.choice(['linear', 'trig'])
inner_function = all_functions.request_linear(difficulty=3).equation
if self._qp['function_type'] == 'linear':
self.num_lines = 4
index = random.randint(1, 3)
self._qp['equation'] = 1 / inner_function ** index
elif self._qp['function_type'] == 'trig':
self.num_lines = 2
outer_function = random.choice([sympy.cos, sympy.sin])
self._qp['equation'] = outer_function(inner_function)
self._qp['antiderivative'] = self._qp['equation'].integrate()
def question_statement(self):
return 'Find an antiderivative of ${equation}$ with respect to $x$.'.format(
equation=sympy.latex(self._qp['equation'])
)
def solution_statement(self):
# sympy integrates things like 1/x as log(x), not log(|x|) (since the symbol x is treated as a complex number, not a real number)
proper_antiderivative = self._qp['antiderivative'].replace(sympy.log(x0), sympy.log(sympy.Abs(x0)))
constant_of_integration = not_named_yet.randint_no_zero(-3, 3)
lines = solutions.Lines()
# without using .factor() here, we could have (x + 1)**(-3) integrate to -1/(2*x**2 + 4*x + 2) which is expanded
antiderivative = proper_antiderivative.factor() + constant_of_integration
lines += r'${antiderivative}$'.format(antiderivative=sympy.latex(antiderivative))
lines += r'We arbitrarily choose our constant of integration to be ${constant_of_integration}$. It can be any real number, including zero.'.format(
constant_of_integration=constant_of_integration
)
return lines.write()
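# A minimal usage sketch; assumes the module is executed inside its package
# (the relative imports above require `python -m ...`), and output varies
# because the question parameters are randomised:
if __name__ == '__main__':
    question = Antiderivative()
    print(question.question_statement())
    print(question.solution_statement())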
|
[
"[email protected]"
] | |
c75c91792bd3cf62f44b90570699313af9f3e2aa
|
79e45a6e4846927da432087aba845036b11c5622
|
/UAT/var/ARCHIVE/NZDJPYdailyOHLC.py
|
a06abae1818e84fd8456ff3e4968a135362f0569
|
[] |
no_license
|
mjserpico/Scarlett-Trading
|
cba2bcfaacf886b9d851d978683b4ce641c8f6ad
|
9778717393dbb0818ee026356996d1806345a6c2
|
refs/heads/master
| 2020-03-21T21:39:51.108503 | 2019-05-09T02:06:26 | 2019-05-09T02:06:26 | 139,076,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,394 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 08 09:16:43 2017
@author: Michael
"""
import mysql.connector
from ib.opt import Connection, message
from ib.ext.Contract import Contract
import ib
import time
import logging
import datetime
import datalink #universal logins for environment
Flag = 0
CCY1 = 'NZD'  # base currency of the pair requested below
CCY2 = 'JPY'  # quote currency (both were referenced below but never defined)
logging.basicConfig(filename='pythonlogs\\DailyOHLC' + str(datetime.date.today()) + '.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('DailyOHLC' + str(datetime.date.today()) + '.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug('Starting NZDJPYDailyOHLC')
def reply_handler(msg):
#print(msg.value)
print("Reply:", msg)
test = msg.open
test2 = msg.high
test3 = msg.low
test4 = msg.close
logger.debug('In Reply Handler')
    if float(test) != -1:
        logger.debug('Valid Price Found (OPEN NOT -1)')
#cnx = mysql.connector.connect(user='mjserpico', password='UrzE8B66',host="scar01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SCAR01')
#cnx = mysql.connector.connect(user='Scarlett01', password='scar01lett',host="serpdb01.cqxmc7cib5oh.us-east-1.rds.amazonaws.com", database='SERPDB01')
cnx = mysql.connector.connect(user=datalink.DB_User, password=datalink.DB_Pass,host=datalink.DB_Host, database=datalink.DB_Path)
logger.debug('Connected to Database')
cur = cnx.cursor()
cur.execute("""Insert Into NZDJPY (Date, Open, High, Low, Close) values(%s,%s,%s,%s,%s)""",(time.strftime("%m/%d/%Y"),float(test),float(test2),float(test3),float(test4)))
cnx.commit()
logger.debug('Ran Insert Script')
        today = datetime.date.today()
print("Today is " + str(today))
dayofweek = datetime.datetime.today().weekday()
print(dayofweek)
if dayofweek == 0: #if Today is Monday
yesterday = today - datetime.timedelta(days=3) #Get Friday
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
else:
yesterday = today - datetime.timedelta(days=1) #Take 1 Day back
month = (str(0) + str(yesterday.month))
day = (str(0)+ str(yesterday.day))
yesterday2 = (month[-2:] +"/"+ day[-2:] +"/"+str(yesterday.year))
logger.debug('Yesterday2 was %s', str(yesterday2))
#MovingAverage Calculation
#Step 1 Get earliest Date to calculate avg from
#reformat date to DB convention first
logger.debug('Today is still %s', today)
backdate = today - datetime.timedelta(days=13)
logger.debug('Date shifted back 10 is %s', backdate)
dayofweek = backdate.weekday()
#Adjust for Saturdays and Sundays: No price data available.
# if dayofweek == 6:
# backdate = today - datetime.timedelta(days = 9)
# if dayofweek == 5:
# backdate = today - datetime.timedelta(days = 8)
#
month = (str(0) + str(backdate.month))
day = (str(0)+ str(backdate.day))
backdate2 = (month[-2:] +"/"+ day[-2:] +"/"+str(backdate.year))
logger.debug('First Date of BB Moving Average is %s', backdate2)
#Select ID from EURUSD where Date in ('12/19/2016', '02/07/2017');
#Select round(Avg(Close),5) from EURUSD where ID BETWEEN 3881 AND 3915;
query = ("SELECT ID from " + CCY1 + CCY2 + " where Date = \"" + yesterday2 + "\"")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID1 = ID
logger.debug('BB ID1 is %s', ID1)
query = ("SELECT ID from " + CCY1 + CCY2 + " where Date = \"" + backdate2 + "\"")
logger.debug('Query is %s', query)
cur.execute(query)
for (ID) in cur:
ID2 = ID
logger.debug('BB ID1 is %s', ID1)
logger.debug('BB ID2 is %s', ID2)
query = ("SELECT round(Avg(Close),5) as Avg from " + CCY1 + CCY2 + " where ID BETWEEN " + str(ID2[0]) + " AND " + str(ID1[0]) + ";")
logger.debug('Query is %s', query)
cur.execute(query)
for (Avg) in cur:
BBMovAvg = Avg #Final Moving Average Value
logger.debug('BBMovAvg is %s', BBMovAvg)
##Puts Moving Average Value in hasPosition Table for Reference with intraday strategies
query = ("UPDATE hasPosition SET BB_STRATMovingAvgValue = " + str(BBMovAvg[0]) + " where CCY =\'" + CCY1 + CCY2 +"\';")
logger.debug('Query is %s', query)
cur.execute(query)
cnx.commit()
global Flag
Flag = 1
logger.debug('Flag set to 1')
while Flag == 0:
conn = Connection.create(port=4002, clientId=999)
conn.connect()
logger.debug('Connecting to Server')
time.sleep(1)
conn.register(reply_handler,'HistoricalData') #By registering "HistoricalData" --the Method name only --we can eliminate all the open order garbage
logger.debug('Registered HistoricalData Reply Handler')
#conn.registerall(reply_handler)
time.sleep(1)
qqq = Contract()
qqq.m_symbol = 'NZD'
qqq.m_secType = 'CASH'
qqq.m_exchange = 'IDEALPRO'
qqq.m_currency = 'JPY'
logger.debug('Requesting historical data')
conn.reqHistoricalData(1, qqq, '', '1 D', '1 day', 'Midpoint', 1, 2)
logger.debug('Returned from Reply Handler')
time.sleep(1) #give IB time to send us messages
logger.debug('Disconnecting from Server')
conn.disconnect()
logger.debug('Finished NZDJPY Daily OHLC')
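# Illustrative check of the "previous trading day" logic above: if today is
# a Monday (weekday() == 0) the script steps back 3 days to Friday,
# otherwise 1 day. E.g. for 2017-01-09 (a Monday) it builds
# yesterday2 = '01/06/2017' via the zero-padding slices.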
|
[
"[email protected]"
] | |
c35c68e7a7faa7dfa3b76900fb3308daf37711fe
|
945b3c14b5a58f8d98955cdf27aef9469e21523c
|
/flod_booking/alembic/versions/20140307-1414-483e5e40b48d_book_131_noark_5_documents_will_.py
|
6c307e7ab2a2eb9984925dcd7e32e36ce21794e4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
Trondheim-kommune/Bookingbasen
|
34e595e9c57ea6428406b2806559aab17e9a3031
|
58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6
|
refs/heads/master
| 2022-11-29T00:20:18.681549 | 2017-05-29T19:33:43 | 2017-05-29T19:33:43 | 49,863,780 | 1 | 1 |
NOASSERTION
| 2022-11-22T00:27:34 | 2016-01-18T08:47:46 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,581 |
py
|
# -*- coding: utf-8 -*-
"""BOOK-131 Noark 5 documents will temporarily be saved in flod (the integration point, FeSak, is not ready to receive them)
Revision ID: 483e5e40b48d
Revises: 47d9ec0a7bc5
Create Date: 2014-03-07 14:14:59.182049
"""
# revision identifiers, used by Alembic.
revision = '483e5e40b48d'
down_revision = '47d9ec0a7bc5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('fesak_sak',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('saksnummer', sa.String(), nullable=False),
sa.Column('ws_header', sa.String(), nullable=False),
sa.Column('ws_sak', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
sa.UniqueConstraint('application_id')
)
op.create_table('fesak_journalpost',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fesak_sak_id', sa.Integer(), nullable=False),
sa.Column('ws_header', sa.String(), nullable=False),
sa.Column('ws_journalpost', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['fesak_sak_id'], ['fesak_sak.id'], ),
)
def downgrade():
raise NotImplementedError('This application does not support downgrades.')
|
[
"[email protected]"
] | |
726f87be962da3929f2f16d4d4ab5bded3efb223
|
6a5a16dc64262c0c3aa4732253d804de105a60b2
|
/2.Replacing values in a DataFrame/Replace single values II.py
|
58160600818e3b9518971f909fa09fa548aed606
|
[] |
no_license
|
Mat4wrk/Writing-Efficient-Code-with-pandas-Datacamp
|
425c574053f5777098c7ef1ebedc4ede6500860a
|
11ee5f5f2dae180a51fe003a52aaed22df8b5835
|
refs/heads/main
| 2023-03-12T01:02:09.787179 | 2021-02-25T16:39:59 | 2021-02-25T16:39:59 | 342,286,682 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
# Replace the number rank by a string
names['Rank'].replace({1: 'FIRST', 2: 'SECOND', 3: 'THIRD'}, inplace=True)
print(names.head())
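# Self-contained sketch of the same replace; the `names` DataFrame is
# preloaded in the original exercise, so this toy frame is an assumption:
# import pandas as pd
# names = pd.DataFrame({'Name': ['Ann', 'Bea', 'Cal'], 'Rank': [1, 2, 3]})
# names['Rank'].replace({1: 'FIRST', 2: 'SECOND', 3: 'THIRD'}, inplace=True)
# print(names)   # the Rank column becomes FIRST / SECOND / THIRD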
|
[
"[email protected]"
] | |
c1b0703067199a1d0d8be39d0e2e1cab53696640
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_136/ch36_2020_10_02_01_35_54_347944.py
|
7e2ceca48b0f5b0f14629fb376f033a7433bfd56
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 237 |
py
|
def farotial(n):
    # Iterative factorial: multiplies n by (n-1), (n-2), ..., 1.
    if n == 0:
        return 1  # 0! is 1 by definition; the loop below would return 0
    i = 0
    diminuir = 1
    outra = n
    multiplicador = 1
    while i < n - 1:
        outra -= diminuir
        multiplicador *= outra
        i += 1
    resultado = multiplicador * n
    return resultado
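# Quick check (illustrative):
print(farotial(5))  # expected: 120
print(farotial(0))  # expected: 1 after the edge-case fix above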
|
[
"[email protected]"
] | |
f69e31e1134db42b4aaddf609d3180af08765c3e
|
8baa00a8c04f64e983532fa4a420c68f490bdaa8
|
/build/tiago_gazebo/catkin_generated/pkg.installspace.context.pc.py
|
7a21807e7abb1a71cee3116b16c11845a31ea160
|
[] |
no_license
|
samuel-cavalcanti/TIAGo
|
e52c778f40ba8e03af21ba275b45b7faac5625b3
|
575533f713338b28ee5709279a874c9e374d77bd
|
refs/heads/master
| 2021-04-06T01:32:09.090002 | 2018-03-10T23:51:45 | 2018-03-10T23:51:45 | 124,707,936 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 383 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tiago_gazebo"
PROJECT_SPACE_DIR = "/home/samuel/tiago_public_ws/install"
PROJECT_VERSION = "0.0.10"
|
[
"[email protected]"
] | |
bae83ea1902b2ad2dd0cc9983d69a229e3e4b6b5
|
5982a9c9c9cb682ec9732f9eeb438b62c61f2e99
|
/Problem_165/learning_solution.py
|
7fec0e3d63d4bdb759b81fe243085cecfadd800a
|
[] |
no_license
|
chenshanghao/LeetCode_learning
|
6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c
|
acf2395f3b946054009d4543f2a13e83402323d3
|
refs/heads/master
| 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 665 |
py
|
class Solution:
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
        # Examples handled below:
        #   '1.0.1' vs '01.01.01'  (leading zeros)
        #   '1.0.0' vs '1.0'       (different lengths, padded with 0)
versions1 = [int(v) for v in version1.split(".")]
versions2 = [int(v) for v in version2.split(".")]
for i in range(max(len(versions1),len(versions2))):
v1 = versions1[i] if i < len(versions1) else 0
v2 = versions2[i] if i < len(versions2) else 0
if v1 > v2:
return 1
elif v1 < v2:
return -1
return 0
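# A minimal usage sketch (illustrative values):
if __name__ == '__main__':
    s = Solution()
    print(s.compareVersion('1.0.1', '1'))    # 1  (1.0.1 > 1)
    print(s.compareVersion('1.0', '1.0.0'))  # 0  (equal once padded)
    print(s.compareVersion('0.1', '1.1'))    # -1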
|
[
"[email protected]"
] | |
bca710e613a599994957f93c89e4a64b8f045faf
|
0015f3ac50f20f2f99727948f117d7ec8cd324d5
|
/Data_Preprocessing_ABC_24hrs.py
|
c8087f23b1b604a60e4d271f750e50b68290dcfd
|
[] |
no_license
|
omkarpandit24/Dial-Plan-Data-Preprocessing-Aricent
|
720d49bce31be9bcec7d4a8c62f8fab46c1fe04b
|
578f9849027cdaa5f4eff38ff9c06b9f7b837444
|
refs/heads/master
| 2020-04-01T08:06:30.483536 | 2018-10-15T17:21:57 | 2018-10-15T17:21:57 | 153,017,742 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,888 |
py
|
#Dial Plan data preprocessing
#24 hours execution
import pandas as pd
import numpy as np
#Import mapping file - mapping between Test_Case_ID and its details about 'Call of Service' and 'Call type'
mapping_data = pd.read_csv("Test_Case_ID_mapping.csv")
#Import 24 Hours working executions data
data = pd.read_csv("INTLENH_premium_try2_CT_3.csv")
data.rename(columns={'Calling Party': 'Calling_Party', 'Called Party': 'Called_Party'}, inplace=True)
data = data[['Status', 'Calling_Party', 'Called_Party', 'Duration']]
#New dataframe to store combined results - data + mapping_data
data3 = pd.DataFrame(columns=['Status', 'Calling_Party', 'Called_Party', 'Duration', 'Test_Case_ID'])
#Focus on only Failed and Completed executions
status_array = ['Failed', 'Completed']
data = data.loc[data['Status'].isin(status_array)]
calling_party_series = pd.Series(data['Calling_Party'])
called_party_series = pd.Series(data['Called_Party'])
# Keep only the first whitespace-separated token: the Class-of-Service code
data['Calling_Party'] = data.Calling_Party.str.split().str.get(0)
#Call of service codes for 24 hours execution
Call_of_Service = ['InternalOnly', 'Nat24STD', 'Nat24RES'
,'Nat24ENH', 'INTL24STD', 'INTL24ENH'
,'CLIRNat24STD', 'CLIRNat24RES', 'CLIRNat24ENH'
, 'CLIRINTL24STD', 'CLIRINTL24ENH']
#Codes available for call type common for all 3 types of executions
Call_Type = ['National', 'Service', 'Freephone', 'Emergency'
, 'International', 'Mobile', 'Premium']
#Define type of execution
execution_cycle = '24 Hours Execution'
#Current execution cycle ID
cycle_id = 3
#Mapping logic
for i in range(len(Call_of_Service)):
data1 = data[data['Calling_Party'] == Call_of_Service[i]]
#data1 = data[calling_party_series.str.match(Call_of_Service[i])]
for j in range(len(Call_Type)):
data2 = data1[called_party_series.str.contains(Call_Type[j])]
data2.insert(len(data2.columns), 'Test_Case_ID', pd.Series(np.random.randn(len(data2['Status'])), index=data2.index))
for index, row in mapping_data.iterrows():
if row["Execution_Cycle"] == execution_cycle and row["COS_Code"] == Call_of_Service[i] and row["Call_Type_code"] == Call_Type[j]:
test_case_id = row["Test_Case_ID"]
#print(test_case_id)
data2['Test_Case_ID'] = test_case_id
data3 = data3.append(data2)
data3.loc[data3['Test_Case_ID'] == 'DP_GERMANY_TC42']
data3.loc[data3['Calling_Party'] == 'INTL24ENH']
data4 = data3.sort_index()
data4['Execution_ID'] = range(1, len(data4) + 1)
data4 = data4.drop(['Calling_Party', 'Called_Party'], axis=1)
data4['Cycle_ID'] = cycle_id
data4 = data4[['Execution_ID', 'Cycle_ID', 'Duration', 'Status', 'Test_Case_ID']]
#Writing into CSV file
data4.to_csv('PP_INTLENH_premium_24hrs_try2_CT_3.csv')
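# Illustrative example of the Calling_Party truncation above (toy value,
# not from the real execution data):
#   'Nat24STD 4917012345' -> .str.split() -> ['Nat24STD', '4917012345']
#   -> .str.get(0)        -> 'Nat24STD'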
|
[
"[email protected]"
] | |
e44953e3dd208113a69c491df8bc862ce7df32a8
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/5.mysql/mysql12-引擎.py
|
82a3eab1efb867d286da3c941f8abdf243a5f941
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,363 |
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="mysql"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("引擎")
r2=s2.getRootTopic()
r2.setTitle("引擎")
content={
'MyISAM':[
{'每个MyISAM在磁盘上存储成三个文件':[
'1.frm文件:存储表的定义数据',
'2.MYD文件:存放表具体记录的数据',
'3.MYI文件:存储索引,仅保存记录所在页的指针,索引的结构是B+树结构'
]},
'存储引擎通过MYI的B+树结构来查找记录页,再根据记录页查找记录',
'不支持事务'
],
'InnoDB':[
'1.通过自动增长auto_increment,生成id',
'2.支持事务:默认隔离级别为可重复度,通过MVCC(并发版本控制)来实现',
'3.使用的锁粒度为行级锁,可支持更高的并发',
'4.存在着缓冲管理:通过缓冲池,将索引和数据全部缓存起来,加快查询的速度',
'5.InnoDB类型的表,其数据的物理组织形式是聚簇表,所有数据按照主键来组织,数据和索引放在一块,位于B+数的叶子节点上'
'6.支持事务'
],
'Memory':[
'1.支持数据类型有限:如不支持TEXT和BLOB类型,对字符串类型,只支持固定长度的,VARCHAR会被自动存储为CHAR类型',
'2.支持的锁粒度为表级锁:访问量大时,表级锁会成为MEMORY存储引擎的瓶颈',
'3.数据存放在内存中:一旦服务器出现故障,数据会丢失',
'4.默认使用hash索引'
],
'InnoDB和Memory的区别':[
'InnoDB引擎:把数据放在主键索引上,其他索引上保存的是主键id',
'Memory引擎:把数据单独存放,索引上保存数据位置'
],
'InnoDB和MyISAM的区别':[
'都是使用B+树来实现索引,但innoDB的叶子节点保存的是主键和数据(占空间更大,但查询更快),MyISAM保存了数据指针',
'锁:InnoDB支持行级锁,事务,MVCC,MyISAM不支持',
'count(*):InnoDB要扫描全表,MyISAM用一个变量保存了整个表的行数'
]
}
#构建xmind
xmind.build(content,r2)
#保存xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
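# Convention consumed by xmind.build, inferred from the usage above
# (illustrative, not part of the original file): a dict key becomes a topic,
# a list holds its subtopics, and a dict inside a list nests another level.
# demo = {'Topic': ['leaf 1', {'branch': ['leaf 2', 'leaf 3']}]}
# xmind.build(demo, r2)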
|
[
"[email protected]"
] | |
43ab8afeab93a62e08dddd6878ed50d6a8ff2cc2
|
cbcdf195338307b0c9756549a9bffebf3890a657
|
/django-stubs/contrib/admin/forms.pyi
|
02de4b7a39ef2e839ec31c245909328b97c3f7bd
|
[
"MIT"
] |
permissive
|
mattbasta/django-stubs
|
bc482edf5c6cdf33b85005c2638484049c52851b
|
8978ad471f2cec0aa74256fe491e2e07887f1006
|
refs/heads/master
| 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 |
MIT
| 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null |
UTF-8
|
Python
| false | false | 891 |
pyi
|
from typing import Any, Dict
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from django.contrib.auth.models import User
class AdminAuthenticationForm(AuthenticationForm):
auto_id: str
data: Dict[str, str]
empty_permitted: bool
error_class: type
fields: Dict[Any, Any]
files: Dict[Any, Any]
initial: Dict[Any, Any]
is_bound: bool
label_suffix: str
request: None
user_cache: None
error_messages: Any = ...
required_css_class: str = ...
def confirm_login_allowed(self, user: User) -> None: ...
class AdminPasswordChangeForm(PasswordChangeForm):
auto_id: str
data: Dict[Any, Any]
empty_permitted: bool
error_class: type
fields: Dict[Any, Any]
files: Dict[Any, Any]
initial: Dict[Any, Any]
is_bound: bool
label_suffix: str
user: Any
required_css_class: str = ...
|
[
"[email protected]"
] | |
06b164dad6993d72808fa25f53b59ffbb58c7abe
|
636506c687b4797bfe5daa59b5264615d3bb894b
|
/backend/task/migrations/0001_initial.py
|
34545b851e4d69599c555b830abd90bf0b7735ca
|
[] |
no_license
|
crowdbotics-apps/pip-25311
|
ac5240874c28ab73f28b5f8c5bc273267aaa88e5
|
c4155c93f1517039812e794caf744214fdd115e2
|
refs/heads/master
| 2023-03-29T11:25:20.961098 | 2021-03-27T19:44:45 | 2021-03-27T19:44:45 | 352,161,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,121 |
py
|
# Generated by Django 2.2.19 on 2021-03-27 19:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('task_category', '0001_initial'),
('location', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('details', models.TextField()),
('frequency', models.CharField(max_length=7)),
('size', models.CharField(max_length=6)),
('is_confirmed', models.BooleanField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_confirmed', models.DateTimeField(blank=True, null=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_category', to='task_category.Category')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_customer', to='task_profile.CustomerProfile')),
('location', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='task_location', to='location.TaskLocation')),
('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_subcategory', to='task_category.Subcategory')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=10)),
('timestamp_completed', models.DateTimeField(blank=True, null=True)),
('date', models.DateField(blank=True, null=True)),
('timestamp_started', models.DateTimeField(blank=True, null=True)),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasktransaction_task', to='task.Task')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_customer', to='task_profile.CustomerProfile')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_customer', to='task_profile.CustomerProfile')),
('task', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_task', to='task.Task')),
('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_tasker', to='task_profile.TaskerProfile')),
],
),
]
|
[
"[email protected]"
] | |
38c507a83cc96c9dff844075ddef4b8e6e21b84c
|
d87483a2c0b50ed97c1515d49d62c6e9feaddbe0
|
/.history/buyTopStocks_20210202220509.py
|
ced8f04211038e2e467f10513714dca47a8b61ff
|
[
"MIT"
] |
permissive
|
HopperKremer/hoptrader
|
0d36b6e33922414003cf689fb81f924da076a54b
|
406793c10bc888648290fd15c7c2af62cf8c6c67
|
refs/heads/main
| 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,661 |
py
|
# from excel import OpenExcel
from tda import auth, client
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config
from selenium import webdriver
import json
import time  # used below for sleep(); missing from this snapshot
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"
try:
c = auth.client_from_token_file(config.token_path, config.api_key)
except FileNotFoundError:
# with webdriver.Chrome() as driver:
c = auth.client_from_login_flow(
driver, config.api_key, redirect_uri, config.token_path
)
r = c.get_price_history(
"AAPL",
period_type=client.Client.PriceHistory.PeriodType.YEAR,
period=client.Client.PriceHistory.Period.TWENTY_YEARS,
frequency_type=client.Client.PriceHistory.FrequencyType.DAILY,
frequency=client.Client.PriceHistory.Frequency.DAILY,
)
assert r.status_code == 200, r.raise_for_status()
print(json.dumps(r.json(), indent=4))
soldFile = open("sold.py", "a")
soldStocks = []
# NOTE: my_stocks is assumed to be defined elsewhere (e.g. holdings pulled
# from the broker); it does not exist in this snapshot, so an empty
# placeholder keeps the script runnable.
my_stocks = {}
# for stock, data in my_stocks.items():
for stock in my_stocks:
    driver = webdriver.Chrome(DRIVER_PATH)  # was an undefined PATH
    driver.get('https://financhill.com/screen/stock-score')
    score = int(driver.find_element_by_tag_name('h2').text)
    time.sleep(2)
    print(stock)
    print(score)
    # if (score < 40):
    #     r.order_sell_trailing_stop(stock, data['quantity'], 1)
    #     soldStocks.append(stock)
    driver.quit()
soldFile.write(str(soldStocks))  # write() takes a string, not a list
soldFile.close()
# Stray HTML pasted into this work-in-progress snapshot, kept as a comment:
# <span class="sort sort-desc" data-sort-name="stock_score_normalized" data-current-order="">
# Stock Score <i class="glyphicon"></i></span>
|
[
"[email protected]"
] | |
130ae343f2184e2cf9db80e53c5efbebb5c76066
|
358a60b05a291a4a81c50401be836e6a60687b55
|
/Problems/Eigenvalues/main.py
|
351a776463b82d906f57d23b325d1260174bfcce
|
[] |
no_license
|
wangpengda1210/Tetris
|
dbdd57cb21d40ff625445e5e9f737db51dd57f63
|
7ef5b3fe8f687097d7d3ff7b5c7aa3b77032667b
|
refs/heads/main
| 2023-03-09T16:34:28.367503 | 2021-03-02T03:40:55 | 2021-03-02T05:39:00 | 342,492,432 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
import numpy as np
a = int(input())
b = int(input())
c = int(input())
d = int(input())
print(np.linalg.eigvals(np.array([[a, b], [c, d]])))
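# Example (illustrative): for the matrix [[2, 0], [0, 3]] enter a=2, b=0,
# c=0, d=3; the script prints [2. 3.], since a diagonal matrix has its
# diagonal entries as eigenvalues.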
|
[
"[email protected]"
] | |
94259b1c5eb4a5d39212ed10ba34bbd1766befa5
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/39/usersdata/72/15179/submittedfiles/dec2bin.py
|
cd0019461b73084784b76609959e2e414e1c86af
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 150 |
py
|
# -*- coding: utf-8 -*-
n = int(input('Enter the decimal number: '))
i = 0
j = 1
while n > 0:
    d = n % 2      # next binary digit, least significant first
    n = n // 2     # integer division; n/2 yields floats in Python 3 and breaks the loop
    i = i + d * j  # place the digit at the right decimal position
    j = j * 10
print('%d' % i)
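# Example (illustrative): input 10 -> remainders 0, 1, 0, 1 -> prints 1010.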
|
[
"[email protected]"
] | |
a1626bb5890cc887220d4ad125773c36dcc3d81a
|
b3528a3795ce373e27d52362128de3cff6f9969d
|
/python/orbs/target/password-generator/slices1589360571.263371/success/success_24_0.py
|
6911640c338f776987141f438eff887ce2b76e1d
|
[] |
no_license
|
greenmonn/daily-coding
|
43e0f3775678c7d6116df7ba5034ea18489d87c9
|
ef6ecc88e6db61e18364eef3ea071c11e1385a99
|
refs/heads/master
| 2023-01-14T04:59:14.130309 | 2021-02-08T23:32:56 | 2021-02-08T23:32:56 | 157,735,438 | 1 | 1 | null | 2022-12-21T02:13:17 | 2018-11-15T15:47:37 |
Python
|
UTF-8
|
Python
| false | false | 5,923 |
py
|
#!/usr/bin/env python3
# m4ngl3m3! v0.1.1
# Common password pattern generator using strings list
# Follow (Medium / Twitter): @localh0t
import argparse
import sys
import os
from Mangler import ManglingParameters
from Mangler import Mangler
def build_parser():
"""Add parser arguments and return an instance of ArgumentParser."""
parser = argparse.ArgumentParser(description=("Common password pattern "
"generator using strings "
"list"),
formatter_class=argparse.
ArgumentDefaultsHelpFormatter)
parser.add_argument("mutation_mode",
metavar="MUTATION_MODE",
type=str,
help=("Mutation mode to perform: "
"(prefix-mode | suffix-mode | dual-mode)"),
choices=['prefix-mode', 'suffix-mode', 'dual-mode'])
parser.add_argument("strings_file",
metavar="STRINGS_FILE",
type=str,
help="File with strings to mutate")
parser.add_argument("output_file",
metavar="OUTPUT_FILE",
type=str,
help="Where to write the mutated strings")
parser.add_argument("-fy", "--from-year",
metavar="FROM_YEAR",
type=int,
help="Year where our iteration starts",
default=2015)
parser.add_argument("-ty", "--to-year",
metavar="TO_YEAR",
type=int,
help="Year where our iteration ends",
default=2020)
parser.add_argument('-sy', "--short-year",
help=("Also add shorter year form when iterating"),
action='store_true',
default=False)
parser.add_argument("-nf", "--numbers-file",
metavar="NUMBERS_FILE",
type=str,
help="Numbers prefix/suffix file",
default='./target/password-generator/files/numbers/numbers_set2.txt')
parser.add_argument("-sf", "--symbols-file",
metavar="SYMBOLS_FILE",
type=str,
help="Symbols prefix/suffix file",
default='./target/password-generator/files/symbols/symbols_set2.txt')
parser.add_argument("-cf", "--custom-file",
metavar="CUSTOM_FILE",
type=str,
help="Custom words/dates/initials/etc file")
parser.add_argument('-sbs', "--symbols-before-suffix",
help=("Insert symbols also before years/numbers/"
"custom (when in suffix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument('-sap', "--symbols-after-prefix",
help=("Insert symbols also after years/numbers/custom"
" (when in prefix-mode or dual-mode)"),
action='store_true',
default=False)
parser.add_argument("-mm", "--mutation-methods",
metavar="MUTATION_METHODS",
type=str,
help=("Mutation methods to perform (comma separated, "
"no spaces) (valid: see MUTATION_METHODS.md)"),
default='normal,'
'uppercase,'
'firstup,'
'replacevowels')
return parser
def validate_files(strings_file, output_file):
"""Check if input/output files are valid."""
if not os.path.isfile(strings_file):
print("[-] The file %s does not exist or is not a file!" % strings_file)
sys.exit(1)
if os.path.isfile(output_file):
os.remove(output_file)
def build_mangler_with_args(args):
"""Return an instance of Mangler with the given parameters."""
parameters = ManglingParameters()
parameters.num_file = open(args.numbers_file, 'r').read().splitlines()
parameters.sym_file = open(args.symbols_file, 'r').read().splitlines()
if (args.custom_file):
parameters.cus_file = open(args.custom_file, 'r').read().splitlines()
parameters.mutation_mode = args.mutation_mode
parameters.from_year = args.from_year
parameters.to_year = args.to_year
parameters.short_year = args.short_year
parameters.prefix_pos_swap = args.symbols_after_prefix
parameters.suffix_pos_swap = args.symbols_before_suffix
return Mangler(mangling_parameters=parameters)
if __name__ == "__main__":
args = build_parser().parse_args()
mangler = build_mangler_with_args(args)
mangler_functions = {
"normal": mangler.normal_mangling,
"uppercase": mangler.uppercase_mangling,
"firstup": mangler.firstup_mangling,
"replacevowels": mangler.replacevowels_mangling,
}
written_strings = 0
with open(args.strings_file, 'r') as f:
for line in f:
mangled = []
for method in args.mutation_methods.lower().split(","):
try:
(name, output) = mangler_functions[method](line.strip())
mangled.extend(output)
except KeyError:
print("[-] The method %s is not defined !" % method)
print("[+] %s mutation method done on string: %s" %
(name, line.strip()))
written_strings += len(mangled)
print('##v_trajectory captured: {}##'.format(written_strings))
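# Example invocation (illustrative; the script and file names are assumed):
#   python3 m4ngl3m3.py suffix-mode strings.txt mutated.txt -fy 2018 -ty 2020 -sy
# mutates every string in strings.txt with years 2018-2020 (plus the short
# forms 18-20) and the number/symbol sets configured above.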
|
[
"[email protected]"
] | |
dd0f2fd7d48c85fda6349cac297406d769294ba1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/217/usersdata/355/113233/submittedfiles/av2_p3_m2.py
|
ddc27da64cc24bc2b5d2c4edd83d7db6abb116a4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 503 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
n = int(input("Enter the matrix dimension: "))
matriz = np.zeros((n, n))  # np.zeros takes the shape as a tuple
M0 = M1 = M2 = 0
for i in range(0, n, 1):
    for j in range(0, n, 1):
        matriz[i, j] = float(input("Enter the matrix elements: "))
        # accumulate the sums of the first three columns
        if j == 0:
            M0 = matriz[i, j] + M0
        elif j == 1:
            M1 = matriz[i, j] + M1
        elif j == 2:
            M2 = matriz[i, j] + M2
# keep the largest of the three column sums (the broken if-chain apparently
# intended a maximum); the result is left in m, as in the original
m = max(M0, M1, M2)
|
[
"[email protected]"
] | |
56688dead2cbc91be46ea27859c5c5320cca5b5a
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_driver_license_response.py
|
bbe664d173706914e84e388630af95ba3f524c3d
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 2,933 |
py
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class RecognizeDriverLicenseResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'result': 'DriverLicenseResult'
}
attribute_map = {
'result': 'result'
}
def __init__(self, result=None):
"""RecognizeDriverLicenseResponse - a model defined in huaweicloud sdk"""
super(RecognizeDriverLicenseResponse, self).__init__()
self._result = None
self.discriminator = None
if result is not None:
self.result = result
@property
def result(self):
"""Gets the result of this RecognizeDriverLicenseResponse.
:return: The result of this RecognizeDriverLicenseResponse.
:rtype: DriverLicenseResult
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this RecognizeDriverLicenseResponse.
:param result: The result of this RecognizeDriverLicenseResponse.
:type: DriverLicenseResult
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeDriverLicenseResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
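# A minimal usage sketch (illustrative; a real response is built by the SDK
# from an OCR API call, and `result` would be a DriverLicenseResult):
# resp = RecognizeDriverLicenseResponse(result=None)
# print(resp.to_dict())   # -> {'result': None}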
|
[
"[email protected]"
] |