| column | dtype | range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
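Each row below follows this schema. As a minimal sketch, rows of a dataset like this can be streamed with the Hugging Face `datasets` library; the dataset id below is a placeholder, since this card does not name one:

```python
# A minimal sketch: "org/dataset-name" is a placeholder id, not taken from this card.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds.take(3):
    # each row carries the columns described in the schema table above
    print(row["repo_name"], row["path"], row["language"], row["length_bytes"])
```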
3800854302dd23a2b91645a282642a890b8aa887 | 4809471274d6e136ac66d1998de5acb185d1164e | /pypureclient/flasharray/FA_2_5/models/array_connection_path_response.py | de0fc1399cbe7b264e3b2ab8d767e8ab33deda4c | [
"BSD-2-Clause"
]
| permissive | astrojuanlu/py-pure-client | 053fef697ad03b37ba7ae21a0bbb466abf978827 | 6fa605079950765c316eb21c3924e8329d5e3e8a | refs/heads/master | 2023-06-05T20:23:36.946023 | 2021-06-28T23:44:24 | 2021-06-28T23:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class ArrayConnectionPathResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[ArrayConnectionPath]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.ArrayConnectionPath]
):
"""
Keyword args:
items (list[ArrayConnectionPath])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayConnectionPathResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionPathResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionPathResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
e76448cde7ed936c5dde66db96302816ecafc4b1 | 36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1 | /learn/migrations/0004_auto_20181116_1649.py | 2880faadffc2f92d5d293b247452f5857c030eaf | []
| no_license | phufoxy/fotourNew | 801ab2518424118020dc6e5f31a7ba90a654e56a | 6048c24f5256c8c5a0d18dc7b38c106a7c92a29c | refs/heads/master | 2023-04-13T01:34:22.510717 | 2018-12-26T03:46:09 | 2018-12-26T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py |
# Generated by Django 2.1 on 2018-11-16 09:49
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('learn', '0003_auto_20181116_1648'),
]
operations = [
migrations.AlterField(
model_name='speak',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 11, 16, 16, 49, 23, 870624)),
),
migrations.AlterField(
model_name='taskspeak',
name='date',
field=models.DateTimeField(default=datetime.datetime(2018, 11, 16, 16, 49, 23, 870624)),
),
]
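# Note: the datetime.datetime(...) defaults above were captured once, at
# makemigrations time, so every new row receives that fixed timestamp. Passing
# a callable such as django.utils.timezone.now (uncalled) would stamp rows at
# write time instead; the fixed values are kept here as generated.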
| [
"[email protected]"
]
| |
41d11890310ec3f84d4e94f421d3c69ba64b9cd6 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /tcc/evaluate.py | 1b7afa40c2f407e4510e06c080a912498d6e67ae | [
"Apache-2.0"
]
| permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 5,634 | py |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate embeddings on downstream tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
from tcc.algorithms import get_algo
from tcc.config import CONFIG
from tcc.datasets import create_dataset
from tcc.datasets import create_one_epoch_dataset
from tcc.tasks import get_tasks
from tcc.utils import get_embeddings_dataset
from tcc.utils import get_lr_opt_global_step
from tcc.utils import restore_ckpt
from tcc.utils import setup_eval_dir
layers = tf.keras.layers
flags.DEFINE_boolean('continuous_eval', True, 'Evaluate continuously.')
flags.DEFINE_string('logdir', '/tmp/alignment_logs', 'Path to logs.')
flags.DEFINE_boolean('defun', True, 'Defun everything!')
flags.DEFINE_boolean('visualize', False, 'Visualize images. Switched off by '
                     'default to speed training up and take less memory.')
flags.DEFINE_integer(
'max_embs', 0, 'Max number of videos to embed. 0 or less '
'means embed all videos in dataset.')
FLAGS = flags.FLAGS
evaluated_last_ckpt = False
def evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
summary_writer):
"""Evaluate learnt embeddings on downstream tasks."""
# Sets up model for training.
_, optimizer, global_step = get_lr_opt_global_step()
restore_ckpt(logdir=CONFIG.LOGDIR, optimizer=optimizer, **algo.model)
if global_step.numpy() == CONFIG.TRAIN.MAX_ITERS:
global evaluated_last_ckpt
evaluated_last_ckpt = True
metrics = {}
if iterator_tasks:
with summary_writer.as_default():
with tf.summary.record_if(True):
for task_name, task in iterator_tasks.items():
metrics[task_name] = task.evaluate(algo, global_step,
iterators=iterators)
max_embs = None if FLAGS.max_embs <= 0 else FLAGS.max_embs
if embedding_tasks:
frames_per_batch = CONFIG.EVAL.FRAMES_PER_BATCH
for dataset_name in CONFIG.DATASETS:
dataset = {'name': dataset_name}
train_iterator = create_one_epoch_dataset(
dataset_name,
'train',
mode='eval',
path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
dataset['train_dataset'] = get_embeddings_dataset(
algo.model, train_iterator, frames_per_batch=frames_per_batch,
max_embs=max_embs)
val_iterator = create_one_epoch_dataset(
dataset_name,
'val',
mode='eval',
path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
dataset['val_dataset'] = get_embeddings_dataset(
algo.model, val_iterator, frames_per_batch=frames_per_batch,
max_embs=max_embs)
with summary_writer.as_default():
with tf.summary.record_if(True):
for task_name, task in embedding_tasks.items():
if task_name not in metrics:
metrics[task_name] = {}
metrics[task_name][dataset_name] = task.evaluate(
algo, global_step, embeddings_dataset=dataset)
# Add all metrics in a separate tag so that analysis is easier.
with summary_writer.as_default():
with tf.summary.record_if(True):
for task_name in embedding_tasks.keys():
for dataset in CONFIG.DATASETS:
tf.summary.scalar('metrics/%s_%s' % (dataset, task_name),
metrics[task_name][dataset],
step=global_step)
avg_metric = sum(metrics[task_name].values())
avg_metric /= len(CONFIG.DATASETS)
tf.summary.scalar('metrics/all_%s' % task_name,
avg_metric, step=global_step)
def timeout_fn():
global evaluated_last_ckpt
return evaluated_last_ckpt
def evaluate():
"""Evaluate embeddings."""
CONFIG.LOGDIR = FLAGS.logdir
logdir = CONFIG.LOGDIR
setup_eval_dir(logdir)
algo = get_algo(CONFIG.TRAINING_ALGO)
if FLAGS.defun:
algo.call = tf.function(algo.call)
algo.compute_loss = tf.function(algo.compute_loss)
iterator_tasks, embedding_tasks = get_tasks(CONFIG.EVAL.TASKS)
# Setup summary writer.
summary_writer = tf.summary.create_file_writer(
os.path.join(logdir, 'eval_logs'), flush_millis=10000)
iterators = {}
if iterator_tasks:
# Setup Dataset Iterators from train and val datasets.
iterators['train_iterator'] = create_dataset('train', mode='eval')
iterators['val_iterator'] = create_dataset('val', mode='eval')
if FLAGS.continuous_eval:
for _ in tf.train.checkpoints_iterator(logdir, timeout=1,
timeout_fn=timeout_fn):
evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
summary_writer)
else:
evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
summary_writer)
def main(_):
tf.enable_v2_behavior()
tf.keras.backend.set_learning_phase(0)
evaluate()
if __name__ == '__main__':
app.run(main)
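# Example invocation (a sketch; the flag values are illustrative, not taken
# from the repository's docs):
#   python -m tcc.evaluate --logdir=/tmp/alignment_logs --nocontinuous_eval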
| [
"[email protected]"
]
| |
108773ee0e769f568f2c93989888421559ae51cc | 07af444dafa5bde373b0730e92d67e455d4ff4df | /SFData/StackOverflow/s50483715_ground_truth.py | 1a82f2ca14d03d0b9d9658ab76abf93061a4d9a0 | []
| no_license | tensfa/tensfa | 9114595b58a2e989780af0c348afb89a2abb04b4 | 415dcfaec589b0b14c5b9864872c912f3851b383 | refs/heads/main | 2023-06-30T14:27:38.217089 | 2021-08-03T01:33:30 | 2021-08-03T01:33:30 | 368,465,614 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py |
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
img_width, img_height = 150, 150
train_data_dir = os.path.dirname(os.path.realpath(__file__)) + '/../data/generator/train'#train directory generated by train_cla
validation_data_dir = os.path.dirname(os.path.realpath(__file__)) + '/../data/generator/test'#validation directory generated by val_cla
train_samples = 6
validation_samples = 2
epochs = 1
batch_size = 1
input_shape = (img_width, img_height, 1)
#build a sequential model to train data
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
train_datagen = ImageDataGenerator(#train data generator
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1. / 255)#validation data generator
train_generator = train_datagen.flow_from_directory(#train generator
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
color_mode = 'grayscale')
validation_generator = val_datagen.flow_from_directory(#validation generator
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
color_mode = 'grayscale')
model.fit_generator(#fit the generator to train and validate the model
train_generator,
steps_per_epoch=train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
    validation_steps=validation_samples // batch_size)
| [
"[email protected]"
]
| |
fe28322254d51f1964fb264c3851963777900a5d | 7e11a563876a05771152448c8c80cf262f3bbc40 | /python1基础/day10/lambda.py | 805a2a35b789dcbb7d86fc3d0d48ab5ad7f3a71a | []
| no_license | qiujiandeng/- | ee8eb1c828177c9796b3a1bda547aa036c19914d | bb376535ff9f2fe23828bee32efb1d9010aa38e6 | refs/heads/master | 2020-05-25T11:56:39.471770 | 2019-05-21T07:52:22 | 2019-05-21T07:52:22 | 187,779,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py |
# lambda.py
# def myadd(x,y):
# return x + y
myadd = lambda x,y: x + y # the lambda body may only hold a single expression
# x + y
# max("abc")
# print("hello")
# True if x > y else False
# [x**2 for x in range(10)]
print("20+30=",myadd(20,30)) #50
print("1+2=",myadd(1,2)) #3
| [
"[email protected]"
]
| |
371f962ddc772c29c413d8755c2a6f5596366739 | d6c117812a618ff34055488337aaffea8cf81ca1 | /serve/Servr-Servr-desktop-edition/Servr - desktop edition.py | 66b42077ca95dfcca1875a39f12bbabe7a15c24f | [
"Apache-2.0"
]
| permissive | c0ns0le/Pythonista | 44829969f28783b040dd90b46d08c36cc7a1f590 | 4caba2d48508eafa2477370923e96132947d7b24 | refs/heads/master | 2023-01-21T19:44:28.968799 | 2016-04-01T22:34:04 | 2016-04-01T22:34:04 | 55,368,932 | 3 | 0 | null | 2023-01-22T01:26:07 | 2016-04-03T21:04:40 | Python | UTF-8 | Python | false | false | 2,121 | py |
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import mimetypes
import os
config_filename = 'Config.txt'
source_directory = 'Resources'
def get_contents_of_file(filepath):
with open(filepath) as in_file:
return in_file.read()
def get_files_dict(directory):
return {filename : get_contents_of_file(os.path.join(directory, filename))
for filename in os.listdir(directory)}
print('Welcome to Servr - desktop edition!')
with open(config_filename, 'a+') as in_file:
pass # if config file does not already exist, create one
config = get_contents_of_file(config_filename).split('\n')
do_auto_start = config[0].lower() if config else 'n'
if do_auto_start == 'y':
print("Getting data from {}...".format(config_filename))
filename, address, port = config[1:4]
else:
filename = raw_input("Enter homepage HTML file name including extension:").strip()
address = raw_input("Enter this device's private IP address:").strip()
port = raw_input("Enter an unused port:").strip()
if filename and address and port:
msg = "Save these values into {}? (No)".format(config_filename)
save_to_cfg = (raw_input(msg).strip().lower() or 'n')[0]
if save_to_cfg == 'y':
with open(config_filename, 'w') as out_file:
out_file.write('\n'.join(['y', filename, address, port, '']))
htmlData = get_contents_of_file(os.path.join(source_directory, filename))
files_dict = get_files_dict(source_directory)
def host(environ, start_response):
mimeType = 'text/html'
status = '200 OK'
path_info = environ.get('PATH_INFO', None)
if path_info in (None, '/', '/home', '/index.html'):
dataToReturn = htmlData
else:
path_info = path_info.strip('/')
dataToReturn = files_dict.get(path_info, None)
if dataToReturn:
mimeType = mimetypes.guess_type(path_info)[0]
else:
dataToReturn = status = '404 Not Found'
headers = [('Content-type', mimeType)]
start_response(status, headers)
return [dataToReturn]
webServer = make_server(address, int(port), host)
print('Serving at url: http://{}:{}'.format(address, port))
webServer.serve_forever()
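# To smoke-test from another machine on the LAN (address and port come from
# Config.txt or the prompts above):
#   curl http://<address>:<port>/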
| [
"[email protected]"
]
| |
0cf3d5c34ba015e36ea3bcae5895e8dd880d5346 | 14557ac358f90f9f055d9072e14ba494d565f072 | /tool/gaussianElimination.py | 95ae3fe91b949bae52269de21a66ffda95dbfd1a | []
| no_license | August-us/exam | d8143359aa85bd8f4047024f3e9f83b662fa23bb | 092a800a15bdd0f3d0c8f521a5e0fc90f964e8a8 | refs/heads/master | 2021-07-01T04:16:15.297648 | 2021-06-26T15:14:31 | 2021-06-26T15:14:31 | 203,525,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py |
import numpy as np
def gaussianElimination(A,b):
    A=np.matrix(A,dtype=float)  # builtin float; np.float was removed in NumPy 1.24
b=np.array(b)
assert len(A.shape)==2, "Coefficient matrix is not 2d. "
assert A.shape[0]==b.shape[0], "Coefficient and b do not match."
A=np.concatenate([A,b[:,None]],axis=1)
for i in range(b.shape[0]):
for k in range(i,A.shape[0]):
if A[k,i]:
A[i,:],A[k,:]=A[k,:]/A[k,i],A[i,:]
break
else:
continue
        A[i+1:,i:]-=(A[i+1:,i]/A[i,i])*A[i,i:] # eliminate below the pivot (row echelon form)
return A
def _solve(A):
n=A.shape[1]-1
flag=(A[:n]!=0).any(axis=1)
    if flag.all():# possibly a unique solution
x=np.zeros(n)
for i in range(n-1,-1,-1):
assert (A[i,i]!=0.), "Equations without solution"
x[i]=(A[i,n]-np.dot(A[i,:n],x))/A[i,i]
return x
else:
k=flag.sum()
x = np.zeros(n)
for i in range(k-1,-1,-1):
assert (A[i, i] != 0.), "Equations without solution"
x[i] = (A[i, n] - np.dot(A[i,:n], x)) / A[i, i]
k=np.eye(n-k)
return (x,k)
def solve(A,b):
'''
    :param A: the coefficient matrix
    :param b: the constant (right-hand side) vector
    :return: the solution of the linear system; if there are infinitely many
             solutions, returns a tuple of (particular solution, homogeneous
             part); if the solution is unique, returns it directly
'''
A=gaussianElimination(A,b)
return _solve(A)
if __name__ == '__main__':
A=[
[1,0,0,0],
[1,1,3,3],
[1,2,2,4],
[1,3,1,3],
]
b = [4, 18,24,26]
print(solve(A,b))
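    # Underdetermined case (a sketch, not in the original): for a
    # rank-deficient system, solve() returns a (particular solution,
    # identity placeholder) tuple, as produced by _solve() above.
    A2 = [
        [1, 1],
        [2, 2],
    ]
    b2 = [2, 4]
    print(solve(A2, b2))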
| [
"[email protected]"
]
| |
4960ea804db2bb114054817e0b17cc24f476368c | f7e5f77292c2bf595ae034e0a67bf8e01a6e82b1 | /p957/test_solution.py | f63de72edfe3a4b41502aaa4b0da7fa80a572715 | []
| no_license | carwestsam/leetCode | af0d64d8d52597c88441b811ce6609a056ef290e | 8075fbb40987d5e6af8d30941a19fa48a3320f56 | refs/heads/master | 2021-01-20T22:28:59.652051 | 2019-03-22T01:11:30 | 2019-03-22T01:11:30 | 60,820,215 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from unittest import TestCase
from p957.Solution import Solution
class TestSolution(TestCase):
def test_prisonAfterNDays(self):
sol = Solution()
self.assertEqual([0, 0, 1, 1, 0, 0, 0, 0], sol.prisonAfterNDays([0, 1, 0, 1, 1, 0, 0, 1], 7))
self.assertEqual([0, 0, 1, 1, 1, 1, 1, 0], sol.prisonAfterNDays([1, 0, 0, 1, 0, 0, 1, 0], 1000000000))
self.assertEqual(sol.raw([1, 0, 0, 1, 0, 1, 1, 0], 300),
sol.prisonAfterNDays([1, 0, 0, 1, 0, 1, 1, 0], 300))
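# A minimal Solution sketch (an assumption; not the repository's actual p957
# code) that would satisfy the tests above: raw() is the day-by-day brute
# force, and prisonAfterNDays() exploits the 14-day cycle of the 8-cell state.
class SketchSolution(object):
    def raw(self, cells, N):
        for _ in range(N):
            cells = [0] + [int(cells[i - 1] == cells[i + 1]) for i in range(1, 7)] + [0]
        return cells

    def prisonAfterNDays(self, cells, N):
        if N == 0:
            return list(cells)
        return self.raw(cells, (N - 1) % 14 + 1)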
| [
"[email protected]"
]
| |
b746bc216e8a269465b30ae48c46f3f6e68f5839 | afd85583cd544f6c909797579f4c28aae89e0828 | /src/main/seg/common/Term.py | 6f33143d1a58375ace4f45cc3634404ecd5475bd | []
| no_license | chuanfanyoudong/python_hanlp | a127f61b0847031677532ea7ec54cdbb65ac7486 | 5417778c51f7a209e8a866b884de6c7a6392b203 | refs/heads/master | 2020-04-13T08:50:26.386240 | 2019-01-14T15:10:35 | 2019-01-14T15:10:35 | 163,093,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py |
"""
@author: zkjiang
@contact: [email protected]
@software: PyCharm
@file: Term.py
@time: 2019/1/1 21:49
"""
"""
Word class; users can directly access all of a word's attributes.
"""
class Term(object):
def __init__(self, word, nature):
        self.word = word  # the word text
        self.nature = nature  # the part of speech
def __str__(self):
return self.word + "/" + self.nature
def length(self):
"""
        :return: the length of the word
"""
return len(self.word)
def getFrequency(self):
"""
        Get the word's frequency in the python_hanlp lexicon.
        (A class wrapping the lexicon still needs to be added.)
        :return: the frequency; 0 means this is a new word
"""
return 0
def equals(self, obj):
"""
        Compare whether two Terms are equal.
        :param obj: the object to compare against
        :return: 1 if equal, otherwise 0
"""
if isinstance(obj, Term):
if self.nature == obj.nature and self.word == obj.word:
return 1
        return 0
| [
"[email protected]"
]
| |
89b0a495c98f20d50c6e691bc32418e7e00fad4c | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/CISCO-OTN-IF-CAPABILITY.py | 5833aa4037e5d433623814f3321764e4b7c56cee | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,182 | py |
#
# PySNMP MIB module CISCO-OTN-IF-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-OTN-IF-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:09:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
ModuleCompliance, AgentCapabilities, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "AgentCapabilities", "NotificationGroup")
NotificationType, Counter32, MibIdentifier, ObjectIdentity, Bits, Counter64, IpAddress, Unsigned32, Gauge32, ModuleIdentity, TimeTicks, Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "MibIdentifier", "ObjectIdentity", "Bits", "Counter64", "IpAddress", "Unsigned32", "Gauge32", "ModuleIdentity", "TimeTicks", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoOtnIfMIBCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 562))
ciscoOtnIfMIBCapability.setRevisions(('2007-10-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoOtnIfMIBCapability.setRevisionsDescriptions(('First version of this MIB module.',))
if mibBuilder.loadTexts: ciscoOtnIfMIBCapability.setLastUpdated('200710200000Z')
if mibBuilder.loadTexts: ciscoOtnIfMIBCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoOtnIfMIBCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoOtnIfMIBCapability.setDescription('capabilities description for CISCO-OTN-IF-MIB')
ciscoOtnIfCapIOSXRV3R06CRS1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 562, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoOtnIfCapIOSXRV3R06CRS1 = ciscoOtnIfCapIOSXRV3R06CRS1.setProductRelease('Cisco IOS-XR Release 3.6 for CRS-1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoOtnIfCapIOSXRV3R06CRS1 = ciscoOtnIfCapIOSXRV3R06CRS1.setStatus('current')
if mibBuilder.loadTexts: ciscoOtnIfCapIOSXRV3R06CRS1.setDescription('Cisco OTN IF MIB capabilities for IOX-XR Release 3.6 for CRS 1')
mibBuilder.exportSymbols("CISCO-OTN-IF-CAPABILITY", PYSNMP_MODULE_ID=ciscoOtnIfMIBCapability, ciscoOtnIfMIBCapability=ciscoOtnIfMIBCapability, ciscoOtnIfCapIOSXRV3R06CRS1=ciscoOtnIfCapIOSXRV3R06CRS1)
| [
"[email protected]"
]
| |
08ea0a3e72f2032472b9d2361c3bc7deb129556c | ce03ceec096f467a27461506e2f278e1f83bd4b2 | /v2/api/models/survivors.py | c3f1feec904c55665709f03cf81c3410a7e62bbb | []
| no_license | CalebKester/kdm-manager | 729ef10ad4fcb615762f2f607aa3b347a2738b11 | 76e5e90f38f90ad8770c8077d2f4e597c25d020b | refs/heads/master | 2021-08-15T23:41:40.230703 | 2017-11-18T16:03:56 | 2017-11-18T16:03:56 | 107,457,554 | 0 | 0 | null | 2017-10-18T20:10:03 | 2017-10-18T20:10:03 | null | UTF-8 | Python | false | false | 111,967 | py |
#!/usr/bin/python2.7
from bson import json_util
from bson.objectid import ObjectId
from copy import copy
from datetime import datetime
from flask import request, Response
import json
import random
import Models
import utils
from assets import survivor_sheet_options, survivors
from models import abilities_and_impairments, cursed_items, disorders, endeavors, epithets, fighting_arts, names, saviors, survival_actions, survivor_special_attributes, the_constellations, weapon_proficiency
class Assets(Models.AssetCollection):
""" These are pre-made survivors, e.g. from the BCS. """
def __init__(self, *args, **kwargs):
self.assets = survivors.beta_challenge_scenarios
self.type = "survivor"
Models.AssetCollection.__init__(self, *args, **kwargs)
def get_specials(self, return_type=dict):
""" This returns the 'specials' macro dicts, which are basically simple
'scripts' for operating on a settlement at creation time. """
d = copy(survivors.specials)
for k in sorted(d.keys()):
d[k]["handle"] = k
if return_type == "JSON":
output = []
for k in sorted(d.keys()):
output.append(d[k])
return output
return d
def get_defaults(self):
""" Returns a dictionary of default attribute values for survivors. """
d = copy(survivors.defaults)
return d
class Survivor(Models.UserAsset):
""" This is the base class for all expansions. Private methods exist for
enabling and disabling expansions (within a campaign/settlement). """
def __repr__(self):
return "%s [%s] (%s)" % (self.survivor["name"], self.survivor["sex"], self.survivor["_id"])
def __init__(self, *args, **kwargs):
self.collection="survivors"
self.object_version = 0.79
# initialize AssetCollections for later
self.CursedItems = cursed_items.Assets()
self.Disorders = disorders.Assets()
self.Names = names.Assets()
self.Saviors = saviors.Assets()
self.SpecialAttributes = survivor_special_attributes.Assets()
# data model meta data
self.stats = ['Movement','Accuracy','Strength','Evasion','Luck','Speed','bleeding_tokens']
self.game_asset_keys = ['disorders','epithets','fighting_arts','abilities_and_impairments']
self.armor_locations = ['Head', 'Body', 'Arms', 'Waist', 'Legs']
self.flags = ['skip_next_hunt','cannot_use_fighting_arts','cannot_spend_survival','departing','cannot_gain_bleeding_tokens']
self.abs_value_attribs = ['max_bleeding_tokens', ]
self.min_zero_attribs = ["hunt_xp","Courage","Understanding"]
self.min_one_attribs = ["Movement"]
self.damage_locations = [
"brain_damage_light",
"head_damage_heavy",
"arms_damage_light",
"arms_damage_heavy",
"body_damage_light",
"body_damage_heavy",
"waist_damage_light",
"waist_damage_heavy",
"legs_damage_light",
"legs_damage_heavy",
]
# if we're doing a new survivor, it will happen when we subclass the
# Models.UserAsset class:
Models.UserAsset.__init__(self, *args, **kwargs)
# this makes the baby jesus cry
if self.Settlement is None:
if request and request.collection != 'survivor':
self.logger.warn("%s Initializing Settlement object! THIS IS BAD FIX IT" % self)
import settlements
self.Settlement = settlements.Settlement(_id=self.survivor["settlement"], normalize_on_init=False)
if self.normalize_on_init:
self.normalize()
def new(self):
""" Creates a new survivor.
The 'attribs' dictionary will be used, after initialization, to add or
overwrite all key/value pairs on self.survivor.
Important! Only attrib keys that are part of the baseline survivor data
model will be used! Any other keys in 'attribs' will be ignored!
If this is an internal call, i.e. from the settlement creation method
or similar, simply set self.new_asset_attribs to be a dictionary of the
required survivor attribs.
Otherwise, attribs will come from the request params.
"""
# if called without attribs dict, assume we're responding to a request
# and initialize attribs to be request params
if self.new_asset_attribs == {}:
attribs = self.params
else:
attribs = self.new_asset_attribs
# self.logger.debug(attribs)
#
# Can't create a survivor without initializing a settlement! do
# that first, an fail bigly if you cannot
#
import settlements # baby jesus, still crying
self.Settlement = settlements.Settlement(_id=attribs["settlement"])
self.settlement_id = self.Settlement.settlement["_id"]
self.survivor = {
# meta and housekeeping
"meta": {
"abilities_and_impairments_version": 1.0,
"disorders_version": 1.0,
"epithets_version": 1.0,
"favorites_version": 1.0,
"fighting_arts_version": 1.0,
"special_attributes_version": 1.0,
"weapon_proficiency_type": 1.0,
},
"email": request.User.login,
"born_in_ly": self.get_current_ly(),
"created_on": datetime.now(),
"created_by": request.User._id,
"settlement": self.settlement_id,
"public": False,
# survivor sheet
"name": "Anonymous",
"sex": "R",
"survival": 0,
"hunt_xp": 0,
"Insanity": 0,
"Head": 0,
"Arms": 0,
"Body": 0,
"Waist": 0,
"Legs": 0,
"Courage": 0,
"Understanding": 0,
"affinities": {"red":0,"blue":0,"green":0},
# misc
'inherited': {
'father': {'abilities_and_impairments': [], 'disorders': [], 'fighting_arts': []},
'mother': {'abilities_and_impairments': [], 'disorders': [], 'fighting_arts': []},
},
'departing': False,
'bleeding_tokens': 0,
'max_bleeding_tokens': 5,
# attributes
"Movement": 5,
"Accuracy": 0,
"Strength": 0,
"Evasion": 0,
"Luck": 0,
"Speed": 0,
"attribute_detail": {
"Movement": {"tokens": 0, "gear": 0},
"Accuracy": {"tokens": 0, "gear": 0},
"Strength": {"tokens": 0, "gear": 0},
"Evasion": {"tokens": 0, "gear": 0},
"Luck": {"tokens": 0, "gear": 0},
"Speed": {"tokens": 0, "gear": 0},
},
# weapon proficiency
"Weapon Proficiency": 0,
"weapon_proficiency_type": None,
# game assets
"abilities_and_impairments": [],
"cursed_items": [],
"disorders": [],
"epithets": [],
"favorite": [],
"fighting_arts": [],
"fighting_arts_levels": {},
}
c_dict = self.get_campaign(dict)
# 1.a apply/overwrite attribs that go with our data model
for a in attribs.keys():
if a in self.survivor.keys():
forbidden_keys = ['settlement']
if a not in forbidden_keys and attribs[a] != None:
self.survivor[a] = attribs[a]
# 1.b for bools, keep 'em bool, e.g. in case they come in as 'checked'
for boolean in ["public"]:
self.survivor[boolean] = bool(self.survivor[boolean])
# 1.c if sex is "R", pick a random sex
if self.survivor["sex"] == "R":
self.survivor["sex"] = random.choice(["M","F"])
# 1.d sanity check new attribs; die violently if we fail here
if self.survivor["sex"] not in ["M","F"]:
msg = "Invalid survivor 'sex' attrib '%s' received! Must be 'M' or 'F' and str type!" % (self.survivor["sex"])
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
sex_pronoun = "his"
if self.survivor['sex'] == 'F':
sex_pronoun = 'her'
# 1.e now save, get an OID so we can start logging and
# start calling object/class methods
self._id = utils.mdb.survivors.insert(self.survivor)
self.load()
# 1.f set the name
s_name = self.survivor['name']
if s_name == "Anonymous" and request.User.get_preference("random_names_for_unnamed_assets"):
s_name = self.Names.get_random_survivor_name(self.survivor["sex"])
self.set_name(s_name, save=False)
self.log_event("%s created new survivor %s" % (request.User.login, self.pretty_name()))
#
# 2. parents and newborn status/operations
#
# 2.a check for incoming parents
parent_names = []
for parent in ["father","mother"]:
if parent in attribs.keys() and attribs[parent] is not None:
parent_oid = ObjectId(attribs[parent])
parent_mdb = utils.mdb.survivors.find_one({"_id": parent_oid})
parent_names.append(parent_mdb["name"])
self.survivor[parent] = parent_oid
# check parents for inheritable A&Is
AI = abilities_and_impairments.Assets()
for ai in parent_mdb['abilities_and_impairments']:
ai_asset = AI.get_asset(ai)
if ai_asset.get('inheritable', False):
self.survivor['inherited'][parent]['abilities_and_impairments'].append(ai)
self.log_event('%s inherited %s from %s %s %s.' % (self.pretty_name(), ai_asset['name'], sex_pronoun, parent, parent_mdb['name']), event_type='survivor_inheritance')
self.add_game_asset('abilities_and_impairments', ai)
# 2.b set newborn status; create parent_string var (for logging)
survivor_is_a_newborn = False
if parent_names != []:
survivor_is_a_newborn = True
genitive_appellation = "Son"
parent_string = ' and '.join(parent_names)
if self.survivor["sex"] == "F":
genitive_appellation = "Daughter"
# 2.c log the birth/joining
if survivor_is_a_newborn:
self.log_event("%s born to %s!" % (self.pretty_name(), parent_string), event_type="survivor_birth")
else:
self.log_event('%s joined the settlement!' % (self.pretty_name()), event_type="survivor_join")
# 2.d increment survivial if we're named
if self.survivor["name"] != "Anonymous" and self.survivor["survival"] == 0:
self.log_event("Automatically added 1 survival to %s" % self.pretty_name())
self.survivor["survival"] += 1
# 3.a avatar - LEGACY CODE BELOW
# if params is not None and "survivor_avatar" in params and params["survivor_avatar"].filename != "":
        # self.update_avatar(params["survivor_avatar"])
# 3.b settlement buffs - move this to a separate function
if request.User.get_preference("apply_new_survivor_buffs"):
def apply_buff_list(l):
""" Private helper to apply a dictionary of bonuses. """
for d in l:
for k in d.keys():
if k == "affinities":
self.update_affinities(d[k])
elif k == "abilities_and_impairments":
if type(d[k]) != list:
msg = "The 'abilities_and_impairments' bonus must be a list! Failing on %s" % d
self.logger.exception(msg)
raise Exception(msg)
for a_handle in d[k]:
self.add_game_asset("abilities_and_impairments", a_handle)
else:
self.update_attribute(k, d[k])
buff_sources = set()
buff_list = []
# bonuses come from principles, innovations...
for attrib in ["principles","innovations"]:
for d in self.Settlement.list_assets(attrib):
if d.get("new_survivor", None) is not None:
buff_list.append(d["new_survivor"])
buff_sources.add(d["name"])
if survivor_is_a_newborn:
if d.get("newborn_survivor", None) is not None:
buff_list.append(d["newborn_survivor"])
buff_sources.add(d["name"])
# ...and also from the campaign definition for now
if c_dict.get('new_survivor', None) is not None:
buff_list.append(c_dict['new_survivor'])
buff_sources.add("'%s' campaign" % c_dict["name"])
if survivor_is_a_newborn:
if c_dict.get('newborn_survivor', None) is not None:
buff_list.append(c_dict['newborn_survivor'])
buff_sources.add("'%s' campaign" % c_dict["name"])
if buff_list != []:
buff_string = utils.list_to_pretty_string(buff_sources)
self.log_event("Applying %s bonuses to %s" % (buff_string, self.pretty_name()))
apply_buff_list(buff_list)
else:
self.log_event("Settlement bonuses where not applied to %s due to user preference." % self.pretty_name())
# Add our campaign's founder epithet if the survivor is a founder
if self.is_founder():
self.logger.debug("%s is a founder. Adding founder epithet!" % self.pretty_name())
founder_epithet = self.get_campaign(dict).get("founder_epithet", "founder")
self.add_game_asset("epithets", founder_epithet)
# log and save
self.logger.debug("%s created by %s (%s)" % (self, request.User, self.Settlement))
self.save()
return self._id
def normalize(self):
""" In which we force the survivor's mdb document to adhere to the biz
logic of the game and our own data model. """
self.perform_save = False
self.bug_fixes()
self.baseline()
self.duck_type()
#
# asset migrations (names to handles)
#
if self.survivor["meta"].get("abilities_and_impairments_version", None) is None:
self.convert_abilities_and_impairments()
self.perform_save = True
if self.survivor["meta"].get("disorders_version", None) is None:
self.convert_disorders()
self.perform_save = True
if self.survivor["meta"].get("epithets_version", None) is None:
self.convert_epithets()
self.perform_save = True
if self.survivor["meta"].get("favorites_version", None) is None:
self.convert_favorite()
self.perform_save = True
if self.survivor["meta"].get("fighting_arts_version", None) is None:
self.convert_fighting_arts()
self.perform_save = True
if self.survivor["meta"].get("special_attributes_version", None) is None:
self.convert_special_attributes()
self.perform_save = True
if self.survivor["meta"].get("weapon_proficiency_type_version", None) is None:
self.convert_weapon_proficiency_type()
self.perform_save = True
#
# game asset normalization - TKTK fix this up
#
if 'ability_customizations' in self.survivor.keys():
del self.survivor['ability_customizations']
self.logger.debug("%s Removing deprecated attribute 'ability_customizations'." % self)
self.perform_save = True
# enforce the partner A&I
if "partner_id" in self.survivor.keys():
if "partner" not in self.survivor["abilities_and_impairments"]:
self.logger.debug("Automatically adding 'Partner' A&I to %s." % self)
self.survivor["abilities_and_impairments"].append("partner")
self.perform_save = True
# add the savior key if we're dealing with a savior
if self.is_savior() and not "savior" in self.survivor.keys():
self.survivor["savior"] = self.is_savior()
            self.perform_save = True
# enforce minimum attributes for certain attribs
self.min_attributes()
if self.perform_save:
self.logger.info("%s survivor modified during normalization! Saving changes..." % self)
self.save()
def serialize(self, return_type=None, include_meta=True):
""" Renders the survivor as JSON. We don't serialize to anything else."""
# tidy these up prior to serialization
for k in ["abilities_and_impairments", "fighting_arts", "disorders"]:
self.survivor[k] = sorted(self.survivor[k])
# start the insanity
output = {}
if include_meta:
output = self.get_serialize_meta()
# build the sheet: don't forget to add cursed items to it
output.update({"sheet": self.survivor})
output["sheet"].update({"effective_sex": self.get_sex()})
output["sheet"].update({"can_be_nominated_for_intimacy": self.can_be_nominated_for_intimacy()})
output["sheet"].update({"can_gain_bleeding_tokens": self.can_gain_bleeding_tokens()})
output["sheet"].update({"can_gain_survival": self.can_gain_survival()})
output["sheet"].update({"cannot_spend_survival": self.cannot_spend_survival()})
output["sheet"].update({"cannot_use_fighting_arts": self.cannot_use_fighting_arts()})
output["sheet"].update({"skip_next_hunt": self.skip_next_hunt()})
output["sheet"].update({"founder": self.is_founder()})
output["sheet"].update({"savior": self.is_savior()})
output['sheet'].update({'parents': self.get_parents(dict)})
# survivors whose campaigns use dragon traits get a top-level element
if self.get_campaign(dict).get("dragon_traits", False):
output["dragon_traits"] = {}
output["dragon_traits"].update({"trait_list": self.get_dragon_traits()})
output["dragon_traits"].update({"active_cells": self.get_dragon_traits("active_cells")})
output["dragon_traits"].update({"available_constellations": self.get_dragon_traits("available_constellations")})
# now add the additional top-level items ("keep it flat!" -khoa)
output.update({"notes": self.get_notes()})
output.update({"survival_actions": self.get_survival_actions("JSON")})
if return_type == dict:
return output
return json.dumps(output, default=json_util.default)
#
# normalization/enforcement helper methods
#
def apply_survival_limit(self, save=False):
""" Check the settlement to see if we're enforcing Survival Limit. Then
enforce it, if indicated. Force values less than zero to zero. """
# no negative numbers
if self.survivor["survival"] < 0:
self.survivor["survival"] = 0
# see if we want to enforce the Survival Limit
if self.Settlement.get_survival_limit(bool):
if self.survivor["survival"] > self.Settlement.get_survival_limit():
self.survivor["survival"] = self.Settlement.get_survival_limit()
# save, if params require
if save:
self.save()
#
# update/set methods
#
def add_custom_ai(self, ai_name=None, ai_desc=None, ai_type=None):
""" Adds a custom A&I to the survivor. """
raise Exception("NOT IMPLEMENTED!!")
def add_cursed_item(self, handle=None):
""" Adds a cursed item to a survivor. Does a bit of biz logic, based on
the asset dict for the item.
If the 'handle' kwarg is None, this method will look for a request
param, e.g. as if this was a reqeuest_response() call.
"""
# initialize
if handle is None:
self.check_request_params(['handle'])
handle = self.params["handle"]
ci_dict = self.CursedItems.get_asset(handle)
# check for the handle (gracefully fail if it's a dupe)
if ci_dict["handle"] in self.survivor["cursed_items"]:
self.logger.error("%s already has cursed item '%s'" % (self, ci_dict["handle"]))
return False
# log to settlement event
self.log_event("%s is cursed! %s added %s to survivor." % (self.pretty_name(), request.User.login, ci_dict["name"]), event_type="survivor_curse")
# add related A&Is
if ci_dict.get("abilities_and_impairments", None) is not None:
for ai_handle in ci_dict["abilities_and_impairments"]:
self.add_game_asset('abilities_and_impairments', ai_handle)
self.add_game_asset("epithets", "cursed")
# append it, save and exit
self.survivor["cursed_items"].append(ci_dict["handle"])
self.save()
def rm_cursed_item(self, handle=None):
""" Removes cursed items from the survivor, to include any A&Is that go
along with that cursed item. Does NOT remove any A&Is that are caused by
any remaining cursed items, i.e. so you can have multiple items with the
King's Curse, etc. """
# initialize
if handle is None:
self.check_request_params(['handle'])
handle = self.params['handle']
ci_dict = self.CursedItems.get_asset(handle)
        # check for the handle (gracefully fail if it's not there)
if ci_dict["handle"] not in self.survivor["cursed_items"]:
self.logger.error("%s does not have cursed item '%s'. Ignoring bogus request..." % (self, ci_dict["handle"]))
return False
# log to settlement event
self.log_event("%s removed %s from %s" % (request.User.login, ci_dict["name"], self.pretty_name()))
# remove any A&Is that are no longer required/present
if ci_dict.get("abilities_and_impairments", None) is not None:
# create a set of the curse A&Is that are sticking around
remaining_curse_ai = set()
for ci_handle in self.survivor["cursed_items"]:
if ci_handle == ci_dict["handle"]: # ignore the one we're processing currently
pass
else:
remaining_ci_dict = self.CursedItems.get_asset(ci_handle)
if remaining_ci_dict.get("abilities_and_impairments", None) is not None:
remaining_curse_ai.update(remaining_ci_dict["abilities_and_impairments"])
# now check the CI we're processing against the list we created
for ai_handle in ci_dict["abilities_and_impairments"]:
if ai_handle not in remaining_curse_ai:
self.rm_game_asset('abilities_and_impairments', ai_handle)
else:
# self.logger.debug("%s is still in %s" % (ai_handle, remaining_curse_ai))
self.logger.info("%s Not removing '%s' A&I; survivor is still cursed." % (self, ai_handle))
# rm the epithet if we have no curses
if self.survivor['cursed_items'] == []:
self.rm_game_asset("epithets", "cursed")
# remove it, save and exit
self.survivor["cursed_items"].remove(ci_dict["handle"])
self.save()
def add_favorite(self, user_email=None):
"""Adds the value of the incoming 'user_email' kwarg to the survivor's
'favorite' attribute (which is a list of users who have favorited the
survivor. """
if user_email is None:
self.check_request_params(['user_email'])
user_email = self.params['user_email']
if user_email in self.survivor['favorite']:
self.logger.error("%s User '%s' is already in this survivor's favorite list. Ignoring bogus add request." % (self, user_email))
return True
else:
self.survivor['favorite'].append(user_email)
self.log_event('%s added %s to their favorite survivors.' % (user_email, self.pretty_name()))
self.save()
def rm_favorite(self, user_email=None):
"""Removes the value of the incoming 'user_email' kwarg from the
survivor's 'favorite' attribute (which is a list of users who have
favorited the survivor. """
if user_email is None:
self.check_request_params(['user_email'])
user_email = self.params['user_email']
if user_email not in self.survivor['favorite']:
self.logger.error("%s User '%s' is not in this survivor's favorite list. Ignoring bogus remove request." % (self, user_email))
return True
else:
self.survivor['favorite'].remove(user_email)
self.log_event('%s removed %s from their favorite survivors.' % (user_email, self.pretty_name()))
self.save()
def add_game_asset(self, asset_class=None, asset_handle=None, apply_related=True, save=True):
""" Port of the legacy method of the same name.
Does not apply nearly as much business logic as the legacy webapp
method, however, so don't expect a ton of user-friendliness out of this
one.
If the 'asset_class' value is None, then the method assumes that it is
being called by a request_response() method and looks for request
params.
Important! All incoming asset handles are turned into asset dicts using
their AssetCollection class's get_asset() method! Any handle that cannot
be turned into a dict in this way will bomb out and raise an exception!
Here is the order of operations on how an incoming handle is evaluated:
1.) the "max" attribute of any incoming asset is respected. The
asset WILL NOT be added if doing so would go above the asset's
"max", as defined by its asset dict. The call will return False.
2.) if an asset dict contains one of our survivor 'flag' values,
this method will try to set it, if the flag's value evaluates to
Boolean True.
3.) any keys of the asset dictionary are also attributes of the
self.survivor dict get a call to self.update_attribute, i.e. to
add them to whatever the survivor's existing attribute happens
to be.
4.) if the asset dict has an 'epithet' key, the value of that key
(which should always be an epithet handle) will be added to the
survivor.
5.) the survivor's permanent affinities are modified if the asset
dict contains the 'affinities' key
6.) similarly, if the asset dict has a 'related' key, any related
            asset handles are applied to the survivor.
Once added to the survivor, the following 'post-processing' business
logic is automatically handled by this method:
1.) Weapon masteries get a log_event() call. They are also added
to the settlement's Innovations (via add_innovation() call.)
That's it! Have fun!
"""
# method preprocessing first
asset_class, asset_dict = self.asset_operation_preprocess(asset_class, asset_handle)
# 1.) MAX - check the asset's 'max' attribute:
if asset_dict.get("max", None) is not None:
if self.survivor[asset_class].count(asset_dict["handle"]) >= asset_dict["max"]:
self.logger.warn("%s max for '%s' (%s) has already been reached! Ignoring..." % (self, asset_dict["handle"], asset_class))
return False
# 2.) STATUS - set status flags if they're in the dict
for flag in self.flags:
if asset_dict.get(flag, None) is True:
self.set_status_flag(flag)
# 3.) ATTRIBS - now check asset dict keys for survivor dict attribs
for ak in asset_dict.keys():
if ak in self.stats:
self.update_attribute(ak, asset_dict[ak])
if ak in self.abs_value_attribs:
self.set_attribute(ak, asset_dict[ak])
# RETIRED mostly this is for the 'Fear of the Dark' disorder, TBH
if 'retire' in asset_dict.keys():
self.set_retired(True)
# levels!?
if asset_dict.get('levels', None) is not None:
self.survivor["fighting_arts_levels"][asset_dict["handle"]] = []
# 4.) EPITHETS - check for 'epithet' key
if asset_dict.get("epithet", None) is not None:
self.add_game_asset("epithets", asset_dict["epithet"])
# 5.) AFFINITIES - some assets add permanent affinities
if asset_dict.get('affinities', None) is not None:
self.update_affinities(asset_dict["affinities"])
# 6.) RELATED - add any related
if apply_related and asset_dict.get("related", None) is not None:
self.logger.info("Automatically applying %s related asset handles to %s" % (len(asset_dict["related"]), self))
for related_handle in asset_dict["related"]:
self.add_game_asset(asset_class, related_handle, apply_related=False)
# finally, if we're still here, add it and log_event() it
self.survivor[asset_class].append(asset_dict["handle"])
self.survivor[asset_class].sort()
self.log_event("%s added '%s' (%s) to %s" % (request.User.login, asset_dict["name"], asset_dict["type_pretty"], self.pretty_name()))
#
# post-processing/special handling starts here
#
# special handling for certain game asset types
if asset_dict.get("type", None) == "weapon_mastery":
self.log_event("%s has become a %s master!" % (self.pretty_name(), asset_dict["weapon_name"]), event_type="survivor_mastery")
if asset_dict.get("add_to_innovations", True):
self.Settlement.add_innovation(asset_dict["handle"])
# finally, save the survivor and return
if save:
self.save()
def asset_operation_preprocess(self, asset_class=None, asset_handle=None):
""" As its name suggests, the purpose of this method is to 'stage up' or
prepare to do the add_game_asset() method (above). The idea is that you
call this method at the beginning of add_game_asset() to sanity check it
and do any other preprocessing tasks.
Set 'asset_class' kwarg to the string of an asset collection and
'asset_handle' to any handle within that asset collection and this
func will return the value of 'asset_class' and an asset dict for the
'asset_handle' value.
        This method will back off to the incoming request if 'asset_class' is
None.
"""
#
# 1.) initialize the request. Try to use kwargs, but back off to
# request params if incoming kwargs are None
#
if asset_class is None:
self.check_request_params(["type", "handle"])
asset_class = self.params["type"]
asset_handle = self.params["handle"]
elif asset_class is not None and asset_handle is None:
self.check_request_params(["handle"])
asset_handle = self.params["handle"]
# 2.) initialize/import the AssetModule and an AssetCollection object
exec "AssetModule = %s" % asset_class
A = AssetModule.Assets()
# 3.) handle the _random pseudo/bogus/magic handle
if asset_handle == "_random":
self.logger.info("%s selecting random '%s' asset..." % (self, asset_class))
available = copy(self.Settlement.get_available_assets(AssetModule)[asset_class])
# filter out assets that the survivor already has
for h in self.survivor[asset_class]:
if available.get(h, None) is not None:
del available[h]
# filter out 'secret' assets
for k in available.keys():
if A.get_asset(k).get('type', None) == 'secret_fighting_art':
del available[k]
asset_handle = random.choice(available.keys())
self.logger.info("%s selected '%s' asset handle at random." % (self, asset_handle))
# 4.) try to get the asset; bomb out if we can't
asset_dict = A.get_asset(asset_handle)
if asset_dict is None:
msg = "%s.Assets() class does not include handle '%s'!" % (asset_class, asset_handle)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
# exit preprocessing with a valid class name and asset dictionary
return asset_class, asset_dict
def set_many_game_assets(self):
""" Much like the set_many_attributes() route/method, this one WILL ONLY
WORK WITH A REQUEST object present.
        Iterates over a list of assets and calls add_game_asset() or
        rm_game_asset() once for every asset in the array. """
# initialize and sanity check!
self.check_request_params(['game_assets','action'])
action = self.params['action']
updates = self.params['game_assets']
if type(updates) != list:
raise utils.InvalidUsage("The add_many_game_assets() method requires the 'assets' param to be an array/list!")
elif action not in ['add','rm']:
raise utils.InvalidUsage("add_many_game_assets() 'action' param must be 'add' or 'rm'.")
for u in updates:
asset_class = u.get('type', None)
asset_handle = u.get('handle', None)
err = "set_many_game_assets() is unable to process hash: %s" % u
usg = " Should be: {handle: 'string', type: 'string'}"
err_msg = err + usg
if asset_class is None:
raise utils.InvalidUsage(err_msg)
elif asset_handle is None:
raise utils.InvalidUsage(err_msg)
if action == 'add':
self.add_game_asset(str(asset_class), str(asset_handle), save=False)
elif action == 'rm':
self.rm_game_asset(str(asset_class), str(asset_handle), save=False)
self.save()
def replace_game_assets(self):
""" Much like set_many_game_assets(), this route facilitates The Watcher
UI/UX and SHOULD ONLY BE USED WITH A REQUEST OBJECT since it pulls all
params from there and cannot be called directly.
This one takes a game asset category and overwrites it with an incoming
list of handles.
"""
self.check_request_params(['type','handles'])
asset_class = self.params['type']
asset_handles = self.params['handles']
if asset_class not in self.game_asset_keys:
raise utils.InvalidUsage("The replace_game_assets() method cannot modify asset type '%s'. Allowed types include: %s" % (asset_class, self.game_asset_keys))
# start the riot
handles_to_rm = set()
handles_to_add = set()
# process the current list: figure out what has to be removed
for h in self.survivor[asset_class]:
if h not in asset_handles:
handles_to_rm.add(h)
# process the incoming list: figure out what we need to add
for h in asset_handles:
if h not in self.survivor[asset_class]:
handles_to_add.add(h)
# bail if we've got no changes
if handles_to_add == set() and handles_to_rm == set():
self.logger.warn('Ignoring bogus replace_game_assets() operation: no changes to make...')
return True
for h in handles_to_rm:
self.rm_game_asset(asset_class, h, save=False)
for h in handles_to_add:
self.add_game_asset(asset_class, h, save=False)
self.save()
def rm_game_asset(self, asset_class=None, asset_handle=None, rm_related=True, save=True):
""" The inverse of the add_game_asset() method, this one most all the
same stuff, except it does it in reverse order:
One thing it does NOT do is check the asset dict's 'max' attribute, since
that is irrelevant.
"""
asset_class, asset_dict = self.asset_operation_preprocess(asset_class, asset_handle)
# 1.) fail gracefully if this is a bogus request
if asset_dict["handle"] not in self.survivor[asset_class]:
self.logger.warn("%s Attempt to remove non-existent key '%s' from '%s'. Ignoring..." % (self, asset_dict["handle"], asset_class))
return False
# 2.) STATUS - unset status flags if they're in the dict
for flag in self.flags:
if asset_dict.get(flag, None) is True:
self.set_status_flag(flag, unset=True)
# 3.) ATTRIBS - now check asset dict keys for survivor dict attribs
for ak in asset_dict.keys():
if ak in self.stats:
self.update_attribute(ak, -asset_dict[ak])
if ak in self.abs_value_attribs:
self.default_attribute(ak)
# RETIRED mostly this is for the 'Fear of the Dark' disorder, TBH
if 'retire' in asset_dict.keys():
self.set_retired(False)
# 4.) EPITHETS - check for 'epithet' key
if asset_dict.get("epithet", None) is not None:
self.rm_game_asset("epithets", asset_dict["epithet"])
# 5.) AFFINITIES - some assets add permanent affinities: rm those
if asset_dict.get('affinities', None) is not None:
self.update_affinities(asset_dict["affinities"], operation="rm")
# 6.) RELATED - rm any related
if rm_related and asset_dict.get("related", None) is not None:
self.logger.info("Automatically removing %s related asset handles from %s" % (len(asset_dict["related"]), self))
for related_handle in asset_dict["related"]:
self.rm_game_asset(asset_class, related_handle, rm_related=False)
# finally, if we're still here, add it and log_event() it
self.survivor[asset_class].remove(asset_dict["handle"])
self.log_event("%s removed '%s' (%s) from %s" % (request.User.login, asset_dict["name"], asset_dict["type_pretty"], self.pretty_name()))
if save:
self.save()
def add_note(self):
""" Adds a Survivor note to the mdb. Expects a request context. """
self.check_request_params(['note'])
note = self.params['note']
note_dict = {
"created_by": request.User._id,
"created_on": datetime.now(),
"survivor_id": self.survivor["_id"],
"settlement_id": self.Settlement.settlement['_id'],
"note": note,
}
note_oid = utils.mdb.survivor_notes.insert(note_dict)
self.logger.debug("%s Added a note to %s" % (request.User, self))
return Response(response={'note_oid': note_oid}, status=200)
def rm_note(self):
""" Removes a Survivor note from the MDB. Expects a request context. """
self.check_request_params(['_id'])
_id = ObjectId(self.params['_id'])
utils.mdb.survivor_notes.remove({'_id': _id})
self.logger.debug("%s Removed a note from %s" % (request.User, self))
def update_affinities(self, aff_dict={}, operation="add"):
""" Set the kwarg 'operation' to either 'add' or 'rm' in order to
do that operation on self.survivor["affinities"], which looks like this:
{'red': 0, 'blue': 0, 'green': 0}
The 'aff_dict' should mirror the actual affinities dict, except without
all of the color keys. For example:
{'red': 1, 'blue': 2}
{'green': -1}
If 'aff_dict' is unspecified or an empty dict, this method will assume
that it is being called by request_response() and check for 'aff_dict'
in self.params.
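        Example (a sketch with hypothetical values, not canon data):
            update_affinities({'red': 1, 'blue': 2}, operation='add')
            update_affinities({'green': 1}, operation='rm')
        The first call bumps red by one and blue by two; the second subtracts
        one green, e.g. when removing an asset that granted it.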
"""
# initialize
        if not aff_dict:
self.check_request_params(['aff_dict'])
aff_dict = self.params["aff_dict"]
if 'operation' in self.params:
operation = self.params["operation"]
# sanity check
if operation not in ["add","rm"]:
msg = "The '%s' operation is not supported by the update_affinities() method!" % (operation)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
# now do it and log_event() the results for each key
for aff_key in aff_dict.keys():
if operation == "add":
self.survivor["affinities"][aff_key] += aff_dict[aff_key]
elif operation == 'rm':
self.survivor["affinities"][aff_key] -= aff_dict[aff_key]
self.log_event("%s set %s '%s' affinity to %s" % (request.User.login, self.pretty_name(), aff_key, self.survivor["affinities"][aff_key]))
self.save()
def update_attribute(self, attribute=None, modifier=None):
""" Adds 'modifier' value to self.survivor value for 'attribute'. """
if attribute is None or modifier is None:
self.check_request_params(['attribute','modifier'])
attribute = self.params["attribute"]
modifier = self.params["modifier"]
# hand off to update_survival or damage_brain if that's the shot
if attribute == 'survival':
self.update_survival(modifier)
return True
elif attribute == 'brain_event_damage':
self.damage_brain(modifier)
return True
# sanity check!
if attribute not in self.survivor.keys():
msg = "%s does not have '%s' attribute!" % (self, attribute)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
elif type(self.survivor[attribute]) != int:
msg = "%s '%s' attribute is not an int type! (It's a '%s')" % (self, attribute, type(self.survivor[attribute]))
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
else:
pass
# ok, do it, but enforce mins
self.survivor[attribute] = self.survivor[attribute] + modifier
if attribute in self.min_zero_attribs and self.survivor[attribute] < 0:
self.survivor[attribute] = 0
if attribute in self.min_one_attribs and self.survivor[attribute] < 1:
self.survivor[attribute] = 1
# force a max of 9 for courage and understanding
if attribute in ['Courage','Understanding']:
if self.survivor[attribute] > 9:
self.survivor[attribute] = 9
# log completion of the update
self.log_event("%s set %s attribute '%s' to %s" % (request.User.login, self.pretty_name(), attribute, self.survivor[attribute]))
self.save()
# biz logic for weapon proficiency - POST PROCESS
if attribute == "Weapon Proficiency" and self.survivor[attribute] >= 3:
if self.survivor["weapon_proficiency_type"] is not None:
W = weapon_proficiency.Assets()
w_handle = self.survivor["weapon_proficiency_type"]
w_dict = W.get_asset(w_handle)
if self.survivor[attribute] == 3:
self.log_event("%s is a %s specialist!" % (self.pretty_name(), w_dict["name"]))
self.add_game_asset("abilities_and_impairments", "%s_specialization" % w_handle)
elif self.survivor[attribute] == 8:
self.add_game_asset("abilities_and_impairments", "mastery_%s" % w_handle)
def update_bleeding_tokens(self, modifier=None):
""" Adds 'modifier' to the survivor["bleeding_tokens"]. Cannot go below
zero, e.g. by adding a negative number, or above
survivor["max_bleeding_tokens"]. Fails gracefully in either case. """
if modifier is None:
self.check_request_params(['modifier'])
modifier = int(self.params["modifier"])
current_value = self.survivor["bleeding_tokens"]
self.survivor['bleeding_tokens'] = current_value + modifier
if self.survivor["bleeding_tokens"] > 0:
self.survivor["bleeding_tokens"] = 0
elif self.survivor["bleeding_tokens"] > self.survivor["max_bleeding_tokens"]:
self.survivor["bleeding_tokens"] = self.survivor["max_bleding_tokens"]
self.log_event('%s set %s bleeding tokens to %s.' % (request.User.login, self.survivor.pretty_name(), self.survivor["bleeding_tokens"]))
self.save()
def update_returning_survivor_years(self, add_year=None, save=True):
""" Adds the current LY to the survivor's 'returning_survivor' attrib
(i.e. list) by default. Set 'add_year' to any integer to add an arbitrary
value. """
if add_year is None:
add_year = self.Settlement.get_current_ly()
if not 'returning_survivor' in self.survivor.keys():
self.survivor['returning_survivor'] = []
self.survivor['returning_survivor'].append(add_year)
self.survivor['returning_survivor'] = list(set(self.survivor['returning_survivor']))
if save:
self.save()
def update_survival(self, modifier=None):
""" Adds 'modifier' to survivor["survival"]. Respects settlement rules
about whether to enforce the Survival Limit. Will not go below zero. """
if modifier is None:
self.check_request_params(["modifier"])
modifier = int(self.params["modifier"])
self.survivor["survival"] += modifier
self.apply_survival_limit()
self.logger.debug("%s set %s survival to %s" % (request.User, self, self.survivor["survival"]))
self.save()
#
# toggles and flags!
#
def toggle_boolean(self, attribute=None):
""" This is a generic toggle that will toggle any attribute of the
survivor that is Boolean. Note that this will only work on attributes
that are part of the survivor data model (check the baseline() method)
and will not work, for example, on status flags such as 'skip_next_hunt'
and similar. """
if attribute is None:
self.check_request_params(["attribute"])
attribute = self.params["attribute"]
if attribute not in self.survivor.keys():
msg = "The attribute '%s' is not part of the survivor data model!" % attribute
self.logger.error(msg)
raise utils.InvalidUsage(msg, status_code=400)
elif type(self.survivor[attribute]) != bool:
msg = "The attribute '%s' is not type 'bool'!" % attribute
self.logger.error(msg)
raise utils.InvalidUsage(msg, status_code=400)
if self.survivor[attribute]:
self.survivor[attribute] = False
else:
self.survivor[attribute] = True
self.log_event("%s changed %s attribute '%s' to %s" % (request.User.login, self.pretty_name(), attribute, self.survivor[attribute]))
self.save()
def toggle_fighting_arts_level(self):
""" Toggles a fighting arts level on or off, e.g. by adding it to or
removing it from the array for a particular FA's handle.
Assumes that this is an API request and does not process any args that
do not come in the request object.
"""
self.check_request_params(['handle', 'level'])
fa_handle = self.params["handle"]
fa_level = int(self.params["level"])
FA = fighting_arts.Assets()
fa_dict = FA.get_asset(fa_handle)
if fa_handle not in self.survivor['fighting_arts_levels'].keys():
self.survivor["fighting_arts_levels"][fa_handle] = []
self.logger.warn("%s Adding fighting art handle '%s' to 'fighting_arts_levels' dict." % (self, fa_handle))
if fa_level in self.survivor['fighting_arts_levels'][fa_handle]:
toggle_operation = "off"
self.survivor['fighting_arts_levels'][fa_handle].remove(fa_level)
else:
toggle_operation = "on"
self.survivor['fighting_arts_levels'][fa_handle].append(fa_level)
self.survivor['fighting_arts_levels'][fa_handle] = sorted(self.survivor['fighting_arts_levels'][fa_handle])
self.logger.debug("%s toggled '%s' fighting art level %s %s for %s." % (request.User.login, fa_dict["name"], fa_level, toggle_operation, self.pretty_name()))
self.save()
def toggle_sotf_reroll(self):
""" toggles the survivor's Survival of the Fittest once-in-a-lifetime
reroll on or off. This is self.survivor["sotf_reroll"] and it's a bool
and it's not part of the data model, so creating it is necessary some
times. """
if not "sotf_reroll" in self.survivor.keys():
self.survivor["sotf_reroll"] = True
self.log_event("%s toggled SotF reroll on for %s" % (request.User.login, self.pretty_name()))
elif self.survivor["sotf_reroll"]:
self.survivor["sotf_reroll"] = False
self.log_event("%s toggled SotF reroll off for %s" % (request.User.login, self.pretty_name()))
elif not self.survivor["sotf_reroll"]:
self.survivor["sotf_reroll"] = True
self.log_event("%s toggled SotF reroll on for %s" % (request.User.login, self.pretty_name()))
else:
self.logger.error("Unhandled logic in toggle_sotf_reroll() method!")
raise Exception
self.save()
def toggle_status_flag(self, flag=None):
""" Toggles a status flag on or off. Available status flags:
- cannot_spend_survival
- cannot_use_fighting_arts
- skip_next_hunt
- retired
        If 'flag' is None, this method assumes that it is being called by the
        request_response() method and will check for incoming params.
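        Example (sketch): toggle_status_flag('skip_next_hunt') sets the flag if
        it is absent or False, and deletes it if it is currently set.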
"""
if flag is None:
self.check_request_params(['flag'])
flag = self.params["flag"]
if flag not in self.flags:
msg = "Survivor status flag '%s' cannot be toggled!" % flag
raise utils.InvalidUsage(msg, status_code=400)
flag_pretty = flag.replace("_", " ").capitalize()
flag_current_status = self.survivor.get(flag, None)
if flag_current_status is None:
self.survivor[flag] = True
self.log_event("%s set '%s' on %s" % (request.User.login, flag_pretty, self.pretty_name()))
elif flag_current_status is False:
self.survivor[flag] = True
self.log_event("%s set '%s' on %s" % (request.User.login, flag_pretty, self.pretty_name()))
else:
del self.survivor[flag]
self.log_event("%s removed '%s' from %s" % (request.User.login, flag_pretty, self.pretty_name()))
self.save()
#
# special game controls
#
def controls_of_death(self):
""" Manage all aspects of the survivor's death here. This is tied to a
a number of settlement methods/values, so be cautious with this one. """
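        # Example request params (hypothetical): {'dead': True, 'died_in': 5,
        # 'cause_of_death': 'Starvation'} kills the survivor in LY 5, while
        # {'dead': False} resurrects them and strips all death attributes.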
self.check_request_params(["dead"])
dead = self.params["dead"]
if dead is False:
for death_key in ["died_on","died_in","cause_of_death","dead"]:
if death_key in self.survivor.keys():
del self.survivor[death_key]
self.logger.debug("%s Removed '%s' from %s" % (request.User, death_key, self))
self.log_event("%s has resurrected %s!" % (request.User.login, self.pretty_name()))
else:
self.survivor["dead"] = True
self.survivor["died_on"] = datetime.now()
# this isn't very DRY, but we've got to strictly type here
if 'died_in' in self.params:
self.survivor['died_in'] = int(self.params["died_in"])
else:
self.survivor["died_in"] = self.Settlement.get_current_ly()
if 'cause_of_death' in self.params:
self.survivor['cause_of_death'] = str(self.params["cause_of_death"])
else:
self.survivor['cause_of_death'] = "Unspecified"
self.log_event('%s has died! Cause of death: %s' % (self.pretty_name(), self.survivor["cause_of_death"]), event_type="survivor_death")
self.Settlement.update_population(-1)
self.save()
def damage_brain(self, dmg=0):
""" Inflicts brain event damage on the survivor."""
remainder = self.survivor['Insanity'] - dmg
log_damage = False
if remainder < 0:
if self.survivor.get('brain_damage_light', None) is None:
self.survivor['brain_damage_light'] = 'checked' #transitional
log_damage = True
self.survivor['Insanity'] = 0
elif remainder == 0:
self.survivor['Insanity'] = 0
elif remainder > 0:
self.survivor['Insanity'] = remainder
else:
raise Exception('%s Impossible damage_brain() result!' % self)
self.log_event("%s inflicted %s Brain Event Damage on %s. The survivor's Insanity is now %s" % (request.User.login, dmg, self.pretty_name(), self.survivor["Insanity"]), event_type="brain_event_damage")
if log_damage:
self.log_event("%s has suffered a Brain injury due to Brain Event Damage!" % (self.pretty_name()))
self.save()
def return_survivor(self, showdown_type=None):
""" Returns the departing survivor. This is a minimized port of the legacy
webapp's heal() method (which was super overkill in the first place).
        This method assumes a request context, so don't try it if you haven't
        got a request object initialized and in the global namespace. """
#
# initialize/sanity check
#
if not 'departing' in self.survivor.keys():
            self.logger.warn('%s is not a Departing Survivor! Skipping bogus return() request...' % self)
            return False
def finish():
""" Private method for concluding the return. Enhances DRYness. """
msg = "%s returned %s to %s" % (request.User.login, self.pretty_name(), self.Settlement.settlement['name'])
self.log_event(msg, event_type="survivor_return")
self.save()
#
# Living and dead survivor return operations
#
# 1.) update meta data
self.survivor['departing'] = False
if showdown_type == 'normal':
self.update_returning_survivor_years(save=False)
# 2.) remove armor
for loc in self.armor_locations:
self.survivor[loc] = 0
# 3.) remove tokens/gear modifiers
self.reset_attribute_details(save=False)
# 4.) heal injury boxes
self.reset_damage(save=False)
# 5.) finish if the survivor is dead
        if self.is_dead():
            finish()
            return
#
# Living survivors only from here!
#
# 6.) zero-out bleeding tokens
self.set_bleeding_tokens(0, save=False)
# 7.) increment Hunt XP
# if self.is_savior():
# self.update_attribute('hunt_xp', 4)
# else:
# self.update_attribute('hunt_xp', 1)
# 8.) process disorders with 'on_return' attribs
for d in self.survivor['disorders']:
d_dict = self.Disorders.get_asset(d)
if d_dict.get('on_return', None) is not None:
for k,v in d_dict['on_return'].iteritems():
self.survivor[k] = v
# OK, we out!
finish()
#
# set methods!
#
def set_affinity(self, color=None, value=None):
""" Adds 'modifier' value to self.survivor value for 'attribute'. If the
'attrib' kwarg is None, the function assumes that this is part of a call
to request_response() and will get request params. """
if color is None:
self.check_request_params(['color','value'])
color = self.params["color"]
value = int(self.params["value"])
# sanity check!
if color not in self.survivor["affinities"].keys():
msg = "%s does not have a '%s' affinity!" % (self, color)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
self.survivor["affinities"][color] = value
self.log_event("%s set %s %s affinity to %s" % (request.User.login, self.pretty_name(), color, value))
self.save()
def set_attribute(self, attrib=None, value=None, save=True):
""" Adds 'modifier' value to self.survivor value for 'attribute'. If the
'attrib' kwarg is None, the function assumes that this is part of a call
to request_response() and will get request params. """
if attrib is None:
self.check_request_params(['attribute','value'])
attrib = self.params["attribute"]
value = int(self.params["value"])
# sanity check!
if attrib not in self.survivor.keys():
msg = "%s does not have '%s' attribute!" % (self, attrib)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
elif type(self.survivor[attrib]) != int:
msg = "%s '%s' attribute is not an int type!" % (self, attrib)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
self.survivor[attrib] = value
self.log_event("%s set %s '%s' to %s" % (request.User.login, self.pretty_name(), attrib, value))
# save is optional; do it now if we're doing it
if save:
self.save()
def set_many_attributes(self):
""" This is an API-only route and therefore ONLY WORKS IF YOU HAVE
request parameters!
This basically reads a list of attributes to update and then updates
them in the order they appear.
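        Example payload (hypothetical values):
            {
                "attributes": [
                    {"attribute": "Strength", "value": 2},
                    {"attribute": "survival", "value": 3}
                ],
                "attribute_details": [
                    {"attribute": "Luck", "detail": "tokens", "value": 1}
                ]
            }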
"""
# initialize from the request
attr_updates = []
detail_updates = []
if 'attributes' in self.params.keys():
attr_updates = self.params['attributes']
if 'attribute_details' in self.params.keys():
detail_updates = self.params['attribute_details']
# type check
if type(attr_updates) != list or type(detail_updates) != list:
raise utils.InvalidUsage("The set_many_attributes() method requires 'attributes' and 'attribute_details' params to be array/list types!")
# do attr_updates first
for u in attr_updates:
if type(u) != dict:
raise utils.InvalidUsage("The set_many_attributes() method 'attributes' must be hashes!")
attrib = u.get('attribute', None)
value = u.get('value', None)
err = "set_many_attributes() is unable to process hash: %s" % u
usg = " Should be: {attribute: 'string', value: 'int'}"
err_msg = err + usg
if attrib is None:
raise utils.InvalidUsage(err_msg)
elif value is None:
raise utils.InvalidUsage(err_msg)
self.set_attribute(str(attrib), int(value), False)
# do detail updates
for u in detail_updates:
if type(u) != dict:
raise utils.InvalidUsage("The set_many_attributes() method 'attribute_details' must be hashes!")
attrib = u.get('attribute',None)
detail = u.get('detail',None)
value = u.get('value',None)
            for attr_key, v in [('attribute', attrib), ('detail', detail), ('value', value)]:
                if v is None:
                    raise utils.InvalidUsage("The '%s' key of %s may not be undefined!" % (attr_key, u))
self.set_attribute_detail(attrib, detail, value, False)
self.save()
def set_attribute_detail(self, attrib=None, detail=None, value=False, save=True):
""" Use to update the 'attribute_detail' dictionary for the survivor.
If this is called without an 'attrib' value, it will assume that it is
being called as part of a request_response() call and look for request
parameters.
The 'attrib' value should be 'Strength', 'Luck', etc. and the 'detail'
should be one of the following:
- 'tokens'
- 'gear'
The 'value' must be an int.
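        Example (sketch): set_attribute_detail('Strength', 'tokens', 2) records
        two temporary Strength tokens without touching the base Strength stat.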
"""
if attrib is None:
self.check_request_params(["attribute","detail","value"])
attrib = self.params["attribute"]
detail = self.params["detail"]
value = int(self.params["value"])
if attrib not in self.survivor["attribute_detail"].keys():
msg = "Survivor attribute '%s' does not support details!" % (attrib)
self.logger.error(msg)
raise utils.InvalidUsage(msg, status_code=400)
if detail not in ['tokens','gear']:
msg = "Survivor 'attribute_detail' detail type '%s' is not supported!" % (detail)
self.logger.error(msg)
raise utils.InvalidUsage(msg, status_code=400)
self.survivor["attribute_detail"][attrib][detail] = value
self.log_event("%s set %s '%s' detail '%s' to %s" % (request.User.login, self.pretty_name(), attrib, detail, value))
# now save if we're saving
if save:
self.save()
def set_bleeding_tokens(self, value=None, save=True):
""" Sets self.survivor['bleeding_tokens'] to 'value', respecting the
survivor's max and refusing to go below zero. """
if value is None:
self.check_request_params(['value'])
value = int(self.params["value"])
self.survivor['bleeding_tokens'] = value
if self.survivor["bleeding_tokens"] < 0:
self.survivor["bleeding_tokens"] = 0
elif self.survivor["bleeding_tokens"] > self.survivor["max_bleeding_tokens"]:
self.survivor["bleeding_tokens"] = self.survivor["max_bleding_tokens"]
self.log_event('%s set %s bleeding tokens to %s.' % (request.User.login, self.pretty_name(), self.survivor["bleeding_tokens"]))
if save:
self.save()
def set_constellation(self, constellation=None, unset=None):
""" Sets or unsets the survivor's self.survivor["constellation"]. """
current_constellation = self.survivor.get("constellation", False)
# figure out if we're unsetting
if unset is None:
if "unset" in self.params:
unset = True
else:
unset = False
if unset and current_constellation:
del self.survivor["constellation"]
self.log_event("%s unset the constellation for %s" % (request.User.login, self.pretty_name()))
self.rm_game_asset("epithets", "the_%s" % current_constellation.lower())
return True
elif unset and not current_constellation:
self.logger.warn("%s Does not have a constellation! Ignoring unset request..." % (self))
return False
if constellation is None:
self.check_request_params(['constellation'])
constellation = self.params["constellation"]
if current_constellation == constellation:
self.logger.warn("%s Constellation is already %s. Ignoring request..." % (self, constellation))
return False
else:
if current_constellation:
self.rm_game_asset("epithets", "the_%s" % current_constellation.lower())
self.survivor["constellation"] = constellation
self.add_game_asset("epithets", "the_%s" % constellation.lower())
self.log_event("%s set %s constellation to '%s'" % (request.User.login, self.pretty_name(), constellation))
self.log_event("%s has become one of the People of the Stars!" % (self.pretty_name), event_type="potstars_constellation")
self.save()
def set_email(self, new_email=None):
""" Validates an incoming email address and attempts to set it as the
survivor's new email. It has to a.) be different, b.) look like an email
address and c.) belong to a registered user before we'll set it."""
if new_email is None:
self.check_request_params(["email"])
new_email = self.params["email"]
# sanity checks
if new_email == self.survivor["email"]:
msg = "%s Survivor email unchanged! Ignoring request..." % self
self.logger.warn(msg)
return Response(response=msg, status=200)
elif not '@' in new_email:
msg = "'%s Survivor email '%s' does not look like an email address! Ignoring..." % (self, new_email)
self.logger.warn(msg)
return Response(response=msg, status=200)
elif utils.mdb.users.find_one({'login': new_email}) is None:
msg = "The email address '%s' is not associated with any known user." % new_email
self.logger.error(msg)
return Response(response=msg, status=422)
# if we're still here, do it.
old_email = self.survivor["email"]
self.survivor["email"] = new_email
self.log_event("%s changed the manager of %s to %s." % (request.User.login, old_email, self.survivor["email"]))
self.save()
return utils.http_200
def set_name(self, new_name=None, save=True):
""" Sets the survivor's name. Logs it. """
if new_name is None:
self.check_request_params(["name"])
new_name = self.params["name"]
new_name = new_name.strip()
if new_name == self.survivor["name"]:
self.logger.warn("%s Survivor name unchanged! Ignoring set_name() call..." % self)
return True
if new_name in ["", u""]:
new_name = "Anonymous"
old_name = self.survivor["name"]
self.survivor["name"] = new_name
if save:
self.log_event("%s renamed %s to %s" % (request.User.login, old_name, new_name))
self.save()
def set_parent(self, role=None, oid=None):
""" Sets the survivor's 'mother' or 'father' attribute. """
if role is None or oid is None:
self.check_request_params(['role', 'oid'])
role = self.params['role']
oid = self.params['oid']
oid = ObjectId(oid)
if role not in ['father','mother']:
utils.invalidUsage("Parent 'role' value must be 'father' or 'mother'!")
new_parent = utils.mdb.survivors.find_one({"_id": oid})
if new_parent is None:
utils.invalidUsage("Parent OID '%s' does not exist!" % oid)
if oid == self.survivor.get(role, None):
self.logger.warn("%s %s is already %s. Ignoring request..." % (self, role, new_parent["name"]))
return True
self.survivor[role] = ObjectId(oid)
self.log_event("%s updated %s lineage: %s is now %s" % (request.User.login, self.pretty_name(), role, new_parent["name"]))
self.save()
def set_retired(self, retired=None):
""" Set to true or false. Backs off to request params is 'retired' kwarg
is None. Does a little user-friendliness/sanity-checking."""
        if retired is None:
            self.check_request_params(["retired"])
            retired = self.params["retired"]
if type(retired) != bool:
retired = bool(retired)
if "retired" in self.survivor.keys() and self.survivor["retired"] == retired:
self.logger.warn("%s Already has 'retired' set to '%s'. Ignoring bogus request..." % (self, retired))
return True
self.survivor["retired"] = retired
if self.survivor["retired"]:
self.log_event("%s has retired %s." % (request.User.login, self.pretty_name()))
self.survivor['retired_in'] = self.get_current_ly()
else:
del self.survivor["retired"]
try:
del self.survivor['retired_in']
except:
pass
self.log_event("%s has taken %s out of retirement." % (request.User.login, self.pretty_name()))
self.save()
def set_savior_status(self, color=None, unset=False):
""" Makes the survivor a savior or removes all savior A&Is. """
if "unset" in self.params:
unset = True
# handle 'unset' operations first
if unset and self.is_savior():
s_dict = self.Saviors.get_asset_by_color(self.is_savior())
for ai_handle in s_dict["abilities_and_impairments"]:
self.rm_game_asset("abilities_and_impairments", ai_handle, rm_related=False)
del self.survivor["savior"]
self.save()
self.log_event("%s unset savior status for %s" % (request.User.login, self.pretty_name()))
return True
elif unset and not self.is_savior():
self.logger.error("%s Not a savior: cannot unset savior status!" % (self))
return False
else:
pass # moving along...
# now handle 'set' operations
if color is None:
self.check_request_params(["color"])
color = self.params["color"]
# bail if this is redundant/double-click
if color == self.is_savior():
self.logger.error("%s is already a %s savior. Ignoring..." % (self, color))
return False
# remove previous if we're switching
if self.is_savior() and color != self.is_savior():
self.logger.warn("%s changing savior color from %s to %s..." % (self, self.is_savior(), color))
s_dict = self.Saviors.get_asset_by_color(self.is_savior())
for ai_handle in s_dict["abilities_and_impairments"]:
self.rm_game_asset("abilities_and_impairments", ai_handle, rm_related=False)
# finally, if we're still here, set it
self.survivor["savior"] = color
s_dict = self.Saviors.get_asset_by_color(color)
for ai_handle in s_dict["abilities_and_impairments"]:
self.add_game_asset("abilities_and_impairments", ai_handle, apply_related=False)
self.log_event("A savior is born! %s applied %s savior status to %s" % (request.User.login, color, self.pretty_name()), event_type="savior_birth")
self.save()
def set_sex(self, sex=None):
""" Sets self.survivor["sex"] attribute. Can only be 'M' or 'F'.
Note that this should not be used to set the survivor's 'effective sex',
i.e. in the event of a Gender Swap curse, etc.
        'Effective sex' is determined automatically (see the get_sex() method
        in this module for more info).
If the 'sex' kwarg is None, this method assumes that it is being called
by request_response() and will look for 'sex' in self.params.
"""
if sex is None:
self.check_request_params(['sex'])
sex = self.params["sex"]
if sex not in ["M","F"]:
msg = "Survivor sex must be 'M' or 'F'. Survivor sex cannot be '%s'." % sex
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
self.survivor["sex"] = sex
self.log_event("%s set %s sex to '%s'." % (request.User.login, self.pretty_name(), sex))
self.save()
def set_special_attribute(self):
""" Sets an arbitrary attribute handle on the survivor. Saves. Expects a
request context. """
# initialize and validate
self.check_request_params(['handle','value'])
handle = self.params['handle']
value = self.params['value']
sa_dict = self.SpecialAttributes.get_asset(handle)
self.survivor[handle] = value
if value and 'epithet' in sa_dict:
self.add_game_asset('epithets', sa_dict['epithet'])
elif not value and 'epithet' in sa_dict:
self.rm_game_asset('epithets', sa_dict['epithet'])
if value:
msg = "%s added '%s' to %s." % (request.User.login, sa_dict['name'], self.pretty_name())
else:
msg = "%s removed '%s' from %s." % (request.User.login, sa_dict['name'], self.pretty_name())
self.log_event(msg)
self.save()
def set_status_flag(self, flag=None, unset=False):
""" Sets or unsets a status flag, regardless of previous status of that
flag. Supported flags include:
- cannot_spend_survival
- cannot_use_fighting_arts
- skip_next_hunt
        If 'flag' is None, this method assumes that it is being called by the
        request_response() method and will check for incoming params.
Set the 'unset' kwarg to True to force unset the value.
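        Example (sketch): set_status_flag('cannot_spend_survival') always sets
        the flag; set_status_flag('cannot_spend_survival', unset=True) clears
        it (a no-op if the flag is already absent).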
"""
if flag is None:
self.check_request_params(['flag'])
flag = self.params["flag"]
if 'unset' in self.params:
unset = True
if flag not in self.flags:
msg = "Survivor status flag '%s' cannot be set!" % flag
raise utils.InvalidUsage(msg, status_code=400)
flag_pretty = flag.replace("_", " ").capitalize()
        if unset:
            if self.survivor.get(flag, None) is not None:
                del self.survivor[flag]
                self.log_event("%s removed '%s' from %s" % (request.User.login, flag_pretty, self.pretty_name()))
        else:
            self.survivor[flag] = True
            self.log_event("%s set '%s' on %s" % (request.User.login, flag_pretty, self.pretty_name()))
self.save()
def set_survival(self, value=0):
""" Sets survivor["survival"] to 'value'. Respects settlement rules
about whether to enforce the Survival Limit. Will not go below zero. """
self.check_request_params(["value"])
value = int(self.params["value"])
self.survivor["survival"] = value
self.apply_survival_limit()
self.logger.debug("%s set %s survival to %s" % (request.User, self, self.survivor["survival"]))
self.save()
def set_weapon_proficiency_type(self, handle=None, unset=False):
""" Sets the self.survivor["weapon_proficiency_type"] string to a
handle. """
if handle is None:
self.check_request_params(["handle"])
handle = self.params["handle"]
W = weapon_proficiency.Assets()
h_dict = W.get_asset(handle)
self.survivor["weapon_proficiency_type"] = handle
self.log_event("%s set weapon proficiency type to '%s' for %s" % (request.User.login, h_dict["handle"], self.pretty_name()))
self.save()
#
# defaults and resets
#
def default_attribute(self, attrib=None):
""" Defaults a Survivor attribute to its base value, as determined the
assets.survivors.py module. This absolutely will clobber the current
value, leaving no trace of it. YHBW. """
if attrib is None:
self.check_request_params(['attribute'])
attrib = self.params["attribute"]
# sanity check!
SA = Assets()
defaults = SA.get_defaults()
if attrib not in defaults.keys():
msg = "%s does not have a default value!" % (attrib)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
if attrib not in self.survivor.keys():
msg = "%s does not have '%s' attribute!" % (self, attrib)
self.logger.exception(msg)
raise utils.InvalidUsage(msg, status_code=400)
self.survivor[attrib] = defaults[attrib]
self.log_event("%s defaulted %s '%s' to %s" % (request.User.login, self.pretty_name(), attrib, defaults[attrib]))
self.save()
def reset_attribute_details(self, save=True):
""" Zero-out all attribute_detail values and will overwrite with
extreme prejudice. """
self.survivor["attribute_detail"] = {
"Strength": {"tokens": 0, "gear": 0},
"Evasion": {"tokens": 0, "gear": 0},
"Movement": {"tokens": 0, "gear": 0},
"Luck": {"tokens": 0, "gear": 0},
"Speed": {"tokens": 0, "gear": 0},
"Accuracy": {"tokens": 0, "gear": 0},
}
if save:
self.save()
def reset_damage(self, save=True):
""" Remove all damage attribs/bools from the survivor. """
for d in self.damage_locations:
if d in self.survivor.keys():
del self.survivor[d]
if save:
self.save()
#
# get methods
#
def pretty_name(self):
""" Returns the survivor's name in a prettified way, e.g. 'Timothy [M]'.
Meant to be an alternative for the __repr__() way of doing it, which
includes the asset's OID. Use this method when calling log_event().
NB! This returns the survivor's effective sex (i.e. calls self.get_sex()
instead of using the base attribute), which is different from the way
the __repr__() method does it!
"""
return "%s [%s]" % (self.survivor["name"], self.get_sex())
def get_available_endeavors(self, return_type=None):
""" Works like a miniature version of the Settlement method of the same
name. Returns a list of handles instead of a big-ass JSON thing, however.
Set 'return_type' to dict to get a dictionary instead of a list of handles.
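        Example (sketch; 'hypothetical_endeavor' is a made-up handle): a
        survivor whose A&Is grant it would get ['hypothetical_endeavor'] back,
        or a dict keyed by that handle with return_type=dict.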
"""
E = endeavors.Assets()
def check_availability(e_handle):
""" Private method that checks whether an endeavor is currently
available to the survivor. """
e_dict = E.get_asset(e_handle)
available = True
if e_dict.get('requires_returning_survivor',False):
if self.get_current_ly() not in self.survivor.get('returning_survivor', []):
available = False
return available
e_handles = set()
for a_dict in self.list_assets('abilities_and_impairments'):
for e_handle in a_dict.get('endeavors', []):
if check_availability(e_handle):
e_handles.add(e_handle)
e_handles = sorted(list(e_handles))
# return dictionary.
if return_type == dict:
output = {}
for e_handle in e_handles:
                output[e_handle] = E.get_asset(e_handle)
return output
return e_handles
def get_dragon_traits(self, return_type=dict):
""" Returns survivor Dragon Traits. """
traits = []
# check Understanding for 9+
if int(self.survivor["Understanding"]) >= 9:
traits.append("9 Understanding (max)")
# check self.survivor["expansion_attribs"] for "Reincarnated surname","Scar","Noble surname"
if "expansion_attribs" in self.survivor.keys():
for attrib in ["potstars_reincarnated_surname", "potstars_scar", "potstars_noble_surname"]:
if attrib in self.survivor.keys():
traits.append(attrib)
# check the actual survivor name too, you know, for the real role players
split_name = self.survivor["name"].split(" ")
for surname in ["Noble","Reincarnated"]:
if surname in split_name and "%s surname" % surname not in traits:
traits.append(surname + " surname")
# check abilities_and_impairments for Oracle's Eye, Iridescent Hide, Pristine,
AI = abilities_and_impairments.Assets()
for a in [AI.get_asset('oracles_eye'), AI.get_asset('pristine'), AI.get_asset('iridescent_hide')]:
if a["handle"] in self.survivor["abilities_and_impairments"]:
traits.append("%s ability" % a["name"])
# check for any weapon mastery
AI.filter("type", ["weapon_mastery"], reverse=True)
for wm in AI.get_dicts():
if wm["handle"] in self.survivor["abilities_and_impairments"]:
traits.append("Weapon Mastery")
# check disorders for "Destined"
if "destined" in self.survivor["disorders"]:
traits.append("Destined disorder")
# check fighting arts for "Fated Blow", "Frozen Star", "Unbreakable", "Champion's Rite"
# for fa in ["Fated Blow","Frozen Star","Unbreakable","Champion's Rite"]:
FA = fighting_arts.Assets()
for fa in ['champions_rite', 'fated_blow', 'frozen_star', 'unbreakable']:
if fa in self.survivor["fighting_arts"]:
fa_dict = FA.get_asset(fa)
fa_name = copy(fa_dict["name"])
if fa_name == "Frozen Star":
fa_name = "Frozen Star secret"
traits.append("%s fighting art" % fa_name)
# check for 3+ strength
if int(self.survivor["Strength"]) >= 3:
traits.append("3+ Strength attribute")
# check for 1+ Accuracy
if int(self.survivor["Accuracy"]) >= 1:
traits.append("1+ Accuracy attribute")
# check Courage for 9+
if int(self.survivor["Courage"]) >= 9:
traits.append("9 Courage (max)")
if return_type == "active_cells":
cells = set()
C = the_constellations.Assets()
c_map = C.get_asset('lookups')["map"]
for t in traits:
if t in c_map.keys():
cells.add(c_map[t])
return list(cells)
elif return_type == 'available_constellations':
constellations = set()
active_cells = self.get_dragon_traits('active_cells')
C = the_constellations.Assets()
c_formulae = C.get_asset('lookups')["formulae"]
for k,v in c_formulae.iteritems(): # k = "Witch", v = set(["A1","A2","A3","A4"])
if v.issubset(active_cells):
constellations.add(k)
return list(constellations)
# if no return_type, just return the trait list
return traits
def get_epithets(self, return_type=None):
""" Returns survivor epithet (handles) as a list, unless the
'return_type' kwarg is set to 'pretty', which gets you a nice
string. """
e = self.survivor["epithets"]
if return_type == "pretty":
E = epithets.Assets()
output = ""
for e_handle in e:
e_asset = E.get_asset(e_handle)
output += e_asset["name"]
return output
return e
def get_lineage(self):
""" DO NOT call this method during normal serialization: it is a PIG and
running it on more than one survivor at once is a really, really bad
idea.
Also, it returns a Response object. So yeah.
This method creates a dictionary of survivor lineage information. """
# initialize
output = {
'intimacy_partners': set(),
'siblings': { 'full': [], 'half': [] },
}
# PROCESS parents first w the survivor version of get_parents() b/c that's easy
survivor_parents = self.get_parents(dict)
output['parents'] = survivor_parents
# now PROCESS the settlement's get_parents() output for partners, children and sibs
children = set()
siblings = {'full': set(), 'half': set()}
couplings = self.Settlement.get_parents()
for coupling in couplings:
if self.survivor['_id'] == coupling['father']:
output['intimacy_partners'].add(coupling['mother'])
children = children.union(coupling['children'])
elif self.survivor['_id'] == coupling['mother']:
output['intimacy_partners'].add(coupling['father'])
children = children.union(coupling['children'])
# full-blood sibs
if self.survivor['_id'] in coupling['children']:
siblings['full'] = coupling['children'] # you can only have one set of full-blood sibs, right?
# half-blood sibs
            if survivor_parents['father'] is not None and survivor_parents['mother'] is not None:
if survivor_parents['father']['_id'] == coupling['father'] and survivor_parents['mother']['_id'] != coupling['mother']:
siblings['half'] = siblings['half'].union(coupling['children'])
if survivor_parents['father']['_id'] != coupling['father'] and survivor_parents['mother']['_id'] == coupling['mother']:
siblings['half'] = siblings['half'].union(coupling['children'])
#
# Post-process
#
# process sibling oids and make lists of dictionaries
for k in siblings: # i.e. 'half' or 'full'
for s in siblings[k]:
if s == self.survivor["_id"]:
pass # can't be your own sib
else:
output['siblings'][k].append(utils.mdb.survivors.find_one({'_id': s}))
# retrieve children from mdb; process oid lists into dictionaries
output['children'] = {}
for p in output['intimacy_partners']:
output['children'][str(p)] = []
for c in children:
c_dict = utils.mdb.survivors.find_one({'_id': c})
if c_dict['father'] == self.survivor['_id']:
output['children'][str(c_dict['mother'])].append(c_dict)
elif c_dict['mother'] == self.survivor['_id']:
output['children'][str(c_dict['father'])].append(c_dict)
# sort the children on their born in LY
for p_id in output['children']:
output['children'][p_id] = sorted(output['children'][p_id], key=lambda k: k['born_in_ly'])
        # retrieve intimacy partner survivor data from mdb
output['intimacy_partners'] = [utils.mdb.survivors.find_one({'_id': s}) for s in output['intimacy_partners']]
return Response(
response=json.dumps(output, default=json_util.default),
status=200,
mimetype="application/json"
)
def get_notes(self):
""" Gets the survivor's notes as a list of dictionaries. """
notes = utils.mdb.survivor_notes.find({
"survivor_id": self.survivor["_id"],
"created_on": {"$gte": self.survivor["created_on"]}
}, sort=[("created_on",1)])
return list(notes)
def get_parents(self, return_type=None):
""" Returns survivor OIDs for survivor parents by default. Set
'return_type' to 'dict' (w/o the quotes) to get survivor dictionaries
back. """
parents = []
for p in ["father","mother"]:
if p in self.survivor.keys():
parents.append(self.survivor[p])
if return_type == dict:
output = {'mother': None, 'father': None}
for p_oid in parents:
p = utils.mdb.survivors.find_one({'_id': p_oid})
if p is not None:
if p["sex"] == 'M':
output['father'] = p
elif p['sex'] == 'F':
output['mother'] = p
                    else:
                        raise utils.InvalidUsage("Parent %s has an invalid 'sex' attribute!" % p['_id'], status_code=500)
return output
return parents
def get_sex(self):
""" Returns a string value of 'M' or 'F' representing the survivor's
current effective sex. The basic math here is that we start from the
survivor's sheet on their sex and then apply any curses, etc. to get
our answer. """
sex = self.survivor["sex"]
def invert_sex(s):
if s == "M":
return "F"
elif s == "F":
return "M"
for ai_dict in self.list_assets("abilities_and_impairments"):
if ai_dict.get("reverse_sex", False):
sex = invert_sex(sex)
return sex
def get_survival_actions(self, return_type=dict):
""" Returns the SA's available to the survivor based on current
impairments, etc. Use 'return_type' = 'JSON' to get a list of dicts
back, rather than a single dict.
Important! There's a ton of business logic here, given that there's a
lot of interplay among game assets, so read this carefully and all the
way through before making changes!
"""
# helper called later
def update_available(sa_key_list, available=False, title_tip=None):
""" Inline helper func to update the list of available_actions while
iterating over survivor attributes. """
for sa_key in sa_key_list:
if not available:
if sa_key in available_actions.keys():
available_actions[sa_key]["available"] = False
available_actions[sa_key]["title_tip"] = title_tip
elif available:
sa = SA.get_asset(sa_key)
sa["available"] = True
sa["title_tip"] = title_tip
available_actions[sa_key] = sa
#
# action starts here. initialize and set defaults first:
#
AI = abilities_and_impairments.Assets()
SA = survival_actions.Assets()
available_actions = self.Settlement.get_survival_actions()
# check A&Is and FAs/SFAs # disorders coming soon! TKTK
attrib_keys = ['abilities_and_impairments', 'fighting_arts']
for ak in attrib_keys:
for a_dict in self.list_assets(ak): # iterate assets
if "survival_actions" in a_dict.keys(): # check for SA key
# handle 'enable' keys; special logic re: can't use FAs
if ak == 'fighting_arts' and self.cannot_use_fighting_arts():
pass
else:
update_available(
a_dict["survival_actions"].get("enable", []),
available = True,
title_tip = "Available due to '%s'" % a_dict["name"],
)
# handle 'disable' keys
update_available(
a_dict["survival_actions"].get("disable", []),
available = False,
title_tip = "Impairment '%s' prevents %s from using this ability." % (a_dict["name"], self.survivor["name"])
)
# support JSON return
if return_type == 'JSON':
output = []
for k, v in available_actions.iteritems():
output.append(v)
return sorted(output, key=lambda k: k['sort_order'])
# dict return
return available_actions
#
# evaluation / biz logic methods
#
def can_be_nominated_for_intimacy(self):
""" Returns a bool representing whether the survivor can do the
mommmy-daddy dance. """
for ai_dict in self.list_assets("abilities_and_impairments"):
if ai_dict.get("cannot_be_nominated_for_intimacy", False):
return False
if self.is_dead():
return False
return True
def can_gain_survival(self):
""" Returns a bool representing whether or not the survivor can GAIN
survival. This is not whether they can SPEND survival. """
for ai_dict in self.list_assets("abilities_and_impairments"):
if ai_dict.get("cannot_gain_survival", False):
return False
return True
def can_gain_bleeding_tokens(self):
""" Returns a bool describing whether the survivor can gain bleeding
tokens. """
if self.survivor.get('cannot_gain_bleeding_tokens', None) is None:
return True
return False
def cannot_spend_survival(self):
""" Returns a bool representing whether or not the survivor can SPEND
survival. This is not whether they can GAIN survival. """
if self.survivor.get("cannot_spend_survival", None) is True:
return True
for ai_dict in self.list_assets("abilities_and_impairments"):
if ai_dict.get("cannot_spend_survival", False):
return True
return False
def cannot_use_fighting_arts(self):
"""Returns a bool representing whether or not the survivor can use
Fighting Arts. """
if self.survivor.get("cannot_use_fighting_arts", None) is True:
return True
for ai_dict in self.list_assets("abilities_and_impairments"):
if ai_dict.get("cannot_use_fighting_arts", False):
return True
return False
def is_dead(self):
"""Returns a bool of whether the survivor is dead."""
if "dead" in self.survivor.keys():
return True
return False
def is_departing(self):
""" Returns a bool of whether the survivor is departing. """
return self.survivor.get('departing', False)
def is_founder(self):
"""Returns a bool of whether the survivor is a founding survivor. """
if self.survivor["born_in_ly"] == 0:
return True
return False
def is_savior(self):
""" Returns False if the survivor is NOT a savior; returns their color
if they are (which should evaluate to Boolean true wherever we evaluate
it, right?). """
# automatically return false if the campaign doesn't have saviors
if self.get_campaign(dict).get("saviors", None) is None:
return False
# automatically return the survivor's 'savior' attrib if the survivor
# is a savior
if self.survivor.get("savior", False):
return self.survivor["savior"]
# now do the legacy check
for s_dict in self.Saviors.get_dicts():
for s_ai in s_dict["abilities_and_impairments"]:
if s_ai in self.survivor["abilities_and_impairments"]:
return s_dict["color"]
return False
def skip_next_hunt(self):
"""Returns a bool representing whether or not the survivor has to sit
the next one out. """
if self.survivor.get("skip_next_hunt", None) is True:
return True
return False
#
# conversion and normalization methods
#
def baseline(self):
""" Baselines the MDB doc to bring it into compliance with our general
data model for survivors.
We update the actual data in the MDB, rather than simply having a base-
line model (e.g. in a config file somewhere) and then initializing new
assets such that they overwrite/fill-in the blanks.
This might seem like an odd design decision, but the data is designed to
be portable, so we inflict/enforce a lot of the model on the 'database'.
"""
if not "meta" in self.survivor.keys():
self.logger.warn("Creating 'meta' key for %s" % self)
self.survivor["meta"] = {}
self.perform_save = True
if not "attribute_detail" in self.survivor.keys():
self.reset_attribute_details(save=False)
self.perform_save = True
if not 'affinities' in self.survivor.keys():
self.survivor["affinities"] = {"red":0,"blue":0,"green":0}
self.perform_save = True
if not 'bleeding_tokens' in self.survivor.keys():
self.survivor["bleeding_tokens"] = 0
self.logger.info("Adding baseline 'bleeding_tokens' (int) attrib to %s" % self)
self.perform_save = True
if not 'max_bleeding_tokens' in self.survivor.keys():
self.survivor["max_bleeding_tokens"] = 5
self.logger.info("Adding baseline 'max_bleeding_tokens' (int) attrib to %s" % self)
self.perform_save = True
if not 'cursed_items' in self.survivor.keys():
self.survivor["cursed_items"] = []
self.perform_save = True
if not 'public' in self.survivor.keys():
self.survivor["public"] = False
self.perform_save = True
elif self.survivor["public"] == "checked":
self.survivor["public"] = True
self.perform_save = True
if not 'fighting_arts_levels' in self.survivor.keys():
self.survivor['fighting_arts_levels'] = {}
self.perform_save = True
if 'in_hunting_party' in self.survivor.keys():
if self.survivor['in_hunting_party'] == True:
self.survivor['departing'] = True
else:
self.survivor['departing'] = False
del self.survivor['in_hunting_party']
self.logger.info("Removed deprecated attribute 'in_hunting_party' from %s" % self)
self.perform_save = True
if not 'favorite' in self.survivor.keys():
self.survivor["favorite"] = []
self.perform_save = True
def bug_fixes(self, force_save=False):
""" This should be called during normalize() BEFORE you call baseline().
Compare with the way this works on the Settlement object. Make sure all
bugs are dated and have a ticket number, so we can remove them after a
year has passed.
"""
# 2017-10-25 The "king's_step" bug
if "king's_step" in self.survivor['fighting_arts']:
self.logger.debug("%s King's Step bad asset handle detected! Fixing..." % (self))
self.survivor['fighting_arts'].remove("king's_step")
self.survivor['fighting_arts'].append('kings_step')
self.perform_save = True
# 2017-10-28 The "weak spot" bug (other bad A&Is)
for bad_handle in ['Weak Spot', 'Intracranial hemmorhage','Weak spot: arms', "Weak spot is body."]:
if bad_handle in self.survivor['abilities_and_impairments']:
self.logger.debug("%s Bad asset handle '%s' detected! Fixing..." % (self, bad_handle))
self.survivor['abilities_and_impairments'].remove(bad_handle)
self.perform_save = True
# 2017-10-22 'acid_palms' asset handle Issue #341
# https://github.com/toconnell/kdm-manager/issues/341
if 'acid_palms' in self.survivor['abilities_and_impairments']:
self.logger.warn("[BUG] Detected 'acid_palms' in survivor A&I list!")
self.survivor['abilities_and_impairments'].remove('acid_palms')
if 'gorm' in self.Settlement.get_expansions():
self.survivor['abilities_and_impairments'].append('acid_palms_gorm')
self.logger.info("%s Replaced 'acid_palms' with 'acid_palms_gorm'" % self)
elif 'dragon_king' in self.Settlement.get_expansions():
self.survivor['abilities_and_impairments'].append('acid_palms_dk')
self.logger.info("%s Replaced 'acid_palms' with 'acid_palms_dk'" % self)
else:
self.logger.error("Unable to replace 'acid_palms' A&I with a real handle!")
self.perform_save = True
        # now, if we're forcing a save (e.g. because the settlement object is
        # calling this method), do it
if force_save and hasattr(self, 'perform_save') and self.perform_save:
self.save()
def duck_type(self):
""" Duck-types certain survivor sheet attributes, e.g. to make sure they
didn't experience a type change due to bad form input, etc. """
# enforce ints first
int_types = [
"Insanity",
"Accuracy",
"Evasion",
"Luck",
"Movement",
"Speed",
"Strength",
"Arms",
"Body",
"Head",
"Legs",
"Waist",
"Understanding",
"Courage",
"survival",
"hunt_xp",
"bleeding_tokens",
"max_bleeding_tokens",
]
for attrib in int_types:
if type(self.survivor[attrib]) != int:
self.logger.warn("%s Duck-typed '%s' attrib to int." % (self, attrib))
self.survivor[attrib] = int(self.survivor[attrib])
self.perform_save = True
# now translate checkbox ui stuff to bools
for checked_attrib in ['dead','sotf_reroll','retired']:
if checked_attrib in self.survivor.keys() and self.survivor[checked_attrib] == 'checked':
self.survivor[checked_attrib] = True
self.logger.warn("%s Duck-typed '%s' attrib from 'checked' to True" % (self, checked_attrib))
self.perform_save = True
if type(self.survivor["name"]) not in [unicode, str]:
self.survivor["name"] = str(self.survivor["name"])
self.perform_save = True
def min_attributes(self):
""" Applies assorted game rules to the survivor. """
for attrib in self.min_zero_attribs:
if self.survivor[attrib] < 0:
self.survivor[attrib] = 0
self.logger.warn("%s Survivor '%s' attrib normalized to minimum value of zero." % (self, attrib))
self.perform_save = True
for attrib in self.min_one_attribs:
if self.survivor[attrib] < 1:
                self.survivor[attrib] = 1
self.logger.warn("%s Survivor '%s' attrib normalized to minimum value of one." % (self, attrib))
self.perform_save = True
def convert_abilities_and_impairments(self):
""" Swaps out A&I names for handles. """
new_ai = []
for ai_dict in self.list_assets("abilities_and_impairments", log_failures=True):
new_ai.append(ai_dict["handle"])
self.logger.info("%s Migrated A&I '%s' to '%s'" % (self, ai_dict["name"], ai_dict["handle"]))
self.survivor["abilities_and_impairments"] = new_ai
self.survivor["meta"]["abilities_and_impairments_version"] = 1.0
self.logger.info("Converted A&Is from names (legacy) to handles for %s" % (self))
def convert_disorders(self):
""" Swaps out disorder names for handles. """
new_d = []
for d_dict in self.list_assets("disorders", log_failures=True):
new_d.append(d_dict["handle"])
self.logger.info("%s Migrated Disorder '%s' to '%s'" % (self, d_dict["name"], d_dict["handle"]))
self.survivor["disorders"] = new_d
self.survivor["meta"]["disorders_version"] = 1.0
self.logger.info("Converted Disorders from names (legacy) to handles for %s" % (self))
def convert_epithets(self):
""" Tries to convert epithets to handles. Drops anything it cannot. """
E = epithets
new_epithets = []
for e_dict in self.list_assets("epithets"):
new_epithets.append(e_dict["handle"])
self.logger.info("%s Converted '%s' epithet name to handle '%s'" % (self, e_dict["name"], e_dict["handle"]))
self.survivor["epithets"] = new_epithets
self.survivor["meta"]["epithets_version"] = 1.0
self.logger.info("Converted epithets from names (legacy) to handles for %s" % (self))
def convert_favorite(self):
""" Turns the 'favorite' attribute from a string to a list of email
addresses. """
        self.survivor['favorite'] = []
self.add_favorite(self.survivor["email"])
self.survivor["meta"]["favorites_version"] = 1.0
self.logger.info("Converted 'favorite' attrib from str (legacy) to list for %s" % (self))
def convert_fighting_arts(self):
""" Tries to convert Fighting Art names to to handles. Drops anything
that it cannot convert. """
FA = fighting_arts
new_fa_list = []
for fa_dict in self.list_assets("fighting_arts"):
if fa_dict is None:
pass
else:
new_fa_list.append(fa_dict["handle"])
self.logger.info("%s Converted '%s' Fighting Art name to handle '%s'" % (self, fa_dict["name"], fa_dict["handle"]))
self.survivor["fighting_arts"] = new_fa_list
self.survivor["meta"]["fighting_arts_version"] = 1.0
self.logger.info("Converted Fighting Arts from names (legacy) to handles for %s" % (self))
def convert_weapon_proficiency_type(self):
""" Swaps out names for handles. """
# first normalize an empty string to None type
if self.survivor["weapon_proficiency_type"] == "":
self.survivor["weapon_proficiency_type"] = None
if self.survivor["weapon_proficiency_type"] != None:
w_name = self.survivor["weapon_proficiency_type"]
W = weapon_proficiency.Assets()
w_dict = W.get_asset_from_name(w_name)
if w_dict is None:
self.logger.error("%s Weapon proficiency type '%s' could not be migrated!" % (self, w_name))
else:
self.survivor["weapon_proficiency_type"] = w_dict["handle"]
self.logger.info("%s Migrated weapon proficiency type '%s' to '%s'" % (self, w_name, w_dict["handle"]))
self.survivor["meta"]["weapon_proficiency_type_version"] = 1.0
self.logger.info("Converted weapon proficiency type name (legacy) to handle for %s" % (self))
def convert_special_attributes(self):
""" This one's...a hot mess on account of this feature having never
been properly implemented in the legacy app.
Basically, there's a list of known special attribute names, and we're
going to manually crosswalk them to modern handles.
"""
crosswalk = [
('Purified', 'potsun_purified'),
('Sun Eater', 'potsun_sun_eater'),
('Child of the Sun', 'potsun_child_of_the_sun'),
('Scar', 'potstars_scar'),
            ('Reincarnated surname', 'potstars_reincarnated_surname'),
            ('Noble surname', 'potstars_noble_surname'),
]
if 'expansion_attribs' in self.survivor.keys():
for i in crosswalk:
name, handle = i
if name in self.survivor['expansion_attribs'].keys():
self.survivor[handle] = True
                    self.logger.debug("%s Converted special attribute '%s' to '%s'." % (self, name, handle))
del self.survivor['expansion_attribs']
else:
pass
self.survivor["meta"]["special_attributes_version"] = 1.0
self.logger.info("Converted survivor special attributes for %s" % (self))
#
# NO METHODS BELOW THIS POINT other than request_response()
#
def request_response(self, action=None):
""" Initializes params from the request and then response to the
'action' kwarg appropriately. This is the ancestor of the legacy app
assets.Survivor.modify() method. """
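        # Example (sketch): request_response('set_name') with hypothetical
        # params {'name': 'Zachary'} calls set_name(), which renames the
        # survivor, and then falls through to the standard HTTP 200 response.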
self.get_request_params()
# get methods first
if action == "get":
return self.return_json()
elif action == "get_lineage":
return self.get_lineage()
elif action == "get_survival_actions":
sa = self.get_survival_actions("JSON")
return json.dumps(sa, default=json_util.default)
# controllers with biz logic - i.e. fancy-pants methods
elif action == "controls_of_death":
self.controls_of_death()
elif action == "update_bleeding_tokens":
self.update_bleeding_tokens()
elif action == "set_bleeding_tokens":
self.set_bleeding_tokens()
# add/rm assets
elif action == "add_favorite":
self.add_favorite()
elif action == "rm_favorite":
self.rm_favorite()
elif action == "add_game_asset":
self.add_game_asset()
elif action == "rm_game_asset":
self.rm_game_asset()
elif action == "set_many_game_assets": # serial add/rm game asset calls
self.set_many_game_assets()
elif action == "replace_game_assets":
self.replace_game_assets()
elif action == "toggle_fighting_arts_level":
self.toggle_fighting_arts_level()
# Cursed item methods
elif action == "add_cursed_item":
self.add_cursed_item()
elif action == "rm_cursed_item":
self.rm_cursed_item()
# savior stuff
elif action == "set_savior_status":
self.set_savior_status()
# misc sheet operations
elif action == "set_name":
self.set_name()
elif action == "set_email":
return self.set_email() # because we're doing server-side validation
elif action == "set_retired":
self.set_retired()
elif action == "set_sex":
self.set_sex()
elif action == "set_constellation":
self.set_constellation()
elif action == "set_weapon_proficiency_type":
self.set_weapon_proficiency_type()
elif action == 'set_special_attribute':
self.set_special_attribute()
# sheet attribute operations
elif action == "set_attribute":
self.set_attribute()
elif action == "set_many_attributes": # serial set_attribute()
self.set_many_attributes()
elif action == "set_attribute_detail": # tokens/gear
self.set_attribute_detail()
elif action == "update_attribute":
self.update_attribute()
# notes
elif action == 'add_note':
self.add_note()
elif action == 'rm_note':
self.rm_note()
# affinities
elif action == "update_affinities":
self.update_affinities()
elif action == "set_affinity":
self.set_affinity()
# status flags!
elif action == 'set_status_flag':
self.set_status_flag()
elif action == 'toggle_status_flag':
self.toggle_status_flag()
elif action == 'toggle_boolean':
self.toggle_boolean()
# survival
elif action == "update_survival":
self.update_survival()
elif action == "set_survival":
self.set_survival()
# manager-only / non-game methods
elif action == "toggle_sotf_reroll":
self.toggle_sotf_reroll()
elif action == 'set_parent':
self.set_parent()
else:
# unknown/unsupported action response
self.logger.warn("Unsupported survivor action '%s' received!" % action)
return utils.http_400
# finish successfully
if self.params.get('serialize_on_response', False):
return Response(response=self.serialize(), status=200)
else:
return utils.http_200
# ~fin
#!C:\Users\yali\PycharmProjects\calculator_tdd\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
]
| |
b3dcd0daa543202e55b42bfd4a1b1515215608b5 | 57c570d1b5a621158d8763f935e2069be6b8c90a | /tykj-operation/tykj-operation/MarketSearchCrawler/MarketSearch/items.py | 61a4e605750086427e3a5512fa008afb0e632c3b | []
| no_license | liuliainio/liuli | e011decf45f7eca7009a12ad4a96f33a17055945 | 203fbf4f135efb6432c77b937633003ce2f2c9a2 | refs/heads/master | 2021-01-10T20:35:08.070770 | 2018-08-21T05:52:59 | 2018-08-21T05:52:59 | 25,625,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,684 | py | # Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from MarketSearch.utils import strip_space
from scrapy.contrib.loader.processor import MapCompose, Join, TakeFirst
from scrapy.item import Item, Field
from scrapy.utils.markup import remove_comments, unquote_markup, \
replace_escape_chars, remove_tags
class CrawledItem(Item):
name = Field(input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
icon_link = Field(input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
source = Field(output_processor=TakeFirst(),)
source_link = Field(output_processor=TakeFirst(),)
rating = Field(output_processor=Join(),)
# rating = Field()
version = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
developer = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
sdk_support = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=Join(),)
category = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
screen_support = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=Join(),
)
apk_size = Field(default='', input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
language = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
publish_date = Field(output_processor=TakeFirst(),)
downloads = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
# downloads = Field()
description = Field(
default='',
input_processor=MapCompose(unquote_markup,
remove_comments,
replace_escape_chars,
strip_space),
output_processor=Join(),
)
images = Field(default='', output_processor=Join(),)
qr_link = Field(default='', output_processor=TakeFirst(),)
download_link = Field(default='', input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
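# Editor's note (disabled demonstration): the Field processors above behave
# like small pipelines. MapCompose applies each function to every scraped
# value, TakeFirst keeps the first non-null result, and Join concatenates.
# This assumes the old scrapy.contrib import path used at the top of this
# file; the sample strings are made up, and strip_space is assumed to trim
# surrounding whitespace.
if 0:
    proc_in = MapCompose(unquote_markup, strip_space) # per-value cleanup
    proc_out = TakeFirst() # collapse the value list to a single value
    values = proc_in([u' My App ', u'Other App']) # e.g. [u'My App', u'Other App']
    name = proc_out(values) # e.g. u'My App'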
class AppItem(CrawledItem):
update_note = Field(
default='',
input_processor=MapCompose(unquote_markup,
remove_comments,
replace_escape_chars,
strip_space),
output_processor=Join(),
)
labels = Field(
default='',
input_processor=MapCompose(unquote_markup,
remove_comments,
replace_escape_chars,
strip_space),
output_processor=Join(separator=u','),
)
icon_path = Field()
images_path = Field()
last_crawl = Field()
class FinalAppItem(AppItem):
package_name = Field(
default='',
input_processor=MapCompose(unquote_markup,
remove_comments,
replace_escape_chars,
strip_space),
output_processor=Join(),
)
is_break = Field(default=-1)
platform = Field(default=1)
file_type = Field(default='apk')
avail_download_links = Field(default='')
error = Field(default='')
min_sdk_version = Field(default=0)
vol_id = Field(default=0)
status = Field(default=0)
created_at = Field()
version_code = Field()
images_path = Field(default='')
icon_path = Field(default='')
class AppleItem(CrawledItem):
icon_path = Field()
images_path = Field()
last_crawl = Field()
price = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
app_id = Field()
apple_id = Field()
class TripAdvisorItem(Item):
source = Field(output_processor=TakeFirst(),)
source_link = Field(output_processor=TakeFirst(),)
name = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
rating = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
category = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
reviews = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
price = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
city = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
address = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=Join(),)
phone = Field()
hotel_class = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
rank_of_city = Field()
longitude_latitude = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
owner_website = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
last_crawl = Field()
class YelpItem(Item):
source = Field(output_processor=TakeFirst(),)
source_link = Field(output_processor=TakeFirst(),)
name = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
rating = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
category = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=Join(','),)
reviews = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
price = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
city = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
address = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=Join(),)
owner_website = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
phone = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
longitude_latitude = Field(
default='',
input_processor=MapCompose(unquote_markup,
strip_space),
output_processor=TakeFirst(),
)
last_crawl = Field()
class YoutubeItem(Item):
source = Field(output_processor=TakeFirst(),)
source_link = Field(output_processor=TakeFirst(),)
name = Field(input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
likes = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
dislikes = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
duration = Field(input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
view_count = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
author = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
category = Field(default='', input_processor=MapCompose(unquote_markup, strip_space), output_processor=TakeFirst(),)
publish_date = Field(output_processor=TakeFirst(),)
comments = Field(default=0, input_processor=MapCompose(strip_space), output_processor=TakeFirst(),)
description = Field(
default='',
input_processor=MapCompose(unquote_markup,
remove_comments,
replace_escape_chars,
strip_space),
output_processor=Join(),
)
last_crawl = Field()
class UserNameItem(Item):
name = Field()
class DownloadLinkItem(Item):
url = Field()
| [
"[email protected]"
]
| |
ddf0882909a579898ddb49cf6223619f3e4ae28f | ac6743eb881c77ba8b80e19638969a032f0c5df3 | /leo/src/leoGlobals.py | 9eaa5ccd6cd7c3c5ec16d4c45095f681ad3b3726 | []
| no_license | leo-editor/leo-cvs-2006-2008 | 4a983046e293d809698f11aa47eae640ad4fd07a | da696020bda4752700bf96f6417751346c92e3c4 | refs/heads/master | 2016-09-08T01:20:34.767629 | 2008-02-27T19:56:03 | 2008-02-27T19:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184,203 | py | # -*- coding: utf-8 -*-
#@+leo-ver=4-thin
#@+node:ekr.20031218072017.3093:@thin leoGlobals.py
#@@first
"""Global constants, variables and utility functions used throughout Leo."""
#@@language python
#@@tabwidth -4
#@@pagewidth 80
# __pychecker__ = '--no-import --no-reimportself --no-reimport --no-constCond --no-constant1'
# Disable all import warnings: This module must do strange things with imports.
# Disable checks for constant conditionals.
#@<< imports >>
#@+node:ekr.20050208101229:<< imports >>
import leoGlobals as g # So code can use g below.
# Don't import this here: it messes up Leo's startup code.
# import leoTest
try:
import gc
except ImportError:
gc = None
try:
import filecmp
except ImportError: # does not exist in jython.
filecmp = None
try:
import gettext
except ImportError: # does not exist in jython.
gettext = None
# Do NOT import pdb here! We shall define pdb as a _function_ below.
# import pdb
import exceptions
import operator
import re
import sys
import time
import zipfile
# These do not exist in IronPython.
# However, it *is* valid for IronPython to use the Python 2.4 libs!
import difflib
import os
import string
import tempfile
import traceback
import types
# print '(types.FrameType)',repr(types.FrameType)
# print '(types.StringTypes)',repr(types.StringTypes)
#@-node:ekr.20050208101229:<< imports >>
#@nl
#@<< define general constants >>
#@+node:ekr.20031218072017.3094:<< define general constants >>
body_newline = '\n'
body_ignored_newline = '\r'
#@-node:ekr.20031218072017.3094:<< define general constants >>
#@nl
#@<< define global data structures >>
#@+node:EKR.20040610094819:<< define global data structures >>
# Visible externally so plugins may add to the list of directives.
globalDirectiveList = [
# New in Leo 4.4.4: these used to be in leoKeywords.
'all','c','code','delims','doc','end_raw',
'first','last','others','raw','root-code','root-doc',
# Old.
"color", "comment", "encoding", "header", "ignore", "killcolor",
"language", "lineending", "nocolor", "noheader", "nowrap",
"pagewidth", "path", "quiet", "root", "silent",
"tabwidth", "terse", "unit", "verbose", "wrap"]
#@-node:EKR.20040610094819:<< define global data structures >>
#@nl
app = None # The singleton app object.
unitTesting = False # A synonym for app.unitTesting.
#@+others
#@+node:ekr.20050328133058:g.createStandAloneTkApp
# This must be defined in leoGlobals: g.app.gui doesn't exist yet.
def createStandAloneTkApp(pluginName=''):
'''Create a Tk version of the g.app object for 'stand-alone' plugins.'''
if not g.app:
# Important: these references do not make Leo's core gui-dependent.
# In other words, this function is called only when Tkinter should be the gui.
import Tkinter as Tk
Pmw = g.importExtension('Pmw',pluginName=pluginName,verbose=True)
if Tk and Pmw:
import leoApp, leoGui
g.app = leoApp.LeoApp()
g.app.root = Tk.Tk()
Pmw.initialise(g.app.root)
g.app.gui = leoGui.nullGui('<stand-alone app gui>')
g.computeStandardDirectories()
return g.app
#@-node:ekr.20050328133058:g.createStandAloneTkApp
#@+node:ekr.20031218072017.3095:Checking Leo Files...
#@+node:ekr.20031218072017.822:createTopologyList
def createTopologyList (c,root=None,useHeadlines=False):
"""Creates a list describing a node and all its descendents"""
if not root: root = c.rootPosition()
v = root
if useHeadlines:
aList = [(v.numberOfChildren(),v.headString()),]
else:
aList = [v.numberOfChildren()]
child = v.firstChild()
while child:
aList.append(g.createTopologyList(c,child,useHeadlines))
child = child.next()
return aList
#@-node:ekr.20031218072017.822:createTopologyList
#@-node:ekr.20031218072017.3095:Checking Leo Files...
#@+node:ekr.20031218072017.3099:Commands & Directives
#@+node:ekr.20050304072744:Compute directories... (leoGlobals)
#@+node:ekr.20041117155521:computeGlobalConfigDir
def computeGlobalConfigDir():
import leoGlobals as g
encoding = g.startupEncoding()
if hasattr(sys,'leo_config_directory'):
theDir = sys.leo_config_directory
else:
theDir = g.os_path_join(g.app.loadDir,"..","config")
if theDir:
theDir = g.os_path_abspath(theDir)
if (
not theDir or
not g.os_path_exists(theDir,encoding) or
not g.os_path_isdir(theDir,encoding)
):
theDir = None
return theDir
#@-node:ekr.20041117155521:computeGlobalConfigDir
#@+node:ekr.20041117151301:computeHomeDir
def computeHomeDir():
"""Returns the user's home directory."""
import leoGlobals as g
encoding = g.startupEncoding()
# dotDir = g.os_path_abspath('./',encoding)
home = os.getenv('HOME',default=None)
if home and len(home) > 1 and home[0]=='%' and home[-1]=='%':
# Get the indirect reference to the true home.
home = os.getenv(home[1:-1],default=None)
if home:
# N.B. This returns the _working_ directory if home is None!
# This was the source of the 4.3 .leoID.txt problems.
home = g.os_path_abspath(home,encoding)
if (
not g.os_path_exists(home,encoding) or
not g.os_path_isdir(home,encoding)
):
home = None
# g.trace(home)
return home
#@-node:ekr.20041117151301:computeHomeDir
#@+node:ekr.20060416113431:computeLeoDir
def computeLeoDir ():
loadDir = g.app.loadDir
theDir = g.os_path_dirname(loadDir)
if theDir not in sys.path:
sys.path.append(theDir)
if 0: # This is required so we can do import leo (as a package)
theParentDir = g.os_path_dirname(theDir)
if theParentDir not in sys.path:
sys.path.append(theParentDir)
return theDir
#@-node:ekr.20060416113431:computeLeoDir
#@+node:ekr.20031218072017.1937:computeLoadDir
def computeLoadDir():
"""Returns the directory containing leo.py."""
import leoGlobals as g
import sys
try:
# Fix a hangnail: on Windows the drive letter returned by
# __file__ is randomly upper or lower case!
        # That made for an ugly recent files list.
path = g.__file__ # was leo.__file__
if sys.platform=='win32':
if len(path) > 2 and path[1]==':':
# Convert the drive name to upper case.
path = path[0].upper() + path[1:]
encoding = g.startupEncoding()
path = g.os_path_abspath(path,encoding)
if path:
loadDir = g.os_path_dirname(path,encoding)
else: loadDir = None
if (
not loadDir or
not g.os_path_exists(loadDir,encoding) or
not g.os_path_isdir(loadDir,encoding)
):
loadDir = os.getcwd()
print "Using emergency loadDir:",repr(loadDir)
loadDir = g.os_path_abspath(loadDir,encoding)
# g.es("load dir:",loadDir,color="blue")
return loadDir
except:
print "Exception getting load directory"
raise
#import traceback ; traceback.print_exc()
#return None
#@-node:ekr.20031218072017.1937:computeLoadDir
#@+node:ekr.20050328133444:computeStandardDirectories
def computeStandardDirectories():
'''Set g.app.loadDir, g.app.homeDir and g.app.globalConfigDir.'''
if 0:
import sys
for s in sys.path: g.trace(s)
g.app.loadDir = g.computeLoadDir()
# Depends on g.app.tkEncoding: uses utf-8 for now.
g.app.leoDir = g.computeLeoDir()
g.app.homeDir = g.computeHomeDir()
g.app.extensionsDir = g.os_path_abspath(
g.os_path_join(g.app.loadDir,'..','extensions'))
g.app.globalConfigDir = g.computeGlobalConfigDir()
g.app.testDir = g.os_path_abspath(
g.os_path_join(g.app.loadDir,'..','test'))
g.app.user_xresources_path = g.os_path_join(g.app.homeDir,'.leo_xresources')
#@-node:ekr.20050328133444:computeStandardDirectories
#@+node:ekr.20041117151301.1:startupEncoding
def startupEncoding ():
import leoGlobals as g
import sys
if sys.platform=="win32": # "mbcs" exists only on Windows.
encoding = "mbcs"
elif sys.platform=="dawwin":
encoding = "utf-8"
else:
encoding = g.app.tkEncoding
return encoding
#@-node:ekr.20041117151301.1:startupEncoding
#@-node:ekr.20050304072744:Compute directories... (leoGlobals)
#@+node:ekr.20031218072017.1380:Directive utils...
#@+node:ekr.20031218072017.1381:the @language and @comment directives (leoUtils)
#@+node:ekr.20031218072017.1382:set_delims_from_language
# Returns a tuple (single,start,end) of comment delims
def set_delims_from_language(language):
# g.trace(g.callers())
val = app.language_delims_dict.get(language)
if val:
delim1,delim2,delim3 = g.set_delims_from_string(val)
if delim2 and not delim3:
return None,delim1,delim2
else: # 0,1 or 3 params.
return delim1,delim2,delim3
else:
return None, None, None # Indicate that no change should be made
#@-node:ekr.20031218072017.1382:set_delims_from_language
#@+node:ekr.20031218072017.1383:set_delims_from_string
def set_delims_from_string(s):
"""Returns (delim1, delim2, delim2), the delims following the @comment directive.
This code can be called from @language logic, in which case s can point at @comment"""
# Skip an optional @comment
tag = "@comment"
i = 0
if g.match_word(s,i,tag):
i += len(tag)
count = 0 ; delims = [None, None, None]
while count < 3 and i < len(s):
i = j = g.skip_ws(s,i)
while i < len(s) and not g.is_ws(s[i]) and not g.is_nl(s,i):
i += 1
if j == i: break
delims[count] = s[j:i]
count += 1
# 'rr 09/25/02
if count == 2: # delims[0] is always the single-line delim.
delims[2] = delims[1]
delims[1] = delims[0]
delims[0] = None
# 7/8/02: The "REM hack": replace underscores by blanks.
# 9/25/02: The "perlpod hack": replace double underscores by newlines.
for i in xrange(0,3):
if delims[i]:
delims[i] = string.replace(delims[i],"__",'\n')
delims[i] = string.replace(delims[i],'_',' ')
return delims[0], delims[1], delims[2]
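# Editor's example (disabled, following the "if 0:" test-code idiom used
# elsewhere in this file): the one-, two- and three-delimiter forms, plus
# the underscore hack described above.
if 0:
    print set_delims_from_string("@comment #")    # ('#', None, None)
    print set_delims_from_string("<!-- -->")      # (None, '<!--', '-->')
    print set_delims_from_string("; /* */")       # (';', '/*', '*/')
    print set_delims_from_string("@comment REM_") # ('REM ', None, None)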
#@-node:ekr.20031218072017.1383:set_delims_from_string
#@+node:ekr.20031218072017.1384:set_language
def set_language(s,i,issue_errors_flag=False):
"""Scan the @language directive that appears at s[i:].
The @language may have been stripped away.
Returns (language, delim1, delim2, delim3)
"""
tag = "@language"
# g.trace(g.get_line(s,i))
assert(i != None)
# assert(g.match_word(s,i,tag))
if g.match_word(s,i,tag):
i += len(tag)
# Get the argument.
i = g.skip_ws(s, i)
j = i ; i = g.skip_c_id(s,i)
# Allow tcl/tk.
arg = string.lower(s[j:i])
if app.language_delims_dict.get(arg):
language = arg
delim1, delim2, delim3 = g.set_delims_from_language(language)
return language, delim1, delim2, delim3
if issue_errors_flag:
g.es("ignoring:",g.get_line(s,i))
return None, None, None, None,
#@-node:ekr.20031218072017.1384:set_language
#@-node:ekr.20031218072017.1381:the @language and @comment directives (leoUtils)
#@+node:EKR.20040504150046.4:g.comment_delims_from_extension
def comment_delims_from_extension(filename):
"""
Return the comment delims corresponding to the filename's extension.
>>> g.comment_delims_from_extension(".py")
('#', None, None)
>>> g.comment_delims_from_extension(".c")
('//', '/*', '*/')
>>> g.comment_delims_from_extension(".html")
(None, '<!--', '-->')
"""
root, ext = os.path.splitext(filename)
if ext == '.tmp':
root, ext = os.path.splitext(root)
language = g.app.extension_dict.get(ext[1:])
if ext:
return g.set_delims_from_language(language)
else:
g.trace("unknown extension %s" % ext)
return None,None,None
#@-node:EKR.20040504150046.4:g.comment_delims_from_extension
#@+node:ekr.20071109165315:g.computeRelativePath
def computeRelativePath (path):
if len(path) > 2 and (
(path[0]=='<' and path[-1] == '>') or
(path[0]=='"' and path[-1] == '"') or
(path[0]=="'" and path[-1] == "'")
):
path = path[1:-1].strip()
# 11/14/02: we want a _relative_ path, not an absolute path.
# path = g.os_path_join(g.app.loadDir,path)
return path
#@-node:ekr.20071109165315:g.computeRelativePath
#@+node:ekr.20031218072017.1385:g.findReference
#@+at
#@nonl
# We search the descendants of v looking for the definition node matching
# name. There should be exactly one such node (descendants of other
# definition nodes are not searched).
#@-at
#@@c
def findReference(c,name,root):
for p in root.subtree_iter():
assert(p!=root)
if p.matchHeadline(name) and not p.isAtIgnoreNode():
return p
# g.trace("not found:",name,root)
return c.nullPosition()
#@-node:ekr.20031218072017.1385:g.findReference
#@+node:ekr.20031218072017.1260:g.get_directives_dict
# The caller passes [root_node] or None as the second arg. This allows us to distinguish between None and [None].
def get_directives_dict(p,root=None):
"""Scans root for @directives found in globalDirectiveList.
Returns a dict containing pointers to the start of each directive"""
if root: root_node = root[0]
theDict = {}
# The headline has higher precedence because it is more visible.
for kind,s in (
        ('head',p.v.t.headString),
        ('body',p.v.t.bodyString),
):
i = 0 ; n = len(s)
while i < n:
if s[i] == '@' and i+1 < n:
#@ << set theDict for @ directives >>
#@+node:ekr.20031218072017.1261:<< set theDict for @ directives >>
j = g.skip_c_id(s,i+1)
word = s[i+1:j]
global globalDirectiveList
if word in globalDirectiveList:
if theDict.has_key(word):
# Ignore second value.
pass
# g.es("Warning: conflicting values for",word,color="blue")
else:
# theDict [word] = i
k = g.skip_line(s,j)
theDict[word] = s[j:k].strip()
#@nonl
#@-node:ekr.20031218072017.1261:<< set theDict for @ directives >>
#@nl
elif kind == 'body' and root and g.match(s,i,"<<"):
#@ << set theDict["root"] for noweb * chunks >>
#@+node:ekr.20031218072017.1262:<< set theDict["root"] for noweb * chunks >>
#@+at
#@nonl
# The following looks for chunk definitions of the form < < *
# > > =. If found, we take this to be equivalent to @root
# filename if the headline has the form @root filename.
#@-at
#@@c
i = g.skip_ws(s,i+2)
if i < n and s[i] == '*' :
i = g.skip_ws(s,i+1) # Skip the '*'
if g.match(s,i,">>="):
# < < * > > = implies that @root should appear in the headline.
i += 3
if root_node:
theDict["root"]=0 # value not immportant
else:
g.es('',g.angleBrackets("*") + "= requires @root in the headline")
#@-node:ekr.20031218072017.1262:<< set theDict["root"] for noweb * chunks >>
#@nl
i = g.skip_line(s,i)
return theDict
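# Editor's sketch (disabled; p is a hypothetical position): for a node whose
# body contains "@language python" and "@tabwidth -4", the returned dict
# maps each directive name to the remainder of its line.
if 0:
    d = get_directives_dict(p)
    assert d.get('language') == 'python'
    assert d.get('tabwidth') == '-4'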
#@-node:ekr.20031218072017.1260:g.get_directives_dict
#@+node:ekr.20031218072017.1387:g.scanAtEncodingDirective
def scanAtEncodingDirective(theDict):
"""Scan the @encoding directive at s[theDict["encoding"]:].
Returns the encoding name or None if the encoding name is invalid.
"""
encoding = theDict.get('encoding')
if not encoding:
return None
if g.isValidEncoding(encoding):
# g.trace(encoding)
return encoding
else:
g.es("invalid @encoding:",encoding,color="red")
return None
#@-node:ekr.20031218072017.1387:g.scanAtEncodingDirective
#@+node:ekr.20031218072017.1388:g.scanAtLineendingDirective
def scanAtLineendingDirective(theDict):
"""Scan the @lineending directive at s[theDict["lineending"]:].
Returns the actual lineending or None if the name of the lineending is invalid.
"""
e = theDict.get('encoding')
if e in ("cr","crlf","lf","nl","platform"):
lineending = g.getOutputNewline(name=e)
# g.trace(e,lineending)
return lineending
else:
# g.es("invalid @lineending directive:",e,color="red")
return None
#@-node:ekr.20031218072017.1388:g.scanAtLineendingDirective
#@+node:ekr.20031218072017.1389:g.scanAtPagewidthDirective
def scanAtPagewidthDirective(theDict,issue_error_flag=False):
"""Scan the @pagewidth directive at s[theDict["pagewidth"]:].
Returns the value of the width or None if the width is invalid.
"""
s = theDict.get('pagewidth')
i, val = g.skip_long(s,0)
if val != None and val > 0:
# g.trace(val)
return val
else:
if issue_error_flag:
g.es("ignoring",s,color="red")
return None
#@-node:ekr.20031218072017.1389:g.scanAtPagewidthDirective
#@+node:ekr.20031218072017.3154:g.scanAtRootOptions
def scanAtRootOptions (s,i,err_flag=False):
# The @root has been eaten when called from tangle.scanAllDirectives.
if g.match(s,i,"@root"):
i += len("@root")
i = g.skip_ws(s,i)
mode = None
while g.match(s,i,'-'):
#@ << scan another @root option >>
#@+node:ekr.20031218072017.3155:<< scan another @root option >>
i += 1 ; err = -1
if g.match_word(s,i,"code"): # Just match the prefix.
if not mode: mode = "code"
elif err_flag: g.es("modes conflict in:",g.get_line(s,i))
elif g.match(s,i,"doc"): # Just match the prefix.
if not mode: mode = "doc"
elif err_flag: g.es("modes conflict in:",g.get_line(s,i))
else:
err = i-1
# Scan to the next minus sign.
while i < len(s) and s[i] not in (' ','\t','-'):
i += 1
if err > -1 and err_flag:
g.es("unknown option:",s[err:i],"in",g.get_line(s,i))
#@-node:ekr.20031218072017.3155:<< scan another @root option >>
#@nl
if mode == None:
doc = app.config.at_root_bodies_start_in_doc_mode
mode = g.choose(doc,"doc","code")
return i,mode
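# Editor's example (disabled): scanning the mode options that follow @root.
if 0:
    i,mode = scanAtRootOptions("@root-code myfile.py",0)
    # mode == 'code' and i indexes the character just past '-code'.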
#@-node:ekr.20031218072017.3154:g.scanAtRootOptions
#@+node:ekr.20031218072017.1390:g.scanAtTabwidthDirective
def scanAtTabwidthDirective(theDict,issue_error_flag=False):
"""Scan the @tabwidth directive at s[theDict["tabwidth"]:].
Returns the value of the width or None if the width is invalid.
"""
s = theDict.get('tabwidth')
junk,val = g.skip_long(s,0)
if val != None and val != 0:
# g.trace(val)
return val
else:
if issue_error_flag:
g.es("Ignoring",s,color="red")
return None
#@-node:ekr.20031218072017.1390:g.scanAtTabwidthDirective
#@+node:ekr.20070302160802:g.scanColorDirectives
def scanColorDirectives(c,p):
'''Return the language in effect at position p.'''
if c is None: return # c may be None for testing.
language = c.target_language and c.target_language.lower() or 'python'
p = p.copy()
for p in p.self_and_parents_iter():
d = g.get_directives_dict(p)
z = d.get('language')
if z is not None:
language,junk,junk,junk = g.set_language(z,0)
return language
return language
#@-node:ekr.20070302160802:g.scanColorDirectives
#@+node:ekr.20031218072017.1391:g.scanDirectives
#@+at
#@nonl
# Perhaps this routine should be the basis of atFile.scanAllDirectives and
# tangle.scanAllDirectives, but I am loath to make any further to these two
# already-infamous routines. Also, this code does not check for @color and
# @nocolor directives: leoColor.useSyntaxColoring does that.
#@-at
#@@c
def scanDirectives(c,p=None):
"""Scan vnode v and v's ancestors looking for directives.
Returns a dict containing the results, including defaults."""
if p is None:
p = c.currentPosition()
#@ << Set local vars >>
#@+node:ekr.20031218072017.1392:<< Set local vars >>
page_width = c.page_width
tab_width = c.tab_width
language = c.target_language
if c.target_language:
c.target_language = c.target_language.lower()
delim1, delim2, delim3 = g.set_delims_from_language(c.target_language)
path = None
encoding = None # 2/25/03: This must be none so that the caller can set a proper default.
lineending = g.getOutputNewline(c=c) # Init from config settings.
wrap = c.config.getBool("body_pane_wraps")
#@-node:ekr.20031218072017.1392:<< Set local vars >>
#@nl
old = {}
pluginsList = [] # 5/17/03: a list of items for use by plugins.
for p in p.self_and_parents_iter():
theDict = g.get_directives_dict(p)
#@ << Test for @comment and @language >>
#@+node:ekr.20031218072017.1393:<< Test for @comment and @language >>
# 1/23/05: Any previous @language or @comment prevents processing up the tree.
# This code is now like the code in tangle.scanAlldirectives.
if old.has_key("comment") or old.has_key("language"):
pass
elif theDict.has_key("comment"):
z = theDict["comment"]
delim1,delim2,delim3 = g.set_delims_from_string(z)
elif theDict.has_key("language"):
z = theDict["language"]
language,delim1,delim2,delim3 = g.set_language(z,0)
#@-node:ekr.20031218072017.1393:<< Test for @comment and @language >>
#@nl
#@ << Test for @encoding >>
#@+node:ekr.20031218072017.1394:<< Test for @encoding >>
if not old.has_key("encoding") and theDict.has_key("encoding"):
e = g.scanAtEncodingDirective(theDict)
if e:
encoding = e
#@-node:ekr.20031218072017.1394:<< Test for @encoding >>
#@nl
#@ << Test for @lineending >>
#@+node:ekr.20031218072017.1395:<< Test for @lineending >>
if not old.has_key("lineending") and theDict.has_key("lineending"):
e = g.scanAtLineendingDirective(theDict)
if e:
lineending = e
#@-node:ekr.20031218072017.1395:<< Test for @lineending >>
#@nl
#@ << Test for @pagewidth >>
#@+node:ekr.20031218072017.1396:<< Test for @pagewidth >>
if theDict.has_key("pagewidth") and not old.has_key("pagewidth"):
w = g.scanAtPagewidthDirective(theDict)
if w and w > 0:
page_width = w
#@-node:ekr.20031218072017.1396:<< Test for @pagewidth >>
#@nl
#@ << Test for @path >>
#@+node:ekr.20031218072017.1397:<< Test for @path >> (g.scanDirectives)
if not path and not old.has_key("path") and theDict.has_key("path"):
path = theDict["path"]
path = g.computeRelativePath(path)
if path and len(path) > 0:
base = g.getBaseDirectory(c) # returns "" on error.
path = g.os_path_join(base,path)
#@-node:ekr.20031218072017.1397:<< Test for @path >> (g.scanDirectives)
#@nl
#@ << Test for @tabwidth >>
#@+node:ekr.20031218072017.1399:<< Test for @tabwidth >>
if theDict.has_key("tabwidth") and not old.has_key("tabwidth"):
w = g.scanAtTabwidthDirective(theDict)
if w and w != 0:
tab_width = w
#@-node:ekr.20031218072017.1399:<< Test for @tabwidth >>
#@nl
#@ << Test for @wrap and @nowrap >>
#@+node:ekr.20031218072017.1400:<< Test for @wrap and @nowrap >>
if not old.has_key("wrap") and not old.has_key("nowrap"):
if theDict.has_key("wrap"):
wrap = True
elif theDict.has_key("nowrap"):
wrap = False
#@-node:ekr.20031218072017.1400:<< Test for @wrap and @nowrap >>
#@nl
g.doHook("scan-directives",c=c,p=p,v=p,s=p.bodyString(),
old_dict=old,dict=theDict,pluginsList=pluginsList)
old.update(theDict)
if path == None: path = g.getBaseDirectory(c)
# g.trace('tabwidth',tab_width)
return {
"delims" : (delim1,delim2,delim3),
"encoding" : encoding,
"language" : language,
"lineending": lineending,
"pagewidth" : page_width,
"path" : path,
"tabwidth" : tab_width,
"pluginsList": pluginsList,
"wrap" : wrap }
#@-node:ekr.20031218072017.1391:g.scanDirectives
#@+node:ekr.20040715155607:g.scanForAtIgnore
def scanForAtIgnore(c,p):
"""Scan position p and its ancestors looking for @ignore directives."""
if g.app.unitTesting:
return False # For unit tests.
for p in p.self_and_parents_iter():
d = g.get_directives_dict(p)
if d.has_key("ignore"):
return True
return False
#@-node:ekr.20040715155607:g.scanForAtIgnore
#@+node:ekr.20040712084911.1:g.scanForAtLanguage
def scanForAtLanguage(c,p):
"""Scan position p and p's ancestors looking only for @language and @ignore directives.
Returns the language found, or c.target_language."""
# Unlike the code in x.scanAllDirectives, this code ignores @comment directives.
if c and p:
for p in p.self_and_parents_iter():
d = g.get_directives_dict(p)
if d.has_key("language"):
z = d["language"]
language,delim1,delim2,delim3 = g.set_language(z,0)
return language
return c.target_language
#@-node:ekr.20040712084911.1:g.scanForAtLanguage
#@+node:ekr.20041123094807:g.scanForAtSettings
def scanForAtSettings(p):
"""Scan position p and its ancestors looking for @settings nodes."""
for p in p.self_and_parents_iter():
h = p.headString()
h = g.app.config.canonicalizeSettingName(h)
if h.startswith("@settings"):
return True
return False
#@-node:ekr.20041123094807:g.scanForAtSettings
#@+node:ekr.20031218072017.1386:getOutputNewline
def getOutputNewline (c=None,name=None):
'''Convert the name of a line ending to the line ending itself.
Priority:
- Use name if name given
- Use c.config.output_newline if c given,
- Otherwise use g.app.config.output_newline.'''
# g.trace(c,name,c.config.output_newline)
if name: s = name
elif c: s = c.config.output_newline
else: s = app.config.output_newline
if not s: s = ''
s = s.lower()
if s in ( "nl","lf"): s = '\n'
elif s == "cr": s = '\r'
elif s == "platform": s = os.linesep # 12/2/03: emakital
elif s == "crlf": s = "\r\n"
else: s = '\n' # Default for erroneous values.
return s
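# Editor's example (disabled): the name-to-lineending mapping defined above.
if 0:
    assert getOutputNewline(name="lf") == '\n'
    assert getOutputNewline(name="crlf") == '\r\n'
    assert getOutputNewline(name="platform") == os.linesep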
#@-node:ekr.20031218072017.1386:getOutputNewline
#@-node:ekr.20031218072017.1380:Directive utils...
#@+node:ekr.20031218072017.3100:wrap_lines
#@+at
#@nonl
# Important note: this routine need not deal with leading whitespace.
# Instead, the caller should simply reduce pageWidth by the width of leading
# whitespace wanted, then add that whitespace to the lines returned here.
#
# The key to this code is the invariant that line never ends in whitespace.
#@-at
#@@c
def wrap_lines (lines,pageWidth,firstLineWidth=None):
"""Returns a list of lines, consisting of the input lines wrapped to the given pageWidth."""
if pageWidth < 10:
pageWidth = 10
# First line is special
if not firstLineWidth:
firstLineWidth = pageWidth
if firstLineWidth < 10:
firstLineWidth = 10
outputLineWidth = firstLineWidth
# g.trace(lines)
result = [] # The lines of the result.
line = "" # The line being formed. It never ends in whitespace.
for s in lines:
i = 0
while i < len(s):
assert(len(line) <= outputLineWidth) # DTHEIN 18-JAN-2004
j = g.skip_ws(s,i) # ; ws = s[i:j]
k = g.skip_non_ws(s,j) ; word = s[j:k]
assert(k>i)
i = k
# DTHEIN 18-JAN-2004: wrap at exactly the text width,
# not one character less
#
wordLen = len(word)
if len(line) > 0 and wordLen > 0: wordLen += len(" ")
if wordLen + len(line) <= outputLineWidth:
if wordLen > 0:
#@ << place blank and word on the present line >>
#@+node:ekr.20031218072017.3101:<< place blank and word on the present line >>
if len(line) == 0:
# Just add the word to the start of the line.
line = word
else:
                        # Add the word, preceded by a blank.
line = " ".join([line,word]) # DTHEIN 18-JAN-2004: better syntax
#@-node:ekr.20031218072017.3101:<< place blank and word on the present line >>
#@nl
else: pass # discard the trailing whitespace.
else:
#@ << place word on a new line >>
#@+node:ekr.20031218072017.3102:<< place word on a new line >>
# End the previous line.
if len(line) > 0:
result.append(line)
outputLineWidth = pageWidth # DTHEIN 3-NOV-2002: width for remaining lines
# Discard the whitespace and put the word on a new line.
line = word
# Careful: the word may be longer than pageWidth.
if len(line) > pageWidth: # DTHEIN 18-JAN-2004: line can equal pagewidth
result.append(line)
outputLineWidth = pageWidth # DTHEIN 3-NOV-2002: width for remaining lines
line = ""
#@-node:ekr.20031218072017.3102:<< place word on a new line >>
#@nl
if len(line) > 0:
result.append(line)
# g.trace(result)
return result
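# Editor's example (disabled): wrapping one long line to a 10-column page.
# As noted above, leading whitespace is the caller's responsibility.
if 0:
    assert wrap_lines(["aaa bbb ccc ddd"],10) == ["aaa bbb","ccc ddd"]
    # No returned line ends in whitespace, preserving the invariant above.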
#@-node:ekr.20031218072017.3100:wrap_lines
#@-node:ekr.20031218072017.3099:Commands & Directives
#@+node:ekr.20031218072017.3104:Debugging, Dumping, Timing, Tracing & Sherlock
#@+node:ekr.20031218072017.3105:alert
def alert(message):
g.es('',message)
import tkMessageBox
tkMessageBox.showwarning("Alert", message)
#@-node:ekr.20031218072017.3105:alert
#@+node:ekr.20051023083258:callers & _callerName
def callers (n=8,excludeCaller=True,files=False):
    '''Return a list containing the callers of the function that called g.callers.
If the excludeCaller keyword is True (the default), g.callers is not on the list.
If the files keyword argument is True, filenames are included in the list.
'''
# sys._getframe throws ValueError in both cpython and jython if there are less than i entries.
# The jython stack often has less than 8 entries,
# so we must be careful to call g._callerName with smaller values of i first.
result = []
i = g.choose(excludeCaller,3,2)
while 1:
s = g._callerName(i,files=files)
if s:
result.append(s)
if not s or len(result) >= n: break
i += 1
result.reverse()
sep = g.choose(files,'\n',',')
return sep.join(result)
#@+node:ekr.20031218072017.3107:_callerName
def _callerName (n=1,files=False):
try: # get the function name from the call stack.
f1 = sys._getframe(n) # The stack frame, n levels up.
code1 = f1.f_code # The code object
if files:
return '%s:%s' % (g.shortFilename(code1.co_filename),code1.co_name)
else:
return code1.co_name # The code name
except ValueError:
return '' # The stack is not deep enough.
except Exception:
g.es_exception()
return '' # "<no caller name>"
#@-node:ekr.20031218072017.3107:_callerName
#@-node:ekr.20051023083258:callers & _callerName
#@+node:ekr.20041105091148:g.pdb
def pdb ():
"""Fall into pdb."""
import pdb # Required: we have just defined pdb as a function!
pdb.set_trace()
#@-node:ekr.20041105091148:g.pdb
#@+node:ekr.20031218072017.3108:Dumps
#@+node:ekr.20031218072017.3109:dump
def dump(s):
out = ""
for i in s:
out += str(ord(i)) + ","
return out
def oldDump(s):
out = ""
for i in s:
if i=='\n':
out += "[" ; out += "n" ; out += "]"
        elif i=='\t':
out += "[" ; out += "t" ; out += "]"
elif i==' ':
out += "[" ; out += " " ; out += "]"
else: out += i
return out
#@-node:ekr.20031218072017.3109:dump
#@+node:ekr.20060917120951:es_dump
def es_dump (s,n = 30,title=None):
if title:
g.es_print('',title)
i = 0
while i < len(s):
g.es_print('',''.join(['%2x ' % (ord(ch)) for ch in s[i:i+n]]))
i += n
#@nonl
#@-node:ekr.20060917120951:es_dump
#@+node:ekr.20031218072017.3110:es_error
def es_error (s,color=None):
if color is None and g.app.config: # May not exist during initialization.
color = g.app.config.getColor(None,"log_error_color")
g.es(s,color=color)
#@-node:ekr.20031218072017.3110:es_error
#@+node:ekr.20031218072017.3111:es_event_exception
def es_event_exception (eventName,full=False):
g.es("exception handling ",eventName,"event")
typ,val,tb = sys.exc_info()
if full:
errList = traceback.format_exception(typ,val,tb)
else:
errList = traceback.format_exception_only(typ,val)
for i in errList:
g.es('',i)
if not g.stdErrIsRedirected(): # 2/16/04
traceback.print_exc()
#@-node:ekr.20031218072017.3111:es_event_exception
#@+node:ekr.20031218072017.3112:es_exception
def es_exception (full=True,c=None,color="red"):
# __pychecker__ = '--no-argsused' # c not used. retained for compatibility.
typ,val,tb = sys.exc_info()
# g.trace(full,typ,tb)
fileName,n = g.getLastTracebackFileAndLineNumber()
if full or g.app.debugSwitch > 0:
lines = traceback.format_exception(typ,val,tb)
else:
lines = traceback.format_exception_only(typ,val)
if 0: # We might as well print the entire SyntaxError message.
lines = lines[-1:] # Usually only one line, but more for Syntax errors!
for line in lines:
g.es_error(line,color=color)
if not g.stdErrIsRedirected():
try:
print line
except Exception:
print g.toEncodedString(line,'ascii')
if g.app.debugSwitch > 1:
import pdb # Be careful: g.pdb may or may not have been defined.
pdb.set_trace()
return fileName,n
#@-node:ekr.20031218072017.3112:es_exception
#@+node:ekr.20061015090538:es_exception_type
def es_exception_type (c=None,color="red"):
# exctype is a Exception class object; value is the error message.
exctype, value = sys.exc_info()[:2]
g.es_print('','%s, %s' % (exctype.__name__, value),color=color)
#@-node:ekr.20061015090538:es_exception_type
#@+node:ekr.20040731204831:getLastTracebackFileAndLineNumber
def getLastTracebackFileAndLineNumber():
typ,val,tb = sys.exc_info()
if typ in (exceptions.SyntaxError,exceptions.IndentationError):
# Syntax and indentation errors are a special case.
# extract_tb does _not_ return the proper line number!
# This code is similar to the code in format_exception_only(!!)
try:
# g.es_print('',repr(val))
msg,(filename, lineno, offset, line) = val
return filename,lineno
except Exception:
g.trace("bad line number")
return None,0
else:
# The proper line number is the second element in the last tuple.
data = traceback.extract_tb(tb)
if data:
# g.es_print('',repr(data))
item = data[-1]
filename = item[0]
n = item[1]
return filename,n
else:
return None,0
#@-node:ekr.20040731204831:getLastTracebackFileAndLineNumber
#@+node:ekr.20031218072017.3113:printBindings
def print_bindings (name,window):
bindings = window.bind()
print
print "Bindings for", name
for b in bindings:
print b
#@-node:ekr.20031218072017.3113:printBindings
#@+node:ekr.20031218072017.3114:printGlobals
def printGlobals(message=None):
# Get the list of globals.
globs = list(globals())
globs.sort()
# Print the list.
if message:
leader = "-" * 10
print leader, ' ', message, ' ', leader
for glob in globs:
print glob
#@-node:ekr.20031218072017.3114:printGlobals
#@+node:ekr.20070510074941:g.printEntireTree
def printEntireTree(c,tag=''):
print 'printEntireTree','=' * 50
print 'printEntireTree',tag,'root',c.rootPosition()
for p in c.allNodes_iter():
print '..'*p.level(),p.v
#@nonl
#@-node:ekr.20070510074941:g.printEntireTree
#@+node:ekr.20031218072017.3115:printLeoModules
def printLeoModules(message=None):
# Create the list.
mods = []
for name in sys.modules.keys():
if name and name[0:3] == "leo":
mods.append(name)
# Print the list.
if message:
leader = "-" * 10
print leader, ' ', message, ' ', leader
mods.sort()
for m in mods:
print m,
print
#@-node:ekr.20031218072017.3115:printLeoModules
#@-node:ekr.20031218072017.3108:Dumps
#@+node:ekr.20031218072017.1317:file/module/plugin_date
def module_date (mod,format=None):
theFile = g.os_path_join(app.loadDir,mod.__file__)
root,ext = g.os_path_splitext(theFile)
return g.file_date(root + ".py",format=format)
def plugin_date (plugin_mod,format=None):
theFile = g.os_path_join(app.loadDir,"..","plugins",plugin_mod.__file__)
root,ext = g.os_path_splitext(theFile)
return g.file_date(root + ".py",format=format)
def file_date (theFile,format=None):
    if theFile and len(theFile) and g.os_path_exists(theFile):
try:
n = g.os_path_getmtime(theFile)
if format == None:
format = "%m/%d/%y %H:%M:%S"
return time.strftime(format,time.gmtime(n))
except (ImportError,NameError):
pass # Time module is platform dependent.
return ""
#@-node:ekr.20031218072017.1317:file/module/plugin_date
#@+node:ekr.20031218072017.3121:redirecting stderr and stdout to Leo's log pane
class redirectClass:
"""A class to redirect stdout and stderr to Leo's log pane."""
#@ << redirectClass methods >>
#@+node:ekr.20031218072017.1656:<< redirectClass methods >>
#@+others
#@+node:ekr.20041012082437:redirectClass.__init__
def __init__ (self):
self.old = None
#@-node:ekr.20041012082437:redirectClass.__init__
#@+node:ekr.20041012082437.1:isRedirected
def isRedirected (self):
return self.old != None
#@-node:ekr.20041012082437.1:isRedirected
#@+node:ekr.20041012082437.2:flush
# For LeoN: just for compatibility.
def flush(self, *args):
return
#@-node:ekr.20041012082437.2:flush
#@+node:ekr.20041012091252:rawPrint
def rawPrint (self,s):
if self.old:
self.old.write(s+'\n')
else:
print s
#@-node:ekr.20041012091252:rawPrint
#@+node:ekr.20041012082437.3:redirect
def redirect (self,stdout=1):
if g.app.batchMode:
# Redirection is futile in batch mode.
return
if not self.old:
if stdout:
self.old,sys.stdout = sys.stdout,self
else:
self.old,sys.stderr = sys.stderr,self
#@-node:ekr.20041012082437.3:redirect
#@+node:ekr.20041012082437.4:undirect
def undirect (self,stdout=1):
if self.old:
if stdout:
sys.stdout,self.old = self.old,None
else:
sys.stderr,self.old = self.old,None
#@-node:ekr.20041012082437.4:undirect
#@+node:ekr.20041012082437.5:write
def write(self,s):
if self.old:
if app.log:
app.log.put(s)
else:
self.old.write(s+'\n')
else:
# Can happen when g.batchMode is True.
print s
#@-node:ekr.20041012082437.5:write
#@-others
#@-node:ekr.20031218072017.1656:<< redirectClass methods >>
#@nl
# Create two redirection objects, one for each stream.
redirectStdErrObj = redirectClass()
redirectStdOutObj = redirectClass()
#@<< define convenience methods for redirecting streams >>
#@+node:ekr.20031218072017.3122:<< define convenience methods for redirecting streams >>
#@+others
#@+node:ekr.20041012090942:redirectStderr & redirectStdout
# Redirect streams to the current log window.
def redirectStderr():
global redirectStdErrObj
redirectStdErrObj.redirect(stdout=False)
def redirectStdout():
global redirectStdOutObj
redirectStdOutObj.redirect()
#@-node:ekr.20041012090942:redirectStderr & redirectStdout
#@+node:ekr.20041012090942.1:restoreStderr & restoreStdout
# Restore standard streams.
def restoreStderr():
global redirectStdErrObj
redirectStdErrObj.undirect(stdout=False)
def restoreStdout():
global redirectStdOutObj
redirectStdOutObj.undirect()
#@-node:ekr.20041012090942.1:restoreStderr & restoreStdout
#@+node:ekr.20041012090942.2:stdErrIsRedirected & stdOutIsRedirected
def stdErrIsRedirected():
global redirectStdErrObj
return redirectStdErrObj.isRedirected()
def stdOutIsRedirected():
global redirectStdOutObj
return redirectStdOutObj.isRedirected()
#@-node:ekr.20041012090942.2:stdErrIsRedirected & stdOutIsRedirected
#@+node:ekr.20041012090942.3:rawPrint
# Send output to original stdout.
def rawPrint(s):
global redirectStdOutObj
redirectStdOutObj.rawPrint(s)
#@-node:ekr.20041012090942.3:rawPrint
#@-others
#@-node:ekr.20031218072017.3122:<< define convenience methods for redirecting streams >>
#@nl
if 0: # Test code: may be executed in the child node.
#@ << test code >>
#@+node:ekr.20031218072017.3123:<< test code >>
import leoGlobals as g ; import sys
print >> sys.stdout, "stdout isRedirected:", g.stdOutIsRedirected()
print >> sys.stderr, "stderr isRedirected:", g.stdErrIsRedirected()
# stderr
import leoGlobals as g ; import sys
g.redirectStderr()
print >> sys.stdout, "stdout isRedirected:", g.stdOutIsRedirected()
print >> sys.stderr, "stderr isRedirected:", g.stdErrIsRedirected()
import leoGlobals as g ; import sys
g.restoreStderr()
print >> sys.stdout, "stdout isRedirected:", g.stdOutIsRedirected()
print >> sys.stderr, "stderr isRedirected:", g.stdErrIsRedirected()
# stdout
import leoGlobals as g ; import sys
g.restoreStdout()
print >> sys.stdout, "stdout isRedirected:", g.stdOutIsRedirected()
print >> sys.stderr, "stderr isRedirected:", g.stdErrIsRedirected()
import leoGlobals as g ; import sys
g.redirectStdout()
print >> sys.stdout, "stdout isRedirected:", g.stdOutIsRedirected()
print >> sys.stderr, "stderr isRedirected:", g.stdErrIsRedirected()
#@-node:ekr.20031218072017.3123:<< test code >>
#@nl
#@-node:ekr.20031218072017.3121:redirecting stderr and stdout to Leo's log pane
#@+node:ekr.20031218072017.3127:g.get_line & get_line__after
# Very useful for tracing.
def get_line (s,i):
nl = ""
if g.is_nl(s,i):
i = g.skip_nl(s,i)
nl = "[nl]"
j = g.find_line_start(s,i)
k = g.skip_to_end_of_line(s,i)
return nl + s[j:k]
# Important: getLine is a completely different function.
# getLine = get_line
def get_line_after (s,i):
nl = ""
if g.is_nl(s,i):
i = g.skip_nl(s,i)
nl = "[nl]"
k = g.skip_to_end_of_line(s,i)
return nl + s[i:k]
getLineAfter = get_line_after
#@nonl
#@-node:ekr.20031218072017.3127:g.get_line & get_line__after
#@+node:ekr.20031218072017.3128:pause
def pause (s):
print s
i = 0
while i < 1000000L:
i += 1
#@-node:ekr.20031218072017.3128:pause
#@+node:ekr.20050819064157:print_obj & toString
def print_obj (obj,tag=None,sort=False,verbose=True,indent=''):
if type(obj) in (type(()),type([])):
g.print_list(obj,tag,sort,indent)
elif type(obj) == type({}):
g.print_dict(obj,tag,verbose,indent)
else:
print '%s%s' % (indent,repr(obj).strip())
def toString (obj,tag=None,sort=False,verbose=True,indent=''):
if type(obj) in (type(()),type([])):
return g.listToString(obj,tag,sort,indent)
elif type(obj) == type({}):
return g.dictToString(obj,tag,verbose,indent)
else:
return '%s%s' % (indent,repr(obj).strip())
#@-node:ekr.20050819064157:print_obj & toString
#@+node:ekr.20041224080039:print_dict & dictToString
def print_dict(d,tag='',verbose=True,indent=''):
# __pychecker__ = '--no-argsused'
# verbose unused, but present for compatibility with similar methods.
if not d:
if tag: print '%s...{}' % tag
else: print '{}'
return
keys = d.keys() ; keys.sort()
n = 6
for key in keys:
if type(key) == type(''):
n = max(n,len(key))
if tag: print '%s...{\n' % tag
else: print '{\n'
for key in keys:
print "%s%*s: %s" % (indent,n,key,repr(d.get(key)).strip())
print '}'
printDict = print_dict
def dictToString(d,tag=None,verbose=True,indent=''):
# __pychecker__ = '--no-argsused'
# verbose unused, but present for compatibility with similar methods.
if not d:
if tag: return '%s...{}' % tag
else: return '{}'
keys = d.keys() ; keys.sort()
n = 6
for key in keys:
if type(key) in (type(''),type(u'')):
n = max(n,len(key))
lines = ["%s%*s: %s" % (indent,n,key,repr(d.get(key)).strip()) for key in keys]
s = '\n'.join(lines)
if tag:
return '%s...{\n%s}\n' % (tag,s)
else:
return '{\n%s}\n' % s
#@-node:ekr.20041224080039:print_dict & dictToString
#@+node:ekr.20041126060136:print_list & listToString
def print_list(aList,tag=None,sort=False,indent=''):
if not aList:
if tag: print '%s...[]' % tag
else: print '[]'
return
if sort:
bList = aList[:] # Sort a copy!
bList.sort()
else:
bList = aList
if tag: print '%s...[' % tag
else: print '['
for e in bList:
print '%s%s' % (indent,repr(e).strip())
print ']'
printList = print_list
def listToString(aList,tag=None,sort=False,indent=''):
if not aList:
        if tag: return '%s...[]' % tag
else: return '[]'
if sort:
bList = aList[:] # Sort a copy!
bList.sort()
else:
bList = aList
lines = ["%s%s" % (indent,repr(e).strip()) for e in bList]
s = '\n'.join(lines)
if tag:
return '[%s...\n%s\n]' % (tag,s)
else:
return '[%s]' % s
#@-node:ekr.20041126060136:print_list & listToString
#@+node:ekr.20041122153823:print_stack (printStack)
def print_stack():
traceback.print_stack()
printStack = print_stack
#@-node:ekr.20041122153823:print_stack (printStack)
#@+node:ekr.20031218072017.3129:Sherlock... (trace)
#@+at
#@nonl
# Starting with this release, you will see trace statements throughout the
# code. The trace function is defined in leoGlobals.py; trace implements much
# of the functionality of my Sherlock tracing package. Traces are more
# convenient than print statements for two reasons: 1) you don't need explicit
# trace names and 2) you can disable them without recompiling.
#
# In the following examples, suppose that the call to trace appears in
# function f.
#
# g.trace(string) prints string if tracing for f has been enabled. For
# example, the following statement prints from s[i] to the end of the line if
# tracing for f has been enabled.
#
# j = g.skip_line(s,i) ; g.trace(s[i:j])
#
# g.trace(function) executes the function if tracing for f has been enabled.
# For example,
#
# g.trace(self.f2)
#
# You enable and disable tracing by calling g.init_trace(args). Examples:
#
# g.init_trace("+*") # enable all traces
# g.init_trace("+a","+b") # enable traces for a and b
# g.init_trace(("+a","+b")) # enable traces for a and b
# g.init_trace("-a") # disable tracing for a
# traces = g.init_trace("?") # return the list of enabled traces
#
# If two arguments are supplied to trace, the first argument is the
# "tracepoint name" and the second argument is the "tracepoint action" as
# shown in the examples above. If tracing for the tracepoint name is enabled,
# the tracepoint action is printed (if it is a string) or executed (if it is
# a function name).
#
# "*" will not match an explicit tracepoint name that starts with a minus
# sign. For example,
#
# g.trace_tag("-nocolor", self.disable_color)
#@-at
#@+node:ekr.20031218072017.3130:init_sherlock
# Called by startup code.
# Args are all the arguments on the command line.
def init_sherlock (args):
g.init_trace(args,echo=0)
# g.trace("sys.argv:",sys.argv)
#@-node:ekr.20031218072017.3130:init_sherlock
#@+node:ekr.20031218072017.3131:get_Sherlock_args
#@+at
#@nonl
# If no args are given we attempt to get them from the "SherlockArgs" file.
# If there are still no arguments we trace everything. This default makes
# tracing much more useful in Python.
#@-at
#@@c
def get_Sherlock_args (args):
if not args or len(args)==0:
try:
fn = g.os_path_join(app.loadDir,"SherlockArgs")
f = open(fn)
args = f.readlines()
f.close()
except Exception: pass
elif type(args[0]) == type(("1","2")):
args = args[0] # strip away the outer tuple.
# No args means trace everything.
if not args or len(args)==0: args = ["+*"]
# print "get_Sherlock_args:", args
return args
#@-node:ekr.20031218072017.3131:get_Sherlock_args
#@+node:ekr.20031218072017.3132:init_trace
def init_trace(args,echo=1):
t = app.trace_list
args = g.get_Sherlock_args(args)
for arg in args:
if arg[0] in string.ascii_letters: prefix = '+'
else: prefix = arg[0] ; arg = arg[1:]
if prefix == '?':
print "trace list:", t
elif prefix == '+' and not arg in t:
t.append(string.lower(arg))
if echo:
print "enabling:", arg
elif prefix == '-' and arg in t:
t.remove(string.lower(arg))
if echo:
print "disabling:", arg
else:
print "ignoring:", prefix + arg
#@-node:ekr.20031218072017.3132:init_trace
#@+node:ekr.20031218072017.2317:trace
# Convert all args to strings.
def trace (*args,**keys):
#callers = keys.get("callers",False)
newline = keys.get("newline",True)
align = keys.get("align",0)
s = ""
for arg in args:
if type(arg) == type(u""):
pass
# try: arg = str(arg)
# except Exception: arg = repr(arg)
elif type(arg) != type(""):
arg = repr(arg)
if len(s) > 0:
s = s + " " + arg
else:
s = arg
message = s
try: # get the function name from the call stack.
f1 = sys._getframe(1) # The stack frame, one level up.
code1 = f1.f_code # The code object
name = code1.co_name # The code name
except Exception: name = ''
if name == "?":
name = "<unknown>"
# if callers:
# traceback.print_stack()
if align != 0 and len(name) < abs(align):
pad = ' ' * (abs(align) - len(name))
if align > 0: name = name + pad
else: name = pad + name
message = g.toEncodedString(message,'ascii') # Bug fix: 10/10/07.
if newline:
print name + ": " + message
else:
print name + ": " + message,
#@-node:ekr.20031218072017.2317:trace
#@+node:ekr.20031218072017.2318:trace_tag
# Convert all args to strings.
# Print if tracing for name has been enabled.
def trace_tag (name, *args):
s = ""
for arg in args:
if type(arg) != type(""):
arg = repr(arg)
if len(s) > 0:
s = s + ", " + arg
else:
s = arg
message = s
t = app.trace_list
# tracepoint names starting with '-' must match exactly.
minus = len(name) > 0 and name[0] == '-'
if minus: name = name[1:]
if (not minus and '*' in t) or name.lower() in t:
s = name + ": " + message
print s # Traces _always_ get printed.
#@-node:ekr.20031218072017.2318:trace_tag
#@-node:ekr.20031218072017.3129:Sherlock... (trace)
#@+node:ekr.20031218072017.3133:Statistics
#@+node:ekr.20031218072017.3134:clear_stats
def clear_stats():
g.trace()
g.app.statsDict = {}
clearStats = clear_stats
#@-node:ekr.20031218072017.3134:clear_stats
#@+node:ekr.20031218072017.3135:print_stats
def print_stats (name=None):
if name:
if type(name) != type(""):
name = repr(name)
else:
name = g._callerName(n=2) # Get caller name 2 levels back.
g.printDict(g.app.statsDict,tag='statistics at %s' % name)
printStats = print_stats
#@-node:ekr.20031218072017.3135:print_stats
#@+node:ekr.20031218072017.3136:stat
def stat (name=None):
"""Increments the statistic for name in g.app.statsDict
The caller's name is used by default.
"""
d = g.app.statsDict
if name:
if type(name) != type(""):
name = repr(name)
else:
name = g._callerName(n=2) # Get caller name 2 levels back.
# g.trace(name)
d [name] = 1 + d.get(name,0)
#@-node:ekr.20031218072017.3136:stat
#@-node:ekr.20031218072017.3133:Statistics
#@+node:ekr.20031218072017.3137:Timing
# pychecker bug: pychecker complains that there is no attribute time.clock
def getTime():
return time.clock()
def esDiffTime(message, start):
g.es('',"%s %6.3f" % (message,(time.clock()-start)))
return time.clock()
def printDiffTime(message, start):
print "%s %6.3f" % (message,(time.clock()-start))
return time.clock()
#@-node:ekr.20031218072017.3137:Timing
#@-node:ekr.20031218072017.3104:Debugging, Dumping, Timing, Tracing & Sherlock
#@+node:ekr.20031218072017.3116:Files & Directories...
#@+node:ekr.20031218072017.3117:g.create_temp_file
def create_temp_file (textMode=False):
'''Return a tuple (theFile,theFileName)
theFile: a file object open for writing.
theFileName: the name of the temporary file.'''
# mktemp is deprecated, but we can't get rid of it
# because mkstemp does not exist in Python 2.2.1.
# __pychecker__ = '--no-deprecate'
try:
        # fd is a handle to an open file as would be returned by os.open()
fd,theFileName = tempfile.mkstemp(text=textMode)
mode = g.choose(textMode,'w','wb')
theFile = os.fdopen(fd,mode)
# g.trace(fd,theFile)
except AttributeError:
# g.trace("mkstemp doesn't exist")
theFileName = tempfile.mktemp()
try:
mode = g.choose(textMode,'w','wb')
theFile = file(theFileName,mode)
except IOError:
theFile,theFileName = None,''
except Exception:
g.es('Unexpected exception in g.create_temp_file',color='red')
g.es_exception()
theFile,theFileName = None,''
return theFile,theFileName
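# Editor's usage sketch (disabled): write to the temporary file, then remove it.
if 0:
    theFile,theFileName = create_temp_file(textMode=True)
    if theFile:
        theFile.write('hello\n')
        theFile.close()
        os.remove(theFileName)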
#@-node:ekr.20031218072017.3117:g.create_temp_file
#@+node:ekr.20031218072017.3118:g.ensure_extension
def ensure_extension (name, ext):
theFile, old_ext = g.os_path_splitext(name)
if not name:
return name # don't add to an empty name.
elif old_ext and old_ext == ext:
return name
else:
return name + ext
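if 0: # Illustrative sketch only (never called by Leo): the expected behavior of ensure_extension.
    print g.ensure_extension('notes','.txt')     # 'notes.txt'
    print g.ensure_extension('notes.txt','.txt') # 'notes.txt': extension already present.
    print g.ensure_extension('notes.md','.txt')  # 'notes.md.txt': a different extension is kept.
    print g.ensure_extension('','.txt')          # '': empty names are left alone.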
#@-node:ekr.20031218072017.3118:g.ensure_extension
#@+node:ekr.20031218072017.1264:g.getBaseDirectory
# Handles the conventions applying to the "relative_path_base_directory" configuration option.
def getBaseDirectory(c):
base = app.config.relative_path_base_directory
if base and base == "!":
base = app.loadDir
elif base and base == ".":
base = c.openDirectory
# g.trace(base)
if base and len(base) > 0 and g.os_path_isabs(base):
# Set c.chdir_to_relative_path as needed.
if not hasattr(c,'chdir_to_relative_path'):
c.chdir_to_relative_path = c.config.getBool('chdir_to_relative_path')
# Call os.chdir if requested.
if c.chdir_to_relative_path:
os.chdir(base)
return base # base need not exist yet.
else:
return "" # No relative base given.
#@-node:ekr.20031218072017.1264:g.getBaseDirectory
#@+node:EKR.20040504154039:g.is_sentinel
def is_sentinel (line,delims):
#@ << is_sentinel doc tests >>
#@+node:ekr.20040719161756:<< is_sentinel doc tests >>
"""
Return True if line starts with a sentinel comment.
>>> py_delims = comment_delims_from_extension('.py')
>>> is_sentinel("#@+node",py_delims)
True
>>> is_sentinel("#comment",py_delims)
False
>>> c_delims = comment_delims_from_extension('.c')
>>> is_sentinel("//@+node",c_delims)
True
>>> is_sentinel("//comment",c_delims)
False
>>> html_delims = comment_delims_from_extension('.html')
>>> is_sentinel("<!--@+node-->",html_delims)
True
>>> is_sentinel("<!--comment-->",html_delims)
False
"""
#@-node:ekr.20040719161756:<< is_sentinel doc tests >>
#@nl
delim1,delim2,delim3 = delims
line = line.lstrip()
if delim1:
return line.startswith(delim1+'@')
elif delim2 and delim3:
i = line.find(delim2+'@')
j = line.find(delim3)
return 0 == i < j
else:
print repr(delims)
g.es("Can't happen: is_sentinel",color="red")
return False
#@-node:EKR.20040504154039:g.is_sentinel
#@+node:ekr.20071114113736:g.makePathRelativeTo
def makePathRelativeTo (fullPath,basePath):
if fullPath.startswith(basePath):
s = fullPath[len(basePath):]
if s.startswith(os.path.sep):
s = s[len(os.path.sep):]
return s
else:
return fullPath
#@-node:ekr.20071114113736:g.makePathRelativeTo
#@+node:ekr.20031218072017.3119:g.makeAllNonExistentDirectories
# This is a generalization of os.makedirs.
def makeAllNonExistentDirectories (theDir,c=None):
"""Attempt to make all non-existent directories"""
# g.trace('theDir',theDir,c.config.create_nonexistent_directories,g.callers())
if c:
if not c.config.create_nonexistent_directories:
return None
elif not app.config.create_nonexistent_directories:
return None
dir1 = theDir = g.os_path_normpath(theDir)
# Split theDir into all its component parts.
paths = []
while len(theDir) > 0:
head,tail=g.os_path_split(theDir)
if len(tail) == 0:
paths.append(head)
break
else:
paths.append(tail)
theDir = head
path = ""
paths.reverse()
for s in paths:
path = g.os_path_join(path,s)
if not g.os_path_exists(path):
try:
os.mkdir(path)
g.es("created directory:",path)
except Exception:
g.es("exception creating directory:",path)
g.es_exception()
return None
return dir1 # All have been created.
#@-node:ekr.20031218072017.3119:g.makeAllNonExistentDirectories
#@+node:ekr.20031218072017.2052:g.openWithFileName
def openWithFileName(fileName,old_c,
enableLog=True,gui=None,readAtFileNodesFlag=True):
"""Create a Leo Frame for the indicated fileName if the file exists."""
if not fileName or len(fileName) == 0:
return False, None
def munge(name):
return g.os_path_normpath(name or '').lower()
# Create a full, normalized, Unicode path name, preserving case.
relativeFileName = g.os_path_normpath(fileName)
fileName = g.os_path_normpath(g.os_path_abspath(fileName))
# g.trace(relativeFileName,'-->',fileName)
# If the file is already open just bring its window to the front.
theList = app.windowList
for frame in theList:
if munge(fileName) == munge(frame.c.mFileName):
frame.bringToFront()
frame.c.setLog()
return True, frame
if old_c:
# New in 4.4: We must read the file *twice*.
# The first time sets settings for the later call to c.finishCreate.
# g.trace('***** prereading',fileName)
c2 = g.app.config.openSettingsFile(fileName)
if c2: g.app.config.updateSettings(c2,localFlag=True)
g.doHook('open0')
# Open the file in binary mode to allow 0x1a in bodies & headlines.
theFile,isZipped = g.openLeoOrZipFile(fileName)
if not theFile: return False, None
c,frame = app.newLeoCommanderAndFrame(
fileName=fileName,
relativeFileName=relativeFileName,
gui=gui)
c.isZipped = isZipped
frame.log.enable(enableLog)
g.app.writeWaitingLog() # New in 4.3: write queued log first.
c.beginUpdate()
try:
if not g.doHook("open1",old_c=old_c,c=c,new_c=c,fileName=fileName):
c.setLog()
app.lockLog()
frame.c.fileCommands.open(
theFile,fileName,
readAtFileNodesFlag=readAtFileNodesFlag) # closes file.
app.unlockLog()
for z in g.app.windowList: # Bug fix: 2007/12/07: don't change frame var.
# The recent files list has been updated by menu.updateRecentFiles.
z.c.config.setRecentFiles(g.app.config.recentFiles)
# Bug fix in 4.4.
frame.openDirectory = g.os_path_abspath(g.os_path_dirname(fileName))
g.doHook("open2",old_c=old_c,c=c,new_c=frame.c,fileName=fileName)
finally:
c.endUpdate()
# chapterController.finishCreate must be called after the first real redraw
# because it requires a valid value for c.rootPosition().
if frame.c.chapterController:
frame.c.chapterController.finishCreate()
k = c.k
if k: k.setInputState(k.unboundKeyAction)
if c.config.getBool('outline_pane_has_initial_focus'):
c.treeWantsFocusNow()
else:
c.bodyWantsFocusNow()
return True, frame
#@nonl
#@-node:ekr.20031218072017.2052:g.openWithFileName
#@+node:ekr.20070412082527:g.openLeoOrZipFile
def openLeoOrZipFile (fileName):
try:
isZipped = zipfile.is_zipfile(fileName)
if isZipped:
theFile = zipfile.ZipFile(fileName,'r')
# g.trace('opened zip file',theFile)
else:
theFile = file(fileName,'rb')
return theFile,isZipped
except IOError:
# Do not use string + here: it will fail for non-ascii strings!
if not g.unitTesting:
g.es("can not open:",fileName,color="blue")
return None,False
#@nonl
#@-node:ekr.20070412082527:g.openLeoOrZipFile
#@+node:ekr.20031218072017.3120:g.readlineForceUnixNewline (Steven P. Schaefer)
#@+at
#@nonl
# Stephen P. Schaefer 9/7/2002
#
# The Unix readline() routine delivers "\r\n" line end strings verbatim, while
# the windows versions force the string to use the Unix convention of using
# only "\n". This routine causes the Unix readline to do the same.
#@-at
#@@c
def readlineForceUnixNewline(f):
s = f.readline()
if len(s) >= 2 and s[-2] == "\r" and s[-1] == "\n":
s = s[0:-2] + "\n"
return s
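if 0: # Illustrative sketch only: show the newline translation on a file-like object.
    import StringIO # Python 2 module; any object with a readline() method will do.
    f = StringIO.StringIO('line 1\r\nline 2\n')
    print repr(g.readlineForceUnixNewline(f)) # 'line 1\n': the '\r' is dropped.
    print repr(g.readlineForceUnixNewline(f)) # 'line 2\n': already Unix-style.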
#@-node:ekr.20031218072017.3120:g.readlineForceUnixNewline (Steven P. Schaefer)
#@+node:ekr.20031218072017.3124:g.sanitize_filename
def sanitize_filename(s):
"""Prepares string s to be a valid file name:
- substitute '_' whitespace and characters used special path characters.
- eliminate all other non-alphabetic characters.
- strip leading and trailing whitespace.
- return at most 128 characters."""
result = ""
for ch in s.strip():
if ch in string.ascii_letters:
result += ch
elif ch in string.whitespace: # Translate whitespace.
result += '_'
elif ch in ('.','\\','/',':'): # Translate special path characters.
result += '_'
while 1:
n = len(result)
result = result.replace('__','_')
if len(result) == n:
break
result = result.strip()
return result [:128]
#@-node:ekr.20031218072017.3124:g.sanitize_filename
#@+node:ekr.20060328150113:g.setGlobalOpenDir
def setGlobalOpenDir (fileName):
if fileName:
g.app.globalOpenDir = g.os_path_dirname(fileName)
# g.es('current directory:',g.app.globalOpenDir)
#@-node:ekr.20060328150113:g.setGlobalOpenDir
#@+node:ekr.20031218072017.3125:g.shortFileName & shortFilename
def shortFileName (fileName):
return g.os_path_basename(fileName)
shortFilename = shortFileName
#@-node:ekr.20031218072017.3125:g.shortFileName & shortFilename
#@+node:ekr.20050104135720:Used by tangle code & leoFileCommands
#@+node:ekr.20031218072017.1241:g.update_file_if_changed
# This is part of the tangle code.
def update_file_if_changed(c,file_name,temp_name):
"""Compares two files.
If they are different, we replace file_name with temp_name.
Otherwise, we just delete temp_name. Both files should be closed."""
if g.os_path_exists(file_name):
if filecmp.cmp(temp_name, file_name):
kind = 'unchanged'
ok = g.utils_remove(temp_name)
else:
kind = '***updating'
mode = g.utils_stat(file_name)
ok = g.utils_rename(c,temp_name,file_name,mode)
else:
kind = 'creating'
ok = g.utils_rename(c,temp_name,file_name)
if ok:
g.es('','%12s: %s' % (kind,file_name))
else:
g.es("rename failed: no file created!",color="red")
g.es('',file_name," may be read-only or in use")
#@-node:ekr.20031218072017.1241:g.update_file_if_changed
#@+node:ekr.20050104123726.3:g.utils_remove
def utils_remove (fileName,verbose=True):
try:
os.remove(fileName)
return True
except Exception:
if verbose:
g.es("exception removing:",fileName)
g.es_exception()
return False
#@-node:ekr.20050104123726.3:g.utils_remove
#@+node:ekr.20031218072017.1263:g.utils_rename
#@<< about os.rename >>
#@+node:ekr.20050104123726.1:<< about os.rename >>
#@+at
#@nonl
# Here is the Python 2.4 documentation for rename (same as Python 2.3):
#
# Rename the file or directory src to dst. If dst is a directory, OSError
# will be raised.
#
# On Unix, if dst exists and is a file, it will be removed silently if the
# user has permission. The operation may fail on some Unix flavors if src
# and dst are on different filesystems. If successful, the renaming will be
# an atomic operation (this is a POSIX requirement).
#
# On Windows, if dst already exists, OSError will be raised even if it is a
# file; there may be no way to implement an atomic rename when dst names an
# existing file.
#@-at
#@-node:ekr.20050104123726.1:<< about os.rename >>
#@nl
def utils_rename (c,src,dst,mode=None,verbose=True):
'''Platform independent rename.'''
head, tail = g.os_path_split(dst)
if head and len(head) > 0:
g.makeAllNonExistentDirectories(head,c=c)
if g.os_path_exists(dst):
if not g.utils_remove(dst):
return False
try:
# New in Leo 4.4b1: try using shutil first.
try:
import shutil # shutil is new in Python 2.3
shutil.move(src,dst)
except ImportError:
if sys.platform == "win32":
os.rename(src,dst)
else:
try:
# Alas, distutils.file_util may not exist.
from distutils.file_util import move_file
move_file(src,dst)
except ImportError:
# Desperation: may give: 'Invalid cross-device link'
os.rename(src,dst)
if mode:
g.utils_chmod(dst,mode,verbose)
return True
except Exception:
if verbose:
g.es('Exception renaming',src,'to',dst,color='red')
g.es_exception(full=False)
return False
#@-node:ekr.20031218072017.1263:g.utils_rename
#@+node:ekr.20050104124903:g.utils_chmod
def utils_chmod (fileName,mode,verbose=True):
if mode is None:
return
try:
os.chmod(fileName,mode)
except Exception:
if verbose:
g.es("exception in os.chmod",fileName)
g.es_exception()
#@-node:ekr.20050104124903:g.utils_chmod
#@+node:ekr.20050104123726.4:g.utils_stat
def utils_stat (fileName):
'''Return the access mode of named file, removing any setuid, setgid, and sticky bits.'''
try:
mode = (os.stat(fileName))[0] & 0777
except Exception:
mode = None
return mode
#@-node:ekr.20050104123726.4:g.utils_stat
#@-node:ekr.20050104135720:Used by tangle code & leoFileCommands
#@-node:ekr.20031218072017.3116:Files & Directories...
#@+node:ekr.20031218072017.1588:Garbage Collection
# debugGC = False # Must be true to enable traces below.
lastObjectCount = 0
lastObjectsDict = {}
lastTypesDict = {}
lastFunctionsDict = {}
#@+others
#@+node:ekr.20031218072017.1589:clearAllIvars
def clearAllIvars (o):
"""Clear all ivars of o, a member of some class."""
if o:
o.__dict__.clear()
#@-node:ekr.20031218072017.1589:clearAllIvars
#@+node:ekr.20031218072017.1590:collectGarbage
def collectGarbage():
try:
if not g.app.trace_gc_inited:
g.enable_gc_debug()
if g.app.trace_gc_verbose or g.app.trace_gc_calls:
# print('Collecting garbage',g.callers())
print 'collectGarbage:'
gc.collect()
except Exception:
pass
# Only init once, regardless of what happens.
g.app.trace_gc_inited = True
#@-node:ekr.20031218072017.1590:collectGarbage
#@+node:ekr.20060127162818:enable_gc_debug
no_gc_message = False
def enable_gc_debug(event=None):
if gc:
if g.app.trace_gc_verbose:
gc.set_debug(
gc.DEBUG_STATS | # prints statistics.
gc.DEBUG_LEAK | # Same as all below.
gc.DEBUG_COLLECTABLE |
gc.DEBUG_UNCOLLECTABLE |
gc.DEBUG_INSTANCES |
gc.DEBUG_OBJECTS |
gc.DEBUG_SAVEALL
)
# else:
# gc.set_debug(gc.DEBUG_STATS)
elif not g.no_gc_message:
g.no_gc_message = True
g.es('Can not import gc module',color='blue')
#@-node:ekr.20060127162818:enable_gc_debug
#@+node:ekr.20031218072017.1592:printGc
# Formerly called from unit tests.
def printGc(tag=None):
if not g.app.trace_gc: return None
tag = tag or g._callerName(n=2)
printGcObjects(tag=tag)
printGcRefs(tag=tag)
if g.app.trace_gc_verbose:
printGcVerbose(tag=tag)
#@+node:ekr.20031218072017.1593:printGcRefs
def printGcRefs (tag=''):
refs = gc.get_referrers(app.windowList[0])
print('-' * 30,tag)
if g.app.trace_gc_verbose:
print("refs of", app.windowList[0])
for ref in refs:
print(type(ref))
else:
print("%d referers" % len(refs))
#@-node:ekr.20031218072017.1593:printGcRefs
#@-node:ekr.20031218072017.1592:printGc
#@+node:ekr.20060202161935:printGcAll
def printGcAll (tag=''):
# Suppress warning about keywords arg not supported in sort.
tag = tag or g._callerName(n=2)
d = {} ; objects = gc.get_objects()
print('-' * 30)
print('%s: %d objects' % (tag,len(objects)))
for obj in objects:
t = type(obj)
        if t == types.InstanceType: # Bug fix: compare to the type, not the string 'instance'.
try: t = obj.__class__
except Exception: pass
# if type(obj) == type(()):
# print id(obj),repr(obj)
d[t] = d.get(t,0) + 1
if 1: # Sort by n
items = d.items()
try:
            # Support for keyword args to the sort function exists in Python 2.4.
# Support for None as an alternative to omitting cmp exists in Python 2.3.
items.sort(key=lambda x: x[1],reverse=True)
except Exception: pass
for z in items:
print '%40s %7d' % (z[0],z[1])
else: # Sort by type
keys = d.keys() ; keys.sort()
for t in keys:
print '%40s %7d' % (t,d.get(t))
#@-node:ekr.20060202161935:printGcAll
#@+node:ekr.20060127164729.1:printGcObjects (printNewObjects=pno)
def printGcObjects(tag=''):
'''Print newly allocated objects.'''
tag = tag or g._callerName(n=2)
global lastObjectCount
try:
n = len(gc.garbage)
n2 = len(gc.get_objects())
delta = n2-lastObjectCount
if delta == 0: return
lastObjectCount = n2
#@ << print number of each type of object >>
#@+node:ekr.20040703054646:<< print number of each type of object >>
global lastTypesDict
typesDict = {}
    for obj in gc.get_objects():
        t = type(obj)
        if t == types.InstanceType: # Bug fix: compare to the type, not the string 'instance'.
            try: t = obj.__class__
            except Exception: pass
        if t != types.FrameType:
            r = repr(t) # was type(obj) instead of repr(t)
            n = typesDict.get(r,0)
            typesDict[r] = n + 1
# Create the union of all the keys.
keys = typesDict.keys()
for key in lastTypesDict.keys():
if key not in keys:
keys.append(key)
empty = True
for key in keys:
n3 = lastTypesDict.get(key,0)
n4 = typesDict.get(key,0)
delta2 = n4-n3
if delta2 != 0:
empty = False
break
if not empty:
# keys = [repr(key) for key in keys]
keys.sort()
print '-' * 30
print "%s: garbage: %d, objects: %d, delta: %d" % (tag,n,n2,delta)
if 0:
for key in keys:
n1 = lastTypesDict.get(key,0)
n2 = typesDict.get(key,0)
delta2 = n2-n1
if delta2 != 0:
print("%+6d =%7d %s" % (delta2,n2,key))
lastTypesDict = typesDict
typesDict = {}
#@-node:ekr.20040703054646:<< print number of each type of object >>
#@nl
if 0:
#@ << print added functions >>
#@+node:ekr.20040703065638:<< print added functions >>
# import types
import inspect
global lastFunctionsDict
funcDict = {}
# Don't print more than 50 objects.
n = 0
for obj in gc.get_objects():
if type(obj) == types.FunctionType:
n += 1
for obj in gc.get_objects():
if type(obj) == types.FunctionType:
key = repr(obj) # Don't create a pointer to the object!
funcDict[key]=None
if n < 50 and not lastFunctionsDict.has_key(key):
print(obj)
args, varargs, varkw,defaults = inspect.getargspec(obj)
print("args", args)
if varargs: print("varargs",varargs)
if varkw: print("varkw",varkw)
if defaults:
print("defaults...")
for s in defaults: print(s)
lastFunctionsDict = funcDict
funcDict = {}
#@-node:ekr.20040703065638:<< print added functions >>
#@nl
except Exception:
traceback.print_exc()
printNewObjects = pno = printGcObjects
#@-node:ekr.20060127164729.1:printGcObjects (printNewObjects=pno)
#@+node:ekr.20060205043324.1:printGcSummary
def printGcSummary (tag=''):
tag = tag or g._callerName(n=2)
g.enable_gc_debug()
try:
n = len(gc.garbage)
n2 = len(gc.get_objects())
s = '%s: printGCSummary: garbage: %d, objects: %d' % (tag,n,n2)
print s
except Exception:
traceback.print_exc()
#@-node:ekr.20060205043324.1:printGcSummary
#@+node:ekr.20060127165509:printGcVerbose
# WARNING: the id trick is not proper because newly allocated objects
# can have the same address as old objects.
def printGcVerbose(tag=''):
tag = tag or g._callerName(n=2)
global lastObjectsDict
objects = gc.get_objects()
newObjects = [o for o in objects if not lastObjectsDict.has_key(id(o))]
lastObjectsDict = {}
for o in objects:
lastObjectsDict[id(o)]=o
dicts = 0 ; seqs = 0
i = 0 ; n = len(newObjects)
while i < 100 and i < n:
o = newObjects[i]
if type(o) == type({}): dicts += 1
elif type(o) in (type(()),type([])):
#print id(o),repr(o)
seqs += 1
#else:
# print(o)
i += 1
print('=' * 40)
print('dicts: %d, sequences: %d' % (dicts,seqs))
print("%s: %d new, %d total objects" % (tag,len(newObjects),len(objects)))
print('-' * 40)
#@-node:ekr.20060127165509:printGcVerbose
#@-others
#@-node:ekr.20031218072017.1588:Garbage Collection
#@+node:ekr.20031218072017.3139:Hooks & plugins (leoGlobals)
#@+node:ekr.20031218072017.1315:idle time functions (leoGlobals)
#@+node:EKR.20040602125018:enableIdleTimeHook
#@+at
#@nonl
# Enables the "idle" hook.
# After enableIdleTimeHook is called, Leo will call the "idle" hook
# approximately every g.idleTimeDelay milliseconds.
#@-at
#@@c
def enableIdleTimeHook(idleTimeDelay=100):
if not g.app.idleTimeHook:
# g.trace('start idle-time hook: %d msec.' % idleTimeDelay)
# Start idle-time processing only after the first idle-time event.
g.app.gui.setIdleTimeHook(g.idleTimeHookHandler)
g.app.afterHandler = g.idleTimeHookHandler
# 1/4/05: Always update these.
g.app.idleTimeHook = True
g.app.idleTimeDelay = idleTimeDelay # Delay in msec.
#@-node:EKR.20040602125018:enableIdleTimeHook
#@+node:EKR.20040602125018.1:disableIdleTimeHook
# Disables the "idle" hook.
def disableIdleTimeHook():
g.app.idleTimeHook = False
#@-node:EKR.20040602125018.1:disableIdleTimeHook
#@+node:EKR.20040602125018.2:idleTimeHookHandler
# An internal routine used to dispatch the "idle" hook.
trace_count = 0
def idleTimeHookHandler(*args,**keys):
# __pychecker__ = '--no-argsused' # args & keys not used.
if 0: # Do not use g.trace here!
global trace_count ; trace_count += 1
if 1:
print 'idleTimeHookHandler',trace_count
else:
if trace_count % 10 == 0:
for z in g.app.windowList:
c = z.c
print "idleTimeHookHandler",trace_count,c.shortFileName()
# New for Python 2.3: may be called during shutdown.
if g.app.killed: return
for z in g.app.windowList:
c = z.c
# Do NOT compute c.currentPosition.
# This would be a MAJOR leak of positions.
g.doHook("idle",c=c)
# Requeue this routine after g.app.idleTimeDelay msec.
# (This delay is set by g.enableIdleTimeHook.)
# Faster requeues overload the system.
if g.app.idleTimeHook:
g.app.gui.setIdleTimeHookAfterDelay(g.idleTimeHookHandler)
g.app.afterHandler = g.idleTimeHookHandler
else:
g.app.afterHandler = None
#@nonl
#@-node:EKR.20040602125018.2:idleTimeHookHandler
#@-node:ekr.20031218072017.1315:idle time functions (leoGlobals)
#@+node:ekr.20031218072017.1596:g.doHook
#@+at
#@nonl
# This global function calls a hook routine. Hooks are identified by the
# tag param. Returns the value returned by the hook routine, or None if
# there is an exception.
#
# We look for a hook routine in three places:
# 1. c.hookFunction
# 2. app.hookFunction
# 3. leoPlugins.doPlugins()
# We set app.hookError on all exceptions. Scripts may reset app.hookError to
# try again.
#@-at
#@@c
def doHook(tag,*args,**keywords):
if g.app.killed or g.app.hookError: # or (g.app.gui and g.app.gui.isNullGui):
return None
if args:
# A minor error in Leo's core.
print "***ignoring args param. tag = %s" % tag
if not g.app.config.use_plugins:
if tag in ('open0','start1'):
s = "Plugins disabled: use_plugins is 0 in a leoSettings.leo file."
g.es_print(s,color="blue")
return None
# Get the hook handler function. Usually this is doPlugins.
c = keywords.get("c")
f = (c and c.hookFunction) or g.app.hookFunction
if not f:
import leoPlugins
g.app.hookFunction = f = leoPlugins.doPlugins
try:
# Pass the hook to the hook handler.
# print 'doHook',f.__name__,keywords.get('c')
return f(tag,keywords)
except Exception:
g.es_exception()
        g.app.hookError = True # Suppress this function.
        g.app.idleTimeHook = False # Suppress idle-time hook.
return None # No return value
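if 0: # Illustrative sketch only: the shape of a hook handler, as plugins define them.
    # The name below is hypothetical; plugins typically register handlers
    # via leoPlugins.registerHandler rather than setting hookFunction.
    def onIdleExample (tag,keywords):
        '''Handle the "idle" hook: tag is the hook name, keywords the hook arguments.'''
        c = keywords.get('c') # The commander, when one applies to the hook.
        if c: pass # Do something cheap here: idle hooks fire repeatedly.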
#@-node:ekr.20031218072017.1596:g.doHook
#@+node:ekr.20031218072017.1318:g.plugin_signon
def plugin_signon(module_name,verbose=False):
# The things we do to keep pychecker happy...
m = g.Bunch(__name__='',__version__='')
exec("import %s ; m = %s" % (module_name,module_name))
# print 'plugin_signon',module_name # ,'gui',g.app.gui
if verbose:
g.es('',"...%s.py v%s: %s" % (
m.__name__, m.__version__, g.plugin_date(m)))
print m.__name__, m.__version__
app.loadedPlugins.append(module_name)
#@-node:ekr.20031218072017.1318:g.plugin_signon
#@-node:ekr.20031218072017.3139:Hooks & plugins (leoGlobals)
#@+node:ekr.20031218072017.3145:Most common functions...
# These are guaranteed always to exist for scripts.
#@+node:ekr.20031218072017.3147:choose
def choose(cond, a, b): # warning: evaluates all arguments
if cond: return a
else: return b
#@-node:ekr.20031218072017.3147:choose
#@+node:ekr.20031218072017.1474:enl, ecnl & ecnls
def ecnl(tabName='Log'):
g.ecnls(1,tabName)
def ecnls(n,tabName='Log'):
log = app.log
if log and not log.isNull:
while log.newlines < n:
g.enl(tabName)
def enl(tabName='Log'):
log = app.log
if log and not log.isNull:
log.newlines += 1
log.putnl(tabName)
#@-node:ekr.20031218072017.1474:enl, ecnl & ecnls
#@+node:ekr.20070626132332:es & minitest
def es(s,*args,**keys):
# print 'es','app.log',repr(app.log),'log.isNull',not app.log or app.log.isNull,repr(s)
# print 'es',repr(s)
log = app.log
if app.killed:
return
# Important: defining keyword arguments in addition to *args **does not work**.
# See Section 5.3.4 (Calls) of the Python reference manual.
# In other words, the following is about the best that can be done.
color = keys.get('color')
    # Accept both boolean and string values for these keyword switches.
    commas = keys.get('commas') in (True,'True') # default is False
    newline = keys.get('newline') not in (False,'False') # default is True
    spaces = keys.get('spaces') not in (False,'False') # default is True
tabName = keys.get('tabName','Log')
# Default goes to log pane *not* the presently active pane.
if color == 'suppress': return # New in 4.3.
if type(s) != type("") and type(s) != type(u""):
s = repr(s)
s = g.translateArgs(s,args,commas,spaces)
if app.batchMode:
if app.log:
app.log.put(s)
elif g.unitTesting:
if log and not log.isNull:
s = g.toEncodedString(s,'ascii')
if newline: print s
else: print s,
else:
if log and log.isNull:
pass
elif log:
log.put(s,color=color,tabName=tabName)
for ch in s:
if ch == '\n': log.newlines += 1
else: log.newlines = 0
if newline:
g.ecnl(tabName=tabName) # only valid here
elif newline:
app.logWaiting.append((s+'\n',color),)
else:
app.logWaiting.append((s,color),)
#@+node:ekr.20071024101611:mini test of es
#@@nocolor
#@@first
#@@first
#@+at
#
# This doesn't work as an external unit test.
# To test, select all following lines and do execute-script.
#
# s1 = 'line1 Ä, ڱ, 궯, 奠 end'
# s2 = g.toUnicode(s1,'utf-8')
#
# for s in (s1,s2):
# g.es(s)
# g.es_print(s)
#@-at
#@-node:ekr.20071024101611:mini test of es
#@-node:ekr.20070626132332:es & minitest
#@+node:ekr.20050707064040:es_print
# see: http://www.diveintopython.org/xml_processing/unicode.html
def es_print(s,*args,**keys):
encoding = sys.getdefaultencoding()
# Important: defining keyword arguments in addition to *args **does not work**.
# See Section 5.3.4 (Calls) of the Python reference manual.
# In other words, the following is about the best that can be done.
    # Accept both boolean and string values for these keyword switches.
    commas = keys.get('commas') in (True,'True') # default is False
    newline = keys.get('newline') not in (False,'False') # default is True
    spaces = keys.get('spaces') not in (False,'False') # default is True
try:
if type(s) != type(u''):
s = unicode(s,encoding)
except Exception:
s = g.toEncodedString(s,'ascii')
s2 = g.translateArgs(s,args,commas,spaces)
if newline:
try:
print s2
except Exception:
print g.toEncodedString(s2,'ascii')
else:
try:
print s2,
except Exception:
print g.toEncodedString(s2,'ascii'),
if g.app.gui and not g.app.gui.isNullGui and not g.unitTesting:
g.es(s,*args,**keys)
#@+node:ekr.20070621092938:@@test g.es_print
if g.unitTesting:
g.es_print('\ntest of es_print: Ă',color='red',newline=False)
g.es_print('after')
g.es_print('done')
#@-node:ekr.20070621092938:@@test g.es_print
#@-node:ekr.20050707064040:es_print
#@+node:ekr.20050707065530:es_trace
def es_trace(s,*args,**keys):
g.trace(g.toEncodedString(s,'ascii'))
g.es(s,*args,**keys)
#@-node:ekr.20050707065530:es_trace
#@+node:ekr.20080220111323:translateArgs
def translateArgs (s,args,commas,spaces):
'''Return the concatenation of s and all args,
with odd args translated.'''
# Print the translated strings, but retain s for the later call to g.es.
result = []
if s:
result.append(g.translateString(s))
n = 1
for arg in args:
n += 1
if type(arg) != type("") and type(arg) != type(u""):
arg = repr(arg)
elif (n % 2) == 1:
arg = g.translateString(arg)
if arg:
if result:
# if commas: result.append(',')
if spaces: result.append(' ')
result.append(arg)
return ''.join(result)
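if 0: # Illustrative sketch only: how translateArgs joins and translates.
    # With no message catalog installed, gettext returns its argument unchanged,
    # so this shows only the joining/spacing behavior: s is translated,
    # data args such as the path are passed through untranslated.
    s = g.translateArgs('created directory:',('/tmp/x',),commas=False,spaces=True)
    print s # 'created directory: /tmp/x'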
#@-node:ekr.20080220111323:translateArgs
#@+node:ekr.20060810095921:translateString
def translateString (s):
'''Return the translated text of s.'''
if g.app.translateToUpperCase:
return s.upper()
else:
return gettext.gettext(s)
#@-node:ekr.20060810095921:translateString
#@+node:ekr.20031218072017.3148:top
if 0: # An extremely dangerous function.
def top():
"""Return the commander of the topmost window"""
# Warning: may be called during startup or shutdown when nothing exists.
try:
return app.log.c
except Exception:
return None
#@-node:ekr.20031218072017.3148:top
#@+node:ekr.20031218072017.3149:trace is defined below
#@-node:ekr.20031218072017.3149:trace is defined below
#@+node:ekr.20031218072017.3150:windows
def windows():
return app.windowList
#@-node:ekr.20031218072017.3150:windows
#@-node:ekr.20031218072017.3145:Most common functions...
#@+node:ekr.20031218072017.2145:os.path wrappers (leoGlobals.py)
#@+at
#@nonl
# Note: all these methods return Unicode strings. It is up to the user to
# convert to an encoded string as needed, say when opening a file.
#@-at
#@+node:ekr.20031218072017.2146:os_path_abspath
def os_path_abspath(path,encoding=None):
"""Convert a path to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.abspath(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2146:os_path_abspath
#@+node:ekr.20031218072017.2147:os_path_basename
def os_path_basename(path,encoding=None):
"""Return the second half of the pair returned by split(path)."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.basename(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2147:os_path_basename
#@+node:ekr.20031218072017.2148:os_path_dirname
def os_path_dirname(path,encoding=None):
"""Return the first half of the pair returned by split(path)."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.dirname(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2148:os_path_dirname
#@+node:ekr.20031218072017.2149:os_path_exists
def os_path_exists(path,encoding=None):
"""Normalize the path and convert it to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
return os.path.exists(path)
#@-node:ekr.20031218072017.2149:os_path_exists
#@+node:ekr.20031218072017.2150:os_path_getmtime
def os_path_getmtime(path,encoding=None):
"""Normalize the path and convert it to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
return os.path.getmtime(path)
#@-node:ekr.20031218072017.2150:os_path_getmtime
#@+node:ekr.20031218072017.2151:os_path_isabs
def os_path_isabs(path,encoding=None):
"""Normalize the path and convert it to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
return os.path.isabs(path)
#@-node:ekr.20031218072017.2151:os_path_isabs
#@+node:ekr.20031218072017.2152:os_path_isdir
def os_path_isdir(path,encoding=None):
"""Normalize the path and convert it to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
return os.path.isdir(path)
#@-node:ekr.20031218072017.2152:os_path_isdir
#@+node:ekr.20031218072017.2153:os_path_isfile
def os_path_isfile(path,encoding=None):
"""Normalize the path and convert it to an absolute path."""
path = g.toUnicodeFileEncoding(path,encoding)
return os.path.isfile(path)
#@-node:ekr.20031218072017.2153:os_path_isfile
#@+node:ekr.20031218072017.2154:os_path_join
def os_path_join(*args,**keys):
encoding = keys.get("encoding")
uargs = [g.toUnicodeFileEncoding(arg,encoding) for arg in args]
    # Note: the '!!' and '.' aliases here parallel the '!' and '.' conventions of getBaseDirectory.
if uargs and uargs[0] == '!!':
uargs[0] = g.app.loadDir
elif uargs and uargs[0] == '.':
c = keys.get('c')
if c and c.openDirectory:
uargs[0] = c.openDirectory
# g.trace(c.openDirectory)
path = os.path.join(*uargs)
# May not be needed on some Pythons.
path = g.toUnicodeFileEncoding(path,encoding)
return path
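if 0: # Illustrative sketch only: the '!!' and '.' aliases.
    print g.os_path_join('!!','doc','x.leo') # g.app.loadDir + '/doc/x.leo'
    # '.' resolves to c.openDirectory only when a commander is passed:
    # g.os_path_join('.','x.leo',c=c)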
#@-node:ekr.20031218072017.2154:os_path_join
#@+node:ekr.20031218072017.2155:os_path_norm NOT USED
if 0: # A bad idea.
def os_path_norm(path,encoding=None):
"""Normalize both the path and the case."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.normcase(path)
path = os.path.normpath(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2155:os_path_norm NOT USED
#@+node:ekr.20041115103456:os_path_normabs NOT USED
if 0: # A bad idea.
def os_path_normabs (path,encoding=None):
"""Convert the file name to a fully normalized absolute path.
There is no exact analog to this in os.path"""
path = g.os_path_abspath(path,encoding = encoding)
path = g.os_path_norm(path,encoding = encoding)
return path
#@-node:ekr.20041115103456:os_path_normabs NOT USED
#@+node:ekr.20031218072017.2156:os_path_normcase
def os_path_normcase(path,encoding=None):
"""Normalize the path's case."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.normcase(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2156:os_path_normcase
#@+node:ekr.20031218072017.2157:os_path_normpath
def os_path_normpath(path,encoding=None):
"""Normalize the path."""
path = g.toUnicodeFileEncoding(path,encoding)
path = os.path.normpath(path)
path = g.toUnicodeFileEncoding(path,encoding)
return path
#@-node:ekr.20031218072017.2157:os_path_normpath
#@+node:ekr.20031218072017.2158:os_path_split
def os_path_split(path,encoding=None):
path = g.toUnicodeFileEncoding(path,encoding)
head,tail = os.path.split(path)
head = g.toUnicodeFileEncoding(head,encoding)
tail = g.toUnicodeFileEncoding(tail,encoding)
return head,tail
#@-node:ekr.20031218072017.2158:os_path_split
#@+node:ekr.20031218072017.2159:os_path_splitext
def os_path_splitext(path,encoding=None):
path = g.toUnicodeFileEncoding(path,encoding)
head,tail = os.path.splitext(path)
head = g.toUnicodeFileEncoding(head,encoding)
tail = g.toUnicodeFileEncoding(tail,encoding)
return head,tail
#@-node:ekr.20031218072017.2159:os_path_splitext
#@+node:ekr.20031218072017.2160:toUnicodeFileEncoding
def toUnicodeFileEncoding(path,encoding):
if path: path = path.replace('\\', os.sep)
if not encoding:
if sys.platform == "win32" or sys.platform.lower().startswith('java'):
# encoding = "mbcs" # Leo 4.2 and previous.
encoding = 'utf-8' # New in Leo 4.3
else:
encoding = app.tkEncoding
# Yes, this is correct. All os_path_x functions return Unicode strings.
return g.toUnicode(path,encoding)
#@-node:ekr.20031218072017.2160:toUnicodeFileEncoding
#@-node:ekr.20031218072017.2145:os.path wrappers (leoGlobals.py)
#@+node:ekr.20031218072017.3151:Scanning... (leoGlobals.py)
#@+node:ekr.20031218072017.3152:g.scanAtFileOptions (used in 3.x read code)
def scanAtFileOptions (h,err_flag=False):
assert(g.match(h,0,"@file"))
i = len("@file")
atFileType = "@file"
optionsList = []
while g.match(h,i,'-'):
#@ << scan another @file option >>
#@+node:ekr.20031218072017.3153:<< scan another @file option >>
i += 1 ; err = -1
if g.match_word(h,i,"asis"):
if atFileType == "@file":
atFileType = "@silentfile"
elif err_flag:
g.es("using -asis option in:",h)
elif g.match(h,i,"noref"): # Just match the prefix.
if atFileType == "@file":
atFileType = "@rawfile"
elif atFileType == "@nosentinelsfile":
atFileType = "@silentfile"
elif err_flag:
g.es("ignoring redundant -noref in:",h)
elif g.match(h,i,"nosent"): # Just match the prefix.
if atFileType == "@file":
atFileType = "@nosentinelsfile"
elif atFileType == "@rawfile":
atFileType = "@silentfile"
elif err_flag:
g.es("ignoring redundant -nosent in:",h)
elif g.match_word(h,i,"thin"):
if atFileType == "@file":
atFileType = "@thinfile"
elif err_flag:
g.es("using -thin option in:",h)
else:
if 0: # doesn't work
for option in ("fat","new","now","old","thin","wait"):
if g.match_word(h,i,option):
optionsList.append(option)
if len(option) == 0:
err = i-1
# Scan to the next minus sign.
while i < len(h) and h[i] not in (' ','\t','-'):
i += 1
if err > -1:
g.es("unknown option:",h[err:i],"in",h)
#@-node:ekr.20031218072017.3153:<< scan another @file option >>
#@nl
# Convert atFileType to a list of options.
for fileType,option in (
("@silentfile","asis"),
("@nosentinelsfile","nosent"),
("@rawfile","noref"),
("@thinfile","thin")
):
if atFileType == fileType and option not in optionsList:
optionsList.append(option)
# g.trace(atFileType,optionsList)
return i,atFileType,optionsList
#@-node:ekr.20031218072017.3152:g.scanAtFileOptions (used in 3.x read code)
#@+node:ekr.20031218072017.3156:scanError
# It is dubious to bump the Tangle error count here, but it really doesn't hurt.
def scanError(s):
'''Bump the error count in the tangle command.'''
# New in Leo 4.4b1: just set this global.
g.app.scanErrors +=1
g.es('',s)
#@-node:ekr.20031218072017.3156:scanError
#@+node:ekr.20031218072017.3157:scanf
# A quick and dirty sscanf. Understands only %s and %d.
def scanf (s,pat):
count = pat.count("%s") + pat.count("%d")
pat = pat.replace("%s","(\S+)")
pat = pat.replace("%d","(\d+)")
parts = re.split(pat,s)
result = []
for part in parts:
if len(part) > 0 and len(result) < count:
result.append(part)
# g.trace("scanf returns:",result)
return result
if 0: # testing
g.scanf("1.0","%d.%d",)
#@-node:ekr.20031218072017.3157:scanf
#@+node:ekr.20031218072017.3158:Scanners: calling scanError
#@+at
#@nonl
# These scanners all call g.scanError() directly or indirectly, so they will
# call g.es if they find an error. g.scanError() also bumps g.app.scanErrors,
# which is harmless if we aren't tangling, and useful if we are.
#
# These routines are called by the Import routines and the Tangle routines.
#@-at
#@+node:ekr.20031218072017.3159:skip_block_comment
# Scans past a block comment (an old-style C comment).
def skip_block_comment (s,i):
assert(g.match(s,i,"/*"))
j = i ; i += 2 ; n = len(s)
k = string.find(s,"*/",i)
if k == -1:
g.scanError("Run on block comment: " + s[j:i])
return n
else: return k + 2
#@-node:ekr.20031218072017.3159:skip_block_comment
#@+node:ekr.20031218072017.3160:skip_braces
#@+at
#@nonl
# This code is called only from the import logic, so we are allowed to try
# some tricks. In particular, we assume all braces are matched in #if blocks.
#@-at
#@@c
def skip_braces(s,i):
    '''Skips from the opening { to the matching }.
    If no matching brace is found, i is set to len(s).'''
# start = g.get_line(s,i)
assert(g.match(s,i,'{'))
level = 0 ; n = len(s)
while i < n:
c = s[i]
if c == '{':
level += 1 ; i += 1
elif c == '}':
level -= 1
if level <= 0: return i
i += 1
elif c == '\'' or c == '"': i = g.skip_string(s,i)
elif g.match(s,i,'//'): i = g.skip_to_end_of_line(s,i)
elif g.match(s,i,'/*'): i = g.skip_block_comment(s,i)
# 7/29/02: be more careful handling conditional code.
elif g.match_word(s,i,"#if") or g.match_word(s,i,"#ifdef") or g.match_word(s,i,"#ifndef"):
i,delta = g.skip_pp_if(s,i)
level += delta
else: i += 1
return i
#@-node:ekr.20031218072017.3160:skip_braces
#@+node:ekr.20031218072017.3161:skip_php_braces (no longer used)
#@+at
#@nonl
# 08-SEP-2002 DTHEIN: Added for PHP import support.
# Skips from the opening { to the matching }. If no matching brace is found,
# i is set to len(s).
#
# This code is called only from the import logic, and only for PHP imports.
#@-at
#@@c
def skip_php_braces(s,i):
# start = g.get_line(s,i)
assert(g.match(s,i,'{'))
level = 0 ; n = len(s)
while i < n:
c = s[i]
if c == '{':
level += 1 ; i += 1
elif c == '}':
level -= 1
if level <= 0: return i + 1
i += 1
elif c == '\'' or c == '"': i = g.skip_string(s,i)
elif g.match(s,i,"<<<"): i = g.skip_heredoc_string(s,i)
elif g.match(s,i,'//') or g.match(s,i,'#'): i = g.skip_to_end_of_line(s,i)
elif g.match(s,i,'/*'): i = g.skip_block_comment(s,i)
else: i += 1
return i
#@-node:ekr.20031218072017.3161:skip_php_braces (no longer used)
#@+node:ekr.20031218072017.3162:skip_parens
def skip_parens(s,i):
    '''Skips from the opening ( to the matching ).
    If no matching paren is found, i is set to len(s).'''
level = 0 ; n = len(s)
assert(g.match(s,i,'('))
while i < n:
c = s[i]
if c == '(':
level += 1 ; i += 1
elif c == ')':
level -= 1
if level <= 0: return i
i += 1
elif c == '\'' or c == '"': i = g.skip_string(s,i)
elif g.match(s,i,"//"): i = g.skip_to_end_of_line(s,i)
elif g.match(s,i,"/*"): i = g.skip_block_comment(s,i)
else: i += 1
return i
#@-node:ekr.20031218072017.3162:skip_parens
#@+node:ekr.20031218072017.3163:skip_pascal_begin_end
def skip_pascal_begin_end(s,i):
'''Skips from begin to matching end.
If found, i points to the end. Otherwise, i >= len(s)
The end keyword matches begin, case, class, record, and try.'''
assert(g.match_c_word(s,i,"begin"))
level = 1 ; i = g.skip_c_id(s,i) # Skip the opening begin.
while i < len(s):
ch = s[i]
if ch =='{' : i = g.skip_pascal_braces(s,i)
elif ch =='"' or ch == '\'': i = g.skip_pascal_string(s,i)
elif g.match(s,i,"//"): i = g.skip_line(s,i)
elif g.match(s,i,"(*"): i = g.skip_pascal_block_comment(s,i)
elif g.match_c_word(s,i,"end"):
level -= 1
if level == 0:
# lines = s[i1:i+3] ; g.trace('\n' + lines + '\n')
return i
else: i = g.skip_c_id(s,i)
elif g.is_c_id(ch):
j = i ; i = g.skip_c_id(s,i) ; name = s[j:i]
if name in ["begin", "case", "class", "record", "try"]:
level += 1
else: i += 1
return i
#@-node:ekr.20031218072017.3163:skip_pascal_begin_end
#@+node:ekr.20031218072017.3164:skip_pascal_block_comment
# Scans past a pascal comment delimited by (* and *).
def skip_pascal_block_comment(s,i):
j = i
assert(g.match(s,i,"(*"))
i = string.find(s,"*)",i)
if i > -1: return i + 2
else:
g.scanError("Run on comment" + s[j:i])
return len(s)
# n = len(s)
# while i < n:
# if g.match(s,i,"*)"): return i + 2
# i += 1
# g.scanError("Run on comment" + s[j:i])
# return i
#@-node:ekr.20031218072017.3164:skip_pascal_block_comment
#@+node:ekr.20031218072017.3165:skip_pascal_string : called by tangle
def skip_pascal_string(s,i):
j = i ; delim = s[i] ; i += 1
assert(delim == '"' or delim == '\'')
while i < len(s):
if s[i] == delim:
return i + 1
else: i += 1
g.scanError("Run on string: " + s[j:i])
return i
#@-node:ekr.20031218072017.3165:skip_pascal_string : called by tangle
#@+node:ekr.20031218072017.3166:skip_heredoc_string : called by php import (Dave Hein)
#@+at
#@nonl
# 08-SEP-2002 DTHEIN: added function skip_heredoc_string
# A heredoc string in PHP looks like:
#
# <<<EOS
# This is my string.
# It is mine. I own it.
# No one else has it.
# EOS
#
# It begins with <<< plus a token (which follows the same naming rules as PHP
# variable names). It ends with the token on a line by itself (which must
# start in the first position).
#
#@-at
#@@c
def skip_heredoc_string(s,i):
j = i
assert(g.match(s,i,"<<<"))
m = re.match("\<\<\<([a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*)", s[i:])
    if m is None:
i += 3
return i
# 14-SEP-2002 DTHEIN: needed to add \n to find word, not just string
delim = m.group(1) + '\n'
i = g.skip_line(s,i) # 14-SEP-2002 DTHEIN: look after \n, not before
n = len(s)
while i < n and not g.match(s,i,delim):
i = g.skip_line(s,i) # 14-SEP-2002 DTHEIN: move past \n
if i >= n:
g.scanError("Run on string: " + s[j:i])
elif g.match(s,i,delim):
i += len(delim)
return i
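if 0: # Illustrative sketch only, mirroring the doc block above.
    s = '<<<EOS\nline 1\nline 2\nEOS\nafter'
    i = g.skip_heredoc_string(s,0)
    print i,repr(s[i:]) # 25 'after': i points just past the terminating EOS line.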
#@-node:ekr.20031218072017.3166:skip_heredoc_string : called by php import (Dave Hein)
#@+node:ekr.20031218072017.3167:skip_pp_directive
# Now handles continuation lines and block comments.
def skip_pp_directive(s,i):
while i < len(s):
if g.is_nl(s,i):
if g.escaped(s,i): i = g.skip_nl(s,i)
else: break
elif g.match(s,i,"//"): i = g.skip_to_end_of_line(s,i)
elif g.match(s,i,"/*"): i = g.skip_block_comment(s,i)
else: i += 1
return i
#@-node:ekr.20031218072017.3167:skip_pp_directive
#@+node:ekr.20031218072017.3168:skip_pp_if
# Skips an entire #if, #ifdef or #ifndef block, including any nested blocks.
def skip_pp_if(s,i):
start_line = g.get_line(s,i) # used for error messages.
# g.trace(start_line)
assert(
g.match_word(s,i,"#if") or
g.match_word(s,i,"#ifdef") or
g.match_word(s,i,"#ifndef"))
i = g.skip_line(s,i)
i,delta1 = g.skip_pp_part(s,i)
i = g.skip_ws(s,i)
if g.match_word(s,i,"#else"):
i = g.skip_line(s,i)
i = g.skip_ws(s,i)
i,delta2 = g.skip_pp_part(s,i)
if delta1 != delta2:
g.es("#if and #else parts have different braces:",start_line)
i = g.skip_ws(s,i)
if g.match_word(s,i,"#endif"):
i = g.skip_line(s,i)
else:
g.es("no matching #endif:",start_line)
# g.trace(delta1,start_line)
return i,delta1
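if 0: # Illustrative sketch only: both branches open one brace, so delta is 1.
    s = '#ifdef X\n{\n#else\n{\n#endif\nrest'
    i,delta = g.skip_pp_if(s,0)
    print i,delta # 26 1: i points at 'rest'.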
#@-node:ekr.20031218072017.3168:skip_pp_if
#@+node:ekr.20031218072017.3169:skip_pp_part
# Skip to an #else or #endif. The caller has eaten the #if, #ifdef, #ifndef or #else
def skip_pp_part(s,i):
# g.trace(g.get_line(s,i))
delta = 0
while i < len(s):
c = s[i]
if 0:
if c == '\n':
g.trace(delta,g.get_line(s,i))
if g.match_word(s,i,"#if") or g.match_word(s,i,"#ifdef") or g.match_word(s,i,"#ifndef"):
i,delta1 = g.skip_pp_if(s,i)
delta += delta1
elif g.match_word(s,i,"#else") or g.match_word(s,i,"#endif"):
return i,delta
elif c == '\'' or c == '"': i = g.skip_string(s,i)
elif c == '{':
delta += 1 ; i += 1
elif c == '}':
delta -= 1 ; i += 1
elif g.match(s,i,"//"): i = g.skip_line(s,i)
elif g.match(s,i,"/*"): i = g.skip_block_comment(s,i)
else: i += 1
return i,delta
#@-node:ekr.20031218072017.3169:skip_pp_part
#@+node:ekr.20031218072017.3170:skip_python_string
def skip_python_string(s,i,verbose=True):
if g.match(s,i,"'''") or g.match(s,i,'"""'):
j = i ; delim = s[i]*3 ; i += 3
k = string.find(s,delim,i)
if k > -1: return k+3
if verbose:
g.scanError("Run on triple quoted string: " + s[j:i])
return len(s)
else:
return g.skip_string(s,i)
#@-node:ekr.20031218072017.3170:skip_python_string
#@+node:ekr.20031218072017.2369:skip_string
def skip_string(s,i,verbose=True):
'''Scan forward to the end of a string.
New in Leo 4.4.2 final: give error only if verbose is True'''
j = i ; delim = s[i] ; i += 1
assert(delim == '"' or delim == '\'')
n = len(s)
while i < n and s[i] != delim:
if s[i] == '\\' : i += 2
else: i += 1
if i >= n:
if verbose:
g.scanError("Run on string: " + s[j:i])
elif s[i] == delim:
i += 1
# g.trace(s[j:i])
return i
#@-node:ekr.20031218072017.2369:skip_string
#@+node:ekr.20031218072017.3171:skip_to_semicolon
# Skips to the next semicolon that is not in a comment or a string.
def skip_to_semicolon(s,i):
n = len(s)
while i < n:
c = s[i]
if c == ';': return i
elif c == '\'' or c == '"' : i = g.skip_string(s,i)
elif g.match(s,i,"//"): i = g.skip_to_end_of_line(s,i)
elif g.match(s,i,"/*"): i = g.skip_block_comment(s,i)
else: i += 1
return i
#@-node:ekr.20031218072017.3171:skip_to_semicolon
#@+node:ekr.20031218072017.3172:skip_typedef
def skip_typedef(s,i):
n = len(s)
while i < n and g.is_c_id(s[i]):
i = g.skip_c_id(s,i)
i = g.skip_ws_and_nl(s,i)
if g.match(s,i,'{'):
i = g.skip_braces(s,i)
i = g.skip_to_semicolon(s,i)
return i
#@-node:ekr.20031218072017.3172:skip_typedef
#@-node:ekr.20031218072017.3158:Scanners: calling scanError
#@+node:ekr.20031218072017.3173:Scanners: no error messages
#@+node:ekr.20031218072017.3174:escaped
# Returns True if s[i] is preceded by an odd number of backslashes.
def escaped(s,i):
count = 0
while i-1 >= 0 and s[i-1] == '\\':
count += 1
i -= 1
return (count%2) == 1
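if 0: # Illustrative sketch only: only an odd number of backslashes escapes.
    print g.escaped('a\\"',2)    # True: one backslash precedes the quote.
    print g.escaped('a\\\\"b',3) # False: two backslashes, so the quote is real.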
#@-node:ekr.20031218072017.3174:escaped
#@+node:ekr.20031218072017.3175:find_line_start
def find_line_start(s,i):
if i < 0: return 0 # New in Leo 4.4.5: add this defensive code.
# bug fix: 11/2/02: change i to i+1 in rfind
i = string.rfind(s,'\n',0,i+1) # Finds the highest index in the range.
if i == -1: return 0
else: return i + 1
#@-node:ekr.20031218072017.3175:find_line_start
#@+node:ekr.20031218072017.3176:find_on_line
def find_on_line(s,i,pattern):
# j = g.skip_line(s,i) ; g.trace(s[i:j])
j = string.find(s,'\n',i)
if j == -1: j = len(s)
k = string.find(s,pattern,i,j)
if k > -1: return k
else: return None
#@-node:ekr.20031218072017.3176:find_on_line
#@+node:ekr.20031218072017.3177:is_c_id
def is_c_id(ch):
return g.isWordChar(ch)
#@-node:ekr.20031218072017.3177:is_c_id
#@+node:ekr.20031218072017.3178:is_nl
def is_nl(s,i):
return i < len(s) and (s[i] == '\n' or s[i] == '\r')
#@-node:ekr.20031218072017.3178:is_nl
#@+node:ekr.20031218072017.3179:is_special
# We no longer require that the directive appear before any @c directive or section definition.
def is_special(s,i,directive):
'''Return True if the body text contains the @ directive.'''
# j = g.skip_line(s,i) ; g.trace(s[i:j],':',directive)
assert (directive and directive [0] == '@' )
# 10/23/02: all directives except @others must start the line.
skip_flag = directive in ("@others","@all")
while i < len(s):
if g.match_word(s,i,directive):
return True, i
else:
i = g.skip_line(s,i)
if skip_flag:
i = g.skip_ws(s,i)
return False, -1
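if 0: # Illustrative sketch only: @others may be indented; @c must start its line.
    body = 'line 1\n @others\n@c\n'
    print g.is_special(body,0,'@others') # (True, 8)
    print g.is_special(body,0,'@c')      # (True, 16)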
#@-node:ekr.20031218072017.3179:is_special
#@+node:ekr.20031218072017.3180:is_ws & is_ws_or_nl
def is_ws(c):
return c == '\t' or c == ' '
def is_ws_or_nl(s,i):
return g.is_nl(s,i) or (i < len(s) and g.is_ws(s[i]))
#@-node:ekr.20031218072017.3180:is_ws & is_ws_or_nl
#@+node:ekr.20031218072017.3181:match
# Warning: this code makes no assumptions about what follows pattern.
def match(s,i,pattern):
return s and pattern and string.find(s,pattern,i,i+len(pattern)) == i
#@-node:ekr.20031218072017.3181:match
#@+node:ekr.20031218072017.3182:match_c_word
def match_c_word (s,i,name):
if name == None: return False
n = len(name)
if n == 0: return False
return name == s[i:i+n] and (i+n == len(s) or not g.is_c_id(s[i+n]))
#@-node:ekr.20031218072017.3182:match_c_word
#@+node:ekr.20031218072017.3183:match_ignoring_case
def match_ignoring_case(s1,s2):
if s1 == None or s2 == None: return False
return string.lower(s1) == string.lower(s2)
#@-node:ekr.20031218072017.3183:match_ignoring_case
#@+node:ekr.20031218072017.3184:match_word
def match_word(s,i,pattern):
if pattern == None: return False
j = len(pattern)
if j == 0: return False
if s.find(pattern,i,i+j) != i:
return False
if i+j >= len(s):
return True
ch = s[i+j]
return not g.isWordChar(ch)
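if 0: # Illustrative sketch only: a word match must not be followed by a word char.
    print g.match_word('@others x',0,'@others') # True
    print g.match_word('@othersx',0,'@others')  # False: 'x' continues the word.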
#@-node:ekr.20031218072017.3184:match_word
#@+node:ekr.20031218072017.3185:skip_blank_lines
def skip_blank_lines(s,i):
while i < len(s):
if g.is_nl(s,i) :
i = g.skip_nl(s,i)
elif g.is_ws(s[i]):
j = g.skip_ws(s,i)
if g.is_nl(s,j):
i = j
else: break
else: break
return i
#@-node:ekr.20031218072017.3185:skip_blank_lines
#@+node:ekr.20031218072017.3186:skip_c_id
def skip_c_id(s,i):
n = len(s)
while i < n and g.isWordChar(s[i]):
i += 1
return i
#@-node:ekr.20031218072017.3186:skip_c_id
#@+node:ekr.20040705195048:skip_id
def skip_id(s,i,chars=None):
chars = chars and g.toUnicode(chars,encoding='ascii') or ''
n = len(s)
while i < n and (g.isWordChar(s[i]) or s[i] in chars):
i += 1
return i
#@nonl
#@-node:ekr.20040705195048:skip_id
#@+node:ekr.20031218072017.3187:skip_line, skip_to_start/end_of_line
#@+at
#@nonl
# These methods skip to the next newline, regardless of whether the newline
# may be preceded by a backslash. Consequently, they should be used only when
# we know that we are not in a preprocessor directive or string.
#@-at
#@@c
def skip_line (s,i):
if i >= len(s): return len(s) # Bug fix: 2007/5/22
if i < 0: i = 0
i = string.find(s,'\n',i)
if i == -1: return len(s)
else: return i + 1
def skip_to_end_of_line (s,i):
if i >= len(s): return len(s) # Bug fix: 2007/5/22
if i < 0: i = 0
i = string.find(s,'\n',i)
if i == -1: return len(s)
else: return i
def skip_to_start_of_line (s,i):
if i >= len(s): return len(s)
if i <= 0: return 0
i = s.rfind('\n',0,i) # Don't find s[i], so it doesn't matter if s[i] is a newline.
if i == -1: return 0
else: return i + 1
#@-node:ekr.20031218072017.3187:skip_line, skip_to_start/end_of_line
#@+node:ekr.20031218072017.3188:skip_long
def skip_long(s,i):
'''Scan s[i:] for a valid int.
Return (i, val) or (i, None) if s[i] does not point at a number.'''
val = 0
i = g.skip_ws(s,i)
n = len(s)
if i >= n or (not s[i].isdigit() and s[i] not in u'+-'):
return i, None
j = i
if s[i] in u'+-': # Allow sign before the first digit
i +=1
while i < n and s[i].isdigit():
i += 1
try: # There may be no digits.
val = int(s[j:i])
return i, val
except Exception:
return i,None
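if 0: # Illustrative sketch only.
    print g.skip_long('  -42 rest',0) # (5, -42): leading whitespace and sign are handled.
    print g.skip_long('abc',0)        # (0, None): not a number.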
#@-node:ekr.20031218072017.3188:skip_long
#@+node:ekr.20031218072017.3189:skip_matching_python_delims
def skip_matching_python_delims(s,i,delim1,delim2,reverse=False):
    '''Skip from the opening delim1 to the matching delim2.
    Return the index of the matching delim2, or -1.'''
level = 0 ; n = len(s)
# g.trace('delim1/2',repr(delim1),repr(delim2),'i',i,'s[i]',repr(s[i]),'s',repr(s[i-5:i+5]))
assert(g.match(s,i,delim1))
if reverse:
while i >= 0:
ch = s[i]
if ch == delim1:
level += 1 ; i -= 1
elif ch == delim2:
level -= 1
if level <= 0: return i
i -= 1
# Doesn't handle strings and comments properly...
else: i -= 1
else:
while i < n:
progress = i
ch = s[i]
if ch == delim1:
level += 1 ; i += 1
elif ch == delim2:
level -= 1
if level <= 0: return i
i += 1
elif ch == '\'' or ch == '"': i = g.skip_string(s,i,verbose=False)
elif g.match(s,i,'#'): i = g.skip_to_end_of_line(s,i)
else: i += 1
if i == progress: return -1
return -1
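if 0: # Illustrative sketch only: nested parens and the ')' inside the string are skipped.
    s = "f(a,(b,c),'d)')x"
    print g.skip_matching_python_delims(s,1,'(',')') # 14: the index of the closing ')'.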
#@-node:ekr.20031218072017.3189:skip_matching_python_delims
#@+node:ekr.20060627080947:skip_matching_python_parens
def skip_matching_python_parens(s,i):
'''Skip from the opening ( to the matching ).
Return the index of the matching ')', or -1'''
return skip_matching_python_delims(s,i,'(',')')
#@-node:ekr.20060627080947:skip_matching_python_parens
#@+node:ekr.20031218072017.3190:skip_nl
# We need this function because different systems have different end-of-line conventions.
def skip_nl (s,i):
'''Skips a single "logical" end-of-line character.'''
if g.match(s,i,"\r\n"): return i + 2
elif g.match(s,i,'\n') or g.match(s,i,'\r'): return i + 1
else: return i
#@-node:ekr.20031218072017.3190:skip_nl
#@+node:ekr.20031218072017.3191:skip_non_ws
def skip_non_ws (s,i):
n = len(s)
while i < n and not g.is_ws(s[i]):
i += 1
return i
#@-node:ekr.20031218072017.3191:skip_non_ws
#@+node:ekr.20031218072017.3192:skip_pascal_braces
# Skips from the opening { to the matching }.
def skip_pascal_braces(s,i):
# No constructs are recognized inside Pascal block comments!
    k = string.find(s,'}',i)
    if k == -1: return len(s) # Bug fix: test k, not i.
    else: return k
#@-node:ekr.20031218072017.3192:skip_pascal_braces
#@+node:ekr.20031218072017.3193:skip_to_char
def skip_to_char(s,i,ch):
j = string.find(s,ch,i)
if j == -1:
return len(s),s[i:]
else:
return j,s[i:j]
#@-node:ekr.20031218072017.3193:skip_to_char
#@+node:ekr.20031218072017.3194:skip_ws, skip_ws_and_nl
def skip_ws(s,i):
n = len(s)
while i < n and g.is_ws(s[i]):
i += 1
return i
def skip_ws_and_nl(s,i):
n = len(s)
while i < n and (g.is_ws(s[i]) or g.is_nl(s,i)):
i += 1
return i
#@-node:ekr.20031218072017.3194:skip_ws, skip_ws_and_nl
#@-node:ekr.20031218072017.3173:Scanners: no error messages
#@+node:ekr.20031218072017.3195:splitLines & joinLines
def splitLines (s):
'''Split s into lines, preserving the number of lines and the ending of the last line.'''
# g.stat()
if s:
return s.splitlines(True) # This is a Python string function!
else:
return []
splitlines = splitLines
def joinLines (aList):
return ''.join(aList)
joinlines = joinLines
#@-node:ekr.20031218072017.3195:splitLines & joinLines
#@-node:ekr.20031218072017.3151:Scanning... (leoGlobals.py)
#@+node:ekr.20040327103735.2:Script Tools (leoGlobals.py)
#@+node:ekr.20031218072017.2418:g.initScriptFind (set up dialog)
def initScriptFind(c,findHeadline,changeHeadline=None,firstNode=None,
script_search=True,script_change=True):
# __pychecker__ = '--no-argsused' # firstNode is not used.
import leoTest
import leoGlobals as g
# Find the scripts.
p = c.currentPosition()
u = leoTest.testUtils(c)
find_p = u.findNodeInTree(p,findHeadline)
if find_p:
find_text = find_p.bodyString()
else:
g.es("no Find script node",color="red")
return
if changeHeadline:
change_p = u.findNodeInTree(p,changeHeadline)
else:
change_p = None
if change_p:
change_text = change_p.bodyString()
else:
change_text = ""
# print find_p,change_p
# Initialize the find panel.
c.script_search_flag = script_search
c.script_change_flag = script_change and change_text
if script_search:
c.find_text = find_text.strip() + "\n"
else:
c.find_text = find_text
if script_change:
c.change_text = change_text.strip() + "\n"
else:
c.change_text = change_text
c.frame.findPanel.init(c)
c.showFindPanel()
#@-node:ekr.20031218072017.2418:g.initScriptFind (set up dialog)
#@+node:ekr.20040321065415:g.findNode... &,findTopLevelNode
def findNodeInChildren(c,p,headline):
"""Search for a node in v's tree matching the given headline."""
for p in p.children_iter():
if p.headString().strip() == headline.strip():
return p.copy()
return c.nullPosition()
def findNodeInTree(c,p,headline):
"""Search for a node in v's tree matching the given headline."""
for p in p.subtree_iter():
if p.headString().strip() == headline.strip():
return p.copy()
return c.nullPosition()
def findNodeAnywhere(c,headline):
for p in c.allNodes_iter():
if p.headString().strip() == headline.strip():
return p.copy()
return c.nullPosition()
def findTopLevelNode(c,headline):
for p in c.rootPosition().self_and_siblings_iter():
if p.headString().strip() == headline.strip():
return p.copy()
return c.nullPosition()
#@-node:ekr.20040321065415:g.findNode... &,findTopLevelNode
#@+node:ekr.20060624085200:g.handleScriptException
def handleScriptException (c,p,script,script1):
g.es("exception executing script",color='blue')
full = c.config.getBool('show_full_tracebacks_in_scripts')
fileName, n = g.es_exception(full=full)
if p and not script1 and fileName == "<string>":
c.goToScriptLineNumber(p,script,n)
#@ << dump the lines near the error >>
#@+node:EKR.20040612215018:<< dump the lines near the error >>
if g.os_path_exists(fileName):
f = file(fileName)
lines = f.readlines()
f.close()
else:
lines = g.splitLines(script)
s = '-' * 20
g.es_print('',s)
# Print surrounding lines.
i = max(0,n-2)
j = min(n+2,len(lines))
while i < j:
ch = g.choose(i==n-1,'*',' ')
s = "%s line %d: %s" % (ch,i+1,lines[i])
g.es('',s,newline=False)
i += 1
#@-node:EKR.20040612215018:<< dump the lines near the error >>
#@nl
#@-node:ekr.20060624085200:g.handleScriptException
#@+node:ekr.20050503112513.7:g.executeFile
def executeFile(filename, options= ''):
if not os.access(filename, os.R_OK): return
subprocess = g.importExtension('subprocess',None,verbose=False)
cwd = os.getcwdu()
fdir, fname = g.os_path_split(filename)
if subprocess: # Only exists in Python 2.4.
#@ << define subprocess_wrapper >>
#@+node:ekr.20050503112513.8:<< define subprocess_wrapper >>
def subprocess_wrapper(cmdlst):
# g.trace(cmdlst, fdir)
# g.trace(subprocess.list2cmdline([cmdlst]))
p = subprocess.Popen(cmdlst, cwd=fdir,
universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdo, stde = p.communicate()
return p.wait(), stdo, stde
#@-node:ekr.20050503112513.8:<< define subprocess_wrapper >>
#@nl
rc, so, se = subprocess_wrapper('%s %s %s'%(sys.executable, fname, options))
if rc:
print 'return code', rc
print so, se
else:
if fdir: os.chdir(fdir)
d = {'__name__': '__main__'}
execfile(fname, d) #, globals()
        # os.system('%s %s' % (sys.executable, fname)) # Disabled: execfile above already runs the file.
if fdir: os.chdir(cwd)
#@-node:ekr.20050503112513.7:g.executeFile
#@-node:ekr.20040327103735.2:Script Tools (leoGlobals.py)
#@+node:ekr.20031218072017.1498:Unicode utils...
#@+node:ekr.20061006152327:g.isWordChar & g.isWordChar1
def isWordChar (ch):
'''Return True if ch should be considered a letter.'''
return ch and (ch.isalnum() or ch == u'_')
def isWordChar1 (ch):
return ch and (ch.isalpha() or ch == u'_')
#@nonl
#@-node:ekr.20061006152327:g.isWordChar & g.isWordChar1
#@+node:ekr.20031218072017.1503:getpreferredencoding from 2.3a2
# Suppress warning about redefining getpreferredencoding
# __pychecker__ = '--no-reuseattr'
try:
# Use Python's version of getpreferredencoding if it exists.
# It is new in Python 2.3.
import locale
getpreferredencoding = locale.getpreferredencoding
except Exception:
# Use code copied from locale.py in Python 2.3alpha2.
if sys.platform in ('win32', 'darwin', 'mac'):
#@ << define getpreferredencoding using _locale >>
#@+node:ekr.20031218072017.1504:<< define getpreferredencoding using _locale >>
# On Win32, this will return the ANSI code page
# On the Mac, it should return the system encoding;
# it might return "ascii" instead.
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
try:
import _locale
return _locale._getdefaultlocale()[1]
except Exception:
return None
#@-node:ekr.20031218072017.1504:<< define getpreferredencoding using _locale >>
#@nl
else:
#@ << define getpreferredencoding for *nix >>
#@+node:ekr.20031218072017.1505:<< define getpreferredencoding for *nix >>
# Pychecker & pylint complains about CODESET
try:
locale.CODESET # Bug fix, 2/12/05
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
try:
return locale.getdefaultlocale()[1]
except Exception:
return None
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
try:
if do_setlocale:
                        # Qualify the constants: bare LC_CTYPE/CODESET exist only inside locale.py itself.
                        oldloc = locale.setlocale(locale.LC_CTYPE)
                        locale.setlocale(locale.LC_CTYPE, "")
                        result = locale.nl_langinfo(locale.CODESET)
                        locale.setlocale(locale.LC_CTYPE, oldloc)
                        return result
                    else:
                        return locale.nl_langinfo(locale.CODESET)
except Exception:
return None
#@-node:ekr.20031218072017.1505:<< define getpreferredencoding for *nix >>
#@nl
# __pychecker__ = '--reuseattr'
#@-node:ekr.20031218072017.1503:getpreferredencoding from 2.3a2
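#@+node:ekr.20031218072017.1503.demo:_defaultEncoding (illustrative sketch)
# A small illustrative helper, not part of the original API: resolve a usable
# default encoding from the function above, falling back to ascii when the
# locale reports nothing.
def _defaultEncoding ():
    try:
        return getpreferredencoding() or 'ascii'
    except Exception:
        return 'ascii'
#@-node:ekr.20031218072017.1503.demo:_defaultEncoding (illustrative sketch)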
#@+node:ekr.20031218072017.1499:isUnicode
def isUnicode(s):
return s is None or type(s) == type(u' ')
#@-node:ekr.20031218072017.1499:isUnicode
#@+node:ekr.20031218072017.1500:isValidEncoding
def isValidEncoding (encoding):
if not encoding:
return False
if sys.platform == 'cli':
return True
import codecs
try:
codecs.lookup(encoding)
return True
except LookupError: # Windows.
return False
except AttributeError: # Linux.
return False
#@nonl
#@-node:ekr.20031218072017.1500:isValidEncoding
#@+node:ekr.20031218072017.1501:reportBadChars
def reportBadChars (s,encoding):
errors = 0
if type(s) == type(u""):
for ch in s:
try: ch.encode(encoding,"strict")
except UnicodeEncodeError:
errors += 1
if errors:
s2 = "%d errors converting %s to %s" % (
errors, s.encode(encoding,'replace'),
encoding.encode('ascii','replace'))
if not g.unitTesting:
g.es(s2,color='red')
elif type(s) == type(""):
for ch in s:
try: unicode(ch,encoding,"strict")
except Exception: errors += 1
if errors:
s2 = "%d errors converting %s (%s encoding) to unicode" % (
errors, unicode(s,encoding,'replace'),
encoding.encode('ascii','replace'))
if not g.unitTesting:
g.es(s2,color='red')
#@-node:ekr.20031218072017.1501:reportBadChars
#@+node:ekr.20031218072017.1502:toUnicode & toEncodedString (and tests)
#@+node:ekr.20050208093800:toEncodedString
def toEncodedString (s,encoding,reportErrors=False):
if type(s) == type(u""):
try:
s = s.encode(encoding,"strict")
except UnicodeError:
if reportErrors:
g.reportBadChars(s,encoding)
s = s.encode(encoding,"replace")
return s
#@-node:ekr.20050208093800:toEncodedString
#@+node:ekr.20050208093903:toEncodedStringWithErrorCode
def toEncodedStringWithErrorCode (s,encoding):
ok = True
if type(s) == type(u""):
try:
s = s.encode(encoding,"strict")
except UnicodeError:
s = s.encode(encoding,"replace")
ok = False
return s,ok
#@-node:ekr.20050208093903:toEncodedStringWithErrorCode
#@+node:ekr.20050208093800.1:toUnicode
def toUnicode (s,encoding,reportErrors=False):
if s is None:
s = u""
if type(s) == type(""):
try:
s = unicode(s,encoding,"strict")
except UnicodeError:
if reportErrors:
g.reportBadChars(s,encoding)
s = unicode(s,encoding,"replace")
return s
#@-node:ekr.20050208093800.1:toUnicode
#@+node:ekr.20050208095723:toUnicodeWithErrorCode
def toUnicodeWithErrorCode (s,encoding):
ok = True
if s is None:
s = u""
if type(s) == type(""):
try:
s = unicode(s,encoding,"strict")
except UnicodeError:
s = unicode(s,encoding,"replace")
ok = False
return s,ok
#@-node:ekr.20050208095723:toUnicodeWithErrorCode
#@-node:ekr.20031218072017.1502:toUnicode & toEncodedString (and tests)
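#@+node:ekr.20031218072017.1502.demo:_demo_unicodeRoundTrip (illustrative sketch)
# Illustrative round trip through the converters above; the byte string and
# encoding are arbitrary examples, not data from Leo itself.
def _demo_unicodeRoundTrip ():
    u, ok = g.toUnicodeWithErrorCode('caf\xc3\xa9','utf-8')   # utf-8 bytes -> unicode
    s, ok2 = g.toEncodedStringWithErrorCode(u,'utf-8')        # unicode -> utf-8 bytes
    assert ok and ok2 and s == 'caf\xc3\xa9'
#@-node:ekr.20031218072017.1502.demo:_demo_unicodeRoundTrip (illustrative sketch)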
#@-node:ekr.20031218072017.1498:Unicode utils...
#@+node:ekr.20070524083513:Unit testing (leoGlobals.py)
#@+node:ekr.20070619173330:g.getTestVars
def getTestVars ():
d = g.app.unitTestDict
c = d.get('c')
p = d.get('p')
return c,p and p.copy()
#@-node:ekr.20070619173330:g.getTestVars
#@-node:ekr.20070524083513:Unit testing (leoGlobals.py)
#@+node:EKR.20040612114220:Utility classes, functions & objects...
#@+node:ekr.20050315073003: Index utilities... (leoGlobals) (passed)
#@+node:ekr.20050314140957:g.convertPythonIndexToRowCol
def convertPythonIndexToRowCol (s,i):
'''Convert index i into string s into zero-based row/col indices.'''
if not s or i <= 0:
return 0,0
i = min(i,len(s))
# works regardless of what s[i] is
row = s.count('\n',0,i) # Don't include i
if row == 0:
return row,i
else:
prevNL = s.rfind('\n',0,i) # Don't include i
# g.trace('prevNL',prevNL,'i',i,g.callers())
return row,i-prevNL-1
#@-node:ekr.20050314140957:g.convertPythonIndexToRowCol
#@+node:ekr.20050315071727:g.convertRowColToPythonIndex
def convertRowColToPythonIndex (s,row,col,lines=None):
'''Convert zero-based row/col indices into a python index into string s.'''
if row < 0: return 0
if lines is None:
lines = g.splitLines(s)
if row >= len(lines):
return len(s)
col = min(col, len(lines[row]))
#### A big bottleneck
prev = 0
for line in lines[:row]:
prev += len(line)
return prev + col
#@-node:ekr.20050315071727:g.convertRowColToPythonIndex
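#@+node:ekr.20050315071727.demo:_demo_indexConversions (illustrative sketch)
# The two converters above are inverses of each other; a short, self-checking
# sketch (the sample string is arbitrary):
def _demo_indexConversions ():
    s = 'ab\ncd\n'
    for i in xrange(len(s)):
        row,col = g.convertPythonIndexToRowCol(s,i)
        assert g.convertRowColToPythonIndex(s,row,col) == i
#@-node:ekr.20050315071727.demo:_demo_indexConversions (illustrative sketch)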
#@-node:ekr.20050315073003: Index utilities... (leoGlobals) (passed)
#@+node:ekr.20031218072017.3140: List utilities...
#@+node:ekr.20031218072017.3141:appendToList
def appendToList(out, s):
for i in s:
out.append(i)
#@-node:ekr.20031218072017.3141:appendToList
#@+node:ekr.20031218072017.3142:flattenList
def flattenList (theList):
result = []
for item in theList:
if type(item) == types.ListType:
result.extend(g.flattenList(item))
else:
result.append(item)
return result
#@-node:ekr.20031218072017.3142:flattenList
#@+node:ekr.20060221081328:maxStringListLength
def maxStringListLength(aList):
'''Return the maximum string length in a list of strings.'''
n = 0
for z in aList:
if type(z) in (type(''),type(u'')):
n = max(n,len(z))
return n
#@-node:ekr.20060221081328:maxStringListLength
#@-node:ekr.20031218072017.3140: List utilities...
#@+node:ekr.20031218072017.3106:angleBrackets & virtual_event_name
# Returns < < s > >
def angleBrackets(s):
return ( "<<" + s +
">>") # must be on a separate line.
virtual_event_name = angleBrackets
#@-node:ekr.20031218072017.3106:angleBrackets & virtual_event_name
#@+node:ekr.20031218072017.3097:CheckVersion
#@+node:ekr.20060921100435:CheckVersion, helper
# Simplified version by EKR: stringCompare not used.
def CheckVersion (s1,s2,condition=">=",stringCompare=None,delimiter='.',trace=False):
vals1 = [g.CheckVersionToInt(s) for s in s1.split(delimiter)] ; n1 = len(vals1)
vals2 = [g.CheckVersionToInt(s) for s in s2.split(delimiter)] ; n2 = len(vals2)
n = max(n1,n2)
if n1 < n: vals1.extend([0 for i in xrange(n - n1)])
if n2 < n: vals2.extend([0 for i in xrange(n - n2)])
for cond,val in (
('==', vals1 == vals2), ('!=', vals1 != vals2),
('<', vals1 < vals2), ('<=', vals1 <= vals2),
('>', vals1 > vals2), ('>=', vals1 >= vals2),
):
if condition == cond:
result = val ; break
else:
raise EnvironmentError,"condition must be one of '>=', '>', '==', '!=', '<', or '<='."
if trace:
# print '%10s' % (repr(vals1)),'%2s' % (condition),'%10s' % (repr(vals2)),result
print '%7s' % (s1),'%2s' % (condition),'%7s' % (s2),result
return result
#@nonl
#@+node:ekr.20070120123930:CheckVersionToInt
def CheckVersionToInt (s):
try:
return int(s)
except ValueError:
aList = []
for ch in s:
if ch.isdigit(): aList.append(ch)
else: break
if aList:
            s = string.join(aList,'') # Join with an empty separator so int() sees contiguous digits.
return int(s)
else:
return 0
#@nonl
#@-node:ekr.20070120123930:CheckVersionToInt
#@-node:ekr.20060921100435:CheckVersion, helper
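#@+node:ekr.20060921100435.demo:_demo_CheckVersion (illustrative sketch)
# A self-checking sketch of the comparator above (the version strings are
# arbitrary examples): missing trailing tokens compare as zero, so '4.4'
# equals '4.4.0'.
def _demo_CheckVersion ():
    assert g.CheckVersion('4.4.1','4.4','>=')
    assert g.CheckVersion('4.4','4.4.0','==')
    assert not g.CheckVersion('4.3','4.4','>=')
#@-node:ekr.20060921100435.demo:_demo_CheckVersion (illustrative sketch)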
#@+node:ekr.20060921100435.1:oldCheckVersion (Dave Hein)
#@+at
# g.CheckVersion() is a generic version checker. Assumes a
# version string of up to four parts, or tokens, with
# leftmost token being most significant and each token
# becoming less significant in sequence to the right.
#
# RETURN VALUE
#
# 1 if comparison is True
# 0 if comparison is False
#
# PARAMETERS
#
# version: the version string to be tested
# againstVersion: the reference version string to be
# compared against
# condition: can be any of "==", "!=", ">=", "<=", ">", or "<"
# stringCompare: whether to test a token using only the
# leading integer of the token, or using the
# entire token string. For example, a value
# of "0.0.1.0" means that we use the integer
# value of the first, second, and fourth
# tokens, but we use a string compare for the
# third version token.
# delimiter: the character that separates the tokens in the
# version strings.
#
# The comparison uses the precision of the version string
# with the least number of tokens. For example a test of
# "8.4" against "8.3.3" would just compare the first two
# tokens.
#
# The version strings are limited to a maximum of 4 tokens.
#@-at
#@@c
def oldCheckVersion( version, againstVersion, condition=">=", stringCompare="0.0.0.0", delimiter='.' ):
# __pychecker__ = 'maxreturns=20'
# tokenize the stringCompare flags
compareFlag = string.split( stringCompare, '.' )
# tokenize the version strings
testVersion = string.split( version, delimiter )
testAgainst = string.split( againstVersion, delimiter )
# find the 'precision' of the comparison
tokenCount = 4
if tokenCount > len(testAgainst):
tokenCount = len(testAgainst)
if tokenCount > len(testVersion):
tokenCount = len(testVersion)
# Apply the stringCompare flags
justInteger = re.compile("^[0-9]+")
for i in range(tokenCount):
if "0" == compareFlag[i]:
m = justInteger.match( testVersion[i] )
testVersion[i] = m.group()
m = justInteger.match( testAgainst[i] )
testAgainst[i] = m.group()
elif "1" != compareFlag[i]:
errMsg = "stringCompare argument must be of " +\
"the form \"x.x.x.x\" where each " +\
"'x' is either '0' or '1'."
raise EnvironmentError,errMsg
# Compare the versions
if condition == ">=":
for i in range(tokenCount):
if testVersion[i] < testAgainst[i]:
return 0
if testVersion[i] > testAgainst[i]:
return 1 # it was greater than
return 1 # it was equal
if condition == ">":
for i in range(tokenCount):
if testVersion[i] < testAgainst[i]:
return 0
if testVersion[i] > testAgainst[i]:
return 1 # it was greater than
return 0 # it was equal
if condition == "==":
for i in range(tokenCount):
if testVersion[i] != testAgainst[i]:
return 0 # any token was not equal
return 1 # every token was equal
if condition == "!=":
for i in range(tokenCount):
if testVersion[i] != testAgainst[i]:
return 1 # any token was not equal
return 0 # every token was equal
if condition == "<":
for i in range(tokenCount):
if testVersion[i] >= testAgainst[i]:
return 0
if testVersion[i] < testAgainst[i]:
return 1 # it was less than
return 0 # it was equal
if condition == "<=":
for i in range(tokenCount):
if testVersion[i] > testAgainst[i]:
return 0
if testVersion[i] < testAgainst[i]:
return 1 # it was less than
return 1 # it was equal
# didn't find a condition that we expected.
raise EnvironmentError,"condition must be one of '>=', '>', '==', '!=', '<', or '<='."
#@nonl
#@-node:ekr.20060921100435.1:oldCheckVersion (Dave Hein)
#@-node:ekr.20031218072017.3097:CheckVersion
#@+node:ekr.20031218072017.3098:class Bunch (object)
#@+at
#@nonl
# From The Python Cookbook: Often we want to just collect a bunch of stuff
# together, naming each item of the bunch; a dictionary's OK for that, but a
# small do-nothing class is even handier, and prettier to use.
#
# Create a Bunch whenever you want to group a few variables:
#
# point = Bunch(datum=y, squared=y*y, coord=x)
#
# You can read/write the named attributes you just created, add others, del
# some of them, etc:
# if point.squared > threshold:
# point.isok = True
#@-at
#@@c
class Bunch (object):
"""A class that represents a colection of things.
Especially useful for representing a collection of related variables."""
def __init__(self,**keywords):
self.__dict__.update (keywords)
def __repr__(self):
return self.toString()
def ivars(self):
return self.__dict__.keys()
def keys(self):
return self.__dict__.keys()
def toString(self):
tag = self.__dict__.get('tag')
entries = ["%s: %s" % (key,str(self.__dict__.get(key)))
for key in self.ivars() if key != 'tag']
if tag:
return "Bunch(tag=%s)...\n%s\n" % (tag,'\n'.join(entries))
else:
return "Bunch...\n%s\n" % '\n'.join(entries)
# Used by new undo code.
def __setitem__ (self,key,value):
'''Support aBunch[key] = val'''
return operator.setitem(self.__dict__,key,value)
def __getitem__ (self,key):
'''Support aBunch[key]'''
return operator.getitem(self.__dict__,key)
def get (self,key,theDefault=None):
return self.__dict__.get(key,theDefault)
bunch = Bunch
#@-node:ekr.20031218072017.3098:class Bunch (object)
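#@+node:ekr.20031218072017.3098.demo:_demo_Bunch (illustrative sketch)
# The Cookbook pattern described above, as a concrete sketch; the field names
# are arbitrary examples.
def _demo_Bunch ():
    point = g.Bunch(datum=9,squared=81,coord=3)
    point.isok = point.squared > 50 # Attributes may be added on the fly.
    assert point['datum'] == 9 # Bunch also supports dict-style access.
#@-node:ekr.20031218072017.3098.demo:_demo_Bunch (illustrative sketch)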
#@+node:EKR.20040504150046:class mulderUpdateAlgorithm (leoGlobals)
class mulderUpdateAlgorithm:
"""A class to update derived files using
diffs in files without sentinels.
"""
#@ @+others
#@+node:EKR.20040504150046.3:__init__
def __init__ (self,testing=False,verbose=False):
self.testing = testing
self.verbose = verbose
self.do_backups = False
#@-node:EKR.20040504150046.3:__init__
#@+node:EKR.20040504150046.9:copy_sentinels
#@+at
#@nonl
# This script retains _all_ sentinels. If lines are replaced, or deleted,
# we restore deleted sentinel lines by checking for gaps in the mapping.
#@-at
#@@c
def copy_sentinels (self,write_lines,fat_lines,fat_pos,mapping,startline,endline):
"""
Copy sentinel lines from fat_lines to write_lines.
        Copy all sentinels _after_ the current reader position up to,
but not including, mapping[endline].
"""
j_last = mapping[startline]
i = startline + 1
while i <= endline:
j = mapping[i]
if j_last + 1 != j:
fat_pos = j_last + 1
# Copy the deleted sentinels that comprise the gap.
while fat_pos < j:
line = fat_lines[fat_pos]
write_lines.append(line)
if self.testing and self.verbose: print "Copy sentinel:",fat_pos,line,
fat_pos += 1
j_last = j ; i += 1
fat_pos = mapping[endline]
return fat_pos
#@-node:EKR.20040504150046.9:copy_sentinels
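    # For example (illustrative numbers, not real data): if mapping is
    # [0, 1, 4], then fat lines 2 and 3 are sentinel lines that were stripped;
    # copy_sentinels(write_lines,fat_lines,fat_pos,mapping,1,2) appends
    # exactly those two sentinel lines and returns fat_pos == mapping[2] == 4.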
#@+node:EKR.20040504155109:copy_time
def copy_time(self,sourcefilename,targetfilename):
"""
Set the target file's modification time to
that of the source file.
"""
# pychecker complains about mtime.
st = os.stat(sourcefilename)
if hasattr(os, 'utime'):
os.utime(targetfilename, (st.st_atime, st.st_mtime))
elif hasattr(os, 'mtime'):
os.mtime(targetfilename, st.st_mtime)
else:
g.trace("Can not set modification time")
#@-node:EKR.20040504155109:copy_time
#@+node:EKR.20040504150046.6:create_mapping
def create_mapping (self,lines,delims):
"""
'lines' is a list of lines of a file with sentinels.
Returns:
result: lines with all sentinels removed.
            mapping: a list such that result[i] == lines[mapping[i]]
for all i in range(len(result))
"""
if not lines:
return [],[]
# Create mapping and set i to the index of the last non-sentinel line.
mapping = []
for i in xrange(len(lines)):
if not g.is_sentinel(lines[i],delims):
mapping.append(i)
# Create a last mapping entry for copy_sentinels.
mapping.append(i)
# Use removeSentinelsFromLines to handle @nonl properly.
stripped_lines = self.removeSentinelsFromLines(lines,delims)
return stripped_lines, mapping
#@-node:EKR.20040504150046.6:create_mapping
#@+node:EKR.20040505080156:Get or remove sentinel lines
# These routines originally were part of push_filter & push_filter_lines.
#@+node:EKR.20040505081121:separateSentinelsFromFile/Lines
def separateSentinelsFromFile (self,filename):
"""Separate the lines of the file into a tuple of two lists,
containing the sentinel and non-sentinel lines of the file."""
lines = file(filename).readlines()
delims = g.comment_delims_from_extension(filename)
return self.separateSentinelsFromLines(lines,delims)
def separateSentinelsFromLines (self,lines,delims):
"""Separate lines (a list of lines) into a tuple of two lists,
containing the sentinel and non-sentinel lines of the original list."""
strippedLines = self.removeSentinelsFromLines(lines,delims)
sentinelLines = self.getSentinelsFromLines(lines,delims)
return strippedLines,sentinelLines
#@-node:EKR.20040505081121:separateSentinelsFromFile/Lines
#@+node:EKR.20040505080156.2:removeSentinelsFromFile/Lines
def removeSentinelsFromFile (self,filename):
"""Return a copy of file with all sentinels removed."""
lines = file(filename).readlines()
delims = g.comment_delims_from_extension(filename)
return self.removeSentinelsFromLines(lines,delims)
def removeSentinelsFromLines (self,lines,delims):
"""Return a copy of lines with all sentinels removed."""
delim1,delim2,delim3 = delims
result = [] ; last_nosent_i = -1
for i in xrange(len(lines)):
if not g.is_sentinel(lines[i],delims):
result.append(lines[i])
last_nosent_i = i
#@ << remove the newline from result[-1] if line[i] is followed by @nonl >>
#@+node:ekr.20040716105102:<< remove the newline from result[-1] if line[i] is followed by @nonl >>
i = last_nosent_i
if i + 1 < len(lines):
line = lines[i+1]
j = g.skip_ws(line,0)
            if g.match(line,j,delim1):
j += len(delim1)
if g.match(line,j,"@nonl"):
line = lines[i]
if line[-1] == '\n':
assert(result[-1] == line)
result[-1] = line[:-1]
#@-node:ekr.20040716105102:<< remove the newline from result[-1] if line[i] is followed by @nonl >>
#@nl
return result
#@-node:EKR.20040505080156.2:removeSentinelsFromFile/Lines
#@+node:EKR.20040505080156.3:getSentinelsFromFile/Lines
def getSentinelsFromFile (self,filename,delims):
"""Returns all sentinels lines in a file."""
lines = file(filename).readlines()
delims = g.comment_delims_from_extension(filename)
return self.getSentinelsFromLines(lines,delims)
def getSentinelsFromLines (self,lines,delims):
"""Returns all sentinels lines in lines."""
return [line for line in lines if g.is_sentinel(line,delims)]
#@-node:EKR.20040505080156.3:getSentinelsFromFile/Lines
#@-node:EKR.20040505080156:Get or remove sentinel lines
#@+node:EKR.20040504150046.10:propagateDiffsToSentinelsFile
def propagateDiffsToSentinelsFile(self,sourcefilename,targetfilename):
#@ << init propagateDiffsToSentinelsFile vars >>
#@+node:EKR.20040504150046.11:<< init propagateDiffsToSentinelsFile vars >>
# Get the sentinel comment delims.
delims = g.comment_delims_from_extension(sourcefilename)
if not delims:
return
try:
# Create the readers.
sfile = file(sourcefilename)
tfile = file(targetfilename)
fat_lines = sfile.readlines() # Contains sentinels.
j_lines = tfile.readlines() # No sentinels.
i_lines,mapping = self.create_mapping(fat_lines,delims)
sfile.close()
tfile.close()
except Exception:
g.es_exception("can not open files")
return
#@-node:EKR.20040504150046.11:<< init propagateDiffsToSentinelsFile vars >>
#@nl
write_lines = self.propagateDiffsToSentinelsLines(
i_lines,j_lines,fat_lines,mapping)
# Update _source_ file if it is not the same as write_lines.
written = self.write_if_changed(write_lines,targetfilename,sourcefilename)
if written:
#@ << paranoia check>>
#@+node:EKR.20040504150046.12:<<paranoia check>>
# Check that 'push' will re-create the changed file.
strippedLines,sentinel_lines = self.separateSentinelsFromFile(sourcefilename)
if strippedLines != j_lines:
self.report_mismatch(strippedLines, j_lines,
"Propagating diffs did not work as expected",
"Content of sourcefile:",
"Content of modified file:")
# Check that no sentinels got lost.
fat_sentinel_lines = self.getSentinelsFromLines(fat_lines,delims)
if sentinel_lines != fat_sentinel_lines:
self.report_mismatch(sentinel_lines,fat_sentinel_lines,
"Propagating diffs modified sentinel lines:",
"Current sentinel lines:",
"Old sentinel lines:")
#@-node:EKR.20040504150046.12:<<paranoia check>>
#@nl
#@-node:EKR.20040504150046.10:propagateDiffsToSentinelsFile
#@+node:EKR.20040504145804.1:propagateDiffsToSentinelsLines (called from perfect import)
def propagateDiffsToSentinelsLines (self,
i_lines,j_lines,fat_lines,mapping):
"""Compare the 'i_lines' with 'j_lines' and propagate the diffs back into
'write_lines' making sure that all sentinels of 'fat_lines' are copied.
i/j_lines have no sentinels. fat_lines does."""
#@ << init propagateDiffsToSentinelsLines vars >>
#@+node:EKR.20040504145804.2:<< init propagateDiffsToSentinelsLines vars >>
# Indices into i_lines, j_lines & fat_lines.
i_pos = j_pos = fat_pos = 0
# These vars check that all ranges returned by get_opcodes() are contiguous.
i2_old = j2_old = -1
# Create the output lines.
write_lines = []
matcher = difflib.SequenceMatcher(None,i_lines,j_lines)
testing = self.testing
verbose = self.verbose
#@-node:EKR.20040504145804.2:<< init propagateDiffsToSentinelsLines vars >>
#@nl
#@ << copy the sentinels at the beginning of the file >>
#@+node:EKR.20040504145804.3:<< copy the sentinels at the beginning of the file >>
while fat_pos < mapping[0]:
line = fat_lines[fat_pos]
write_lines.append(line)
if testing:
print "copy initial line",fat_pos,line,
fat_pos += 1
#@-node:EKR.20040504145804.3:<< copy the sentinels at the beginning of the file >>
#@nl
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if testing:
if verbose: print
print "Opcode %7s %3d %3d %3d %3d" % (tag,i1,i2,j1,j2)
if verbose: print
#@ << update and check the loop invariant >>
#@+node:EKR.20040504145804.4:<< update and check the loop invariant>>
# We need the ranges returned by get_opcodes to completely cover the source lines being compared.
# We also need the ranges not to overlap.
assert(i2_old in (-1,i1))
assert(j2_old in (-1,j1))
i2_old = i2 ; j2_old = j2
# Check the loop invariants.
assert i_pos == i1
assert j_pos == j1
assert fat_pos == mapping[i1]
#@-node:EKR.20040504145804.4:<< update and check the loop invariant>>
#@nl
if tag == 'equal':
#@ << handle 'equal' tag >>
#@+node:EKR.20040504145804.5:<< handle 'equal' tag >>
# Copy the lines, including sentinels.
while fat_pos <= mapping[i2-1]:
line = fat_lines[fat_pos]
if 0: # too verbose.
if testing: print "Equal: copying ", line,
write_lines.append(line)
fat_pos += 1
if testing and verbose:
print "Equal: synch i", i_pos,i2
print "Equal: synch j", j_pos,j2
i_pos = i2
j_pos = j2
# Copy the sentinels which might follow the lines.
fat_pos = self.copy_sentinels(write_lines,fat_lines,fat_pos,mapping,i2-1,i2)
#@-node:EKR.20040504145804.5:<< handle 'equal' tag >>
#@nl
elif tag == 'replace':
#@ << handle 'replace' tag >>
#@+node:EKR.20040504145804.6:<< handle 'replace' tag >>
#@+at
#@nonl
# Replace lines that may span sentinels.
#
# For now, we put all the new contents after the first
# sentinel.
#
# A more complex approach: run the difflib across the
# different lines and try to
                # construct a mapping changed line => original line.
#@-at
#@@c
while j_pos < j2:
line = j_lines[j_pos]
if testing:
print "Replace i:",i_pos,repr(i_lines[i_pos])
print "Replace j:",j_pos,repr(line)
i_pos += 1
write_lines.append(line)
j_pos += 1
i_pos = i2
# Copy the sentinels which might be between the changed code.
fat_pos = self.copy_sentinels(write_lines,fat_lines,fat_pos,mapping,i1,i2)
#@-node:EKR.20040504145804.6:<< handle 'replace' tag >>
#@nl
elif tag == 'delete':
#@ << handle 'delete' tag >>
#@+node:EKR.20040504145804.7:<< handle 'delete' tag >>
if testing and verbose:
print "delete: i",i_pos,i1
print "delete: j",j_pos,j1
j_pos = j2
i_pos = i2
# Restore any deleted sentinels.
fat_pos = self.copy_sentinels(write_lines,fat_lines,fat_pos,mapping,i1,i2)
#@-node:EKR.20040504145804.7:<< handle 'delete' tag >>
#@nl
elif tag == 'insert':
#@ << handle 'insert' tag >>
#@+node:EKR.20040504145804.8:<< handle 'insert' tag >>
while j_pos < j2:
line = j_lines[j_pos]
if testing: print "Insert:", line,
write_lines.append(line)
j_pos += 1
# The input streams are already in synch.
#@-node:EKR.20040504145804.8:<< handle 'insert' tag >>
#@nl
else: assert 0,"bad tag"
#@ << copy the sentinels at the end of the file >>
#@+node:EKR.20040504145804.9:<< copy the sentinels at the end of the file >>
while fat_pos < len(fat_lines):
line = fat_lines[fat_pos]
write_lines.append(line)
if testing:
print "Append last line",line
fat_pos += 1
#@-node:EKR.20040504145804.9:<< copy the sentinels at the end of the file >>
#@nl
return write_lines
#@-node:EKR.20040504145804.1:propagateDiffsToSentinelsLines (called from perfect import)
#@+node:EKR.20040504150046.5:report_mismatch
def report_mismatch (self,lines1,lines2,message,lines1_message,lines2_message):
"""
Generate a report when something goes wrong.
"""
# __pychecker__ = '--no-argsused' # Most args are presently unused.
print '='*20
print message
if 0:
print lines1_message
print '-'*20
for line in lines1:
print line,
print '='*20
print lines2_message
print '-'*20
for line in lines2:
print line,
#@-node:EKR.20040504150046.5:report_mismatch
#@+node:ekr.20040718101315:stripWhitespaceFromBlankLines(before_lines)
def stripWhitespaceFromBlankLines (self,lines):
# All backslashes must be doubled.
"""Strip blanks and tabs from lines containing only blanks and tabs.
>>> import leoGlobals as g
>>> s = "a\\n \\t\\n\\t\\t \\t\\nb"
>>> theLines = g.splitLines(s)
>>> theLines
['a\\n', ' \\t\\n', '\\t\\t \\t\\n', 'b']
>>> g.mulderUpdateAlgorithm().stripWhitespaceFromBlankLines(theLines)
['a\\n', '\\n', '\\n', 'b']
"""
for i in xrange(len(lines)):
# lstrip does not exist in python 2.2.1.
stripped_line = lines[i]
while stripped_line and stripped_line[0] in (' ','\t'):
stripped_line = stripped_line [1:]
if stripped_line in ('\n',''):
lines[i] = stripped_line
return lines
#@-node:ekr.20040718101315:stripWhitespaceFromBlankLines(before_lines)
#@+node:EKR.20040504160820:write_if_changed
def write_if_changed(self,lines,sourcefilename,targetfilename):
"""
Replaces target file if it is not the same as 'lines',
and makes the modification date of target file the same as the source file.
Optionally backs up the overwritten file.
"""
copy = not os.path.exists(targetfilename) or lines != file(targetfilename).readlines()
if self.testing:
if copy:
print "Writing",targetfilename,"without sentinals"
else:
print "Files are identical"
if copy:
if self.do_backups:
#@ << make backup file >>
#@+node:EKR.20040504160820.1:<< make backup file >>
if os.path.exists(targetfilename):
count = 0
backupname = "%s.~%s~" % (targetfilename,count)
while os.path.exists(backupname):
count += 1
backupname = "%s.~%s~" % (targetfilename,count)
os.rename(targetfilename, backupname)
if self.testing:
print "backup file in ", backupname
#@-node:EKR.20040504160820.1:<< make backup file >>
#@nl
outfile = open(targetfilename, "w")
for line in lines:
outfile.write(line)
outfile.close()
self.copy_time(sourcefilename,targetfilename)
return copy
#@-node:EKR.20040504160820:write_if_changed
#@-others
#def doMulderUpdateAlgorithm(sourcefilename,targetfilename):
#
# mu = mulderUpdateAlgorithm()
#
# mu.pull_source(sourcefilename,targetfilename)
# mu.copy_time(targetfilename,sourcefilename)
#@-node:EKR.20040504150046:class mulderUpdateAlgorithm (leoGlobals)
#@+node:ekr.20031219074948.1:class nullObject
# From the Python cookbook, recipe 5.23
class nullObject:
"""An object that does nothing, and does it very well."""
# __pychecker__ = '--no-argsused'
def __init__ (self,*args,**keys): pass
def __call__ (self,*args,**keys): return self
def __repr__ (self): return "nullObject"
def __nonzero__ (self): return 0
def __delattr__(self,attr): return self
def __getattr__(self,attr): return self
def __setattr__(self,attr,val): return self
#@-node:ekr.20031219074948.1:class nullObject
#@+node:ekr.20031218072017.3103:g.computeWindowTitle
def computeWindowTitle (fileName):
    if fileName is None:
return "untitled"
else:
path,fn = g.os_path_split(fileName)
if path:
title = fn + " in " + path
else:
title = fn
return title
#@-node:ekr.20031218072017.3103:g.computeWindowTitle
#@+node:ekr.20031218072017.3138:g.executeScript
def executeScript (name):
"""Execute a script whose short python file name is given"""
mod_name,ext = g.os_path_splitext(name)
theFile = None
try:
# This code is in effect an import or a reload.
# This allows the user to modify scripts without leaving Leo.
import imp
theFile,filename,description = imp.find_module(mod_name)
imp.load_module(mod_name,theFile,filename,description)
except Exception:
g.es("Exception executing",name,color="red")
g.es_exception()
if theFile:
theFile.close()
#@-node:ekr.20031218072017.3138:g.executeScript
#@+node:ekr.20040331083824.1:g.fileLikeObject
# Note: we could use StringIo for this.
class fileLikeObject:
"""Define a file-like object for redirecting writes to a string.
The caller is responsible for handling newlines correctly."""
#@ @+others
#@+node:ekr.20050404151753: ctor
def __init__(self,fromString=None):
# New in 4.2.1: allow the file to be inited from string s.
if fromString:
self.list = g.splitLines(fromString) # Must preserve newlines!
else:
self.list = []
self.ptr = 0
        # In cStringIO the buffer is read-only if the initial value (fromString) is non-empty.
#@-node:ekr.20050404151753: ctor
#@+node:ekr.20050404151753.1:clear
def clear (self):
self.list = []
#@-node:ekr.20050404151753.1:clear
#@+node:ekr.20050404151753.2:close
def close (self):
pass
        # The StringIO version frees the memory buffer.
#@-node:ekr.20050404151753.2:close
#@+node:ekr.20050404151753.3:flush
def flush (self):
pass
#@-node:ekr.20050404151753.3:flush
#@+node:ekr.20050404151753.4:get & getvalue
def get (self):
return ''.join(self.list)
    getvalue = get # for compatibility with StringIO
#@-node:ekr.20050404151753.4:get & getvalue
#@+node:ekr.20050404151753.5:readline
def readline(self): # New for read-from-string (readOpenFile).
if self.ptr < len(self.list):
line = self.list[self.ptr]
# g.trace(repr(line))
self.ptr += 1
return line
else:
return ''
#@-node:ekr.20050404151753.5:readline
#@+node:ekr.20050404151753.6:write
def write (self,s):
if s:
self.list.append(s)
#@-node:ekr.20050404151753.6:write
#@-others
#@-node:ekr.20040331083824.1:g.fileLikeObject
#@+node:ekr.20031218072017.3126:g.funcToMethod
#@+at
#@nonl
# The following is taken from page 188 of the Python Cookbook.
#
# The following method allows you to add a function as a method of any class.
# That is, it converts the function to a method of the class. The method just
# added is available instantly to all existing instances of the class, and to
# all instances created in the future.
#
# The function's first argument should be self.
#
# The newly created method has the same name as the function unless the
# optional name argument is supplied, in which case that name is used as the
# method name.
#@-at
#@@c
def funcToMethod(f,theClass,name=None):
setattr(theClass,name or f.__name__,f)
# g.trace(name)
#@-node:ekr.20031218072017.3126:g.funcToMethod
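#@+node:ekr.20031218072017.3126.demo:_demo_funcToMethod (illustrative sketch)
# The Cookbook pattern above in action; the class and function here are
# illustrative only, not part of Leo.
def _demo_funcToMethod ():
    class _T: pass
    def double (self,n): return 2 * n
    g.funcToMethod(double,_T) # _T instances now have a double method.
    assert _T().double(21) == 42
#@-node:ekr.20031218072017.3126.demo:_demo_funcToMethod (illustrative sketch)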
#@+node:EKR.20040614071102.1:g.getScript
def getScript (c,p,useSelectedText=True,forcePythonSentinels=True,useSentinels=True):
'''Return the expansion of the selected text of node p.
    Return the expansion of all of node p's body text if p is not
    the current node or if there is no text selection.'''
at = c.atFileCommands ; w = c.frame.body.bodyCtrl
if not p:
p = c.currentPosition()
try:
if g.app.inBridge:
s = p.bodyString()
elif p == c.currentPosition():
if useSelectedText and w.hasSelection():
s = w.getSelectedText()
else:
s = w.getAllText()
else:
s = p.bodyString()
# Remove extra leading whitespace so the user may execute indented code.
s = g.removeExtraLws(s,c.tab_width)
if s.strip():
g.app.scriptDict["script1"]=s
script = at.writeFromString(p.copy(),s,
forcePythonSentinels=forcePythonSentinels,
useSentinels=useSentinels)
script = script.replace("\r\n","\n") # Use brute force.
# Important, the script is an **encoded string**, not a unicode string.
g.app.scriptDict["script2"]=script
else: script = ''
except Exception:
g.es_print("unexpected exception in g.getScript")
g.es_exception()
script = ''
# g.trace(type(script),repr(script))
return script
#@-node:EKR.20040614071102.1:g.getScript
#@+node:ekr.20050920084036.4:g.longestCommonPrefix & g.itemsMatchingPrefixInList
def longestCommonPrefix (s1,s2):
'''Find the longest prefix common to strings s1 and s2.'''
prefix = ''
for ch in s1:
if s2.startswith(prefix + ch):
prefix = prefix + ch
else:
return prefix
return prefix
def itemsMatchingPrefixInList (s,aList,matchEmptyPrefix=False):
    '''Return a sorted list of the items of aList that start with the prefix s.
    Also return the longest common prefix of all the matches.'''
if s:
pmatches = [a for a in aList if a.startswith(s)]
elif matchEmptyPrefix:
pmatches = aList[:]
else: pmatches = []
if pmatches:
pmatches.sort()
common_prefix = reduce(g.longestCommonPrefix,pmatches)
else:
common_prefix = ''
# g.trace(repr(s),len(pmatches))
return pmatches,common_prefix
#@-node:ekr.20050920084036.4:g.longestCommonPrefix & g.itemsMatchingPrefixInList
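#@+node:ekr.20050920084036.4.demo:_demo_prefixMatching (illustrative sketch)
# A short, self-checking sketch of the prefix helpers above; the word list is
# an arbitrary example.
def _demo_prefixMatching ():
    matches,common = g.itemsMatchingPrefixInList('fi',['file','find','fix','foo'])
    assert matches == ['file','find','fix'] and common == 'fi'
#@-node:ekr.20050920084036.4.demo:_demo_prefixMatching (illustrative sketch)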
#@+node:ekr.20031218072017.3144:g.makeDict
# From the Python cookbook.
def makeDict(**keys):
"""Returns a Python dictionary from using the optional keyword arguments."""
return keys
#@-node:ekr.20031218072017.3144:g.makeDict
#@+node:ekr.20060221083356:g.prettyPrintType
def prettyPrintType (obj):
if type(obj) in (
types.MethodType,types.UnboundMethodType,types.BuiltinMethodType):
return 'method'
elif type(obj) in (types.BuiltinFunctionType,types.FunctionType):
return 'function'
elif type(obj) == types.ModuleType:
return 'module'
elif type(obj) == types.InstanceType:
return 'object'
elif type(obj) in (types.UnicodeType,types.StringType):
return 'string'
else:
theType = str(type(obj))
if theType.startswith("<type '"): theType = theType[7:]
if theType.endswith("'>"): theType = theType[:-2]
return theType
#@-node:ekr.20060221083356:g.prettyPrintType
#@+node:ekr.20060410112600:g.stripBrackets
def stripBrackets (s):
'''Same as s.lstrip('<').rstrip('>') except it works for Python 2.2.1.'''
if s.startswith('<'):
s = s[1:]
if s.endswith('>'):
s = s[:-1]
return s
#@-node:ekr.20060410112600:g.stripBrackets
#@+node:ekr.20061031102333.2:g.getWord & getLine
def getWord (s,i):
'''Return i,j such that s[i:j] is the word surrounding s[i].'''
if i >= len(s): i = len(s) - 1
if i < 0: i = 0
# Scan backwards.
while 0 <= i < len(s) and g.isWordChar(s[i]):
i-= 1
i += 1
# Scan forwards.
j = i
while 0 <= j < len(s) and g.isWordChar(s[j]):
j += 1
return i,j
def getLine (s,i):
'''Return i,j such that s[i:j] is the line surrounding s[i].
s[i] is a newline only if the line is empty.
s[j] is a newline unless there is no trailing newline.
'''
if i > len(s): i = len(s) -1 # Bug fix: 10/6/07 (was if i >= len(s))
if i < 0: i = 0
j = s.rfind('\n',0,i) # A newline *ends* the line, so look to the left of a newline.
if j == -1: j = 0
else: j += 1
k = s.find('\n',i)
if k == -1: k = len(s)
else: k = k + 1
# g.trace('i,j,k',i,j,k,repr(s[j:k]))
return j,k
#@nonl
#@-node:ekr.20061031102333.2:g.getWord & getLine
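#@+node:ekr.20061031102333.2.demo:_demo_getWordAndLine (illustrative sketch)
# Self-checking sketch (the sample string is arbitrary): getWord expands to
# word boundaries, getLine to the enclosing line including its newline.
def _demo_getWordAndLine ():
    s = 'abc def\nxyz\n'
    i,j = g.getWord(s,5) # Index 5 is inside 'def'.
    assert s[i:j] == 'def'
    i,j = g.getLine(s,9) # Index 9 is inside 'xyz'.
    assert s[i:j] == 'xyz\n'
#@-node:ekr.20061031102333.2.demo:_demo_getWordAndLine (illustrative sketch)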
#@+node:ekr.20041219095213:import wrappers
#@+at
#@nonl
# 1/6/05: The problem with Tkinter is that imp.load_module is equivalent to
# reload.
#
# The solution is easy: simply return sys.modules.get(moduleName) if
# moduleName is in sys.modules!
#@-at
#@+node:ekr.20040917061619:g.cantImport
def cantImport (moduleName,pluginName=None,verbose=True):
"""Print a "Can't Import" message and return None."""
s = "Can not import %s" % moduleName
if pluginName: s = s + " from plugin %s" % pluginName
if not g.app or not g.app.gui:
print s
elif g.unitTesting:
return
elif g.app.gui.guiName() == 'tkinter' and moduleName in ('Tkinter','Pmw'):
return
else:
g.es_print('',s,color="blue")
#@-node:ekr.20040917061619:g.cantImport
#@+node:ekr.20041219095213.1:g.importModule
def importModule (moduleName,pluginName=None,verbose=False):
'''Try to import a module as Python's import command does.
moduleName is the module's name, without file extension.'''
module = sys.modules.get(moduleName)
if not module:
try:
theFile = None
import imp
try:
data = imp.find_module(moduleName) # This can open the file.
theFile,pathname,description = data
module = imp.load_module(moduleName,theFile,pathname,description)
except Exception: # Importing a module can throw exceptions other than ImportError.
g.cantImport(moduleName,pluginName=pluginName,verbose=verbose)
finally:
if theFile: theFile.close()
return module
#@-node:ekr.20041219095213.1:g.importModule
#@+node:ekr.20041219071407:g.importExtension & helpers
def importExtension (moduleName,pluginName=None,verbose=False,required=False):
'''Try to import a module. If that fails,
try to import the module from Leo's extensions directory.
moduleName is the module's name, without file extension.'''
# g.trace(verbose,moduleName,pluginName)
import os
module = g.importModule(moduleName,pluginName=pluginName,verbose=False)
extensionsDir = g.app and g.app.extensionsDir or os.path.join(os.path.dirname(__file__),'..','extensions')
if not module:
module = g.importFromPath(moduleName,extensionsDir,pluginName=pluginName,verbose=verbose)
if not module and required:
g.cantImportDialog(pluginName,moduleName)
try: # Avoid raising SystemExit if possible.
import os ; os._exit(1) # May not be available on all platforms.
except Exception:
import sys ; sys.exit(1)
return module
#@+node:ekr.20060329083657:cantImportDialog & helpers
def cantImportDialog (pluginName,moduleName):
'''Attempt to show a Tk dialog if an import fails.
Yes, this is a small Tk dependency, but it can't be helped.'''
message = '''
%s requires the %s module.
Official distributions contain this module in Leo's extensions folder,
but this module may be missing if you get Leo from cvs.
''' % (pluginName,moduleName)
if 1: # Requires minimal further imports.
try:
import Tkinter as Tk
root = g.app.root or Tk.Tk()
title = 'Can not import %s' % moduleName
top = createDialogFrame(Tk,root,title,message)
root.wait_window(top)
except ImportError:
print 'Can not import %s' % moduleName
print 'Can not import Tkinter'
print 'Leo must now exit'
else: # Can cause import problems during startup.
import leoTkinterDialog
d = leoTkinterDialog.tkinterAskOk(
c=None,title='Can not import %s' %(moduleName),
message=message)
d.run(modal=True)
#@+node:ekr.20060329083310.1:createDialogFrame
def createDialogFrame(Tk,root,title,message):
"""Create the Tk.Toplevel widget for a leoTkinterDialog."""
top = Tk.Toplevel(root)
top.title(title)
def onKey(event,top=top):
if event.char.lower() in ('\n','\r'):
top.destroy()
top.bind("<Key>",onKey)
f = Tk.Frame(top)
f.pack(side="top",expand=1,fill="both")
label = Tk.Label(f,text=message)
label.pack(pady=10)
def okButton(top=top):
top.destroy()
buttons = {"text":'OK',"command":okButton,"default":True}, # Singleton tuple.
createDialogButtons(Tk,top,buttons)
center(top)
top.lift()
top.focus_force()
# Attach the icon at idle time.
def attachIconCallback(top=top):
g.app.gui.attachLeoIcon(top)
top.after_idle(attachIconCallback)
return top
#@-node:ekr.20060329083310.1:createDialogFrame
#@+node:ekr.20060329083310.2:createDialogButtons
def createDialogButtons (Tk,top,buttons):
"""Create a row of buttons.
buttons is a list of dictionaries containing the properties of each button."""
f = Tk.Frame(top)
f.pack(side="top",padx=30)
for d in buttons:
text = d.get("text","<missing button name>")
isDefault = d.get("default",False)
underline = d.get("underline",0)
command = d.get("command",None)
bd = g.choose(isDefault,4,2)
b = Tk.Button(f,width=6,text=text,bd=bd,underline=underline,command=command)
b.pack(side="left",padx=5,pady=10)
#@-node:ekr.20060329083310.2:createDialogButtons
#@+node:ekr.20060329085417.1:center
def center(top):
"""Center the dialog on the screen.
WARNING: Call this routine _after_ creating a dialog.
(This routine inhibits the grid and pack geometry managers.)"""
sw = top.winfo_screenwidth()
sh = top.winfo_screenheight()
w,h,x,y = g.get_window_info(top)
# Set the new window coordinates, leaving w and h unchanged.
x = (sw - w)/2
y = (sh - h)/2
top.geometry("%dx%d%+d%+d" % (w,h,x,y))
return w,h,x,y
#@-node:ekr.20060329085417.1:center
#@+node:ekr.20060329085612:get_window_info
# WARNING: Call this routine _after_ creating a dialog.
# (This routine inhibits the grid and pack geometry managers.)
def get_window_info (top):
# This is an emergency measure: this call is NOT a major Tk-dependency.
top.update_idletasks() # Required to get proper info.
# Get the information about top and the screen.
geom = top.geometry() # geom = "WidthxHeight+XOffset+YOffset"
dim,x,y = string.split(geom,'+')
w,h = string.split(dim,'x')
w,h,x,y = int(w),int(h),int(x),int(y)
return w,h,x,y
#@-node:ekr.20060329085612:get_window_info
#@-node:ekr.20060329083657:cantImportDialog & helpers
#@-node:ekr.20041219071407:g.importExtension & helpers
#@+node:ekr.20031218072017.2278:g.importFromPath
def importFromPath (name,path,pluginName=None,verbose=False):
fn = g.shortFileName(name)
moduleName,ext = g.os_path_splitext(fn)
path = g.os_path_normpath(path)
path = g.toEncodedString(path,app and app.tkEncoding or 'ascii')
# g.trace(verbose,name,pluginName)
module = sys.modules.get(moduleName)
if not module:
try:
theFile = None
import imp
try:
data = imp.find_module(moduleName,[path]) # This can open the file.
theFile,pathname,description = data
module = imp.load_module(moduleName,theFile,pathname,description)
except ImportError:
if 0: # verbose:
g.es_print("Exception in g.importFromPath",color='blue')
g.es_exception()
except Exception:
g.es_print("unexpected exception in g.importFromPath(%s)" %
(name),color='blue')
g.es_exception()
# Put no return statements before here!
finally:
if theFile: theFile.close()
if not module:
g.cantImport(moduleName,pluginName=pluginName,verbose=verbose)
return module
#@-node:ekr.20031218072017.2278:g.importFromPath
#@-node:ekr.20041219095213:import wrappers
#@+node:ekr.20040629162023:readLines class and generator
#@+node:EKR.20040612114220.3:g.readLinesGenerator
# This has been replaced by readLinesClass because
# yield is not valid in jython.
# def readLinesGenerator(s):
# for line in g.splitLines(s):
# # g.trace(repr(line))
# yield line
# yield ''
#@-node:EKR.20040612114220.3:g.readLinesGenerator
#@+node:EKR.20040612114220.4:class readLinesClass
class readLinesClass:
"""A class whose next method provides a readline method for Python's tokenize module."""
def __init__ (self,s):
self.lines = g.splitLines(s)
self.i = 0
def next(self):
if self.i < len(self.lines):
line = self.lines[self.i]
self.i += 1
else:
line = ''
# g.trace(repr(line))
return line
#@-node:EKR.20040612114220.4:class readLinesClass
#@-node:ekr.20040629162023:readLines class and generator
#@-node:EKR.20040612114220:Utility classes, functions & objects...
#@+node:ekr.20031218072017.3197:Whitespace...
#@+node:ekr.20051014175117:g.adjustTripleString (same as removeExtraLws)
def adjustTripleString (s,tab_width):
'''Remove leading indentation from a triple-quoted string.
This works around the fact that Leo nodes can't represent underindented strings.
'''
# Compute the minimum leading whitespace of all non-blank lines.
lines = g.splitLines(s)
w = -1
    for line in lines: # Use a separate loop variable so s keeps naming the whole string.
        if line.strip():
            lws = g.get_leading_ws(line)
w2 = g.computeWidth(lws,tab_width)
if w < 0: w = w2
else: w = min(w,w2)
# g.trace('w',w)
if w <= 0: return s
# Remove the leading whitespace.
result = [g.removeLeadingWhitespace(line,w,tab_width) for line in lines]
result = ''.join(result)
return result
#@-node:ekr.20051014175117:g.adjustTripleString (same as removeExtraLws)
#@+node:ekr.20031218072017.3198:computeLeadingWhitespace
# Returns optimized whitespace corresponding to width with the indicated tab_width.
def computeLeadingWhitespace (width, tab_width):
if width <= 0:
return ""
if tab_width > 1:
tabs = width / tab_width
blanks = width % tab_width
return ('\t' * tabs) + (' ' * blanks)
else: # 7/3/02: negative tab width always gets converted to blanks.
return (' ' * width)
#@-node:ekr.20031218072017.3198:computeLeadingWhitespace
#@+node:ekr.20031218072017.3199:computeWidth
# Returns the width of s, assuming s starts a line, with indicated tab_width.
def computeWidth (s, tab_width):
w = 0
for ch in s:
if ch == '\t':
w += (abs(tab_width) - (w % abs(tab_width)))
else:
w += 1
return w
#@-node:ekr.20031218072017.3199:computeWidth
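#@+node:ekr.20031218072017.3199.demo:_demo_whitespaceWidth (illustrative sketch)
# The two helpers above agree with each other; a self-checking sketch
# (tab_width 4 is an arbitrary choice):
def _demo_whitespaceWidth ():
    ws = g.computeLeadingWhitespace(6,4) # One tab plus two blanks.
    assert ws == '\t  ' and g.computeWidth(ws,4) == 6
#@-node:ekr.20031218072017.3199.demo:_demo_whitespaceWidth (illustrative sketch)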
#@+node:ekr.20031218072017.3200:get_leading_ws
def get_leading_ws(s):
"""Returns the leading whitespace of 's'."""
i = 0 ; n = len(s)
while i < n and s[i] in (' ','\t'):
i += 1
return s[0:i]
#@-node:ekr.20031218072017.3200:get_leading_ws
#@+node:ekr.20031218072017.3201:optimizeLeadingWhitespace
# Optimize leading whitespace in line with the given tab_width.
def optimizeLeadingWhitespace (line,tab_width):
i, width = g.skip_leading_ws_with_indent(line,0,tab_width)
s = g.computeLeadingWhitespace(width,tab_width) + line[i:]
return s
#@-node:ekr.20031218072017.3201:optimizeLeadingWhitespace
#@+node:ekr.20040723093558:regularizeTrailingNewlines
#@+at
#
# The caller should call g.stripBlankLines before calling this routine if
# desired.
#
# This routine does _not_ simply call rstrip(): that would delete all trailing
# whitespace-only lines, and in some cases that would change the meaning of
# program or data.
#
#@-at
#@@c
def regularizeTrailingNewlines(s,kind):
"""Kind is 'asis', 'zero' or 'one'."""
pass
#@-node:ekr.20040723093558:regularizeTrailingNewlines
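#@+node:ekr.20040723093558.demo:_regularizeTrailingNewlinesSketch (illustrative sketch)
# The routine above is a stub. One plausible implementation of the three kinds
# named in its docstring (this sketch is an assumption, not original Leo code);
# it strips only newline characters, so whitespace-only lines are preserved,
# consistent with the warning above about not using a plain rstrip().
def _regularizeTrailingNewlinesSketch (s,kind):
    if kind == 'zero':
        return s.rstrip('\n')
    elif kind == 'one':
        return s.rstrip('\n') + '\n'
    else: # 'asis'
        return s
#@-node:ekr.20040723093558.demo:_regularizeTrailingNewlinesSketch (illustrative sketch)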
#@+node:ekr.20031218072017.3202:removeLeadingWhitespace
# Remove whitespace up to first_ws wide in s, given tab_width, the width of a tab.
def removeLeadingWhitespace (s,first_ws,tab_width):
j = 0 ; ws = 0
for ch in s:
if ws >= first_ws:
break
elif ch == ' ':
j += 1 ; ws += 1
elif ch == '\t':
j += 1 ; ws += (abs(tab_width) - (ws % abs(tab_width)))
else: break
if j > 0:
s = s[j:]
return s
#@-node:ekr.20031218072017.3202:removeLeadingWhitespace
#@+node:ekr.20050211120242.2:g.removeExtraLws
def removeExtraLws (s,tab_width):
'''Remove extra indentation from one or more lines.
Warning: used by getScript. This is *not* the same as g.adjustTripleString.'''
lines = g.splitLines(s)
# Find the first non-blank line and compute w, the width of its leading whitespace.
    for line in lines: # Use a separate loop variable so s keeps naming the whole string.
        if line.strip():
            lws = g.get_leading_ws(line)
w = g.computeWidth(lws,tab_width)
# g.trace('w',w)
break
else: return s
# Remove the leading whitespace.
result = [g.removeLeadingWhitespace(line,w,tab_width) for line in lines]
result = ''.join(result)
if 0:
g.trace('lines...')
for line in g.splitLines(result):
print repr(line)
return result
#@-node:ekr.20050211120242.2:g.removeExtraLws
#@+node:ekr.20031218072017.3203:removeTrailingWs
# Warning: string.rstrip also removes newlines!
def removeTrailingWs(s):
j = len(s)-1
while j >= 0 and (s[j] == ' ' or s[j] == '\t'):
j -= 1
return s[:j+1]
#@-node:ekr.20031218072017.3203:removeTrailingWs
#@+node:ekr.20031218072017.3204:skip_leading_ws
# Skips leading up to width leading whitespace.
def skip_leading_ws(s,i,ws,tab_width):
count = 0
while count < ws and i < len(s):
ch = s[i]
if ch == ' ':
count += 1
i += 1
elif ch == '\t':
count += (abs(tab_width) - (count % abs(tab_width)))
i += 1
else: break
return i
#@-node:ekr.20031218072017.3204:skip_leading_ws
#@+node:ekr.20031218072017.3205:skip_leading_ws_with_indent
def skip_leading_ws_with_indent(s,i,tab_width):
"""Skips leading whitespace and returns (i, indent),
- i points after the whitespace
- indent is the width of the whitespace, assuming tab_width wide tabs."""
count = 0 ; n = len(s)
while i < n:
ch = s[i]
if ch == ' ':
count += 1
i += 1
elif ch == '\t':
count += (abs(tab_width) - (count % abs(tab_width)))
i += 1
else: break
return i, count
#@-node:ekr.20031218072017.3205:skip_leading_ws_with_indent
#@+node:ekr.20040723093558.1:stripBlankLines
def stripBlankLines(s):
lines = g.splitLines(s)
for i in xrange(len(lines)):
line = lines[i]
j = g.skip_ws(line,0)
if j >= len(line):
lines[i] = ''
# g.trace("%4d %s" % (i,repr(lines[i])))
elif line[j] == '\n':
lines[i] = '\n'
# g.trace("%4d %s" % (i,repr(lines[i])))
return ''.join(lines)
#@-node:ekr.20040723093558.1:stripBlankLines
#@-node:ekr.20031218072017.3197:Whitespace...
#@+node:ekr.20060913091602:ZODB support
#@+node:ekr.20060913090832.1:g.init_zodb
init_zodb_import_failed = False
init_zodb_failed = {} # Keys are paths, values are True.
init_zodb_db = {} # Keys are paths, values are ZODB.DB instances.
def init_zodb (pathToZodbStorage,verbose=True):
    '''Return a ZODB.DB instance from ZODB.FileStorage.FileStorage(pathToZodbStorage),
    or None on any error.'''
global init_zodb_db, init_zodb_failed, init_zodb_import_failed
db = init_zodb_db.get(pathToZodbStorage)
if db: return db
if init_zodb_import_failed: return None
failed = init_zodb_failed.get(pathToZodbStorage)
if failed: return None
try:
        import ZODB
        import ZODB.FileStorage # The submodule must be imported explicitly before use below.
except ImportError:
if verbose:
g.es('g.init_zodb: can not import ZODB')
g.es_exception()
init_zodb_import_failed = True
return None
try:
storage = ZODB.FileStorage.FileStorage(pathToZodbStorage)
init_zodb_db [pathToZodbStorage] = db = ZODB.DB(storage)
return db
except Exception:
if verbose:
g.es('g.init_zodb: exception creating ZODB.DB instance')
g.es_exception()
init_zodb_failed [pathToZodbStorage] = True
return None
#@nonl
#@-node:ekr.20060913090832.1:g.init_zodb
#@-node:ekr.20060913091602:ZODB support
#@-others
#@-node:ekr.20031218072017.3093:@thin leoGlobals.py
#@-leo
# --- azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/linux_os_info_fragment.py (repo: xiafu-msft/azure-sdk-for-python, license: MIT) ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinuxOsInfoFragment(Model):
"""Information about a Linux OS.
:param linux_os_state: The state of the Linux OS (i.e. NonDeprovisioned,
DeprovisionRequested, DeprovisionApplied). Possible values include:
'NonDeprovisioned', 'DeprovisionRequested', 'DeprovisionApplied'
:type linux_os_state: str or ~azure.mgmt.devtestlabs.models.LinuxOsState
"""
_attribute_map = {
'linux_os_state': {'key': 'linuxOsState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LinuxOsInfoFragment, self).__init__(**kwargs)
self.linux_os_state = kwargs.get('linux_os_state', None)
# --- 0.23/_downloads/6a455d4d592574555169872fa244fae6/mne_inverse_connectivity_spectrum.py (repo: mne-tools/mne-tools.github.io, license: BSD-3-Clause) ---
"""
==============================================================
Compute full spectrum source space connectivity between labels
==============================================================
The connectivity is computed between 4 labels across the spectrum
between 7.5 Hz and 40 Hz.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Read some labels
names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
for name in names]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin, fmax = 7.5, 40.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=1)
n_rows, n_cols = con.shape[:2]
fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
for i in range(n_rows):
for j in range(i + 1):
if i == j:
axes[i, j].set_axis_off()
continue
axes[i, j].plot(freqs, con[i, j, :])
axes[j, i].plot(freqs, con[i, j, :])
if j == 0:
axes[i, j].set_ylabel(names[i])
axes[0, i].set_title(names[i])
if i == (n_rows - 1):
axes[i, j].set_xlabel(names[j])
axes[i, j].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
axes[j, i].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
# Show band limits
for f in [8, 12, 18, 35]:
axes[i, j].axvline(f, color='k')
axes[j, i].axvline(f, color='k')
plt.tight_layout()
plt.show()
# --- abc168/venv/Scripts/easy_install-3.7-script.py (repo: anthonyouch/Competitive-Programming-, no license) ---
#!"C:\Users\Anthony Ouch\PycharmProjects\atcoder168\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
# --- ephys_nlm/example_datasets/__init__.py (repo: magland/ephys_nlm, license: Apache-2.0) ---
from .toy_example import toy_example
from .synthesize_random_firings import synthesize_random_firings
from .synthesize_random_waveforms import synthesize_random_waveforms
from .synthesize_timeseries import synthesize_timeseries
| [
"[email protected]"
]
| |
8d7ea9d63120eb522d967efccb92a92d86189d5a | 66634946aec18840c00b0e568c41faf3e9f473e7 | /Level2/Lessons17686/gamjapark.py | 162143a6889a0bd6b0edd4c59ff9e13c0cc3a062 | [
"MIT"
]
| permissive | StudyForCoding/ProgrammersLevel | 0525521b26ad73dcc1fe58a1b2f303b613c3a2f6 | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | refs/heads/main | 2023-08-14T23:15:53.108351 | 2021-10-05T16:04:32 | 2021-10-05T16:04:32 | 354,728,963 | 0 | 1 | MIT | 2021-10-05T16:04:33 | 2021-04-05T05:26:25 | Python | UTF-8 | Python | false | false | 1,317 | py | # [3차] 파일명 정렬
import re
def solution(files):
answer = []
for i in range(len(files)):
        name = re.split(r'(\d+)', files[i])  # split into digit / non-digit tokens
name.append(name[0].lower())
name.append(int(name[1][:5]))
answer.append(name)
answer.sort(key=lambda x: (x[-2],x[-1]))
answer = list(map(lambda x: "".join(x[:len(x)-2]), answer))
return answer
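# Illustrative run (the well-known test case for this problem; shown here as a
# hypothetical example, not part of the original file):
# solution(["img12.png", "img10.png", "img02.jpg", "img1.png", "IMG01.GIF", "img2.JPG"])
# -> ["img1.png", "IMG01.GIF", "img02.jpg", "img2.JPG", "img10.png", "img12.png"]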
'''
Grading started.
Accuracy tests
Test 1 〉 Passed (0.16ms, 10.4MB)
Test 2 〉 Passed (0.15ms, 10.3MB)
Test 3 〉 Passed (2.56ms, 10.9MB)
Test 4 〉 Passed (3.39ms, 10.8MB)
Test 5 〉 Passed (2.93ms, 10.7MB)
Test 6 〉 Passed (3.11ms, 10.7MB)
Test 7 〉 Passed (2.95ms, 10.7MB)
Test 8 〉 Passed (2.39ms, 10.6MB)
Test 9 〉 Passed (2.85ms, 10.8MB)
Test 10 〉 Passed (2.95ms, 10.8MB)
Test 11 〉 Passed (2.64ms, 10.7MB)
Test 12 〉 Passed (3.37ms, 11MB)
Test 13 〉 Passed (2.61ms, 11MB)
Test 14 〉 Passed (3.47ms, 11.2MB)
Test 15 〉 Passed (3.47ms, 11.2MB)
Test 16 〉 Passed (3.07ms, 10.9MB)
Test 17 〉 Passed (2.53ms, 10.9MB)
Test 18 〉 Passed (2.60ms, 10.7MB)
Test 19 〉 Passed (2.83ms, 10.7MB)
Test 20 〉 Passed (3.15ms, 10.9MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
''' | [
"[email protected]"
]
| |
87dc014de771f52f91b8ca34703b75633023e3d9 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200605191738.py | d7dbc86902639a050395ada6e3df8d94816b350c | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def Strings(str):
values = {}
newArray = []
for i in str:
i.split(':')
Strings(["A:1","B:3","C:3","A:4","B:2"])
| [
"[email protected]"
]
| |
dad188cc567965957bad39f4ac1d5ce4fc0d2b77 | 5b8073c92445d9934f56c8f4b1df29f6bae83c75 | /.history/app_20190813205014.py | c5ff78835facedfcdf4559d07eeda2e91d305b25 | []
| no_license | lodorg/bcrest | 60dd80fd53158038fedcecc00f32965722a4f6dc | b44b84bc5b5c80f50e2385ed504107f4e0134f4e | refs/heads/master | 2022-02-25T22:14:42.097130 | 2019-08-13T17:40:30 | 2019-08-13T17:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | from flask import Flask, request, flash, redirect
from werkzeug import secure_filename
import os
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'imgs'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
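# e.g. allowed_file('photo.JPG') -> True, allowed_file('run.exe') -> False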
@app.route('/uploader', methods = ['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
return "No file to uploaded"
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return "No file to uploaded"
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
flash('File successfully uploaded')
return "Success"
else:
flash('Allowed file types are txt, pdf, png, jpg, jpeg, gif')
return "Not allowed type file"
if __name__ == '__main__':
app.run(debug = True) | [
"[email protected]"
]
| |
6681b5acb57afb651ba249239227b893cdf1ee7e | 9ab9d9a3883471763edbceea59a0e83170581b5f | /eggs/Cheetah-2.2.2-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/cheetah | 3a2f5a77680766e80358be89ad2dadf17b39984a | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | asmmhossain/phyG | 24dc211dad5b3e89c87ff384e841f2e98bbd52db | 023f505b705ab953f502cbc55e90612047867583 | refs/heads/master | 2022-11-21T12:43:46.172725 | 2014-02-14T12:33:08 | 2014-02-14T12:33:08 | 13,800,552 | 0 | 1 | NOASSERTION | 2020-07-25T21:05:41 | 2013-10-23T11:04:25 | Python | UTF-8 | Python | false | false | 119 | #!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
from Cheetah.CheetahWrapper import _cheetah
_cheetah()
| [
"[email protected]"
]
| ||
3e6d34849453d3637b8ab09d1c78e6147d34e310 | 0ba1743e9f865a023f72a14d3a5c16b99ee7f138 | /problems/test_0190.py | ad62eb3b35a711a2e1a2cf135cd8719826a02c15 | [
"Unlicense"
]
| permissive | chrisxue815/leetcode_python | d0a38a4168243b0628256825581a6df1b673855c | a33eb7b833f6998972e5340d383443f3a2ee64e3 | refs/heads/main | 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | import unittest
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
# Hacker's Delight, Figure 7-1
# See OpenJDK Integer.reverse():
# https://github.com/openjdk/jdk/blob/f37d9c8abca50b65ed232831a06d60c1d015013f/src/java.base/share/classes/java/lang/Integer.java#L1753
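        # Each step below swaps progressively larger fields: single bits,
        # 2-bit pairs, then nibbles; the final return swaps the four bytes.
        # The 0xffffffff masks keep the value within 32 bits, since Python
        # ints are arbitrary-precision.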
n = (n & 0x55555555) << 1 | (n >> 1) & 0x55555555
n = (n & 0x33333333) << 2 | (n >> 2) & 0x33333333
n = (n & 0x0f0f0f0f) << 4 | (n >> 4) & 0x0f0f0f0f
return (n << 24) & 0xffffffff | (n & 0xff00) << 8 | (n >> 8) & 0xff00 | n >> 24
class Test(unittest.TestCase):
def test(self):
self._test(2, 0x40000000)
self._test(43261596, 964176192)
def _test(self, n, expected):
actual = Solution().reverseBits(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
9415624b699589da280acdebd953fda62063c25c | 35573eebb47cf243f04e1125725920b252de6261 | /python/ray/train/base_trainer.py | e38c4080bc297942c2dead159066af0e4633e35b | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | timgates42/ray | 05df4fe47a16a028c92fea106c4a2b316665001d | cd95569b014cf06479b96875c63eba171e92ec97 | refs/heads/master | 2023-03-18T00:59:27.757929 | 2022-07-13T21:57:24 | 2022-07-13T21:57:24 | 227,523,984 | 0 | 0 | Apache-2.0 | 2019-12-12T05:01:58 | 2019-12-12T05:01:57 | null | UTF-8 | Python | false | false | 17,102 | py | import abc
import inspect
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
import ray
from ray.air._internal.config import ensure_only_allowed_dataclass_keys_updated
from ray.air.checkpoint import Checkpoint
from ray.air.config import RunConfig, ScalingConfig, ScalingConfigDataClass
from ray.air.result import Result
from ray.train.constants import TRAIN_DATASET_KEY
from ray.tune import Trainable
from ray.tune.error import TuneError
from ray.tune.execution.placement_groups import PlacementGroupFactory
from ray.tune.trainable import wrap_function
from ray.util import PublicAPI
from ray.util.annotations import DeveloperAPI
from ray.util.ml_utils.dict import merge_dicts
if TYPE_CHECKING:
from ray.data import Dataset
from ray.data.preprocessor import Preprocessor
# A type representing either a ray.data.Dataset or a function that returns a
# ray.data.Dataset and accepts no arguments.
GenDataset = Union["Dataset", Callable[[], "Dataset"]]
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
class TrainingFailedError(RuntimeError):
"""An error indicating that training has failed."""
pass
@DeveloperAPI
class BaseTrainer(abc.ABC):
"""Defines interface for distributed training on Ray.
Note: The base ``BaseTrainer`` class cannot be instantiated directly. Only
one of its subclasses can be used.
How does a trainer work?
- First, initialize the Trainer. The initialization runs locally,
so heavyweight setup should not be done in __init__.
- Then, when you call ``trainer.fit()``, the Trainer is serialized
and copied to a remote Ray actor. The following methods are then
called in sequence on the remote actor.
- ``trainer.setup()``: Any heavyweight Trainer setup should be
specified here.
- ``trainer.preprocess_datasets()``: The provided
ray.data.Dataset are preprocessed with the provided
ray.data.Preprocessor.
- ``trainer.train_loop()``: Executes the main training logic.
- Calling ``trainer.fit()`` will return a ``ray.result.Result``
object where you can access metrics from your training run, as well
as any checkpoints that may have been saved.
**How do I create a new Trainer?**
    Subclass ``ray.train.trainer.BaseTrainer``, override the ``training_loop``
    method, and optionally override ``setup``.
.. code-block:: python
import torch
from ray.train.trainer import BaseTrainer
from ray import tune
from ray.air import session
class MyPytorchTrainer(BaseTrainer):
def setup(self):
self.model = torch.nn.Linear(1, 1)
self.optimizer = torch.optim.SGD(
self.model.parameters(), lr=0.1)
def training_loop(self):
# You can access any Trainer attributes directly in this method.
# self.datasets["train"] has already been
# preprocessed by self.preprocessor
dataset = self.datasets["train"]
torch_ds = dataset.to_torch(label_column="y")
loss_fn = torch.nn.MSELoss()
for epoch_idx in range(10):
loss = 0
num_batches = 0
for X, y in iter(torch_ds):
# Compute prediction error
pred = self.model(X)
batch_loss = loss_fn(pred, y.float())
# Backpropagation
self.optimizer.zero_grad()
batch_loss.backward()
self.optimizer.step()
loss += batch_loss.item()
num_batches += 1
loss /= num_batches
# Use Tune functions to report intermediate
# results.
session.report({"loss": loss, "epoch": epoch_idx})
**How do I use an existing Trainer or one of my custom Trainers?**
Initialize the Trainer, and call Trainer.fit()
.. code-block:: python
import ray
train_dataset = ray.data.from_items(
[{"x": i, "y": i} for i in range(3)])
my_trainer = MyPytorchTrainer(datasets={"train": train_dataset})
result = my_trainer.fit()
Args:
scaling_config: Configuration for how to scale training.
run_config: Configuration for the execution of the training run.
datasets: Any Ray Datasets to use for training. Use the key "train"
to denote which dataset is the training
dataset. If a ``preprocessor`` is provided and has not already been fit,
it will be fit on the training dataset. All datasets will be transformed
by the ``preprocessor`` if one is provided.
preprocessor: A preprocessor to preprocess the provided datasets.
resume_from_checkpoint: A checkpoint to resume training from.
"""
_scaling_config_allowed_keys: List[str] = ["trainer_resources"]
def __init__(
self,
*,
scaling_config: Optional[ScalingConfig] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
preprocessor: Optional["Preprocessor"] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
self.scaling_config = scaling_config if scaling_config is not None else {}
self.run_config = run_config if run_config is not None else RunConfig()
self.datasets = datasets if datasets is not None else {}
self.preprocessor = preprocessor
self.resume_from_checkpoint = resume_from_checkpoint
self._validate_attributes()
def __new__(cls, *args, **kwargs):
"""Store the init args as attributes so this can be merged with Tune hparams."""
trainer = super(BaseTrainer, cls).__new__(cls)
parameters = inspect.signature(cls.__init__).parameters
parameters = list(parameters.keys())
# Remove self.
parameters = parameters[1:]
arg_dict = dict(zip(parameters, args))
trainer._param_dict = {**arg_dict, **kwargs}
return trainer
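        # Note: `_param_dict` is what `as_trainable()` later merges with the
        # per-trial Tune `config`, which is how constructor arguments become
        # tunable hyperparameters.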
def _validate_attributes(self):
"""Called on __init()__ to validate trainer attributes."""
# Run config
if not isinstance(self.run_config, RunConfig):
raise ValueError(
f"`run_config` should be an instance of `ray.air.RunConfig`, "
f"found {type(self.run_config)} with value `{self.run_config}`."
)
# Scaling config
# Todo: move to ray.air.ScalingConfig
if not isinstance(self.scaling_config, dict):
raise ValueError(
f"`scaling_config` should be an instance of `dict`, "
f"found {type(self.scaling_config)} with value `{self.scaling_config}`."
)
# Datasets
if not isinstance(self.datasets, dict):
raise ValueError(
f"`datasets` should be a dict mapping from a string to "
f"`ray.data.Dataset` objects, "
f"found {type(self.datasets)} with value `{self.datasets}`."
)
elif any(
not isinstance(ds, ray.data.Dataset) and not callable(ds)
for ds in self.datasets.values()
):
raise ValueError(
f"At least one value in the `datasets` dict is not a "
f"`ray.data.Dataset`: {self.datasets}"
)
# Preprocessor
if self.preprocessor is not None and not isinstance(
self.preprocessor, ray.data.Preprocessor
):
raise ValueError(
f"`preprocessor` should be an instance of `ray.data.Preprocessor`, "
f"found {type(self.preprocessor)} with value `{self.preprocessor}`."
)
if self.resume_from_checkpoint is not None and not isinstance(
self.resume_from_checkpoint, ray.air.Checkpoint
):
raise ValueError(
f"`resume_from_checkpoint` should be an instance of "
f"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} "
f"with value `{self.resume_from_checkpoint}`."
)
@classmethod
def _validate_and_get_scaling_config_data_class(
cls, dataclass_or_dict: Union[ScalingConfigDataClass, Dict[str, Any]]
) -> ScalingConfigDataClass:
"""Return scaling config dataclass after validating updated keys."""
if isinstance(dataclass_or_dict, dict):
dataclass_or_dict = ScalingConfigDataClass(**dataclass_or_dict)
ensure_only_allowed_dataclass_keys_updated(
dataclass=dataclass_or_dict,
allowed_keys=cls._scaling_config_allowed_keys,
)
return dataclass_or_dict
def setup(self) -> None:
"""Called during fit() to perform initial setup on the Trainer.
Note: this method is run on a remote process.
This method will not be called on the driver, so any expensive setup
operations should be placed here and not in ``__init__``.
This method is called prior to ``preprocess_datasets`` and
``training_loop``.
"""
pass
def preprocess_datasets(self) -> None:
"""Called during fit() to preprocess dataset attributes with preprocessor.
Note: This method is run on a remote process.
This method is called prior to entering the training_loop.
If the ``Trainer`` has both a datasets dict and
a preprocessor, the datasets dict contains a training dataset (denoted by
the "train" key), and the preprocessor has not yet
been fit, then it will be fit on the train dataset.
Then, all Trainer's datasets will be transformed by the preprocessor.
The transformed datasets will be set back in the ``self.datasets`` attribute
of the Trainer to be used when overriding ``training_loop``.
"""
# Evaluate all datasets.
self.datasets = {k: d() if callable(d) else d for k, d in self.datasets.items()}
if self.preprocessor:
train_dataset = self.datasets.get(TRAIN_DATASET_KEY, None)
if train_dataset:
self.preprocessor.fit(train_dataset)
# Execute dataset transformations serially for now.
# Cannot execute them in remote tasks due to dataset ownership model:
# if datasets are created on a remote node, then if that node fails,
# we cannot recover the dataset.
new_datasets = {}
for key, dataset in self.datasets.items():
new_datasets[key] = self.preprocessor.transform(dataset)
self.datasets = new_datasets
@abc.abstractmethod
def training_loop(self) -> None:
"""Loop called by fit() to run training and report results to Tune.
Note: this method runs on a remote process.
``self.datasets`` have already been preprocessed by ``self.preprocessor``.
You can use the :ref:`Tune Function API functions <tune-function-docstring>`
(``session.report()`` and ``session.get_checkpoint()``) inside
this training loop.
Example:
            .. code-block:: python
from ray.train.trainer import BaseTrainer
class MyTrainer(BaseTrainer):
def training_loop(self):
for epoch_idx in range(5):
...
session.report({"epoch": epoch_idx})
"""
raise NotImplementedError
@PublicAPI(stability="alpha")
def fit(self) -> Result:
"""Runs training.
Returns:
A Result object containing the training result.
Raises:
TrainingFailedError: If any failures during the execution of
``self.as_trainable()``.
"""
from ray.tune.tuner import Tuner
trainable = self.as_trainable()
tuner = Tuner(trainable=trainable, run_config=self.run_config)
result_grid = tuner.fit()
assert len(result_grid) == 1
try:
result = result_grid[0]
if result.error:
raise result.error
except TuneError as e:
raise TrainingFailedError from e
return result
def as_trainable(self) -> Type[Trainable]:
"""Convert self to a ``tune.Trainable`` class."""
base_config = self._param_dict
trainer_cls = self.__class__
scaling_config = self.scaling_config
def train_func(config, checkpoint_dir=None):
# config already contains merged values.
# Instantiate new Trainer in Trainable.
trainer = trainer_cls(**config)
if checkpoint_dir:
trainer.resume_from_checkpoint = Checkpoint.from_directory(
checkpoint_dir
)
trainer.setup()
trainer.preprocess_datasets()
trainer.training_loop()
# Change the name of the training function to match the name of the Trainer
# class. This will mean the Tune trial name will match the name of Trainer on
# stdout messages and the results directory.
train_func.__name__ = trainer_cls.__name__
trainable_cls = wrap_function(train_func, warn=False)
class TrainTrainable(trainable_cls):
"""Add default resources to the Trainable."""
# Workaround for actor name not being logged correctly
# if __repr__ is not directly defined in a class.
def __repr__(self):
return super().__repr__()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Create a new config by merging the dicts.
# run_config is not a tunable hyperparameter so it does not need to be
# merged.
run_config = base_config.pop("run_config", None)
self._merged_config = merge_dicts(base_config, self.config)
self._merged_config["run_config"] = run_config
self._merged_config[
"scaling_config"
] = self._reconcile_scaling_config_with_trial_resources(
self._merged_config.get("scaling_config")
)
def _reconcile_scaling_config_with_trial_resources(
self, scaling_config: Union[ScalingConfigDataClass, Dict[str, Any]]
) -> Dict[str, Any]:
"""
ResourceChangingScheduler workaround.
Ensures that the scaling config matches trial resources.
Returns a dict so that `_validate_attributes` passes
(change when switching scaling_config to the dataclass).
This should be replaced with RCS returning a ScalingConfig
in the future.
"""
trial_resources = self.trial_resources
# This will be false if the resources are default
if not isinstance(trial_resources, PlacementGroupFactory):
return scaling_config
if scaling_config:
scaling_config = (
trainer_cls._validate_and_get_scaling_config_data_class(
scaling_config
)
)
scaling_config_from_trial_resources = (
ScalingConfigDataClass.from_placement_group_factory(trial_resources)
)
# This check should always pass if ResourceChangingScheduler is not
# used.
if scaling_config_from_trial_resources != scaling_config:
scaling_config = (
trainer_cls._validate_and_get_scaling_config_data_class(
scaling_config_from_trial_resources
)
)
return scaling_config.__dict__
def _trainable_func(self, config, reporter, checkpoint_dir):
# We ignore the config passed by Tune and instead use the merged
# config which includes the initial Trainer args.
super()._trainable_func(self._merged_config, reporter, checkpoint_dir)
@classmethod
def default_resource_request(cls, config):
updated_scaling_config = config.get("scaling_config", scaling_config)
scaling_config_dataclass = (
trainer_cls._validate_and_get_scaling_config_data_class(
updated_scaling_config
)
)
return scaling_config_dataclass.as_placement_group_factory()
return TrainTrainable
| [
"[email protected]"
]
| |
feb11aba73a832be17f8688f1d9779063737160f | 45d01a6c5fbf766ad4d996c044412dc2b268ef07 | /autoimpute/utils/patterns.py | c11d401512afe81a83b68c8ae768f1fa071447fb | [
"MIT"
]
| permissive | kearnz/autoimpute | 2cf88d8cf4a1ab6b8b6579c8dca2ecd38eb1aaf9 | 6ef82663464aad187fd341fcace8e97bd0222aaf | refs/heads/master | 2023-06-07T21:08:23.584459 | 2023-05-24T04:43:28 | 2023-05-24T04:43:28 | 168,429,609 | 245 | 27 | MIT | 2022-09-10T22:36:57 | 2019-01-30T23:09:47 | Python | UTF-8 | Python | false | false | 13,147 | py | """Methods to numerically assess patterns in missing data.
This module is a collection of methods to explore missing data and its
patterns. The module's methods are heavily influenced by those found in
section 4.1 of Flexible Imputation of Missing Data (Van Buuren). Their main
purpose is to identify trends and patterns in missing data that can help
inform what type of imputation method may apply or what cautions to take
when performing imputations in general.
"""
import numpy as np
import pandas as pd
from autoimpute.utils import check_data_structure, check_missingness
from autoimpute.utils.helpers import _sq_output, _index_output
@check_data_structure
def md_locations(data, both=False):
"""Produces locations where values are missing in a DataFrame.
Takes in a DataFrame and identifies locations where data is complete or
    missing. Normally, fully complete data issues a warning and fully missing
    data throws an error, but this method simply shows missingness locations,
    so the usual requirement of a mixed complete-missing pattern does not apply.
Method marks 1 = missing, 0 = not missing.
Args:
data (pd.DataFrame): DataFrame to find missing & complete observations.
both (boolean, optional): return data along with missingness indicator.
Defaults to False, so just missingness indicator returned.
Returns:
pd.DataFrame: missingness indicator DataFrame OR
pd.DataFrame: missingness indicator DataFrame concatenated column-wise
with original DataFame.
Raises:
TypeError: if data is not a DataFrame. Error raised through decorator.
"""
md_df = pd.isnull(data)*1
if both:
md_df = pd.concat([data, md_df], axis=1)
return md_df
@check_data_structure
def md_pairs(data):
"""Calculates pairwise missing data statistics.
This method mimics the behavior of MICE md.pairs.
- rr: response-response pairs
- rm: response-missing pairs
- mr: missing-response pairs
- mm: missing-missing pairs
Returns a square matrix for each, where n = number of columns.
Args:
data (pd.DataFrame): DataFrame to calculate pairwise stats.
Returns:
dict: keys are pair types, values are DataFrames w/ pair stats.
Raises:
TypeError: if data is not a DataFrame. Error raised through decorator.
"""
int_ln = lambda arr: np.logical_not(arr)*1
r = int_ln(pd.isnull(data.values))
rr = np.matmul(r.T, r)
mm = np.matmul(int_ln(r).T, int_ln(r))
mr = np.matmul(int_ln(r).T, r)
rm = np.matmul(r.T, int_ln(r))
pairs = dict(rr=rr, rm=rm, mr=mr, mm=mm)
pairs = {k: _sq_output(v, data.columns, True)
for k, v in pairs.items()}
return pairs
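# Illustrative check (assuming the returned frames are labeled by column name):
# for columns Y1 (fully observed) and Y2 (missing in one of three rows),
# pairs["rr"].loc["Y1", "Y2"] == 2 and pairs["rm"].loc["Y1", "Y2"] == 1.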
@check_data_structure
def md_pattern(data):
"""Calculates row-wise missing data statistics in input data.
Method is a port of md.pattern method from VB 4.1. The number of rows
indicates the number of different row patterns of missingness. The 'nmis'
column is the number of missing values in a given row pattern. The
'count' is number of total rows with a given row pattern.
    In the output pattern, 0 = missing, 1 = observed.
Args:
data (pd.DataFrame): DataFrame to calculate missing data pattern.
Returns:
pd.DataFrame: DataFrame with missing data pattern and two
additional columns w/ row-wise stats: `count` and `nmis`.
"""
cols = data.columns.tolist()
r = pd.isnull(data.values)
nmis = np.sum(r, axis=0)
r = r[:, np.argsort(nmis)]
num_string = lambda row: "".join(str(e) for e in row)
pat = np.apply_along_axis(num_string, 1, r*1)
sort_r = r[np.argsort(pat), :]*1
sort_r_df = _sq_output(sort_r, cols, False)
sort_r_df = sort_r_df.groupby(cols).size().reset_index()
sort_r_df.columns = cols + ["count"]
sort_r_df["nmis"] = sort_r_df[cols].sum(axis=1)
sort_r_df[cols] = sort_r_df[cols].apply(np.logical_not)*1
return sort_r_df[["count"] + cols + ["nmis"]]
@check_missingness
def nullility_cov(data):
"""Calculates the nullility covariance between features in a DataFrame.
Leverages pandas method to calculate covariance of nullility. Note that
this method drops NA values to compute covariance. It also employs
`check_missingness` decorator to ensure DataFrame not fully missing. If
a DataFrame is fully observed, nothing is returned, as there is no
nullility.
Args:
data (pd.DataFrame): DataFrame to calculate nullility covariance.
Returns:
pd.DataFrame: DataFrame with nullility covariance b/w each feature.
Raises:
TypeError: If data not pd.DataFrame. Raised through decorator.
ValueError: If DataFrame values all missing and none complete.
Also raised through decorator.
"""
data_cov = data.isnull().cov()
return data_cov.dropna(axis=0, how="all").dropna(axis=1, how="all")
@check_missingness
def nullility_corr(data, method="pearson"):
"""Calculates the nullility correlation between features in a DataFrame.
Leverages pandas method to calculate correlation of nullility. Note that
this method drops NA values to compute correlation. It also employs
`check_missingness` decorator to ensure DataFrame not fully missing. If
a DataFrame is fully observed, nothing is returned, as there is no
nullility.
Args:
data (pd.DataFrame): DataFrame to calculate nullility correlation.
method (string, optional): correlation method to use. Default pearson,
but spearman should be used with categorical or ordinal encoding.
Returns:
pd.DataFrame: DataFrame with nullility correlation b/w each feature.
Raises:
TypeError: If data not pd.DataFrame. Raised through decorator.
ValueError: If DataFrame values all missing and none complete.
Also raised through decorator.
ValueError: If method for correlation not an accepted method.
"""
accepted_methods = ("pearson", "kendall", "spearman")
if method not in accepted_methods:
err = f"Correlation method must be in {accepted_methods}"
raise ValueError(err)
data_corr = data.isnull().corr(method=method)
return data_corr.dropna(axis=0, how="all").dropna(axis=1, how="all")
def _inbound(pairs):
"""Private method to get inbound from pairs."""
return pairs["mr"]/(pairs["mr"]+pairs["mm"])
def _outbound(pairs):
"""Private method to get outbound from pairs."""
return pairs["rm"]/(pairs["rm"]+pairs["rr"])
def _influx(pairs):
"""Private method to get influx from pairs."""
num = np.nansum(pairs["mr"], axis=1)
denom = np.nansum(pairs["mr"]+pairs["rr"], axis=1)
return num/denom
def _outflux(pairs):
"""Private method to get outflux from pairs."""
num = np.nansum(pairs["rm"], axis=1)
denom = np.nansum(pairs["rm"]+pairs["mm"], axis=1)
return num/denom
def get_stat_for(func, data):
"""Generic method to get a missing data statistic from data.
This method can be used directly with helper methods, but this behavior
is discouraged. Instead, use specific public methods below. These special
methods utilize this function internally to compute summary statistics.
Args:
func (function): Function that calculates a statistic.
data (pd.DataFrame): DataFrame on which to run the function.
Returns:
np.ndarray: Output from statistic chosen.
"""
pairs = md_pairs(data)
with np.errstate(divide="ignore", invalid="ignore"):
stat = func(pairs)
return stat
def inbound(data):
"""Calculates proportion of usable cases (Ijk) from Van Buuren 4.1.
Method ported from VB, called "inbound statistic", Ijk.
Ijk = 1 if variable Yk observed in all records where Yj missing.
Used to quickly select potential predictors Yk for imputing Yj.
High values are preferred.
Args:
data (pd.DataFrame): DataFrame to calculate inbound statistic.
Returns:
pd.DataFrame: inbound statistic between each of the features.
Inbound between a feature and itself is 0.
"""
inbound_coeff = get_stat_for(_inbound, data)
inbound_ = _sq_output(inbound_coeff, data.columns, True)
return inbound_
def outbound(data):
"""Calculates the outbound statistic (Ojk) from Van Buuren 4.1.
Method ported from VB, called "outbound statistic", Ojk.
Ojk measures how observed data Yj connect to rest of missing data.
Ojk = 1 if Yj observed in all records where Yk is missing.
Used to evaluate whether Yj is a potential predictor for imputing Yk.
High values are preferred.
Args:
data (pd.DataFrame): DataFrame to calculate outbound statistic.
Returns:
pd.DataFrame: outbound statistic between each of the features.
Outbound between a feature and itself is 0.
"""
outbound_coeff = get_stat_for(_outbound, data)
outbound_ = _sq_output(outbound_coeff, data.columns, True)
return outbound_
def influx(data):
"""Calculates the influx coefficient (Ij) from Van Buuren 4.1.
Method ported from VB, called "influx coefficient", Ij.
Ij = # pairs (Yj,Yk) w/ Yj missing & Yk observed / # observed data cells.
Value depends on the proportion of missing data of the variable.
Influx of a completely observed variable is equal to 0.
Influx for completely missing variables is equal to 1.
For two variables with the same proportion of missing data:
- Variable with higher influx is better connected to the observed data.
- Variable with higher influx might thus be easier to impute.
Args:
data (pd.DataFrame): DataFrame to calculate influx coefficient.
Returns:
pd.DataFrame: influx coefficient for each column.
"""
influx_coeff = get_stat_for(_influx, data)
influx_coeff = influx_coeff.reshape(1, len(influx_coeff))
influx_ = _sq_output(influx_coeff, data.columns, False)
influx_.index = ["Influx"]
return influx_
def outflux(data):
"""Calculates the outflux coefficient (Oj) from Van Buuren 4.1.
Method ported from VB, called "outflux coefficient", Oj.
Oj = # pairs w/ Yj observed and Yk missing / # incomplete data cells.
Value depends on the proportion of missing data of the variable.
Outflux of a completely observed variable is equal to 1.
Outflux of a completely missing variable is equal to 0.
For two variables having the same proportion of missing data:
- Variable with higher outflux is better connected to the missing data.
- Variable with higher outflux more useful for imputing other variables.
Args:
data (pd.DataFrame): DataFrame to calculate outflux coefficient.
Returns:
pd.DataFrame: outflux coefficient for each column.
"""
outflux_coeff = get_stat_for(_outflux, data)
outflux_coeff = outflux_coeff.reshape(1, len(outflux_coeff))
outflux_ = _sq_output(outflux_coeff, data.columns, False)
outflux_.index = ["Outflux"]
return outflux_
@check_data_structure
def proportions(data):
"""Calculates the proportions of the data missing and data observed.
Method calculates two arrays:
- `poms`: Proportion of missing data.
- `pobs`: Proportion of observed data.
Args:
data (pd.DataFrame): DataFrame to calculate proportions.
Returns:
pd.DataFrame: two columns, one for `poms` and one for `pobs`.
The sum of each row should equal 1. Index = original data cols.
Raises:
TypeError: if data not DataFrame. Error raised through decorator.
"""
poms = np.mean(pd.isnull(data), axis=0)
pobs = np.mean(np.logical_not(pd.isnull(data)), axis=0)
proportions_dict = dict(poms=poms, pobs=pobs)
proportions_ = _index_output(proportions_dict, data.columns)
return proportions_
def flux(data):
"""Caclulates inbound, influx, outbound, outflux, pobs, for DataFrame.
Port of Van Buuren's flux method in R. Calculates:
- `pobs`: Proportion observed (column from the `proportions` method).
- `ainb`: Average inbound statistic.
    - `aout`: Average outbound statistic.
    - `influx`: Influx coefficient, Ij (from the `influx` method).
    - `outflux`: Outflux coefficient, Oj (from the `outflux` method).
Args:
data (pd.DataFrame): DataFrame to calculate relevant statistics.
Returns:
pd.DataFrame: one column for each summary statistic.
Columns of DataFrame equal the name of the summary statistics.
Indices of DataFrame equal the original DataFrame columns.
"""
row_mean = lambda row: np.nansum(row)/(len(row) - 1)
pairs = md_pairs(data)
with np.errstate(divide="ignore", invalid="ignore"):
pobs = proportions(data)["pobs"]
ainb = np.apply_along_axis(row_mean, 1, _inbound(pairs))
aout = np.apply_along_axis(row_mean, 1, _outbound(pairs))
inf = _influx(pairs)
outf = _outflux(pairs)
res = dict(pobs=pobs, influx=inf, outflux=outf, ainb=ainb, aout=aout)
flux_ = _index_output(res, data.columns)
return flux_
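# Typical reading of the result (illustrative): variables with high `outflux`
# and `pobs` are well connected to the missing data and make good predictors,
# e.g. flux(df).sort_values("outflux", ascending=False).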
| [
"[email protected]"
]
| |
fc7b97550c2640523513a8ada604f94e0c97df40 | ac5d55e43eb2f1fb8c47d5d2a68336eda181d222 | /Greedy/392. Is Subsequence.py | 439737d72f004b8f25d690b2355b677978a02aa8 | []
| no_license | tinkle1129/Leetcode_Solution | 7a68b86faa37a3a8019626e947d86582549374b3 | 1520e1e9bb0c428797a3e5234e5b328110472c20 | refs/heads/master | 2021-01-11T22:06:45.260616 | 2018-05-28T03:10:50 | 2018-05-28T03:10:50 | 78,925,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: [email protected]
# Name: Is Subsequence.py
# Creation Time: 2017/9/25
###########################################
'''
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
'''
class Solution(object):
def isSubsequence(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
pos_s = 0
pos_t = 0
while(pos_s<len(s) and pos_t<len(t)):
if s[pos_s]==t[pos_t]:
pos_t+=1
pos_s+=1
else:
pos_t+=1
return pos_s==len(s)
S = Solution()
print S.isSubsequence('abc','ahbgdc')
print S.isSubsequence('axc','ahbgdc') | [
"[email protected]"
]
| |
037b9190c649cf36abbd016974d528e2fec1ac1a | b1c17f43cb08740f519b0bd32bb93d9095135fc7 | /sawyer/mujoco/tasks/__init__.py | 15848b59633bc62899fafbfacf1cf4f8af3bbc6a | [
"MIT"
]
| permissive | rlworkgroup/gym-sawyer | 05f2a28a8c3e1a3031c6539db0f6b503e771d07b | 90d706cb0594c27045162bc9a00d56389f17615f | refs/heads/master | 2020-04-01T19:12:37.672577 | 2019-12-11T19:56:43 | 2019-12-11T19:56:43 | 153,541,945 | 37 | 10 | MIT | 2019-12-11T19:56:45 | 2018-10-18T00:45:57 | Python | UTF-8 | Python | false | false | 334 | py | from sawyer.mujoco.tasks.reacher_tasks import ReachTask
from sawyer.mujoco.tasks.pick_and_place_tasks import PickTask, PlaceTask
from sawyer.mujoco.tasks.toy_tasks import (InsertTask, RemoveTask, OpenTask,
CloseTask)
__all__ = [
"ReachTask", "PickTask", "PlaceTask", "InsertTask", "RemoveTask",
"OpenTask", "CloseTask"
]
| [
"[email protected]"
]
| |
75479224841b2e9b2a1fd91e22ae53f2418b53f7 | ded17d7d269daf933a04bb1e3a6e181c6f51598a | /version_1/data_processing/data_util.py | fa9307c65896c89f4ba31f4d5bf773f1fdee2e28 | []
| no_license | JDwangmo/weiboStanceDetection | 296d6bb4a1eae0546d5fce1ee1eb4ac071e6240d | 3a5196d31c4748334330e16a819e85041a04e17d | refs/heads/master | 2020-04-12T07:35:32.321179 | 2017-01-02T06:52:14 | 2017-01-02T06:52:14 | 58,032,790 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,646 | py | #encoding=utf8
__author__ = 'jdwang'
__date__ = 'create date: 2016-06-24'
__email__ = '[email protected]'
import numpy as np
import pandas as pd
import logging
import timeit
from sklearn.feature_extraction.text import CountVectorizer
from data_processing_util.jiebanlp.jieba_util import Jieba_Util
class DataUtil(object):
'''
    A data-processing utility class for Weibo stance detection, containing:
    1. load_data: load CSV-format data
    2. save_data: save CSV-format data
    3. print_data_detail: print data details
    4. processing_na_value: handle missing values
    5. segment_sentence: word segmentation
    6. split_train_test: split data into train and test sets
'''
def __init__(self):
        # initialize the jieba segmentation utility
self.jieba_util = Jieba_Util()
def load_data(self,path,header=True):
'''
        Load data.
        :param path: path to the data file
:return:
'''
if header:
data = pd.read_csv(path,
sep='\t',
header=0,
encoding='utf8',
)
else:
data = pd.read_csv(path,
sep='\t',
header=None,
encoding='utf8',
)
return data
def load_train_test_data(self,
config = None
):
'''
        Load the train and test data, together with the label index.
        :param config: configuration options
        :type config: dict
:return:
'''
        # -------------- region start : 1. load the train and test sets -------------
        if config['verbose'] > 2:
            logging.debug('-' * 20)
            print '-' * 20
            logging.debug('1. load the train and test sets')
            print '1. load the train and test sets'
        # -------------- code start -------------
train_data_file_path = (config['train_data_file_path']) % config['train_data_type']
test_data_file_path = (config['test_data_file_path']) % config['test_data_type']
logging.debug(train_data_file_path)
print train_data_file_path
logging.debug(test_data_file_path)
print test_data_file_path
data_util = DataUtil()
train_data = data_util.load_data(train_data_file_path)
test_data = data_util.load_data(test_data_file_path)
        # -------------- code end -------------
if config['verbose'] > 2:
logging.debug('-' * 20)
print '-' * 20
        # -------------- region end : 1. load the train and test sets ---------------
        # build the label index
label_to_index = {u'FAVOR': 0, u'AGAINST': 1, u'NONE': 2}
index_to_label = [u'FAVOR', u'AGAINST', u'NONE']
return train_data,test_data,label_to_index,index_to_label
def save_data(self,data,path):
'''
        Save data.
        :param path: path to the data file
:return:
'''
data.to_csv(path,
sep='\t',
header=True,
index=False,
encoding='utf8',
)
def print_data_detail(self, data, has_stance=True):
'''
        Show detailed information about the data.
        :param data: DataFrame object
        :param has_stance: whether the data has a STANCE column
        :return: None
'''
        logging.debug('number of rows in data: %d' % (len(data)))
        logging.debug('sample rows of data:')
        logging.debug(data.head())
        logging.debug('targets in data and their counts:')
        logging.debug(data['TARGET'].value_counts())
        if has_stance:
            logging.debug('counting each stance type under every target...')
            group = data.groupby(by=['TARGET', 'STANCE'])
            logging.debug(group.count())
        else:
            logging.debug('no STANCE column')
        logging.debug('per-column overview of the data...')
# print data.info()
for column in data.columns:
            # check whether any values in each column are empty strings
            # first replace all empty strings with NaN
data[column] = data[column].replace(r'^\s*$', np.nan, regex=True)
count_null = sum(data[column].isnull())
if count_null != 0:
                logging.warn(u'column %s has missing values, count: %d; consider further processing with processing_na_value()!' % (column, count_null))
                null_data_path = './result/null_data.csv'
                logging.warn(u'writing rows with missing values to file: %s' % (null_data_path))
data[data[column].isnull()].to_csv(null_data_path,
index=None,
encoding='utf8',
sep='\t')
def processing_na_value(self,data,clear_na=True,fill_na = False,fill_char = 'NULL',columns=None):
'''
        Handle missing values in the data.
        :param data: DataFrame object
        :param clear_na: bool, whether to drop rows with missing values
        :param fill_na: bool, whether to fill missing values
        :param fill_char: str, the string used to fill missing values
        :param columns: list, the columns to process; when None (default), all columns are processed
        :return: DataFrame object
        '''
        logging.debug('[def processing_na_value()] handling missing values....')
for column in data.columns:
if columns == None or column in columns:
data[column] = data[column].replace(r'^\s*$', np.nan, regex=True)
count_null = sum(data[column].isnull())
if count_null != 0:
                    logging.warn(u'column %s has missing values, count: %d' % (column, count_null))
                    if clear_na:
                        logging.warn(u'dropping rows with missing values in column %s' % (column))
data = data[data[column].notnull()].copy()
else:
if fill_na:
                            logging.warn(u'filling missing values in column %s with: %s' % (column, fill_char))
data[column] = data[column].fillna(value=fill_char)
return data
def segment_sentence(self,sentence):
segmented_sentence = self.jieba_util.seg(sentence=sentence,
sep=' ',
full_mode=True,
remove_stopword=True,
replace_number=True,
lowercase = True,
zhs2zht=True,
remove_url=True,
)
return segmented_sentence
def split_train_test(self,data, train_split=0.7):
'''
        Split the data into a training set and a validation set.
        :param data:
        :param train_split: float in [0,1], the proportion used for training
        :return: dev_data, test_data
        '''
        logging.debug('randomly splitting data into train and test sets, ratio: %f' % (train_split))
num_train = len(data)
num_dev = int(num_train * train_split)
num_test = num_train - num_dev
        logging.debug('counts of all / train / test data: %d,%d,%d' % (num_train, num_dev, num_test))
rand_list = np.random.RandomState(0).permutation(num_train)
# print rand_list
# print rand_list[:num_dev]
# print rand_list[num_dev:]
dev_data = data.iloc[rand_list[:num_dev]].sort_index()
test_data = data.iloc[rand_list[num_dev:]].sort_index()
# print dev_data
# print test_data
return dev_data, test_data
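    # e.g. with the 2986-row TaskAA data and train_split=0.7 this yields the
    # 2090/896 split saved in preprocess_dataAA() below.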
def count_word_freq(self,data):
'''
        Count how often each word occurs in each class; each word gets the following statistics:
            1. FAVOR: number of occurrences in the FAVOR class
            2. AGAINST: number of occurrences in the AGAINST class
            3. NONE: number of occurrences in the NONE class
            4. FREQ: number of occurrences across all classes, i.e. FAVOR+AGAINST+NONE
            5. SUPPORT: highest per-class count / FREQ
:param data:
:return:
'''
from data_processing_util.feature_encoder.onehot_feature_encoder import FeatureEncoder
feature_encoder = FeatureEncoder(train_data=data['WORDS'].as_matrix(),
verbose=0,
padding_mode='none',
need_segmented=False,
full_mode=True,
remove_stopword=True,
replace_number=True,
lowercase=True,
remove_url=True,
sentence_padding_length=7,
add_unkown_word=False,
mask_zero=False,
zhs2zht=True,
)
# print feature_encoder.train_padding_index
train_X_features = feature_encoder.to_onehot_array()
np.save('result/train_X_feature',train_X_features)
print train_X_features.shape
print train_X_features[:5]
vocabulary = feature_encoder.vocabulary
print ','.join(vocabulary)
print feature_encoder.vocabulary_size
np.save('result/vocabulary',vocabulary)
freq = np.sum(train_X_features,axis=0)
favor_freq = np.sum(train_X_features[data['STANCE'].as_matrix()==u'FAVOR'],axis=0)
against_freq = np.sum(train_X_features[data['STANCE'].as_matrix()==u'AGAINST'],axis=0)
none_freq = np.sum(train_X_features[data['STANCE'].as_matrix()==u'NONE'],axis=0)
support = np.nan_to_num([max(favor,against,none)/(1.0*(favor+against+none)) for favor,against,none in zip(favor_freq,against_freq,none_freq)])
print freq
print favor_freq
print against_freq
print none_freq
count_data = pd.DataFrame(data={
u'WORD':vocabulary,
u'FAVOR':favor_freq,
u'AGAINST':against_freq,
u'NONE':none_freq,
u'SUPPORT':support,
u'FREQ':freq,
})
count_data = count_data.sort_values(by=[u'SUPPORT',u'FREQ','WORD'],ascending=False)
count_data = count_data[[u'WORD',u'FAVOR',u'AGAINST',u'NONE',u'FREQ',u'SUPPORT']]
count_data.to_csv('result/word_count.csv',
sep='\t',
index=False,
header=True,
encoding='utf8',
)
print count_data.head()
def preprocess_dataAA():
'''
    Main preprocessing flow for the data file evasampledata4-TaskAA.txt.
    :return:
    '''
    # load the data
train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/evasampledata4-TaskAA.txt'
data_util = DataUtil()
data = data_util.load_data(train_dataA_file_path)
data_util.print_data_detail(data, has_stance=True)
    # -------------- region start : 1. handle missing values -------------
    logging.debug('-' * 20)
    print '-' * 20
    logging.debug('1. handle missing values')
    print '1. handle missing values'
    print 'the raw data has %d sentences' % (len(data))
    # drop rows whose TEXT column is empty, then save
    data = data_util.processing_na_value(data, clear_na=True,columns=[u'TEXT'])
    print 'after dropping rows with an empty TEXT column, %d sentences remain' % (len(data))
    logging.debug('-' * 20)
    print '-' * 20
    # -------------- region end : 1. handle missing values ---------------
    # word segmentation
data['WORDS'] = data['TEXT'].apply(data_util.segment_sentence)
    # save the data
    data_util.save_data(data, 'result/TaskAA_all_data_3000.csv')
    # drop rows that have missing values in any other column
data = data_util.processing_na_value(data, clear_na=True)
output_file_path = 'result/TaskAA_all_data_%d.csv' % len(data)
    print 'after dropping rows with missing values in other columns, %d sentences remain; writing to: %s' % (len(data), output_file_path)
    # save the data
data_util.save_data(data, output_file_path)
    # compute sentence lengths, counted in (segmented) words
data['LENGTH'] = data['WORDS'].apply(lambda x: len(x.split()))
    # distribution of sentence lengths
print data['LENGTH'].value_counts().sort_index()
print data.head()
# print data['WORDS'][:5]
    # randomly split the data into train and test sets
train_data, test_data = data_util.split_train_test(data, train_split=0.7)
print train_data.shape
print test_data.shape
data_util.print_data_detail(test_data)
# print train_data['TARGET'].value_counts()
# print test_data['TARGET'].value_counts()
# print data['TARGET'].value_counts()
    # save the data
data_util.save_data(data, 'result/TaskAA_all_data_2986.csv')
data_util.save_data(train_data, 'result/TaskAA_train_data_2090.csv')
data_util.save_data(test_data, 'result/TaskAA_test_data_896.csv')
def preprocess_dataAR():
'''
    Main preprocessing flow for the data file evasampledata4-TaskAR.txt.
:return:
'''
train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/evasampledata4-TaskAR.txt'
data_util = DataUtil()
data = data_util.load_data(train_dataA_file_path)
data_util.print_data_detail(data, has_stance=False)
print data.shape
data_util.save_data(data, 'result/TaskAR_all_data_2997.csv')
    # drop rows that contain missing values
    data = data_util.processing_na_value(data, clear_na=True)
    logging.debug('%d rows remain after dropping missing values.' % len(data))
    print '%d rows remain after dropping missing values.' % len(data)
    # word segmentation
data['WORDS'] = data['TEXT'].apply(data_util.segment_sentence)
# print data.head()
    # check whether segmentation produced any empty values
count_null_data = sum(data['WORDS'].isnull())
    logging.debug('number of empty WORDS values: %d' % count_null_data)
    print 'number of empty WORDS values: %d' % count_null_data
data = data_util.processing_na_value(data, clear_na=True,columns=['WORDS'])
    logging.debug('%d rows remain after dropping empty WORDS values.' % len(data))
    print '%d rows remain after dropping empty WORDS values.' % len(data)
data_util.save_data(data, 'result/TaskAR_all_data_2997.csv')
def preprocess_testA():
'''
    Main preprocessing flow for NLPCC2016_Stance_Detection_Task_A_Testdata.txt.
:return:
'''
train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/NLPCC2016_Stance_Detection_Task_A_Testdata.txt'
# train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskAA_testdata_Mhalf_896.csv'
# train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskA_all_testdata_15000.csv'
# output_file_path = 'result/TaskA_all_testdata_15000.csv'
output_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskAA_all_data_3000_Mhalf.csv'
# output_file_path = 'result/TaskAA_testdata_Mhalf_896.csv'
data_util = DataUtil()
data = data_util.load_data(train_dataA_file_path)
data['ID'] = data['ID'].astype(dtype=int)
data_util.print_data_detail(data, has_stance=False)
print data.shape
print data.head()
# quit()
    # drop rows that contain missing values
# data = data_util.processing_na_value(data,
# clear_na=True,
# columns=[u'TEXT'],
# )
    logging.debug('%d rows remain after dropping rows with an empty TEXT column.' % len(data))
    print '%d rows remain after dropping missing values.' % len(data)
    # print '%d sentences are already labelled' % sum(data[u'PREDICT'].notnull())
    # -------------- region start : segment the sentences -------------
    logging.debug('-' * 20)
    print '-' * 20
    logging.debug('segmenting the sentences')
    print 'segmenting the sentences'
    # word segmentation
data['WORDS'] = data['TEXT'].apply(data_util.segment_sentence)
print data.head()
    # check whether segmentation produced any empty values
count_null_data = sum(data['WORDS'].isnull())
    logging.debug('number of empty WORDS values: %d' % count_null_data)
    print 'number of empty WORDS values: %d' % count_null_data
# data = data_util.processing_na_value(data, clear_na=True, columns=['WORDS'])
    # logging.debug('%d rows remain after dropping empty WORDS values.' % len(data))
    print '%d rows remain after dropping empty WORDS values.' % len(data)
logging.debug('-' * 20)
print '-' * 20
    # -------------- region end : segment the sentences ---------------
data_util.save_data(data, output_file_path)
def padding_dataAA():
'''
    Processing flow that fills in missing stance labels for train_data/TaskAA_all_data_3000.csv.
:return:
'''
train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/evasampledata4-TaskAA.txt'
# train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskAA_testdata_Mhalf_896.csv'
# train_dataA_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskA_all_testdata_15000.csv'
# output_file_path = 'result/TaskA_all_testdata_15000.csv'
output_file_path = '/home/jdwang/PycharmProjects/weiboStanceDetection/train_data/TaskAA_all_data_3000.csv'
# output_file_path = 'result/TaskAA_testdata_Mhalf_896.csv'
data_util = DataUtil()
data = data_util.load_data(train_dataA_file_path)
# data['ID'] = data['ID'].astype(dtype=int)
# data[data['STANCE'].isnull()]['STANCE'] =u'NONE'
# print data[data['STANCE'].isnull()]['STANCE']
data_util.print_data_detail(data, has_stance=True)
# print data.loc[3001]
data.loc[data['STANCE'].isnull(),'STANCE'] = 'NONE'
data_util.print_data_detail(data, has_stance=True)
print data.shape
# print data.head()
# quit()
# print data['STANCE'][data['STANCE'].isnull()]
# quit()
data['WORDS'] = data['TEXT'].apply(data_util.segment_sentence)
    # drop rows that contain missing values
# data = data_util.processing_na_value(data,
# clear_na=True,
# columns=[u'TEXT'],
# )
    # logging.debug('%d rows remain after dropping rows with an empty TEXT column.' % len(data))
    # print '%d rows remain after dropping missing values.' % len(data)
    # print '%d sentences are already labelled' % sum(data[u'PREDICT'].notnull())
data_util.save_data(data, output_file_path)
if __name__ == '__main__':
    # main preprocessing flow for evasampledata4-TaskAA.txt
    # preprocess_dataAA()
    # main preprocessing flow for evasampledata4-TaskAR.txt
    # preprocess_dataAR()
    # main preprocessing flow for NLPCC2016_Stance_Detection_Task_A_Testdata.txt
    preprocess_testA()
    # label-padding flow for train_data/TaskAA_all_data_3000.csv
    # padding_dataAA()
| [
"[email protected]"
]
| |
9530f6b796f2fe2f1a57edbf76ec8b76d220b4fa | ab68d9fd15daf0460e92a471a417b188d4594b8f | /key.py | 729f60405a7dfe20726759a3e31bcbf83bfe09f0 | []
| no_license | baifengbai/Console | 63a911c850eb3c6c64e8381a14ae34e18fc4a95e | d40cb568d9dd1268379616e5d351073e303abfaa | refs/heads/master | 2020-03-20T15:55:40.782551 | 2018-06-15T18:43:09 | 2018-06-15T18:43:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | # adopted from TerminalView
_KEY_MAP = {
"enter": "\r",
"backspace": "\x7f",
"tab": "\t",
"space": " ",
"escape": "\x1b",
"down": "\x1b[B",
"up": "\x1b[A",
"right": "\x1b[C",
"left": "\x1b[D",
"home": "\x1b[1~",
"end": "\x1b[4~",
"pageup": "\x1b[5~",
"pagedown": "\x1b[6~",
"delete": "\x1b[3~",
"insert": "\x1b[2~",
"f1": "\x1bOP",
"f2": "\x1bOQ",
"f3": "\x1bOR",
"f4": "\x1bOS",
"f5": "\x1b[15~",
"f6": "\x1b[17~",
"f7": "\x1b[18~",
"f8": "\x1b[19~",
"f9": "\x1b[20~",
"f10": "\x1b[21~",
"f12": "\x1b[24~",
"bracketed_paste_mode_start": "\x1b[200~",
"bracketed_paste_mode_end": "\x1b[201~",
}
# _APP_KEY_MAP = {
# "down": "\x1bOB",
# "up": "\x1bOA",
# "right": "\x1bOC",
# "left": "\x1bOD",
# }
_CTRL_KEY_MAP = {
"up": "\x1b[1;5A",
"down": "\x1b[1;5B",
"right": "\x1b[1;5C",
"left": "\x1b[1;5D",
"@": "\x00",
"`": "\x00",
"[": "\x1b",
"{": "\x1b",
"\\": "\x1c",
"|": "\x1c",
"]": "\x1d",
"}": "\x1d",
"^": "\x1e",
"~": "\x1e",
"_": "\x1f",
"?": "\x7f",
}
_ALT_KEY_MAP = {
"up": "\x1b[1;3A",
"down": "\x1b[1;3B",
"right": "\x1b[1;3C",
"left": "\x1b[1;3D",
}
def _get_ctrl_combination_key_code(key):
key = key.lower()
if key in _CTRL_KEY_MAP:
return _CTRL_KEY_MAP[key]
elif len(key) == 1:
c = ord(key)
if (c >= 97) and (c <= 122):
c = c - ord('a') + 1
return chr(c)
return _get_key_code(key)
return _get_key_code(key)
def _get_alt_combination_key_code(key):
key = key.lower()
if key in _ALT_KEY_MAP:
return _ALT_KEY_MAP[key]
code = _get_key_code(key)
return "\x1b" + code
# def _get_app_key_code(key):
# if key in _APP_KEY_MAP:
# return _APP_KEY_MAP[key]
# return _get_key_code(key)
def _get_key_code(key):
if key in _KEY_MAP:
return _KEY_MAP[key]
return key
def get_key_code(key, ctrl=False, alt=False, shift=False):
"""
    Return the terminal key code for a keypress (the `shift` flag is currently ignored)
"""
if ctrl:
keycode = _get_ctrl_combination_key_code(key)
elif alt:
keycode = _get_alt_combination_key_code(key)
else:
keycode = _get_key_code(key)
return keycode
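# Example: get_key_code("c", ctrl=True) -> "\x03", the byte a terminal sends
# for Ctrl+C (holds for any lowercase letter a-z via the ord() arithmetic above).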
| [
"[email protected]"
]
| |
6128f590ded0ddb1e220721dbb0a679f211cf24d | 54fcfec16e6c8b4bcd8830e9cfec6b354c8ba19f | /filer/walker.py | f6f26f3751dc5df243fa0d71bdb027445793d305 | []
| no_license | rboulton/filer | 752efe318b5cb5784c7ecd8690cf9648659274fa | 37732344ce592a4ddd08f93a65c4e3ddc441e392 | refs/heads/master | 2021-01-14T06:00:59.698775 | 2021-01-03T03:15:22 | 2021-01-03T03:15:22 | 242,620,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,650 | py | import hashlib
import heapq
import os
import re
import stat
import subprocess
import time
import pyinotify
import asyncio
from . import db
REGULAR_FILE = 1
SYMLINK = 2
class Walker:
def __init__(self, config):
self.config = config
self.exclude_patterns = [
re.compile(pattern) for pattern in config.exclude_patterns
]
self.swapfiles = self.find_swapfiles()
self.db_conn = db.connect(self.config, read_only=False)
db.init_schema(self.db_conn)
self.batch_size = 1000
self.batch_timeout = 5
self.watch_manager = pyinotify.WatchManager()
self.watch_mask = pyinotify.ALL_EVENTS
self.watch_mask = (
            pyinotify.IN_ATTRIB
| pyinotify.IN_CREATE
| pyinotify.IN_DELETE
| pyinotify.IN_DELETE_SELF
| pyinotify.IN_MODIFY
| pyinotify.IN_MOVE_SELF
| pyinotify.IN_MOVED_FROM
| pyinotify.IN_MOVED_TO
| pyinotify.IN_DONT_FOLLOW
| pyinotify.IN_EXCL_UNLINK
)
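        # This mask deliberately narrows the ALL_EVENTS default assigned above
        # to the metadata, create/delete, modify and move events the walker
        # reacts to; IN_EXCL_UNLINK suppresses events for unlinked files.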
self.loop = asyncio.get_event_loop()
def log(self, message):
print(message)
def find_swapfiles(self):
result = subprocess.run(
["/sbin/swapon", "--show=NAME", "--noheadings"], stdout=subprocess.PIPE
)
if result.returncode != 0:
return []
return result.stdout.decode("utf8").strip().split("\n")
def calc_hash(self, path):
self.log("Calculating hash of {}".format(path))
filesize = 0
try:
h = hashlib.sha256()
with open(path, "rb") as fobj:
while True:
d = fobj.read(1024 * 128)
if len(d) == 0:
break
h.update(d)
filesize += len(d)
return h.hexdigest(), filesize
except PermissionError as e:
self.log("PermissionError calculating hash for {} - skipping".format(path))
return None, None
def visit_files(self, batch):
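        # Batch protocol, as implemented below: an entry with mtime=None is a
        # candidate deletion; a file modified within the last `settle_time`
        # seconds is re-queued instead of hashed, so fast-changing files are
        # only hashed once they stop changing.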
revisits_queued = False
stored_data = {
path: (stored_hash, stored_mtime)
for stored_hash, path, stored_mtime in db.get_current_file_data(
self.db_conn, [path for path, _ in batch]
)
}
deletes = set()
for path, mtime in batch:
if mtime is None:
deletes.add(path)
continue
now = time.time()
old_hash = None
stored = stored_data.get(path)
if stored:
if stored[1] == mtime:
# No change since last visit
db.record_visit(self.db_conn, path)
continue
self.log(
"stored timestamp for {} different from new timestamp: {} {}".format(
path, repr(stored[1]), mtime
)
)
old_hash = stored[0]
settled_time = mtime + self.config.settle_time
if now < settled_time:
# Changed more recently than settle_time
self.log(
"file {} changed recently - will revisit after {}s".format(
path, settled_time - time.time()
)
)
db.record_visit(self.db_conn, path, settled_time)
revisits_queued = True
continue
# Check mtime again before we spend time calculating the hash
try:
new_mtime = int(os.path.getmtime(path))
except FileNotFoundError:
deletes.add(path)
continue
if new_mtime != mtime:
# Changed since we logged this as something to be visited - revisit again later.
self.log(
"file {} changed since we last looked at it - will revisit after {}s".format(
path, new_mtime + self.config.settle_time - time.time()
)
)
db.record_visit(self.db_conn, path, new_mtime + self.config.settle_time)
revisits_queued = True
continue
new_hash, filesize = self.calc_hash(path)
if new_hash is None:
# Couldn't hash it - drop this file
self.log("file {} couldn't be hashed - treat as absent".format(path))
db.record_visit(self.db_conn, path, deleted=True)
db.update_deleted_file_data(self.db_conn, path, time.time())
continue
# Check mtime after hash calculated
try:
new_mtime = int(os.path.getmtime(path))
except FileNotFoundError:
deletes.add(path)
continue
if new_mtime != mtime:
# Changed since we started calculating the hash - revisit when it might have settled
db.record_visit(self.db_conn, path, new_mtime + self.config.settle_time)
revisits_queued = True
continue
# print("updating {} {} {}".format(path, mtime, now))
db.update_file_data(self.db_conn, new_hash, filesize, path, mtime, now)
db.record_visit(self.db_conn, path)
for path in deletes:
# Check file still doesn't exist
# Note - it's possible the file gets created between this check
# and the write to the db - this doesn't matter, because we'll
# get a file update notification if this happens, and guarantee
# to process that after the db has been updated, so there's no
# race condition here.
print("Processing delete: {}".format(path))
try:
new_mtime = int(os.path.getmtime(path))
except FileNotFoundError:
new_mtime = None
if new_mtime:
db.record_visit(self.db_conn, path, new_mtime + self.config.settle_time)
revisits_queued = True
else:
db.record_visit(self.db_conn, path, deleted=True)
db.update_deleted_file_data(self.db_conn, path, time.time())
self.db_conn.commit()
return revisits_queued
def visit_symlinks(self, batch):
for path, mtime in batch:
self.log("symlink {} mtime={}".format(path, mtime))
def listen(self):
"""Listen for updates
Triggers calls to visit_files() and visit_symlinks() when big enough
batches of either have been created.
"""
self.init_delete_batch_processing()
self.init_file_batch_processing()
self.init_symlink_batch_processing()
self.loop.create_task(self.start_watching_roots())
self.revisit_cond = asyncio.Condition()
self.loop.create_task(self.start_polling_revisits())
self.start_polling_changes()
self.loop.run_forever()
self.stop_polling_changes()
async def process_change(self, path, stats):
if path is None:
return
if stats is None:
await self.add_to_delete_batch(path)
return
mtime = int(stats.st_mtime)
if stat.S_ISREG(stats.st_mode):
await self.add_to_file_batch(path, mtime)
elif stat.S_ISLNK(stats.st_mode):
await self.add_to_symlink_batch(path, mtime)
else:
print("Unexpected change stats: {}".format(str(stats)))
def init_delete_batch_processing(self):
self.delete_batch = {}
self.delete_batch_time = None
self.delete_batch_cond = asyncio.Condition()
self.loop.create_task(self.start_polling_delete_batches())
async def add_to_delete_batch(self, path):
self.delete_batch[path] = None
if self.delete_batch_time is None:
self.delete_batch_time = time.time() + self.batch_timeout
if len(self.delete_batch) > self.batch_size:
await self.process_delete_batch()
async with self.delete_batch_cond:
self.delete_batch_cond.notify_all()
async def start_polling_delete_batches(self):
while True:
while self.delete_batch_time is not None:
wait_time = self.delete_batch_time - time.time()
if wait_time <= 0:
await self.process_delete_batch()
else:
self.log(
"Next delete batch time: {} ({}s)".format(
self.delete_batch_time, wait_time
)
)
await asyncio.sleep(wait_time)
self.log("No delete batch due")
async with self.delete_batch_cond:
await self.delete_batch_cond.wait()
async def process_delete_batch(self):
print("processing delete batch size: {}".format(len(self.delete_batch)))
batch = self.delete_batch
self.delete_batch = {}
self.delete_batch_time = None
revisits_queued = self.visit_files(
sorted(batch.items())
) or self.visit_symlinks(sorted(batch.items()))
if revisits_queued:
async with self.revisit_cond:
self.revisit_cond.notify_all()
def init_file_batch_processing(self):
self.file_batch = {}
self.file_batch_time = None
self.file_batch_cond = asyncio.Condition()
self.loop.create_task(self.start_polling_file_batches())
async def add_to_file_batch(self, path, mtime):
self.file_batch[path] = mtime
if self.file_batch_time is None:
self.file_batch_time = time.time() + self.batch_timeout
if len(self.file_batch) > self.batch_size:
await self.process_file_batch()
async with self.file_batch_cond:
self.file_batch_cond.notify_all()
async def start_polling_file_batches(self):
while True:
while self.file_batch_time is not None:
wait_time = self.file_batch_time - time.time()
if wait_time <= 0:
await self.process_file_batch()
else:
self.log(
"Next file batch time: {} ({}s)".format(
self.file_batch_time, wait_time
)
)
await asyncio.sleep(wait_time)
self.log("No file batch due")
async with self.file_batch_cond:
await self.file_batch_cond.wait()
async def process_file_batch(self):
print("processing file batch size: {}".format(len(self.file_batch)))
batch = self.file_batch
self.file_batch = {}
self.file_batch_time = None
revisits_queued = self.visit_files(
sorted(batch.items(), key=lambda x: (x[1], x[0]))
)
if revisits_queued:
async with self.revisit_cond:
self.revisit_cond.notify_all()
def init_symlink_batch_processing(self):
self.symlink_batch = {}
self.symlink_batch_time = None
self.symlink_batch_cond = asyncio.Condition()
self.loop.create_task(self.start_polling_symlink_batches())
async def add_to_symlink_batch(self, path, mtime):
self.symlink_batch[path] = mtime
if self.symlink_batch_time is None:
self.symlink_batch_time = time.time() + self.batch_timeout
if len(self.symlink_batch) > self.batch_size:
await self.process_symlink_batch()
async with self.symlink_batch_cond:
self.symlink_batch_cond.notify_all()
async def start_polling_symlink_batches(self):
while True:
while self.symlink_batch_time is not None:
wait_time = self.symlink_batch_time - time.time()
if wait_time <= 0:
await self.process_symlink_batch()
else:
self.log(
"Next symlink batch time: {} ({}s)".format(
self.symlink_batch_time, wait_time
)
)
await asyncio.sleep(wait_time)
self.log("No symlink batch due")
async with self.symlink_batch_cond:
await self.symlink_batch_cond.wait()
async def process_symlink_batch(self):
print("processing symlink batch size: {}".format(len(self.symlink_batch)))
batch = self.symlink_batch
self.symlink_batch = {}
self.symlink_batch_time = None
self.visit_symlinks(sorted(batch.items(), key=lambda x: (x[1], x[0])))
def check_skip_dir(self, path, dirname):
if path in self.config.exclude_paths:
return True
elif dirname in self.config.exclude_directories:
return True
else:
for pattern in self.exclude_patterns:
if pattern.search(path):
return True
return False
def check_skip_file(self, path):
if path in self.config.exclude_paths:
return True
        if path in self.swapfiles:
            return True
        for pattern in self.exclude_patterns:
            if pattern.search(path):
                return True
        return False
async def start_watching_roots(self):
"""Walks over the roots, setting up watches and processing the items found.
Doesn't follow symlinks.
Applies the exclusions from the config.
"""
db.clear_visits(self.db_conn)
for root in self.config.roots:
await self.watch_tree(root)
for path in db.get_unvisited_files(self.db_conn):
print(path)
await self.process_change(path, None)
async def watch_tree(self, root):
self.log("Checking files under {}".format(root))
async def watch_dir(base, dirs, files, basefd):
skip = []
for dirname in dirs:
d_path = os.path.normpath(os.path.realpath(os.path.join(base, dirname)))
if self.check_skip_dir(d_path, dirname):
skip.append(dirname)
else:
self.watch_manager.add_watch(d_path, self.watch_mask)
print("D", end="", flush=True)
# print();print(d_path)
for dirname in skip:
self.log("Skipping {}".format(dirname))
dirs.remove(dirname)
for name in files:
f_path = os.path.normpath(os.path.realpath(os.path.join(base, name)))
if self.check_skip_file(f_path):
self.log("Skipping {}".format(f_path))
continue
try:
stats = os.stat(name, dir_fd=basefd, follow_symlinks=False)
except FileNotFoundError:
stats = None
await self.process_change(f_path, stats)
print(".", end="", flush=True)
try:
d_path = os.path.normpath(os.path.realpath(root))
if os.path.isdir(d_path):
if not self.check_skip_dir(d_path, os.path.basename(d_path)):
self.watch_manager.add_watch(d_path, self.watch_mask)
for base, dirs, files, basefd in os.fwalk(root, follow_symlinks=False):
await watch_dir(base, dirs, files, basefd)
        except FileNotFoundError:
self.log("File not found - aborting scan of root {}".format(root))
async def start_polling_revisits(self):
"""Start task that triggers revisiting of paths that hadn't settled
when we last checked.
"""
while True:
now = time.time()
next_revisit_time, revisit_paths = db.due_for_revisit(self.db_conn, now)
self.log(
"Next revisit time: {} ({}s), due now: {}".format(
next_revisit_time,
(next_revisit_time or now) - now,
len(revisit_paths),
)
)
for path in revisit_paths:
try:
stats = os.stat(path, follow_symlinks=False)
except FileNotFoundError:
stats = None
await self.process_change(path, stats)
else:
if next_revisit_time is None:
async with self.revisit_cond:
await self.revisit_cond.wait()
else:
await asyncio.sleep(1)
def start_polling_changes(self):
def process_inotify_event(event):
async def task():
print("EVENT: {}".format(str(event)))
try:
stats = os.stat(event.pathname, follow_symlinks=False)
except FileNotFoundError:
stats = None
await self.process_change(event.pathname, stats)
self.loop.create_task(task())
self.notifier = pyinotify.AsyncioNotifier(
self.watch_manager, self.loop, default_proc_fun=process_inotify_event
)
def stop_polling_changes(self):
self.notifier.stop()
if __name__ == "__main__":
import config
Walker(config.config).listen()
| [
"[email protected]"
]
| |
079530cbf8ae21cb0804fcd789d21375b6add52c | cae8adc520ee71ffd9cfc82418152b4ec63f9302 | /static_server/template/merge_sort.py | 380683eb6d36d3135c3d8ad67c8363ce46d08626 | []
| no_license | dong-c-git/WSGIServer | 55111c04f4bbefe239949ddaea16c71221b7f795 | 1f0b58977e2a951f3c6dec335854dd9d6e31cdfd | refs/heads/master | 2020-08-01T17:03:30.307962 | 2019-11-09T01:45:30 | 2019-11-09T01:45:30 | 211,054,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | #coding:utf-8
def merge_sort(alist):
    """Merge sort: recursively split the list, then merge the sorted halves."""
n = len(alist)
if n <= 1:
return alist
mid = n//2
    # left: the new sorted list produced by recursively merge-sorting the left half
    left_li = merge_sort(alist[:mid])
    # right: the new sorted list produced by recursively merge-sorting the right half
    right_li = merge_sort(alist[mid:])
    # Merge the two sorted subsequences into one sorted whole
    # merge(left, right)
left_pointer,right_pointer = 0,0
result = []
while left_pointer < len(left_li) and right_pointer < len(right_li):
if left_li[left_pointer] <= right_li[right_pointer]:
result.append(left_li[left_pointer])
left_pointer += 1
else:
result.append(right_li[right_pointer])
right_pointer += 1
result += left_li[left_pointer:]
result += right_li[right_pointer:]
return result
if __name__ == '__main__':
li = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print(li)
sorted_li = merge_sort(li)
print(li)
print(sorted_li)
'''Complexity analysis:
Best-case time complexity: O(n log n)
Worst-case time complexity: O(n log n)
Stability: stable
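Recurrence behind these bounds (standard analysis): T(n) = 2*T(n/2) + O(n),
which solves to O(n log n). The merge is stable because the `<=` comparison
takes from the left half first on ties.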
''' | [
"[email protected]"
]
| |
ab876d1b73e4a42e5ab1153009f3462a8f05167a | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/keras/distribute/multi_worker_callback_tf2_test.py | 285e942adc5dcccb678b01547af6cc7e8662d14f | [
"Apache-2.0"
]
| permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,732 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks in multi-worker training with TF2."""
import json
import os
from absl.testing import parameterized
from tensorflow.python.distribute import (
collective_all_reduce_strategy as collective_strategy,
)
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import distributed_file_utils
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
def checkpoint_exists(filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
if filepath.endswith(".h5"):
return file_io.file_exists_v2(filepath)
tf_saved_model_exists = file_io.file_exists_v2(filepath)
tf_weights_only_checkpoint_exists = file_io.file_exists_v2(filepath + ".index")
return tf_saved_model_exists or tf_weights_only_checkpoint_exists
def _model_setup(test_obj, file_format):
"""Set up a MNIST Keras model for testing purposes.
This function builds a MNIST Keras model and returns relevant information
for testing.
Args:
test_obj: The `TestCase` testing object.
file_format: File format for checkpoints. 'tf' or 'h5'.
Returns:
A tuple of (model, saving_filepath, train_ds, steps) where train_ds is
the training dataset.
"""
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps
)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Pass saving_filepath from the parent thread to ensure every worker has the
# same filepath to save.
saving_filepath = os.path.join(test_obj.get_temp_dir(), "checkpoint." + file_format)
return model, saving_filepath, train_ds, steps
def get_tf_config_task():
return json.loads(os.environ["TF_CONFIG"])["task"]
def get_tf_config_cluster_spec():
return json.loads(os.environ["TF_CONFIG"])["cluster"]
def get_task_type():
return get_tf_config_task()["type"]
def get_task_index():
return get_tf_config_task()["index"]
def is_chief():
return (
"chief" not in get_tf_config_cluster_spec()
and get_task_type() == "worker"
and get_task_index() == 0
)
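# Example TF_CONFIG for worker 0 of a two-worker cluster (shape assumed from
# the accessors above):
#   {"cluster": {"worker": ["host0:port", "host1:port"]},
#    "task": {"type": "worker", "index": 0}}
# With no "chief" entry in the cluster spec, worker 0 acts as chief.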
class KerasCallbackMultiProcessTest(parameterized.TestCase, test.TestCase):
@ds_combinations.generate(
combinations.combine(
mode=["eager"], file_format=["h5", "tf"], save_weights_only=[True, False]
)
)
def test_model_checkpoint_saves_on_chief_but_not_otherwise(
self, file_format, mode, save_weights_only
):
def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
test_obj, file_format
):
model, saving_filepath, train_ds, steps = _model_setup(
test_obj, file_format
)
num_epoch = 2
extension = os.path.splitext(saving_filepath)[1]
            # Incorporate type/index information and thread id in saving_filepath to
            # ensure every worker has a unique path. Note that in the normal use case
            # the saving_filepath would be the same for all workers; we use different
            # ones here just to verify that the chief saves a checkpoint while
            # non-chief workers do not.
task_config = get_tf_config_task()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
"checkpoint_%s_%d%s"
% (task_config["type"], task_config["index"], extension),
)
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
validation_data=train_ds,
validation_steps=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=save_weights_only
)
],
)
# If it's chief, the model should be saved; if not, the model shouldn't.
test_obj.assertEqual(checkpoint_exists(saving_filepath), is_chief())
# If it's chief, the model should be saved (`write_filepath` should
# simply return `saving_filepath`); if not, i.e. for non-chief workers,
# the temporary path generated by `write_filepath` should no longer
# contain the checkpoint that has been deleted.
test_obj.assertEqual(
checkpoint_exists(
distributed_file_utils.write_filepath(
saving_filepath, model._distribution_strategy
)
),
is_chief(),
)
multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, file_format),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_model_checkpoint_works_with_same_file_path(self, mode):
def proc_model_checkpoint_works_with_same_file_path(test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)],
)
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "checkpoint")
multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_backupandrestore_checkpoint_works_with_interruption(self, mode):
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
raise RuntimeError("Interrupting!")
class AssertCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
# the interruption happened on epoch 2 as specified in
# InterruptingCallback, so the initial epoch after restart will begin
# at 2.
assert epoch > 1
def proc_model_checkpoint_works_with_same_file_path(test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 4
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
bar_dir = os.path.join(os.path.dirname(saving_filepath), "backup")
try:
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
InterruptingCallback(),
],
)
except RuntimeError as e:
if "Interrupting!" not in str(e):
raise
multi_process_runner.get_barrier().wait()
backup_filepath = os.path.join(bar_dir, "chief", "checkpoint")
test_obj.assertTrue(file_io.file_exists_v2(backup_filepath))
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
AssertCallback(),
],
)
multi_process_runner.get_barrier().wait()
test_obj.assertFalse(file_io.file_exists_v2(backup_filepath))
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "checkpoint")
multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
            # Incorporate type/index information and thread id in saving_filepath to
            # ensure every worker has a unique path. Note that in the normal use case
            # the saving_filepath would be the same for all workers; we use different
            # ones here just to verify that the chief saves summaries while
            # non-chief workers do not.
task_config = get_tf_config_task()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
"logfile_%s_%d" % (task_config["type"], task_config["index"]),
)
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)],
)
# If it's chief, the summaries should be saved in the filepath; if not,
# the directory should be empty (although created). Using
# `file_io.list_directory()` since the directory may be created at this
# point.
test_obj.assertEqual(
bool(file_io.list_directory_v2(saving_filepath)), is_chief()
)
multi_process_runner.run(
proc_tensorboard_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
saving_filepath = os.path.join(
test_obj.get_temp_dir(), "logfile_%s" % (get_tf_config_task()["type"])
)
saving_filepath_for_temp = os.path.join(saving_filepath, "workertemp_1")
os.mkdir(saving_filepath)
os.mkdir(saving_filepath_for_temp)
# Verifies that even if `saving_filepath_for_temp` exists, tensorboard
# can still save to temporary directory.
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath_for_temp))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)],
)
multi_process_runner.run(
proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_tensorboard_works_with_same_file_path(self, mode):
def proc_tensorboard_works_with_same_file_path(test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
multi_process_runner.get_barrier().wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)],
)
multi_process_runner.get_barrier().wait()
test_obj.assertTrue(file_io.list_directory_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "logfile")
multi_process_runner.run(
proc_tensorboard_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath),
)
@ds_combinations.generate(combinations.combine(mode=["eager"]))
def test_early_stopping(self, mode):
def proc_early_stopping(test_obj):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor="loss", min_delta=0.05, patience=1, verbose=1
),
epoch_counter_cbk,
]
            # Empirically, `model.fit()` is expected to terminate around the
            # 22nd epoch. Asserting that it should have been stopped before the 50th
# epoch to avoid flakiness and be more predictable.
model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
multi_process_runner.run(
proc_early_stopping,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,),
)
if __name__ == "__main__":
multi_process_runner.test_main()
| [
"[email protected]"
]
| |
c84aa32b0caf67da5aad77ebd9a75e59de60da5e | 69ffb7b27636d0f83994d46f5ea0a63d06a549ef | /usv_model/tcp_usv_15.py | e6ef95ee894eb1d514061141bd9a37ae644b8f63 | []
| no_license | Miao1127/code-charpter4 | 5b09f9584520af195c1f5107086e58e747ed856e | 5ce1d50fa603a04a4f68709d8e1d654b8753bbe8 | refs/heads/master | 2023-07-10T23:40:41.987689 | 2021-08-23T01:21:47 | 2021-08-23T01:21:47 | 398,938,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,248 | py | # _*_ coding:utf-8 _*_
# Developer: 103 Zhongshan Team - Miao Runlong
# Created: 2020/04/22 16:00
# File: usv_1_ip.py
# IDE: PyCharm
# Version notes: data is exchanged over IP sockets (host/port mode)
# Function: simulates USV #1 running Markov-decision local cooperative search,
#           with socket send/receive capability and a motion model.
# Process: 1. Initialize the grid probability map and randomly generate the USV start
#          position; call this time k. 2. Update the grid probabilities within the current
#          USV's detection range into the map, and broadcast the detected probabilities to
#          the other USVs over the swarm network. 3. Enumerate all of the USV's candidate
#          behaviour policies, each containing three future manoeuvre actions. 4. Choose
#          the policy with the largest self-information gain, fit a curve through its three
#          waypoints and the USV's current position to form the desired track, and follow
#          it; when the USV is about to leave the detection range of time k, run the next
#          Markov decision and repeat steps 2-4.
import time
import threading
import numpy as np
from queue import Queue, LifoQueue
import copy
import random
import seaborn as sns
from socket import *
import matplotlib.pyplot as plt
from file_operator import read_csv
from solver import value_func as vf
from solver import position2grid as pg
from usv_model import sensor_model as sm
def read_msg(rc_q, ri_q, rs_q):
"""
    Receive communication messages.
    :param rc_q: receives commands from the monitoring console
    :param ri_q: receives initialization info from other USVs
    :param rs_q: receives state messages from other USVs, for swarm coordination
:return:
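    Example wire format (assumed from the send/receive calls in this file;
    every message is "<TYPE>$<payload>$" terminated by a newline):
      I${'#15': [0, 0, 1, 0.99, 3, 17, 0.5, 0]}$   initialization broadcast
      U${...full usv_info dict...}$                state update
      P${'#15': [row, col, 0.87]}$                 one sensor probability cell
      C$['##2', '##1']$                            monitor command (member list)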
"""
    print('Waiting for commands!')
global map_data
mutex_r = threading.Lock()
while True:
data_received = client.recv(10240)
data_received = data_received.decode()
if data_received:
            print('Received message:')
print(data_received)
# print('*' * 20)
data = data_received.split('$')
try:
if len(data) == 3 and data[2] == '\n':
                if data[0] == 'C':  # command from the monitoring console
rc_q.put(eval(data[1]))
ri_q.queue.clear()
                elif data[0] == 'I':  # initialization state from another USV
                    ri_q.put(eval(data[1]))
                elif data[0] == 'S' or data[0] == 'U':  # state update from another USV
if rs_q.full():
rs_q.queue.clear()
else:
rs_q.put(eval(data[1]))
                elif data[0] == 'P':  # sensor detection info from another USV
sensor_info = eval(data[1])
usv_num_r = list(sensor_info.keys())[0]
new_data = sensor_info[usv_num_r][2]
old_data = map_data[sensor_info[usv_num_r][0]][sensor_info[usv_num_r][1]]
if usv_num_r != usv_num and new_data > old_data != 0:
mutex_r.acquire()
map_data[sensor_info[usv_num_r][0]][sensor_info[usv_num_r][1]] = sensor_info[usv_num_r][2]
mutex_r.release()
except Exception as err:
print(err)
continue
def send_msg(ss_q):
"""
    Send messages.
    ss_q holds this USV's state info, which is sent to the other USVs and the
    monitoring console for swarm coordination.
    sp_q (referenced by the commented-out block below) held this USV's BPSO
    decision results to be sent out.
:return:
"""
    print("Starting the message-sending thread")
    while True:
        if not ss_q.empty():  # send this USV's state info
client.send(b'\n')
string_to_send = str(ss_q.get())
client.send(string_to_send.encode())
            # print("Sending USV state data: %s\n" % string_to_send)
time.sleep(t_send)
# print(time.time())
        # if not sp_q.empty():  # send this USV's detection info
        #     client.send(b'\n')
        #     string_to_send = str(sp_q.get())
        #     client.send(string_to_send.encode())
        #     print("Sending detection data: %s\n" % string_to_send)
# print(time.time())
class InitUSV:
def __init__(self, u_info):
self.u_info = u_info
self._running = True
def terminate(self):
        print('Stopping the initialization broadcast...')
self._running = False
def run(self):
while self._running:
            string_to_send = 'I' + '$' + str(self.u_info) + '$' + '\n'  # this USV's initialization info
client.send(string_to_send.encode())
time.sleep(t_send)
def markov_solver(sp_q):
"""
    :param sp_q: passes the computed behaviour policy to move_to_pose
:return:
"""
    print("Starting the solver thread")
global usv_info, map_data
while True:
        rho = usv_info[usv_num][1]  # this USV's remaining path length
        x = usv_info[usv_num][4]  # current row index
        y = usv_info[usv_num][5]  # current column index
        phi_start = usv_info[usv_num][6]  # current heading angle
        # Free-navigation region
        if rho <= markov_rho_threshold and usv_info[usv_num][7] == 1:
            print("Computing the next behaviour policy...")
            value_best = 0
            # b = ['a3']
            # compute the USV's candidate waypoints and headings
            action = ['a1', 'a2', 'a3', 'a4', 'a5']
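            # Heading offsets applied per action, matching the branches below:
            #   a1: -pi/3 (-60 deg), a2: -pi/6 (-30 deg), a3: straight ahead,
            #   a4: +pi/6 (+30 deg), a5: +pi/3 (+60 deg)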
            # Enumerate the behaviour policies
            # First action
for b1 in action:
                # check whether the policy's target waypoint leaves the area or hits an obstacle
if b1 == 'a1':
phi1_goal = phi_start - np.pi / 3
elif b1 == 'a2':
phi1_goal = phi_start - np.pi / 6
elif b1 == 'a3':
phi1_goal = phi_start
elif b1 == 'a4':
phi1_goal = phi_start + np.pi / 6
elif b1 == 'a5':
phi1_goal = phi_start + np.pi / 3
if phi1_goal >= np.pi:
phi1_goal = phi1_goal - 2 * np.pi
elif phi1_goal < -np.pi:
phi1_goal = 2 * np.pi + phi1_goal
x1_goal = x + r * np.cos(phi1_goal)
y1_goal = y + r * np.sin(phi1_goal)
x1_grid = np.ceil(x1_goal)
y1_grid = np.ceil(y1_goal)
                # if this action leaves the area, try the next one; otherwise add it to the tentative policy
if x1_grid < region_grid_num or x1_grid > rows-region_grid_num or y1_grid < region_grid_num \
or y1_grid > cols-region_grid_num or map_data[int(x1_grid)][int(y1_grid)] == 0:
continue
else:
bb = [b1]
                    # Second action
for b2 in action:
                        # check whether the policy's target waypoint leaves the area or hits an obstacle
if b2 == 'a1':
phi2_goal = phi1_goal - np.pi / 3
elif b2 == 'a2':
phi2_goal = phi1_goal - np.pi / 6
elif b2 == 'a3':
phi2_goal = phi1_goal
elif b2 == 'a4':
phi2_goal = phi1_goal + np.pi / 6
elif b2 == 'a5':
phi2_goal = phi1_goal + np.pi / 3
if phi2_goal >= np.pi:
                            phi2_goal = phi2_goal - 2 * np.pi
if phi2_goal < -np.pi:
phi2_goal = 2 * np.pi + phi2_goal
x2_goal = x1_goal + r * np.cos(phi2_goal)
y2_goal = y1_goal + r * np.sin(phi2_goal)
x2_grid = np.ceil(x2_goal)
y2_grid = np.ceil(y2_goal)
                        # If the second consecutive action leaves the area, score the one-action
                        # policy, keep it if it is the best so far, and try the next second-action
                        # option; otherwise extend the tentative policy to two consecutive actions.
if x2_grid < region_grid_num or x2_grid > rows-region_grid_num or y2_grid < region_grid_num \
or y2_grid > cols-region_grid_num or map_data[int(x2_grid)][int(y2_grid)] == 0:
p_value = vf.policy_value(x, y, phi_start, r, d, bb, gama, map_data)
if p_value > value_best:
value_best = p_value
b = bb
continue
else:
bb = [b1, b2]
                            # Third action
for b3 in action:
                                # check whether the policy's target waypoint leaves the area or hits an obstacle
if b3 == 'a1':
phi3_goal = phi2_goal - np.pi / 3
elif b3 == 'a2':
phi3_goal = phi2_goal - np.pi / 6
elif b3 == 'a3':
phi3_goal = phi2_goal
elif b3 == 'a4':
phi3_goal = phi2_goal + np.pi / 6
elif b3 == 'a5':
phi3_goal = phi2_goal + np.pi / 3
if phi3_goal >= np.pi:
                                    phi3_goal = phi3_goal - 2 * np.pi
if phi3_goal < -np.pi:
phi3_goal = 2 * np.pi + phi3_goal
x3_goal = x2_goal + r * np.cos(phi3_goal)
y3_goal = y2_goal + r * np.sin(phi3_goal)
x3_grid = np.ceil(x3_goal)
y3_grid = np.ceil(y3_goal)
                                # If the third consecutive action leaves the area, score the
                                # two-action policy and keep the best combination so far; otherwise
                                # extend the tentative policy to three consecutive actions.
if x3_grid < region_grid_num or x3_grid > rows-region_grid_num or y3_grid < region_grid_num \
or y3_grid > cols-region_grid_num or map_data[int(x3_grid)][int(y3_grid)] == 0:
p_value = vf.policy_value(x, y, phi_start, r, d, bb, gama, map_data)
if p_value > value_best:
value_best = p_value
b = bb
else:
bb = [b1, b2, b3]
p_value = vf.policy_value(x, y, phi_start, r, d, bb, gama, map_data)
if p_value > value_best:
value_best = p_value
b = bb
            print("Selected action policy: %s" % b)
mutex.acquire()
usv_info[usv_num][7] = 0
mutex.release()
sp_q.put(b)
        # Near-boundary / obstacle-encounter region
        elif usv_info[usv_num][7] == 2:
            print("Using the obstacle-avoidance policy")
b = ['a0']
mutex.acquire()
usv_info[usv_num][7] = 0
mutex.release()
sp_q.queue.clear()
sp_q.put(b)
# Motion simulation for this USV
def move_to_pose(rp_q):
"""
    Update the USV's position and the grid probability map.
    :param rp_q: receives behaviour policies from markov_solver()
    :return: None
"""
    print("Starting the USV motion thread")
global markov_flag, usv_info, map_data
mutex_m = threading.Lock()
x = start_point[0]
y = start_point[1]
phi = start_point[2]
rho = 0
    n = 1  # obstacle-avoidance turn count
while True:
b = []
        # receive the behaviour decision produced by markov_solver
        if not rp_q.empty():
            # print('received a markov_solver result')
b = rp_q.get()
        # execute the behaviour decision
        for action in b:
            # check whether we reached a boundary or an obstacle lies ahead
            if usv_info[usv_num][7] == 2:
                break
            # obstacle-avoidance action
if action == 'a0':
phi_goal = phi + n*np.pi/180
print(n)
                print("Avoiding an obstacle...")
            # the following five actions are the Markov-decision manoeuvres
elif action == 'a1':
phi_goal = phi - np.pi / 3
n = 1
elif action == 'a2':
phi_goal = phi - np.pi / 6
n = 1
elif action == 'a3':
phi_goal = phi
n = 1
elif action == 'a4':
phi_goal = phi + np.pi / 6
n = 1
elif action == 'a5':
phi_goal = phi + np.pi / 3
n = 1
if phi_goal >= np.pi:
phi_goal = phi_goal - 2 * np.pi
elif phi_goal < -np.pi:
phi_goal = 2 * np.pi + phi_goal
phi_goal = round(phi_goal, 3)
x_goal = x + r * np.cos(phi_goal)
y_goal = y + r * np.sin(phi_goal)
x_goal = round(x_goal, 3)
y_goal = round(y_goal, 3)
            print('Next waypoint and heading: %s, %s, %s' % (x_goal, y_goal, phi_goal))
x_diff = x_goal - x
y_diff = y_goal - y
            rho = np.sqrt(x_diff ** 2 + y_diff ** 2)  # distance to the goal
            # print("next waypoint: %s" % goal)
            while rho > waypoint_threshold:
                # 1. Check whether the target waypoint leaves the area or enters an obstacle region
x_grid = np.ceil(x_goal)
y_grid = np.ceil(y_goal)
if x_grid < region_grid_num or x_grid > rows - region_grid_num or y_grid < region_grid_num \
or y_grid > cols - region_grid_num or map_data[int(x_grid)][int(y_grid)] == 0:
                    print("Computing the next obstacle-avoidance decision...")
n += 1
mutex.acquire()
usv_info[usv_num][7] = 2
b = []
mutex.release()
break
else:
                    # 2. Motion model
                    alpha = (np.arctan2(y_diff, x_diff)  # heading error relative to the line towards the goal
                             - phi + np.pi) % (2 * np.pi) - np.pi
                    beta = (phi_goal - phi - alpha + np.pi) % (2 * np.pi) - np.pi  # goal-heading error relative to that line
                    v = Kp_rho * rho  # speed grows linearly with remaining distance
                    if v < v_min:  # enforce the minimum speed
                        v = v_min
                    w = Kp_alpha * alpha + Kp_beta * beta  # angular rate grows with heading error
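                    # This is the classic polar-coordinate move-to-pose controller;
                    # by the standard analysis it is asymptotically stable when
                    # Kp_rho > 0, Kp_beta < 0 and Kp_alpha > Kp_rho (assumed here,
                    # not re-derived for these particular gains).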
phi = phi + w * dt
if phi > np.pi:
phi = phi - 2 * np.pi
elif phi <= -np.pi:
phi = phi + 2 * np.pi
x = x + v * np.cos(phi) * dt
y = y + v * np.sin(phi) * dt
x_diff = x_goal - x
y_diff = y_goal - y
rho = np.sqrt(x_diff ** 2 + y_diff ** 2) # 计算距离
                # print("distance to the next waypoint: %f" % rho)
time.sleep(dt)
                # 3. Update the grid probability map
                # partition the grid cells within the USV's detection range
                action_grid_dict = pg.grid_of_action(x, y, phi, r, d, map_data)
                # take the grid lists out of the dict, key by key
key_name = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
for k in range(len(key_name)):
key = str(k + 1)
for grid in action_grid_dict[key]:
                        # 1. get the grid indices to evaluate
                        [mm, nn] = grid
                        # skip grid indices outside the probability map
                        if mm < 0 or nn < 0 or mm >= rows or nn >= cols:
                            continue
                        # TODO: if map_data[mm][nn] == 1, treat the cell as a dynamic-target detection and record it
                        # 2. compute the detection certainty of this grid cell
p = sm.sensor(mm, nn, x, y, phi, d, r_max)
if p > map_data[mm][nn] != 0:
mutex_m.acquire()
map_data[mm][nn] = round(p, 3)
mutex_m.release()
grid_info = {usv_num: [mm, nn, round(p, 3)]}
sensor_data = 'P' + '$' + str(grid_info) + '$' + '\n'
client.send(b'\n')
client.send(sensor_data.encode())
                # 4. Update the USV state
                mutex.acquire()
                usv_info[usv_num][1] = round(rho, 3)  # remaining path length
                usv_info[usv_num][4] = round(x, 3)  # current row index
                usv_info[usv_num][5] = round(y, 3)  # current column index
                usv_info[usv_num][6] = round(phi, 3)  # current heading angle
                mutex.release()
                state_data = 'U' + '$' + str(usv_info) + '$' + '\n'  # this USV's state info
                print("Broadcasting current USV state: %s" % state_data)
                client.send(b'\n')
                client.send(state_data.encode())
                # 5. Trigger the next markov_solver computation
                if usv_info[usv_num][7] == 0:
                    print("Triggering the next Markov decision...")
mutex.acquire()
usv_info[usv_num][7] = 1
mutex.release()
# Decay of grid certainty in the probability map
def map_data_model():
    global usv_info, map_data
    mutex_map = threading.Lock()
    if usv_info[usv_num][7] == 1:
        print("Starting the probability-map model")
    while True:
        time.sleep(t_map)
        print("Probability map decaying...")
mutex_map.acquire()
for i in range(rows):
for j in range(cols):
if map_data[i][j] != 0:
map_data[i][j] = reduction_coefficient*map_data[i][j] \
+ random.uniform(-1*reduction_scale, reduction_scale)
                    # TODO: set map_data[i][j] = 1 at randomly chosen i and j to plant target cells
if map_data[i][j] < 0.1 and map_data[i][j] != 0:
map_data[i][j] = 0.1
mutex_map.release()
print(map_data)
# Plot the probability map
def draw_animation():
    print("Starting the probability-map plotting thread")
    global map_data
    fig = plt.figure("Swarm " + usv_num + " probability map")
ax = fig.add_subplot(111)
while True:
try:
if show_animation: # pragma: no cover
                plt.cla()  # clear the active axes of the current figure; other axes are unaffected
                # draw the probability heatmap
                sns.heatmap(map_data, annot=False, fmt='.1f', cbar=False, ax=ax)
                # ax.invert_yaxis()  # make the y-axis ticks increase from top to bottom
                ax.xaxis.tick_top()  # put the x-axis on top of the image
plt.grid(True)
plt.axis("equal")
plt.pause(dt)
except Exception as err:
print('err:')
print(err)
# Threading setup
def multi_threading():
    global usv_info, usv_list, usv_dict
    # threads that must be stoppable at runtime follow the pattern of thread t below
    ini_usv = InitUSV(usv_info)
    t = threading.Thread(target=ini_usv.run)
    # threads that never need to be stopped go below
    t1 = threading.Thread(target=read_msg, args=(q, q0, q1))  # receive-data thread
    t2 = threading.Thread(target=send_msg, args=(q3,))  # send-data thread
    t3 = threading.Thread(target=markov_solver, args=(q2,))  # this USV's solver
    t4 = threading.Thread(target=move_to_pose, args=(q2,))  # this USV's motion-model thread
    t5 = threading.Thread(target=draw_animation)  # live probability-map plot
    t6 = threading.Thread(target=map_data_model)  # probability decay model
    t.start()  # start the initialization broadcast thread
    t1.start()  # start the receive thread
    while True:  # respond to the monitor's initialization command
        if not q0.empty():
            # collect swarm member info
            i_msg = q0.get()  # received data format: {"##2": [0, 1, 0.25, 11, 5, 0]}
            usv_name = list(i_msg.keys())[0]
            if usv_name not in usv_list:
                usv_list.append(usv_name)
            usv_dict.update(i_msg)  # store each USV's initial position, used to compute distances to the partitions
        if not q.empty():
            c_msg = q.get()  # received data format: ['##2', '##1']
            if len(c_msg) == len(usv_list):  # the monitor's start command arrived and all swarm members are present
                ini_usv.terminate()  # stop broadcasting this USV's id and initial position to the monitor
                t2.start()
                print("Connected to the monitoring console")
                for i in range(5):
                    q3.put('S' + '$' + str(usv_info) + '$' + '\n')  # reply to the monitor's first message
                    time.sleep(0.1)
                t3.start()  # start the solver thread
                t4.start()  # start the actuator thread
                t5.start()  # start the probability-map plotting thread
                t6.start()  # start the probability decay model
                break
break
if __name__ == '__main__':
    # Inter-thread message passing
    q = LifoQueue()  # LIFO: commands received from the monitoring console
    q0 = Queue()  # initialization state from other USVs
    q1 = LifoQueue()  # latest state from other USVs, for swarm coordination
    q2 = Queue()  # desired positions/headings from markov_solver to move_to_pose()
    q3 = Queue()
    show_animation = True
    # Load swarm info
    # 1. Read the grid probability map
    map_data = read_csv.run('../file_operator/map_data_100_test.csv')
    rows, cols = np.array(map_data).shape
    print(rows, cols)
    # 2. Initialize the USV's start position and heading (range [-pi, pi]) and the sensor parameters
    usv_num = '#15'  # this USV's id
    start_point = [3, 17, np.pi*random.uniform(0, 0.5)]  # start position and heading
    cruise_speed = 1  # cruise speed
    cover_speed = 0.99  # coverage speed
    markov_flag = 0  # triggers the next markov_solver computation
    usv_info = {usv_num: [0, 0, cruise_speed, cover_speed, start_point[0], start_point[1], start_point[2], markov_flag]}
    usv_list = [usv_num]  # USV list: starts with this USV's id; other collected ids are appended later
    usv_dict = copy.deepcopy(usv_info)  # USV dict: starts with this USV's info; other USVs' states are added later
    r = 5  # effective sensor detection range
    r_max = 6  # maximum sensor detection range
    d = 1  # grid size
    Kp_alpha = 60  # angular-rate control gain
    Kp_beta = -9  # angular-rate control gain
    Kp_rho = 20
    dt = 0.01  # motion time step
    v_min = 1  # minimum USV speed
    sensor_date_t = 0.001  # sensor data transmission interval
    gama = 0.2  # self-information-gain discount factor
    region_grid_num = 2  # collision-avoidance margin, in grid cells
    markov_rho_threshold = 4  # waypoint distance threshold that triggers markov_solver
    waypoint_threshold = 1  # waypoint arrival threshold
    t_map = 60  # interval between probability-map updates
    reduction_coefficient = 0.99  # grid probability decay coefficient
    reduction_scale = 0.001  # standard deviation of the decay noise
    # 3. Check whether the USV start position lies on an obstacle
    while map_data[start_point[0]][start_point[1]] == 0:
        print('%s USV start position is on an obstacle!' % usv_num)
        start_point[0] = random.randint(0, rows-1)
        start_point[1] = random.randint(0, cols-1)
        start_point[2] = 2 * np.pi * random.random() - np.pi
        print('Regenerated %s USV start position and heading: [%d, %d, %.2f]' % (usv_num, start_point[0], start_point[1], start_point[2]))
    # TCP communication
    client = socket()
    ip_port = ("127.0.0.1", 8080)
    client.connect(ip_port)
    t_send = 0.5  # message send interval
    # global mutex used to guard shared variables
    mutex = threading.Lock()
    # start the threads that receive/send data and run the USV motion model
multi_threading()
| [
"Miao@DESKTOP-AJA95IE"
]
| Miao@DESKTOP-AJA95IE |
61023febae45328ee0de5fd4f4e2f7f2e10987be | 1347434410c173c0eed165acabfda66e3bbf735e | /bruhat/interp.py | 842ad172afe9367baaa8a1088680ba92f7786790 | [
"MIT"
]
| permissive | punkdit/bruhat | f6a857e1c7a289440f293f86ca4be9c781243347 | 6c9f94ee725843550459ac04ee9351700f90fcf1 | refs/heads/master | 2023-08-30T22:31:44.915358 | 2023-08-29T11:58:21 | 2023-08-29T11:58:21 | 86,669,985 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py | #!/usr/bin/env python3
from operator import mul
from functools import reduce
import numpy
from bruhat.poly import Poly, Q
from bruhat.util import cross, all_subsets, factorial
from bruhat.argv import argv
from bruhat.elim import solve, shortstr
ring = Q
a = Poly("a", ring)
def interp(vals):
n = len(vals)
p = Poly({}, ring)
one = Poly({():ring.one}, ring)
for i in range(n):
#print(i, vals[i])
y0 = p(a=i)
q = one
for j in range(i):
q = q*(a-j)
#print("q:", q)
for j in range(i):
assert q(a=j) == 0
#print("q(%d)=%s"%(i, q(a=i)))
r = ring.one / factorial(i)
#print("r =", r)
if y0 != vals[i]:
p = p + (vals[i] - y0)*r*q
#print("p(%d)=%s"%(i, p(a=i)))
#print()
return p
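# Example (integer nodes 0..n-1, as in __main__ below): interp([1, 4, 10, 20,
# 35, 56]) recovers the cubic binomial C(a+3, 3) = (a+1)(a+2)(a+3)/6 from the
# Newton forward-difference terms accumulated above.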
def multi_interp(target):
shape = target.shape
n = len(shape)
#print("multi_interp", shape)
vs = 'abcde'[:n]
ms = [Poly(v, ring) for v in vs]
#print(ms)
itemss = [list(range(i)) for i in shape]
coords = []
for idxs in cross(itemss):
namespace = dict((vs[i], idxs[i]) for i in range(n))
#print(namespace)
coords.append(namespace)
A = []
polys = []
for idxs in cross(itemss):
p = reduce(mul, [p**i for (p,i) in zip(ms, idxs)])
polys.append(p)
#print(idxs, p)
row = []
for coord in coords:
v = p.substitute(coord)
row.append(ring.promote(v))
A.append(row)
A = numpy.array(A, dtype=object)
A = A.transpose()
#print(A.shape)
#print(shortstr(A))
rhs = target.view()
rhs.shape = (len(A),1)
#print(rhs)
print("solve...")
v = solve(ring, A, rhs)
assert v is not None
print(shortstr(v))
q = ring.zero
for i, p in enumerate(polys):
q = q + v[i, 0]*p
return q
def multi_factorize(p, N=10, denom=2):
vs = p.get_vars()
#print("multi_factorize", vs)
ring = p.ring
d = p.degree
factors = []
idxss = list(all_subsets(len(vs)))
idxss.sort(key = len)
assert idxss[0] == []
idxss.pop(0)
for idxs in idxss:
subvs = [vs[idx] for idx in idxs]
print("subvs:", subvs)
coords = [[-ring.promote(x)/denom for x in range(N)] for v in subvs]
for ii in cross(coords):
kw = dict((subvs[i], ii[i]) for i in range(len(subvs)))
y = p(**kw)
if y!=0:
continue
q = ring.zero
for k,v in kw.items():
#print("\t", k, v)
q += Poly(k, ring) - v
while 1:
print("factor:", q)
div, rem = q.reduce(p)
if rem != 0:
break
factors.append(q)
p = div
print("\t", p)
if p.degree == 1:
break
if p != 1:
factors.append(p)
return factors
def factorize(p):
ring = p.ring
d = p.degree
factors = []
for i in range(6*20):
i = ring.one*i/6
y = p(a=-i)
if y!=0:
continue
while 1:
f = (a+i)
div, rem = f.reduce(p)
if rem != 0:
break
factors.append(a+i)
p = div
if p != 1:
factors.append(p)
return factors
if argv.vals is not None:
vals = argv.get("vals", [1, 4, 10, 20, 35, 56])
p = interp(vals)
print("p =", p)
print("degree =", p.degree)
print("factors:", factorize(p))
#print(["%s"%p(a=i) for i in range(n)])
#print([(a-i).reduce(p)[1] for i in range(n)])
elif 1:
if 0:
# B2
vals = numpy.array(
[[1, 10, 35, 84, 165],
[5, 35, 105, 231, 429],
[14, 81, 220, 455, 810],
[30, 154, 390, 770, 1326],
[55, 260, 625, 1190, 1995]])
name = argv.next() or "A2"
N = int(argv.next() or 4)
import os
data = os.popen("./sl.sage %s %s"%(name, N)).read()
vals = eval(data)
vals = numpy.array(vals)
#vals = vals[:,0,:,0]
vals = vals.copy()
print(vals)
p = multi_interp(vals)
print("degree:", p.degree)
print(p)
#factors = multi_factorize(p)
#print(factors)
elif 0:
f = lambda a, b, c : (a+1)*(b+1)*(c+1)*(a+b+2)*(b+c+2)*(a+b+c+3)//12
N = 5
for c in range(3):
for b in range(N):
for a in range(N):
print("%6s"%f(a, b, c), end=" ")
print()
print()
elif 0:
f = lambda a, b, c, d : (
(a+1)*(b+1)*(c+1)*(d+1)*
(a+b+2)*(b+c+2)*(c+d+2)*
(a+b+c+3)*(b+c+d+3)*
(a+b+c+d+4)
//288)
N = 5
for d in range(3):
for c in range(3):
for b in range(N):
for a in range(N):
print("%6s"%f(a, b, c, d), end=" ")
print()
print()
print()
| [
"[email protected]"
]
| |
c95ae527b44134a8cadd967e79f37488605ec84d | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/c09595ef061b70e4df39ae8cf5cc8b1905550faec62d2ffdd3e03598833e50ce/PyQt5/QtQuick/QSGMaterial.py | 93e72583b02cd1bd2751c8b8edd627440be97d51 | []
| no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | # encoding: utf-8
# module PyQt5.QtQuick
# from C:\Users\Doly\Anaconda3\lib\site-packages\PyQt5\QtQuick.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import PyQt5.QtQml as __PyQt5_QtQml
import sip as __sip
class QSGMaterial(__sip.wrapper):
""" QSGMaterial() """
def compare(self, QSGMaterial): # real signature unknown; restored from __doc__
""" compare(self, QSGMaterial) -> int """
return 0
def createShader(self): # real signature unknown; restored from __doc__
""" createShader(self) -> QSGMaterialShader """
return QSGMaterialShader
def flags(self): # real signature unknown; restored from __doc__
""" flags(self) -> QSGMaterial.Flags """
pass
def setFlag(self, Union, QSGMaterial_Flags=None, QSGMaterial_Flag=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
""" setFlag(self, Union[QSGMaterial.Flags, QSGMaterial.Flag], enabled: bool = True) """
pass
def type(self): # real signature unknown; restored from __doc__
""" type(self) -> QSGMaterialType """
return QSGMaterialType
def __init__(self): # real signature unknown; restored from __doc__
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Blending = 1
CustomCompileStep = 16
RequiresDeterminant = 2
RequiresFullMatrix = 14
RequiresFullMatrixExceptTranslate = 6
| [
"[email protected]"
]
| |
fa7a3160b9d8970589e8b2ca45c4b350ba6993d8 | 73f09d06295ae4cf949b4b18a8dced861d3067f2 | /cli/almond_cloud/cmd/k8s/tail.py | fc4cd3f4732b7178b8f4db39c1e47032b8bdac03 | [
"Apache-2.0"
]
| permissive | tribe-health/almond-cloud | b7d8f6aeca83e047dfc28e5f84bb441f7b9646a7 | 95744da7aec789359093689704f4e2a989de1600 | refs/heads/master | 2023-06-28T02:39:32.162066 | 2022-12-20T03:24:54 | 2022-12-20T03:24:54 | 269,748,762 | 0 | 0 | NOASSERTION | 2020-06-05T19:12:45 | 2020-06-05T19:12:44 | null | UTF-8 | Python | false | false | 3,637 | py | from typing import Iterable, List, NoReturn, Optional
from queue import Queue
from threading import Thread
import splatlog as logging
from kubernetes import client, config
from kubernetes.watch import Watch
from kubernetes.client.models.v1_pod import V1Pod
from clavier import arg_par, err, io
from almond_cloud.lib import targets
from almond_cloud.config import CONFIG
LOG = logging.getLogger(__name__)
DESC = f"""\
Follow logs of one or more pods.
"""
def add_parser(subparsers: arg_par.Subparsers):
parser = subparsers.add_parser(
"tail",
target=tail,
help=DESC.splitlines()[0],
description=DESC,
)
parser.add_argument(
"pod_names",
nargs="+",
help="Pods to follow, which are prefix-matched against the name",
)
parser.add_argument(
"-t",
"--target",
dest="target_name",
default="local",
help="Target name with the Thingpedia url and access-token to use",
)
parser.add_argument(
"-l",
"--lines",
dest="tail_lines",
default=42,
help="How many lines to print at start",
)
def match_pod_name(pod_names: Iterable[str], pod: V1Pod) -> bool:
for name in pod_names:
if pod.metadata.name == name or pod.metadata.name.startswith(
f"{name}-"
):
return True
return False
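# e.g. match_pod_name(["frontend"], pod) is True for a pod named "frontend" or
# any prefixed name such as "frontend-5d9c7b" (hypothetical names).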
def tail_one(
api_v1: client.CoreV1Api, pod_name: str, namespace: str, tail_lines: int
) -> NoReturn:
watch = Watch()
color_name = io.capture(f"[dim white]{pod_name}[/]", end="")
for line in watch.stream(
api_v1.read_namespaced_pod_log,
pod_name,
namespace,
tail_lines=tail_lines,
):
print(f"{color_name} {line}")
def _thread_tail(
queue: Queue,
api_v1: client.CoreV1Api,
pod_name: str,
pad_width: int,
namespace: str,
) -> NoReturn:
watch = Watch()
padded_name = ("{:<" + str(pad_width) + "}").format(pod_name)
left_col = io.capture(f"[dim white]{padded_name}[/]", end="")
for line in watch.stream(
api_v1.read_namespaced_pod_log, pod_name, namespace, tail_lines=0
):
queue.put(left_col + line)
def tail_many(
api_v1: client.CoreV1Api, pod_names: List[str], namespace: str
) -> NoReturn:
max_name_length = max(len(n) for n in pod_names)
pad_width = (int(max_name_length / 4) + 1) * 4
queue = Queue()
threads = [
Thread(
target=_thread_tail,
args=(queue, api_v1, pod_name, pad_width, namespace),
)
for pod_name in pod_names
]
for thread in threads:
        thread.daemon = True
thread.start()
while True:
print(queue.get())
def tail(pod_names: List[str], target_name: str, tail_lines: int):
target = targets.get(target_name)
namespace = target["k8s.namespace"]
context = target.get("k8s.context")
LOG.info(
"Tailing pods...",
context=context,
namespace=namespace,
)
config.load_kube_config(context=context)
api_v1 = client.CoreV1Api()
all_pods = api_v1.list_namespaced_pod(namespace).items
pods = [pod for pod in all_pods if match_pod_name(pod_names, pod)]
if len(pods) == 0:
LOG.error(
"No pods found.",
pod_names=pod_names,
available_pods=sorted([pod.metadata.name for pod in all_pods]),
)
raise err.UserError("No pods found.")
if len(pods) == 1:
tail_one(api_v1, pods[0].metadata.name, namespace, tail_lines)
tail_many(api_v1, [pod.metadata.name for pod in pods], namespace)
| [
"[email protected]"
]
| |
f327539d93e11b288578e502e51e08a094303cb7 | 10b4c22bdb4a1737028c730136d924e1665196c4 | /src/TheLanguage/Grammars/v1_0_0/IntegrationTests/PassStatement_IntegrationTest.py | 88edae76da4a931c98fbfec7c935bf7df1615856 | [
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"Python-2.0"
]
| permissive | misharp/DavidBrownell_TheLanguage | 54bd886aae3814cd691328aad9ee1e4f65c7c427 | cae8cbef94b2054f80f6df06e945e70a13a0da69 | refs/heads/master | 2023-05-23T16:11:26.372177 | 2021-06-17T04:27:06 | 2021-06-17T04:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | # ----------------------------------------------------------------------
# |
# | PassStatement_IntegrationTest.py
# |
# | David Brownell <[email protected]>
# | 2021-06-15 17:09:30
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Automated test for PassStatement.py"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from . import Execute
from ..CommentStatement import *
# ----------------------------------------------------------------------
def test_Standard():
assert Execute(
textwrap.dedent(
"""\
pass
""",
),
) == textwrap.dedent(
"""\
<Root>
1.0.0 Grammar
Pass
'pass' <<Regex: <_sre.SRE_Match object; span=(0, 4), match='pass'>>> ws:None [1, 1 -> 1, 5]
""",
)
| [
"[email protected]"
]
| |
c904e60f7ce81205000cd204b1bbed82b6936fa4 | e953ae5da775a934b86379cfa3d864bb7376fe36 | /06 basic_python/1.py | 22e6ddfdbac0d85f102006479e6e997e28f9fad7 | []
| no_license | agyenes/greenfox-exercises | 1481f17d1ddd78099d17022aa1800955ae39d92b | a2c7912c61708c6ebc53c9a22f8c09550432d4c3 | refs/heads/master | 2020-04-11T00:42:17.842170 | 2016-10-19T06:10:22 | 2016-10-19T06:10:22 | 68,081,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # Create a `Circle` class that takes it's radius as cinstructor parameter
# It should have a `get_circumference` method that returns its circumference
# It should have a `get_area` method that returns its area
import math
class Circle():
def __init__(self, radius):
self.radius = radius
def get_circumference(self):
return self.radius * 2 * math.pi
def get_area(self):
return self.radius ** 2 * math.pi
circle1 = Circle(5)
print(circle1.get_circumference())
print(circle1.get_area())
| [
"[email protected]"
]
| |
75904dffa7c7e1533d4cdc92760c5409cdef2da5 | f00699824a8c5def54421ee3cf836ec2cd15d957 | /3/django_1703_day3/app01/views.py | 594e5317ae6464d78852b58176e46267d82e60db | []
| no_license | ZhiqiWu/student_manage | 9171d78c32d6900b08de9034b9a2f50c9e24d0b8 | da12ebaf4e9d6357cd9f832144ed756a55510433 | refs/heads/master | 2023-03-29T04:56:09.486061 | 2020-01-24T08:11:50 | 2020-01-24T08:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | #coding:utf8
from django.shortcuts import render,HttpResponse,render_to_response
import logging
from django.template import Template,Context
import datetime
# Create your views here.
#创建一个日志器
mylogger = logging.getLogger('app01')
class Human(object):
name = '人'
age = 10000
def sayGoodBye(self):
#return 回去的值就是模板中显示的值
return 'sayGoodBye-------------'
# def index(request):
# try:
# with open('wer','r') as f:
# f.read()
# except Exception,e:
# mylogger.error(str(e))
# return HttpResponse('app01 index page ok')
def index(request):
return render(request,'app01/index.html')
def zhale(request):
mylogger.critical('洗澡城炸了')
return HttpResponse('ok')
def tpl(request):
# --------------------------------------------------------------------
t = Template("<h1>My name is {{name}}.</h1>") # 一小段html 加载到Template里,返回Template对象
context = {'name' : 'Alice'} # 上下文字典,准备渲染到模版中的变量
c = Context(context) # 初始化一个Context对象 传入上下文字典
html = t.render(c) # 渲染模板,选入Context对象
# --------------------------------------------------------------------
# render_to_response 不需要传入request对象,
# render需要
# return render_to_response('app01/tpl.html')
# 上下文字典
h1 = Human()
context = {
'name' : '小美',
'engname' : 'XIAOMEI',
'age' : 18,
'sex' : '中',
'score' : 100.99,
#'subject' : ['python','php','java','hadoop','openstack','docker','c++'],
'subject' : [],
'info' : {'interest':'打游戏','money':0},
'h1' : h1,
'china' : [
{'北京' : ['朝阳','海淀',u'三里屯','什刹海','中南海','天安门','changping']},
{'黑龙江' : ['哈尔滨','牡丹江','齐齐哈尔','鸡西','日本','首尔','俄罗斯']},
],
'range': range(1,11),
'desc' : "了矿务局儿科就了哦字。, \n想,臭美吧厘米",
'desc1' : "how are old you",
'now' : datetime.datetime.now(),
'suibian' : None,
'link' : '<a href="http://www.baidu.com">点我</a>'
}
    return render(request,'app01/tpl.html',context) # a1: request object, a2: template path, a3: context dict
"[email protected]"
]
| |
40f84b9fcfd36bd03c76173a2577863e43c9fd11 | a122d660fb6484d3dc2c5a13c7cf686e2bff0ee3 | /jr/meg/artefact.py | cefd441b4e6a1e14a53551c22abdae8a74e4a1e3 | [
"BSD-2-Clause"
]
| permissive | LauraGwilliams/jr-tools | 13e27a013f2a7e194f7701aa6716ade2849f8446 | cd2c9ed90eccea320b8cece0f5923960e3174829 | refs/heads/master | 2021-01-17T11:45:33.525798 | 2016-04-30T16:21:05 | 2016-04-30T16:21:05 | 57,447,042 | 0 | 0 | null | 2016-04-30T14:46:09 | 2016-04-30T14:46:08 | null | UTF-8 | Python | false | false | 9,082 | py | # Author: Jean-Remi King <[email protected]>
#
# Licence : GNU GPLv3
import numpy as np
from sklearn.base import BaseEstimator
from joblib import Parallel, delayed
class SelfRegression(BaseEstimator):
""" Fit a series of regressors that aim at predicting each feature when
the latter is hidden from the regressors.
Parameters
----------
estimator : sklearn regressor | None
The regressor. Defaults to LinearRegression()
n_jobs : int
The number of parallel cores.
Attributes
----------
estimators_ : array, shape (n_feature)
        The array of fitted estimators, one per feature.
    y_pred_ : array, shape (n_samples, n_feature)
The predictions.
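    Minimal usage sketch (X assumed to have shape (n_samples, n_features)):
        art = SelfRegression(n_jobs=1)
        art.fit(X)
        residual = ((art.predict(X) - X) ** 2).mean(axis=0)
    A feature with a large residual is poorly predicted by the others.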
"""
def __init__(self, estimator=None, n_jobs=-1):
from mne.parallel import check_n_jobs
from sklearn.linear_model import LinearRegression
self.estimator = LinearRegression() if estimator is None else estimator
self.n_jobs = n_jobs = check_n_jobs(n_jobs)
def fit(self, X):
"""Fits a regressor for each feature.
Parameters
----------
X : array, shape (n_sample, n_feature)
The data.
"""
from sklearn.base import clone
n_sample, self.n_feature_ = X.shape
# Setup parallel
n_splits = n_jobs = np.min([self.n_jobs, self.n_feature_])
parallel = Parallel(n_jobs)
p_func = delayed(_fit_loop)
# Split chunks of features to avoid overheads
splits = np.array_split(np.arange(self.n_feature_), n_splits)
out = parallel(p_func([clone(self.estimator) for f in split], X, split)
for split in splits)
self.estimators_ = np.concatenate(out, axis=0)
def predict(self, X):
"""Predict all features.
Parameters
----------
X : array, shape (n_sample, n_feature)
The data.
Returns
-------
X_pred : array, shape(n_sample, n_feature)
"""
n_sample, n_feature = X.shape
if n_feature != self.n_feature_:
raise ValueError('X must have same dims in fit and predict.')
n_splits = n_jobs = np.min([self.n_jobs, self.n_feature_])
parallel = Parallel(n_jobs)
p_func = delayed(_predict_loop)
splits = np.array_split(np.arange(n_feature), n_splits)
y_pred = parallel(p_func(self.estimators_[split], X, split)
for split in splits)
self.y_pred_ = np.hstack(y_pred)
return self.y_pred_
def _fit_loop(estimators, X, split):
"""Auxiliary functions of SelfRegression"""
_, n_feature = X.shape
for feature, estimator in zip(split, estimators):
features = np.delete(np.arange(n_feature), feature)
estimator.fit(X[:, features], y=X[:, feature])
return estimators
def _predict_loop(estimators, X, split):
"""Auxiliary functions of SelfRegression"""
n_sample, n_feature = X.shape
y_pred = np.zeros((n_sample, len(split)))
for f_idx, (feature, estimator) in enumerate(zip(split, estimators)):
features = np.delete(np.arange(n_feature), feature)
y_pred[:, f_idx] = estimator.predict(X[:, features])
return y_pred
def detect_bad_channels(raw, estimator=None, n_train=1e4, n_test=1e4,
n_jobs=-1, picks=None,):
"""This example shows how EEG/MEG bad channel detection can be done by
trying to predict the value of each channel of each time point from the
activity of all other channels at the corresponding time points.
    Indeed, knowing the high spatial correlation of EEG/MEG signals, a given
    channel can be considered noisy if it doesn't (anti)correlate with any
    other channel.
Note that:
    - this doesn't work for intracranial EEG, where the spatial
correlation is much smaller.
- this method isn't ideal to identify bad timing. For this, I would
recommend Alex. Barachant's Potato algorithm available at
http://github.com/abarachant/pyRiemann
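    Hypothetical usage, ranking channels by mean self-prediction error:
        errors = detect_bad_channels(raw)
        worst = np.argsort(errors.mean(axis=0))[::-1][:5]  # five noisiest picks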
"""
from mne import pick_types
from sklearn.preprocessing import RobustScaler
# Subsample times for faster computation
# Note that, considering that n_sample >> n_feature, a real cross-
# validation isn't really necessary
times = np.arange(len(raw.times))
np.random.shuffle(times)
times = times[:(n_train + n_test)]
if picks is None:
picks = pick_types(raw.info, meg=True, eeg=True)
X = raw._data[picks, :][:, times].T.copy()
# To be consistent across chan types, we'll normalize the data:
X = RobustScaler().fit_transform(X)
n_time, n_chan = X.shape
# Fit
art = SelfRegression(estimator=estimator, n_jobs=n_jobs)
art.fit(X[:n_train, :])
Xpred = art.predict(X[-n_test:, :])
# Score
errors = (Xpred-X[-n_test:, :]) ** 2
return errors
def remove_linenoise(raw, noise_freq, width=2, shuffle_time=True, decim=100,
n_component=1, plot=False, copy=True, picks=None,
harmonics=True):
import matplotlib.pyplot as plt
from mne import pick_types
from mne.preprocessing import ICA
from mne.time_frequency.psd import psd_welch
# Setup line frequency
if isinstance(noise_freq, str):
# automatic harmonics
if noise_freq == 'us':
noise_freq = 60
else:
noise_freq = 50
elif not isinstance(noise_freq, (float, int)):
raise NotImplementedError('Multiple bands')
def plot_psd(psd, freqs, ax, title):
for psd_ in psd:
ax.plot(freqs, np.log10(psd_))
ax.set_xlabel('Frequencies')
ax.set_title(title)
if copy:
raw = raw.copy()
if picks is None:
picks = pick_types(raw.info, eeg=True, meg=True, seeg=True)
if plot:
fig, axes = plt.subplots(1, 3, sharex=True)
psd, freqs = psd_welch(raw, picks=picks)
plot_psd(psd, freqs, axes[0], 'Raw Sensors')
# Fit ICA on filtered data
raw_ = raw.copy()
if harmonics:
# set up harmonics
n_harm = raw_.info['sfreq'] // (2. * noise_freq) + 1
harmonics = noise_freq * np.arange(1, n_harm)
        # Band pass filtering outside lowest harmonics and Nyquist
raw_.filter(noise_freq - width, harmonics[-1] + width)
# Band stop filter in between harmonics
raw_.notch_filter(freqs=harmonics[:-1]+noise_freq//2,
notch_widths=noise_freq - 2*width)
else:
raw_.filter(noise_freq-width, noise_freq+width)
# Shuffle time axis to avoid decimation aliasing
if shuffle_time:
time = np.arange(raw_.n_times)
np.random.shuffle(time)
raw_._data[:, time] = raw_._data
ica = ICA(verbose=False)
ica.fit(raw_, decim=decim, picks=picks)
# Compute PSD of components
raw_._data[picks, :] = np.dot(ica.mixing_matrix_, raw._data[picks, :])
psd, freqs = psd_welch(raw_, picks=picks)
if plot:
plot_psd(psd, freqs, axes[1], 'Components')
# Find noise component and remove
freq = np.where(freqs >= noise_freq)[0][0]
sel = np.argsort(psd[:, freq])[-n_component:].tolist()
raw_ = ica.apply(raw, exclude=sel, copy=True)
if plot:
psd, freqs = psd_welch(raw_, picks=picks)
plot_psd(psd, freqs, axes[2], 'Clean sensors')
return raw_
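# Illustrative usage sketch (assumes 50 Hz mains noise and an mne.io.Raw
# object named `raw`; removing a single ICA component is an assumption):
# raw_clean = remove_linenoise(raw, noise_freq=50., n_component=1, plot=False)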
def find_reference(raw, n_cluster, pick_types=None, copy=True,
flat_threshold=1e-15, n_split=100, plot=True):
""" Computes covariance on splits of the raw data, and apply KMeans
clustering to find the number of disjoint references.
n_cluster is found with PCA if float
"""
import matplotlib.pyplot as plt
from pyriemann.estimation import Covariances
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
if copy:
raw = raw.copy()
# Remove flat lines
flat = np.where(np.std(raw._data, axis=1) < flat_threshold)[0]
for ch in flat:
raw.info['bads'] += [raw.ch_names[ch]]
# Pick data channels only
if pick_types is None:
pick_types = dict(seeg=True, exclude='bads')
raw.pick_types(**pick_types)
# Compute covariance on data splits
n_time = len(raw.times)
t_max = raw.times[n_time - n_time % n_split - 1]
raw.crop(0, t_max, copy=False) # ensure regularly sized splits
X = np.array(np.array_split(raw._data, n_split, axis=1))
covs = Covariances().fit_transform(X)
# Compute cluster for each data split
cluster = KMeans(n_cluster)
all_kmeans = list()
for cov in covs:
dist = pairwise_distances(cov)
all_kmeans.append(cluster.fit_predict(dist))
# Combine clusters
dist = pairwise_distances(np.array(all_kmeans).T)
idx = cluster.fit_predict(dist)
if plot:
idx_ = np.argsort(idx)
cov = np.median(covs, axis=0)
plt.matshow(np.log10(cov)[idx_, :][:, idx_])
clusters = [np.array(raw.ch_names)[idx == ii] for ii in np.unique(idx)]
return clusters
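# Illustrative usage sketch (assumes sEEG data with two suspected reference
# groups; n_cluster=2 is an assumption made for the example):
# clusters = find_reference(raw, n_cluster=2, plot=False)
# for ii, chans in enumerate(clusters):
#     print('reference group %i:' % ii, chans)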
| [
"[email protected]"
]
| |
95a048abac7170e8cce10bc4791ad785d762f8aa | 3db1c06cd10d4a72c3e778006364d5a83d1c5e2c | /subisuhostcheck/oltcheck/apps.py | fc0613338602d6c6160a1f1a6a130ee5b7050f1f | []
| no_license | shaktijeet-ego/hostdown | 14f07d309c0346ea0a67d321d774a788d2a1b75e | 9eab7ff08746c0c276bdc46bd1f52d2f02d7d2bb | refs/heads/master | 2023-04-05T19:22:57.064463 | 2021-04-28T03:35:34 | 2021-04-28T03:35:34 | 353,187,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class OltcheckConfig(AppConfig):
name = 'oltcheck'
| [
"[email protected]"
]
| |
70b004e7a623bdaba5208a8f234471c520d7a44b | f157635f2e0d175bbbe4d0fdc615cd00e313ea80 | /ClassificationAndRegression/GeneralizedLinearModels/ARDRegression.py | e3df0c5fd3cb189ff63d077914a1d3c28a75eeb3 | [
"MIT"
]
| permissive | kopok2/MachineLearningAlgorithms | 66a20884bb40fabd0351b33e37ed04ec69bf2691 | 9d5eb9c17a1354e726b79e9cfae9e5638976b919 | refs/heads/master | 2021-06-30T14:47:01.490219 | 2020-11-30T09:08:11 | 2020-11-30T09:08:11 | 196,454,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # coding=utf-8
"""ARD Regression.
Automatic Relevance Determination.
"""
import numpy as np
from sklearn import linear_model
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from scipy import stats
if __name__ == "__main__":
print("Generating data...")
n_samples, n_features = 2000, 50
X = np.random.randn(n_samples, n_features)
rel_features = 10
lambda_ = 0.4
w = np.zeros(n_features)
rel_f = np.random.randint(0, n_features, rel_features)
for i in rel_f:
w[i] = stats.norm.rvs(loc=0, scale=1.0 / np.sqrt(lambda_))
alpha_ = 0.30
noise = stats.norm.rvs(loc=0, scale=1.0 / np.sqrt(lambda_), size=n_samples)
y = np.dot(X, w) + noise
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
print("Fitting model...")
ard = linear_model.ARDRegression()
ard.fit(X_train, y_train)
print("R2 score: {0}".format(r2_score(y_test, ard.predict(X_test))))
print("Plotting predictions...")
plt.scatter(np.arange(n_samples // 2), y_train, color="red")
plt.scatter(np.arange(n_samples // 2) + n_samples // 2, y_test, color="fuchsia")
plt.plot(np.arange(n_samples), ard.predict(X), color="purple")
plt.show()
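    # Optional check of which features ARD judged relevant: compare the
    # learned coefficients against the true weights (an illustrative add-on,
    # not required by the example above):
    # print(np.vstack([w, ard.coef_]).T[rel_f])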
| [
"[email protected]"
]
| |
eaddf9c02bba01b2e67dcfbd369737b7a59732e4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_103/ch57_2020_04_13_15_35_20_007820.py | c5500bc24504c86df8f638052c8b4955a9d7877c | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | def verifica_progressao(lista):
    # Arithmetic common difference taken from the first two terms
    razaopa = lista[1] - lista[0]
    eh_pa = all(lista[i + 1] - lista[i] == razaopa for i in range(len(lista) - 1))
    # A geometric ratio is only well defined when no term is zero
    eh_pg = False
    if all(termo != 0 for termo in lista):
        razaopg = lista[1] / lista[0]
        eh_pg = all(lista[i + 1] / lista[i] == razaopg for i in range(len(lista) - 1))
    if eh_pa and eh_pg:
        return 'AG'
    elif eh_pa:
        return 'PA'
    elif eh_pg:
        return 'PG'
    else:
        return 'NA'
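# Examples: verifica_progressao([1, 2, 3]) -> 'PA',
# verifica_progressao([1, 2, 4]) -> 'PG',
# verifica_progressao([5, 5, 5]) -> 'AG' (a constant sequence is both),
# verifica_progressao([1, 2, 5]) -> 'NA'.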
| [
"[email protected]"
]
| |
11c200638fbf348a5ea5e1f13aec2570258ce52e | 917aecb0568e70b2b0e3a6969076b0f28e48eca3 | /archive/wiki_web_traffic_predict/LSTM_train_V1.py | a036778335788c1c296e1c898044070e1a1f3d2f | []
| no_license | yennanliu/Kaggle.com_mini_project | d708af67172144ca2e4bac317e1dc44a59b99a95 | 9ca654692acf5c29c466c26cd101b10d1dd98a7c | refs/heads/master | 2020-05-21T14:00:45.873899 | 2020-03-28T01:36:17 | 2020-03-28T01:36:17 | 50,233,788 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,673 | py | # ops
import numpy as np
import pandas as pd
import datetime as dt
import time
import math
import re
# DL
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM, GRU
from keras.layers import Convolution1D, MaxPooling1D
from keras.callbacks import Callback
def load_data():
train = pd.read_csv('train_1.csv').fillna(0)
return train
# help functions
def get_language(page):
res = re.search('[a-z][a-z].wikipedia.org',page)
if res:
"""
----- fix here for python 3 ----
https://stackoverflow.com/questions/18493677/how-do-i-return-a-string-from-a-regex-match-in-python
"""
return res.group(0)[:2]
return 'na'
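# For instance, get_language('2NE1_zh.wikipedia.org_all-access_spider')
# returns 'zh', while pages without an '<xx>.wikipedia.org' pattern
# (e.g. commons or media pages) fall back to 'na'.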
def get_aggregated_data(train):
lang_sets = {} # get the search data without language column
lang_sets['en'] = train[train.lang=='en'].iloc[:,0:-1]
lang_sets['ja'] = train[train.lang=='ja'].iloc[:,0:-1]
lang_sets['de'] = train[train.lang=='de'].iloc[:,0:-1]
lang_sets['na'] = train[train.lang=='na'].iloc[:,0:-1]
lang_sets['fr'] = train[train.lang=='fr'].iloc[:,0:-1]
lang_sets['zh'] = train[train.lang=='zh'].iloc[:,0:-1]
lang_sets['ru'] = train[train.lang=='ru'].iloc[:,0:-1]
lang_sets['es'] = train[train.lang=='es'].iloc[:,0:-1]
sums = {} # avg daily searching (for each language )
for key in lang_sets:
sums[key] = lang_sets[key].iloc[:,1:].sum(axis=0) / lang_sets[key].shape[0]
print (sums)
return sums
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
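# Quick illustration of create_dataset: with look_back=1 the (scaled) series
# [s0, s1, s2, s3] becomes X = [[s0], [s1]] and y = [s1, s2], i.e. each
# sample predicts the next time step from the previous one.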
def single_input_LSTM(sums,language):
#for language in sums.keys():
#for language in ['fr']:
scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(sums[language].values.reshape(-1, 1))  # use .values: reshape on a bare Series was removed in newer pandas
print ('language : ', language)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
print ('-------')
print ('train_size : ', train_size)
print ('test_size : ', test_size)
print ('-------')
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=20, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
#plot
"""
series,=plt.plot(scaler.inverse_transform(dataset)[:,])
prediccion_entrenamiento,=plt.plot(trainPredictPlot[:,],color = 'red')
prediccion_test,=plt.plot(testPredictPlot[:,],color = 'blue')
plt.title('Web View Forecasting (LSTM, lookback=1)')
plt.xlabel('Number of Days from Start')
plt.ylabel('Web View')
plt.legend()
    plt.legend([series,prediccion_entrenamiento,prediccion_test],['all data','train','test'], loc='upper right')
plt.show()
"""
if __name__ == '__main__':
train = load_data()
train['lang'] = train.Page.map(get_language)
print (train.head(3))
sums = get_aggregated_data(train)
single_input_LSTM(sums,'ja')
| [
"[email protected]"
]
| |
f59cf85c3391d05a7cd3c647b118d27d9acc8f44 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_virtual_machine_run_commands_operations.py | 0abb8cec5a340f97b7a6b41b4d141a7d9fcc1479 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
]
| permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 7,966 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineRunCommandsOperations:
"""VirtualMachineRunCommandsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.RunCommandListResult"]:
"""Lists all available run commands for a subscription in a location.
        :param location: The location upon which run commands are queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RunCommandListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.RunCommandListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RunCommandListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands'} # type: ignore
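    # Illustrative call pattern (client construction elided; the region name
    # "westus2" is only an example):
    #
    #     async for cmd in compute_client.virtual_machine_run_commands.list("westus2"):
    #         print(cmd)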
async def get(
self,
location: str,
command_id: str,
**kwargs
) -> "models.RunCommandDocument":
"""Gets specific run command for a subscription in a location.
        :param location: The location upon which run commands are queried.
:type location: str
:param command_id: The command id.
:type command_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunCommandDocument, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_12_01.models.RunCommandDocument
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandDocument"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'commandId': self._serialize.url("command_id", command_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunCommandDocument', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}'} # type: ignore
| [
"[email protected]"
]
| |
c0ae26be6c3e7f00a90660c2254e971d71819294 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02997/s890368623.py | 91aaff9a40a4f404716381112f1c21a775c6e166 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | N,K=map(int,input().split())
fil=int((N-1)*(N-2)/2)
if K>fil:
print(-1)
else:
M=N-1+fil-K
print(M)
for i in range(N-1):
print(1,i+2)
count=0
list=[]
for j in range(2,N):
for k in range(j+1,N+1):
list.append((j,k))
for i in range(fil-K):
print(list[i][0],list[i][1]) | [
"[email protected]"
]
| |
4920502b9c8a0d5803967e8e6e59f571e503e028 | ba78d67c366d213c54d7fd88cef0d0bc1d40b1cd | /51.py | 1307af3e0e3d2a0b1e38eff320796cbaf4ad662c | [
"MIT"
]
| permissive | thaisNY/GuanabaraPy | 4de00dce606a729fe18936481d77b18efd5f6859 | a0a3acbd9242a39491a365b07562037d7a936bba | refs/heads/main | 2023-08-23T17:00:28.479117 | 2021-11-05T03:29:17 | 2021-11-05T03:29:17 | 424,429,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #Leia o primeiro termo de uma pa
#Read the common difference of the PA
#Print the first 10 terms of the PA
a1 = int(input('Enter the first term of the PA: '))
r = int(input('Enter the common difference of the PA: '))
for c in range(10):
    termo = a1 + (c * r)
    print(termo)
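# For example, with a1 = 5 and r = 3 this prints 5, 8, 11, 14, 17, 20, 23,
# 26, 29, 32 (the first ten terms).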
| [
"[email protected]"
]
| |
f06779f7e8828690d06957ef4314a50c7af1296d | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/3215bc98fe934f21f37fc5cc38ae5f123f444140-<get_srid_info>-fix.py | 61033a70af5b915d436643ecaa2d98431d9a883a | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | def get_srid_info(srid, connection):
    """
    Returns the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection. These results are cached.
    """
from django.contrib.gis.gdal import SpatialReference
global _srid_cache
try:
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
SpatialRefSys = None
    if SpatialRefSys:
        alias = connection.alias
        get_srs = lambda srid: SpatialRefSys.objects.using(alias).get(srid=srid).srs
    else:
        alias, get_srs = None, SpatialReference
if (srid not in _srid_cache[alias]):
srs = get_srs(srid)
(units, units_name) = srs.units
sphere_name = srs['spheroid']
spheroid = ('SPHEROID["%s",%s,%s]' % (sphere_name, srs.semi_major, srs.inverse_flattening))
_srid_cache[alias][srid] = (units, units_name, spheroid)
return _srid_cache[alias][srid] | [
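# Illustrative call (assumes a GeoDjango database connection is in scope;
# 4326 is the WGS84 SRID):
# units, units_name, spheroid = get_srid_info(4326, connection)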
"[email protected]"
]
| |
5ea7e91fd5e4d0c40cc4286bb01cc7771ee6343c | 8a2474f61a49b0e24812456b34f59948b756a94e | /autotest/test_gwt_lkt01.py | 4971e5c94125ddf25a82b9ac3bd4b2326e24b07e | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | MODFLOW-USGS/modflow6 | 6e913abcab5c23686ed05b1cceac85f90282955d | 43f6198125867c487eedc64b17e9adaceb73f5ab | refs/heads/master | 2023-09-01T20:51:30.970467 | 2023-06-28T20:17:59 | 2023-06-28T20:17:59 | 116,149,490 | 158 | 111 | NOASSERTION | 2023-09-14T17:02:58 | 2018-01-03T15:00:55 | Fortran | UTF-8 | Python | false | false | 12,602 | py | # Simple one-layer model with a lak. Purpose is to test a constant
# stage and constant concentration lake with a value of 100. The aquifer
# starts with a concentration of zero, but the values grow as the lake
# leaks into the aquifer.
import os
import flopy
import numpy as np
import pytest
from framework import TestFramework
from simulation import TestSimulation
ex = ["lkt_01"]
def build_model(idx, dir):
lx = 5.0
lz = 1.0
nlay = 1
nrow = 1
ncol = 5
nper = 1
delc = 1.0
delr = lx / ncol
delz = lz / nlay
top = [0.0, 0.0, -0.90, 0.0, 0.0]
botm = list(top - np.arange(delz, nlay * delz + delz, delz))
botm[2] = -1.0
perlen = [0.1]
nstp = [10]
kstp = perlen[0] / nstp[0]
tsmult = [1.0]
Kh = 20.0
Kv = 20.0
steady = [True]
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
single_matrix = False
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.MFModel(
sim,
model_type="gwf6",
modelname=gwfname,
model_nam_file=f"{gwfname}.nam",
)
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename=f"{gwfname}.ims",
)
idomain = np.full((nlay, nrow, ncol), 1)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=0.0)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=False,
save_flows=True,
save_specific_discharge=True,
icelltype=0,
k=Kh,
k33=Kv,
)
# chd files
chdlist1 = [
[(0, 0, 0), -0.5, 0.0],
[(0, 0, ncol - 1), -0.5, 0.0],
]
chd1 = flopy.mf6.ModflowGwfchd(
gwf,
stress_period_data=chdlist1,
print_input=True,
print_flows=True,
save_flows=False,
pname="CHD-1",
auxiliary="CONCENTRATION",
filename=f"{gwfname}.chd",
)
    nlakeconn = 3  # note: this is the number of connections for a lake, not the total number of connections
# pak_data = [lakeno, strt, nlakeconn, CONC, dense, boundname]
pak_data = [(0, -0.4, nlakeconn, 0.0, 1025.0)]
connlen = connwidth = delr / 2.0
con_data = []
# con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
con_data.append(
(0, 0, (0, 0, 1), "HORIZONTAL", "None", 10, 10, connlen, connwidth)
)
con_data.append(
(0, 1, (0, 0, 3), "HORIZONTAL", "None", 10, 10, connlen, connwidth)
)
con_data.append(
(0, 2, (0, 0, 2), "VERTICAL", "None", 10, 10, connlen, connwidth)
)
p_data = [
(0, "STATUS", "CONSTANT"),
(0, "STAGE", -0.4),
(0, "RAINFALL", 0.1),
(0, "EVAPORATION", 0.2),
(0, "RUNOFF", 0.1 * delr * delc),
(0, "WITHDRAWAL", 0.1),
]
# <outletno> <lakein> <lakeout> <couttype> <invert> <width> <rough> <slope>
outlets = [(0, 0, -1, "SPECIFIED", 999.0, 999.0, 999.0, 999.0)]
outletperioddata = [(0, "RATE", -0.1)]
# note: for specifying lake number, use fortran indexing!
lak_obs = {
("lak_obs.csv"): [
("lakestage", "stage", 1),
("lakevolume", "volume", 1),
("lak1", "lak", 1, 1),
("lak2", "lak", 1, 2),
("lak3", "lak", 1, 3),
]
}
lak = flopy.mf6.modflow.ModflowGwflak(
gwf,
save_flows=True,
print_input=True,
print_flows=True,
print_stage=True,
stage_filerecord="stage",
budget_filerecord="lakebud",
budgetcsv_filerecord=f"{gwfname}.lak.bud.csv",
nlakes=1,
ntables=0,
noutlets=1,
packagedata=pak_data,
outlets=outlets,
pname="LAK-1",
connectiondata=con_data,
perioddata=p_data + outletperioddata,
observations=lak_obs,
auxiliary=["CONCENTRATION", "DENSITY"],
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord=f"{gwfname}.cbc",
head_filerecord=f"{gwfname}.hds",
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# create gwt model
gwtname = "gwt_" + name
gwt = flopy.mf6.MFModel(
sim,
model_type="gwt6",
modelname=gwtname,
model_nam_file=f"{gwtname}.nam",
)
if not single_matrix:
imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename=f"{gwtname}.ims",
)
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(
gwt,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
# initial conditions
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0, filename=f"{gwtname}.ic")
# advection
adv = flopy.mf6.ModflowGwtadv(
gwt, scheme="UPSTREAM", filename=f"{gwtname}.adv"
)
# storage
porosity = 0.30
sto = flopy.mf6.ModflowGwtmst(
gwt, porosity=porosity, filename=f"{gwtname}.sto"
)
# sources
sourcerecarray = [
("CHD-1", "AUX", "CONCENTRATION"),
# ('WEL-1', 'AUX', 'CONCENTRATION'),
]
ssm = flopy.mf6.ModflowGwtssm(
gwt, sources=sourcerecarray, filename=f"{gwtname}.ssm"
)
lktpackagedata = [
(0, 35.0, 99.0, 999.0, "mylake"),
]
lktperioddata = [
(0, "STATUS", "CONSTANT"),
(0, "CONCENTRATION", 100.0),
(0, "RAINFALL", 25.0),
(0, "EVAPORATION", 25.0),
(0, "RUNOFF", 25.0),
]
lkt_obs = {
(gwtname + ".lkt.obs.csv",): [
("lkt-1-conc", "CONCENTRATION", 1),
("lkt-1-extinflow", "EXT-INFLOW", 1),
("lkt-1-rain", "RAINFALL", 1),
("lkt-1-roff", "RUNOFF", 1),
("lkt-1-evap", "EVAPORATION", 1),
("lkt-1-wdrl", "WITHDRAWAL", 1),
("lkt-1-stor", "STORAGE", 1),
("lkt-1-const", "CONSTANT", 1),
("lkt-1-gwt2", "LKT", 1, 1),
("lkt-1-gwt4", "LKT", 1, 3),
("lkt-1-gwt3", "LKT", 1, 2),
("lkt-1-mylake", "LKT", "MYLAKE"),
],
}
# append additional obs attributes to obs dictionary
lkt_obs["digits"] = 7
lkt_obs["print_input"] = True
lkt_obs["filename"] = gwtname + ".lkt.obs"
lkt = flopy.mf6.modflow.ModflowGwtlkt(
gwt,
boundnames=True,
save_flows=True,
print_input=True,
print_flows=True,
print_concentration=True,
concentration_filerecord=gwtname + ".lkt.bin",
budget_filerecord="gwtlak1.bud",
packagedata=lktpackagedata,
lakeperioddata=lktperioddata,
observations=lkt_obs,
flow_package_name="LAK-1",
flow_package_auxiliary_name="CONCENTRATION",
pname="LKT-1",
auxiliary=["aux1", "aux2"],
)
# output control
oc = flopy.mf6.ModflowGwtoc(
gwt,
budget_filerecord=f"{gwtname}.cbc",
concentration_filerecord=f"{gwtname}.ucn",
concentrationprintrecord=[
("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
],
saverecord=[("CONCENTRATION", "ALL")],
printrecord=[
("CONCENTRATION", "ALL"),
("BUDGET", "ALL"),
],
)
# GWF GWT exchange
gwfgwt = flopy.mf6.ModflowGwfgwt(
sim,
exgtype="GWF6-GWT6",
exgmnamea=gwfname,
exgmnameb=gwtname,
filename=f"{name}.gwfgwt",
)
return sim, None
def get_mfsim(testsim):
ws = testsim.simpath
sim = flopy.mf6.MFSimulation.load(sim_ws=ws)
return sim
def eval_csv_information(testsim):
sim = get_mfsim(testsim)
name = testsim.name
gwfname = "gwf_" + name
gwtname = "gwt_" + name
gwf = sim.get_model(gwfname)
gwt = sim.get_model(gwtname)
lak_budget = gwf.lak.output.budgetcsv().data
result = lak_budget["PERCENT_DIFFERENCE"]
answer = np.zeros(result.shape)
assert np.allclose(
result, answer
), f"Lake package does not have zero mass balance error: {result}"
def eval_results(sim):
print("evaluating results...")
# eval csv files
eval_csv_information(sim)
# ensure lake concentrations were saved
name = sim.name
gwtname = "gwt_" + name
fname = gwtname + ".lkt.bin"
fname = os.path.join(sim.simpath, fname)
assert os.path.isfile(fname)
# load the lake concentrations and make sure all values are 100.
cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
clak = cobj.get_alldata().flatten()
answer = np.ones(10) * 100.0
assert np.allclose(clak, answer), f"{clak} {answer}"
# load the aquifer concentrations and make sure all values are correct
fname = gwtname + ".ucn"
fname = os.path.join(sim.simpath, fname)
cobj = flopy.utils.HeadFile(fname, text="CONCENTRATION")
caq = cobj.get_alldata()
answer = np.array(
[4.86242795, 27.24270616, 64.55536421, 27.24270616, 4.86242795]
)
assert np.allclose(
caq[-1].flatten(), answer
), f"{caq[-1].flatten()} {answer}"
# lkt observation results
fpth = os.path.join(sim.simpath, gwtname + ".lkt.obs.csv")
try:
tc = np.genfromtxt(fpth, names=True, delimiter=",")
except:
assert False, f'could not load data from "{fpth}"'
res = tc["LKT1CONC"]
answer = np.ones(10) * 100.0
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1EXTINFLOW"]
answer = np.ones(10) * 0.0
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1RAIN"]
answer = np.ones(10) * 2.5
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1ROFF"]
answer = np.ones(10) * 2.5
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1EVAP"]
answer = np.ones(10) * -5.0
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1WDRL"]
answer = np.ones(10) * -10.0
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1STOR"]
answer = np.ones(10) * 0.0
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1CONST"]
answer = np.ones(10) * 236.3934
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1GWT2"]
answer = np.ones(10) * -91.80328
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1GWT4"]
answer = np.ones(10) * -32.78689
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1GWT3"]
answer = np.ones(10) * -91.80328
assert np.allclose(res, answer), f"{res} {answer}"
res = tc["LKT1MYLAKE"]
answer = np.ones(10) * -216.3934
assert np.allclose(res, answer), f"{res} {answer}"
# uncomment when testing
# assert False
@pytest.mark.parametrize(
"idx, name",
list(enumerate(ex)),
)
def test_mf6model(idx, name, function_tmpdir, targets):
ws = str(function_tmpdir)
test = TestFramework()
test.build(build_model, idx, ws)
test.run(
TestSimulation(
name=name, exe_dict=targets, exfunc=eval_results, idxsim=idx
),
ws,
)
| [
"[email protected]"
]
| |
eb5ec60604bedf1e414bdc0f40d2044b46690a1e | ab32e6384b7c679a327a4bf1df6dd24c058b78a5 | /core/site_utils.py | 449603b8ffa124f07af3cb5eb922b8628204b060 | []
| no_license | webmaxdev0110/digi-django | ad2497791d6d3b6aa74eb697dd7eef324ebb5846 | 4cd52c07bb64e9d9381a957323d277489a02181a | refs/heads/master | 2020-03-23T13:37:12.600565 | 2017-07-10T10:23:15 | 2017-07-10T10:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.contrib.sites.models import Site
from django.http.request import split_domain_port
try:
# python 2
from urlparse import urlparse
except ImportError:
# Python 3
from urllib.parse import urlparse
def get_site_from_request_origin(request):
origin = request.META.get('HTTP_ORIGIN', None)
if not origin:
return None
if not origin.startswith('http'):
        # During tests, the ORIGIN header has no scheme
origin = '//' + origin
netloc = urlparse(origin).netloc
domain, port = split_domain_port(netloc)
try:
return Site.objects.get(domain=domain)
except Site.DoesNotExist:
return None
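# Illustrative use inside a Django view (the view name and JSON payload are
# made up for the example):
# def whoami(request):
#     site = get_site_from_request_origin(request)
#     return JsonResponse({'site': site.domain if site else None})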
| [
"[email protected]"
]
| |
4dc2667a569aca4ad5058cc12a464ca8ebe81cdc | a81984a197944169935f005f95a0e69e8c64143b | /artifacts/default/Ubuntu_16/get-sdk-2019.01.01-Ubuntu_16.py | 02eac38bda03b95afbf5e893ac65df8df933f711 | []
| no_license | pulp-platform/pulp-sdk-release | d6531bfb2f55335d02103a63fc5af90877333af3 | a3ad33b4bd5bcf704580857b9a1adcba3ed2a7ff | refs/heads/master | 2021-06-05T02:10:59.317545 | 2020-01-09T09:12:05 | 2020-01-09T09:12:05 | 132,143,493 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,268 | py | #!/usr/bin/env python3
# This file has been auto-generated and can be used for downloading the SDK it has
# been generated for.
import os
import tarfile
import os.path
import argparse
src="59b44701b6ac8390a97936cbd049256fc2917212"
artefacts=[
["https://iis-artifactory.ee.ethz.ch/artifactory/release/Ubuntu_16/pulp/sdk/mainstream/e5a5beca5677e4ea6fb3db81099a0375d57b5d02/0/sdk.tar.bz2", "pkg/sdk/2019.01.01"],
["https://iis-artifactory.ee.ethz.ch/artifactory/release/Ubuntu_16/pulp/pulp_riscv_gcc/mainstream/1.0.9/0/pulp_riscv_gcc.tar.bz2", "pkg/pulp_riscv_gcc/1.0.9"]
]
exports=[
["PULP_SDK_HOME", "$PULP_PROJECT_HOME/pkg/sdk/2019.01.01"],
["PULP_SDK_INSTALL", "$PULP_SDK_HOME/install"],
["PULP_SDK_WS_INSTALL", "$PULP_SDK_HOME/install/ws"],
["PULP_RISCV_GCC_TOOLCHAIN_CI", "$PULP_PROJECT_HOME/pkg/pulp_riscv_gcc/1.0.9"],
["PULP_RISCV_GCC_VERSION", "3"]
]
sourceme=[
["$PULP_SDK_HOME/env/setup.sh", "$PULP_SDK_HOME/env/setup.csh"]
]
pkg=["sdk", "2019.01.01"]
parser = argparse.ArgumentParser(description='PULP downloader')
parser.add_argument('command', metavar='CMD', type=str, nargs='*',
                    help='a command to be executed')
parser.add_argument("--path", dest="path", default=None, help="Specify path where to install packages and sources")
args = parser.parse_args()
if len(args.command ) == 0:
args.command = ['get']
if args.path != None:
path = os.path.expanduser(args.path)
if not os.path.exists(path):
os.makedirs(path)
os.chdir(path)
for command in args.command:
if command == 'get' or command == 'download':
dir = os.getcwd()
if command == 'get':
if not os.path.exists('pkg'): os.makedirs('pkg')
os.chdir('pkg')
for artefactDesc in artefacts:
artefact = artefactDesc[0]
path = os.path.join(dir, artefactDesc[1])
urlList = artefact.split('/')
fileName = urlList[len(urlList)-1]
if command == 'download' or not os.path.exists(path):
if os.path.exists(fileName):
os.remove(fileName)
if os.system('wget --no-check-certificate %s' % (artefact)) != 0:
exit(-1)
if command == 'get':
os.makedirs(path)
t = tarfile.open(os.path.basename(artefact), 'r')
t.extractall(path)
os.remove(os.path.basename(artefact))
os.chdir(dir)
if command == 'get' or command == 'download' or command == 'env':
if not os.path.exists('env'):
os.makedirs('env')
filePath = 'env/env-%s-%s.sh' % (pkg[0], pkg[1])
with open(filePath, 'w') as envFile:
#envFile.write('export PULP_ENV_FILE_PATH=%s\n' % os.path.join(os.getcwd(), filePath))
#envFile.write('export PULP_SDK_SRC_PATH=%s\n' % os.environ.get("PULP_SDK_SRC_PATH"))
envFile.write('export %s=%s\n' % ('PULP_PROJECT_HOME', os.getcwd()))
for export in exports:
envFile.write('export %s=%s\n' % (export[0], export[1].replace('$PULP_PROJECT_HOME', os.getcwd())))
for env in sourceme:
envFile.write('source %s\n' % (env[0].replace('$PULP_PROJECT_HOME', os.getcwd())))
#envFile.write('if [ -e "$PULP_SDK_SRC_PATH/init.sh" ]; then source $PULP_SDK_SRC_PATH/init.sh; fi')
#filePath = 'env/env-%s-%s.csh' % (pkg[0], pkg[1])
#with open(filePath, 'w') as envFile:
# envFile.write('setenv PULP_ENV_FILE_PATH %s\n' % os.path.join(os.getcwd(), filePath))
# envFile.write('setenv PULP_SDK_SRC_PATH %s\n' % os.environ.get("PULP_SDK_SRC_PATH"))
# for env in envFileStrCsh:
# envFile.write('%s\n' % (env.replace('@PULP_PKG_HOME@', os.getcwd())))
# envFile.write('if ( -e "$PULP_SDK_SRC_PATH/init.sh" ) then source $PULP_SDK_SRC_PATH/init.sh; endif')
if command == 'src':
if os.path.exists('.git'):
os.system('git checkout %s' % (src))
else:
os.system('git init .')
os.system('git remote add -t \* -f origin [email protected]:pulp-sw/pulp_pipeline.git')
os.system('git checkout %s' % (src))
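# Typical invocations (illustrative):
#   python3 get-sdk-2019.01.01-Ubuntu_16.py get    # download, unpack, write env files
#   python3 get-sdk-2019.01.01-Ubuntu_16.py env    # regenerate env/env-sdk-*.sh only
#   python3 get-sdk-2019.01.01-Ubuntu_16.py get --path ~/pulp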
| [
"[email protected]"
]
| |
bc38d2efb5600eba0aec10d2e7009307896556f1 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/plugins/GCodeReader/MarlinFlavorParser.py | 482285a2c9508de5ae79665113c0b1fd84a5663f | [
"LGPL-3.0-only",
"GPL-3.0-only"
]
| permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 305 | py | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import FlavorParser
# This parser is intended to interpret the Marlin/Sprinter firmware flavor
class MarlinFlavorParser(FlavorParser.FlavorParser):
def __init__(self):
super().__init__() | [
"[email protected]"
]
| |
60e0c37b3231d5f3a8af1f4f81ace45335df8286 | 457b687219cb723585164e84417ed3bacc8c234d | /qianfeng_400/爬虫/网络编程/1,TCP客户端.py | 2d367d1a423db6b3b0d86786545c5eebc10f4263 | []
| no_license | jxy147258/qianfeng_python | ffdc77fb05cfb2302af51fc3047efa0eadeb2064 | 0f6c06fdf19a47f7b5083cde4e1eb2011442c5f7 | refs/heads/master | 2021-07-16T15:19:45.759356 | 2021-02-03T16:04:14 | 2021-02-03T16:04:14 | 237,947,869 | 2 | 2 | null | 2020-02-07T06:41:06 | 2020-02-03T11:17:13 | Python | UTF-8 | Python | false | false | 362 | py | # 使用socket模块
import socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("localhost", 8081))
count = 0
while True:
    count += 1
    data = input("Enter the data to send: ")
    client.send(data.encode("utf-8"))
    info = client.recv(1024)
    print("Server says: " + info.decode("utf-8"))
    if count > 10:
        client.close()
        break  # stop looping after closing, otherwise the next send() would fail
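# A minimal echo server to test this client against (an illustrative sketch;
# run it in a separate process first, port 8081 matches the connect() above):
# import socket
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind(("localhost", 8081))
# server.listen(1)
# conn, addr = server.accept()
# while True:
#     data = conn.recv(1024)
#     if not data:
#         break
#     conn.send(data)
# conn.close()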
| [
"[email protected]"
]
| |
4c6cd8fa2c76f07442cc03436bacac23835e402e | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/robort_20200727105954.py | bf79f5392d4aaf3cefaba3da6918f708fb08598f | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | def uniquePaths(m,n):
    # Dynamic programming: grid[i][j] holds the number of paths that reach
    # cell (i, j) when moving only right or down; the answer is grid[n-1][m-1].
    grid = [[0 for x in range(m)] for y in range(n)]
    # Cells in the first row or first column can be reached in exactly one way;
    # every other cell sums the paths arriving from above and from the left.
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if i == 0 or j == 0:
                grid[i][j] = 1
            else:
                grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
    return grid[n - 1][m - 1]
print(uniquePaths(3, 2))
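# Sanity checks: a 3x2 grid has 3 monotone paths and a 7x3 grid has 28,
# matching the closed form C(m + n - 2, n - 1).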
"[email protected]"
]
| |
53761b003841d9f17b9bd786b1199e029573d183 | ff23900a911e099595c392a7efab1d268b4f5f7d | /python_modules/libraries/dagster-snowflake-pandas/dagster_snowflake_pandas_tests/test_snowflake_pandas_type_handler.py | 045f02682b567ae7c7cbb3a15986b9b2d4dfdf84 | [
"Apache-2.0"
]
| permissive | zkan/dagster | bbf2da091bdc7fca028c569db72b9c68ddf55e98 | b2b19edb71fc8985f505b116927350dd23b4a7d9 | refs/heads/master | 2022-08-24T03:20:12.583577 | 2022-08-16T00:01:23 | 2022-08-16T00:01:23 | 244,012,061 | 0 | 0 | Apache-2.0 | 2020-02-29T17:33:24 | 2020-02-29T17:33:24 | null | UTF-8 | Python | false | false | 7,662 | py | import logging
import os
import uuid
from contextlib import contextmanager
from typing import Iterator
from unittest.mock import patch
import pandas
import pytest
from dagster_snowflake import build_snowflake_io_manager
from dagster_snowflake.resources import SnowflakeConnection
from dagster_snowflake.snowflake_io_manager import TableSlice
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pandas.snowflake_pandas_type_handler import (
_convert_string_to_timestamp,
_convert_timestamp_to_string,
)
from pandas import DataFrame
from dagster import (
MetadataValue,
Out,
TableColumn,
TableSchema,
build_input_context,
build_output_context,
job,
op,
)
resource_config = {
"database": "database_abc",
"account": "account_abc",
"user": "user_abc",
"password": "password_abc",
"warehouse": "warehouse_abc",
}
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
SHARED_BUILDKITE_SNOWFLAKE_CONF = {
"account": os.getenv("SNOWFLAKE_ACCOUNT", ""),
"user": "BUILDKITE",
"password": os.getenv("SNOWFLAKE_BUILDKITE_PASSWORD", ""),
}
@contextmanager
def temporary_snowflake_table(schema_name: str, db_name: str, column_str: str) -> Iterator[str]:
snowflake_config = dict(database=db_name, **SHARED_BUILDKITE_SNOWFLAKE_CONF)
table_name = "test_io_manager_" + str(uuid.uuid4()).replace("-", "_")
with SnowflakeConnection(
snowflake_config, logging.getLogger("temporary_snowflake_table")
).get_connection() as conn:
conn.cursor().execute(f"create table {schema_name}.{table_name} ({column_str})")
try:
yield table_name
finally:
conn.cursor().execute(f"drop table {schema_name}.{table_name}")
def test_handle_output():
with patch("dagster_snowflake_pandas.snowflake_pandas_type_handler._connect_snowflake"):
handler = SnowflakePandasTypeHandler()
df = DataFrame([{"col1": "a", "col2": 1}])
output_context = build_output_context(resource_config=resource_config)
metadata = handler.handle_output(
output_context,
TableSlice(
table="my_table",
schema="my_schema",
database="my_db",
columns=None,
partition=None,
),
df,
)
assert metadata == {
"dataframe_columns": MetadataValue.table_schema(
TableSchema(columns=[TableColumn("col1", "object"), TableColumn("col2", "int64")])
),
"row_count": 1,
}
def test_load_input():
with patch("dagster_snowflake_pandas.snowflake_pandas_type_handler._connect_snowflake"), patch(
"dagster_snowflake_pandas.snowflake_pandas_type_handler.pd.read_sql"
) as mock_read_sql:
mock_read_sql.return_value = DataFrame([{"COL1": "a", "COL2": 1}])
handler = SnowflakePandasTypeHandler()
input_context = build_input_context()
df = handler.load_input(
input_context,
TableSlice(
table="my_table",
schema="my_schema",
database="my_db",
columns=None,
partition=None,
),
)
assert mock_read_sql.call_args_list[0][1]["sql"] == "SELECT * FROM my_db.my_schema.my_table"
assert df.equals(DataFrame([{"col1": "a", "col2": 1}]))
def test_type_conversions():
# no timestamp data
no_time = pandas.Series([1, 2, 3, 4, 5])
converted = _convert_string_to_timestamp(_convert_timestamp_to_string(no_time))
assert (converted == no_time).all()
# timestamp data
with_time = pandas.Series(
[
pandas.Timestamp("2017-01-01T12:30:45.35"),
pandas.Timestamp("2017-02-01T12:30:45.35"),
pandas.Timestamp("2017-03-01T12:30:45.35"),
]
)
time_converted = _convert_string_to_timestamp(_convert_timestamp_to_string(with_time))
assert (with_time == time_converted).all()
# string that isn't a time
string_data = pandas.Series(["not", "a", "timestamp"])
assert (_convert_string_to_timestamp(string_data) == string_data).all()
@pytest.mark.skipif(not IS_BUILDKITE, reason="Requires access to the BUILDKITE snowflake DB")
def test_io_manager_with_snowflake_pandas():
with temporary_snowflake_table(
schema_name="SNOWFLAKE_IO_MANAGER_SCHEMA",
db_name="TEST_SNOWFLAKE_IO_MANAGER",
column_str="foo string, quux integer",
) as table_name:
# Create a job with the temporary table name as an output, so that it will write to that table
# and not interfere with other runs of this test
@op(
out={
table_name: Out(
io_manager_key="snowflake", metadata={"schema": "SNOWFLAKE_IO_MANAGER_SCHEMA"}
)
}
)
def emit_pandas_df(_):
return pandas.DataFrame({"foo": ["bar", "baz"], "quux": [1, 2]})
@op
def read_pandas_df(df: pandas.DataFrame):
assert set(df.columns) == {"foo", "quux"}
assert len(df.index) == 2
snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler()])
@job(
resource_defs={"snowflake": snowflake_io_manager},
config={
"resources": {
"snowflake": {
"config": {
**SHARED_BUILDKITE_SNOWFLAKE_CONF,
"database": "TEST_SNOWFLAKE_IO_MANAGER",
}
}
}
},
)
def io_manager_test_pipeline():
read_pandas_df(emit_pandas_df())
res = io_manager_test_pipeline.execute_in_process()
assert res.success
@pytest.mark.skipif(not IS_BUILDKITE, reason="Requires access to the BUILDKITE snowflake DB")
def test_io_manager_with_snowflake_pandas_timestamp_data():
with temporary_snowflake_table(
schema_name="SNOWFLAKE_IO_MANAGER_SCHEMA",
db_name="TEST_SNOWFLAKE_IO_MANAGER",
column_str="foo string, date TIMESTAMP_NTZ(9)",
) as table_name:
time_df = pandas.DataFrame(
{
"foo": ["bar", "baz"],
"date": [
pandas.Timestamp("2017-01-01T12:30:45.350"),
pandas.Timestamp("2017-02-01T12:30:45.350"),
],
}
)
@op(
out={
table_name: Out(
io_manager_key="snowflake", metadata={"schema": "SNOWFLAKE_IO_MANAGER_SCHEMA"}
)
}
)
def emit_time_df(_):
return time_df
@op
def read_time_df(df: pandas.DataFrame):
assert set(df.columns) == {"foo", "date"}
assert (df["date"] == time_df["date"]).all()
snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler()])
@job(
resource_defs={"snowflake": snowflake_io_manager},
config={
"resources": {
"snowflake": {
"config": {
**SHARED_BUILDKITE_SNOWFLAKE_CONF,
"database": "TEST_SNOWFLAKE_IO_MANAGER",
}
}
}
},
)
def io_manager_timestamp_test_job():
read_time_df(emit_time_df())
res = io_manager_timestamp_test_job.execute_in_process()
assert res.success
| [
"[email protected]"
]
| |
38c9cd8dc8eff9b5dc906dfce24d42353280b930 | 80301f1cffc5afce13256e2ecab6323c5df00194 | /cn.sc/py/C3303.py | 30d8242ea489520331139f30afd0a81ca0e0f40f | []
| no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 11,332 | py | from ED6SCScenarioHelper import *
def main():
SetCodePage("gbk")
# Zeiss
CreateScenaFile(
FileName = 'C3303 ._SN',
MapName = 'Zeiss',
Location = 'C3303.x',
MapIndex = 1,
MapDefaultBGM = "ed60032",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'ED6_DT21/C3303_1 ._SN',
'',
'',
'',
'',
'ED6_DT21/SUB000 ._SN',
''
],
)
BuildStringList(
'@FileName', # 8
'吉米', # 9
'妖化企鹅', # 10
'企鹅', # 11
'企鹅', # 12
'小企鹅', # 13
'企鹅', # 14
'企鹅', # 15
'小企鹅', # 16
'回音', # 17
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01040 ._CH', # 00
'ED6_DT29/CH12930 ._CH', # 01
'ED6_DT09/CH10630 ._CH', # 02
'ED6_DT09/CH10640 ._CH', # 03
'ED6_DT09/CH10650 ._CH', # 04
'ED6_DT09/CH10660 ._CH', # 05
'ED6_DT09/CH10670 ._CH', # 06
'ED6_DT09/CH10690 ._CH', # 07
'ED6_DT27/CH04000 ._CH', # 08
'ED6_DT27/CH04001 ._CH', # 09
'ED6_DT07/CH00120 ._CH', # 0A
'ED6_DT07/CH00121 ._CH', # 0B
'ED6_DT06/CH20137 ._CH', # 0C
'ED6_DT07/CH00151 ._CH', # 0D
'ED6_DT07/CH00130 ._CH', # 0E
'ED6_DT07/CH00131 ._CH', # 0F
'ED6_DT07/CH00140 ._CH', # 10
'ED6_DT07/CH00141 ._CH', # 11
'ED6_DT07/CH00160 ._CH', # 12
'ED6_DT07/CH00161 ._CH', # 13
'ED6_DT07/CH00170 ._CH', # 14
'ED6_DT07/CH00171 ._CH', # 15
'ED6_DT27/CH03005 ._CH', # 16
'ED6_DT26/CH20311 ._CH', # 17
'ED6_DT29/CH12932 ._CH', # 18
)
AddCharChipPat(
'ED6_DT07/CH01040P._CP', # 00
'ED6_DT29/CH12930P._CP', # 01
'ED6_DT09/CH10630P._CP', # 02
'ED6_DT09/CH10640P._CP', # 03
'ED6_DT09/CH10650P._CP', # 04
'ED6_DT09/CH10660P._CP', # 05
'ED6_DT09/CH10670P._CP', # 06
'ED6_DT09/CH10690P._CP', # 07
'ED6_DT27/CH04000P._CP', # 08
'ED6_DT27/CH04001P._CP', # 09
'ED6_DT07/CH00120P._CP', # 0A
'ED6_DT07/CH00121P._CP', # 0B
'ED6_DT06/CH20137P._CP', # 0C
'ED6_DT07/CH00151P._CP', # 0D
'ED6_DT07/CH00130P._CP', # 0E
'ED6_DT07/CH00131P._CP', # 0F
'ED6_DT07/CH00140P._CP', # 10
'ED6_DT07/CH00141P._CP', # 11
'ED6_DT07/CH00160P._CP', # 12
'ED6_DT07/CH00161P._CP', # 13
'ED6_DT07/CH00170P._CP', # 14
'ED6_DT07/CH00171P._CP', # 15
'ED6_DT27/CH03005P._CP', # 16
'ED6_DT26/CH20311P._CP', # 17
'ED6_DT29/CH12932P._CP', # 18
)
DeclNpc(
X = 9460,
Z = 40,
Y = 112430,
Direction = 90,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 8000,
Z = -3500,
Y = 119800,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclEvent(
X = 12660,
Y = -2000,
Z = 95880,
Range = 3560,
Unknown_10 = 0x7D0,
Unknown_14 = 0x1848E,
Unknown_18 = 0x10000,
Unknown_1C = 0,
)
DeclActor(
TriggerX = 5350,
TriggerZ = 50,
TriggerY = 109980,
TriggerRange = 1000,
ActorX = 1170,
ActorZ = -2060,
ActorY = 108860,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 2,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_2D6", # 00, 0
"Function_1_2D7", # 01, 1
"Function_2_2E2", # 02, 2
)
def Function_0_2D6(): pass
label("Function_0_2D6")
Return()
# Function_0_2D6 end
def Function_1_2D7(): pass
label("Function_1_2D7")
OP_71(0x0, 0x4)
OP_22(0x1CD, 0x1, 0x64)
Return()
# Function_1_2D7 end
def Function_2_2E2(): pass
label("Function_2_2E2")
EventBegin(0x1)
ChrTalk( #0
0x101,
"#1001F这里好像可以钓上什么来。\x02",
)
CloseMessageWindow()
def lambda_30E():
OP_6D(3000, 20, 108970, 1500)
ExitThread()
QueueWorkItem(0x0, 1, lambda_30E)
def lambda_326():
OP_67(0, 8000, -10000, 1500)
ExitThread()
QueueWorkItem(0x0, 2, lambda_326)
def lambda_33E():
OP_6B(3200, 1500)
ExitThread()
QueueWorkItem(0x1, 1, lambda_33E)
def lambda_34E():
OP_6C(45000, 1500)
ExitThread()
QueueWorkItem(0x1, 2, lambda_34E)
Sleep(1000)
SetChrName("")
AnonymousTalk( #1
"\x07\x05钓鱼吗?\x07\x00\x02",
)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
32,
1,
(
"钓鱼\x01", # 0
"离开\x01", # 1
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
WaitChrThread(0x0, 0x1)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3D5")
OP_C0(0xE, 0x20, 0x14E6, 0x32, 0x1AD9C, 0x10E, 0x78A, 0xFFFFFC18, 0x1A928)
OP_0D()
EventEnd(0x1)
Jump("loc_3E4")
label("loc_3D5")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_3E4")
EventEnd(0x1)
label("loc_3E4")
Return()
# Function_2_2E2 end
SaveToFile()
Try(main)
| [
"[email protected]"
]
| |
71ba1fab1dc153bbad617aeb8002714868377f16 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /19100101/qiming09/d5_exercise_stats_text.py | 9123ccf331de4f4c95e8ba80583a8abffaa4e99a | []
| no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 1,456 | py | # this is d5 excercise_2 for text
# date : 2019.3.22
# author : qiming
# Original sample text
text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
# Count only English words; exclude any non-letter symbols such as
# hyphens, punctuation marks and whitespace
list1 = [word.strip('*-,.!') for word in text.split()]
list1 = [word for word in list1 if word]
# Use a dict to count how many times each English word occurs in the sample,
# ordered from most to least frequent, e.g. {'is': 10, 'better': 9, ...}
import collections
print(dict(collections.Counter(list1).most_common()))
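# For the Zen of Python sample above this prints something like
# {'is': 10, 'better': 8, 'than': 8, ...}, ordered by decreasing count.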
| [
"[email protected]"
]
| |
3ae0ae43b2a62b7f1488f67bc1f6544159d9e898 | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/workload/v1alpha1/_inputs.py | 826df05576fb8c04615d93c4721b9ec5c5646afd | [
"Apache-2.0"
]
| permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,238 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'KubernetesApplicationResourceSpecArgs',
'KubernetesApplicationResourceSpecSecretsArgs',
'KubernetesApplicationResourceSpecTargetRefArgs',
'KubernetesApplicationResourceStatusArgs',
'KubernetesApplicationResourceStatusConditionedStatusArgs',
'KubernetesApplicationResourceStatusConditionedStatusConditionsArgs',
'KubernetesApplicationResourceStatusRemoteArgs',
'KubernetesApplicationSpecArgs',
'KubernetesApplicationSpecResourceSelectorArgs',
'KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs',
'KubernetesApplicationSpecResourceTemplatesArgs',
'KubernetesApplicationSpecResourceTemplatesSpecArgs',
'KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs',
'KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs',
'KubernetesApplicationSpecTargetRefArgs',
'KubernetesApplicationSpecTargetSelectorArgs',
'KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs',
'KubernetesApplicationStatusArgs',
'KubernetesApplicationStatusConditionedStatusArgs',
'KubernetesApplicationStatusConditionedStatusConditionsArgs',
'KubernetesTargetSpecArgs',
'KubernetesTargetSpecClusterRefArgs',
'KubernetesTargetSpecConnectionSecretRefArgs',
'KubernetesTargetStatusArgs',
'KubernetesTargetStatusConditionsArgs',
]
@pulumi.input_type
class KubernetesApplicationResourceSpecArgs:
def __init__(__self__, *,
template: pulumi.Input[Mapping[str, Any]],
secrets: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceSpecSecretsArgs']]]] = None,
target_ref: Optional[pulumi.Input['KubernetesApplicationResourceSpecTargetRefArgs']] = None):
"""
KubernetesApplicationResourceSpec specifies the desired state of a KubernetesApplicationResource.
:param pulumi.Input[Mapping[str, Any]] template: A Template for a Kubernetes resource to be submitted to the KubernetesCluster to which this application resource is scheduled. The resource must be understood by the KubernetesCluster. Crossplane requires only that the resource contains standard Kubernetes type and object metadata.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceSpecSecretsArgs']]] secrets: Secrets upon which this application resource depends. These secrets will be propagated to the Kubernetes cluster to which this application is scheduled.
:param pulumi.Input['KubernetesApplicationResourceSpecTargetRefArgs'] target_ref: Target to which this application has been scheduled.
"""
pulumi.set(__self__, "template", template)
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
if target_ref is not None:
pulumi.set(__self__, "target_ref", target_ref)
@property
@pulumi.getter
def template(self) -> pulumi.Input[Mapping[str, Any]]:
"""
A Template for a Kubernetes resource to be submitted to the KubernetesCluster to which this application resource is scheduled. The resource must be understood by the KubernetesCluster. Crossplane requires only that the resource contains standard Kubernetes type and object metadata.
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: pulumi.Input[Mapping[str, Any]]):
pulumi.set(self, "template", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceSpecSecretsArgs']]]]:
"""
Secrets upon which this application resource depends. These secrets will be propagated to the Kubernetes cluster to which this application is scheduled.
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceSpecSecretsArgs']]]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter(name="targetRef")
def target_ref(self) -> Optional[pulumi.Input['KubernetesApplicationResourceSpecTargetRefArgs']]:
"""
Target to which this application has been scheduled.
"""
return pulumi.get(self, "target_ref")
@target_ref.setter
def target_ref(self, value: Optional[pulumi.Input['KubernetesApplicationResourceSpecTargetRefArgs']]):
pulumi.set(self, "target_ref", value)
@pulumi.input_type
class KubernetesApplicationResourceSpecSecretsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesApplicationResourceSpecTargetRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
Target to which this application has been scheduled.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesApplicationResourceStatusArgs:
def __init__(__self__, *,
conditioned_status: Optional[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusArgs']] = None,
remote: Optional[pulumi.Input['KubernetesApplicationResourceStatusRemoteArgs']] = None,
state: Optional[pulumi.Input[str]] = None):
"""
KubernetesApplicationResourceStatus represents the observed state of a KubernetesApplicationResource.
:param pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusArgs'] conditioned_status: A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
:param pulumi.Input['KubernetesApplicationResourceStatusRemoteArgs'] remote: Remote status of the resource templated by this application resource.
:param pulumi.Input[str] state: State of the application.
"""
if conditioned_status is not None:
pulumi.set(__self__, "conditioned_status", conditioned_status)
if remote is not None:
pulumi.set(__self__, "remote", remote)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="conditionedStatus")
def conditioned_status(self) -> Optional[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusArgs']]:
"""
A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
"""
return pulumi.get(self, "conditioned_status")
@conditioned_status.setter
def conditioned_status(self, value: Optional[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusArgs']]):
pulumi.set(self, "conditioned_status", value)
@property
@pulumi.getter
def remote(self) -> Optional[pulumi.Input['KubernetesApplicationResourceStatusRemoteArgs']]:
"""
Remote status of the resource templated by this application resource.
"""
return pulumi.get(self, "remote")
@remote.setter
def remote(self, value: Optional[pulumi.Input['KubernetesApplicationResourceStatusRemoteArgs']]):
pulumi.set(self, "remote", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
State of the application.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@pulumi.input_type
class KubernetesApplicationResourceStatusConditionedStatusArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusConditionsArgs']]]] = None):
"""
A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusConditionsArgs']]] conditions: Conditions of the resource.
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusConditionsArgs']]]]:
"""
Conditions of the resource.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationResourceStatusConditionedStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class KubernetesApplicationResourceStatusConditionedStatusConditionsArgs:
def __init__(__self__, *,
last_transition_time: pulumi.Input[str],
reason: pulumi.Input[str],
status: pulumi.Input[str],
type: pulumi.Input[str],
message: Optional[pulumi.Input[str]] = None):
"""
A Condition that may apply to a resource.
:param pulumi.Input[str] last_transition_time: LastTransitionTime is the last time this condition transitioned from one status to another.
:param pulumi.Input[str] reason: A Reason for this condition's last transition from one status to another.
:param pulumi.Input[str] status: Status of this condition; is it currently True, False, or Unknown?
:param pulumi.Input[str] type: Type of this condition. At most one of each condition type may apply to a resource at any point in time.
:param pulumi.Input[str] message: A Message containing details about this condition's last transition from one status to another, if any.
"""
pulumi.set(__self__, "last_transition_time", last_transition_time)
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if message is not None:
pulumi.set(__self__, "message", message)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> pulumi.Input[str]:
"""
LastTransitionTime is the last time this condition transitioned from one status to another.
"""
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: pulumi.Input[str]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def reason(self) -> pulumi.Input[str]:
"""
A Reason for this condition's last transition from one status to another.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: pulumi.Input[str]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
"""
Status of this condition; is it currently True, False, or Unknown?
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of this condition. At most one of each condition type may apply to a resource at any point in time.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
A Message containing details about this condition's last transition from one status to another, if any.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@pulumi.input_type
class KubernetesApplicationResourceStatusRemoteArgs:
def __init__(__self__, *,
raw: Optional[pulumi.Input[str]] = None):
"""
Remote status of the resource templated by this application resource.
:param pulumi.Input[str] raw: Raw JSON representation of the remote status as a byte array.
"""
if raw is not None:
pulumi.set(__self__, "raw", raw)
@property
@pulumi.getter
def raw(self) -> Optional[pulumi.Input[str]]:
"""
Raw JSON representation of the remote status as a byte array.
"""
return pulumi.get(self, "raw")
@raw.setter
def raw(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "raw", value)
@pulumi.input_type
class KubernetesApplicationSpecArgs:
def __init__(__self__, *,
resource_selector: pulumi.Input['KubernetesApplicationSpecResourceSelectorArgs'],
resource_templates: pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesArgs']]],
target_ref: Optional[pulumi.Input['KubernetesApplicationSpecTargetRefArgs']] = None,
target_selector: Optional[pulumi.Input['KubernetesApplicationSpecTargetSelectorArgs']] = None):
"""
A KubernetesApplicationSpec specifies the resources of a Kubernetes application.
:param pulumi.Input['KubernetesApplicationSpecResourceSelectorArgs'] resource_selector: ResourceSelector selects the KubernetesApplicationResources that are managed by this KubernetesApplication. Note that a KubernetesApplication will never adopt orphaned KubernetesApplicationResources, and thus this selector serves only to help match a KubernetesApplication to its KubernetesApplicationResources.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesArgs']]] resource_templates: ResourceTemplates specifies a set of Kubernetes application resources managed by this application.
:param pulumi.Input['KubernetesApplicationSpecTargetRefArgs'] target_ref: Target to which this application has been scheduled.
:param pulumi.Input['KubernetesApplicationSpecTargetSelectorArgs'] target_selector: TargetSelector selects the targets to which this application may be scheduled. Leave both match labels and expressions empty to match any target.
"""
pulumi.set(__self__, "resource_selector", resource_selector)
pulumi.set(__self__, "resource_templates", resource_templates)
if target_ref is not None:
pulumi.set(__self__, "target_ref", target_ref)
if target_selector is not None:
pulumi.set(__self__, "target_selector", target_selector)
@property
@pulumi.getter(name="resourceSelector")
def resource_selector(self) -> pulumi.Input['KubernetesApplicationSpecResourceSelectorArgs']:
"""
ResourceSelector selects the KubernetesApplicationResources that are managed by this KubernetesApplication. Note that a KubernetesApplication will never adopt orphaned KubernetesApplicationResources, and thus this selector serves only to help match a KubernetesApplication to its KubernetesApplicationResources.
"""
return pulumi.get(self, "resource_selector")
@resource_selector.setter
def resource_selector(self, value: pulumi.Input['KubernetesApplicationSpecResourceSelectorArgs']):
pulumi.set(self, "resource_selector", value)
@property
@pulumi.getter(name="resourceTemplates")
def resource_templates(self) -> pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesArgs']]]:
"""
ResourceTemplates specifies a set of Kubernetes application resources managed by this application.
"""
return pulumi.get(self, "resource_templates")
@resource_templates.setter
def resource_templates(self, value: pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesArgs']]]):
pulumi.set(self, "resource_templates", value)
@property
@pulumi.getter(name="targetRef")
def target_ref(self) -> Optional[pulumi.Input['KubernetesApplicationSpecTargetRefArgs']]:
"""
Target to which this application has been scheduled.
"""
return pulumi.get(self, "target_ref")
@target_ref.setter
def target_ref(self, value: Optional[pulumi.Input['KubernetesApplicationSpecTargetRefArgs']]):
pulumi.set(self, "target_ref", value)
@property
@pulumi.getter(name="targetSelector")
def target_selector(self) -> Optional[pulumi.Input['KubernetesApplicationSpecTargetSelectorArgs']]:
"""
TargetSelector selects the targets to which this application may be scheduled. Leave both match labels and expressions empty to match any target.
"""
return pulumi.get(self, "target_selector")
@target_selector.setter
def target_selector(self, value: Optional[pulumi.Input['KubernetesApplicationSpecTargetSelectorArgs']]):
pulumi.set(self, "target_selector", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
ResourceSelector selects the KubernetesApplicationResources that are managed by this KubernetesApplication. Note that a KubernetesApplication will never adopt orphaned KubernetesApplicationResources, and thus this selector serves only to help match a KubernetesApplication to its KubernetesApplicationResources.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceTemplatesArgs:
def __init__(__self__, *,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
spec: Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecArgs']] = None):
"""
A KubernetesApplicationResourceTemplate is used to instantiate new KubernetesApplicationResources.
:param pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecArgs'] spec: KubernetesApplicationResourceSpec specifies the desired state of a KubernetesApplicationResource.
"""
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecArgs']]:
"""
KubernetesApplicationResourceSpec specifies the desired state of a KubernetesApplicationResource.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecArgs']]):
pulumi.set(self, "spec", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceTemplatesSpecArgs:
def __init__(__self__, *,
template: pulumi.Input[Mapping[str, Any]],
secrets: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs']]]] = None,
target_ref: Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs']] = None):
"""
KubernetesApplicationResourceSpec specifies the desired state of a KubernetesApplicationResource.
:param pulumi.Input[Mapping[str, Any]] template: A Template for a Kubernetes resource to be submitted to the KubernetesCluster to which this application resource is scheduled. The resource must be understood by the KubernetesCluster. Crossplane requires only that the resource contains standard Kubernetes type and object metadata.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs']]] secrets: Secrets upon which this application resource depends. These secrets will be propagated to the Kubernetes cluster to which this application is scheduled.
:param pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs'] target_ref: Target to which this application has been scheduled.
"""
pulumi.set(__self__, "template", template)
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
if target_ref is not None:
pulumi.set(__self__, "target_ref", target_ref)
@property
@pulumi.getter
def template(self) -> pulumi.Input[Mapping[str, Any]]:
"""
A Template for a Kubernetes resource to be submitted to the KubernetesCluster to which this application resource is scheduled. The resource must be understood by the KubernetesCluster. Crossplane requires only that the resource contains standard Kubernetes type and object metadata.
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: pulumi.Input[Mapping[str, Any]]):
pulumi.set(self, "template", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs']]]]:
"""
Secrets upon which this application resource depends. These secrets will be propagated to the Kubernetes cluster to which this application is scheduled.
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs']]]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter(name="targetRef")
def target_ref(self) -> Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs']]:
"""
Target to which this application has been scheduled.
"""
return pulumi.get(self, "target_ref")
@target_ref.setter
def target_ref(self, value: Optional[pulumi.Input['KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs']]):
pulumi.set(self, "target_ref", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceTemplatesSpecSecretsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesApplicationSpecResourceTemplatesSpecTargetRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
Target to which this application has been scheduled.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesApplicationSpecTargetRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
Target to which this application has been scheduled.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesApplicationSpecTargetSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
TargetSelector selects the targets to which this application may be scheduled. Leave both match labels and expressions empty to match any target.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class KubernetesApplicationSpecTargetSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class KubernetesApplicationStatusArgs:
def __init__(__self__, *,
conditioned_status: Optional[pulumi.Input['KubernetesApplicationStatusConditionedStatusArgs']] = None,
desired_resources: Optional[pulumi.Input[int]] = None,
state: Optional[pulumi.Input[str]] = None,
submitted_resources: Optional[pulumi.Input[int]] = None):
"""
KubernetesApplicationStatus represents the observed state of a KubernetesApplication.
:param pulumi.Input['KubernetesApplicationStatusConditionedStatusArgs'] conditioned_status: A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
:param pulumi.Input[int] desired_resources: Desired resources of this application, i.e. the number of resources that match this application's resource selector.
:param pulumi.Input[str] state: State of the application.
:param pulumi.Input[int] submitted_resources: Submitted resources of this workload, i.e. the subset of desired resources that have been successfully submitted to their scheduled Kubernetes cluster.
"""
if conditioned_status is not None:
pulumi.set(__self__, "conditioned_status", conditioned_status)
if desired_resources is not None:
pulumi.set(__self__, "desired_resources", desired_resources)
if state is not None:
pulumi.set(__self__, "state", state)
if submitted_resources is not None:
pulumi.set(__self__, "submitted_resources", submitted_resources)
@property
@pulumi.getter(name="conditionedStatus")
def conditioned_status(self) -> Optional[pulumi.Input['KubernetesApplicationStatusConditionedStatusArgs']]:
"""
A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
"""
return pulumi.get(self, "conditioned_status")
@conditioned_status.setter
def conditioned_status(self, value: Optional[pulumi.Input['KubernetesApplicationStatusConditionedStatusArgs']]):
pulumi.set(self, "conditioned_status", value)
@property
@pulumi.getter(name="desiredResources")
def desired_resources(self) -> Optional[pulumi.Input[int]]:
"""
Desired resources of this application, i.e. the number of resources that match this application's resource selector.
"""
return pulumi.get(self, "desired_resources")
@desired_resources.setter
def desired_resources(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "desired_resources", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
State of the application.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="submittedResources")
def submitted_resources(self) -> Optional[pulumi.Input[int]]:
"""
Submitted resources of this workload, i.e. the subset of desired resources that have been successfully submitted to their scheduled Kubernetes cluster.
"""
return pulumi.get(self, "submitted_resources")
@submitted_resources.setter
def submitted_resources(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "submitted_resources", value)
@pulumi.input_type
class KubernetesApplicationStatusConditionedStatusArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationStatusConditionedStatusConditionsArgs']]]] = None):
"""
A ConditionedStatus reflects the observed status of a resource. Only one condition of each type may exist.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationStatusConditionedStatusConditionsArgs']]] conditions: Conditions of the resource.
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationStatusConditionedStatusConditionsArgs']]]]:
"""
Conditions of the resource.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesApplicationStatusConditionedStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class KubernetesApplicationStatusConditionedStatusConditionsArgs:
def __init__(__self__, *,
last_transition_time: pulumi.Input[str],
reason: pulumi.Input[str],
status: pulumi.Input[str],
type: pulumi.Input[str],
message: Optional[pulumi.Input[str]] = None):
"""
A Condition that may apply to a resource.
:param pulumi.Input[str] last_transition_time: LastTransitionTime is the last time this condition transitioned from one status to another.
:param pulumi.Input[str] reason: A Reason for this condition's last transition from one status to another.
:param pulumi.Input[str] status: Status of this condition; is it currently True, False, or Unknown?
:param pulumi.Input[str] type: Type of this condition. At most one of each condition type may apply to a resource at any point in time.
:param pulumi.Input[str] message: A Message containing details about this condition's last transition from one status to another, if any.
"""
pulumi.set(__self__, "last_transition_time", last_transition_time)
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if message is not None:
pulumi.set(__self__, "message", message)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> pulumi.Input[str]:
"""
LastTransitionTime is the last time this condition transitioned from one status to another.
"""
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: pulumi.Input[str]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def reason(self) -> pulumi.Input[str]:
"""
A Reason for this condition's last transition from one status to another.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: pulumi.Input[str]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
"""
Status of this condition; is it currently True, False, or Unknown?
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of this condition. At most one of each condition type may apply to a resource at any point in time.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
A Message containing details about this condition's last transition from one status to another, if any.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@pulumi.input_type
class KubernetesTargetSpecArgs:
def __init__(__self__, *,
cluster_ref: Optional[pulumi.Input['KubernetesTargetSpecClusterRefArgs']] = None,
connection_secret_ref: Optional[pulumi.Input['KubernetesTargetSpecConnectionSecretRefArgs']] = None):
"""
A TargetSpec defines the common fields of objects used for exposing infrastructure to workloads that can be scheduled to.
:param pulumi.Input['KubernetesTargetSpecClusterRefArgs'] cluster_ref: A ResourceReference specifies an existing managed resource, in any namespace, which this target should attempt to propagate a connection secret from.
:param pulumi.Input['KubernetesTargetSpecConnectionSecretRefArgs'] connection_secret_ref: WriteConnectionSecretToReference specifies the name of a Secret, in the same namespace as this target, to which any connection details for this target should be written or already exist. Connection secrets referenced by a target should contain information for connecting to a resource that allows for scheduling of workloads.
"""
if cluster_ref is not None:
pulumi.set(__self__, "cluster_ref", cluster_ref)
if connection_secret_ref is not None:
pulumi.set(__self__, "connection_secret_ref", connection_secret_ref)
@property
@pulumi.getter(name="clusterRef")
def cluster_ref(self) -> Optional[pulumi.Input['KubernetesTargetSpecClusterRefArgs']]:
"""
A ResourceReference specifies an existing managed resource, in any namespace, which this target should attempt to propagate a connection secret from.
"""
return pulumi.get(self, "cluster_ref")
@cluster_ref.setter
def cluster_ref(self, value: Optional[pulumi.Input['KubernetesTargetSpecClusterRefArgs']]):
pulumi.set(self, "cluster_ref", value)
@property
@pulumi.getter(name="connectionSecretRef")
def connection_secret_ref(self) -> Optional[pulumi.Input['KubernetesTargetSpecConnectionSecretRefArgs']]:
"""
WriteConnectionSecretToReference specifies the name of a Secret, in the same namespace as this target, to which any connection details for this target should be written or already exist. Connection secrets referenced by a target should contain information for connecting to a resource that allows for scheduling of workloads.
"""
return pulumi.get(self, "connection_secret_ref")
@connection_secret_ref.setter
def connection_secret_ref(self, value: Optional[pulumi.Input['KubernetesTargetSpecConnectionSecretRefArgs']]):
pulumi.set(self, "connection_secret_ref", value)
@pulumi.input_type
class KubernetesTargetSpecClusterRefArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
field_path: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
A ResourceReference specifies an existing managed resource, in any namespace, which this target should attempt to propagate a connection secret from.
:param pulumi.Input[str] api_version: API version of the referent.
:param pulumi.Input[str] field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param pulumi.Input[str] kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[str] namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param pulumi.Input[str] resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_version", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)
@pulumi.input_type
class KubernetesTargetSpecConnectionSecretRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
WriteConnectionSecretToReference specifies the name of a Secret, in the same namespace as this target, to which any connection details for this target should be written or already exist. Connection secrets referenced by a target should contain information for connecting to a resource that allows for scheduling of workloads.
:param pulumi.Input[str] name: Name of the secret.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the secret.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class KubernetesTargetStatusArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesTargetStatusConditionsArgs']]]] = None):
"""
A TargetStatus defines the observed status a target.
:param pulumi.Input[Sequence[pulumi.Input['KubernetesTargetStatusConditionsArgs']]] conditions: Conditions of the resource.
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesTargetStatusConditionsArgs']]]]:
"""
Conditions of the resource.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KubernetesTargetStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class KubernetesTargetStatusConditionsArgs:
def __init__(__self__, *,
last_transition_time: pulumi.Input[str],
reason: pulumi.Input[str],
status: pulumi.Input[str],
type: pulumi.Input[str],
message: Optional[pulumi.Input[str]] = None):
"""
A Condition that may apply to a resource.
:param pulumi.Input[str] last_transition_time: LastTransitionTime is the last time this condition transitioned from one status to another.
:param pulumi.Input[str] reason: A Reason for this condition's last transition from one status to another.
:param pulumi.Input[str] status: Status of this condition; is it currently True, False, or Unknown?
:param pulumi.Input[str] type: Type of this condition. At most one of each condition type may apply to a resource at any point in time.
:param pulumi.Input[str] message: A Message containing details about this condition's last transition from one status to another, if any.
"""
pulumi.set(__self__, "last_transition_time", last_transition_time)
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if message is not None:
pulumi.set(__self__, "message", message)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> pulumi.Input[str]:
"""
LastTransitionTime is the last time this condition transitioned from one status to another.
"""
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: pulumi.Input[str]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def reason(self) -> pulumi.Input[str]:
"""
A Reason for this condition's last transition from one status to another.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: pulumi.Input[str]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
"""
Status of this condition; is it currently True, False, or Unknown?
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of this condition. At most one of each condition type may apply to a resource at any point in time.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
A Message containing details about this condition's last transition from one status to another, if any.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
| [
"[email protected]"
]
| |
ebccb5939007fff56633866cd7d1b87aa282bfec | f2411753c4eb2dd04ee9136c594784c073d1de02 | /graphene/contrib/django/debug/sql/tracking.py | 47f7a30c5d9283dbb046f20617a21cb2349d971f | [
"MIT"
]
| permissive | AdrielVelazquez/graphene | 9a5dbcfa02102cbf4c7463476fd1c51dcdefb107 | 4d15bc4f796db403e1ed4877665b80422b516eca | refs/heads/master | 2020-12-28T23:50:29.035822 | 2016-05-20T19:20:10 | 2016-05-20T19:20:10 | 59,317,384 | 0 | 0 | null | 2016-05-20T18:29:37 | 2016-05-20T18:29:37 | null | UTF-8 | Python | false | false | 4,857 | py | # Code obtained from django-debug-toolbar sql panel tracking
from __future__ import absolute_import, unicode_literals
import json
from threading import local
from time import time
from django.utils import six
from django.utils.encoding import force_text
class SQLQueryTriggered(Exception):
"""Thrown when template panel triggers a query"""
class ThreadLocalState(local):
def __init__(self):
self.enabled = True
@property
def Wrapper(self):
if self.enabled:
return NormalCursorWrapper
return ExceptionCursorWrapper
def recording(self, v):
self.enabled = v
state = ThreadLocalState()
recording = state.recording # export function
def wrap_cursor(connection, panel):
if not hasattr(connection, '_graphene_cursor'):
connection._graphene_cursor = connection.cursor
def cursor():
return state.Wrapper(connection._graphene_cursor(), connection, panel)
connection.cursor = cursor
return cursor
def unwrap_cursor(connection):
if hasattr(connection, '_graphene_cursor'):
del connection._graphene_cursor
del connection.cursor
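def _example_wrap_connection():
    # Editor's sketch (not part of the original module): how wrap_cursor/unwrap_cursor
    # might be used with a stub logger. The logger only needs a ``record`` method
    # (plus ``get_transaction_id`` when the backend is PostgreSQL).
    from django.db import connection

    class StubLogger(object):
        def record(self, **params):
            print(params['alias'], params['duration'], params['raw_sql'])

        def get_transaction_id(self, alias):
            return None

    wrap_cursor(connection, StubLogger())
    try:
        pass  # run ORM queries here; each execute() is reported to StubLogger.record
    finally:
        unwrap_cursor(connection)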
class ExceptionCursorWrapper(object):
"""
Wraps a cursor and raises an exception on any operation.
Used in Templates panel.
"""
def __init__(self, cursor, db, logger):
pass
def __getattr__(self, attr):
raise SQLQueryTriggered()
class NormalCursorWrapper(object):
"""
Wraps a cursor and logs queries.
"""
def __init__(self, cursor, db, logger):
self.cursor = cursor
# Instance of a BaseDatabaseWrapper subclass
self.db = db
# logger must implement a ``record`` method
self.logger = logger
def _quote_expr(self, element):
if isinstance(element, six.string_types):
return "'%s'" % force_text(element).replace("'", "''")
else:
return repr(element)
def _quote_params(self, params):
if not params:
return params
if isinstance(params, dict):
return dict((key, self._quote_expr(value))
for key, value in params.items())
return list(map(self._quote_expr, params))
def _decode(self, param):
try:
return force_text(param, strings_only=True)
except UnicodeDecodeError:
return '(encoded string)'
def _record(self, method, sql, params):
start_time = time()
try:
return method(sql, params)
finally:
stop_time = time()
duration = (stop_time - start_time)
_params = ''
try:
_params = json.dumps(list(map(self._decode, params)))
except Exception:
pass # object not JSON serializable
alias = getattr(self.db, 'alias', 'default')
conn = self.db.connection
vendor = getattr(conn, 'vendor', 'unknown')
params = {
'vendor': vendor,
'alias': alias,
'sql': self.db.ops.last_executed_query(
self.cursor, sql, self._quote_params(params)),
'duration': duration,
'raw_sql': sql,
'params': _params,
'start_time': start_time,
'stop_time': stop_time,
                'is_slow': duration > 10,  # duration is measured in seconds here
'is_select': sql.lower().strip().startswith('select'),
}
if vendor == 'postgresql':
# If an erroneous query was ran on the connection, it might
# be in a state where checking isolation_level raises an
# exception.
try:
iso_level = conn.isolation_level
except conn.InternalError:
iso_level = 'unknown'
params.update({
'trans_id': self.logger.get_transaction_id(alias),
'trans_status': conn.get_transaction_status(),
'iso_level': iso_level,
'encoding': conn.encoding,
})
# We keep `sql` to maintain backwards compatibility
self.logger.record(**params)
def callproc(self, procname, params=()):
return self._record(self.cursor.callproc, procname, params)
def execute(self, sql, params=()):
return self._record(self.cursor.execute, sql, params)
def executemany(self, sql, param_list):
return self._record(self.cursor.executemany, sql, param_list)
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| [
"[email protected]"
]
| |
f73a5a5a3e638c2d9e725a8cc34c709f5d3794e8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2327/60621/239684.py | 40d8b72fef8bfd3b2b5a04ac9e8fd343edc92093 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | a = input()
I = a.count("I")  # number of "I" (increase) steps; the remaining steps are "D"
b = list(range(len(a) + 1))  # candidate values 0..len(a)
b1 = b[:I]        # smallest values, consumed on "I"
b2 = b[I:][::-1]  # largest values in descending order, consumed on "D"
c = []
cu1, cu2 = 0, 0
for ch in a:
    if ch == "I":
        c.append(b1[cu1])
        cu1 += 1
    else:
        c.append(b2[cu2])
        cu2 += 1
# exactly one value is always left over; take it from whichever list was not exhausted
if cu1 != len(b1):
    c.append(b1[-1])
else:
    c.append(b2[-1])
print(c)
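# Worked example (editor's note): for input "IDID", I = 2, b1 = [0, 1], b2 = [4, 3, 2];
# the loop emits 0, 4, 1, 3 and the leftover 2 is appended, printing [0, 4, 1, 3, 2] --
# a permutation of 0..4 that rises on every "I" and falls on every "D".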
"[email protected]"
]
| |
74c1927ddec50a5fd6abea8524720bce578f3c39 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/stock_count_link_inv_adjust/models/stock_count.py | 320955c28c49a62f0247469c7f96181c6750588d | []
| no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from odoo import fields, models, api
class StockCountInherit(models.Model):
_inherit = "stock.count"
def action_done(self):
self.action_inventory_adjustment()
self.inv_id.action_done()
print("inv id=====1111111=======", self.inv_id)
self.inv_id.write({'state': 'confirm'})
self.write({'state': 'close'})
| [
"[email protected]"
]
| |
45f034a56235052cb6039c6ce0f0280c9bfbc65c | 1533e34bec5adac39a27d42971c39f7cf610dc00 | /migrations/versions/77190eba98d9_vamo.py | 706a1baee8e798abbe085b43f9244e70f67af49d | []
| no_license | apocalipsys/florabot | 3d8c510d88ca20260471ae0916f406c8715beb4a | ff181430e20cd5739b1f3e6f872d4506002f9a7f | refs/heads/rama-desert | 2022-12-10T04:07:17.181809 | 2020-02-25T06:04:12 | 2020-02-25T06:04:12 | 241,262,587 | 2 | 0 | null | 2022-12-08T03:39:03 | 2020-02-18T03:22:14 | Python | UTF-8 | Python | false | false | 842 | py | """vamo
Revision ID: 77190eba98d9
Revises:
Create Date: 2019-08-11 03:10:10.235839
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '77190eba98d9'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('username', sa.String(length=20), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
11266b5585922b736d205d227e1d6801bc5a9630 | cfd2df2e798d85805970ab9f355ee33ff20f0d6e | /array_find_repeat.py | f19559e5d4e105dc9c934943b234dc804e5dd3aa | []
| no_license | chaichai1997/python-algorithm | 483691ec4e3f386acac26412edaaae328cce3fae | 3b4815f3cc27eaceb9ad0b15b5e45077a9cd62a5 | refs/heads/master | 2022-11-15T22:37:28.035141 | 2020-07-13T13:01:35 | 2020-07-13T13:01:35 | 261,659,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # -*- coding: utf-8 -*-
# author = "chaichai"
"""
Find the unique repeated element in an array.
"""
"""
Hash-table approach: trade space for time.
"""
def find(array):
if array is None:
return -1
lens = len(array)
hash_table = dict()
for i in range(lens):
hash_table[array[i]] = 0
j = 0
for j in range(lens):
if hash_table[array[j]] == 0:
hash_table[array[j]] = 1
else:
return array[j]
return -1
"""
XOR approach: since x ^ x == 0, XOR-ing every element with every index
cancels everything except the duplicate (assumes the array holds the
values 1..n with exactly one of them repeated).
"""
def find_xor(array):
if array is None:
return -1
lens = len(array)
result = 0
for i in range(lens):
result ^= array[i]
for j in range(lens):
result ^= j
return result
"""
Index mapping: negate visited positions; the first position visited twice
reveals the duplicate value.
"""
def find_map(array):
if array is None:
return -1
lens = len(array)
index = 0
i = 0
while True:
if array[i] >= lens:
return -1
if array[index] < 0:
break
array[index] *= -1
index = -1 * array[index]
if index >= lens:
print("非法")
return -1
return index
if __name__ == '__main__':
array = [1, 3, 4, 2, 5, 3]
print(find(array))
print(find_xor(array))
| [
"[email protected]"
]
| |
43add3d270a3864bc374264f663d6631bb82c1bc | f60b964dc39ba54bb84f1c4949be3b91a92b8346 | /track_order/forms.py | 07a601aa7dea5764b5b5fcc4168a15febb3a3310 | [
"Apache-2.0"
]
| permissive | jiejiang/courier | 4b0b4fc56c5510228ffcc4de51b074c7aff9502f | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | refs/heads/master | 2022-11-30T14:24:53.950502 | 2019-12-06T16:42:00 | 2019-12-06T16:42:00 | 195,387,643 | 0 | 0 | Apache-2.0 | 2022-11-22T01:22:33 | 2019-07-05T10:08:19 | Python | UTF-8 | Python | false | false | 5,108 | py | # *- coding: utf-8 -*
from django.core.validators import RegexValidator
import re
from django.shortcuts import get_object_or_404
from django import forms
from django.db.models.manager import Manager
from django.utils.translation import ugettext as _
from mezzanine.accounts.forms import LoginForm, PasswordResetForm, ProfileForm, get_profile_for_user
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder, Div, Field, HTML
from crispy_forms.bootstrap import PrependedText, InlineRadios, FieldWithButtons, StrictButton
from captcha.fields import CaptchaField
def format_parcel_force_import_job_create_form(form):
form.helper = FormHelper()
form.helper.form_class = 'form-horizontal'
form.helper.label_class = 'col-lg-5'
form.helper.field_class = 'col-lg-7'
form.fields['input_file'].label = _(u"选择PDF文件")
form.helper.layout = Layout(
'input_file',
ButtonHolder(
Submit('submit', _(u"上载"), css_class='btn-block btn-lg btn-success btn'),
)
)
return form
class QueryOrderForm(forms.Form):
ROUTE_CHOICES = (('order_system', _(u"包税线路")), ('parcel_force', _(u"Parcel Force")))
DAYS_CHOICES = (('7', _(u"一周")), ('14', _(u"两周")), ('31', _(u"一个月")), ('62', _(u"两个月")),)
route = forms.ChoiceField(label=_(u"线路选择"), choices=ROUTE_CHOICES, required=True, initial='order_system')
name = forms.CharField(label=_(u"收件人姓名"), required=False)
mobile = forms.CharField(label=_(u"收件人手机号码(无区号)"), required=False)
id = forms.CharField(label=_(u"收件人身份证号码"), required=False)
days = forms.ChoiceField(label=_(u"下单时间范围"), choices=DAYS_CHOICES, required=True, initial=31)
captcha = CaptchaField(label=_(u"验证码"))
class Media:
js = ('js/route_choice.js',)
def __init__(self, *args, **kwargs):
super(QueryOrderForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'cols-sm-2'
self.helper.field_class = 'cols-sm-10'
self.helper.layout = Layout(
InlineRadios('route'),
Div(
HTML("<span>" + _(u"信息填写") + "</span>"), css_class="strike"
),
PrependedText('name', '<i class="fa-user fa" aria-hidden="true"></i>', placeholder=_(u"收件人姓名"),
wrapper_class='order_system'),
PrependedText('mobile', '<i class="fa-mobile fa" aria-hidden="true"></i>', placeholder=_(u"收件人手机号码")),
Div(
HTML("<span>" + _(u"或") + "</span>"), css_class="strike order_system"
),
PrependedText('id', '<i class="fa-id-card fa" aria-hidden="true"></i>', placeholder=_(u"收件人身份证号码"),
wrapper_class='order_system'),
InlineRadios('days'),
HTML("<hr/>"),
Field('captcha', placeholder=_(u"输入验证码")),
ButtonHolder(
Submit('submit', _(u"查询"), css_class='btn-block btn-lg login-button'),
css_class='form-group',
)
)
self.valid_days = set([x[0] for x in self.DAYS_CHOICES])
def clean(self):
error = {}
route = self.cleaned_data['route']
id = self.cleaned_data.get('id', None)
name = self.cleaned_data.get('name', None)
mobile = self.cleaned_data.get('mobile', None)
days = self.cleaned_data.get('days', None)
if days not in self.valid_days:
error['days'] = _(u"非法选项")
if (route == 'order_system' and (id or (name and mobile))) or (route == 'parcel_force' and mobile):
pass
else:
for field in ('id', 'name', 'mobile'):
if not self.cleaned_data.get(field, None):
error[field] = _(u"请填写此字段")
raise forms.ValidationError(error)
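    # Accepted field combinations, summarised for reference (comment added;
    # mirrors the clean() logic above):
    #   route == 'order_system': `id` alone, or both `name` and `mobile`
    #   route == 'parcel_force': `mobile` only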
class TrackShippingForm(forms.Form):
order_number = forms.CharField(min_length=8, max_length=25,
validators=[
RegexValidator(
regex=r'^[a-zA-Z\d]+$',
message=_(u'订单号格式错误'),
),
]
)
def __init__(self, *args, **kwargs):
super(TrackShippingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_show_labels = False
self.helper.form_id = "track-form"
self.helper.layout = Layout(
FieldWithButtons(Field('order_number', placeholder=_(u"输入订单号")),
StrictButton("<i class='fa fa-search'></i> " + _(u"立刻查询"), type="submit",
css_class="btn-primary", id="track-submit"))
)
| [
"[email protected]"
]
| |
f405684acdf758881166e153094be05f1bd7e74f | 0191140830e827ddfde9300d5cc5962018a7bac1 | /stats/repository/profile_repository.py | e5cb1f59a579dc58327fccc72662973f10bf9cc2 | []
| no_license | NicolleLouis/LouisNicolle | d816a60f30d92a9c2bc1b6ef6443c477505bf1bc | b99ae034d58afce5670d0b2fb0e5f3ce57bf1449 | refs/heads/master | 2023-08-17T20:37:29.024430 | 2021-09-13T14:26:02 | 2021-09-13T14:26:02 | 291,709,252 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from stats.models.profile import Profile
class ProfileRepository:
@staticmethod
def get_queryset():
return Profile.objects.all()
@staticmethod
def get_by_id(profile_id):
return Profile.objects.get(user__id=profile_id)
| [
"[email protected]"
]
| |
ab1b81423c60f8d6124f9ae265efaabae2dc2218 | 06476bc4cb7fc3ce378beb357fac7d5aacb87b3b | /Prototype/env/lib/python3.8/site-packages/numba/targets/base.py | e44b60224e131b42f5f71dd9513eef2b77f2e2de | [
"MIT"
]
| permissive | marc-ortuno/VOPEC | 44d3a74d3e0686474dd57fcb21e845fd5fd48897 | e7ed1f13cc1868a824f4036dd08ec6bed4266c08 | refs/heads/main | 2023-06-12T19:15:18.060897 | 2021-07-01T17:15:03 | 2021-07-01T17:15:03 | 344,433,646 | 0 | 0 | MIT | 2021-06-14T19:15:47 | 2021-03-04T10:22:05 | Python | UTF-8 | Python | false | false | 43,040 | py | from __future__ import print_function
from collections import namedtuple, defaultdict
import copy
import os
import sys
from itertools import permutations, takewhile
from contextlib import contextmanager
import numpy as np
from llvmlite import ir as llvmir
import llvmlite.llvmpy.core as lc
from llvmlite.llvmpy.core import Type, Constant, LLVMException
import llvmlite.binding as ll
from numba import types, utils, cgutils, typing, funcdesc, debuginfo, config
from numba import _dynfunc, _helperlib
from numba.compiler_lock import global_compiler_lock
from numba.pythonapi import PythonAPI
from . import arrayobj, builtins, imputils
from .imputils import (user_function, user_generator,
builtin_registry, impl_ret_borrowed,
RegistryLoader)
from numba import datamodel
GENERIC_POINTER = Type.pointer(Type.int(8))
PYOBJECT = GENERIC_POINTER
void_ptr = GENERIC_POINTER
class OverloadSelector(object):
"""
An object matching an actual signature against a registry of formal
signatures and choosing the best candidate, if any.
In the current implementation:
- a "signature" is a tuple of type classes or type instances
- the "best candidate" is the most specific match
"""
def __init__(self):
# A list of (formal args tuple, value)
self.versions = []
self._cache = {}
def find(self, sig):
out = self._cache.get(sig)
if out is None:
out = self._find(sig)
self._cache[sig] = out
return out
def _find(self, sig):
candidates = self._select_compatible(sig)
if candidates:
return candidates[self._best_signature(candidates)]
else:
raise NotImplementedError(self, sig)
def _select_compatible(self, sig):
"""
Select all compatible signatures and their implementation.
"""
out = {}
for ver_sig, impl in self.versions:
if self._match_arglist(ver_sig, sig):
out[ver_sig] = impl
return out
def _best_signature(self, candidates):
"""
Returns the best signature out of the candidates
"""
ordered, genericity = self._sort_signatures(candidates)
# check for ambiguous signatures
if len(ordered) > 1:
firstscore = genericity[ordered[0]]
same = list(takewhile(lambda x: genericity[x] == firstscore,
ordered))
if len(same) > 1:
msg = ["{n} ambiguous signatures".format(n=len(same))]
for sig in same:
msg += ["{0} => {1}".format(sig, candidates[sig])]
raise TypeError('\n'.join(msg))
return ordered[0]
def _sort_signatures(self, candidates):
"""
Sort signatures in ascending level of genericity.
Returns a 2-tuple:
* ordered list of signatures
* dictionary containing genericity scores
"""
# score by genericity
genericity = defaultdict(int)
for this, other in permutations(candidates.keys(), r=2):
matched = self._match_arglist(formal_args=this, actual_args=other)
if matched:
# genericity score +1 for every another compatible signature
genericity[this] += 1
# order candidates in ascending level of genericity
ordered = sorted(candidates.keys(), key=lambda x: genericity[x])
return ordered, genericity
def _match_arglist(self, formal_args, actual_args):
"""
Returns True if the signature is "matching".
A formal signature is "matching" if the actual signature matches exactly
or if the formal signature is a compatible generic signature.
"""
# normalize VarArg
if formal_args and isinstance(formal_args[-1], types.VarArg):
ndiff = len(actual_args) - len(formal_args) + 1
formal_args = formal_args[:-1] + (formal_args[-1].dtype,) * ndiff
if len(formal_args) != len(actual_args):
return False
for formal, actual in zip(formal_args, actual_args):
if not self._match(formal, actual):
return False
return True
def _match(self, formal, actual):
if formal == actual:
# formal argument matches actual arguments
return True
elif types.Any == formal:
# formal argument is any
return True
elif isinstance(formal, type) and issubclass(formal, types.Type):
if isinstance(actual, type) and issubclass(actual, formal):
# formal arg is a type class and actual arg is a subclass
return True
elif isinstance(actual, formal):
# formal arg is a type class of which actual arg is an instance
return True
def append(self, value, sig):
"""
Add a formal signature and its associated value.
"""
assert isinstance(sig, tuple), (value, sig)
self.versions.append((sig, value))
self._cache.clear()
@utils.runonce
def _load_global_helpers():
"""
Execute once to install special symbols into the LLVM symbol table.
"""
# This is Py_None's real C name
ll.add_symbol("_Py_NoneStruct", id(None))
# Add Numba C helper functions
for c_helpers in (_helperlib.c_helpers, _dynfunc.c_helpers):
for py_name, c_address in c_helpers.items():
c_name = "numba_" + py_name
ll.add_symbol(c_name, c_address)
# Add Numpy C helpers (npy_XXX)
for c_name, c_address in _helperlib.npymath_exports.items():
ll.add_symbol(c_name, c_address)
# Add all built-in exception classes
for obj in utils.builtins.__dict__.values():
if isinstance(obj, type) and issubclass(obj, BaseException):
ll.add_symbol("PyExc_%s" % (obj.__name__), id(obj))
class BaseContext(object):
"""
Notes on Structure
------------------
Most objects are lowered as plain-old-data structure in the generated
llvm. They are passed around by reference (a pointer to the structure).
Only POD structure can live across function boundaries by copying the
data.
"""
# True if the target requires strict alignment
# Causes exception to be raised if the record members are not aligned.
strict_alignment = False
# Force powi implementation as math.pow call
implement_powi_as_math_call = False
implement_pow_as_math_call = False
# Emit Debug info
enable_debuginfo = False
DIBuilder = debuginfo.DIBuilder
# Bound checking
@property
def enable_boundscheck(self):
if config.BOUNDSCHECK is not None:
return config.BOUNDSCHECK
return self._boundscheck
@enable_boundscheck.setter
def enable_boundscheck(self, value):
self._boundscheck = value
# NRT
enable_nrt = False
# Auto parallelization
auto_parallel = False
# PYCC
aot_mode = False
# Error model for various operations (only FP exceptions currently)
error_model = None
# Whether dynamic globals (CPU runtime addresses) is allowed
allow_dynamic_globals = False
# Fast math flags
fastmath = False
# python execution environment
environment = None
# the function descriptor
fndesc = None
def __init__(self, typing_context):
_load_global_helpers()
self.address_size = utils.MACHINE_BITS
self.typing_context = typing_context
# A mapping of installed registries to their loaders
self._registries = {}
# Declarations loaded from registries and other sources
self._defns = defaultdict(OverloadSelector)
self._getattrs = defaultdict(OverloadSelector)
self._setattrs = defaultdict(OverloadSelector)
self._casts = OverloadSelector()
self._get_constants = OverloadSelector()
# Other declarations
self._generators = {}
self.special_ops = {}
self.cached_internal_func = {}
self._pid = None
self._codelib_stack = []
self._boundscheck = False
self.data_model_manager = datamodel.default_manager
# Initialize
self.init()
def init(self):
"""
For subclasses to add initializer
"""
def refresh(self):
"""
Refresh context with new declarations from known registries.
Useful for third-party extensions.
"""
# Populate built-in registry
from . import (arraymath, enumimpl, iterators, linalg, numbers,
optional, polynomial, rangeobj, slicing, tupleobj,
gdb_hook, hashing, heapq, literal)
try:
from . import npdatetime
except NotImplementedError:
pass
self.install_registry(builtin_registry)
self.load_additional_registries()
# Also refresh typing context, since @overload declarations can
# affect it.
self.typing_context.refresh()
def load_additional_registries(self):
"""
Load target-specific registries. Can be overridden by subclasses.
"""
def mangler(self, name, types):
"""
Perform name mangling.
"""
return funcdesc.default_mangler(name, types)
def get_env_name(self, fndesc):
"""Get the environment name given a FunctionDescriptor.
Use this instead of the ``fndesc.env_name`` so that the target-context
can provide necessary mangling of the symbol to meet ABI requirements.
"""
return fndesc.env_name
def declare_env_global(self, module, envname):
"""Declare the Environment pointer as a global of the module.
The pointer is initialized to NULL. It must be filled by the runtime
with the actual address of the Env before the associated function
can be executed.
Parameters
----------
module :
The LLVM Module
envname : str
The name of the global variable.
"""
if envname not in module.globals:
gv = llvmir.GlobalVariable(module, cgutils.voidptr_t, name=envname)
gv.linkage = 'common'
gv.initializer = cgutils.get_null_value(gv.type.pointee)
return module.globals[envname]
def get_arg_packer(self, fe_args):
return datamodel.ArgPacker(self.data_model_manager, fe_args)
def get_data_packer(self, fe_types):
return datamodel.DataPacker(self.data_model_manager, fe_types)
@property
def target_data(self):
raise NotImplementedError
@utils.cached_property
def nrt(self):
from numba.runtime.context import NRTContext
return NRTContext(self, self.enable_nrt)
def subtarget(self, **kws):
obj = copy.copy(self) # shallow copy
for k, v in kws.items():
if not hasattr(obj, k):
raise NameError("unknown option {0!r}".format(k))
setattr(obj, k, v)
if obj.codegen() is not self.codegen():
# We can't share functions across different codegens
obj.cached_internal_func = {}
return obj
def install_registry(self, registry):
"""
Install a *registry* (a imputils.Registry instance) of function
and attribute implementations.
"""
try:
loader = self._registries[registry]
except KeyError:
loader = RegistryLoader(registry)
self._registries[registry] = loader
self.insert_func_defn(loader.new_registrations('functions'))
self._insert_getattr_defn(loader.new_registrations('getattrs'))
self._insert_setattr_defn(loader.new_registrations('setattrs'))
self._insert_cast_defn(loader.new_registrations('casts'))
self._insert_get_constant_defn(loader.new_registrations('constants'))
def insert_func_defn(self, defns):
for impl, func, sig in defns:
self._defns[func].append(impl, sig)
def _insert_getattr_defn(self, defns):
for impl, attr, sig in defns:
self._getattrs[attr].append(impl, sig)
def _insert_setattr_defn(self, defns):
for impl, attr, sig in defns:
self._setattrs[attr].append(impl, sig)
def _insert_cast_defn(self, defns):
for impl, sig in defns:
self._casts.append(impl, sig)
def _insert_get_constant_defn(self, defns):
for impl, sig in defns:
self._get_constants.append(impl, sig)
def insert_user_function(self, func, fndesc, libs=()):
impl = user_function(fndesc, libs)
self._defns[func].append(impl, impl.signature)
def add_user_function(self, func, fndesc, libs=()):
if func not in self._defns:
msg = "{func} is not a registered user function"
raise KeyError(msg.format(func=func))
impl = user_function(fndesc, libs)
self._defns[func].append(impl, impl.signature)
def insert_generator(self, genty, gendesc, libs=()):
assert isinstance(genty, types.Generator)
impl = user_generator(gendesc, libs)
self._generators[genty] = gendesc, impl
def remove_user_function(self, func):
"""
Remove user function *func*.
KeyError is raised if the function isn't known to us.
"""
del self._defns[func]
def get_external_function_type(self, fndesc):
argtypes = [self.get_argument_type(aty)
for aty in fndesc.argtypes]
# don't wrap in pointer
restype = self.get_argument_type(fndesc.restype)
fnty = Type.function(restype, argtypes)
return fnty
def declare_function(self, module, fndesc):
fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes)
fn = module.get_or_insert_function(fnty, name=fndesc.mangled_name)
self.call_conv.decorate_function(fn, fndesc.args, fndesc.argtypes, noalias=fndesc.noalias)
if fndesc.inline:
fn.attributes.add('alwaysinline')
return fn
def declare_external_function(self, module, fndesc):
fnty = self.get_external_function_type(fndesc)
fn = module.get_or_insert_function(fnty, name=fndesc.mangled_name)
assert fn.is_declaration
for ak, av in zip(fndesc.args, fn.args):
av.name = "arg.%s" % ak
return fn
def insert_const_string(self, mod, string):
"""
Insert constant *string* (a str object) into module *mod*.
"""
stringtype = GENERIC_POINTER
name = ".const.%s" % string
text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00")
gv = self.insert_unique_const(mod, name, text)
return Constant.bitcast(gv, stringtype)
def insert_const_bytes(self, mod, bytes, name=None):
"""
Insert constant *byte* (a `bytes` object) into module *mod*.
"""
stringtype = GENERIC_POINTER
name = ".bytes.%s" % (name or hash(bytes))
text = cgutils.make_bytearray(bytes)
gv = self.insert_unique_const(mod, name, text)
return Constant.bitcast(gv, stringtype)
def insert_unique_const(self, mod, name, val):
"""
Insert a unique internal constant named *name*, with LLVM value
*val*, into module *mod*.
"""
try:
gv = mod.get_global(name)
except KeyError:
return cgutils.global_constant(mod, name, val)
else:
return gv
def get_argument_type(self, ty):
return self.data_model_manager[ty].get_argument_type()
def get_return_type(self, ty):
return self.data_model_manager[ty].get_return_type()
def get_data_type(self, ty):
"""
Get a LLVM data representation of the Numba type *ty* that is safe
for storage. Record data are stored as byte array.
The return value is a llvmlite.ir.Type object, or None if the type
is an opaque pointer (???).
"""
return self.data_model_manager[ty].get_data_type()
def get_value_type(self, ty):
return self.data_model_manager[ty].get_value_type()
def pack_value(self, builder, ty, value, ptr, align=None):
"""
Pack value into the array storage at *ptr*.
If *align* is given, it is the guaranteed alignment for *ptr*
(by default, the standard ABI alignment).
"""
dataval = self.data_model_manager[ty].as_data(builder, value)
builder.store(dataval, ptr, align=align)
def unpack_value(self, builder, ty, ptr, align=None):
"""
Unpack value from the array storage at *ptr*.
If *align* is given, it is the guaranteed alignment for *ptr*
(by default, the standard ABI alignment).
"""
dm = self.data_model_manager[ty]
return dm.load_from_data_pointer(builder, ptr, align)
def get_constant_generic(self, builder, ty, val):
"""
Return a LLVM constant representing value *val* of Numba type *ty*.
"""
try:
impl = self._get_constants.find((ty,))
return impl(self, builder, ty, val)
except NotImplementedError:
raise NotImplementedError("Cannot lower constant of type '%s'" % (ty,))
def get_constant(self, ty, val):
"""
Same as get_constant_generic(), but without specifying *builder*.
Works only for simple types.
"""
# HACK: pass builder=None to preserve get_constant() API
return self.get_constant_generic(None, ty, val)
def get_constant_undef(self, ty):
lty = self.get_value_type(ty)
return Constant.undef(lty)
def get_constant_null(self, ty):
lty = self.get_value_type(ty)
return Constant.null(lty)
def get_function(self, fn, sig, _firstcall=True):
"""
Return the implementation of function *fn* for signature *sig*.
The return value is a callable with the signature (builder, args).
"""
assert sig is not None
sig = sig.as_function()
if isinstance(fn, (types.Function, types.BoundFunction,
types.Dispatcher)):
key = fn.get_impl_key(sig)
overloads = self._defns[key]
else:
key = fn
overloads = self._defns[key]
try:
return _wrap_impl(overloads.find(sig.args), self, sig)
except NotImplementedError:
pass
if isinstance(fn, types.Type):
# It's a type instance => try to find a definition for the type class
try:
return self.get_function(type(fn), sig)
except NotImplementedError:
# Raise exception for the type instance, for a better error message
pass
# Automatically refresh the context to load new registries if we are
# calling the first time.
if _firstcall:
self.refresh()
return self.get_function(fn, sig, _firstcall=False)
raise NotImplementedError("No definition for lowering %s%s" % (key, sig))
def get_generator_desc(self, genty):
"""
"""
return self._generators[genty][0]
def get_generator_impl(self, genty):
"""
"""
res = self._generators[genty][1]
self.add_linking_libs(getattr(res, 'libs', ()))
return res
def get_bound_function(self, builder, obj, ty):
assert self.get_value_type(ty) == obj.type
return obj
def get_getattr(self, typ, attr):
"""
Get the getattr() implementation for the given type and attribute name.
The return value is a callable with the signature
(context, builder, typ, val, attr).
"""
if isinstance(typ, types.Module):
# Implement getattr for module-level globals.
# We are treating them as constants.
# XXX We shouldn't have to retype this
attrty = self.typing_context.resolve_module_constants(typ, attr)
if attrty is None or isinstance(attrty, types.Dummy):
# No implementation required for dummies (functions, modules...),
# which are dealt with later
return None
else:
pyval = getattr(typ.pymod, attr)
llval = self.get_constant(attrty, pyval)
def imp(context, builder, typ, val, attr):
return impl_ret_borrowed(context, builder, attrty, llval)
return imp
# Lookup specific getattr implementation for this type and attribute
overloads = self._getattrs[attr]
try:
return overloads.find((typ,))
except NotImplementedError:
pass
# Lookup generic getattr implementation for this type
overloads = self._getattrs[None]
try:
return overloads.find((typ,))
except NotImplementedError:
pass
raise NotImplementedError("No definition for lowering %s.%s" % (typ, attr))
def get_setattr(self, attr, sig):
"""
Get the setattr() implementation for the given attribute name
and signature.
The return value is a callable with the signature (builder, args).
"""
assert len(sig.args) == 2
typ = sig.args[0]
valty = sig.args[1]
def wrap_setattr(impl):
def wrapped(builder, args):
return impl(self, builder, sig, args, attr)
return wrapped
# Lookup specific setattr implementation for this type and attribute
overloads = self._setattrs[attr]
try:
return wrap_setattr(overloads.find((typ, valty)))
except NotImplementedError:
pass
# Lookup generic setattr implementation for this type
overloads = self._setattrs[None]
try:
return wrap_setattr(overloads.find((typ, valty)))
except NotImplementedError:
pass
raise NotImplementedError("No definition for lowering %s.%s = %s"
% (typ, attr, valty))
def get_argument_value(self, builder, ty, val):
"""
Argument representation to local value representation
"""
return self.data_model_manager[ty].from_argument(builder, val)
def get_returned_value(self, builder, ty, val):
"""
Return value representation to local value representation
"""
return self.data_model_manager[ty].from_return(builder, val)
def get_return_value(self, builder, ty, val):
"""
Local value representation to return type representation
"""
return self.data_model_manager[ty].as_return(builder, val)
def get_value_as_argument(self, builder, ty, val):
"""Prepare local value representation as argument type representation
"""
return self.data_model_manager[ty].as_argument(builder, val)
def get_value_as_data(self, builder, ty, val):
return self.data_model_manager[ty].as_data(builder, val)
def get_data_as_value(self, builder, ty, val):
return self.data_model_manager[ty].from_data(builder, val)
def pair_first(self, builder, val, ty):
"""
Extract the first element of a heterogeneous pair.
"""
pair = self.make_helper(builder, ty, val)
return pair.first
def pair_second(self, builder, val, ty):
"""
Extract the second element of a heterogeneous pair.
"""
pair = self.make_helper(builder, ty, val)
return pair.second
def cast(self, builder, val, fromty, toty):
"""
Cast a value of type *fromty* to type *toty*.
This implements implicit conversions as can happen due to the
granularity of the Numba type system, or lax Python semantics.
"""
if fromty == toty or toty == types.Any:
return val
try:
impl = self._casts.find((fromty, toty))
return impl(self, builder, fromty, toty, val)
except NotImplementedError:
raise NotImplementedError(
"Cannot cast %s to %s: %s" % (fromty, toty, val))
def generic_compare(self, builder, key, argtypes, args):
"""
Compare the given LLVM values of the given Numba types using
the comparison *key* (e.g. '=='). The values are first cast to
a common safe conversion type.
"""
at, bt = argtypes
av, bv = args
ty = self.typing_context.unify_types(at, bt)
assert ty is not None
cav = self.cast(builder, av, at, ty)
cbv = self.cast(builder, bv, bt, ty)
fnty = self.typing_context.resolve_value_type(key)
# the sig is homogeneous in the unified casted type
cmpsig = fnty.get_call_type(self.typing_context, (ty, ty), {})
cmpfunc = self.get_function(fnty, cmpsig)
self.add_linking_libs(getattr(cmpfunc, 'libs', ()))
return cmpfunc(builder, (cav, cbv))
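    # Illustrative behaviour (comment added, not from the original source):
    # comparing an int32 value with a float64 value first unifies both
    # operands to float64, then dispatches the implementation of *key*
    # (e.g. '==') resolved for the (float64, float64) signature.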
def make_optional_none(self, builder, valtype):
optval = self.make_helper(builder, types.Optional(valtype))
optval.valid = cgutils.false_bit
return optval._getvalue()
def make_optional_value(self, builder, valtype, value):
optval = self.make_helper(builder, types.Optional(valtype))
optval.valid = cgutils.true_bit
optval.data = value
return optval._getvalue()
def is_true(self, builder, typ, val):
"""
Return the truth value of a value of the given Numba type.
"""
fnty = self.typing_context.resolve_value_type(bool)
sig = fnty.get_call_type(self.typing_context, (typ,), {})
impl = self.get_function(fnty, sig)
return impl(builder, (val,))
def get_c_value(self, builder, typ, name, dllimport=False):
"""
Get a global value through its C-accessible *name*, with the given
LLVM type.
If *dllimport* is true, the symbol will be marked as imported
from a DLL (necessary for AOT compilation under Windows).
"""
module = builder.function.module
try:
gv = module.get_global_variable_named(name)
except LLVMException:
gv = module.add_global_variable(typ, name)
if dllimport and self.aot_mode and sys.platform == 'win32':
gv.storage_class = "dllimport"
return gv
def call_external_function(self, builder, callee, argtys, args):
args = [self.get_value_as_argument(builder, ty, arg)
for ty, arg in zip(argtys, args)]
retval = builder.call(callee, args)
return retval
def get_function_pointer_type(self, typ):
return self.data_model_manager[typ].get_data_type()
def call_function_pointer(self, builder, funcptr, args, cconv=None):
return builder.call(funcptr, args, cconv=cconv)
def print_string(self, builder, text):
mod = builder.module
cstring = GENERIC_POINTER
fnty = Type.function(Type.int(), [cstring])
puts = mod.get_or_insert_function(fnty, "puts")
return builder.call(puts, [text])
def debug_print(self, builder, text):
mod = builder.module
cstr = self.insert_const_string(mod, str(text))
self.print_string(builder, cstr)
def printf(self, builder, format_string, *args):
mod = builder.module
if isinstance(format_string, str):
cstr = self.insert_const_string(mod, format_string)
else:
cstr = format_string
fnty = Type.function(Type.int(), (GENERIC_POINTER,), var_arg=True)
fn = mod.get_or_insert_function(fnty, "printf")
return builder.call(fn, (cstr,) + tuple(args))
def get_struct_type(self, struct):
"""
Get the LLVM struct type for the given Structure class *struct*.
"""
fields = [self.get_value_type(v) for _, v in struct._fields]
return Type.struct(fields)
def get_dummy_value(self):
return Constant.null(self.get_dummy_type())
def get_dummy_type(self):
return GENERIC_POINTER
def _compile_subroutine_no_cache(self, builder, impl, sig, locals={},
flags=None):
"""
Invoke the compiler to compile a function to be used inside a
nopython function, but without generating code to call that
function.
Note this context's flags are not inherited.
"""
# Compile
from numba import compiler
with global_compiler_lock:
codegen = self.codegen()
library = codegen.create_library(impl.__name__)
if flags is None:
flags = compiler.Flags()
flags.set('no_compile')
flags.set('no_cpython_wrapper')
cres = compiler.compile_internal(self.typing_context, self,
library,
impl, sig.args,
sig.return_type, flags,
locals=locals)
# Allow inlining the function inside callers.
self.active_code_library.add_linking_library(cres.library)
return cres
def compile_subroutine(self, builder, impl, sig, locals={}, flags=None,
caching=True):
"""
Compile the function *impl* for the given *sig* (in nopython mode).
Return an instance of CompileResult.
If *caching* evaluates True, the function keeps the compiled function
for reuse in *.cached_internal_func*.
"""
cache_key = (impl.__code__, sig, type(self.error_model))
if not caching:
cached = None
else:
if impl.__closure__:
# XXX This obviously won't work if a cell's value is
# unhashable.
cache_key += tuple(c.cell_contents for c in impl.__closure__)
cached = self.cached_internal_func.get(cache_key)
if cached is None:
cres = self._compile_subroutine_no_cache(builder, impl, sig,
locals=locals,
flags=flags)
self.cached_internal_func[cache_key] = cres
cres = self.cached_internal_func[cache_key]
# Allow inlining the function inside callers.
self.active_code_library.add_linking_library(cres.library)
return cres
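    # Illustrative caching behaviour (comment added for clarity): repeated
    # calls with the same code object, signature and error model reuse the
    # cached CompileResult; closure cells extend the cache key, so their
    # contents must be hashable (see the XXX note above).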
def compile_internal(self, builder, impl, sig, args, locals={}):
"""
Like compile_subroutine(), but also call the function with the given
*args*.
"""
cres = self.compile_subroutine(builder, impl, sig, locals)
return self.call_internal(builder, cres.fndesc, sig, args)
def call_internal(self, builder, fndesc, sig, args):
"""
Given the function descriptor of an internally compiled function,
emit a call to that function with the given arguments.
"""
# Add call to the generated function
llvm_mod = builder.module
fn = self.declare_function(llvm_mod, fndesc)
status, res = self.call_conv.call_function(builder, fn, sig.return_type,
sig.args, args)
with cgutils.if_unlikely(builder, status.is_error):
self.call_conv.return_status_propagate(builder, status)
res = imputils.fix_returning_optional(self, builder, sig, status, res)
return res
def call_unresolved(self, builder, name, sig, args):
"""
Insert a function call to an unresolved symbol with the given *name*.
Note: this is used for recursive call.
In the mutual recursion case::
@njit
def foo():
... # calls bar()
@njit
def bar():
... # calls foo()
foo()
When foo() is called, the compilation of bar() is fully completed
(codegen'ed and loaded) before foo() is. Since MCJIT's eager compilation
doesn't allow loading modules with declare-only functions (which is
needed for foo() in bar()), the call_unresolved injects a global
variable that the "linker" can update even after the module is loaded by
MCJIT. The linker would allocate space for the global variable before
the bar() module is loaded. When later foo() module is defined, it will
update bar()'s reference to foo().
The legacy lazy JIT and the new ORC JIT would allow a declare-only
function be used in a module as long as it is defined by the time of its
first use.
"""
# Insert an unresolved reference to the function being called.
codegen = self.codegen()
fnty = self.call_conv.get_function_type(sig.return_type, sig.args)
fn = codegen.insert_unresolved_ref(builder, fnty, name)
# Normal call sequence
status, res = self.call_conv.call_function(builder, fn, sig.return_type,
sig.args, args)
with cgutils.if_unlikely(builder, status.is_error):
self.call_conv.return_status_propagate(builder, status)
res = imputils.fix_returning_optional(self, builder, sig, status, res)
return res
def get_executable(self, func, fndesc):
raise NotImplementedError
def get_python_api(self, builder):
return PythonAPI(self, builder)
def sentry_record_alignment(self, rectyp, attr):
"""
Assumes offset starts from a properly aligned location
"""
if self.strict_alignment:
offset = rectyp.offset(attr)
elemty = rectyp.typeof(attr)
align = self.get_abi_alignment(self.get_data_type(elemty))
if offset % align:
msg = "{rec}.{attr} of type {type} is not aligned".format(
rec=rectyp, attr=attr, type=elemty)
raise TypeError(msg)
def get_helper_class(self, typ, kind='value'):
"""
Get a helper class for the given *typ*.
"""
# XXX handle all types: complex, array, etc.
# XXX should it be a method on the model instead? this would allow a default kind...
return cgutils.create_struct_proxy(typ, kind)
def _make_helper(self, builder, typ, value=None, ref=None, kind='value'):
cls = self.get_helper_class(typ, kind)
return cls(self, builder, value=value, ref=ref)
def make_helper(self, builder, typ, value=None, ref=None):
"""
Get a helper object to access the *typ*'s members,
for the given value or reference.
"""
return self._make_helper(builder, typ, value, ref, kind='value')
def make_data_helper(self, builder, typ, ref=None):
"""
As make_helper(), but considers the value as stored in memory,
rather than a live value.
"""
return self._make_helper(builder, typ, ref=ref, kind='data')
def make_array(self, typ):
return arrayobj.make_array(typ)
def populate_array(self, arr, **kwargs):
"""
Populate array structure.
"""
return arrayobj.populate_array(arr, **kwargs)
def make_complex(self, builder, typ, value=None):
"""
Get a helper object to access the given complex numbers' members.
"""
assert isinstance(typ, types.Complex), typ
return self.make_helper(builder, typ, value)
def make_tuple(self, builder, typ, values):
"""
Create a tuple of the given *typ* containing the *values*.
"""
tup = self.get_constant_undef(typ)
for i, val in enumerate(values):
tup = builder.insert_value(tup, val, i)
return tup
def make_constant_array(self, builder, typ, ary):
"""
Create an array structure reifying the given constant array.
A low-level contiguous array constant is created in the LLVM IR.
"""
datatype = self.get_data_type(typ.dtype)
# don't freeze ary of non-contig or bigger than 1MB
size_limit = 10**6
if (self.allow_dynamic_globals and
(typ.layout not in 'FC' or ary.nbytes > size_limit)):
# get pointer from the ary
dataptr = ary.ctypes.data
data = self.add_dynamic_addr(builder, dataptr, info=str(type(dataptr)))
rt_addr = self.add_dynamic_addr(builder, id(ary), info=str(type(ary)))
else:
# Handle data: reify the flattened array in "C" or "F" order as a
# global array of bytes.
flat = ary.flatten(order=typ.layout)
# Note: we use `bytearray(flat.data)` instead of `bytearray(flat)` to
# workaround issue #1850 which is due to numpy issue #3147
consts = Constant.array(Type.int(8), bytearray(flat.data))
data = cgutils.global_constant(builder, ".const.array.data", consts)
# Ensure correct data alignment (issue #1933)
data.align = self.get_abi_alignment(datatype)
# No reference to parent ndarray
rt_addr = None
# Handle shape
llintp = self.get_value_type(types.intp)
shapevals = [self.get_constant(types.intp, s) for s in ary.shape]
cshape = Constant.array(llintp, shapevals)
# Handle strides
stridevals = [self.get_constant(types.intp, s) for s in ary.strides]
cstrides = Constant.array(llintp, stridevals)
# Create array structure
cary = self.make_array(typ)(self, builder)
intp_itemsize = self.get_constant(types.intp, ary.dtype.itemsize)
self.populate_array(cary,
data=builder.bitcast(data, cary.data.type),
shape=cshape,
strides=cstrides,
itemsize=intp_itemsize,
parent=rt_addr,
meminfo=None)
return cary._getvalue()
def add_dynamic_addr(self, builder, intaddr, info):
"""
Returns dynamic address as a void pointer `i8*`.
Internally, a global variable is added to inform the lowerer about
the usage of dynamic addresses. Caching will be disabled.
"""
assert self.allow_dynamic_globals, "dyn globals disabled in this target"
assert isinstance(intaddr, utils.INT_TYPES), 'dyn addr not of int type'
mod = builder.module
llvoidptr = self.get_value_type(types.voidptr)
addr = self.get_constant(types.uintp, intaddr).inttoptr(llvoidptr)
# Use a unique name by embedding the address value
symname = 'numba.dynamic.globals.{:x}'.format(intaddr)
gv = mod.add_global_variable(llvoidptr, name=symname)
# Use linkonce linkage to allow merging with other GV of the same name.
# And, avoid optimization from assuming its value.
gv.linkage = 'linkonce'
gv.initializer = addr
return builder.load(gv)
def get_abi_sizeof(self, ty):
"""
Get the ABI size of LLVM type *ty*.
"""
assert isinstance(ty, llvmir.Type), "Expected LLVM type"
return ty.get_abi_size(self.target_data)
def get_abi_alignment(self, ty):
"""
Get the ABI alignment of LLVM type *ty*.
"""
assert isinstance(ty, llvmir.Type), "Expected LLVM type"
return ty.get_abi_alignment(self.target_data)
def get_preferred_array_alignment(context, ty):
"""
Get preferred array alignment for Numba type *ty*.
"""
# AVX prefers 32-byte alignment
return 32
def post_lowering(self, mod, library):
"""Run target specific post-lowering transformation here.
"""
def create_module(self, name):
"""Create a LLVM module
"""
return lc.Module(name)
@property
def active_code_library(self):
"""Get the active code library
"""
return self._codelib_stack[-1]
@contextmanager
def push_code_library(self, lib):
"""Push the active code library for the context
"""
self._codelib_stack.append(lib)
try:
yield
finally:
self._codelib_stack.pop()
def add_linking_libs(self, libs):
"""Add iterable of linking libraries to the *active_code_library*.
"""
colib = self.active_code_library
for lib in libs:
colib.add_linking_library(lib)
class _wrap_impl(object):
"""
A wrapper object to call an implementation function with some predefined
(context, signature) arguments.
The wrapper also forwards attribute queries, which is important.
"""
def __init__(self, imp, context, sig):
self._callable = _wrap_missing_loc(imp)
self._imp = self._callable()
self._context = context
self._sig = sig
def __call__(self, builder, args, loc=None):
res = self._imp(self._context, builder, self._sig, args, loc=loc)
self._context.add_linking_libs(getattr(self, 'libs', ()))
return res
def __getattr__(self, item):
return getattr(self._imp, item)
def __repr__(self):
return "<wrapped %s>" % repr(self._callable)
def _has_loc(fn):
"""Does function *fn* take ``loc`` argument?
"""
sig = utils.pysignature(fn)
return 'loc' in sig.parameters
class _wrap_missing_loc(object):
def __init__(self, fn):
self.func = fn # store this to help with debug
def __call__(self):
"""Wrap function for missing ``loc`` keyword argument.
Otherwise, return the original *fn*.
"""
fn = self.func
if not _has_loc(fn):
def wrapper(*args, **kwargs):
kwargs.pop('loc') # drop unused loc
return fn(*args, **kwargs)
# Copy the following attributes from the wrapped.
# Following similar implementation as functools.wraps but
# ignore attributes if not available (i.e fix py2.7)
attrs = '__name__', 'libs'
for attr in attrs:
try:
val = getattr(fn, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, val)
return wrapper
else:
return fn
def __repr__(self):
return "<wrapped %s>" % self.func
| [
"[email protected]"
]
| |
15a5b9f4edafc0425bbc71ad0fadb13380abbce3 | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pythonwin/pywin/Demos/app/basictimerapp.py | 46531f04cc5ac47ca4ff16d82187736d37233b04 | []
| no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,609 | py | # basictimerapp - a really simple timer application.
# This should be run using the command line:
# pythonwin /app demos\basictimerapp.py
import sys
import time
import timer
import win32api
import win32con
import win32ui
from pywin.framework import app, cmdline, dlgappcore
class TimerAppDialog(dlgappcore.AppDialog):
softspace = 1
def __init__(self, appName=""):
dlgappcore.AppDialog.__init__(self, win32ui.IDD_GENERAL_STATUS)
self.timerAppName = appName
self.argOff = 0
if len(self.timerAppName) == 0:
if len(sys.argv) > 1 and sys.argv[1][0] != '/':
self.timerAppName = sys.argv[1]
self.argOff = 1
def PreDoModal(self):
# sys.stderr = sys.stdout
pass
def ProcessArgs(self, args):
for arg in args:
if arg == "/now":
self.OnOK()
def OnInitDialog(self):
win32ui.SetProfileFileName('pytimer.ini')
self.title = win32ui.GetProfileVal(self.timerAppName, "Title", "Remote System Timer")
self.buildTimer = win32ui.GetProfileVal(self.timerAppName, "Timer", "EachMinuteIntervaler()")
self.doWork = win32ui.GetProfileVal(self.timerAppName, "Work", "DoDemoWork()")
# replace "\n" with real \n.
self.doWork = self.doWork.replace('\\n', '\n')
dlgappcore.AppDialog.OnInitDialog(self)
self.SetWindowText(self.title)
self.prompt1 = self.GetDlgItem(win32ui.IDC_PROMPT1)
self.prompt2 = self.GetDlgItem(win32ui.IDC_PROMPT2)
self.prompt3 = self.GetDlgItem(win32ui.IDC_PROMPT3)
self.butOK = self.GetDlgItem(win32con.IDOK)
self.butCancel = self.GetDlgItem(win32con.IDCANCEL)
self.prompt1.SetWindowText("Python Timer App")
self.prompt2.SetWindowText("")
self.prompt3.SetWindowText("")
self.butOK.SetWindowText("Do it now")
self.butCancel.SetWindowText("Close")
self.timerManager = TimerManager(self)
self.ProcessArgs(sys.argv[self.argOff:])
self.timerManager.go()
return 1
def OnDestroy(self, msg):
dlgappcore.AppDialog.OnDestroy(self, msg)
self.timerManager.stop()
def OnOK(self):
# stop the timer, then restart after setting special boolean
self.timerManager.stop()
self.timerManager.bConnectNow = 1
self.timerManager.go()
return
# def OnCancel(self): default behaviour - cancel == close.
# return
class TimerManager:
def __init__(self, dlg):
self.dlg = dlg
self.timerId = None
self.intervaler = eval(self.dlg.buildTimer)
self.bConnectNow = 0
self.bHaveSetPrompt1 = 0
def CaptureOutput(self):
self.oldOut = sys.stdout
self.oldErr = sys.stderr
sys.stdout = sys.stderr = self
self.bHaveSetPrompt1 = 0
def ReleaseOutput(self):
sys.stdout = self.oldOut
sys.stderr = self.oldErr
def write(self, str):
s = str.strip()
if len(s):
if self.bHaveSetPrompt1:
dest = self.dlg.prompt3
else:
dest = self.dlg.prompt1
self.bHaveSetPrompt1 = 1
dest.SetWindowText(s)
def go(self):
self.OnTimer(None, None)
def stop(self):
if self.timerId: timer.kill_timer(self.timerId)
self.timerId = None
def OnTimer(self, id, timeVal):
if id: timer.kill_timer(id)
if self.intervaler.IsTime() or self.bConnectNow:
# do the work.
try:
self.dlg.SetWindowText(self.dlg.title + " - Working...")
self.dlg.butOK.EnableWindow(0)
self.dlg.butCancel.EnableWindow(0)
self.CaptureOutput()
try:
exec(self.dlg.doWork)
print("The last operation completed successfully.")
except:
t, v, tb = sys.exc_info()
str = "Failed: %s: %s" % (t, repr(v))
print(str)
self.oldErr.write(str)
tb = None # Prevent cycle
finally:
self.ReleaseOutput()
self.dlg.butOK.EnableWindow()
self.dlg.butCancel.EnableWindow()
self.dlg.SetWindowText(self.dlg.title)
else:
now = time.time()
nextTime = self.intervaler.GetNextTime()
if nextTime:
timeDiffSeconds = nextTime - now
timeDiffMinutes = int(timeDiffSeconds / 60)
timeDiffSeconds = timeDiffSeconds % 60
timeDiffHours = int(timeDiffMinutes / 60)
timeDiffMinutes = timeDiffMinutes % 60
self.dlg.prompt1.SetWindowText(
"Next connection due in %02d:%02d:%02d" % (timeDiffHours, timeDiffMinutes, timeDiffSeconds))
self.timerId = timer.set_timer(self.intervaler.GetWakeupInterval(), self.OnTimer)
self.bConnectNow = 0
class TimerIntervaler:
def __init__(self):
self.nextTime = None
self.wakeUpInterval = 2000
def GetWakeupInterval(self):
return self.wakeUpInterval
def GetNextTime(self):
return self.nextTime
def IsTime(self):
now = time.time()
if self.nextTime is None:
self.nextTime = self.SetFirstTime(now)
ret = 0
if now >= self.nextTime:
ret = 1
self.nextTime = self.SetNextTime(self.nextTime, now)
# do the work.
return ret
class EachAnyIntervaler(TimerIntervaler):
def __init__(self, timeAt, timePos, timeAdd, wakeUpInterval=None):
TimerIntervaler.__init__(self)
self.timeAt = timeAt
self.timePos = timePos
self.timeAdd = timeAdd
if wakeUpInterval:
self.wakeUpInterval = wakeUpInterval
def SetFirstTime(self, now):
timeTup = time.localtime(now)
lst = []
for item in timeTup:
lst.append(item)
bAdd = timeTup[self.timePos] > self.timeAt
lst[self.timePos] = self.timeAt
for pos in range(self.timePos + 1, 6):
lst[pos] = 0
ret = time.mktime(tuple(lst))
if (bAdd):
ret = ret + self.timeAdd
        return ret
def SetNextTime(self, lastTime, now):
return lastTime + self.timeAdd
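# Illustrative reading of the (timeAt, timePos, timeAdd) triple (comment
# added): timePos indexes time.localtime() fields (3=hour, 4=minute,
# 5=second), so EachAnyIntervaler(30, 5, 60) would fire at second 30 of
# every minute; the subclasses below are just common presets.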
class EachMinuteIntervaler(EachAnyIntervaler):
def __init__(self, at=0):
EachAnyIntervaler.__init__(self, at, 5, 60, 2000)
class EachHourIntervaler(EachAnyIntervaler):
def __init__(self, at=0):
EachAnyIntervaler.__init__(self, at, 4, 3600, 10000)
class EachDayIntervaler(EachAnyIntervaler):
def __init__(self, at=0):
EachAnyIntervaler.__init__(self, at, 3, 86400, 10000)
class TimerDialogApp(dlgappcore.DialogApp):
def CreateDialog(self):
return TimerAppDialog()
def DoDemoWork():
print("Doing the work...")
print("About to connect")
win32api.MessageBeep(win32con.MB_ICONASTERISK)
win32api.Sleep(2000)
print("Doing something else...")
win32api.MessageBeep(win32con.MB_ICONEXCLAMATION)
win32api.Sleep(2000)
print("More work.")
win32api.MessageBeep(win32con.MB_ICONHAND)
win32api.Sleep(2000)
print("The last bit.")
win32api.MessageBeep(win32con.MB_OK)
win32api.Sleep(2000)
app = TimerDialogApp()
def t():
t = TimerAppDialog("Test Dialog")
t.DoModal()
return t
if __name__ == '__main__':
import demoutils
demoutils.NeedApp()
| [
"[email protected]"
]
| |
3ea816954b53404bf8485c9a0d61ec4f52b95ec5 | ea3e35eb82436bfa1e544346267b126bd80888e6 | /verb_filter.py | 6f71b4c06076e8329e81faadebc86f830d37683a | []
| no_license | amazingguni/commits-dataset | 5800ed7c9624d036c0e286b7e6e14887ed6d261e | 688eea9b1906859e2538cd8eda50dac82b006738 | refs/heads/master | 2022-11-29T18:14:43.623696 | 2020-08-04T08:03:54 | 2020-08-04T08:03:54 | 261,342,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | import sys
from pathlib import Path
from utils import tokenize, overlap_two_seq, starts_with_verb, remove_tag_issue_number,\
remove_http_urls, remove_redundant_white_space, remove_last_special_char, starts_with_verb2, remove_no_english_str
import random
def main(path):
preprocessed_dir = Path(path)
f_all_index = open(preprocessed_dir / 'all.index')
f_all_target = open(preprocessed_dir / 'all.target')
f_all_origin_target = open(preprocessed_dir / 'all.origin.target')
f_all_line_diff = open(preprocessed_dir / 'all.line.source')
f_all_word_diff = open(preprocessed_dir / 'all.word.source')
f_filtered_index = open(preprocessed_dir / 'all.verbfilter.index', 'w')
f_filtered_target = open(preprocessed_dir / 'all.verbfilter.target', 'w')
f_filtered_line_diff = open(preprocessed_dir / 'all.verbfilter.line.source', 'w')
f_filtered_word_diff = open(preprocessed_dir / 'all.verbfilter.word.source', 'w')
total_cnt = 0
filtered_cnt = 0
word_not_overlap_cnt = 0
for index, origin_target, target, line_diff, word_diff in zip(f_all_index, f_all_origin_target, f_all_target, f_all_line_diff, f_all_word_diff):
total_cnt += 1
target = target.strip()
origin_target = origin_target.strip()
if not target:
continue
line_diff = line_diff.strip()
word_diff = word_diff.strip()
if 'revert' in target.lower():
continue
target_words = target.split()
if not starts_with_verb(target.lower().split()):
continue
        word_diff_words = word_diff.split()
if not overlap_two_seq(word_diff_words, target_words):
word_not_overlap_cnt += 1
continue
f_filtered_index.write(f'{index.strip()}\n')
f_filtered_target.write(f'{target}\n')
f_filtered_line_diff.write(f'{line_diff}\n')
f_filtered_word_diff.write(f'{word_diff}\n')
filtered_cnt += 1
print(f'Filtered {filtered_cnt} data generated(total: {total_cnt})')
print(f'word_not_overlap_cnt: {word_not_overlap_cnt}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Verb filter commit message dataset')
parser.add_argument('--path', type=str, metavar='N', required=True, help='Directory which contains all dataset')
args = parser.parse_args()
main(args.path)
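# Example invocation (illustrative; the directory name is hypothetical):
#   python verb_filter.py --path data/preprocessed
# where the directory is expected to contain all.index, all.target,
# all.origin.target, all.line.source and all.word.source.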
| [
"[email protected]"
]
| |
23c84214c30992885af2c6f196c75971c9b62e9f | e88a8bb96ee85d52fdd21613356a1b48a0aba18e | /src/analyse/run.py | 2dcf1d51eec94f97723cefce71980c8ca2528fdd | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | timtroendle/money-land | b5c3f527e7a30eaa25dd47cf2f1082c5dbb6bb29 | fe3ed6e531cfe91156886d4fa685a14840749f36 | refs/heads/master | 2023-06-28T16:42:27.982087 | 2021-08-04T15:07:51 | 2021-08-04T15:07:51 | 229,407,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,718 | py | import calliope
from calliope.core.util.logging import set_log_verbosity
import calliope.backend.run
import calliope.backend.pyomo
import calliope.core.attrdict
import calliope.exceptions
from calliope.analysis import postprocess
import pyomo.core as po
ROOFTOP_TECH_NAME1 = "roof_mounted_pv_n"
ROOFTOP_TECH_NAME2 = "roof_mounted_pv_e_w"
ROOFTOP_TECH_NAME3 = "roof_mounted_pv_s_flat"
UTILITY_TECH_NAME = "open_field_pv"
WIND_TECH_NAME1 = "wind_onshore_monopoly"
WIND_TECH_NAME2 = "wind_onshore_competing"
OFFSHORE_TECH_NAME = "wind_offshore"
def run(path_to_model, override_dict, roof_share, util_share, wind_share, offshore_share,
units_without_shore, overrides, path_to_output):
assert roof_share + util_share + wind_share + offshore_share == 100
set_log_verbosity("info", include_solver_output=True, capture_warnings=True)
model = calliope.Model(
path_to_model,
scenario=",".join(overrides),
override_dict=override_dict
)
model.run(build_only=True)
pyomo_model = model.backend._backend
pyomo_model.roof_constraint = po.Constraint(pyomo_model.locs, rule=rooftop_constraint(roof_share / 100))
pyomo_model.util_constraint = po.Constraint(pyomo_model.locs, rule=utility_constraint(util_share / 100))
pyomo_model.wind_constraint = po.Constraint(
pyomo_model.locs,
rule=wind_constraint(wind_share / 100, offshore_share / 100, units_without_shore)
)
pyomo_model.offshore_constraint = po.Constraint(
pyomo_model.locs,
rule=offshore_constraint(offshore_share / 100, units_without_shore)
)
model = run_updated_model(model)
scenario = f"roof-{roof_share}-percent,util-{util_share}-percent,wind-{wind_share}-percent,offshore-{offshore_share}-percent"
model._model_data.attrs["scenario"] = scenario
model.to_netcdf(path_to_output)
def rooftop_constraint(share):
def rooftop_constraint(model, loc):
lhs = sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_rooftop_pv(loc_tech) and (loc_tech.split("::")[0] == loc)
)
rhs = share * sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_pv_or_wind(loc_tech) and (loc_tech.split("::")[0] == loc)
)
return lhs == rhs
return rooftop_constraint
def utility_constraint(share):
def utility_constraint(model, loc):
lhs = sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if loc_tech.split("::") == [loc, UTILITY_TECH_NAME]
)
rhs = share * sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_pv_or_wind(loc_tech) and (loc_tech.split("::")[0] == loc)
)
return lhs == rhs
return utility_constraint
def wind_constraint(wind_share, offshore_share, units_without_shore):
def wind_constraint(model, loc):
if offshore_share > 0 and loc in units_without_shore:
share = wind_share + offshore_share
else:
share = wind_share
lhs = sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_wind(loc_tech) and (loc_tech.split("::")[0] == loc)
)
rhs = share * sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_pv_or_wind(loc_tech) and (loc_tech.split("::")[0] == loc)
)
return lhs == rhs
return wind_constraint
def offshore_constraint(offshore_share, units_without_shore):
def offshore_constraint(model, loc):
if offshore_share > 0 and loc in units_without_shore:
share = 0
else:
share = offshore_share
lhs = sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if loc_tech.split("::") == [loc, OFFSHORE_TECH_NAME]
)
rhs = share * sum(
model.energy_cap[loc_tech]
for loc_tech in model.loc_techs
if is_pv_or_wind(loc_tech) and (loc_tech.split("::")[0] == loc)
)
return lhs == rhs
return offshore_constraint
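# Illustrative share split (comment added): the four percentages must sum to
# 100, as asserted in run(). For example roof_share=20, util_share=40,
# wind_share=30, offshore_share=10 pins each location's PV/wind capacity mix
# to those fractions, with offshore reassigned to onshore wind for the units
# listed in units_without_shore.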
def is_wind(loc_tech):
loc_tech = str(loc_tech)
return (
(WIND_TECH_NAME1 in loc_tech)
or (WIND_TECH_NAME2 in loc_tech)
)
def is_rooftop_pv(loc_tech):
loc_tech = str(loc_tech)
return (
(ROOFTOP_TECH_NAME1 in loc_tech)
or (ROOFTOP_TECH_NAME2 in loc_tech)
or (ROOFTOP_TECH_NAME3 in loc_tech)
)
def is_pv_or_wind(loc_tech):
loc_tech = str(loc_tech)
return (
(ROOFTOP_TECH_NAME1 in loc_tech)
or (ROOFTOP_TECH_NAME2 in loc_tech)
or (ROOFTOP_TECH_NAME3 in loc_tech)
or (UTILITY_TECH_NAME in loc_tech)
or (WIND_TECH_NAME1 in loc_tech)
or (WIND_TECH_NAME2 in loc_tech)
or (OFFSHORE_TECH_NAME in loc_tech)
)
def run_updated_model(model):
# This method is largely taken from various places within Calliope's core code,
# as Calliope does not offer this functionality.
# The code is thus copyright Calliope authors.
backend_model = model.backend._backend
backend_model.__calliope_run_config = calliope.core.attrdict.AttrDict.from_yaml_string(
model._model_data.attrs['run_config']
)
results, backend_mode = calliope.backend.run.run_plan(
model_data=model._model_data,
timings=model._timings,
backend=calliope.backend.pyomo.model,
backend_rerun=backend_model,
build_only=False
)
# Add additional post-processed result variables to results
if results.attrs.get('termination_condition', None) in ['optimal', 'feasible']:
results = postprocess.postprocess_model_results(
results, model._model_data, model._timings
)
else:
raise calliope.exceptions.BackendError("Problem is non optimal.")
for var in results.data_vars:
results[var].attrs['is_result'] = 1
model._model_data.update(results)
model._model_data.attrs.update(results.attrs)
model.results = model._model_data.filter_by_attrs(is_result=1)
return model
if __name__ == "__main__":
run(
path_to_model=snakemake.input.model,
override_dict=snakemake.params.override_dict,
roof_share=int(snakemake.wildcards.roof),
util_share=int(snakemake.wildcards.util),
wind_share=int(snakemake.wildcards.wind),
offshore_share=int(snakemake.wildcards.offshore),
units_without_shore=snakemake.params.no_shore,
overrides=snakemake.params.overrides,
path_to_output=snakemake.output[0]
)
| [
"[email protected]"
]
| |
8aa2163b58f7138a4de76912ea66304b7649d175 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_327/ch178_2020_08_14_14_01_59_028335.py | 7badb3c3c79c26e0d310b79dcaf29de895cd1704 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def junta_nomes(l0,l1,l2):
result = []
for i in l0:
for e in l2:
result.append(i + " " + e)
for a in l1:
for b in l2:
result.append(a + " " + b)
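    # Example (illustrative names): junta_nomes(["Ana"], ["Bia"], ["Silva", "Souza"])
    # returns ["Ana Silva", "Ana Souza", "Bia Silva", "Bia Souza"] -- names from l0
    # are paired with l2 first, then names from l1 with l2.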
return result | [
"[email protected]"
]
| |
f9d3a990ff56d875f011dbad5ee2666d88489f69 | d6b7b16b6e9c3287ffcac3869d11b4c4286f4b89 | /pmg/models/posts.py | 8f1926e2c73c2377ec6ccd75b099b8ed739e0bc9 | [
"Apache-2.0"
]
| permissive | havanhuy1997/pmg-cms-2 | 39e3e66f2b9f57a347e56b93d963c87554983fa7 | 21571235cf3d9552013bca29ab9af288b08e00d6 | refs/heads/master | 2020-06-27T20:05:05.776667 | 2019-08-01T07:46:47 | 2019-08-01T07:46:47 | 200,036,932 | 0 | 0 | Apache-2.0 | 2019-08-01T11:21:00 | 2019-08-01T11:20:59 | null | UTF-8 | Python | false | false | 1,502 | py | from sqlalchemy import func, sql
from sqlalchemy.orm import validates
from .base import FileLinkMixin
from pmg import db
class Post(db.Model):
__tablename__ = 'post'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, nullable=False)
slug = db.Column(db.String, nullable=False, unique=True, index=True)
featured = db.Column(db.Boolean(), default=False, server_default=sql.expression.false(), nullable=False, index=True)
body = db.Column(db.Text)
date = db.Column(db.DateTime(timezone=True), index=True, unique=False, nullable=False, server_default=func.now())
files = db.relationship("PostFile", lazy='joined')
created_at = db.Column(db.DateTime(timezone=True), index=True, unique=False, nullable=False, server_default=func.now())
updated_at = db.Column(db.DateTime(timezone=True), server_default=func.now(), onupdate=func.current_timestamp())
@validates('slug')
def validate_slug(self, key, value):
return value.strip('/')
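        # e.g. a slug submitted as "/my-post/" is stored as "my-post" (illustrative value)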
def __unicode__(self):
return unicode(self.title)
class PostFile(FileLinkMixin, db.Model):
__tablename__ = "post_files"
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey('post.id', ondelete='CASCADE'), index=True, nullable=False)
post = db.relationship('Post')
file_id = db.Column(db.Integer, db.ForeignKey('file.id', ondelete="CASCADE"), index=True, nullable=False)
file = db.relationship('File', lazy='joined')
| [
"[email protected]"
]
| |
6bf6394e69ea92fd3ce0755abb504a5cbf668f18 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/io_mesh_raw/export_raw.py | b5c5ef36fea3b28cc3b7d5627738ef87e4a9815a | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
]
| permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 2,896 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This script exports a Mesh to a RAW triangle format file.
The raw triangle format is very simple; it has no verts or faces lists.
It's just a simple ascii text file with the vertices of each triangle
listed on each line. In addition, also quads can be exported as a line
of 12 values (this was the default before blender 2.5). Now default
settings will triangulate the mesh.
Usage:
Execute this script from the "File->Export" menu. You can select
whether modifiers should be applied and if the mesh is triangulated.
"""
import bpy
def faceToTriangles(face):
triangles = []
if len(face) == 4:
triangles.append([face[0], face[1], face[2]])
triangles.append([face[2], face[3], face[0]])
else:
triangles.append(face)
return triangles
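# i.e. a quad [v0, v1, v2, v3] is split into the triangles [v0, v1, v2] and [v2, v3, v0]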
def faceValues(face, mesh, matrix):
fv = []
for verti in face.vertices:
fv.append((matrix * mesh.vertices[verti].co)[:])
return fv
def faceToLine(face):
return " ".join([("%.6f %.6f %.6f" % v) for v in face] + ["\n"])
def write(filepath,
applyMods=True,
triangulate=True,
):
scene = bpy.context.scene
faces = []
for obj in bpy.context.selected_objects:
if applyMods or obj.type != 'MESH':
try:
me = obj.to_mesh(scene, True, "PREVIEW")
            except Exception:
me = None
is_tmp_mesh = True
else:
me = obj.data
if not me.tessfaces and me.polygons:
me.calc_tessface()
is_tmp_mesh = False
if me is not None:
matrix = obj.matrix_world.copy()
for face in me.tessfaces:
fv = faceValues(face, me, matrix)
if triangulate:
faces.extend(faceToTriangles(fv))
else:
faces.append(fv)
if is_tmp_mesh:
bpy.data.meshes.remove(me)
# write the faces to a file
file = open(filepath, "w")
for face in faces:
file.write(faceToLine(face))
file.close()
| [
"[email protected]"
]
| |
3e6f3db8997dc8059f9c74f10b8bd93d869af08e | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/presets/operator/mesh.primitive_xyz_function_surface/catalan.py | 8c8767584bda51ce6cf33a934249ef20f9c3c6f2 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
]
| permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 446 | py | import bpy
op = bpy.context.active_operator
op.x_eq = 'u-sin(u)*cosh(v)'
op.y_eq = '4*sin(1/2*u)*sinh(v/2)'
op.z_eq = '1-cos(u)*cosh(v)'
op.range_u_min = -3.1415927410125732
op.range_u_max = 9.42477798461914
op.range_u_step = 32
op.wrap_u = False
op.range_v_min = -2.0
op.range_v_max = 2.0
op.range_v_step = 128
op.wrap_v = False
op.close_v = False
op.n_eq = 1
op.a_eq = '0'
op.b_eq = '0'
op.c_eq = '0'
op.f_eq = '0'
op.g_eq = '0'
op.h_eq = '0'
| [
"[email protected]"
]
| |
bfae4fb55b4f57600152cc8d3cb55d720b812077 | b9959cb19e518674b722e2a6fb879056c0f1ba83 | /kozmic/builds/views.py | 73a8d199b00b7c502aeff64b890e322d89a53943 | []
| no_license | bazilio91/kozmic-ci | d186d7c5b61081ea5eec972d36091c17ecfdf4be | f3ddc9145e4eb93803caae1511e1bcb4a9b18c7a | refs/heads/master | 2021-01-16T01:07:50.770776 | 2014-12-24T06:59:28 | 2014-12-24T07:40:16 | 31,765,700 | 0 | 0 | null | 2015-03-06T11:05:36 | 2015-03-06T11:05:35 | null | UTF-8 | Python | false | false | 3,379 | py | import json
import github3
import sqlalchemy
from flask import request, redirect, url_for
from kozmic import db, csrf
from kozmic.models import Project, Build, Hook, HookCall
from . import bp, tasks
def get_ref_and_sha(payload):
action = payload.get('action')
if action is None:
# See `tests.func_fixtures.PUSH_HOOK_CALL_DATA` for payload
ref = payload.get('ref') # ref looks like "refs/heads/master"
if not ref or not ref.startswith('refs/heads/'):
return None
prefix_length = len('refs/heads/')
ref = ref[prefix_length:]
sha = payload.get('head_commit', {}).get('id')
if not sha:
return None
return ref, sha
elif action in ('opened', 'synchronize'):
# See `tests.func_fixtures.PULL_REQUEST_HOOK_CALL_DATA` for payload
gh_pull = github3.pulls.PullRequest(payload.get('pull_request', {}))
try:
return gh_pull.head.ref, gh_pull.head.sha
        except Exception:
return None
else:
return None
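# Illustrative payload shapes handled above (example values, not taken from the repo):
#   push event:         {"ref": "refs/heads/master", "head_commit": {"id": "<sha>"}, ...}
#   pull request event: {"action": "opened", "pull_request": {"head": {"ref": ..., "sha": ...}}, ...}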
@csrf.exempt
@bp.route('/_hooks/hook/<int:id>/', methods=('POST',))
def hook(id):
hook = Hook.query.get_or_404(id)
payload = json.loads(request.data)
if set(payload.keys()) == {'zen', 'hook_id'}:
# http://developer.github.com/webhooks/#ping-event
if hook.gh_id != payload['hook_id']:
return 'Wrong hook URL', 400
else:
return 'OK'
ref_and_sha = get_ref_and_sha(payload)
if not ref_and_sha:
return 'Failed to fetch ref and commit from payload', 400
ref, sha = ref_and_sha
gh_commit = hook.project.gh.git_commit(sha)
build = hook.project.builds.filter(
Build.gh_commit_ref == ref,
Build.gh_commit_sha == gh_commit.sha).first()
if not build:
build = Build(
project=hook.project,
status='enqueued',
gh_commit_ref=ref,
gh_commit_sha=gh_commit.sha,
gh_commit_author=gh_commit.author['name'],
gh_commit_message=gh_commit.message)
build.calculate_number()
db.session.add(build)
hook_call = HookCall(
hook=hook,
build=build,
gh_payload=payload)
db.session.add(hook_call)
try:
db.session.commit()
except sqlalchemy.exc.IntegrityError:
# Commit may fail due to "unique_ref_and_sha_within_project"
# constraint on Build or "unique_hook_call_within_build" on
# HookCall. It means that GitHub called this hook twice
# (for example, on push and pull request sync events)
# at the same time and Build and HookCall has been just
# committed by another transaction.
db.session.rollback()
return 'OK'
tasks.do_job.delay(hook_call_id=hook_call.id)
return 'OK'
@bp.route('/badges/<gh_login>/<gh_name>/<ref>')
def badge(gh_login, gh_name, ref):
project = Project.query.filter_by(
gh_login=gh_login, gh_name=gh_name).first_or_404()
build = project.get_latest_build(ref=ref)
badge = build and build.status or 'success'
response = redirect(url_for(
'static',
filename='img/badges/{}.png'.format(badge),
_external=True,
# Use https so that GitHub does not cache images served from HTTPS
_scheme='https'))
response.status_code = 307
return response
| [
"[email protected]"
]
| |
64060e6e7e281f5870da0bd130d6b4b05662328c | e2ca3205bb5240a1e4c87de0bdb13faa70241f16 | /src/verify/image/alpine/proxy/setup.py | f3dc277e61ded0dff9250e7536da94e2549aecb9 | [
"Apache-2.0"
]
| permissive | random-python/nspawn | 67da4d96d54dcbf537adaf3421a03020ea5c1769 | 25f53aa565c0685842a89d48d949b0459b1de0a6 | refs/heads/master | 2023-05-11T21:27:44.557577 | 2023-05-07T17:00:18 | 2023-05-07T17:00:18 | 184,904,641 | 21 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | #!/usr/bin/env python
# import os, runpy
# this_dir = os.path.dirname(os.path.abspath(__file__))
# runpy.run_path(f"{this_dir}/a.py")
from nspawn.setup import *
import platform
epoch = "3.10"
release = f"{epoch}.3"
hardware = platform.machine()
machine_name = "alpa-proxy"
network_face = TOOL.select_interface()
IMAGE(f"file://localhost/tmp/nspawn/repo/alpine/proxy/default-{release}-{hardware}.tar.gz")
MACHINE(
# define machine name
name=machine_name,
# extra entries for [Unit] section
unit_conf=[
"Description=hello-kitty", # override description
],
# extra entries for [Service] section
service_conf=[
"CPUQuota=10%", # throttle processor usage
],
# extra entries for [Install] section
install_conf=[
"# user comment: hello-kitty", # inject user comment
],
)
WITH(
# Hostname="alpase", # needs systemd v 239
Boot='yes', # auto detect /bin/init program
Quiet="yes", # suppress "press to escape" message
KeepUnit="yes", # use service unit as nspawn scope
Register="yes", # expose service unit with machinectl
MACVLAN=network_face,
# Capability='all',
)
# use host ssh login for container
WITH(BindReadOnly="/root/.ssh/authorized_keys")
# alpine system entry
# EXEC(['/sbin/init'])
# EXEC(['/bin/ls', '-Rlas', f"/root"])
# external config
config_dir = f"{TOOL.nspawn_tempdir()}/machine/{machine_name}"
# externally configurable hostname
hostname_path = f"{config_dir}/etc/hostname"
WITH(BindReadOnly=f"{hostname_path}:/etc/hostname")
CAST(source="/etc/hostname", target=hostname_path, machine_name=machine_name)
# externally exposed message log
messages_path = f"{config_dir}/var/log/messages"
WITH(Bind=f"{messages_path}:/var/log/messages")
| [
"[email protected]"
]
| |
865c1a19719a5537fde1e8a41f62e258ab016386 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/mtsmol017/question3.py | c1a527c00cceea2444895527ffa725ec32f0b7c3 | []
| no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | x=input("Enter the message:\n")
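# Inferred behaviour (the submission has no comments): the program draws the message
# x, repeated y times, inside an ASCII frame of thickness b; v = b - 1 is the number
# of extra frame layers and p is the width of the innermost frame.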
y=eval(input("Enter the message repeat count:\n"))
b=eval(input("Enter the frame thickness:\n"))
v=b-1
u=len(x)
p=u+2
c=p+2*v
e=0
space=0
for i in range(v):
print("|"*e,"+","-"*c,"+","|"*e,sep="")
c=c-2
space=space+1
e=e+1
if b>0:
print("|"*v,"+",p*"-","+","|"*v,sep="")
t=1
for i in range(y):
print("|"*v,"|"*t, " ",x, " ","|"*v,end="|",sep="")
print()
if b>0:
print("|"*v,"+",p*"-","+","|"*v,sep="")
c=p+1
space=v-1
for i in range(v):
print("|"*space,"+","-","-"*c,"+","|"*space,sep="")
c=c+2
space=space-1 | [
"[email protected]"
]
| |
ada0361a5b786a11fe3b4e3330a7fe7f771d22a9 | f6a36e12544e228d327c22f17e425d9c1b69b988 | /SignalMC/python/AMSB_chargino_FilterSumPt50_cfi_py_GEN_SIM_DIGI_L1_DIGI2RAW_HLT_RAW2DIGI_L1Reco_RECO_PU.py | ae3e47d1a5520b7582cfed77a42a0938e92ec0da | []
| no_license | srimanob/DisappTrks | 1fcbe1bf6ac8ab0be5cde802d0da89efe52a5694 | 21c4edab368ea873502179352d9f186e6bd9ea69 | refs/heads/master | 2021-01-18T12:04:35.492215 | 2016-07-01T15:51:01 | 2016-07-01T15:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,685 | py | # Auto generated configuration file
# using:
# Revision: 1.381.2.27
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: DisappTrks/SignalMC/python/AMSB_chargino_FilterSumPt50_cfi.py -s GEN,SIM,DIGI,L1,DIGI2RAW,HLT:7E33v2,RAW2DIGI,L1Reco,RECO --conditions START53_V27::All --beamspot Realistic8TeVCollision --datatier GEN-SIM-RECO --pileup 2012_Summer_50ns_PoissonOOTPU --datamix NODATAMIXER --eventcontent RECOSIM -n 2 --no_exec --fileout AMSB_chargino_RECO.root
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mix_2012_Summer_50ns_PoissonOOTPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('HLTrigger.Configuration.HLT_7E33v2_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.381.2.27 $'),
annotation = cms.untracked.string('DisappTrks/SignalMC/python/AMSB_chargino_FilterSumPt50_cfi.py nevts:2'),
name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('AMSB_chargino_RECO.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-RECO')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'START53_V27::All', '')
process.dicharginoSumPtFilter = cms.EDFilter("MCParticlePairSumPtFilter",
MinSumPt = cms.untracked.double(50.0),
ParticleIDs = cms.untracked.vint32(1000022, 1000024)
)
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(3),
comEnergy = cms.double(8000.0),
particleFile = cms.untracked.string('DisappTrks/SignalMC/data/geant4_AMSB_chargino_MASSPOINTGeV_ctauLIFETIMEcm.slha'),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
slhaFile = cms.untracked.string('DisappTrks/SignalMC/data/AMSB_chargino_MASSPOINTGeV_Isajet780.slha'),
massPoint = cms.untracked.int32(-999),
hscpFlavor = cms.untracked.string('stau'),
PythiaParameters = cms.PSet(
pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'PARP(82)=1.921 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.227 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model'),
processParameters = cms.vstring('IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33) ',
'IMSS(22) = 33 ! Read-in SLHA decay table ',
'MSEL = 0 ! General SUSY',
'MSUB(226) = 1 ! to double chargino',
'MSUB(229) = 1 ! to neutralino + chargino',
'MDCY(312,1) = 0 ! set the chargino stable.'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters',
'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = DisappTrks/SignalMC/data/AMSB_chargino_MASSPOINTGeV_Isajet780.slha')
)
)
process.ProductionFilterSequence = cms.Sequence(process.generator+process.dicharginoSumPtFilter)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.genParticlePlusGeant = cms.EDProducer("GenPlusSimParticleProducer",
src = cms.InputTag("g4SimHits"), # use "famosSimHits" for FAMOS
setStatus = cms.int32(8), # set status = 8 for GEANT GPs
filter = cms.vstring("pt > 0.0"), # just for testing (optional)
genParticles = cms.InputTag("genParticles") # original genParticle list
)
process.simulation_step = cms.Path(process.psim + process.genParticlePlusGeant)
process.RECOSIMoutput.outputCommands.extend( [
"keep *_genParticlePlusGeant_*_*",
] )
process.digitisation_step = cms.Path(process.pdigi)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.digi2raw_step = cms.Path(process.DigiToRaw)
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.digitisation_step,process.L1simulation_step,process.digi2raw_step)
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECOSIMoutput_step])
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# End of customisation functions
# The configuration settings below are needed for simulating long-lived charginos:
from SimG4Core.CustomPhysics.Exotica_HSCP_SIM_cfi import customise
process = customise(process)
process.g4SimHits.StackingAction.SavePrimaryDecayProductsAndConversionsInTracker = cms.untracked.bool(True)
process.g4SimHits.StackingAction.SavePrimaryDecayProductsAndConversionsInCalo = cms.untracked.bool(True)
process.g4SimHits.StackingAction.SavePrimaryDecayProductsAndConversionsInMuon = cms.untracked.bool(True)
process.g4SimHits.SteppingAction.MaxTrackTimes = cms.vdouble(2000.0, 500.0, 500.0)
process.g4SimHits.StackingAction.MaxTrackTimes = cms.vdouble(2000.0, 500.0, 500.0)
process.common_maximum_time.MaxTrackTimes = cms.vdouble(2000.0, 500.0, 500.0)
## Dump python config if desired
outfile = open('dumpedConfig.py','w'); print >> outfile,process.dumpPython(); outfile.close()
| [
"[email protected]"
]
| |
b3254a295aa190918b3fac6417611cf475e45b40 | 4bb8b5ba278b26ce13e9adc0ca6c9d22daebea97 | /equivariantExperiments/trainOnMultipleGPUsCcFC_angles.py | 6c218f0b7f2c3c0995ab4a37b401332345bec5b9 | []
| no_license | thodorisGeorgiou/rotation_in-equivariance_experiments | 7a2d1389d25af99e8b6771b1524da40f50b92095 | 02722225547ba237cf3515892cdeeca621d16809 | refs/heads/main | 2023-03-13T02:53:18.150069 | 2021-03-04T12:17:05 | 2021-03-04T12:17:05 | 344,463,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,610 | py | import os
import sys
import numpy
import pickle
import scipy.ndimage
# from multiprocessing import Pool
import multiprocessing
sys.path.append("/scratch/georgioutk/cliffordConvolution/")
import tensorflow as tf
import cliffordConvolution as cc
import preprocessing
import ccCnnCcFC as network
# import perCInCcCnnCcFC as network
# import opCnnAllcc55L as network
numGpus = 1
numGpusTest = 1
numCpus = 5
numEpochs = 360
batch_size = 128
testBatch_size = 400
MOVING_AVERAGE_DECAY = 0.0
run = sys.argv[1]
baseDir = "trained_testNewAngles/ccCcFCNet5L3x96_64_2x36_32_16_fs_9_all_7_16Bins_weightMask_trainAll_onlyRescaleBNFC_0MAD_MaxConv0_droppingLR_30_signedDiffMinus_notMaybe_"+run+"_"
# baseDir = "trained_testNewAngles/ccCcFCNet5L3x96_64_2x36_32_16_fs_9_all_7_16Bins_weightMask_trainAll_onlyRescaleBNFC_dropout_3x0.5_0MAD_MaxConv0_droppingLR_30_signedDiffMinus_stopGradient_"+run+"_"
train_dir = os.getcwd()+"/"+baseDir
def rotateDataset(dataset=None, output=None):
res = numpy.zeros(dataset.shape)
for i in range(dataset.shape[0]):
a = numpy.random.rand()*2*numpy.pi
res[i] = scipy.ndimage.rotate(dataset[i], numpy.degrees(a), order=3, reshape=False)
# return res
output['x'] = res
def testNetwork(sess, top_1, testBatch_size, iterator, gx_test, y_test, a_test, log):
inputPlaceholders = tf.get_collection("inputTestData")
sess.run(iterator.initializer, feed_dict={inputPlaceholders[0]: gx_test, inputPlaceholders[1]: y_test, inputPlaceholders[2]: a_test})
correct = 0
xEntropy = 0
anDif = 0
count = 0
while True:
try:
res = sess.run(top_1)
correct += numpy.sum(res[0])
xEntropy += res[1]
anDif += res[2]
count += testBatch_size
except tf.errors.OutOfRangeError:
break
# print("Validation accuracy: "+str(correct/count), file=log)
# return correct/count
return 2.3 - xEntropy/count, correct/count, anDif/count
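# Note on the first value returned above: 2.3 ~= ln(10), the cross-entropy of a
# uniform guess over 10 classes, so the score increases as the model improves.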
def ema_to_weights(ema, variables):
return tf.group(*(tf.assign(var, ema.average(var).read_value()) for var in variables))
def save_weight_backups():
return tf.group(*(tf.assign(bck, var.read_value()) for var, bck in zip(model_vars, backup_vars)))
def restore_weight_backups():
return tf.group(*(tf.assign(var, bck.read_value()) for var, bck in zip(model_vars, backup_vars)))
def to_training():
with tf.control_dependencies([tf.assign(is_training, True)]):
return restore_weight_backups()
def to_testing(ema):
with tf.control_dependencies([tf.assign(is_training, False)]):
with tf.control_dependencies([save_weight_backups()]):
return ema_to_weights(ema, model_vars)
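# The helpers above implement the usual EMA-evaluation swap: to_testing() backs up
# the live weights and loads their exponential moving averages for evaluation;
# to_training() restores the backed-up weights so optimisation continues unaffected.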
if __name__ == '__main__':
if tf.gfile.Exists(train_dir):
tf.gfile.DeleteRecursively(train_dir)
tf.gfile.MakeDirs(train_dir)
is_training = tf.get_variable('is_training', shape=(), dtype=tf.bool, initializer=tf.constant_initializer(True, dtype=tf.bool), trainable=False)
global_step = tf.Variable(0, trainable=False)
# mnsitTrain = numpy.load("/data/georgioutk/experiments/mnistRot/data/mnist_rotation_new/rotated_train.npz")
# mnsitVal = numpy.load("/data/georgioutk/experiments/mnistRot/data/mnist_rotation_new/rotated_valid.npz")
# mnsitTrainVal = numpy.loadtxt("/data/georgioutk/experiments/mnistRot/data/mnist_rotation/mnist_all_rotation_normalized_float_train_valid.amat", dtype=numpy.float32)
# mnsitTrainVal = numpy.loadtxt("data/mnist_all_rotation_normalized_float_train_valid.amat", dtype=numpy.float32)
# mnsitTestRaw = numpy.loadtxt("data/mnist_all_rotation_normalized_float_test.amat", dtype=numpy.float32)
# mnsitTrain = {}
# mnsitVal = {}
# mnsitTest = {}
# mnsitTrainFull = {}
# mnsitTrain['x'] = mnsitTrainVal[:,:-1]
# mnsitTrain['y'] = mnsitTrainVal[:,-1].astype(numpy.int32)
# mnsitTrainFull['x'] = mnsitTrainVal[:,:-1]
# mnsitTrainFull['y'] = mnsitTrainVal[:,-1].astype(numpy.int32)
# mnsitTest['x'] = mnsitTestRaw[:,:-1]
# mnsitTest['y'] = mnsitTestRaw[:,-1].astype(numpy.int32)
mnsitTrain = {'x': numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_train_imanges.npy"), \
'y': (numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_train_labels.npy")[:,:1]).astype(numpy.int32), \
'a': (numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_train_labels.npy")[:,1:2])}
mnsitTest = {'x': numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_test_imanges.npy"), \
'y': (numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_test_labels.npy")[:,:1]).astype(numpy.int32), \
'a': (numpy.load("/scratch/georgioutk/mnist/my_mnist_rot/mnist_rot_test_labels.npy")[:,1:2])}
# manager = multiprocessing.Manager()
# trainSet = []
# threads = []
# for i in range(numCpus):
# trainSet.append(manager.dict())
# p = multiprocessing.Process(target=rotateDataset, args=(), kwargs={'dataset':mnsitTrain['x'].reshape([10000,28,28,1]), 'output':trainSet[-1]})
# threads.append(p)
# p.start()
currLr = 1e-3
lr = tf.Variable(currLr, dtype=tf.float32, trainable=False)
tf.add_to_collection("learning_rate", lr)
# [next_example, next_label], [next_testExample, next_testLabel], [trainIterator, testIterator] = preprocessing.colorInput(batch_size, mnsitTrain['x'].reshape([10000,28,28,1]), mnsitTrain['y'].reshape([10000,1]))
[next_example, next_label, next_angle], [next_testExample, next_testLabel, next_testAngle], [trainIterator, testIterator] = \
preprocessing.gradientOrientationWithAngle(batch_size, mnsitTrain['x'], mnsitTrain['y'], mnsitTrain['a'], testBatch_size=testBatch_size)
# [next_example, next_label], [next_testExample, next_testLabel], [trainIterator, testIterator] = preprocessing.gradientOrientation(batch_size, mnsitTrain['x'].reshape([12000,28,28,1]), mnsitTrain['y'].reshape([12000,1]), testBatch_size=testBatch_size)
# [next_example, next_label], [next_testExample, next_testLabel], [trainIterator, testIterator] = preprocessing.gradientOrientation(batch_size, testBatch_size=testBatch_size)
# paddings = tf.constant([[0,0],[2,2],[2,2],[0,0]])
# next_example = tf.pad(next_example, paddings)
# next_testExample = tf.pad(next_testExample, paddings)
nex = tf.split(next_example, numGpus, axis=0)
nla = tf.split(next_label, numGpus, axis=0)
nan = tf.split(next_angle, numGpus, axis=0)
test_nex = tf.split(next_testExample, numGpusTest, axis=0)
test_nla = tf.split(next_testLabel, numGpusTest, axis=0)
for i in range(numGpus):
with tf.name_scope('tower_%d' % (i)) as scope:
with tf.device('/gpu:%d' % i):
print("Defining tower "+str(i))
softmax_linear, prob, pred_angle = network.inference(nex[i], batch_size//numGpus, "train", first=(i==0), resuse_batch_norm=False, fs=7, normalizationMode="bn")
pred_angle = tf.squeeze(cc.ops.reduceIndex(pred_angle, tf.expand_dims(tf.expand_dims(nla[i], axis=-1), axis=-1)))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=softmax_linear, labels=nla[i], name='cross_entropy_per_example_'+str(i))
# nan2 = nan[i] - numpy.pi
angleDif = nan[i] - pred_angle
# angleDif2 = nan2 - pred_angle
normAngleDif = tf.abs(tf.atan2(tf.sin(angleDif), tf.cos(angleDif)))
# normAngleDif2 = tf.abs(tf.atan2(tf.sin(angleDif2), tf.cos(angleDif2)))
# normAngleDif = tf.reduce_min(tf.stack([normAngleDif, normAngleDif2], axis=0), axis=0)
meanAnDif = tf.reduce_mean(normAngleDif, name="angle_loss_"+str(i))
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_'+str(i))
tf.add_to_collection('x_entropies', cross_entropy_mean)
tf.add_to_collection('angle_losses', meanAnDif)
# softmax_linear, prob, weightDecayFactor = inference(next_example, batch_size, "train", first=True, resuse_batch_norm=False)
# cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=softmax_linear, labels=next_label, name='cross_entropy_per_example')
# cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
# tf.add_to_collection('losses', cross_entropy_mean)
tf.add_to_collection('losses', tf.reduce_mean(tf.get_collection('x_entropies')))
tf.add_to_collection('losses', tf.reduce_mean(tf.get_collection('angle_losses')))
print("All towers defined.")
netOut = []
netLogits = []
netAngles = []
for i in range(numGpusTest):
with tf.name_scope('tower_%d' % (i)) as scope:
with tf.device('/gpu:%d' % i):
testSoftmax, testProb, testPredAngle = network.inference(test_nex[i], testBatch_size//numGpusTest, "test", first=False, resuse_batch_norm=True, fs=7, normalizationMode="bn")
testPredAngle = tf.squeeze(cc.ops.reduceIndex(testPredAngle, tf.expand_dims(tf.expand_dims(test_nla[i], axis=-1), axis=-1)))
netOut.append(testProb)
netLogits.append(testSoftmax)
netAngles.append(testPredAngle)
netLogits = tf.concat(netLogits, axis=0)
netAngles = tf.concat(netAngles, axis=0)
# next_testAngle2 = tf.where(tf.logical_or(tf.equal(next_testLabel, 0), tf.equal(next_testLabel, 8)), next_testAngle-numpy.pi, next_testAngle)
anDif = next_testAngle - netAngles
# anDif2 = next_testAngle2 - netAngles
testAnDif = tf.reduce_mean(tf.abs(tf.atan2(tf.sin(anDif), tf.cos(anDif))))
# testAnDif2 = tf.reduce_mean(tf.abs(tf.atan2(tf.sin(anDif2), tf.cos(anDif2))))
# testAnDif = tf.reduce_min(tf.stack([testAnDif, testAnDif2], axis=0), axis=0)
testXEntropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=netLogits, labels=next_testLabel))
top_1 = tf.nn.in_top_k(tf.concat(netOut, axis=0), next_testLabel, 1)
print("Test towers defined.")
# cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=softmax_linear, labels=next_label, name='cross_entropy_per_example')
# cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
# tf.add_to_collection('losses', cross_entropy_mean)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
loss_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
# opt = tf.train.GradientDescentOptimizer(lr)
opt = tf.train.AdamOptimizer(lr)
# opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
# grads = opt.compute_gradients(total_loss, var_list=tf.trainable_variables())
grads = opt.compute_gradients(total_loss, var_list=tf.trainable_variables(), colocate_gradients_with_ops=True)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
regOps = tf.get_collection("regularizationOps")
# Track the moving averages of all trainable variables.
model_vars = tf.trainable_variables()
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(model_vars)
for l in tf.get_collection("losses") + [total_loss]:
tf.summary.scalar(l.op.name +' (raw)', l)
for l in tf.get_collection("x_entropies"):
tf.summary.scalar(l.op.name +' (raw)', l)
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
with tf.variable_scope('BackupVariables'):
backup_vars = [tf.get_variable(var.op.name, dtype=var.value().dtype, trainable=False, initializer=var.initialized_value()) for var in model_vars]
empty_op = lambda: tf.group()
to_test_op = to_testing(variable_averages)
to_train_op = to_training()
saver = tf.train.Saver(tf.global_variables())
saverMax = tf.train.Saver(tf.global_variables())
# idcs = pickle.load(open("mnistRandInds.pkl", "rb"))
inputPlaceholders = tf.get_collection("inputTrainData")
init = tf.global_variables_initializer()
myconfig = tf.ConfigProto(log_device_placement=False)
myconfig.gpu_options.allow_growth = True
sess = tf.Session(config=myconfig)
writer = tf.summary.FileWriter(train_dir, sess.graph)
writerMax = tf.summary.FileWriter(train_dir[:-1]+"Release/", sess.graph)
sess.run(init)
# print("Stop preparing data")
# x_train = []
# y_train = []
# for t in range(numCpus):
# threads[t].join()
# x_train.append(trainSet[t]["x"])
# y_train.append(mnsitTrain['y'].reshape([10000,1]))
# trainSet[t]["x"] = None
# trainSet[t]["y"] = None
# x_train = numpy.concatenate(x_train, axis=0)
# y_train = numpy.concatenate(y_train, axis=0)
# print("Start preparing data")
# threads = []
# for i in range(numCpus):
# p = multiprocessing.Process(target=rotateDataset, args=(), kwargs={'dataset':mnsitTrain['x'].reshape([10000,28,28,1]), 'output':trainSet[i]})
# threads.append(p)
# threads[i].start()
# sess.run(trainIterator.initializer, feed_dict={inputPlaceholders[0]: x_train, inputPlaceholders[1]: y_train})
log = open(baseDir+".txt", "w", 1)
# log = open("cc6LHalfWidth256LastVectorNormTraininAugmentationNewRotationPadBeforeGradsNL360Epochs.txt", "w", 1)
_summ = tf.summary.merge_all()
max_val = 0
max_test = 0
SuccRateValidation = None
SuccRateTest = None
SuccRate_summary = tf.Summary()
SuccRate_summary.value.add(tag='validation_accuracy', simple_value=SuccRateValidation)
SuccRate_summary.value.add(tag='max_validation_accuracy', simple_value=max_val)
stepsPerEpoch = mnsitTrain['x'].shape[0]//batch_size
scores = []
for step in range(int(numEpochs*mnsitTrain['x'].shape[0]/batch_size)):
if step % (mnsitTrain['x'].shape[0]*30//(batch_size*0.25)) == 0 and (currLr > 1e-5) and step > 0:
currLr = 1e-5
print(str(step)+": learning rate = "+str(currLr))
lr.load(currLr, sess)
# if step % (mnsitTrain['x'].shape[0]*60//(batch_size*0.25)) == 0 and (currLr == 1e-5) and step > 0:
# currLr = 2e-6
# print(str(step)+": learning rate = "+str(currLr))
# lr.load(currLr, sess)
# if step % (mnsitTrain['x'].shape[0]*60//(batch_size*0.25)) == 0 and (currLr > 1e-6) and step > 0:
# currLr = 1e-7
# print(str(step)+": learning rate = "+str(currLr))
# lr.load(currLr, sess)
	# if step % (mnsitTrain['x'].shape[0]*70//(batch_size*0.25)) == 0 and (currLr > 1e-7) and step > 0:
# currLr = 1e-7
# print(str(step)+": learning rate = "+str(currLr))
# lr.load(currLr, sess)
# if step >= 15*stepsPerEpoch and (step % stepsPerEpoch) == 0:
# currLr *= 0.8
# print("learning rate = "+str(currLr), file=log)
# lr.load(currLr, sess)
# if step == 10000:
# currLr *= 0.8
# print("learning rate = "+str(currLr), file=log)
# lr.load(currLr, sess)
# if step == 20000:
# currLr *= 0.8
# print("learning rate = "+str(currLr), file=log)
# lr.load(currLr, sess)
# if step == 22000:
# currLr *= 0.2
# print("learning rate = "+str(currLr), file=log)
# lr.load(currLr, sess)
__ = sess.run(regOps)
exEntropy, totalLoss, summ, _ = sess.run([cross_entropy_mean, total_loss, _summ, train_op])
# try:
# exEntropy, totalLoss, summ, _ = sess.run([cross_entropy_mean, total_loss, _summ, train_op])
# except tf.errors.OutOfRangeError:
# x_train = []
# y_train = []
# for t in range(numCpus):
# threads[t].join()
# x_train.append(trainSet[t]["x"])
# y_train.append(mnsitTrain['y'].reshape([10000,1]))
# trainSet[t]["x"] = None
# trainSet[t]["y"] = None
# x_train = numpy.concatenate(x_train, axis=0)
# y_train = numpy.concatenate(y_train, axis=0)
# sess.run(trainIterator.initializer, feed_dict={inputPlaceholders[0]: x_train, inputPlaceholders[1]: y_train})
# threads = []
# for i in range(numCpus):
# p = multiprocessing.Process(target=rotateDataset, args=(), kwargs={'dataset':mnsitTrain['x'].reshape([10000,28,28,1]), 'output':trainSet[i]})
# threads.append(p)
# threads[i].start()
# continue
writer.add_summary(summ, step)
# print(str(step)+" "+str(exEntropy), file=log)
if step % (mnsitTrain['x'].shape[0]//(batch_size*0.25)) == 0:
print("%2.2f"%(step*100/(int(numEpochs*mnsitTrain['x'].shape[0]/batch_size))), end="\r", flush=True)
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
if step % (mnsitTrain['x'].shape[0]//(batch_size*0.25)) == 0 and step != 0:
sess.run(to_test_op)
# valExEntropy, SuccRateValidation = testNetwork(sess, [top_1, testXEntropy], testBatch_size, testIterator, mnsitVal['x'].reshape([2000,28,28,1]), mnsitVal['y'].reshape([2000,1]), log)
valExEntropy, SuccRateValidation, anDifValidation = testNetwork(sess, [top_1, testXEntropy, testAnDif], testBatch_size, testIterator, mnsitTrain['x'], mnsitTrain['y'], mnsitTrain['a'], log)
# trainFullExEntropy, SuccRateTrainFull = testNetwork(sess, [top_1, testXEntropy], testBatch_size, testIterator, mnsitTrainFull['x'].reshape([12000,28,28,1]), mnsitTrainFull['y'].reshape([12000,1]), log)
TestExEntropy, SuccRateTest, anDifTest = testNetwork(sess, [top_1, testXEntropy, testAnDif], testBatch_size, testIterator, mnsitTest['x'], mnsitTest['y'], mnsitTest['a'], log)
# print("Validation: "+str(SuccRateValidation)+"/"+str(max_val), file=log)
print("Scores: "+str([valExEntropy, TestExEntropy, SuccRateValidation, SuccRateTest, anDifValidation, anDifTest]), file=log)
scores.append([valExEntropy, SuccRateValidation, TestExEntropy, SuccRateTest])
if valExEntropy > max_val:
max_val = valExEntropy
checkpoint_path = os.path.join(train_dir[:-1]+"Release/", 'model.ckpt')
saverMax.save(sess, checkpoint_path, global_step=step)
writerMax.add_summary(summ, step)
SuccRate_summary.value[0].simple_value = valExEntropy
SuccRate_summary.value[1].simple_value = max_val
writer.add_summary(SuccRate_summary, step)
sess.run(to_train_op)
# if step % 30*mnsitTrain['x'].shape[0]//batch_size == 0 and step != 0:
# currLr /= 10
# lr.load(currLr, sess)
print("Saving..")
pickle.dump(scores, open("scores.pkl", "wb"))
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
sess.run(to_test_op)
valExEntropy, SuccRateValidation, anDifValidation = testNetwork(sess, [top_1, testXEntropy, testAnDif], testBatch_size, testIterator, mnsitTrain['x'], mnsitTrain['y'], mnsitTrain['a'], log)
# valExEntropy, SuccRateValidation = testNetwork(sess, [top_1, testXEntropy], testBatch_size, testIterator, mnsitVal['x'].reshape([2000,28,28,1]), mnsitVal['y'].reshape([2000,1]), log)
print("Validation: "+str(SuccRateValidation)+"/"+str(max_val)+" - "+str(anDifValidation), file=log)
SuccRate_summary.value[0].simple_value = valExEntropy
if valExEntropy > max_val:
max_val = valExEntropy
checkpoint_path = os.path.join(train_dir[:-1]+"Release/", 'model.ckpt')
saverMax.save(sess, checkpoint_path, global_step=step)
writerMax.add_summary(summ, step)
SuccRate_summary.value[1].simple_value = max_val
writer.add_summary(SuccRate_summary, step)
sess.run(to_train_op)
| [
"[email protected]"
]
| |
b687da01dd8d2a85a8e6edf65dbb440abe5934a3 | 5a5b109bb29b21d3e2805a1cdac72261d862e2ae | /loop/ForElse.py | 2ad6e235650d27e97659178806759d836ac3da80 | []
| no_license | 3454833326/geek | 428030f7f780e9cd6b6f775b4518f295c6b6bb2d | 39214ff8c68e60b065f636da6dcb04c52e2a787a | refs/heads/master | 2021-05-27T04:20:16.035334 | 2020-04-08T23:27:25 | 2020-04-08T23:27:25 | 254,203,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | for i in range(1,10):
for j in range(1,i+1):
print(f'{i}*{j}={i*j}',end=' ')
print()
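# The loop above prints the 9x9 multiplication table; the loop below draws a
# left-aligned triangle of stars with rows of 1 to 5 '*' characters.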
for i in range(1,6):
    for j in range(1,i+1):  # controls how many stars are printed in row i
print('*',end=' ')
print()
| [
"[email protected]"
]
| |
a86077f8cd3c3a560ec3bc48a61c949481ea7daf | aaf4ac03bc2497e5dc6df63a7c6ac8b88ec535be | /Notebooks/week-8/day1/twitter/settings.py | ae1e1d647a123e993c49e6e7dbb899f71894f85a | []
| no_license | tiy-gvl-python-2015-09/class-notes | ae7864e868eb052a7bd182c0416f4feba11c5182 | e7b1f4849f6045d657f5034d3432070c6cc08c57 | refs/heads/master | 2016-09-06T15:41:42.356534 | 2015-11-02T17:05:04 | 2015-11-02T17:05:04 | 42,471,740 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | """
Django settings for twitter project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jh$q4pu8+7yp88rlzzz3_lp4wa$-=j__l$v6k!m*d6#h%-6&8w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tweet',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'twitter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'twitter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
| [
"[email protected]"
]
| |
d12a52f71e3698240804934cd88872171f43888d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/79/usersdata/247/43511/submittedfiles/serie1.py | 90e784d791ec3951343a353ba1a25c5d7cf6ffee | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | # -*- coding: utf-8 -*-
import math
n=int(input('digite n: '))
d=1
c=1
soma=0
t=1
while t<=n:
d=d+1
c=d**2
t=t+1
    if t%2==0:
        soma=soma-(d/c)  # true division assumed (d/c == 1/d); floor division would always give 0 here, since c = d**2 > d
    else:
        soma=soma+(d/c)
print(soma)  # assumed intent: report the accumulated sum of the series
| [
"[email protected]"
]
| |
3219371d6d9064ce91bbce562de29eee8403174a | 4da55187c399730f13c5705686f4b9af5d957a3f | /resources/sumo_exporter/crossroad.py | 066119d5b50767c46efb4945edfc1ac6845750d0 | [
"Apache-2.0"
]
| permissive | Ewenwan/webots | 7111c5587100cf35a9993ab923b39b9e364e680a | 6b7b773d20359a4bcf29ad07384c5cf4698d86d3 | refs/heads/master | 2020-04-17T00:23:54.404153 | 2019-01-16T13:58:12 | 2019-01-16T13:58:12 | 166,048,591 | 2 | 0 | Apache-2.0 | 2019-01-16T13:53:50 | 2019-01-16T13:53:50 | null | UTF-8 | Python | false | false | 3,345 | py | """Road class container."""
import math
import re
from re_definitions import floatRE, intRE
from data_structures import grouper
from lxml import etree as ET
class Crossroad(object):
"""Class matching with a Webots Crossroad, containing facilities to export to SUMO junctions."""
crossroads = []
def __init__(self, crossroadType):
"""Constructor: Initialize the crossroad with a unique id."""
self.roads = [] # connected roads
self.id = 'Custom%d' % len(Crossroad.crossroads)
self.translation = [0.0, 0.0, 0.0]
self.connectedRoadIDs = []
self.shape = []
self.crossroadType = crossroadType
def init_from_wbt_string(self, wbtString):
"""Extract info from the wbtString matching the node."""
try:
self.id = re.findall(r'id\s*"([^"]*)"', wbtString)[0]
        except Exception:
pass
try:
self.translation = [float(x) for x in re.findall(r'translation\s*(%s\s*%s\s*%s)' % (floatRE, floatRE, floatRE), wbtString)[0].split()]
        except Exception:
pass
try:
self.rotation = [float(x) for x in re.findall(r'rotation\s*(%s\s*%s\s*%s\s*%s)' % (floatRE, floatRE, floatRE, floatRE), wbtString)[0].split()]
        except Exception:
self.rotation = [0.0, 1.0, 0.0, 0.0]
try:
self.connectedRoadIDs = [x.replace('"', '') for x in re.findall(r'connectedRoadIDs\s*\[([^\]]*)\]', wbtString)[0].split()]
        except Exception:
pass
if self.crossroadType == "Crossroad":
try:
self.shape = grouper(3, [float(x) for x in re.findall(r'shape\s*\[([^\]]*)\]', wbtString)[0].split()])
            except Exception:
pass
elif self.crossroadType == "RoadIntersection":
roadNumber = 4
self.shape = []
try:
roadNumber = int(re.findall(r'roadNumber\s*(%s)' % intRE, wbtString)[0])
            except Exception:
roadNumber = 4
roadsWidth = 7.0
try:
roadsWidth = float(re.findall(r'roadsWidth\s*(%s)' % floatRE, wbtString)[0])
            except Exception:
roadsWidth = 7.0
outerRadius = roadsWidth / (2 * math.sin(math.pi / roadNumber))
angle = self.rotation[3]
if self.rotation[1] > 0:
angle = -angle
for i in range(roadNumber):
x1 = outerRadius * math.cos(2 * math.pi * i / roadNumber)
y1 = outerRadius * math.sin(2 * math.pi * i / roadNumber)
x2 = math.cos(angle) * x1 - math.sin(angle) * y1
y2 = math.cos(angle) * y1 + math.sin(angle) * x1
self.shape.append([x2, 0, y2])
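    # Illustrative .wbt fragment matched by the regexes above (example values):
    #   translation 4.5 0 -2.5
    #   rotation 0 1 0 1.5708
    #   connectedRoadIDs [ "road_1" "road_2" ]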
def create_node(self, nodes):
"""Populate the SUMO XML node."""
node = ET.SubElement(nodes, 'node')
node.attrib['id'] = self.id
node.attrib['x'] = str(- self.translation[0])
node.attrib['y'] = str(self.translation[2])
if len(self.shape) > 0:
shape = ""
for wayPoint in self.shape:
shape += "%f,%f " % (- wayPoint[0] - self.translation[0], wayPoint[2] + self.translation[2])
shape += "%f,%f" % (- self.shape[0][0] - self.translation[0], self.shape[0][2] + self.translation[2])
node.attrib['shape'] = shape
| [
"[email protected]"
]
| |
d7f490aa2885aa9e485209b26c0ef98369f6b933 | 6bfda75657070e177fa620a43c917096cbd3c550 | /kubernetes/client/models/v1beta1_daemon_set.py | 60bc14e1d3b94368a23f36ef626edc220da4a1ff | [
"Apache-2.0"
]
| permissive | don41382/client-python | 8e7e747a62f9f4fc0402eea1a877eab1bb80ab36 | e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe | refs/heads/master | 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null | UTF-8 | Python | false | false | 7,290 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1DaemonSet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta1DaemonSet - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1DaemonSetSpec',
'status': 'V1beta1DaemonSetStatus'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._spec = spec
self._status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta1DaemonSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1beta1DaemonSet.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1DaemonSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1DaemonSet.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1DaemonSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1DaemonSet.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1DaemonSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1DaemonSet.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1DaemonSet.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1beta1DaemonSet.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1DaemonSet.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1beta1DaemonSet.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta1DaemonSet.
The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The spec of this V1beta1DaemonSet.
:rtype: V1beta1DaemonSetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta1DaemonSet.
The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param spec: The spec of this V1beta1DaemonSet.
:type: V1beta1DaemonSetSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta1DaemonSet.
The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The status of this V1beta1DaemonSet.
:rtype: V1beta1DaemonSetStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta1DaemonSet.
The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param status: The status of this V1beta1DaemonSet.
:type: V1beta1DaemonSetStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
85b2df707765c4c88c8d8c146a13f60e046de768 | 460b244bbec6b389628eeb764b5e0b7e867e02cb | /optimization/core/util_data.py | 55284667810af9c0a148fac5e10e8db3e6ceaed7 | []
| no_license | chrisgarcia001/Synchronized-Multi-Assignment-Orienteering | e570af8fbc43d8731bf02b2abcd24a2c092aae23 | 70479c06d621086b7f9f2f675176aea5032bbdd3 | refs/heads/master | 2023-04-13T11:25:16.588814 | 2022-11-01T04:08:21 | 2022-11-01T04:08:21 | 427,838,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,931 | py | #-----------------------------------------------------------------------------------------------------
# Author: [email protected]
# Created: 2/20/2020
# About: This file contains some utility functions for generating data.
#
# Revision History:
# Date Author Change Description
# 2/20/2020 cgarcia Initial version.
#-----------------------------------------------------------------------------------------------------
import random as rnd
import functools as ft
# Generate a matrix of constant values.
def const_matrix(n_rows, n_columns, const=0):
return [[const for j in range(n_columns)] for i in range(n_rows)]
# Expands a matrix (2D list) by duplicating rows and columns according to the specified dimensional factors.
def expand_matrix(matrix, row_factor=1, column_factor=1):
rows = []
for i in matrix:
for rf in range(row_factor):
row = list(i)
col = []
for j in row:
for cf in range(column_factor):
col.append(j)
rows.append(col)
return rows
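# Illustrative sketch of the expansion: each row is repeated row_factor times
# and each element within a row column_factor times, e.g.
#   expand_matrix([[1, 2]], row_factor=2, column_factor=2)
#   -> [[1, 1, 2, 2], [1, 1, 2, 2]]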
# Generate a random float in the specified range.
def rand_float(minf, maxf):
return minf + ((maxf - minf) * rnd.random())
# Generate a random segment in form of (start, finish), based on the length and bound constraints specified.
def rand_segment(min_length, max_length, lower_bound, upper_bound, integer=False):
min_length = min(min_length, upper_bound - lower_bound)
max_length = min(max_length, upper_bound - lower_bound)
length = rand_float(min_length, max_length)
position = rand_float(lower_bound, upper_bound - length)
if not(integer):
return (position, position + length)
else:
a = round(position)
b = min(round(position + length), int(upper_bound))
return(int(a), int(b))
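# Example sketch: rand_segment(2, 4, 0, 10) yields a random (start, finish)
# pair such as (3.7, 6.9), where finish - start lies in [2, 4] and both
# endpoints fall within [0, 10]; with integer=True both endpoints are ints.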
# Generate a list of random 0/1 values according to the specified probability of getting a 1.
def rand_binaries(n, prob_1):
rb = lambda: 1 if rnd.random() < prob_1 else 0
return [rb() for i in range(n)]
# Given m items and n slots, randomly distribute the items to the slots and return the final slot item counts.
def rand_slot_distribute(m_items, n_slots):
slots = [0 for i in range(n_slots)]
for i in range(m_items):
slots[rnd.randint(0, n_slots - 1)] += 1
return slots
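# Example sketch: rand_slot_distribute(5, 3) might return [2, 0, 3]; the
# returned counts always sum to m_items (5 here), one count per slot.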
# Generates a random binary matrix such that no row or column sums to zero, provided sum(column_sums) >= n_rows.
# @param n_rows: the number of rows in the matrix
# @param column_sums: a column vector specifying the sums that each column should have in the final matrix.
def rand_bin_matrix(n_rows, column_sums):
column_sums = [min(s, n_rows) for s in column_sums] # safety feature to prevent an infinite loop
mat = const_matrix(n_rows, len(column_sums), 0)
zeros = [0 for i in range(len(column_sums))]
i = 0
while column_sums != zeros:
if i >= n_rows:
i = 0
rnd.shuffle(mat)
try:
j = rnd.sample([x for x in range(len(column_sums)) if mat[i][x] == 0 and column_sums[x] != 0], 1)[0]
mat[i][j] += 1
column_sums[j] -= 1
except:
pass
i += 1
return mat
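# Example sketch: rand_bin_matrix(3, [2, 1]) yields a 3x2 binary matrix whose
# columns sum to 2 and 1, e.g. [[1, 0], [0, 1], [1, 0]] -- each row receives
# at least one 1 because sum(column_sums) >= n_rows here.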
# Return a shuffled copy of some_list.
def shuffled(some_list):
x = list(some_list)
rnd.shuffle(x)
return x
# Breaks a segment/window to a set of discrete points evenly separated by delta.
# @param a: the lower point of the segment
# @param b: the upper point of the segment
# @param delta: the distance between points, except for possibly the last point.
# @returns: a list of numbers consisting of points in the segment.
def segment_to_points(a, b, delta):
points = []
curr = a
while curr < b:
points.append(curr)
curr += delta
points.append(b)
return points
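# Example: segment_to_points(0, 1, 0.4) -> [0, 0.4, 0.8, 1]; the final gap
# may be shorter than delta so that the endpoint b is always included.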
# Given a list of numeric weights that correspond to the probability that an index will be chosen, randomly select
# one of the indices and return it.
def random_weighted_index(weights):
if len(weights) < 1:
        raise ValueError('random_weighted_index: weights must not be empty')
sm = sum(weights)
tw = 0
rd = rand_float(0, sm)
for i in range(len(weights)):
tw += weights[i]
if rd <= tw:
            return i
    return len(weights) - 1  # guard against floating-point drift in the running total
# For a set of matrices all containing m rows, return the column-concatenated matrix that results
def cbind(*matrices):
return ft.reduce(lambda w,x: [y + z for (y, z) in zip(w, x)], matrices)
# For a set of matrices all containing n columns, return the row-concatenated matrix that results
def rbind(*matrices):
return ft.reduce(lambda x,y: x + y, matrices)
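# Illustrative sketch: with a = [[1], [2]] and b = [[3], [4]],
#   cbind(a, b) -> [[1, 3], [2, 4]]      (columns placed side by side)
#   rbind(a, b) -> [[1], [2], [3], [4]]  (rows stacked vertically)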
# Compute the dot product between x and y.
def dot(x, y):
return sum(a * b for (a,b) in zip(x, y))
# Compute the transpose of a matrix.
def transpose(matrix):
return [list(x) for x in zip(*matrix)]
| [
"[email protected]"
]
| |
42b4d2cf9e093309d52964fb146fcb504ce9a272 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/iul.py | db39819f804cf833bbe9f7691ab507e47bf45447 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'iUL':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
03af5da735bc880fd6176f87063e0985480a8d6a | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_09_Code/4375_09_21_binomial_Su_Sd.py | c2af289baf7f964e56e4f84512260d98c35b921b | []
| no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | """
Name : 4375OS_09_21_binomial_Su_Sd.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : [email protected]
[email protected]
"""
import matplotlib.pyplot as plt
plt.xlim(0, 1)
plt.figtext(0.18,0.5,'S')
plt.figtext(0.6,0.5+0.25,'Su')
plt.figtext(0.6,0.5-0.25,'Sd')
plt.annotate('',xy=(0.6,0.5+0.25), xytext=(0.1,0.5), arrowprops=dict(facecolor='b',shrink=0.01))
plt.annotate('',xy=(0.6,0.5-0.25), xytext=(0.1,0.5), arrowprops=dict(facecolor='b',shrink=0.01))
plt.axis('off')
plt.show()
| [
"[email protected]"
]
| |
614e8846da19530315cf0a985d7bc505b4ad1ffa | 03052ddcbda8944b836248ba3de58fecc946974e | /cardinal_pythonlib/rnc_text.py | 41945d8709dd3bbff2f51a2390ef19664823c865 | [
"Apache-2.0"
]
| permissive | kileung-at-cb/pythonlib | 1bc31ebd78b3404bfefb77ce675a7d3a19dda89d | b79595de03a09796b48e7d6d19d4c37ddcb643ac | refs/heads/master | 2021-01-01T06:42:24.240931 | 2017-05-02T08:52:11 | 2017-05-02T08:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,763 | py | #!/usr/bin/env python
# -*- encoding: utf8 -*-
"""Textfile results storage.
Author: Rudolf Cardinal ([email protected])
Created: 2009
Last update: 24 Sep 2015
Copyright/licensing:
Copyright (C) 2009-2015 Rudolf Cardinal ([email protected]).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import datetime
from typing import Any, Dict, Iterable, List, Optional, Sequence, TextIO, Tuple
def produce_csv_output(filehandle: TextIO,
fields: Sequence[str],
values: Iterable[str]) -> None:
"""Produce CSV output, without using csv.writer, so the log can be used for
lots of things."""
output_csv(filehandle, fields)
for row in values:
output_csv(filehandle, row)
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:
line = ",".join(values)
filehandle.write(line + "\n")
def get_what_follows_raw(s: str,
prefix: str,
onlyatstart: bool = True,
stripwhitespace: bool = True) -> Tuple[bool, str]:
prefixstart = s.find(prefix)
if ((prefixstart == 0 and onlyatstart) or
(prefixstart != -1 and not onlyatstart)):
# substring found
resultstart = prefixstart + len(prefix)
result = s[resultstart:]
if stripwhitespace:
result = result.strip()
return True, result
return False, ""
def get_what_follows(strings: Sequence[str],
prefix: str,
onlyatstart: bool = True,
stripwhitespace: bool = True,
precedingline: str = "") -> str:
if not precedingline:
for s in strings:
(found, result) = get_what_follows_raw(s, prefix, onlyatstart,
stripwhitespace)
if found:
return result
return ""
else:
for i in range(1, len(strings)): # i indexes the second of a pair
if strings[i-1].find(precedingline) == 0:
# ... if found at the start
(found, result) = get_what_follows_raw(strings[i], prefix,
onlyatstart,
stripwhitespace)
if found:
return result
return ""
def get_string(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[str]:
s = get_what_follows(strings, prefix, precedingline=precedingline)
if ignoreleadingcolon:
f = s.find(":")
if f != -1:
s = s[f+1:].strip()
if len(s) == 0:
return None
return s
def get_string_relative(strings: Sequence[str],
prefix1: str,
                        delta: int,
prefix2: str,
ignoreleadingcolon: bool = False,
stripwhitespace: bool = True) -> Optional[str]:
"""Finds line beginning prefix1. Moves delta lines. Returns end of line
beginning prefix2, if found."""
for firstline in range(0, len(strings)):
if strings[firstline].find(prefix1) == 0: # if found...
secondline = firstline + delta
if secondline < 0 or secondline >= len(strings):
continue
if strings[secondline].find(prefix2) == 0:
s = strings[secondline][len(prefix2):]
if stripwhitespace:
s = s.strip()
if ignoreleadingcolon:
f = s.find(":")
if f != -1:
s = s[f+1:].strip()
if stripwhitespace:
s = s.strip()
if len(s) == 0:
return None
return s
return None
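# Example sketch: get_string_relative(["Key: x", "Value: 42"], "Key:", 1, "Value:")
# -> "42" (the line one step after the "Key:" line, with its prefix removed).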
def get_int(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[int]:
return get_int_raw(get_string(strings, prefix,
ignoreleadingcolon=ignoreleadingcolon,
precedingline=precedingline))
def get_float(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[float]:
return get_float_raw(get_string(strings, prefix,
ignoreleadingcolon=ignoreleadingcolon,
precedingline=precedingline))
def get_int_raw(s: str) -> Optional[int]:
if s is None:
return None
return int(s)
def get_bool_raw(s: str) -> Optional[bool]:
if s == "Y" or s == "y":
return True
elif s == "N" or s == "n":
return False
return None
def get_float_raw(s: str) -> Optional[float]:
if s is None:
return None
return float(s)
def get_bool(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[bool]:
return get_bool_raw(get_string(strings, prefix,
ignoreleadingcolon=ignoreleadingcolon,
precedingline=precedingline))
def get_bool_relative(strings: Sequence[str],
prefix1: str,
                      delta: int,
prefix2: str,
ignoreleadingcolon: bool = False) -> Optional[bool]:
return get_bool_raw(get_string_relative(
strings, prefix1, delta, prefix2,
ignoreleadingcolon=ignoreleadingcolon))
def get_float_relative(strings: Sequence[str],
prefix1: str,
                       delta: int,
prefix2: str,
ignoreleadingcolon: bool = False) -> Optional[float]:
return get_float_raw(get_string_relative(
strings, prefix1, delta, prefix2,
ignoreleadingcolon=ignoreleadingcolon))
def get_int_relative(strings: Sequence[str],
prefix1: str,
                     delta: int,
prefix2: str,
ignoreleadingcolon: bool = False) -> Optional[int]:
return get_int_raw(get_string_relative(
strings, prefix1, delta, prefix2,
ignoreleadingcolon=ignoreleadingcolon))
def get_datetime(strings: Sequence[str],
prefix: str,
datetime_format_string: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[datetime.datetime]:
x = get_string(strings, prefix, ignoreleadingcolon=ignoreleadingcolon,
precedingline=precedingline)
    if not x:
        return None
# For the format strings you can pass to datetime.datetime.strptime, see
# http://docs.python.org/library/datetime.html
# A typical one is "%d-%b-%Y (%H:%M:%S)"
d = datetime.datetime.strptime(x, datetime_format_string)
return d
def find_line_beginning(strings: Sequence[str],
linestart: Optional[str]) -> int:
if linestart is None: # match an empty line
for i in range(len(strings)):
if is_empty_string(strings[i]):
return i
return -1
for i in range(len(strings)):
if strings[i].find(linestart) == 0:
return i
return -1
def find_line_containing(strings: Sequence[str], contents: str) -> int:
for i in range(len(strings)):
if strings[i].find(contents) != -1:
return i
return -1
def get_lines_from_to(strings: Sequence[str],
firstlinestart: str,
list_of_lastline_starts: Iterable[Optional[str]]) \
-> List[str]:
"""Takes a list of strings. Returns a list of strings FROM firstlinestart
(inclusive) TO one of list_of_lastline_starts (exclusive).
To search to the end of the list, use list_of_lastline_starts = []
To search to a blank line, use list_of_lastline_starts = [None]"""
start_index = find_line_beginning(strings, firstlinestart)
if start_index == -1:
return []
end_offset = None # itself a valid slice index
for lls in list_of_lastline_starts:
possible_end_offset = find_line_beginning(strings[start_index:], lls)
if possible_end_offset != -1: # found one
if end_offset is None or possible_end_offset < end_offset:
end_offset = possible_end_offset
end_index = None if end_offset is None else (start_index + end_offset)
return strings[start_index:end_index]
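# Example sketch: given lines = ["header", "a", "b", "", "footer"],
# get_lines_from_to(lines, "header", [None]) -> ["header", "a", "b"],
# i.e. everything from the "header" line up to (but excluding) the blank line.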
def is_empty_string(s: str) -> bool:
    return len(s.strip()) == 0
def csv_to_list_of_fields(lines: Sequence[str],
csvheader: str,
quotechar: str = '"') -> List[str]:
data = [] # type: List[str]
# an empty line marks the end of the block
csvlines = get_lines_from_to(lines, csvheader, [None])[1:]
# ... remove the CSV header
reader = csv.reader(csvlines, quotechar=quotechar)
for fields in reader:
data.append(fields)
return data
def csv_to_list_of_dicts(lines: Sequence[str],
csvheader: str,
quotechar: str = '"') -> List[Dict[str, str]]:
data = [] # type: List[Dict[str, str]]
# an empty line marks the end of the block
csvlines = get_lines_from_to(lines, csvheader, [None])[1:]
# ... remove the CSV header
headerfields = csvheader.split(",")
reader = csv.reader(csvlines, quotechar=quotechar)
for fields in reader:
row = {} # type: Dict[str, str]
for f in range(len(headerfields)):
row[headerfields[f]] = fields[f]
data.append(row)
return data
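# Example sketch: if lines contains the block
#   ["name,age", "alice,30", "bob,25", ""]
# then csv_to_list_of_dicts(lines, "name,age") returns
#   [{"name": "alice", "age": "30"}, {"name": "bob", "age": "25"}]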
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
d[key] = str(d[key])
if d[key] == "":
d[key] = None
def dictlist_convert_to_datetime(dict_list: Iterable[Dict],
key: str,
datetime_format_string: str) -> None:
for d in dict_list:
d[key] = datetime.datetime.strptime(d[key], datetime_format_string)
def dictlist_convert_to_int(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
try:
d[key] = int(d[key])
except ValueError:
d[key] = None
def dictlist_convert_to_float(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
try:
d[key] = float(d[key])
except ValueError:
d[key] = None
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
# d[key] = True if d[key] == "Y" else False
d[key] = 1 if d[key] == "Y" else 0
def dictlist_replace(dict_list: Iterable[Dict], key: str, value: Any) -> None:
for d in dict_list:
d[key] = value
def dictlist_wipe_key(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
d.pop(key, None)
| [
"[email protected]"
]
| |
0dd4858585bb91807053895813bd9c5cfe6a5169 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/warmheartli_ChatBotCourse/ChatBotCourse-master/subtitle/preprocess/mv_zip.py | abd02f4f9029aae4f962d508e75b310134b61ee9 | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 481 | py | import glob
import os
import fnmatch
import shutil
import sys
def iterfindfiles(path, fnexp):
for root, dirs, files in os.walk(path):
for filename in fnmatch.filter(files, fnexp):
yield os.path.join(root, filename)
i=0
for filename in iterfindfiles(r"./input/", "*.ZIP"):
i=i+1
newfilename = "zip/" + str(i) + "_" + os.path.basename(filename)
print filename + " <===> " + newfilename
shutil.move(filename, newfilename)
#sys.exit(-1)
| [
"[email protected]"
]
| |
ccfcc740765b86fd30656349bd302d96acc5b0ac | 2194b6c17f3153c5976d6ac4a9ab78211027adab | /otoroshi_admin_api_client/models/otoroshiutilsmailer_generic_mailer_settings.py | a92d1005941524a407022b597ec668261a6a32e8 | []
| no_license | krezreb/otoroshi-admin-api-client | 7fab5e873c9c5950d77fffce6bcf80d3fdf4c319 | 9b3156c11eac227024cfe4a26c0129618deb2c4d | refs/heads/master | 2023-05-08T08:32:00.982987 | 2021-05-27T09:55:00 | 2021-05-27T09:55:00 | 371,324,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.otoroshiutilsmailer_email_location import OtoroshiutilsmailerEmailLocation
from ..models.otoroshiutilsmailer_generic_mailer_settings_headers import OtoroshiutilsmailerGenericMailerSettingsHeaders
from ..models.otoroshiutilsmailer_generic_mailer_settings_type import OtoroshiutilsmailerGenericMailerSettingsType
from ..types import UNSET, Unset
T = TypeVar("T", bound="OtoroshiutilsmailerGenericMailerSettings")
@attr.s(auto_attribs=True)
class OtoroshiutilsmailerGenericMailerSettings:
"""Settings for the generic mailer (http requests)"""
headers: Union[Unset, OtoroshiutilsmailerGenericMailerSettingsHeaders] = UNSET
to: Union[Unset, List[OtoroshiutilsmailerEmailLocation]] = UNSET
type: Union[Unset, OtoroshiutilsmailerGenericMailerSettingsType] = UNSET
url: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
headers: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.headers, Unset):
headers = self.headers.to_dict()
to: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.to, Unset):
to = []
for to_item_data in self.to:
to_item = to_item_data.to_dict()
to.append(to_item)
type: Union[Unset, str] = UNSET
if not isinstance(self.type, Unset):
type = self.type.value
url = self.url
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if headers is not UNSET:
field_dict["headers"] = headers
if to is not UNSET:
field_dict["to"] = to
if type is not UNSET:
field_dict["type"] = type
if url is not UNSET:
field_dict["url"] = url
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
_headers = d.pop("headers", UNSET)
headers: Union[Unset, OtoroshiutilsmailerGenericMailerSettingsHeaders]
if isinstance(_headers, Unset):
headers = UNSET
else:
headers = OtoroshiutilsmailerGenericMailerSettingsHeaders.from_dict(_headers)
to = []
_to = d.pop("to", UNSET)
for to_item_data in _to or []:
to_item = OtoroshiutilsmailerEmailLocation.from_dict(to_item_data)
to.append(to_item)
_type = d.pop("type", UNSET)
type: Union[Unset, OtoroshiutilsmailerGenericMailerSettingsType]
if isinstance(_type, Unset):
type = UNSET
else:
type = OtoroshiutilsmailerGenericMailerSettingsType(_type)
url = d.pop("url", UNSET)
otoroshiutilsmailer_generic_mailer_settings = cls(
headers=headers,
to=to,
type=type,
url=url,
)
otoroshiutilsmailer_generic_mailer_settings.additional_properties = d
return otoroshiutilsmailer_generic_mailer_settings
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| [
"[email protected]"
]
| |
84255a0dd811e56d25d5f188a90e82f3cde0ebfd | f360c6fe06fb9859039a5d39fad5815fd4aff372 | /community/community/settings.py | 03c23fe8c6938d3fa32e191fc23503d3ce85174c | []
| no_license | gwjczwy/Django-CMS | d6297055957548997e86d383d54ae051062c8854 | f1a00d637c65809d606df3d4b96bcc594af09bd8 | refs/heads/master | 2020-04-24T21:57:44.818864 | 2019-03-03T08:41:50 | 2019-03-03T08:41:50 | 172,295,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,206 | py | """
Django settings for community project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lj$)a*tr+m3bv)02z%hmj022hv^e&e&!qu88*wlybtd^5um9k8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posting',
'accounts',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'community.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'community.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
0f78b322495fcc4b94ab7128cbaa72bd339f862a | b8bbdfc593b6d816e67a344f720f90ec05236778 | /dev/mypy/plugin/decorators.py | 1957b59996576d5742ee2d4702404edfd4a247c8 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | apache/airflow | ed78db0a8bab7e096990e143926e52f518e288ab | 1b122c15030e99cef9d4ff26d3781a7a9d6949bc | refs/heads/main | 2023-09-01T08:37:34.556097 | 2023-09-01T06:49:05 | 2023-09-01T06:49:05 | 33,884,891 | 22,756 | 11,558 | Apache-2.0 | 2023-09-14T20:12:36 | 2015-04-13T18:04:58 | Python | UTF-8 | Python | false | false | 2,942 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import copy
import functools
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import FunctionContext, Plugin
from mypy.types import CallableType, NoneType, UnionType
TYPED_DECORATORS = {
"airflow.providers.google.cloud.hooks.dataflow._fallback_to_project_id_from_variables": ["project_id"],
"fallback_to_default_project_id of GoogleBaseHook": ["project_id"],
"provide_gcp_credential_file of GoogleBaseHook": [],
}
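# Illustrative effect (the signature below is hypothetical): if a hook method
#   def get_conn(self, project_id: str) -> Any
# is wrapped by fallback_to_default_project_id, this plugin makes mypy treat
# project_id as an optional named argument of type Optional[str], so callers
# may omit it without a type error.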
class TypedDecoratorPlugin(Plugin):
"""Mypy plugin for typed decorators."""
def get_function_hook(self, fullname: str):
"""Check for known typed decorators by name."""
if fullname in TYPED_DECORATORS:
return functools.partial(
_analyze_decorator,
provided_arguments=TYPED_DECORATORS[fullname],
)
return None
def _analyze_decorator(function_ctx: FunctionContext, provided_arguments: list[str]):
if not isinstance(function_ctx.arg_types[0][0], CallableType):
return function_ctx.default_return_type
if not isinstance(function_ctx.default_return_type, CallableType):
return function_ctx.default_return_type
return _change_decorator_function_type(
function_ctx.arg_types[0][0],
function_ctx.default_return_type,
provided_arguments,
)
def _change_decorator_function_type(
decorated: CallableType,
decorator: CallableType,
provided_arguments: list[str],
) -> CallableType:
decorator.arg_kinds = decorated.arg_kinds
decorator.arg_names = decorated.arg_names
# Mark provided arguments as optional
decorator.arg_types = copy.copy(decorated.arg_types)
for argument in provided_arguments:
try:
index = decorated.arg_names.index(argument)
except ValueError:
continue
decorated_type = decorated.arg_types[index]
decorator.arg_types[index] = UnionType.make_union([decorated_type, NoneType()])
decorated.arg_kinds[index] = ARG_NAMED_OPT
return decorator
def plugin(version: str):
"""Mypy plugin entrypoint."""
return TypedDecoratorPlugin
| [
"[email protected]"
]
| |
db7d202b86ebe4b5f5f44e2c932195e11dc2d9d3 | 2a157b0378fb3b59ffea8160de942b780e433bac | /surf.py | 4209986097b2a2ce431fe7d24d3a078c7d6202cf | []
| no_license | WeixinGithubJiang/imgret | bb05a5b4c71a5e023882f474007df468070264bd | 28ac6461de815e37539f1893c29d4af6d1c1647d | refs/heads/master | 2021-10-09T01:03:43.443255 | 2018-12-19T14:08:18 | 2018-12-19T14:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | import cv2
from local_feature import LocalFeature
class SURF(LocalFeature):
def __init__(self,
image_size,
keypoint_image_border_size,
max_keypoint_count,
ldescriptor_length,
hessian_threshold,
extended,
upright):
super(SURF, self).__init__(
image_size=image_size,
keypoint_image_border_size=keypoint_image_border_size,
max_keypoint_count=max_keypoint_count,
ldescriptor_length=ldescriptor_length)
self.feature_detector = cv2.xfeatures2d.SURF_create(
hessianThreshold=hessian_threshold,
nOctaves=4,
nOctaveLayers=3,
extended=extended,
upright=upright)
self.descriptor_extractor = self.feature_detector
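# Usage sketch (parameter values are illustrative, not library defaults):
#   surf = SURF(image_size=(480, 640), keypoint_image_border_size=16,
#               max_keypoint_count=500, ldescriptor_length=64,
#               hessian_threshold=400, extended=False, upright=False)
# The resulting object exposes OpenCV's SURF detector/extractor through the
# LocalFeature interface defined in local_feature.py.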
| [
"[email protected]"
]
| |
82e74179866ad9243cd200fd873cdcc54082b43c | eabf9d677b9ccd59f42e5359e46720899bf8cf10 | /PyFunceble/status/reputation/domain_and_ip.py | 6e54eed6e74d4487e4bbead828c16010eb4cac60 | [
"Apache-2.0"
]
| permissive | Phyleas/PyFunceble | 26263c55f7cf1f282348660cdadcb6c9c3989d70 | 1f6fb58e1afc29fc4418ffc84d1e066cbd836125 | refs/heads/master | 2023-08-21T22:27:14.448059 | 2021-06-13T08:50:33 | 2021-06-13T08:50:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,178 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the status interface for domains and IP reputation check.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import PyFunceble
from ..gatherer_base import GathererBase
class DomainAndIp(GathererBase):
"""
Gather the reputation of the given domain or IP.
"""
# pylint: disable=no-member
def __init__(self, subject, filename=None, whois_db=None, inactive_db=None):
super().__init__(
subject, filename=filename, whois_db=whois_db, inactive_db=inactive_db
)
self.subject_type += "domain"
self.__gather()
def __gather(self):
"""
Process the gathering.
"""
self.status["_status_source"] = self.status.status_source = "REPUTATION"
if self.status.domain_syntax_validation or self.status.ipv4_syntax_validation:
if self.status.tested in PyFunceble.lookup.IPv4Reputation():
self.status[
"_status"
] = self.status.status = PyFunceble.STATUS.official.malicious
else:
self.status[
"_status"
] = self.status.status = PyFunceble.STATUS.official.sane
else:
self.status[
"_status"
] = self.status.status = PyFunceble.STATUS.official.sane
PyFunceble.output.Generate(
self.status.given,
self.subject_type,
self.status.status,
source=self.status.status_source,
whois_server=self.status.whois_server,
filename=self.filename,
ip_validation=self.status.ipv4_syntax_validation
or self.status.ipv6_syntax_validation,
).status_file()
PyFunceble.LOGGER.debug(f"[{self.status.given}] State:\n{self.status.get()}")
| [
"[email protected]"
]
| |
b63576e0ab27f1712ffce71a6a7894c4cfe75dca | 6b19ed8845f7cb020ad49da57a0c0fe85314a274 | /zerver/migrations/0154_fix_invalid_bot_owner.py | 831dd1298d504d5f67ce28e23ff86f7dfc463cd7 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
]
| permissive | jahau/zulip | eb4da13858892065591caced88fc9a086fa0e0d2 | 51a8873579b9d4bb95219cd4a5c859fa972fa06b | refs/heads/master | 2021-05-18T03:44:32.003307 | 2020-03-27T22:29:55 | 2020-03-28T19:04:36 | 251,087,399 | 1 | 0 | Apache-2.0 | 2020-03-29T17:11:42 | 2020-03-29T17:11:42 | null | UTF-8 | Python | false | false | 890 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-03 01:52
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_fix_invalid_bot_owner_values(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
"""Fixes UserProfile objects that incorrectly had a bot_owner set"""
UserProfile = apps.get_model('zerver', 'UserProfile')
UserProfile.objects.filter(is_bot=False).exclude(bot_owner=None).update(bot_owner=None)
class Migration(migrations.Migration):
dependencies = [
('zerver', '0153_remove_int_float_custom_fields'),
]
operations = [
migrations.RunPython(
migrate_fix_invalid_bot_owner_values,
reverse_code=migrations.RunPython.noop),
]
| [
"[email protected]"
]
| |
b73696b3d2790af9f065fd9f7d86caf8d4ac6135 | 812045c3ec6587827aeb18bde666237dfffc21ae | /tf_quant_finance/models/heston/approximations/__init__.py | 5432abd6f5a8def3d371ed7293d0a56b73ae16a4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | google/tf-quant-finance | 2062082c85e8679b71e69bbeb579fe338c1b0288 | 0d3a2193c0f2d320b65e602cf01d7a617da484df | refs/heads/master | 2023-08-31T01:58:15.415811 | 2023-08-15T07:37:46 | 2023-08-15T07:38:22 | 198,669,252 | 4,165 | 557 | Apache-2.0 | 2023-08-04T19:25:55 | 2019-07-24T16:09:50 | Python | UTF-8 | Python | false | false | 1,143 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Approximations to the Heston model."""
from tf_quant_finance.models.heston.approximations.asian_prices import asian_option_price
from tf_quant_finance.models.heston.approximations.calibration import calibration
from tf_quant_finance.models.heston.approximations.european_option import european_option_price
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'asian_option_price',
'calibration',
'european_option_price',
]
remove_undocumented(__name__, _allowed_symbols)
| [
"[email protected]"
]
| |
e71034657c2ddde1dd53e31bbc3a037549103ec6 | 433a24663b73fa3550069fafe3a8af24c61a864c | /pyscreenshot/plugins/pyside2_grabwindow.py | 39b0cd494cb67bc94d84ddb506b22f64c38cc2d1 | [
"BSD-2-Clause"
]
| permissive | robocorp/rpaframework-screenshot | 472c9f73237df27266e68ff43dd96e2402eb6325 | 7cf03b23f4bdf1e4a2e3df1893de598e852dd346 | refs/heads/master | 2021-06-13T21:31:47.342026 | 2020-04-12T18:16:34 | 2020-04-12T18:16:34 | 254,451,568 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | import logging
from PIL import Image
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.util import py2
if py2():
import StringIO
BytesIO = StringIO.StringIO
else:
import io
BytesIO = io.BytesIO
log = logging.getLogger(__name__)
# TODO:PY2 error:
# TypeError: 'PySide2.QtGui.QScreen.grabWindow' called with wrong argument types:
# PySide2.QtGui.QScreen.grabWindow(int)
# Supported signatures:
# PySide2.QtGui.QScreen.grabWindow(WId, int = 0, int = 0, int = -1, int = -1)
# https://stackoverflow.com/questions/59118938/type-error-when-calling-qscreen-grabwindow
class PySide2BugError(Exception):
pass
app = None
class PySide2GrabWindow(CBackend):
name = "pyside2"
childprocess = False
apply_childprocess = True
def __init__(self):
pass
def grab_to_buffer(self, buff, file_type="png"):
if py2():
raise PySide2BugError()
import PySide2
from PySide2 import QtGui
from PySide2 import QtCore
from PySide2 import QtWidgets
QApplication = QtWidgets.QApplication
QBuffer = QtCore.QBuffer
QIODevice = QtCore.QIODevice
QScreen = QtGui.QScreen
# QPixmap = self.PySide2.QtGui.QPixmap
global app
if not app:
app = QApplication([])
qbuffer = QBuffer()
qbuffer.open(QIODevice.ReadWrite)
QScreen.grabWindow(
QApplication.primaryScreen(), QApplication.desktop().winId()
).save(qbuffer, file_type)
# https://stackoverflow.com/questions/52291585/pyside2-typeerror-bytes-object-cannot-be-interpreted-as-an-integer
buff.write(qbuffer.data().data())
qbuffer.close()
def grab(self, bbox=None):
strio = BytesIO()
self.grab_to_buffer(strio)
strio.seek(0)
im = Image.open(strio)
if bbox:
im = im.crop(bbox)
return im
def backend_version(self):
import PySide2
return PySide2.__version__
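# Usage sketch (assumes PySide2 is installed, Python 3 is used, and a
# display is available):
#   backend = PySide2GrabWindow()
#   image = backend.grab(bbox=(0, 0, 100, 100))  # PIL.Image of that region
#   image.save("screenshot.png")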
| [
"ponty@home"
]
| ponty@home |
dbf9838d8a0fe09396539ff90c7f896781279b36 | d489eadec9d4499ed066223f8e4881f14c3cc777 | /.ci/prep_azure.py | 5199a87e0ef8d5042c35777013a9fdcd20065a68 | [
"BSD-3-Clause"
]
| permissive | lumatijev/mitogen | d0121faa8c3aa87a08b09bbe5967d6c0a3ac1263 | b610b0c93bbab1bc0fbe86cfcc4f3a56fd2b2c14 | refs/heads/master | 2020-04-28T15:48:12.177452 | 2019-03-13T16:59:48 | 2019-03-13T16:59:48 | 175,389,282 | 0 | 0 | BSD-3-Clause | 2019-03-13T09:34:01 | 2019-03-13T09:34:00 | null | UTF-8 | Python | false | false | 616 | py | #!/usr/bin/env python
import os
import sys
import ci_lib
batches = []
if ci_lib.have_apt():
batches.append([
'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync',
'sudo add-apt-repository ppa:deadsnakes/ppa',
'sudo apt-get update',
'sudo apt-get -y install python2.6 python2.6-dev libsasl2-dev libldap2-dev',
])
#batches.append([
#'pip install -r dev_requirements.txt',
#])
if ci_lib.have_docker():
batches.extend(
['docker pull %s' % (ci_lib.image_for_distro(distro),)]
for distro in ci_lib.DISTROS
)
ci_lib.run_batches(batches)
| [
"[email protected]"
]
| |
6acd32f865beb3eb546bed1a532bd159c0d26e2f | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Tools/msi/msi.py | 38731e25c61d6c00a1c1f91fb9ced0156963d8d9 | [
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi"
]
| permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 58,459 | py | # Python MSI Generator
# (C) 2003 Martin v. Loewis
# See "FOO" in comments refers to MSDN sections with the title FOO.
import msilib, schema, sequence, os, glob, time, re, shutil
from msilib import Feature, CAB, Directory, Dialog, Binary, add_data
import uisample
from win32com.client import constants
from distutils.spawn import find_executable
from uuids import product_codes
# Settings can be overridden in config.py below
# 0 for official python.org releases
# 1 for intermediate releases by anybody, with
# a new product code for every package.
snapshot = 1
# 1 means that file extension is px, not py,
# and binaries start with x
testpackage = 0
# Location of build tree
srcdir = os.path.abspath("../..")
# Text to be displayed as the version in dialogs etc.
# goes into file name and ProductCode. Defaults to
# current_version.day for Snapshot, current_version otherwise
full_current_version = None
# Is Tcl available at all?
have_tcl = True
# path to PCbuild directory
PCBUILD="PCbuild"
# msvcrt version
MSVCR = "90"
try:
from config import *
except ImportError:
pass
# Extract current version from Include/patchlevel.h
lines = open(srcdir + "/Include/patchlevel.h").readlines()
major = minor = micro = level = serial = None
levels = {
'PY_RELEASE_LEVEL_ALPHA':0xA,
'PY_RELEASE_LEVEL_BETA': 0xB,
'PY_RELEASE_LEVEL_GAMMA':0xC,
'PY_RELEASE_LEVEL_FINAL':0xF
}
for l in lines:
if not l.startswith("#define"):
continue
l = l.split()
if len(l) != 3:
continue
_, name, value = l
if name == 'PY_MAJOR_VERSION': major = value
if name == 'PY_MINOR_VERSION': minor = value
if name == 'PY_MICRO_VERSION': micro = value
if name == 'PY_RELEASE_LEVEL': level = levels[value]
if name == 'PY_RELEASE_SERIAL': serial = value
short_version = major+"."+minor
# See PC/make_versioninfo.c
FIELD3 = 1000*int(micro) + 10*level + int(serial)
current_version = "%s.%d" % (short_version, FIELD3)
# This should never change. The UpgradeCode of this package can be
# used in the Upgrade table of future packages to make the future
# package replace this one. See "UpgradeCode Property".
# upgrade_code gets set to upgrade_code_64 when we have determined
# that the target is Win64.
upgrade_code_snapshot='{92A24481-3ECB-40FC-8836-04B7966EC0D5}'
upgrade_code='{65E6DE48-A358-434D-AA4F-4AF72DB4718F}'
upgrade_code_64='{6A965A0C-6EE6-4E3A-9983-3263F56311EC}'
if snapshot:
current_version = "%s.%s.%s" % (major, minor, int(time.time()/3600/24))
product_code = msilib.gen_uuid()
else:
product_code = product_codes[current_version]
if full_current_version is None:
full_current_version = current_version
extensions = [
'bz2.pyd',
'pyexpat.pyd',
'select.pyd',
'unicodedata.pyd',
'winsound.pyd',
'_elementtree.pyd',
'_bsddb.pyd',
'_socket.pyd',
'_ssl.pyd',
'_testcapi.pyd',
'_tkinter.pyd',
'_msi.pyd',
'_ctypes.pyd',
'_ctypes_test.pyd',
'_sqlite3.pyd',
'_hashlib.pyd',
'_multiprocessing.pyd'
]
# Well-known component UUIDs
# These are needed for SharedDLLs reference counter; if
# a different UUID was used for each incarnation of, say,
# python24.dll, an upgrade would set the reference counter
# from 1 to 2 (due to what I consider a bug in MSI)
# Using the same UUID is fine since these files are versioned,
# so Installer will always keep the newest version.
# NOTE: All uuids are self generated.
pythondll_uuid = {
"24":"{9B81E618-2301-4035-AC77-75D9ABEB7301}",
"25":"{2e41b118-38bd-4c1b-a840-6977efd1b911}",
"26":"{34ebecac-f046-4e1c-b0e3-9bac3cdaacfa}",
} [major+minor]
# Compute the name that Sphinx gives to the docfile
docfile = ""
if micro:
docfile = str(micro)
if level < 0xf:
docfile = '%x%s' % (level, serial)
docfile = 'python%s%s%s.chm' % (major, minor, docfile)
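# Illustrative example: for a hypothetical 2.6.1 final release (level 0xF),
# docfile stays str(micro) and the name works out to 'python261.chm'.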
# Build the mingw import library, libpythonXY.a
# This requires 'nm' and 'dlltool' executables on your PATH
def build_mingw_lib(lib_file, def_file, dll_file, mingw_lib):
warning = "WARNING: %s - libpythonXX.a not built"
nm = find_executable('nm')
dlltool = find_executable('dlltool')
if not nm or not dlltool:
print warning % "nm and/or dlltool were not found"
return False
nm_command = '%s -Cs %s' % (nm, lib_file)
dlltool_command = "%s --dllname %s --def %s --output-lib %s" % \
(dlltool, dll_file, def_file, mingw_lib)
export_match = re.compile(r"^_imp__(.*) in python\d+\.dll").match
f = open(def_file,'w')
print >>f, "LIBRARY %s" % dll_file
print >>f, "EXPORTS"
nm_pipe = os.popen(nm_command)
for line in nm_pipe.readlines():
m = export_match(line)
if m:
print >>f, m.group(1)
f.close()
exit = nm_pipe.close()
if exit:
print warning % "nm did not run successfully"
return False
if os.system(dlltool_command) != 0:
print warning % "dlltool did not run successfully"
return False
return True
# Target files (.def and .a) go in PCBuild directory
lib_file = os.path.join(srcdir, PCBUILD, "python%s%s.lib" % (major, minor))
def_file = os.path.join(srcdir, PCBUILD, "python%s%s.def" % (major, minor))
dll_file = "python%s%s.dll" % (major, minor)
mingw_lib = os.path.join(srcdir, PCBUILD, "libpython%s%s.a" % (major, minor))
have_mingw = build_mingw_lib(lib_file, def_file, dll_file, mingw_lib)
# Determine the target architechture
dll_path = os.path.join(srcdir, PCBUILD, dll_file)
msilib.set_arch_from_file(dll_path)
if msilib.pe_type(dll_path) != msilib.pe_type("msisupport.dll"):
    raise SystemError("msisupport.dll for incorrect architecture")
if msilib.Win64:
upgrade_code = upgrade_code_64
# Bump the last digit of the code by one, so that 32-bit and 64-bit
# releases get separate product codes
digit = hex((int(product_code[-2],16)+1)%16)[-1]
product_code = product_code[:-2] + digit + '}'
if testpackage:
ext = 'px'
testprefix = 'x'
else:
ext = 'py'
testprefix = ''
if msilib.Win64:
SystemFolderName = "[System64Folder]"
registry_component = 4|256
else:
SystemFolderName = "[SystemFolder]"
registry_component = 4
msilib.reset()
# condition in which to install pythonxy.dll in system32:
# a) it is Windows 9x or
# b) it is NT, the user is privileged, and has chosen per-machine installation
sys32cond = "(Windows9x or (Privileged and ALLUSERS))"
def build_database():
"""Generate an empty database, with just the schema and the
Summary information stream."""
if snapshot:
uc = upgrade_code_snapshot
else:
uc = upgrade_code
if msilib.Win64:
productsuffix = " (64-bit)"
else:
productsuffix = ""
# schema represents the installer 2.0 database schema.
# sequence is the set of standard sequences
# (ui/execute, admin/advt/install)
db = msilib.init_database("python-%s%s.msi" % (full_current_version, msilib.arch_ext),
schema, ProductName="Python "+full_current_version+productsuffix,
ProductCode=product_code,
ProductVersion=current_version,
Manufacturer=u"Python Software Foundation",
request_uac = True)
# The default sequencing of the RemoveExistingProducts action causes
# removal of files that got just installed. Place it after
# InstallInitialize, so we first uninstall everything, but still roll
# back in case the installation is interrupted
msilib.change_sequence(sequence.InstallExecuteSequence,
"RemoveExistingProducts", 1510)
msilib.add_tables(db, sequence)
# We cannot set ALLUSERS in the property table, as this cannot be
# reset if the user choses a per-user installation. Instead, we
# maintain WhichUsers, which can be "ALL" or "JUSTME". The UI manages
# this property, and when the execution starts, ALLUSERS is set
# accordingly.
add_data(db, "Property", [("UpgradeCode", uc),
("WhichUsers", "ALL"),
("ProductLine", "Python%s%s" % (major, minor)),
])
db.Commit()
return db
def remove_old_versions(db):
"Fill the upgrade table."
start = "%s.%s.0" % (major, minor)
# This requests that feature selection states of an older
# installation should be forwarded into this one. Upgrading
# requires that both the old and the new installation are
# either both per-machine or per-user.
migrate_features = 1
# See "Upgrade Table". We remove releases with the same major and
    # minor version. For a snapshot, we remove all earlier snapshots. For
# a release, we remove all snapshots, and all earlier releases.
if snapshot:
add_data(db, "Upgrade",
[(upgrade_code_snapshot, start,
current_version,
None, # Ignore language
migrate_features,
None, # Migrate ALL features
"REMOVEOLDSNAPSHOT")])
props = "REMOVEOLDSNAPSHOT"
else:
add_data(db, "Upgrade",
[(upgrade_code, start, current_version,
None, migrate_features, None, "REMOVEOLDVERSION"),
(upgrade_code_snapshot, start, "%s.%d.0" % (major, int(minor)+1),
None, migrate_features, None, "REMOVEOLDSNAPSHOT")])
props = "REMOVEOLDSNAPSHOT;REMOVEOLDVERSION"
props += ";TARGETDIR;DLLDIR"
# Installer collects the product codes of the earlier releases in
# these properties. In order to allow modification of the properties,
# they must be declared as secure. See "SecureCustomProperties Property"
add_data(db, "Property", [("SecureCustomProperties", props)])
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
bmwidth = 152*ruler/328
if kw.get("bitmap", True):
self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 135, 10, 220, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
def add_ui(db):
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
add_data(db, 'ActionText', uisample.ActionText)
add_data(db, 'UIText', uisample.UIText)
# Bitmaps
if not os.path.exists(srcdir+r"\PC\python_icon.exe"):
raise "Run icons.mak in PC directory"
add_data(db, "Binary",
[("PythonWin", msilib.Binary(r"%s\PCbuild\installer.bmp" % srcdir)), # 152x328 pixels
("py.ico",msilib.Binary(srcdir+r"\PC\py.ico")),
])
add_data(db, "Icon",
[("python_icon.exe", msilib.Binary(srcdir+r"\PC\python_icon.exe"))])
# Scripts
# CheckDir sets TargetExists if TARGETDIR exists.
# UpdateEditIDLE sets the REGISTRY.tcl component into
# the installed/uninstalled state according to both the
# Extensions and TclTk features.
if os.system("nmake /nologo /c /f msisupport.mak") != 0:
raise "'nmake /f msisupport.mak' failed"
add_data(db, "Binary", [("Script", msilib.Binary("msisupport.dll"))])
# See "Custom Action Type 1"
if msilib.Win64:
CheckDir = "CheckDir"
UpdateEditIDLE = "UpdateEditIDLE"
else:
CheckDir = "_CheckDir@4"
UpdateEditIDLE = "_UpdateEditIDLE@4"
add_data(db, "CustomAction",
[("CheckDir", 1, "Script", CheckDir)])
if have_tcl:
add_data(db, "CustomAction",
[("UpdateEditIDLE", 1, "Script", UpdateEditIDLE)])
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair")])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
compileargs = r'-Wi "[TARGETDIR]Lib\compileall.py" -f -x bad_coding|badsyntax|site-packages|py3_ "[TARGETDIR]Lib"'
lib2to3args = r'-c "import lib2to3.pygram, lib2to3.patcomp;lib2to3.patcomp.PatternCompiler()"'
# See "CustomAction Table"
add_data(db, "CustomAction", [
# msidbCustomActionTypeFirstSequence + msidbCustomActionTypeTextData + msidbCustomActionTypeProperty
# See "Custom Action Type 51",
# "Custom Action Execution Scheduling Options"
("InitialTargetDir", 307, "TARGETDIR",
"[WindowsVolume]Python%s%s" % (major, minor)),
("SetDLLDirToTarget", 307, "DLLDIR", "[TARGETDIR]"),
("SetDLLDirToSystem32", 307, "DLLDIR", SystemFolderName),
# msidbCustomActionTypeExe + msidbCustomActionTypeSourceFile
# See "Custom Action Type 18"
("CompilePyc", 18, "python.exe", compileargs),
("CompilePyo", 18, "python.exe", "-O "+compileargs),
("CompileGrammar", 18, "python.exe", lib2to3args),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
("InitialTargetDir", 'TARGETDIR=""', 750),
# In the user interface, assume all-users installation if privileged.
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, "AdminUISequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
])
# Execute Sequences
add_data(db, "InstallExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("UpdateEditIDLE", None, 1050),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
add_data(db, "AdminExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
# See "ControlEvent Table". Parameters are the event, the parameter
# to the action, and optionally the condition for the event, and the order
# of events.
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Acknowledgements", 135, 95, 220, 120, 0x30003,
"Special Windows thanks to:\n"
" Mark Hammond, without whose years of freely \n"
" shared Windows expertise, Python for Windows \n"
" would still be Python for DOS.")
c = exit_dialog.text("warning", 135, 200, 220, 40, 0x30003,
"{\\VerdanaRed9}Warning: Python 2.5.x is the last "
"Python release for Windows 9x.")
c.condition("Hide", "NOT Version9X")
exit_dialog.text("Description", 135, 235, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
costing.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 135, 70, 220, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 135, 110, 220, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 135, 135, 220, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
c = seldlg.text("Existing", 135, 25, 235, 30, 0x30003,
"{\VerdanaRed9}This update will replace your existing [ProductLine] installation.")
c.condition("Hide", 'REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""')
seldlg.text("Description", 135, 50, 220, 40, 0x30003,
"Please select a directory for the [ProductName] files.")
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("DoAction", "CheckDir", "TargetExistsOk<>1", order=1)
# If the target exists, but we found that we are going to remove old versions, don't bother
# asking for confirmation before overwriting it. Strictly speaking, we should determine that
# the target directory is indeed the target of the product that we are going to remove, but
# I don't know how to do that.
c.event("SpawnDialog", "ExistingDirectoryDlg", 'TargetExists=1 and REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""', 2)
c.event("SetTargetPath", "TARGETDIR", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 3)
c.event("SpawnWaitDialog", "WaitForCostingDlg", "CostingComplete=1", 4)
c.event("NewDialog", "SelectFeaturesDlg", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 5)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 135, 70, 172, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 135, 90, 208, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 135, 230, 206, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# SelectFeaturesDlg
features = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal|track_disk_space,
title, "Tree", "Next", "Cancel")
features.title("Customize [ProductName]")
features.text("Description", 135, 35, 220, 15, 0x30003,
"Select the way you want features to be installed.")
features.text("Text", 135,45,220,30, 3,
"Click on the icons in the tree below to change the way features will be installed.")
c=features.back("< Back", "Next")
c.event("NewDialog", "SelectDirectoryDlg")
c=features.next("Next >", "Cancel")
c.mapping("SelectionNoItems", "Enabled")
c.event("SpawnDialog", "DiskCostDlg", "OutOfDiskSpace=1", order=1)
c.event("EndDialog", "Return", "OutOfDiskSpace<>1", order=2)
c=features.cancel("Cancel", "Tree")
c.event("SpawnDialog", "CancelDlg")
# The browse property is not used, since we have only a single target path (selected already)
features.control("Tree", "SelectionTree", 135, 75, 220, 95, 7, "_BrowseProperty",
"Tree of selections", "Back", None)
#c=features.pushbutton("Reset", 42, 243, 56, 17, 3, "Reset", "DiskCost")
#c.mapping("SelectionNoItems", "Enabled")
#c.event("Reset", "0")
features.control("Box", "GroupBox", 135, 170, 225, 90, 1, None, None, None, None)
c=features.xbutton("DiskCost", "Disk &Usage", None, 0.10)
c.mapping("SelectionNoItems","Enabled")
c.event("SpawnDialog", "DiskCostDlg")
c=features.xbutton("Advanced", "Advanced", None, 0.30)
c.event("SpawnDialog", "AdvancedDlg")
c=features.text("ItemDescription", 140, 180, 210, 30, 3,
"Multiline description of the currently selected item.")
c.mapping("SelectionDescription","Text")
c=features.text("ItemSize", 140, 210, 210, 45, 3,
"The size of the currently selected item.")
c.mapping("SelectionSize", "Text")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table but, according to the documentation, the
# installer fails if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 135, 60, 235, 80, 3,
"WhichUsers", "", "Next")
g.condition("Disable", "VersionNT=600") # Not available on Vista and Windows 2008
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 235, 20, "Install just for me (not available on Windows Vista)")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", order = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Advanced Dialog.
advanced = PyDialog(db, "AdvancedDlg", x, y, w, h, modal, title,
"CompilePyc", "Ok", "Ok")
advanced.title("Advanced Options for [ProductName]")
# A checkbox: whether to compile .py files to bytecode after installation.
advanced.checkbox("CompilePyc", 135, 60, 230, 50, 3,
"COMPILEALL", "Compile .py files to byte code after installation", "Ok")
c = advanced.cancel("Ok", "CompilePyc", name="Ok") # Button just has location of cancel button.
c.event("EndDialog", "Return")
#####################################################################
# Existing Directory dialog
dlg = Dialog(db, "ExistingDirectoryDlg", 50, 30, 200, 80, modal, title,
"No", "No", "No")
dlg.text("Title", 10, 20, 180, 40, 3,
"[TARGETDIR] exists. Are you sure you want to overwrite existing files?")
c=dlg.pushbutton("Yes", 30, 60, 55, 17, 3, "Yes", "No")
c.event("[TargetExists]", "0", order=1)
c.event("[TargetExistsOk]", "1", order=2)
c.event("EndDialog", "Return", order=3)
c=dlg.pushbutton("No", 115, 60, 55, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
# Maintenance type: change/repair/remove
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 135, 63, 230, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 135, 108, 230, 60, 3,
"MaintenanceForm_Action", "", "Next")
g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
# See "Feature Table". The feature level is 1 for all features,
# and the feature attributes are 0 for the DefaultFeature, and
# FollowParent for all other features. The numbers are the Display
# column.
def add_features(db):
# feature attributes:
# msidbFeatureAttributesFollowParent == 2
# msidbFeatureAttributesDisallowAdvertise == 8
# Features that need to be installed together with the main feature
# (i.e. additional Python libraries) need to follow the parent feature.
# Features that have no advertisement trigger (e.g. the test suite)
# must not support advertisement
global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt
default_feature = Feature(db, "DefaultFeature", "Python",
"Python Interpreter and Libraries",
1, directory = "TARGETDIR")
shared_crt = Feature(db, "SharedCRT", "MSVCRT", "C Run-Time (system-wide)", 0,
level=0)
private_crt = Feature(db, "PrivateCRT", "MSVCRT", "C Run-Time (private)", 0,
level=0)
add_data(db, "Condition", [("SharedCRT", 1, sys32cond),
("PrivateCRT", 1, "not "+sys32cond)])
# We don't support advertisement of extensions
ext_feature = Feature(db, "Extensions", "Register Extensions",
"Make this Python installation the default Python installation", 3,
parent = default_feature, attributes=2|8)
if have_tcl:
tcltk = Feature(db, "TclTk", "Tcl/Tk", "Tkinter, IDLE, pydoc", 5,
parent = default_feature, attributes=2)
htmlfiles = Feature(db, "Documentation", "Documentation",
"Python HTMLHelp File", 7, parent = default_feature)
tools = Feature(db, "Tools", "Utility Scripts",
"Python utility scripts (Tools/", 9,
parent = default_feature, attributes=2)
testsuite = Feature(db, "Testsuite", "Test suite",
"Python test suite (Lib/test/)", 11,
parent = default_feature, attributes=2|8)
def extract_msvcr90():
# Find the redistributable files
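# This relies on the VS90COMNTOOLS environment variable, which the
# Visual Studio 2008 installer sets, to locate the redistributable CRT.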
if msilib.Win64:
arch = "amd64"
else:
arch = "x86"
dir = os.path.join(os.environ['VS90COMNTOOLS'], r"..\..\VC\redist\%s\Microsoft.VC90.CRT" % arch)
result = []
installer = msilib.MakeInstaller()
# omit msvcm90 and msvcp90, as they aren't really needed
files = ["Microsoft.VC90.CRT.manifest", "msvcr90.dll"]
for f in files:
path = os.path.join(dir, f)
kw = {'src':path}
if f.endswith('.dll'):
kw['version'] = installer.FileVersion(path, 0)
kw['language'] = installer.FileVersion(path, 1)
result.append((f, kw))
return result
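# Concatenate the main Python license, the CRT license note, and the
# license terms of each bundled third-party library into LICENSE.txt.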
def generate_license():
import shutil, glob
out = open("LICENSE.txt", "w")
shutil.copyfileobj(open(os.path.join(srcdir, "LICENSE")), out)
shutil.copyfileobj(open("crtlicense.txt"), out)
for name, pat, file in (("bzip2","bzip2-*", "LICENSE"),
("Berkeley DB", "db-*", "LICENSE"),
("openssl", "openssl-*", "LICENSE"),
("Tcl", "tcl8*", "license.terms"),
("Tk", "tk8*", "license.terms"),
("Tix", "tix-*", "license.terms")):
out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name)
dirs = glob.glob(srcdir+"/../"+pat)
if not dirs:
raise ValueError, "Could not find "+srcdir+"/../"+pat
if len(dirs) > 1:
raise ValueError, "Multiple copies of "+pat
dir = dirs[0]
shutil.copyfileobj(open(os.path.join(dir, file)), out)
out.close()
class PyDirectory(Directory):
"""By default, all components in the Python installer
can run from source."""
def __init__(self, *args, **kw):
if not kw.has_key("componentflags"):
kw['componentflags'] = 2 #msidbComponentAttributesOptional
Directory.__init__(self, *args, **kw)
# See "File Table", "Component Table", "Directory Table",
# "FeatureComponents Table"
def add_files(db):
cab = CAB("python")
tmpfiles = []
# Add all executables, icons, text files into the TARGETDIR component
root = PyDirectory(db, cab, None, srcdir, "TARGETDIR", "SourceDir")
default_feature.set_current()
if not msilib.Win64:
root.add_file("%s/w9xpopen.exe" % PCBUILD)
root.add_file("README.txt", src="README")
root.add_file("NEWS.txt", src="Misc/NEWS")
generate_license()
root.add_file("LICENSE.txt", src=os.path.abspath("LICENSE.txt"))
root.start_component("python.exe", keyfile="python.exe")
root.add_file("%s/python.exe" % PCBUILD)
root.start_component("pythonw.exe", keyfile="pythonw.exe")
root.add_file("%s/pythonw.exe" % PCBUILD)
# msidbComponentAttributesSharedDllRefCount = 8, see "Component Table"
dlldir = PyDirectory(db, cab, root, srcdir, "DLLDIR", ".")
pydll = "python%s%s.dll" % (major, minor)
pydllsrc = os.path.join(srcdir, PCBUILD, pydll)
dlldir.start_component("DLLDIR", flags = 8, keyfile = pydll, uuid = pythondll_uuid)
installer = msilib.MakeInstaller()
pyversion = installer.FileVersion(pydllsrc, 0)
if not snapshot:
# For releases, the Python DLL has the same version as the
# installer package.
assert pyversion.split(".")[:3] == current_version.split(".")
dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor),
version=pyversion,
language=installer.FileVersion(pydllsrc, 1))
DLLs = PyDirectory(db, cab, root, srcdir + "/" + PCBUILD, "DLLs", "DLLS|DLLs")
# msvcr90.dll: Need to place the DLL and the manifest into the root directory,
# plus another copy of the manifest in the DLLs directory, with the manifest
# pointing to the root directory
root.start_component("msvcr90", feature=private_crt)
# Results are ID,keyword pairs
manifest, crtdll = extract_msvcr90()
root.add_file(manifest[0], **manifest[1])
root.add_file(crtdll[0], **crtdll[1])
# Copy the manifest
# Actually, don't do that anymore - no DLL in DLLs should have a manifest
# dependency on msvcr90.dll anymore, so this should not be necessary
#manifest_dlls = manifest[0]+".root"
#open(manifest_dlls, "w").write(open(manifest[1]['src']).read().replace("msvcr","../msvcr"))
#DLLs.start_component("msvcr90_dlls", feature=private_crt)
#DLLs.add_file(manifest[0], src=os.path.abspath(manifest_dlls))
# Now start the main component for the DLLs directory;
# no regular files have been added to the directory yet.
DLLs.start_component()
# Check if _ctypes.pyd exists
have_ctypes = os.path.exists(srcdir+"/%s/_ctypes.pyd" % PCBUILD)
if not have_ctypes:
print "WARNING: _ctypes.pyd not found, ctypes will not be included"
extensions.remove("_ctypes.pyd")
# Add all .py files in Lib, except lib-tk, test
dirs={}
pydirs = [(root,"Lib")]
while pydirs:
# Commit every now and then, or else the installer will complain.
db.Commit()
parent, dir = pydirs.pop()
if dir == ".svn" or dir.startswith("plat-"):
continue
elif dir in ["lib-tk", "idlelib", "Icons"]:
if not have_tcl:
continue
tcltk.set_current()
elif dir in ['test', 'tests', 'data', 'output']:
# test: Lib, Lib/email, Lib/bsddb, Lib/ctypes, Lib/sqlite3
# tests: Lib/distutils
# data: Lib/email/test
# output: Lib/test
testsuite.set_current()
elif not have_ctypes and dir == "ctypes":
continue
else:
default_feature.set_current()
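# DefaultDir values use the MSI "short|long" name form; make_short()
# is presumed to generate a unique 8.3-compatible short name.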
lib = PyDirectory(db, cab, parent, dir, dir, "%s|%s" % (parent.make_short(dir), dir))
# Add additional files
dirs[dir]=lib
lib.glob("*.txt")
if dir=='site-packages':
lib.add_file("README.txt", src="README")
continue
files = lib.glob("*.py")
files += lib.glob("*.pyw")
if files:
# Add an entry to the RemoveFile table to remove bytecode files.
lib.remove_pyc()
if dir.endswith('.egg-info'):
lib.add_file('entry_points.txt')
lib.add_file('PKG-INFO')
lib.add_file('top_level.txt')
lib.add_file('zip-safe')
continue
if dir=='test' and parent.physical=='Lib':
lib.add_file("185test.db")
lib.add_file("audiotest.au")
lib.add_file("cfgparser.1")
lib.add_file("sgml_input.html")
lib.add_file("test.xml")
lib.add_file("test.xml.out")
lib.add_file("testtar.tar")
lib.add_file("test_difflib_expect.html")
lib.add_file("check_soundcard.vbs")
lib.add_file("empty.vbs")
lib.glob("*.uue")
lib.glob("*.pem")
lib.glob("*.pck")
lib.add_file("readme.txt", src="README")
lib.add_file("zipdir.zip")
if dir=='decimaltestdata':
lib.glob("*.decTest")
if dir=='output':
lib.glob("test_*")
if dir=='idlelib':
lib.glob("*.def")
lib.add_file("idle.bat")
if dir=="Icons":
lib.glob("*.gif")
lib.add_file("idle.icns")
if dir=="command" and parent.physical=="distutils":
lib.glob("wininst*.exe")
if dir=="setuptools":
lib.add_file("cli.exe")
lib.add_file("gui.exe")
if dir=="lib2to3":
lib.removefile("pickle", "*.pickle")
if dir=="data" and parent.physical=="test" and parent.basedir.physical=="email":
# This should contain all non-.svn files listed in subversion
for f in os.listdir(lib.absolute):
if f.endswith(".txt") or f==".svn":continue
if f.endswith(".au") or f.endswith(".gif"):
lib.add_file(f)
else:
print "WARNING: New file %s in email/test/data" % f
for f in os.listdir(lib.absolute):
if os.path.isdir(os.path.join(lib.absolute, f)):
pydirs.append((lib, f))
# Add DLLs
default_feature.set_current()
lib = DLLs
lib.add_file("py.ico", src=srcdir+"/PC/py.ico")
lib.add_file("pyc.ico", src=srcdir+"/PC/pyc.ico")
dlls = []
tclfiles = []
for f in extensions:
if f=="_tkinter.pyd":
continue
if not os.path.exists(srcdir + "/" + PCBUILD + "/" + f):
print "WARNING: Missing extension", f
continue
dlls.append(f)
lib.add_file(f)
# Add sqlite
if msilib.msi_type=="Intel64;1033":
sqlite_arch = "/ia64"
elif msilib.msi_type=="x64;1033":
sqlite_arch = "/amd64"
tclsuffix = "64"
else:
sqlite_arch = ""
tclsuffix = ""
lib.add_file("sqlite3.dll")
if have_tcl:
if not os.path.exists("%s/%s/_tkinter.pyd" % (srcdir, PCBUILD)):
print "WARNING: Missing _tkinter.pyd"
else:
lib.start_component("TkDLLs", tcltk)
lib.add_file("_tkinter.pyd")
dlls.append("_tkinter.pyd")
tcldir = os.path.normpath(srcdir+("/../tcltk%s/bin" % tclsuffix))
for f in glob.glob1(tcldir, "*.dll"):
lib.add_file(f, src=os.path.join(tcldir, f))
# check whether there are any unknown extensions
for f in glob.glob1(srcdir+"/"+PCBUILD, "*.pyd"):
if f.endswith("_d.pyd"): continue # debug version
if f in dlls: continue
print "WARNING: Unknown extension", f
# Add headers
default_feature.set_current()
lib = PyDirectory(db, cab, root, "include", "include", "INCLUDE|include")
lib.glob("*.h")
lib.add_file("pyconfig.h", src="../PC/pyconfig.h")
# Add import libraries
lib = PyDirectory(db, cab, root, PCBUILD, "libs", "LIBS|libs")
for f in dlls:
lib.add_file(f.replace('pyd','lib'))
lib.add_file('python%s%s.lib' % (major, minor))
# Add the mingw-format library
if have_mingw:
lib.add_file('libpython%s%s.a' % (major, minor))
if have_tcl:
# Add Tcl/Tk
tcldirs = [(root, '../tcltk%s/lib' % tclsuffix, 'tcl')]
tcltk.set_current()
while tcldirs:
parent, phys, dir = tcldirs.pop()
lib = PyDirectory(db, cab, parent, phys, dir, "%s|%s" % (parent.make_short(dir), dir))
if not os.path.exists(lib.absolute):
continue
for f in os.listdir(lib.absolute):
if os.path.isdir(os.path.join(lib.absolute, f)):
tcldirs.append((lib, f, f))
else:
lib.add_file(f)
# Add tools
tools.set_current()
tooldir = PyDirectory(db, cab, root, "Tools", "Tools", "TOOLS|Tools")
for f in ['i18n', 'pynche', 'Scripts', 'versioncheck', 'webchecker']:
lib = PyDirectory(db, cab, tooldir, f, f, "%s|%s" % (tooldir.make_short(f), f))
lib.glob("*.py")
lib.glob("*.pyw", exclude=['pydocgui.pyw'])
lib.remove_pyc()
lib.glob("*.txt")
if f == "pynche":
x = PyDirectory(db, cab, lib, "X", "X", "X|X")
x.glob("*.txt")
if os.path.exists(os.path.join(lib.absolute, "README")):
lib.add_file("README.txt", src="README")
if f == 'Scripts':
lib.add_file("2to3.py", src="2to3")
if have_tcl:
lib.start_component("pydocgui.pyw", tcltk, keyfile="pydocgui.pyw")
lib.add_file("pydocgui.pyw")
# Add documentation
htmlfiles.set_current()
lib = PyDirectory(db, cab, root, "Doc", "Doc", "DOC|Doc")
lib.start_component("documentation", keyfile=docfile)
lib.add_file(docfile, src="build/htmlhelp/"+docfile)
cab.commit(db)
for f in tmpfiles:
os.unlink(f)
# See "Registry Table", "Component Table"
def add_registry(db):
# File extensions, associated with the REGISTRY.def component
# IDLE verbs depend on the tcltk feature.
# msidbComponentAttributesRegistryKeyPath = 4
# -1 for Root specifies "dependent on ALLUSERS property"
tcldata = []
if have_tcl:
tcldata = [
("REGISTRY.tcl", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"py.IDLE")]
add_data(db, "Component",
# msidbComponentAttributesRegistryKeyPath = 4
[("REGISTRY", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"InstallPath"),
("REGISTRY.doc", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"Documentation"),
("REGISTRY.def", msilib.gen_uuid(), "TARGETDIR", registry_component,
None, None)] + tcldata)
# See "FeatureComponents Table".
# The association between TclTk and pythonw.exe is necessary to make ICE59
# happy, because the installer otherwise believes that the IDLE and PyDoc
# shortcuts might get installed without pythonw.exe being installed. This
# is not true, since installing TclTk will install the default feature, which
# will cause pythonw.exe to be installed.
# REGISTRY.tcl is not associated with any feature, as it will be requested
# through a custom action
tcldata = []
if have_tcl:
tcldata = [(tcltk.id, "pythonw.exe")]
add_data(db, "FeatureComponents",
[(default_feature.id, "REGISTRY"),
(htmlfiles.id, "REGISTRY.doc"),
(ext_feature.id, "REGISTRY.def")] +
tcldata
)
# Extensions are not advertised. For advertised extensions,
# we would need separate binaries that install along with the
# extension.
pat = r"Software\Classes\%sPython.%sFile\shell\%s\command"
ewi = "Edit with IDLE"
pat2 = r"Software\Classes\%sPython.%sFile\DefaultIcon"
pat3 = r"Software\Classes\%sPython.%sFile"
pat4 = r"Software\Classes\%sPython.%sFile\shellex\DropHandler"
tcl_verbs = []
if have_tcl:
tcl_verbs=[
("py.IDLE", -1, pat % (testprefix, "", ewi), "",
r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -n -e "%1"',
"REGISTRY.tcl"),
("pyw.IDLE", -1, pat % (testprefix, "NoCon", ewi), "",
r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -n -e "%1"',
"REGISTRY.tcl"),
]
add_data(db, "Registry",
[# Extensions
("py.ext", -1, r"Software\Classes\."+ext, "",
"Python.File", "REGISTRY.def"),
("pyw.ext", -1, r"Software\Classes\."+ext+'w', "",
"Python.NoConFile", "REGISTRY.def"),
("pyc.ext", -1, r"Software\Classes\."+ext+'c', "",
"Python.CompiledFile", "REGISTRY.def"),
("pyo.ext", -1, r"Software\Classes\."+ext+'o', "",
"Python.CompiledFile", "REGISTRY.def"),
# MIME types
("py.mime", -1, r"Software\Classes\."+ext, "Content Type",
"text/plain", "REGISTRY.def"),
("pyw.mime", -1, r"Software\Classes\."+ext+'w', "Content Type",
"text/plain", "REGISTRY.def"),
#Verbs
("py.open", -1, pat % (testprefix, "", "open"), "",
r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
("pyw.open", -1, pat % (testprefix, "NoCon", "open"), "",
r'"[TARGETDIR]pythonw.exe" "%1" %*', "REGISTRY.def"),
("pyc.open", -1, pat % (testprefix, "Compiled", "open"), "",
r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
] + tcl_verbs + [
#Icons
("py.icon", -1, pat2 % (testprefix, ""), "",
r'[DLLs]py.ico', "REGISTRY.def"),
("pyw.icon", -1, pat2 % (testprefix, "NoCon"), "",
r'[DLLs]py.ico', "REGISTRY.def"),
("pyc.icon", -1, pat2 % (testprefix, "Compiled"), "",
r'[DLLs]pyc.ico', "REGISTRY.def"),
# Descriptions
("py.txt", -1, pat3 % (testprefix, ""), "",
"Python File", "REGISTRY.def"),
("pyw.txt", -1, pat3 % (testprefix, "NoCon"), "",
"Python File (no console)", "REGISTRY.def"),
("pyc.txt", -1, pat3 % (testprefix, "Compiled"), "",
"Compiled Python File", "REGISTRY.def"),
# Drop Handler
("py.drop", -1, pat4 % (testprefix, ""), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
("pyw.drop", -1, pat4 % (testprefix, "NoCon"), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
("pyc.drop", -1, pat4 % (testprefix, "Compiled"), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
])
# Registry keys
prefix = r"Software\%sPython\PythonCore\%s" % (testprefix, short_version)
add_data(db, "Registry",
[("InstallPath", -1, prefix+r"\InstallPath", "", "[TARGETDIR]", "REGISTRY"),
("InstallGroup", -1, prefix+r"\InstallPath\InstallGroup", "",
"Python %s" % short_version, "REGISTRY"),
("PythonPath", -1, prefix+r"\PythonPath", "",
r"[TARGETDIR]Lib;[TARGETDIR]DLLs;[TARGETDIR]Lib\lib-tk", "REGISTRY"),
("Documentation", -1, prefix+r"\Help\Main Python Documentation", "",
"[TARGETDIR]Doc\\"+docfile , "REGISTRY.doc"),
("Modules", -1, prefix+r"\Modules", "+", None, "REGISTRY"),
("AppPaths", -1, r"Software\Microsoft\Windows\CurrentVersion\App Paths\Python.exe",
"", r"[TARGETDIR]Python.exe", "REGISTRY.def"),
("DisplayIcon", -1,
r"Software\Microsoft\Windows\CurrentVersion\Uninstall\%s" % product_code,
"DisplayIcon", "[TARGETDIR]python.exe", "REGISTRY.def")
])
# Shortcuts, see "Shortcut Table"
add_data(db, "Directory",
[("ProgramMenuFolder", "TARGETDIR", "."),
("MenuDir", "ProgramMenuFolder", "PY%s%s|%sPython %s.%s" % (major,minor,testprefix,major,minor))])
add_data(db, "RemoveFile",
[("MenuDir", "TARGETDIR", None, "MenuDir", 2)])
tcltkshortcuts = []
if have_tcl:
tcltkshortcuts = [
("IDLE", "MenuDir", "IDLE|IDLE (Python GUI)", "pythonw.exe",
tcltk.id, r'"[TARGETDIR]Lib\idlelib\idle.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
("PyDoc", "MenuDir", "MODDOCS|Module Docs", "pythonw.exe",
tcltk.id, r'"[TARGETDIR]Tools\scripts\pydocgui.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
]
add_data(db, "Shortcut",
tcltkshortcuts +
[# Advertised shortcuts: targets are features, not files
("Python", "MenuDir", "PYTHON|Python (command line)", "python.exe",
default_feature.id, None, None, None, "python_icon.exe", 2, None, "TARGETDIR"),
# Advertising the Manual breaks on (some?) Win98, and the advertised
# shortcut would lack an icon.
#("Manual", "MenuDir", "MANUAL|Python Manuals", "documentation",
# htmlfiles.id, None, None, None, None, None, None, None),
## Non-advertised shortcuts: must be associated with a registry component
("Manual", "MenuDir", "MANUAL|Python Manuals", "REGISTRY.doc",
"[#%s]" % docfile, None,
None, None, None, None, None, None),
("Uninstall", "MenuDir", "UNINST|Uninstall Python", "REGISTRY",
SystemFolderName+"msiexec", "/x%s" % product_code,
None, None, None, None, None, None),
])
db.Commit()
db = build_database()
try:
add_features(db)
add_ui(db)
add_files(db)
add_registry(db)
remove_old_versions(db)
db.Commit()
finally:
del db
| [
"[email protected]"
]
| |
33863f605bfdb090461483e022ae5109bdf4aec5 | 41a008ceea2ae75b94cf2110a1370af1f789ff3f | /lava/helper/tests/test_dispatcher.py | 1eb1b3a512256d244782fd34e41b11f1e0982a9f | []
| no_license | guanhe0/lava_v1 | 937916a0009c0a3f801e61f7580b96e324da64b1 | c49e753ce55104e3eadb0126088b7580a39446fe | refs/heads/master | 2022-10-28T02:33:52.924608 | 2017-01-04T07:24:59 | 2017-01-04T08:43:37 | 78,068,030 | 0 | 1 | null | 2022-10-07T02:00:16 | 2017-01-05T01:36:27 | Python | UTF-8 | Python | false | false | 2,855 | py | # Copyright (C) 2013 Linaro Limited
#
# Author: Milo Casagrande <[email protected]>
#
# This file is part of lava-tool.
#
# lava-tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# lava-tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with lava-tool. If not, see <http://www.gnu.org/licenses/>.
"""lava.helper.dispatcher tests."""
import os
import tempfile
from mock import patch
from lava.tool.errors import CommandError
from lava.helper.tests.helper_test import HelperTest
from lava.helper.dispatcher import (
choose_devices_path,
)
class DispatcherTests(HelperTest):
def setUp(self):
super(DispatcherTests, self).setUp()
self.devices_dir = os.path.join(tempfile.gettempdir(), "devices")
os.makedirs(self.devices_dir)
def tearDown(self):
super(DispatcherTests, self).tearDown()
os.removedirs(self.devices_dir)
def test_choose_devices_path_0(self):
# Tests that when passing more than one path, the first writable one
# is returned.
obtained = choose_devices_path(
["/", "/root", self.temp_dir, os.path.expanduser("~")])
expected = os.path.join(self.temp_dir, "devices")
self.assertEqual(expected, obtained)
def test_choose_devices_path_1(self):
# Tests that when passing a path that is not writable, CommandError
# is raised.
self.assertRaises(CommandError, choose_devices_path,
["/", "/root", "/root/tmpdir"])
def test_choose_devices_path_2(self):
# Tests that the correct path for devices is created on the filesystem.
expected_path = os.path.join(self.temp_dir, "devices")
obtained = choose_devices_path([self.temp_dir])
self.assertEqual(expected_path, obtained)
self.assertTrue(os.path.isdir(expected_path))
def test_choose_devices_path_3(self):
# Tests that returns the already existing devices path.
obtained = choose_devices_path([tempfile.gettempdir()])
self.assertEqual(self.devices_dir, obtained)
@patch("__builtin__.open")
def test_choose_devices_path_4(self, mocked_open):
# Tests that when IOError is raised and we pass only one dir
# CommandError is raised.
mocked_open.side_effect = IOError()
self.assertRaises(CommandError, choose_devices_path,
[tempfile.gettempdir()])
self.assertTrue(mocked_open.called)
| [
"[email protected]"
]
| |
88a28ce29d0031ad716e0bbc7ad2035a3d388f17 | 05f4b73f8fab3a7b995bf2bd28b9f7eeba89d9e7 | /2_20210127.py | ff3d5f3cf95828e755cc4f194752270dd86c6d17 | []
| no_license | AlgorithmOnline/jaeeun | c7ee9504dd8109a8e0154e0ce514f35796ba368c | 65dfee579d7a75bcc240a190e7edff10d285d113 | refs/heads/master | 2023-04-20T02:52:12.673218 | 2021-05-07T13:56:05 | 2021-05-07T13:56:05 | 286,736,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | N, M = map(int, input().split())
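# Read two space-separated integers from stdin and print their sum.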
print(N+M)
| [
"[email protected]"
]
|