metadata | text
---|---
{
"source": "joamilab/inpe-mestrado-cintia2",
"score": 2
} |
#### File: joamilab/inpe-mestrado-cintia2/datavisualization.py
```python
import matplotlib.pyplot as plt
import datetime, os
import pandas as pd
from sklearn.manifold import Isomap
def plotSpectra(spectra):
plt.figure(1, figsize=(10,8))
for index, spectrum in spectra.iterrows():
plt.plot(spectrum[2], spectrum[3], label=spectrum[1])
plt.legend(loc="upper right", fontsize="x-small", framealpha=0.5, markerfirst=False)
plt.xlabel('wavelength')
plt.ylabel('flux')
plt.title('Supernovae Spectra (after preprocessing)')
pathFile = os.path.dirname(os.path.dirname(os.getcwd()))
name_figure = "Spectra_" + datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S") + ".png"
    plt.savefig(os.path.join(pathFile, 'Images', name_figure))
plt.show()
def applyIsomap(spectra):
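    # Gather the flux series of every valid spectrum (0 marks spectra skipped
    # during preprocessing) and embed them in two dimensions with Isomap.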
spectraFlux = []
for spectrum in spectra:
if spectrum != 0:
spectrumDf = pd.DataFrame(spectrum)
spectraFlux.append(spectrumDf.iloc[:,0])
iso = Isomap(n_components=2, n_jobs=-1).fit_transform(spectraFlux)
isoDf = pd.DataFrame(iso)
return isoDf.iloc[:,0], isoDf.iloc[:,1]
def plotScatter(xs, ys, rna, patterns):
patternsDf = pd.DataFrame(patterns)
colors_dict = {'Tipo Ia': 'red', 'Tipo Ib': 'blue', 'Tipo Ic': 'green',
'Tipo II': 'purple', 'Tipo Não Identificado': 'gray'}
colors = []
sn_types = []
for index, pattern in patternsDf.iterrows():
colors.append(colors_dict[pattern[3]])
sn_types.append(pattern[3])
    numberFig = {'Ia': 2, 'Ib': 3, 'Ic': 4}.get(rna, 5)
plt.figure(numberFig, figsize=(10,8))
index = 0
plots = []
sn_types_aux = []
for y in ys:
plot_scatter = plt.scatter(xs[index], y, color=colors[index])
if sn_types[index] not in sn_types_aux:
sn_types_aux.append(sn_types[index])
plots.append(plot_scatter)
index = index+1
plots.reverse()
sn_types_aux.reverse()
plt.legend((plots), (sn_types_aux), loc="upper right", title="Types")
plt.title('RNA ' + rna + ': scatter plot of spectra after classification.')
plt.xlabel('first component isomap')
plt.ylabel('second component isomap')
pathFile = os.path.dirname(os.getcwd())
name_figure = "Scatter_" + rna + "_" + datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S") + ".png"
    plt.savefig(os.path.join(pathFile, 'Images', name_figure))
if rna == 'II':
plt.show()
```
#### File: joamilab/inpe-mestrado-cintia2/extractfeatures.py
```python
import csv
import numpy as np
from scipy import interpolate
import pandas as pd
import os
# Interpolate the spectrum onto a uniform grid with 8 Angstrom spacing (4000-7000)
def interpolateSpectrum(spectrum):
f = interpolate.interp1d(spectrum[2],spectrum[3])
xinterp = np.arange(4000.0, 7000.0, 8)
yinterp = f(xinterp)
data = {
'Wavelength': xinterp,
'Flux': yinterp
}
interpolateInterval = pd.DataFrame(data, columns=['Wavelength', 'Flux'])
return interpolateInterval
# Select spectra whose wavelength coverage spans 4000-7000 Angstroms and interpolate them
def interpolateInterval4000a7000(spectra):
wavelength_interp = []
flux_interp = []
for index, row in spectra.iterrows():
wavelength = row[2]
if wavelength.iloc[0]<=4000 and wavelength.iloc[-1]>=7000:
interp = interpolateSpectrum(row)
wavelength_interp.append(interp.iloc[:,0])
flux_interp.append(interp.iloc[:,1])
else:
wavelength_interp.append(0)
flux_interp.append(0)
            print(str(row[1]) + ' does not cover the interval 4000-7000 Angstroms.\n\tSorry, I cannot classify this spectrum.')
spectra['WavelengthInterpolated'] = wavelength_interp
spectra['FluxInterpolated'] = flux_interp
return spectra
def extractInterval(spectrum, limitInf, limitUpp, classifier):
values = []
wavelength = spectrum[4]
flux = spectrum[5]
i = 0
if limitUpp != limitInf:
for w in wavelength:
if w >= float(limitInf) and w <= float(limitUpp):
values.append(flux[i])
elif w > float(limitUpp):
break
i = i+1
else:
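        # limitInf == limitUpp signals the Type II network, which samples two
        # windows: 4000-5000 and 6000-7000 Angstroms.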
for w in wavelength:
if (w >= 4000.0 and w<=5000.0) or (w>=6000.0 and w<=7000.0):
values.append(flux[i])
elif w > 7000.0:
break
i = i+1
return values
# Extract the network inputs (wavelength intervals) from the spectra
def extractInputs(spectra):
intervalIa = []
intervalIb = []
intervalIc = []
intervalII = []
for index, spectrum in spectra.iterrows():
if not(isinstance(spectrum['WavelengthInterpolated'], int)):
intervalIa.append(extractInterval(spectrum, 5000.0, 6500.0, 'Ia'))
intervalIb.append(extractInterval(spectrum, 5500.0, 7000.0, 'Ib'))
intervalIc.append(extractInterval(spectrum, 5500.0, 6500.0, 'Ic'))
intervalII.append(extractInterval(spectrum, 0, 0, 'II'))
else:
intervalIa.append(0)
intervalIb.append(0)
intervalIc.append(0)
intervalII.append(0)
spectra['IntervalIa'] = intervalIa
spectra['IntervalIb'] = intervalIb
spectra['IntervalIc'] = intervalIc
spectra['IntervalII'] = intervalII
return spectra
# Generate the input file with the patterns for classification
def generateFileTest(rna, spectra):
pathFile = os.path.dirname(os.path.dirname(os.getcwd()))
rootFolder = os.path.join(pathFile, 'M' + rna)
fileOut = os.path.join(rootFolder, 'EntradaRNA_' + rna + '_Teste.csv')
listOfParams = []
patterns = []
typeInterv = 'Interval' + rna
i = 0
for index, spectrum in spectra.iterrows():
if rna == 'Ia':
interval = spectrum[6]
elif rna == 'Ib':
interval = spectrum[7]
elif rna == 'Ic':
interval = spectrum[8]
elif rna == 'II':
interval = spectrum[9]
if not(isinstance(interval, int)):
parameters = []
pattern = []
for flux in interval:
parameters.append(flux)
parameters.extend(['', '', spectrum[0], '', '', '', '', '', spectrum[1]])
listOfParams.append(parameters)
pattern.extend([i+1, spectrum[0], spectrum[1]])
patterns.append(pattern)
i = i+1
amountOfPatterns = [len(spectra[typeInterv])]
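    # The classifier's input file starts and ends with the pattern count, with
    # one parameter row per spectrum in between.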
with open(fileOut, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
wr.writerow(amountOfPatterns)
for row in listOfParams:
wr.writerow(row)
wr.writerow(amountOfPatterns)
return patterns, pathFile
``` |
{
"source": "joan2937/picod",
"score": 3
} |
#### File: EXAMPLES/py_picod/continuous_servo.py
```python
import time
import picod
GPIO = 5
def servo(gpio, speed_percent):
    if speed_percent > 100:
        speed_percent = 100
    elif speed_percent < -100:
        speed_percent = -100
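    # Map -100..100 % speed onto a 1000..2000 microsecond pulse (1500 us = stop).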
pw = 1500 + (speed_percent * 5)
pico.tx_servo(gpio, pw)
pico = picod.pico()
if not pico.connected:
exit()
servo(GPIO, 50) # half clockwise
time.sleep(1)
servo(GPIO, 0) # stop
time.sleep(1)
servo(GPIO, -50) # half anticlockwise
time.sleep(1)
pico.tx_servo(GPIO, 0) # servo off
pico.close()
```
#### File: EXAMPLES/py_picod/picod_mcp3008.py
```python
import picod
class MCP3008:
"""
MCP3008 8 ch 10-bit ADC
CH0 1 o o 16 V+
CH1 2 o o 15 Vref
CH2 3 o o 14 AGND
CH3 4 o o 13 SCLK
CH4 5 o o 12 SDO
CH5 6 o o 11 SDI
CH6 7 o o 10 CS/SHDN
CH7 8 o o 9 DGND
Be aware that SDO will be at the same voltage as V+.
"""
def __init__(self, pico, channel, tx, rx, sck, cs, speed=1e6):
"""
"""
self._pico = pico
self._hw = channel
self._cs = cs
status, speed = pico.spi_open(channel, tx, rx, sck, speed)
if status != picod.STATUS_OKAY:
            raise ValueError("failed to open SPI channel %d" % channel)
def read_single_ended(self, channel):
assert 0 <= channel <= 7
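        # Request frame per the MCP3008 datasheet: a start bit (0x01), then
        # the single-ended flag plus channel number in the top nibble, then a
        # pad byte while the 10-bit result is clocked out.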
status, d = self._pico.spi_xfer(
self._hw, self._cs, [1, 0x80+(channel<<4), 0])
if status == picod.STATUS_OKAY:
c1 = d[1] & 0x03
c2 = d[2]
val = (c1<<8)+c2
return val
return None
def read_differential_plus(self, channel):
assert 0 <= channel <= 3
status, d = self._pico.spi_xfer(
self._hw, self._cs, [1, channel<<5, 0])
if status == picod.STATUS_OKAY:
c1 = d[1] & 0x03
c2 = d[2]
val = (c1<<8)+c2
return val
return None
def read_differential_minus(self, channel):
assert 0 <= channel <= 3
status, d = self._pico.spi_xfer(
self._hw, self._cs, [1, (channel<<5)+16, 0])
if status == picod.STATUS_OKAY:
c1 = d[1] & 0x03
c2 = d[2]
val = (c1<<8)+c2
return val
return None
def close(self):
self._pico.spi_close(self._hw)
if __name__ == "__main__":
import time
import picod
import picod_mcp3008
pico = picod.pico()
pico.pico_reset()
adc = picod_mcp3008.MCP3008(pico, 0, 7, 4, 6, 5, 50000)
end_time = time.time() + 60
while time.time() < end_time:
print(adc.read_single_ended(0))
time.sleep(0.1)
adc.close()
``` |
{
"source": "joan726/BigDL",
"score": 2
} |
#### File: chronos/autots/test_tspipeline.py
```python
import tempfile
from unittest import TestCase
import pytest
import torch
from torch.utils.data import TensorDataset, DataLoader
from bigdl.chronos.autots import AutoTSEstimator, TSPipeline
from bigdl.orca.common import init_orca_context, stop_orca_context
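# The data creators below synthesize random (past window -> future window)
# tensor pairs sized from the trial config, so the search can vary batch size
# and sequence lengths without touching real data.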
def train_data_creator(config):
return DataLoader(TensorDataset(torch.randn(1000,
config.get('past_seq_len', 10),
config.get('input_feature_num', 2)),
torch.randn(1000,
config.get('future_seq_len', 2),
config.get('output_feature_num', 2))),
batch_size=config.get('batch_size', 32), shuffle=True)
def valid_data_creator(config):
return DataLoader(TensorDataset(torch.randn(1000,
config.get('past_seq_len', 10),
config.get('input_feature_num', 2)),
torch.randn(1000,
config.get('future_seq_len', 2),
config.get('output_feature_num', 2))),
batch_size=config.get('batch_size', 32), shuffle=False)
class TestTSPipeline(TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_seq2seq_tsppl_support_dataloader(self):
tmp_seq2seq_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="seq2seq",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10,
future_seq_len=2)
tsppl_seq2seq = autots.fit(data=train_data_creator({}),
validation_data=valid_data_creator({}),
epochs=2,
batch_size=32)
tsppl_seq2seq.save(tmp_seq2seq_dir.name)
del tsppl_seq2seq
stop_orca_context()
# load
tsppl_seq2seq = TSPipeline.load(tmp_seq2seq_dir.name)
tsppl_seq2seq.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_seq2seq._best_config['batch_size'] == 128
config = tsppl_seq2seq._best_config
# predict
yhat = tsppl_seq2seq.predict(valid_data_creator, batch_size=16)
assert yhat.shape == (1000,
config['future_seq_len'],
                          config['output_feature_num'])
assert tsppl_seq2seq._best_config['batch_size'] == 16
yhat = tsppl_seq2seq.predict_with_onnx(valid_data_creator, batch_size=64)
assert yhat.shape == (1000,
config['future_seq_len'],
                          config['output_feature_num'])
assert tsppl_seq2seq._best_config['batch_size'] == 64
# evaluate
_, smape = tsppl_seq2seq.evaluate(valid_data_creator,
metrics=['mse', 'smape'],
batch_size=16)
assert tsppl_seq2seq._best_config['batch_size'] == 16
assert smape < 2.0
_, smape = tsppl_seq2seq.evaluate_with_onnx(valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_seq2seq._best_config['batch_size'] == 64
assert smape < 2.0
with pytest.raises(RuntimeError):
tsppl_seq2seq.predict(torch.randn(1000,
config['past_seq_len'],
config['input_feature_num']))
with pytest.raises(RuntimeError):
tsppl_seq2seq.evaluate(torch.randn(1000,
config['past_seq_len'],
config['input_feature_num']))
def test_tcn_tsppl_support_dataloader(self):
tmp_tcn_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="tcn",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10,
future_seq_len=2)
tsppl_tcn = autots.fit(data=train_data_creator({}),
validation_data=valid_data_creator({}),
epochs=2,
batch_size=32)
tsppl_tcn.save(tmp_tcn_dir.name)
del tsppl_tcn
stop_orca_context()
# load
tsppl_tcn = TSPipeline.load(tmp_tcn_dir.name)
tsppl_tcn.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_tcn._best_config['batch_size'] == 128
config = tsppl_tcn._best_config
yhat = tsppl_tcn.predict(data=valid_data_creator, batch_size=16)
assert tsppl_tcn._best_config['batch_size'] == 16
assert yhat.shape == (1000,
config['future_seq_len'],
config['output_feature_num'])
_, smape = tsppl_tcn.evaluate(data=valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_tcn._best_config['batch_size'] == 64
assert smape < 2.0
def test_lstm_tsppl_support_dataloader(self):
tmp_lstm_dir = tempfile.TemporaryDirectory()
init_orca_context(cores=4, memory="4g", init_ray_on_spark=True)
autots = AutoTSEstimator(model="lstm",
search_space="minimal",
input_feature_num=2,
output_target_num=2,
past_seq_len=10)
tsppl_lstm = autots.fit(data=train_data_creator({'future_seq_len': 1}),
validation_data=valid_data_creator({'future_seq_len': 1}),
epochs=2,
batch_size=32)
tsppl_lstm.save(tmp_lstm_dir.name)
del tsppl_lstm
stop_orca_context()
# load
tsppl_lstm = TSPipeline.load(tmp_lstm_dir.name)
tsppl_lstm.fit(data=train_data_creator,
validation_data=valid_data_creator,
epochs=2,
batch_size=128)
assert tsppl_lstm._best_config['batch_size'] == 128
config = tsppl_lstm._best_config
yhat = tsppl_lstm.predict(data=valid_data_creator, batch_size=16)
assert tsppl_lstm._best_config['batch_size'] == 16
assert yhat.shape == (1000,
config['future_seq_len'],
config['output_feature_num'])
_, smape = tsppl_lstm.evaluate(data=valid_data_creator,
metrics=['mse', 'smape'],
batch_size=64)
assert tsppl_lstm._best_config['batch_size'] == 64
assert smape < 2.0
if __name__ == "__main__":
pytest.main([__file__])
``` |
{
"source": "Joan95/TFM",
"score": 3
} |
#### File: tuf/client/basic_client.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
import optparse
import logging
import tuf
import tuf.formats
import tuf.client.updater
import tuf.log
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.basic_client')
def update_client(repository_mirror):
"""
<Purpose>
Perform an update of the metadata and target files located at
'repository_mirror'. Target files are saved to the 'targets' directory
in the current working directory. The current directory must already
include a 'metadata' directory, which in turn must contain the 'current'
and 'previous' directories. At a minimum, these two directories require
the 'root.json' metadata file.
<Arguments>
repository_mirror:
The URL to the repository mirror hosting the metadata and target
files. E.g., 'http://localhost:8001'
<Exceptions>
tuf.RepositoryError, if 'repository_mirror' is improperly formatted.
<Side Effects>
Connects to a repository mirror and updates the metadata files and
any target files. Obsolete targets are also removed locally.
<Returns>
None.
"""
# Does 'repository_mirror' have the correct format?
try:
tuf.formats.URL_SCHEMA.check_match(repository_mirror)
except tuf.FormatError as e:
message = 'The repository mirror supplied is invalid.'
raise tuf.RepositoryError(message)
# Set the local repository directory containing all of the metadata files.
tuf.conf.repository_directory = '.'
# Set the repository mirrors. This dictionary is needed by the Updater
# class of updater.py.
repository_mirrors = {'mirror': {'url_prefix': repository_mirror,
'metadata_path': 'metadata',
'targets_path': 'targets',
'confined_target_dirs': ['']}}
# Create the repository object using the repository name 'repository'
# and the repository mirrors defined above.
updater = tuf.client.updater.Updater('repository', repository_mirrors)
# The local destination directory to save the target files.
destination_directory = './targets'
# Refresh the repository's top-level roles, store the target information for
# all the targets tracked, and determine which of these targets have been
# updated.
updater.refresh()
all_targets = updater.all_targets()
updated_targets = updater.updated_targets(all_targets, destination_directory)
# Download each of these updated targets and save them locally.
for target in updated_targets:
try:
updater.download_target(target, destination_directory)
except tuf.DownloadError as e:
pass
# Remove any files from the destination directory that are no longer being
# tracked.
updater.remove_obsolete_targets(destination_directory)
def parse_options():
"""
<Purpose>
Parse the command-line options and set the logging level
as specified by the user through the --verbose option.
'basic_client' expects the '--repo' to be set by the user.
Example:
$ python basic_client.py --repo http://localhost:8001
If the required option is unset, a parser error is printed
and the scripts exits.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
Sets the logging level for TUF logging.
<Returns>
The 'options.REPOSITORY_MIRROR' string.
"""
parser = optparse.OptionParser()
# Add the options supported by 'basic_client' to the option parser.
parser.add_option('--verbose', dest='VERBOSE', type=int, default=2,
                    help='Set the verbosity level of logging messages. '
                         'The lower the setting, the greater the verbosity.')
parser.add_option('--repo', dest='REPOSITORY_MIRROR', type='string',
                    help='Specify the repository mirror\'s URL prefix '
'(e.g., http://www.example.com:8001/tuf/).'
' The client will download updates from this mirror.')
options, args = parser.parse_args()
# Set the logging level.
if options.VERBOSE == 5:
tuf.log.set_log_level(logging.CRITICAL)
elif options.VERBOSE == 4:
tuf.log.set_log_level(logging.ERROR)
elif options.VERBOSE == 3:
tuf.log.set_log_level(logging.WARNING)
elif options.VERBOSE == 2:
tuf.log.set_log_level(logging.INFO)
elif options.VERBOSE == 1:
tuf.log.set_log_level(logging.DEBUG)
else:
tuf.log.set_log_level(logging.NOTSET)
# Ensure the '--repo' option was set by the user.
if options.REPOSITORY_MIRROR is None:
message = '"--repo" must be set on the command-line.'
parser.error(message)
# Return the repository mirror containing the metadata and target files.
return options.REPOSITORY_MIRROR
if __name__ == '__main__':
# Parse the options and set the logging level.
repository_mirror = parse_options()
# Perform an update of all the files in the 'targets' directory located in
# the current directory.
try:
update_client(repository_mirror)
except (tuf.NoWorkingMirrorError, tuf.RepositoryError) as e:
sys.stderr.write('Error: '+str(e)+'\n')
sys.exit(1)
# Successfully updated the client's target files.
sys.exit(0)
```
#### File: tuf/encoding/__init__.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import tuf
import tuf.formats
def hex_from_octetstring(octetstring):
"""
Convert a pyasn1 OctetString object into a hex string.
Example return: '4b394ae2'
Raises Error() if an individual octet's supposed integer value is out of
range (0 <= x <= 255).
"""
octets = octetstring.asNumbers()
hex_string = ''
for x in octets:
if x < 0 or x > 255:
raise tuf.Error('Unable to generate hex string from OctetString: integer '
'value of octet provided is not in range: ' + str(x))
hex_string += '%.2x' % x
# Make sure that the resulting value is a valid hex string.
tuf.formats.HEX_SCHEMA.check_match(hex_string)
return hex_string
```
#### File: tuf/encoding/snapshot_asn1_coder.py
```python
from __future__ import unicode_literals
from pyasn1.type import tag
from tuf.encoding.metadata_asn1_definitions import *
from tuf.encoding import hex_from_octetstring
import tuf.conf
import calendar
from datetime import datetime
def get_asn_signed(pydict_signed):
"""
Given a Python dictionary conformant to TUF's standard data specification for
Snapshot metadata (tuf.formats.SNAPSHOT_SCHEMA), convert to the new ASN.1
format for Snapshot metadata, which derives from Snapshot*.asn1.
"""
json_fileinfos = pydict_signed['meta']
target_role_fileinfos = TargetRoleFileInfos().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
number_of_target_role_files = 0
root_fileinfo = None
sorted_filenames = sorted(json_fileinfos)
for filename in sorted_filenames:
pydict_fileinfo = json_fileinfos[filename]
# TODO: Consider checking the file itself to determine format... but have
# to make sure we only mess with the real root metadata role file. (Don't
# accidentally hit other metadata files?)
if filename == 'root.' + tuf.conf.METADATA_FORMAT:
# If we're dealing with the root metadata file, we expect hashes and
# length in addition to just filename and version.
# TODO: Check if we've already added a root file. Raise error.
# TODO: Add ASN1_Conversion
root_fileinfo = RootRoleFileInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))
root_fileinfo['filename'] = filename
root_fileinfo['version'] = pydict_fileinfo['version']
if 'length' not in pydict_fileinfo or 'hashes' not in pydict_fileinfo:
# TODO: Better error
raise tuf.Error('ASN1 Conversion failure for Snapshot role: given '
'fileinfo for assumed root metadata file (filename: ' +
repr(filename) + '), found either hashes or length missing.')
root_fileinfo['length'] = pydict_fileinfo['length']
hashes = Hashes().subtype(
implicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatSimple, 4))
number_of_hashes = 0
# We're going to generate a list of hashes from the dictionary of hashes.
# The DER will contain this list, and the order of items in this list will
# affect hashing of the DER, and therefore signature verification.
# We have to make the order deterministic.
sorted_hashtypes = sorted(pydict_fileinfo['hashes'])
for hashtype in sorted_hashtypes:
hashval = pydict_fileinfo['hashes'][hashtype]
hash = Hash()
hash['function'] = int(HashFunction(hashtype))
hash['digest'] = OctetString(hexValue=hashval)
hashes[number_of_hashes] = hash
number_of_hashes += 1
root_fileinfo['hashes'] = hashes
root_fileinfo['numberOfHashes'] = number_of_hashes
else:
# Otherwise (if we're not dealing with the fileinfo for the root metadata
# file), we're dealing with a target role file (the main Targets role
# file or a delegated Targets role file), so we only expect filename and
# version.
if 'length' in pydict_fileinfo or 'hashes' in pydict_fileinfo:
# TODO: Better error
raise tuf.Error('ASN1 Conversion failure for Snapshot role: given '
'fileinfo for assumed Targets or delegated metadata file '
'(filename: ' +repr(filename) + '), found either hashes or length, '
'which are not expected in Snapshot for a Targets role file.')
fileinfo = TargetRoleFileInfo()
fileinfo['filename'] = filename
fileinfo['version'] = pydict_fileinfo['version']
target_role_fileinfos[number_of_target_role_files] = fileinfo
number_of_target_role_files += 1
# Loop complete, all fileinfo (root, targets, any delegated targets)
# loaded into target_role_fileinfos and root_fileinfo.
if len(target_role_fileinfos) < 1:
raise tuf.Error('ASN1 Conversion failure for Snapshot role: Found no '
'Targets role file info entries or conversion failed for all fileinfo '
'for Targets role files.')
if root_fileinfo is None:
raise tuf.Error('ASN1 Conversion failure for Snapshot role: Found no '
'Root role file info entry or conversion failed for Root fileinfo.')
snapshot_metadata = SnapshotMetadata().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatConstructed, 2))
snapshot_metadata['numberOfTargetRoleFiles'] = number_of_target_role_files
snapshot_metadata['targetRoleFileInfos'] = target_role_fileinfos
snapshot_metadata['rootRoleFileInfo'] = root_fileinfo
# Construct the 'signed' entry in the Snapshot metadata file, in ASN.1.
asn_signed = Signed().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatConstructed, 0))
asn_signed['type'] = int(RoleType('snapshot'))
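  # Convert the ISO-8601 UTC expiry string into a Unix timestamp.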
asn_signed['expires'] = calendar.timegm(datetime.strptime(
pydict_signed['expires'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
asn_signed['version'] = pydict_signed['version']
asn_signed['body'] = SignedBody().subtype(explicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatConstructed, 3))
asn_signed['body']['snapshotMetadata'] = snapshot_metadata
return asn_signed
def get_json_signed(asn_metadata):
"""
Given an ASN.1 object conforming to the new ASN.1 metadata definitions
derived from Snapshot*.asn1, return a Python dictionary containing the same
information, conformant to TUF's standard data specification for Snapshot
metadata (tuf.formats.SNAPSHOT_SCHEMA).
TUF internally does not use the ASN.1, converting it in and out of the
standard Python dictionary formats defined in tuf.formats.
"""
pydict_signed = {}
# TODO: Normalize this function's interface: the asn_metadata given is
# actually both 'signed' and 'signatures', which is strange since the
# get_asn_signed function takes only the contents of the 'signed' entry, and
# this function only returns the contents of a corresponding 'signed' entry.
# (It is confusingly inconsistent to take the full object, return a converted
# partial object, and have parallel naming and placement with a function that
# takes and returns a partial object.)
# This change has to percolate across all modules, however.
asn_signed = asn_metadata['signed'] # This should be the argument instead of asn_metadata.
# Should check this from the ASN, but... the ASN definitions don't actually
# USE a type, so I'm entirely basing the type encoded on the filename. This
# is bad, I think. Could it be a security issue to not sign the metadata type
# in there? The metadata types are pretty distinct, but... it's still best to
# fix this at some point.
pydict_signed['_type'] = 'Snapshot'
pydict_signed['expires'] = datetime.utcfromtimestamp(
asn_signed['expires']).isoformat()+'Z'
pydict_signed['version'] = int(asn_signed['version'])
# Next, extract the fileinfo for each role file described in the ASN.1
# Snapshot metadata.
snapshot_metadata = asn_signed['body']['snapshotMetadata']
number_of_target_role_files = int(
snapshot_metadata['numberOfTargetRoleFiles'])
asn_target_fileinfos = snapshot_metadata['targetRoleFileInfos']
pydict_fileinfos = {}
# Copy the Targets and delegated roles fileinfos:
for i in range(number_of_target_role_files):
asn_role_fileinfo = asn_target_fileinfos[i]
filename = str(asn_role_fileinfo['filename'])
pydict_fileinfos[filename] = {'version': int(asn_role_fileinfo['version'])}
# Add in the Root role fileinfo:
# In the Python dictionary format for Snapshot metadata, these all exist in
# one dictionary.
filename = str(snapshot_metadata['rootRoleFileInfo']['filename'])
version = int(snapshot_metadata['rootRoleFileInfo']['version'])
length = int(snapshot_metadata['rootRoleFileInfo']['length'])
if filename in pydict_fileinfos:
raise tuf.Error('ASN1 Conversion failure for Snapshot role: duplicate '
'fileinfo entries detected: filename ' + str(filename) + ' identified '
'both as Root role and Targets role in Snapshot metadata.')
# Populate the hashes in the fileinfo describing the Root role.
hashes = {}
for i in range(snapshot_metadata['rootRoleFileInfo']['numberOfHashes']):
asn_hash_info = snapshot_metadata['rootRoleFileInfo']['hashes'][i]
# This is how we'd extract the name of the hash function from the
# enumeration (namedValues) that is in the class (HashFunction), indexed by
# the underlying "value" of asn_hash_info. The [0] at the end selects
# the string description from a 2-tuple of e.g. ('sha256', 1), where 1 is
# the value in the enum.
# TODO: Should probably make this its own function. The following should
# work:
# def translate_pyasn_enum_to_value(asn_enum_value):
# return asn_enum_value.namedValues[asn_enum_value][0]
#
hashtype = asn_hash_info['function'].namedValues[asn_hash_info['function']]
hashval = hex_from_octetstring(asn_hash_info['digest'])
hashes[hashtype] = hashval
# Finally, add all the information gathered about the Root role.
pydict_fileinfos[filename] = {
'version': version,
'length': length,
'hashes': hashes}
pydict_signed['meta'] = pydict_fileinfos
return pydict_signed
``` |
{
"source": "joanaferreira0011/challenge_Coderbyte_QuestionsMarks",
"score": 3
} |
#### File: joanaferreira0011/challenge_Coderbyte_QuestionsMarks/master_code.py
```python
def QuestionsMarks(seq):
    # Collect every integer in the sequence together with its index.
    ultimo = []
    for i, a in enumerate(seq):
        if isinstance(a, int):
            ultimo.append([a, i])
    return ultimo
# Convert list items to ints where possible, keeping strings otherwise.
def is_number(s):
try:
int(s)
return int(s)
except ValueError:
return s
# Return the index ranges of consecutive digit pairs that sum to 10.
def soma(lista):
    ind = []
    for i in range(len(lista) - 1):
        first = lista[i]
        second = lista[i + 1]
        if first[0] + second[0] == 10:
            ind.append([first[1], second[1]])
    return ind
def questionmark(position, vector):
    # Every pair of digits that sums to 10 must have exactly three '?'
    # between them.
    for start, end in position:
        contador = 0
        for j in range(start, end):
            if vector[j] == '?':
                contador += 1
        if contador != 3:
            return False
    return True
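# Example: in "arrb6???4xxbl5???eee5" the pairs (6, 4) and (5, 5) each sum to
# 10 with exactly three question marks between them, so the program prints True.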
stringa = tuple(input("Yup?"))
lista = list(stringa)
final = list(map(is_number, lista))
print(questionmark(soma(QuestionsMarks(final)), stringa))
``` |
{
"source": "joanaMCSP/celery-rabbitmq",
"score": 2
} |
#### File: celery_rabbitmq/celery_rabbitmq/models.py
```python
from django.db import models
class Job(models.Model):
TYPES = (
('map_url', 'map_url'),
('merge_maps', 'merge_maps'),
)
STATUSES = (
('pending', 'pending'),
('started', 'started'),
('finished', 'finished'),
('failed', 'failed'),
)
type = models.CharField(choices=TYPES, max_length=20)
status = models.CharField(choices=STATUSES, max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
argument = models.TextField()
result = models.TextField(blank=True)
def save(self, *args, **kwargs):
"""Save model and schedule pending state jobs"""
super(Job, self).save(*args, **kwargs)
if self.status == 'pending':
from .tasks import TASK_MAPPING
task = TASK_MAPPING[self.type]
task.delay(job_id=self.id, s=self.argument)
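# Illustrative usage (a sketch; TASK_MAPPING maps each job type to its Celery
# task in tasks.py):
#     Job.objects.create(type='map_url', status='pending', argument=url)
# Creating/saving a job with status 'pending' enqueues the mapped task
# asynchronously via .delay().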
``` |
{
"source": "joanamdsantos/biogasmod",
"score": 3
} |
#### File: biogasmod/biogasmod/genericmodel.py
```python
from .waste import Waste
class Model(Waste):
def __init__(self, q, m, l0, k, t0, t, tf):
""" Generic model class for calculating biogas
or methane generation rate over time, which can
be classified into zero-order, first-order,
second-order, with multiphase or single phase.
-----
Atributes :
mass (float) representing the mass of waste in place (m),
unit in mass
methane_pot (float) representing the methane generation
potential (l0), units of volume per mass (m3/Mg)
gas_rate (float) first-order gas production rate constant (k)
in reciprocal time
lag_time (float) time to methane generation start after
disposal (t0) (years)
time_waste (float) age of the waste deposited, unit in
time (t) (years)
time_to_end (float): time to end-point of generation (tf)
data_list (a dataframe of integers and floats) a dataframe
of year and mass deposited extracted from the data file
------
"""
        super().__init__(m, l0, k)
self.methane_gen = q
self.lag_time = t0
self.time_waste = t
self.time_end = tf
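    # Illustrative sketch (not part of the original module): a single-phase,
    # first-order model of this family evaluates the generation rate as
    #
    #     Q(t) = k * L0 * M * exp(-k * (t - t0))   for t >= t0,
    #
    # with M the mass in place, L0 the methane generation potential, k the
    # first-order rate constant, and t0 the lag time documented above.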
``` |
{
"source": "joanamdsantos/IEO-Dashboard",
"score": 2
} |
#### File: IEO-Dashboard/ieoapp/routes.py
```python
from ieoapp import app
import json, plotly
from flask import render_template, request, Response, jsonify
from collections import OrderedDict
from scripts.macro import return_figures
@app.route('/', methods=['POST', 'GET'])
@app.route('/index/', methods=['POST', 'GET'])
def index():
return render_template('index.html')
@app.route('/macro/', methods=['POST', 'GET'])
def macro():
# List of countries for filter
country_codes = OrderedDict([('Non-OECD - Africa', 'AFR'),('Non-OECD - Middle East', 'MID'),
('Non-OECD Americas - Brazil', 'BRZ'), ('Non-OECD Americas - Other', 'CSA'),
('Non-OECD Americas', 'NAM'), ('Non-OECD Asia - China', 'CHI'),
('Non-OECD Asia - India', 'IND'), ('Non-OECD Asia - Other', 'OAS'),
('Non-OECD Asia', 'NAA'), ('Non-OECD Europe and Eurasia - Russia', 'RUS'),
('Non-OECD Europe and Eurasia - Other', 'URA'), ('Non-OECD Europe and Eurasia', 'NUR'),
('Total Non-OECD', 'NNN'),('OECD Americas - Canada', 'CAN'),
('OECD Americas - Mexico and Chile', 'MXC'), ('OECD Americas - United States', 'USA'),
('OECD Americas', 'OAM'), ('OECD Asia - Australia and New Zealand', 'ANZ'),
('OECD Asia - Japan', 'JPN'), ('OECD Asia - South Korea', 'SKO'),
('OECD Europe', 'EUR'), ('Total OECD', 'OOO'), ('Total World', 'WOR')])
# default lists of economic growth and oil price scenarios
scenario_codes = OrderedDict([('High economic growth', 'HIGHMACRO'),('Reference', 'REFERENCE'),
('Low economic growth', 'LOWMACRO'), ('High oil price', 'HIGHOILPRICE'),
('Low oil price', 'LOWOILPRICE')])
# Parse the POST request countries list
if (request.method == 'POST') and request.form:
figures = return_figures(request.form)
countries_selected = []
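        # request.form.lists() yields (field, values) pairs; keep the first
        # value submitted for each selected country field.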
for country in request.form.lists():
countries_selected.append(country[1][0])
# GET request returns all countries for initial page load
else:
figures = return_figures()
countries_selected = []
for country in country_codes:
countries_selected.append(country[1])
# plot ids for the html id tag
ids = ['figure-{}'.format(i) for i, _ in enumerate(figures)]
# Convert the plotly figures to JSON for javascript in html template
figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('/macro.html', ids=ids,
figuresJSON=figuresJSON,
all_countries=country_codes,
countries_selected=countries_selected)
@app.route('/power/')
def power():
return render_template('power.html')
@app.route('/energy/')
def energy():
return render_template('energy.html')
@app.route('/carbon/')
def carbon():
return render_template('carbon.html')
@app.route('/references/')
def references():
return render_template('references.html')
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html")
@app.errorhandler(500)
def internal_server_error(e):
    return render_template("500.html")
# TODO: Add another route with any name you want, then create a matching html
# file in the templates folder and render it with render_template, as in the
# routes above.
if __name__ == "__main__":
    app.run(debug=True)
``` |
{
"source": "JoanaSantosMartins/snappy",
"score": 2
} |
#### File: JoanaSantosMartins/snappy/test_pipeline.py
```python
from scripts import cleaner
from scripts import spliter
from scripts import align_and_map
from scripts import concatenator
from scripts.tree_maker import *
from scripts.blast_recomb import *
from scripts.summarize_results import *
import os
import subprocess
KEYS = ['1', '2', '3']
IDX = ['test01', 'test02', 'test03']
GR = 'GAG-POL-ENV'
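# Test fixtures: sequence keys, their corresponding sample ids, and the
# genomic region under test.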
# Test config
def test_config():
with open('config.yaml', 'r') as read_config:
config = read_config.readlines()
assert config[1].split("'")[1] == GR
# Test cleaner
## Create test folder
directory = 'test/testfolder'
if not os.path.exists(directory):
os.makedirs(directory)
## Create test file
open('test/testfile', 'a').close()
## Tests
def test_clean_folders():
cleaner.clean_folders('test/testfolder')
assert os.path.exists('test/testfolder') == False
def test_clean_files():
cleaner.clean_files('test/testfile')
assert os.path.exists('test/testfile') == False
# Test spliter
## Create test folder
directory = 'aligned'
if not os.path.exists(directory):
os.makedirs(directory)
## Tests
def test_spliter():
spliter.spliter('test/test_msa.fasta', KEYS, IDX)
assert sorted(os.listdir('aligned')) == sorted(['1.fasta', '2.fasta', '3.fasta'])
# Test align and map
## Tests
def test_align_and_map():
inputs = os.listdir('aligned')
expected_output = ['aligned_1.fasta', 'aligned_2.fasta', 'aligned_3.fasta']
for file in inputs:
align_and_map.align_and_map(f'aligned/{file}', f'aligned/aligned_{file}', GR)
real_output = [x for x in os.listdir('aligned') if 'aligned' in x]
assert sorted(real_output) == sorted(expected_output)
# Test concatenator
## Tests
def test_concatenator():
inputs = [x for x in sorted(os.listdir('aligned')) if 'aligned' in x]
with open('all_aligned.fasta', 'w') as out_msa:
for file in inputs:
seq = concatenator.concatenator(f'aligned/{file}', KEYS, IDX)
out_msa.write(f'>{seq[0]}\n{seq[1]}\n')
with open('all_aligned.fasta') as this_output:
out = this_output.read()
with open('test/all_aligned.fasta') as expected_output:
e_out = expected_output.read()
assert out == e_out
# Test tree_maker
## Create test folders
directory = 'blast'
if not os.path.exists(directory):
os.makedirs(directory)
directory = 'trees'
if not os.path.exists(directory):
os.makedirs(directory)
## Tests blast_closser
def test_blast_closser():
inputs = [x for x in os.listdir('aligned') if 'aligned' in x]
for file in inputs:
        NAME = file.replace('aligned_', '').replace('.fasta', '')
blast_closser(f'aligned/{file}', NAME)
expected_output = ['blast_1.txt', 'blast_2.txt', 'blast_3.txt']
real_output = [x for x in os.listdir('blast') if 'blast_' in x]
assert sorted(real_output) == sorted(expected_output)
## Tests build_msas
def test_build_msas():
inputs = [x for x in os.listdir('aligned') if 'aligned' in x]
for file in inputs:
        NAME = file.replace('aligned_', '').replace('.fasta', '')
build_msas(f'aligned/{file}', NAME)
expected_output = ['msa_recomb_2.fasta', 'msa_all_2.fasta', 'msa_pure_3.fasta',
'msa_all_3.fasta', 'msa_recomb_3.fasta', 'msa_all_1.fasta', 'msa_pure_1.fasta',
'msa_pure_2.fasta', 'msa_recomb_1.fasta']
real_output = [x for x in os.listdir('trees') if 'msa_' in x]
assert sorted(real_output) == sorted(expected_output)
## Tests tree_maker
def test_tree_maker():
inputs = [x for x in os.listdir('aligned') if 'aligned' in x]
for file in inputs:
        NAME = file.replace('aligned_', '').replace('.fasta', '')
tree_maker(f'aligned/{file}', NAME)
expected_output = ['pure_2.nwk', 'pure_1.nwk', 'all_2.nwk', 'recomb_3.nwk',
'all_1.nwk', 'recomb_2.nwk', 'all_3.nwk', 'pure_3.nwk', 'recomb_1.nwk']
real_output = [x for x in os.listdir('trees') if '.nwk' in x]
assert sorted(real_output) == sorted(expected_output)
# Test blast_recomb
## Tests do_sub_aligns
def test_do_sub_aligns():
inputs = [x for x in os.listdir('aligned') if 'aligned' in x]
for file in inputs:
        NAME = file.replace('aligned_', '').replace('.fasta', '')
do_sub_aligns(f'aligned/{file}', NAME)
expected_output = ['sub_2.fasta', 'sub_3.fasta', 'sub_1.fasta']
real_output = [x for x in os.listdir('blast') if 'sub' in x]
assert sorted(real_output) == sorted(expected_output)
## Tests do_blast_window
def test_do_blast_window():
inputs = [x for x in os.listdir('aligned') if 'aligned' in x]
for file in inputs:
        NAME = file.replace('aligned_', '').replace('.fasta', '')
do_blast_window(f'aligned/{file}', NAME)
expected_output = ['recblast_3.txt', 'recblast_1.txt', 'recblast_2.txt']
real_output = [x for x in os.listdir('blast') if 'rec' in x]
assert sorted(real_output) == sorted(expected_output)
# Test summarize_results
## Tests
def get_clossest_blast():
all_trees_inputs = [x for x in os.listdir('trees') if (x[-4:] == '.nwk') & (x[:4] == 'all_')]
pure_trees_inputs = [x for x in os.listdir('trees') if (x[-4:] == '.nwk') & (x[:5] == 'pure_')]
recomb_trees_inputs = [x for x in os.listdir('trees') if (x[-4:] == '.nwk') & (x[:7] == 'recomb_')]
blast_c = [x for x in os.listdir('blast') if (x[-4:] == '.txt') & (x[:6] == 'blast_')]
blast_inputs = [x for x in os.listdir('blast') if (x[-4:] == '.txt') & (x[:9] == 'recblast_')]
results = {}
for pos, key in enumerate(KEYS):
results[key] = [IDX[pos]]
for blast_res in blast_inputs:
output = process_blast_recomb(blast_res)
results[output[0]] += output[1]
for tree in all_trees_inputs:
output = process_trees(tree)
results[output[0]] += output[1:]
for tree in pure_trees_inputs:
output = process_trees(tree)
results[output[0]] += output[1:]
with open('subtype_results.csv') as this_output:
out = this_output.read()
with open('test/subtype_results.csv') as expected_output:
e_out = expected_output.read()
with open('report_subtype_results.csv') as this_output:
rout = this_output.read()
with open('test/report_subtype_results.csv') as expected_output:
e_rout = expected_output.read()
assert (out == e_out) & (rout == e_rout)
# Test cleaner all
## Tests
def test_delete_all_outputs():
FOLDERS = ['trees','aligned','blast']
    FILES = ['subtype_results.csv', 'report_subtype_results.csv',
             'all_aligned.fasta']
for folder in FOLDERS:
cleaner.clean_folders(folder)
for file in FILES:
cleaner.clean_files(file)
``` |
{
"source": "JoanAzpeitia/lp_sg",
"score": 2
} |
#### File: lp_sg/config/after_project_create.py
```python
def create(sg, project_id, log, **kwargs):
"""
Insert post-project code here
"""
pass
```
#### File: tk-multi-publish2/photoshopcc.basic/publish_photoshop_document.py
```python
import os
import pprint
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class PhotoshopCCDocumentPublishPlugin(HookBaseClass):
"""
Plugin for publishing Photoshop documents in Shotgun.
"""
@property
def icon(self):
"""
Path to an png icon on disk
"""
# look for icon one level up from this hook's folder in "icons" folder
return os.path.join(
self.disk_location,
os.pardir,
"icons",
"publish.png"
)
@property
def name(self):
"""
One line display name describing the plugin
"""
return "Publish to Shotgun"
@property
def description(self):
"""
Verbose, multi-line description of what the plugin does. This can
contain simple html for formatting.
"""
loader_url = "https://support.shotgunsoftware.com/hc/en-us/articles/219033078"
return """
Publishes the file to Shotgun. A <b>Publish</b> entry will be
created in Shotgun which will include a reference to the file's current
path on disk. Other users will be able to access the published file via
the <b><a href='%s'>Loader</a></b> so long as they have access to
the file's location on disk.
If the session has not been saved, validation will fail and a button
will be provided in the logging output to save the file.
<h3>File versioning</h3>
If the filename contains a version number, the process will bump the
file to the next version after publishing.
The <code>version</code> field of the resulting <b>Publish</b> in
Shotgun will also reflect the version number identified in the filename.
        The basic workflow recognizes the following version formats by default:
<ul>
<li><code>filename.v###.ext</code></li>
<li><code>filename_v###.ext</code></li>
<li><code>filename-v###.ext</code></li>
</ul>
After publishing, if a version number is detected in the file, the file
will automatically be saved to the next incremental version number.
For example, <code>filename.v001.ext</code> will be published and copied
to <code>filename.v002.ext</code>
If the next incremental version of the file already exists on disk, the
validation step will produce a warning, and a button will be provided in
the logging output which will allow saving the session to the next
available version number prior to publishing.
<br><br><i>NOTE: any amount of version number padding is supported.</i>
<h3>Overwriting an existing publish</h3>
A file can be published multiple times however only the most recent
publish will be available to other users. Warnings will be provided
during validation if there are previous publishes.
""" % (loader_url,)
# TODO: add link to workflow docs
@property
def settings(self):
"""
Dictionary defining the settings that this plugin expects to receive
through the settings parameter in the accept, validate, publish and
finalize methods.
A dictionary on the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
}
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
return {
"File Types": {
"type": "list",
"default": "[]",
"description": (
"List of file types to include. Each entry in the list "
"is a list in which the first entry is the Shotgun "
"published file type and subsequent entries are file "
"extensions that should be associated.")
},
}
@property
def item_filters(self):
"""
List of item types that this plugin is interested in.
Only items matching entries in this list will be presented to the
        accept() method. Strings can contain glob patterns such as *, for example
["maya.*", "file.maya"]
"""
return ["photoshop.document"]
def accept(self, settings, item):
"""
Method called by the publisher to determine if an item is of any
interest to this plugin. Only items matching the filters defined via the
item_filters property will be presented to this method.
A publish task will be generated for each item accepted here. Returns a
dictionary with the following booleans:
- accepted: Indicates if the plugin is interested in this value at
all. Required.
- enabled: If True, the plugin will be enabled in the UI, otherwise
it will be disabled. Optional, True by default.
- visible: If True, the plugin will be visible in the UI, otherwise
it will be hidden. Optional, True by default.
- checked: If True, the plugin will be checked in the UI, otherwise
it will be unchecked. Optional, True by default.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: dictionary with boolean keys accepted, required and enabled
"""
document = item.properties.get("document")
if not document:
self.logger.warn("Could not determine the document for item")
return {"accepted": False}
path = _document_path(document)
if not path:
# the document has not been saved before (no path determined).
# provide a save button. the document will need to be saved before
# validation will succeed.
self.logger.warn(
"The Photoshop document '%s' has not been saved." %
(document.name,),
extra=self._get_save_as_action(document)
)
self.logger.info(
"Photoshop '%s' plugin accepted document: %s." %
(self.name, document.name)
)
return {
"accepted": True,
"checked": True
}
def validate(self, settings, item):
"""
Validates the given item to check that it is ok to publish.
Returns a boolean to indicate validity.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: True if item is valid, False otherwise.
"""
publisher = self.parent
document = item.properties["document"]
path = _document_path(document)
if not path:
save_error_message = "The Photoshop document '%s' has not been saved." % (document.name,)
# the document still requires saving. provide a save button.
# validation fails.
self.logger.error(
save_error_message,
extra=self._get_save_as_action(document)
)
raise Exception(save_error_message)
# get the path in a normalized state. no trailing separator,
# separators are appropriate for current os, no double separators,
# etc.
sgtk.util.ShotgunPath.normalize(path)
# get the publish name for this file path. this will ensure we get a
# consistent publish name when looking up existing publishes.
publish_name = publisher.util.get_publish_name(path)
# see if there are any other publishes of this path with a status.
# Note the name, context, and path *must* match the values supplied to
# register_publish in the publish phase in order for this to return an
# accurate list of previous publishes of this file.
publishes = publisher.util.get_conflicting_publishes(
item.context,
path,
publish_name,
filters=["sg_status_list", "is_not", None]
)
if publishes:
conflict_info = (
"If you continue, these conflicting publishes will no longer "
"be available to other users via the loader:<br>"
"<pre>%s</pre>" % (pprint.pformat(publishes),)
)
self.logger.warn(
"Found %s conflicting publishes in Shotgun" %
(len(publishes),),
extra={
"action_show_more_info": {
"label": "Show Conflicts",
"tooltip": "Show the conflicting publishes in Shotgun",
"text": conflict_info
}
}
)
# if the file has a version number in it, see if the next version exists
next_version_path = publisher.util.get_next_version_path(path)
if next_version_path and os.path.exists(next_version_path):
# determine the next available version_number. just keep asking for
# the next one until we get one that doesn't exist.
while os.path.exists(next_version_path):
next_version_path = publisher.util.get_next_version_path(
next_version_path)
# now extract the version number of the next available to display
# to the user
version = publisher.util.get_version_number(next_version_path)
engine = publisher.engine
version_error_message = "The next version of this file already exists on disk."
self.logger.error(
version_error_message,
extra={
"action_button": {
"label": "Save to v%s" % (version,),
"tooltip": "Save to the next available version number, "
"v%s" % (version,),
"callback": lambda: engine.save_to_path(document,
next_version_path)
}
}
)
raise Exception(version_error_message)
self.logger.info("A Publish will be created in Shotgun and linked to:")
self.logger.info(" %s" % (path,))
return True
def publish(self, settings, item):
"""
Executes the publish logic for the given item and settings.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
engine = publisher.engine
document = item.properties["document"]
path = _document_path(document)
# get the path in a normalized state. no trailing separator, separators
# are appropriate for current os, no double separators, etc.
path = sgtk.util.ShotgunPath.normalize(path)
# ensure the document is saved
engine.save(document)
# get the publish name for this file path. this will ensure we get a
# consistent name across version publishes of this file.
publish_name = publisher.util.get_publish_name(path)
# extract the version number for publishing. use 1 if no version in path
# also make sure we are handling None and 0 differently
version_number = publisher.util.get_version_number(path)
if version_number is None:
version_number = 1
path_info = publisher.util.get_file_path_components(path)
extension = path_info["extension"]
publish_type = self._get_publish_type(extension, settings)
# arguments for publish registration
self.logger.info("Registering publish...")
publish_data = {
"tk": publisher.sgtk,
"context": item.context,
"comment": item.description,
"path": path,
"name": publish_name,
"version_number": version_number,
"thumbnail_path": item.get_thumbnail_as_path(),
"published_file_type": publish_type,
"dependency_paths": [] # TODO: dependencies
}
# log the publish data for debugging
self.logger.debug(
"Populated Publish data...",
extra={
"action_show_more_info": {
"label": "Publish Data",
"tooltip": "Show the complete Publish data dictionary",
"text": "<pre>%s</pre>" % (pprint.pformat(publish_data),)
}
}
)
# create the publish and stash it in the item properties for other
# plugins to use.
item.properties["sg_publish_data"] = sgtk.util.register_publish(
**publish_data)
# inject the publish path such that children can refer to it when
# updating dependency information
item.properties["sg_publish_path"] = path
self.logger.info("Publish registered!")
# now that we've published. keep a handle on the path that was published
item.properties["path"] = path
def finalize(self, settings, item):
"""
Execute the finalization pass. This pass executes once all the publish
tasks have completed, and can for example be used to version up files.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
document = item.properties.get("document")
# get the data for the publish that was just created in SG
publish_data = item.properties["sg_publish_data"]
# ensure conflicting publishes have their status cleared
publisher.util.clear_status_for_conflicting_publishes(
item.context, publish_data)
self.logger.info(
"Cleared the status of all previous, conflicting publishes")
path = item.properties["path"]
self.logger.info(
"Publish created for file: %s" % (path,),
extra={
"action_show_in_shotgun": {
"label": "Show Publish",
"tooltip": "Open the Publish in Shotgun.",
"entity": publish_data
}
}
)
# insert the path into the properties
item.properties["next_version_path"] = self._bump_file_version(
document, path)
def _bump_file_version(self, document, path):
"""
Save the supplied path to the next version on disk.
"""
publisher = self.parent
version_number = publisher.util.get_version_number(path)
if version_number is None:
self.logger.debug(
"No version number detected in the publish path. "
"Skipping the bump file version step."
)
return None
self.logger.info("Incrementing session file version number...")
next_version_path = publisher.util.get_next_version_path(path)
# nothing to do if the next version path can't be determined or if it
# already exists.
if not next_version_path:
self.logger.warning("Could not determine the next version path.")
return None
elif os.path.exists(next_version_path):
self.logger.warning(
"The next version of the path already exists",
extra={
"action_show_folder": {
"path": next_version_path
}
}
)
return None
# save the session to the new path
engine = publisher.engine
engine.save_to_path(document, next_version_path)
self.logger.info("Session saved as: %s" % (next_version_path,))
return next_version_path
def _get_publish_type(self, extension, settings):
"""
Get a publish type for the supplied extension and publish settings.
:param extension: The file extension to find a publish type for
:param settings: The publish settings defining the publish types
:return: A publish type or None if one could not be found.
"""
# ensure lowercase and no dot
extension = extension.lstrip(".").lower()
for type_def in settings["File Types"].value:
publish_type = type_def[0]
file_extensions = type_def[1:]
if extension in file_extensions:
# found a matching type in settings. use it!
return publish_type
# no publish type identified. fall back to regular image type
return "Image"
def _get_save_as_action(self, document):
"""
Simple helper for returning a log action dict for saving the session
"""
engine = self.parent.engine
return {
"action_button": {
"label": "Save As...",
"tooltip": "Save the current session",
"callback": lambda: engine.save_as(document)
}
}
def _document_path(document):
"""
Returns the path on disk to the supplied document. May be ``None`` if the
document has not been saved.
"""
try:
path = document.fullName.fsName
except RuntimeError:
path = None
return path
```
#### File: tk_multi_publish2/context_widget/context_widget.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.context_editor_widget import Ui_ContextWidget
# framework imports
shotgun_fields = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "shotgun_fields")
shotgun_menus = sgtk.platform.import_framework(
"tk-framework-qtwidgets", "shotgun_menus")
shotgun_globals = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_globals")
settings = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "settings")
logger = sgtk.platform.get_logger(__name__)
# fields required to create a context from a task entity without falling back to
# a SG query
TASK_QUERY_FIELDS = [
"type",
"id",
"content",
"project",
"entity",
"step"
]
class ContextWidget(QtGui.QWidget):
"""
Widget which represents the current context and allows the user to search
for a different context via search completer. A menu is also provided for
recent contexts as well as tasks assigned to the user.
Emits a context_changed signal when the user selects a new context
"""
# emitted when a settings button is clicked on a node
context_changed = QtCore.Signal(object)
def __init__(self, parent):
"""
Initialize the widget
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(ContextWidget, self).__init__(parent)
self._bundle = sgtk.platform.current_bundle()
project = self._bundle.context.project
# get instance of user settings to save/restore values across sessions
self._settings = settings.UserSettings(self._bundle)
# the key we'll use to store/retrieve recent contexts via user settings
self._settings_recent_contexts_key = "%s_recent_contexts_%s" % (
self._bundle.name, project["id"])
# we will do a bg query that requires an id to catch results
self._schema_query_id = None
# another query to get all tasks assigned to the current user
self._my_tasks_query_id = None
# and a query for related tasks for a given context
self._related_tasks_query_id = None
# keep an in-memory cache of tasks for a given entity to prevent
# unnecessary lookups
self._related_tasks_cache = {}
# keep a handle on the current context
self._context = None
# also keep a handle on the task manager used by completer and for
# querying shotgun in the bg
self._task_manager = None
# menu for recent and user contexts
self._task_menu = shotgun_menus.ShotgunMenu(self)
self._task_menu.setObjectName("context_menu")
self._task_menu.addAction("Loading...")
# keep a handle on all actions created. the my tasks menu will be
# constant, but the recents menu will be dynamic. so we build the menu
# just before it is shown. these lists hold the QActions for each
# group of contexts to show in the menu
self._menu_actions = {
"Related": [],
"My Tasks": [],
"Recent": []
}
# set up the UI
self.ui = Ui_ContextWidget()
self.ui.setupUi(self)
def eventFilter(self, widget, event):
"""
Filter out and handle some key/click events on the search widgets.
"""
key_event = QtCore.QEvent.KeyPress
click_event = QtCore.QEvent.MouseButtonRelease
if widget == self.ui.task_display:
if event.type() == click_event:
# user clicked on the task display, show the search widget
self._manual_task_search_toggle(True)
return True
elif widget == self.ui.task_search:
if event.type() == key_event:
if event.key() == QtCore.Qt.Key_Escape:
# user escaped in the task search, show the display
self._manual_task_search_toggle(False)
return True
elif event.key() in [
QtCore.Qt.Key_Tab,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Enter,
]:
# user hit tab/enter/return in search. go with the currently
# highlighted item or the first one
result = \
self.ui.task_search.completer().get_current_result() or\
self.ui.task_search.completer().get_first_result()
if result:
self._on_entity_activated(
result["type"],
result["id"],
result["name"],
)
elif widget == self.ui.link_display:
if event.type() == click_event:
# user clicked on the link display, show the search widget
self._manual_link_search_toggle(True)
return True
elif widget == self.ui.link_search:
if event.type() == key_event:
if event.key() == QtCore.Qt.Key_Escape:
# user escaped in the task search, show the display
self._manual_link_search_toggle(False)
return True
elif event.key() in [
QtCore.Qt.Key_Tab,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Enter,
]:
# user hit tab/enter/return in search. go with the currently
# highlighted item or the first one
result = \
self.ui.link_search.completer().get_current_result() or\
self.ui.link_search.completer().get_first_result()
if result:
self._on_entity_activated(
result["type"],
result["id"],
result["name"],
)
return False
def save_recent_contexts(self):
"""
Should be called by the parent widget, typically when the dialog closes,
to ensure the recent contexts are saved to disk when closing.
"""
        # build a list of serialized recent contexts. we grab all the QActions
        # from the recents list and serialize them.
serialized_contexts = []
for recent_action in self._menu_actions["Recent"]:
recent_context = recent_action.data()
# don't include the user credentials in the serialized context as
# it may cause issues with authentication when deserializing
serialized_context = recent_context.serialize(
with_user_credentials=False)
serialized_contexts.append(serialized_context)
logger.debug("Storing serialized 'Recent' contexts.")
# store the recent contexts on disk. the scope is per-project
self._settings.store(
self._settings_recent_contexts_key,
serialized_contexts,
scope=settings.UserSettings.SCOPE_PROJECT
)
def set_context(self, context, task_display_override=None,
link_display_override=None):
"""
Set the context to display in the widget.
The initial display values can be overridden via the task and link
override args.
"""
logger.debug("Setting context to: %s" % (context,))
# clear any related tasks from the previous context
self._menu_actions["Related"] = []
self._context = context
self._show_context(
context,
task_display_override=task_display_override,
link_display_override=link_display_override
)
# ensure the new context is added to the list of recents.
# TODO: we should only update recents after publishing. but until we
# have the ability to easily assign a context to multiple publish items
# simultaneously, this provides a nice mechanism for quick assignment
# in the UI.
if context:
self._add_to_recents(context)
def set_up(self, task_manager):
"""
Handles initial set up of the widget. Includes setting up menu, running
any background set up tasks, etc.
:param task_manager:
:return:
"""
logger.debug("Setting up the UI...")
self._task_manager = task_manager
# attach the context menu
self.ui.task_menu_btn.setMenu(self._task_menu)
self._task_menu.aboutToShow.connect(
self._on_about_to_show_contexts_menu)
# setup the search toggle
self.ui.task_search_btn.toggled.connect(self._on_task_search_toggled)
self.ui.task_search.hide()
# setup the search toggle
self.ui.link_search_btn.toggled.connect(self._on_link_search_toggled)
self.ui.link_search.hide()
# set up the task manager to the task search widget
self.ui.task_search.set_placeholder_text("Search for Tasks...")
self.ui.task_search.set_bg_task_manager(task_manager)
self.ui.task_search.entity_activated.connect(
self._on_entity_activated)
# save as above but for the link search widget
self.ui.link_search.set_placeholder_text("Search for entity link...")
self.ui.link_search.set_bg_task_manager(task_manager)
self.ui.link_search.entity_activated.connect(
self._on_entity_activated
)
# set up event filters for the task/link display labels so that when
# clicked, they go directly to an edit state
self.ui.task_display.installEventFilter(self)
self.ui.link_display.installEventFilter(self)
# setup event filters for the task/link search inputs so that when
# certain keys are pressed, the widget can react to it properly
self.ui.task_search.installEventFilter(self)
self.ui.link_search.installEventFilter(self)
# we need to limit the search completer to entity types that are valid
# for ``PublishedFile.entity`` field links. To do this, query the
# shotgun schema to get a list of these entity types. We use the current
# project schema if we're in a project. We do this as a background query
# via the supplied task manager.
# connect to the task manager signals so that we can get the results
task_manager.task_completed.connect(self._on_task_completed)
task_manager.task_failed.connect(self._on_task_failed)
# Query Shotgun for valid entity types for PublishedFile.entity field
self._schema_query_id = task_manager.add_task(
_query_publishedfile_entity_schema)
# query all my assigned tasks in a bg task
self._my_tasks_query_id = task_manager.add_task(_query_my_tasks)
# get recent contexts from user settings
self._get_recent_contexts()
@property
def context_label(self):
"""
The label for the context widget.
"""
return self.ui.label
def enable_editing(self, enabled, message=None):
"""
Show/hide the input widgets and display a message in the context label.
"""
if enabled:
self.ui.edit_widget.show()
else:
self.ui.edit_widget.hide()
self.context_label.setText(message or "")
def _add_to_recents(self, context):
"""
Adds the supplied context as an action in the list of recents context
actions
"""
# don't add a "project" context to recents. we shouldn't encourage it
if context.project and not context.entity and not context.task:
return
logger.debug("Adding context to 'Recents': %s" % (context,))
recent_actions = self._menu_actions["Recent"]
matching_indexes = []
for i, recent_action in enumerate(recent_actions):
recent_context = recent_action.data()
if recent_context == context:
# contexts support __eq__ so this should be enough for comparing
matching_indexes.append(i)
if matching_indexes:
# context exists in recent list in one or more places. remove the
# QAction(s) and put one of them at the front of the list
recent_action = None
            # pop in reverse order so earlier indexes stay valid as we go
            for match_index in reversed(matching_indexes):
                recent_action = recent_actions.pop(match_index)
else:
# the context does not exist in the recents. add it
recent_action = self._get_qaction_for_context(context)
if recent_action:
recent_actions.insert(0, recent_action)
# only keep the 5 most recent
self._menu_actions["Recent"] = recent_actions[:5]
def _build_actions(self, tasks, group_name, sort=False,
exclude_current_context=False):
"""
Build a list of actions from the supplied tasks. The actions are stored
in the instance's _menu_actions dictionary and used to build the menu.
The actions will be sorted by name if ``sort`` is set to True. If the
``exclude_current_context`` is supplied, the widget's current context
will not be included in the list of actions.
"""
publisher = sgtk.platform.current_bundle()
if not tasks:
logger.debug("No tasks supplied for group: %s" % (group_name,))
return
logger.debug("Building actions for group: %s" % (group_name,))
task_actions = []
for task in tasks:
task_context = publisher.sgtk.context_from_entity_dictionary(task)
# the context from dict method clears all unnecessary fields
# from the task upon creation. now that we have the context,
# update the fields with the queried task fields
task_context.task.update(task)
# don't include the current context in this list of actions
if (self._context and
exclude_current_context and
task_context == self._context):
continue
# build the action and add it to the list
task_action = self._get_qaction_for_context(task_context)
task_actions.append(task_action)
# sort on the action text if requested
if sort:
task_actions.sort(key=lambda a: a.text())
# store the actions list for use when building the menu
self._menu_actions[group_name] = task_actions
def _get_qaction_for_context(self, context):
"""
Helper method to build a QAction for the supplied context.
"""
# get the display string and icon path for the context
context_display = _get_context_display(context, plain_text=True)
icon_path = _get_context_icon_path(context)
# construct the action
action = QtGui.QAction(self)
action.setText(context_display)
action.setIcon(QtGui.QIcon(icon_path))
action.setData(context)
action.triggered.connect(
lambda c=context: self._on_context_activated(c))
return action
def _get_recent_contexts(self):
"""
Pull the stored, serialized contexts from user settings and populate the
Recent actions list for use when building the contexts menu.
"""
logger.debug("Retrieving stored 'Recent' actions from disk...")
# get the serialized contexts from disk
serialized_recent_contexts = self._settings.retrieve(
self._settings_recent_contexts_key,
default=[],
scope=settings.UserSettings.SCOPE_PROJECT
)
# turn these into QActions to add to the list of recents in the menu
for serialized_context in serialized_recent_contexts:
try:
context = sgtk.Context.deserialize(serialized_context)
            except Exception as e:
                logger.debug("Unable to deserialize stored context: %s" % (e,))
else:
recent_action = self._get_qaction_for_context(context)
self._menu_actions["Recent"].append(recent_action)
def _manual_task_search_toggle(self, checked):
"""
        Small wrapper to manually toggle the task searching on/off
"""
self.ui.task_search_btn.setChecked(checked)
self.ui.task_search_btn.setDown(checked)
def _manual_link_search_toggle(self, checked):
"""
        Small wrapper to manually toggle the link searching on/off
"""
self.ui.link_search_btn.setChecked(checked)
self.ui.link_search_btn.setDown(checked)
def _on_about_to_show_contexts_menu(self):
"""
Slot called just before the contexts menu is shown. It handles
organizing the actions into menus.
"""
# clear and rebuild the menu since the recents/related sections are
# dynamic.
self._task_menu.clear()
publisher = sgtk.platform.current_bundle()
project = publisher.context.project
# ---- build the "Related" menu
related_actions = self._menu_actions["Related"]
if related_actions:
self._task_menu.add_group(related_actions, "Related")
# ---- build the "My Tasks" menu
        # TODO: here we're organizing the tasks by status. since these contexts
        # are static for a publish session, we could (perhaps should) organize
# them once (elsewhere) and simply construct the menus here. For now,
# this simplifies the logic since `self._menu_actions` is just a
# dictionary of flat lists of QActions.
my_tasks_actions = self._menu_actions["My Tasks"]
if my_tasks_actions:
status_groups = {}
# organize the tasks by status
for task_action in my_tasks_actions:
context = task_action.data()
task = context.task
status_code = task.get("sg_status_list", "ip")
status_groups.setdefault(status_code, [])
status_groups[status_code].append(task_action)
# special case the "ip" tasks and show them at the top level
ip_tasks = status_groups.get("ip", [])
top_level_my_tasks_actions = ip_tasks
# create submenus for everything else
for status_code in status_groups.keys():
if status_code == "ip":
# skipping special cased "in progress" tasks
continue
# get the display name for the status code
status_display = shotgun_globals.get_status_display_name(
status_code,
project.get("id")
)
# get the actions for this code
status_actions = status_groups[status_code]
# build the submenu for this status
status_menu = shotgun_menus.ShotgunMenu(self)
status_menu.setTitle(status_display)
status_menu.add_group(status_actions, status_display)
# add the submenu to the top level my tasks menu
top_level_my_tasks_actions.append(status_menu)
self._task_menu.add_group(top_level_my_tasks_actions, "My Tasks")
# ---- build the "Recent" menu
recent_actions = self._menu_actions["Recent"]
if recent_actions:
self._task_menu.add_group(recent_actions, "Recent")
# if there are no menu items, show a message
if not self._task_menu.actions():
self._task_menu.addAction("No Tasks to show")
def _on_context_activated(self, context):
"""
Called when a new context is set via the menu or one of the completers.
"""
logger.debug("Context changed to: %s" % (context,))
# update the widget to display the new context and alert listeners that
# a new context was selected
self._show_context(context)
self.context_changed.emit(context)
def _on_entity_activated(self, entity_type, entity_id, entity_name):
"""
Slot called when an entity is selected via one of the search completers.
"""
publisher = sgtk.platform.current_bundle()
context = publisher.sgtk.context_from_entity(entity_type, entity_id)
self._on_context_activated(context)
def _on_task_search_toggled(self, checked):
"""
Slot called when the user clicks the task display or the task search
button.
If checked, hides the task display label and shows the search completer.
Also populates the completer with context info to help the user.
If not checked, hides the search info and shows the task display widget.
"""
if checked:
# hide the display, show the search
self.ui.task_display.hide()
self.ui.task_menu_btn.hide()
self.ui.task_search.show()
self.ui.task_search.setFocus()
# populate and show the completer
if self._context:
search_str = ""
if self._context.entity:
search_str = self._context.entity["name"]
if self._context.task:
search_str = "%s %s " % (
search_str, self._context.task["name"])
self.ui.task_search.setText(search_str)
self.ui.task_search.completer().search(search_str)
self.ui.task_search.completer().complete()
else:
# hide the search, show the display
self.ui.task_display.show()
self.ui.task_menu_btn.show()
self.ui.task_search.hide()
def _on_link_search_toggled(self, checked):
"""
Slot called when the user clicks the link display or the link search
button.
If checked, hides the link display label and shows the search completer.
Also populates the completer with context info to help the user.
If not checked, hides the search info and shows the link display widget.
"""
if checked:
# hide the display, show the search
self.ui.link_display.hide()
self.ui.link_search.show()
self.ui.link_search.setFocus()
# populate and show the completer
if self._context:
search_str = ""
if self._context.entity:
search_str = self._context.entity["name"]
if search_str:
self.ui.link_search.setText(search_str)
self.ui.link_search.completer().search(search_str)
self.ui.link_search.completer().complete()
else:
# hide the search, show the display
self.ui.link_search.hide()
self.ui.link_display.show()
def _on_task_completed(self, task_id, group, result):
"""
        Slot called when a background task completes. Dispatches methods to
handle the results depending on which task was completed.
"""
# queried valid entity types for PublishedFile.entity field
if task_id == self._schema_query_id:
logger.debug("Completed query of PublishedFile.entity schema")
self._restrict_searchable_entity_types(result)
# queried the current user's tasks
elif task_id == self._my_tasks_query_id:
logger.debug("Completed query for current user tasks.")
self._build_actions(result, "My Tasks")
# queried tasks related to the currently selected link
elif task_id == self._related_tasks_query_id:
logger.debug("Completed query for the current user's Tasks.")
self._build_actions(
result,
"Related",
sort=True,
exclude_current_context=True
)
def _on_task_failed(self, task_id, group, message, traceback_str):
"""
If the schema query fails, add a log warning. It's not catastrophic, but
it shouldn't fail, so we need to make a record of it.
"""
# failed to query valid entity types for PublishedFile.entity field
if task_id == self._schema_query_id:
logger.warn(
"Unable to query valid entity types for PublishedFile.entity."
"Error Message: %s.\n%s" % (message, traceback_str)
)
# failed to query the current user's tasks
elif task_id == self._my_tasks_query_id:
logger.warn(
"Unable to query tasks for the current Shotgun user."
"Error Message: %s.\n%s" % (message, traceback_str)
)
# failed to query tasks related to the currently selected link
elif task_id == self._related_tasks_query_id:
logger.warn(
"Unable to related tasks for the selected entity link."
"Error Message: %s.\n%s" % (message, traceback_str)
)
def _query_related_tasks(self, context):
"""
Method called via background task to query tasks related to the current
context's entity.
"""
if not context.entity:
return []
logger.debug("Querying related tasks for context: %s" % (context,))
# unique id for entity to use as local cache lookup
entity_id = "%s_%s" % (
context.entity["type"],
context.entity["id"],
)
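        # e.g. a Shot with id 1234 yields the cache key "Shot_1234"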
# if we've queried tasks for this entity before, just return those
if entity_id in self._related_tasks_cache:
return self._related_tasks_cache[entity_id]
publisher = sgtk.platform.current_bundle()
# query the tasks for the entity
tasks = publisher.shotgun.find(
"Task",
[["entity", "is", context.entity]],
# query all fields required to create a context from a task entity
# dictionary. see sgtk api `context_from_entity_dictionary`
fields=TASK_QUERY_FIELDS
)
# cache the tasks
self._related_tasks_cache[entity_id] = tasks
return tasks
def _restrict_searchable_entity_types(self, published_file_entity_schema):
"""
Called after successful lookup of valid PublishedFile.entity types.
The supplied field schema contains the valid entity names. Use these to
restrict the search completers.
"""
# drill down into the schema to retrieve the valid types for the
# field. this is ugly, but will ensure we get a list no matter what
entity_types = published_file_entity_schema. \
get("entity", {}). \
get("properties", {}). \
get("valid_types", {}). \
get("value", [])
        # always include Project. tasks are handled by a separate completer below
entity_types.append("Project")
logger.debug(
"Limiting context link completer to these entities: %s" %
(entity_types,)
)
# construct a dictionary that the search widget expects for
# filtering. This is a dictionary with the entity types as keys and
# values a list of search filters. We don't have any filters, so we
# just use empty list.
entity_types_dict = dict((k, []) for k in entity_types)
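        # e.g. ["Asset", "Shot", "Project"] becomes
        # {"Asset": [], "Shot": [], "Project": []}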
logger.debug(
"Setting searchable entity types to: %s" % (entity_types_dict,))
# update the types for the link completer
self.ui.link_search.set_searchable_entity_types(
entity_types_dict)
# limit the task search to tasks only.
# TODO: limit to tasks linked to entities of the types queried above
task_types_dict = {"Task": []}
# now update the types for the task completer
self.ui.task_search.set_searchable_entity_types(task_types_dict)
def _show_context(self, context, task_display_override=None,
link_display_override=None):
"""
Show the supplied context in the UI.
"""
if task_display_override:
task_display = task_display_override
else:
task_display = _get_task_display(context)
if link_display_override:
link_display = link_display_override
else:
link_display = _get_link_display(context)
# update the task display/state
self.ui.task_display.setText(task_display)
self.ui.task_search_btn.setChecked(False)
self.ui.task_search_btn.setDown(False)
# update the link display/state
self.ui.link_display.setText(link_display)
self.ui.link_search_btn.setChecked(False)
self.ui.link_search_btn.setDown(False)
if context:
# given the context, populate any related tasks for the menu
self._related_tasks_query_id = self._task_manager.add_task(
self._query_related_tasks,
task_args=[context]
)
def _get_task_display(context, plain_text=False):
"""
Build a display string for the task of the supplied context.
By default, return rich text with an entity icon. If ``plain_text`` is True,
simply return the name of the task.
"""
if not context or not context.task:
return ""
task_name = context.task["name"]
if plain_text:
# just the name
display_name = task_name
else:
# return the name with the appropriate icon in front
task_type = context.task["type"]
task_icon = "<img src='%s'>" % (
shotgun_globals.get_entity_type_icon_url(task_type),)
display_name = "%s %s" % (task_icon, task_name)
return display_name
def _get_link_display(context, plain_text=False):
"""
Build a display string for the link of the supplied context.
By default, return rich text with an entity icon. If ``plain_text`` is True,
simply return the name of the link.
"""
if not context:
return ""
entity = context.entity or context.project or None
if not entity:
return ""
entity_name = entity["name"]
if plain_text:
# just the name
display_name = entity_name
else:
# return the name with the appropriate icon in front
entity_type = entity["type"]
entity_icon = "<img src='%s'>" % (
shotgun_globals.get_entity_type_icon_url(entity_type),)
display_name = "%s %s" % (entity_icon, entity_name)
return display_name
def _get_context_display(context, plain_text=False):
"""
Return the full display string for the supplied context.
By default, return rich text with entity icons. If ``plain_text`` is True,
simply return the display text for link > task.
"""
# individual display of task/link
task_display = _get_task_display(context, plain_text=plain_text)
link_display = _get_link_display(context, plain_text=plain_text)
# always show link (entity)
display_name = link_display
# include task if there is one
if task_display:
if plain_text:
display_name = "%s > %s" % (display_name, task_display)
else:
display_name = """
%s <b><code>></code></b> %s
""" % (link_display, task_display)
return display_name
def _get_context_icon_path(context):
"""
Get the most appropriate icon for a given context.
"""
# We use the context's entity icon primarily since the task icon is a
# checkmark and looks wonky in menus (where this is primarily called from).
if context.entity:
entity_type = context.entity["type"]
return shotgun_globals.get_entity_type_icon_url(entity_type)
elif context.task:
return shotgun_globals.get_entity_type_icon_url("Task")
elif context.project:
return shotgun_globals.get_entity_type_icon_url("Project")
else:
return ""
def _query_my_tasks():
"""
Called via bg task to query SG for tasks assigned to the current user.
"""
publisher = sgtk.platform.current_bundle()
project = publisher.context.project
current_user = publisher.context.user
logger.debug("Querying tasks for the curren user: %s" % (current_user,))
filters = [
["project", "is", project],
["task_assignees", "is", current_user],
]
order = [
{"field_name": "entity", "direction": "asc"},
{"field_name": "content", "direction": "asc"}
]
# query all fields required to create a context from a task entity
# dictionary. see sgtk api `context_from_entity_dictionary`
    # copy so the module-level TASK_QUERY_FIELDS constant isn't mutated
    task_fields = TASK_QUERY_FIELDS + ["sg_status_list"]
return publisher.shotgun.find(
"Task",
filters,
fields=task_fields,
order=order
)
def _query_publishedfile_entity_schema():
"""
Called as bg task to query SG for the field schema for PublishedFile.entity
"""
logger.debug("Querying PublishedFile.entity schema...")
publisher = sgtk.platform.current_bundle()
project = publisher.context.project
return publisher.shotgun.schema_field_read(
"PublishedFile",
field_name="entity",
project_entity=project
)
```
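The status-grouping step in `_on_about_to_show_contexts_menu` is worth seeing in isolation: it is a plain `setdefault` bucketing pass, with "ip" (in progress) tasks promoted to the top level. A small sketch with made-up task dictionaries:
```python
# made-up task dictionaries standing in for the queried QAction data
tasks = [
    {"content": "model", "sg_status_list": "ip"},
    {"content": "rig", "sg_status_list": "wtg"},
    {"content": "anim", "sg_status_list": "ip"},
]

status_groups = {}
for task in tasks:
    status_code = task.get("sg_status_list", "ip")
    status_groups.setdefault(status_code, []).append(task)

# "ip" tasks go to the top level; every other status gets a submenu
top_level = status_groups.get("ip", [])
submenu_codes = [code for code in status_groups if code != "ip"]
assert len(top_level) == 2
assert submenu_codes == ["wtg"]
```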
#### File: python/tk_multi_publish2/__init__.py
```python
from sgtk.platform.qt import QtCore, QtGui
import util
def show_dialog(app):
"""
Show the main dialog ui
:param app: The parent App
"""
# defer imports so that the app works gracefully in batch modes
from .dialog import AppDialog
# start ui
app.engine.show_dialog("Shotgun Publish", app, AppDialog)
```
#### File: tk_multi_publish2/progress/progress_details_widget.py
```python
import os
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.progress_details_widget import Ui_ProgressDetailsWidget
logger = sgtk.platform.get_logger(__name__)
class ProgressDetailsWidget(QtGui.QWidget):
"""
Progress reporting and logging
"""
copy_to_clipboard_clicked = QtCore.Signal()
def __init__(self, progress_widget, parent):
"""
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(ProgressDetailsWidget, self).__init__(parent)
self._bundle = sgtk.platform.current_bundle()
# set up the UI
self.ui = Ui_ProgressDetailsWidget()
self.ui.setupUi(self)
self._progress_widget = progress_widget
# hook up a listener to the parent window so this widget
# follows along when the parent window changes size
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter)
# dispatch clipboard signal
self.ui.copy_log_button.clicked.connect(self.copy_to_clipboard_clicked.emit)
self.ui.close.clicked.connect(self.toggle)
# make sure the first column takes up as much space as poss.
if self._bundle.engine.has_qt5:
# see http://doc.qt.io/qt-5/qheaderview-obsolete.html#setResizeMode
self.ui.log_tree.header().setSectionResizeMode(0, QtGui.QHeaderView.Stretch)
else:
self.ui.log_tree.header().setResizeMode(0, QtGui.QHeaderView.Stretch)
self.ui.log_tree.setIndentation(8)
self.hide()
def toggle(self):
"""
Toggles visibility on and off
"""
if self.isVisible():
self.hide()
else:
self.show()
def show(self):
super(ProgressDetailsWidget, self).show()
self.__recompute_position()
self.ui.log_tree.expandAll()
@property
def log_tree(self):
"""
The tree widget which holds the log items
"""
return self.ui.log_tree
def __recompute_position(self):
"""
Adjust geometry of the widget based on progress widget
"""
pos = self._progress_widget.pos()
self.setGeometry(QtCore.QRect(
pos.x(),
5,
self._progress_widget.width(),
pos.y() - 5
))
def _on_parent_resized(self):
"""
Special slot hooked up to the event filter.
        Called whenever the associated widget is resized.
"""
self.__recompute_position()
class ResizeEventFilter(QtCore.QObject):
"""
Utility and helper.
Event filter which emits a resized signal whenever
the monitored widget resizes.
You use it like this:
    # create the filter object. Typically, it's
    # easiest to parent it to the object that is
# being monitored (in this case self.ui.thumbnail)
filter = ResizeEventFilter(self.ui.thumbnail)
# now set up a signal/slot connection so that the
# __on_thumb_resized slot gets called every time
# the widget is resized
filter.resized.connect(self.__on_thumb_resized)
# finally, install the event filter into the QT
# event system
self.ui.thumbnail.installEventFilter(filter)
"""
resized = QtCore.Signal()
def eventFilter(self, obj, event):
"""
Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
"""
# peek at the message
if event.type() == QtCore.QEvent.Resize:
# re-broadcast any resize events
self.resized.emit()
# pass it on!
return False
```
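The `ProgressDetailsWidget` above follows its parent's geometry via `ResizeEventFilter`. The same follow-the-parent pattern can be collapsed into the widget itself; a minimal sketch, where `FollowerLabel` is a hypothetical example widget, not part of the app:
```python
from sgtk.platform.qt import QtCore, QtGui

class FollowerLabel(QtGui.QLabel):
    """Label that re-anchors itself whenever the watched widget resizes."""
    def __init__(self, parent):
        super(FollowerLabel, self).__init__(parent)
        # watch the parent directly; no separate filter object needed
        parent.installEventFilter(self)

    def eventFilter(self, obj, event):
        if event.type() == QtCore.QEvent.Resize:
            # pin ourselves to the parent's bottom-left corner
            self.move(0, obj.height() - self.height())
        # never swallow the event
        return False
```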
#### File: tk_multi_publish2/progress/publish_logging.py
```python
import sgtk
import logging
logger = sgtk.platform.get_logger(__name__)
class PublishLogHandler(logging.Handler):
"""
Publish Log handler that links up a handler to a
qt tree for display.
"""
def __init__(self, progress_widget):
"""
        :param progress_widget: The progress widget to send log messages to
"""
# avoiding super in order to be py25-compatible
logging.Handler.__init__(self)
self._progress_widget = progress_widget
def emit(self, record):
"""
Emit a log message back to the engine logging callback.
:param record: std log record to handle logging for
"""
# look for actions attached to the record
if hasattr(record, "action_button"):
# generic button
action = record.action_button
action["type"] = "button"
elif hasattr(record, "action_show_folder"):
# show folder in file browser
action = record.action_show_folder
action["type"] = "show_folder"
elif hasattr(record, "action_show_in_shotgun"):
# show entity in shotgun
action = record.action_show_in_shotgun
action["type"] = "show_in_shotgun"
elif hasattr(record, "action_show_more_info"):
# show additional supplied data in a popup
action = record.action_show_more_info
action["type"] = "show_more_info"
elif hasattr(record, "action_open_url"):
# open a url in a browser
action = record.action_open_url
action["type"] = "open_url"
else:
action = None
# for simplicity, add a 'basename' property to the record to
# only contain the leaf part of the logging name
# sgtk.env.asset.tk-maya -> tk-maya
# sgtk.env.asset.tk-maya.tk-multi-publish -> tk-multi-publish
record.basename = record.name.rsplit(".", 1)[-1]
if record.levelno < logging.ERROR and record.levelno > logging.INFO:
status = self._progress_widget.WARNING
elif record.levelno > logging.WARNING:
status = self._progress_widget.ERROR
elif record.levelno < logging.INFO:
status = self._progress_widget.DEBUG
else:
status = self._progress_widget.INFO
# request that the log manager processes the message
self._progress_widget.process_log_message(record.msg, status, action)
class PublishLogWrapper(object):
"""
Convenience object that wraps around a logger and a handler
that can be used for publishing.
"""
def __init__(self, progress_widget):
"""
        :param progress_widget: The progress widget to send log messages to
"""
self._bundle = sgtk.platform.current_bundle()
# set up a logger
full_log_path = "%s.hook" % self._bundle.logger.name
self._logger = logging.getLogger(full_log_path)
        # seal the logger - this prevents log messages emitted by the
        # publish hooks from propagating up the hierarchy and being
        # picked up by engine loggers. we do this because plugin info
        # would look odd in, for example, the maya console. more
        # importantly, it would appear in the shotgun engine in a
        # dialog window after app exit, which is non-ideal.
self._logger.propagate = False
self._handler = PublishLogHandler(progress_widget)
# and handle it in the UI
self._logger.addHandler(self._handler)
logger.debug("Installed log handler for publishing @ %s" % full_log_path)
# log level follows the global settings
if sgtk.LogManager().global_debug:
self._handler.setLevel(logging.DEBUG)
else:
self._handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"[%(levelname)s %(basename)s] %(message)s"
)
self._handler.setFormatter(formatter)
def shut_down(self):
"""
Deallocate logging
"""
self._logger.removeHandler(self._handler)
@property
def logger(self):
"""
The associated logger
"""
return self._logger
```
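The level-to-status banding in `PublishLogHandler.emit` can be checked as a pure function: anything strictly between INFO (20) and ERROR (40) is a warning, which covers the standard WARNING level (30). A standalone sketch, with string constants standing in for the progress widget's status attributes:
```python
import logging

# stand-ins for the progress widget's status constants
DEBUG, INFO, WARNING, ERROR = "debug", "info", "warning", "error"

def status_for_level(levelno):
    if logging.INFO < levelno < logging.ERROR:
        return WARNING
    elif levelno > logging.WARNING:
        return ERROR
    elif levelno < logging.INFO:
        return DEBUG
    return INFO

assert status_for_level(logging.WARNING) == WARNING
assert status_for_level(logging.ERROR) == ERROR
assert status_for_level(logging.DEBUG) == DEBUG
assert status_for_level(logging.INFO) == INFO
```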
#### File: python/tk_multi_publish2/publish_description_edit.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
logger = sgtk.platform.get_logger(__name__)
class PublishDescriptionEdit(QtGui.QPlainTextEdit):
"""
Widget that holds the summary description
"""
def __init__(self, parent):
"""
Constructor
:param parent: QT parent object
"""
QtGui.QPlainTextEdit.__init__(self, parent)
self._show_placeholder = False
        # this is the placeholder text displayed when multiple values are
        # detected. it is painted in the top left corner of the widget (see
        # paintEvent below) rather than stored as regular widget text
self._placeholder_text = "<multiple values>"
def paintEvent(self, paint_event):
"""
        Paints the plain text editor and draws a placeholder in the top left
        corner when multiple values are detected.
"""
        # if the box does not have focus, draw the <multiple values>
        # placeholder when self._show_placeholder is true, even if the
        # widget has text
        if not self.hasFocus() and self._show_placeholder:
p = QtGui.QPainter(self.viewport())
# right placeholder note in blue
col = QtGui.QColor(24,167,227) # blue
p.setPen(QtGui.QPen(col))
p.setBrush(QtGui.QBrush(col))
            p.drawText(self.rect(), QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft, self._placeholder_text)
else:
QtGui.QPlainTextEdit.paintEvent(self, paint_event)
```
#### File: python/tk_multi_publish2/settings_widget.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_view = sgtk.platform.import_framework("tk-framework-qtwidgets", "views")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
from .ui.settings_widget import Ui_SettingsWidget
class FieldNameLabel(QtGui.QLabel):
"""
Wrapper class so that we can style based on class
"""
pass
class FieldValueLabel(QtGui.QLabel):
"""
Wrapper class so that we can style based on class
"""
pass
class SettingsWidget(QtGui.QWidget):
"""
    Widget that shows shotgun data as name-value pairs in a top-down fashion:
Status: In Progress
Description: Foo Bar
Created By: <NAME>
    The widget constructs its contents using QLabels, which may contain
    clickable hyperlink fields to linked entities.
"""
link_activated = QtCore.Signal(str)
def __init__(self, parent):
"""
Constructor
:param parent: QT parent object
"""
QtGui.QWidget.__init__(self, parent)
self._app = sgtk.platform.current_bundle()
# now load in the UI that was created in the UI designer
self.ui = Ui_SettingsWidget()
self.ui.setupUi(self)
self._widgets = []
def clear(self):
"""
Clear all items in the widget
"""
self._app.log_debug("Clearing UI...")
# before we begin widget operations, turn off visibility
# of the whole widget in order to avoid recomputes
self.setVisible(False)
try:
for x in self._widgets:
# remove widget from layout:
self.ui.settings_layout.removeWidget(x)
                # set its parent to None so that it is removed from the widget hierarchy
x.setParent(None)
# mark it to be deleted when event processing returns to the main loop
x.deleteLater()
self._widgets = []
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
def set_data(self, settings):
"""
Clear any existing data in the widget and populate it with new data
:param settings: Shotgun data dictionary
"""
# first clear existing stuff
self.clear()
if len(settings) == 0:
# an empty dictionary indicates no data available.
return
self.setVisible(False)
try:
            # now create new items - one row per setting
curr_row = 0
for setting in settings:
field_label = FieldNameLabel(self)
field_label.setText(setting.name)
field_label.setToolTip(setting.description)
field_label.setWordWrap(True)
field_label.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
value_label = FieldValueLabel(self)
value_label.setText(setting.string_value)
value_label.setWordWrap(True)
value_label.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.ui.settings_layout.addWidget(field_label, curr_row, 0)
self.ui.settings_layout.addWidget(value_label, curr_row, 1)
self._widgets.append(value_label)
self._widgets.append(field_label)
curr_row += 1
# let the value column be the expanding one
self.ui.settings_layout.setColumnStretch(1, 1)
# and push all rows together
self.ui.settings_layout.setRowStretch(curr_row, 1)
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
def set_static_data(self, settings):
"""
Clear any existing data in the widget and populate it with new data
:param settings: Shotgun data dictionary
"""
# first clear existing stuff
self.clear()
if len(settings) == 0:
# an empty dictionary indicates no data available.
return
self.setVisible(False)
try:
            # now create new items - one row per name/value pair
curr_row = 0
for (name, value) in settings:
field_label = FieldNameLabel(self)
field_label.setText(name)
field_label.setWordWrap(True)
field_label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
value_label = FieldValueLabel(self)
value_label.setText(str(value))
value_label.setWordWrap(True)
value_label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
self.ui.settings_layout.addWidget(field_label, curr_row, 0)
self.ui.settings_layout.addWidget(value_label, curr_row, 1)
self._widgets.append(value_label)
self._widgets.append(field_label)
curr_row += 1
# let the value column be the expanding one
self.ui.settings_layout.setColumnStretch(1, 1)
# and push all rows together
self.ui.settings_layout.setRowStretch(curr_row, 1)
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
```
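The teardown in `SettingsWidget.clear` is a common Qt pattern: hide the container to avoid intermediate relayouts, detach each child from both the layout and the widget hierarchy, and defer deletion to the event loop. A condensed sketch of that sequence:
```python
def clear_layout(container, layout, widgets):
    """Remove tracked widgets from a layout, deferring actual deletion."""
    # suppress intermediate relayouts while widgets are removed
    container.setVisible(False)
    try:
        for w in widgets:
            layout.removeWidget(w)  # detach from the layout
            w.setParent(None)       # detach from the widget hierarchy
            w.deleteLater()         # delete when control returns to the loop
        del widgets[:]
    finally:
        # a single relayout/redraw at the end
        container.setVisible(True)
```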
#### File: python/tk_multi_publish2/util.py
```python
import os
import pprint
import sgtk
# create a logger to use throughout
logger = sgtk.platform.get_logger(__name__)
# ---- file/path util functions
def get_version_path(path, version):
"""
Given a path without a version number, return the path with the supplied
version number.
If a version number is detected in the supplied path, the path will be
returned as-is.
    :param path: The path to inject a version number into.
:param version: The version number to inject.
:return: The modified path with the supplied version number inserted.
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_version_path",
path=path,
version=version
)
def get_next_version_path(path):
"""
Given a file path, return a path to the next version.
This is typically used by auto-versioning logic in plugins that need to
save the current work file to the next version number.
If no version can be identified in the supplied path, ``None`` will be
returned, indicating that the next version path can't be determined.
:param path: The path to a file, likely one to be published.
:return: The path to the next version of the supplied path.
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_next_version_path",
path=path
)
def get_file_path_components(path):
"""
Convenience method for determining file components for a given path.
:param str path: The path to the file to componentize.
    Returns a dictionary of file path components. Examples::
# path="/path/to/the/file/my_file.v001.ext"
{
"path": "/path/to/the/file/my_file.v001.ext",
"folder": "/path/to/the/file" ,
"filename": "my_file.v001.ext",
"extension": "ext",
}
# path="/path/to/the/folder"
{
"path": "/path/to/the/folder",
"folder": "/path/to/the" ,
"filename": "folder",
"extension": None,
}
"""
# get the path in a normalized state. no trailing separator, separators are
# appropriate for current os, no double separators, etc.
path = sgtk.util.ShotgunPath.normalize(path)
logger.debug("Getting file path components for path: '%s'..." % (path,))
# break it up into the major components
(folder, filename) = os.path.split(path)
if os.path.isdir(path):
# folder
extension = None
else:
# file. extract the extension and remove the "."
(_, extension) = os.path.splitext(filename)
if extension:
extension = extension.lstrip(".")
else:
# prevent extension = ""
extension = None
file_info = dict(
path=path,
folder=folder,
filename=filename,
extension=extension,
)
logger.debug(
"Extracted components from path '%s': %s" %
(path, file_info)
)
return file_info
def get_frame_sequence_path(path, frame_spec=None):
"""
Given a path with a frame number, return the sequence path where the frame
number is replaced with a given frame specification such as ``{FRAME}`` or
``%04d`` or ``$F``.
:param path: The input path with a frame number
:param frame_spec: The frame specification to replace the frame number with.
:return: The full frame sequence path
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_frame_sequence_path",
path=path,
frame_spec=frame_spec
)
def get_frame_sequences(folder, extensions=None, frame_spec=None):
"""
Given a folder, inspect the contained files to find what appear to be
files with frame numbers.
:param folder: The path to a folder potentially containing a sequence of
files.
:param extensions: A list of file extensions to retrieve paths for.
If not supplied, the extension will be ignored.
:param frame_spec: A string to use to represent the frame number in the
return sequence path.
:return: A list of tuples for each identified frame sequence. The first
item in the tuple is a sequence path with the frame number replaced
with the supplied frame specification. If no frame spec is supplied,
a python string format spec will be returned with the padding found
in the file.
Example::
get_frame_sequences(
"/path/to/the/folder",
["exr", "jpg"],
frame_spec="{FRAME}"
)
[
(
"/path/to/the/supplied/folder/key_light1.{FRAME}.exr",
[<frame_1_path>, <frame_2_path>, ...]
),
(
"/path/to/the/supplied/folder/fill_light1.{FRAME}.jpg",
[<frame_1_path>, <frame_2_path>, ...]
)
]
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_frame_sequences",
folder=folder,
extensions=extensions,
frame_spec=frame_spec
)
def get_publish_name(path, sequence=False):
"""
Given a file path, return the display name to use for publishing.
Typically, this is a name where the path and any version number are removed
in order to keep the publish name consistent as subsequent versions are
published.
Example::
in: /path/to/the/file/my_file.v001.jpg
out: my_file.jpg
:param path: The path to a file, likely one to be published.
:param sequence: If True, treat the path as a sequence name and replace
        the frame number with a placeholder
:return: A publish display name for the provided path.
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_publish_name",
path=path,
sequence=sequence
)
def get_version_number(path):
"""
Extract a version number from the supplied path.
This is used by plugins that need to know what version number to associate
with the file when publishing.
Example::
in: /path/to/the/file/my_file.v001.jpg
out: 1
:param path: The path to a file, likely one to be published.
:return: An integer representing the version number in the supplied path.
If no version found, ``None`` will be returned.
"""
# the logic for this method lives in a hook that can be overridden by
# clients. exposing the method here in the publish utils api prevents
# clients from having to call other hooks directly in their
# collector/publisher hook implementations.
publisher = sgtk.platform.current_bundle()
return publisher.execute_hook_method(
"path_info",
"get_version_number",
path=path
)
# ---- publish util functions
def get_conflicting_publishes(context, path, publish_name, filters=None):
"""
Returns a list of SG published file dicts for any existing publishes that
match the supplied context, path, and publish_name.
:param context: The context to search publishes for
:param path: The path to match against previous publishes
:param publish_name: The name of the publish.
:param filters: A list of additional SG find() filters to apply to the
publish search.
:return: A list of ``dict``s representing existing publishes that match
        the supplied arguments. The returned dicts contain the standard "id"
        and "type" fields as well as the "path" field.
    This method is typically used by publish plugin hooks to determine if there
    are existing publishes for a given context, publish_name, and path, and to
    warn the user appropriately.
"""
publisher = sgtk.platform.current_bundle()
logger.debug(
"Getting conflicting publishes for context: %s, path: %s, name: %s" %
(context, path, publish_name)
)
# ask core to do a dry_run of a publish with the supplied criteria. this is
# a workaround for our inability to filter publishes by path. so for now,
# get a dictionary of data that would be used to create a matching publish
# and use that to get publishes via a call to find(). Then we'll filter
# those by their path field. Once we have the ability in SG to filter by
# path, we can replace this whole method with a simple call to find().
publish_data = sgtk.util.register_publish(
publisher.sgtk,
context,
path,
publish_name,
version_number=None,
dry_run=True
)
logger.debug("Publish dry run data: %s" % (publish_data,))
# now build up the filters to match against
publish_filters = [filters] if filters else []
for field in ["code", "entity", "name", "project", "task"]:
publish_filters.append([field, "is", publish_data[field]])
logger.debug("Build publish filters: %s" % (publish_filters,))
    # run the query to find potentially matching publishes
publishes = publisher.shotgun.find(
"PublishedFile",
publish_filters,
["path"]
)
# ensure the path is normalized for comparison
normalized_path = sgtk.util.ShotgunPath.normalize(path)
# next, extract the publish path from each of the returned publishes and
# compare it against the supplied path. if the paths match, we add the
# publish to the list of publishes to return.
logger.debug("Comparing publish paths...")
matching_publishes = []
for publish in publishes:
publish_path = sgtk.util.resolve_publish_path(publisher.sgtk, publish)
if publish_path:
# ensure the published path is normalized for comparison
normalized_publish_path = sgtk.util.ShotgunPath.normalize(
publish_path)
if normalized_path == normalized_publish_path:
matching_publishes.append(publish)
return matching_publishes
def clear_status_for_conflicting_publishes(context, publish_data):
"""
Clear the status of any conflicting publishes matching the supplied publish
data.
The loader app respects the status of publishes to determine which are
available for the user to load in their DCC. Because it is possible to
create a version entry in SG with the same path multiple times, this method
provides an easy way to clear the status of previous publishes for a given
path.
The publish data supplied should be the fully populated publish data
returned by a call to ``sgtk.util.register_publish()``.
:param publish_data: Dictionary of the current publish data (i.e. the
publish entry whose status will not be cleared).
"""
publisher = sgtk.platform.current_bundle()
logger.debug("Clearing the status of any conflicting publishes.")
# determine the path from the publish data. this will match the path that
# was used to register the publish
path = sgtk.util.resolve_publish_path(publisher.sgtk, publish_data)
name = publish_data["name"]
# get a list of all publishes matching this criteria
publishes = get_conflicting_publishes(
context,
path,
name,
filters=["sg_status_list", "is_not", None]
)
if not publishes:
# no conflicting publishes. nothing to do.
logger.debug("No conflicting publishes detected for path: %s" % (path,))
return
# do a batch update of the conflicting publishes to clear their status
batch_data = []
for publish in publishes:
# make sure we don't update the supplied publish
if publish["id"] == publish_data["id"]:
continue
# add the update info to the batch data list
batch_data.append({
"request_type": "update",
"entity_type": publish["type"],
"entity_id": publish["id"],
"data": {"sg_status_list": None} # will clear the status
})
if batch_data:
logger.debug(
"Batch updating publish data: %s" %
(pprint.pformat(batch_data),)
)
# execute all the updates!
publisher.shotgun.batch(batch_data)
```
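Most of `get_file_path_components` reduces to `os.path.split` plus `os.path.splitext`, with directories given `extension=None`. A standalone sketch without the Toolkit normalization step (note the directory branch only triggers for paths that actually exist on disk):
```python
import os

def file_path_components(path):
    folder, filename = os.path.split(path.rstrip("/\\"))
    if os.path.isdir(path):
        extension = None  # folders have no extension
    else:
        (_, extension) = os.path.splitext(filename)
        extension = extension.lstrip(".") or None  # prevent extension == ""
    return {
        "path": path,
        "folder": folder,
        "filename": filename,
        "extension": extension,
    }

info = file_path_components("/path/to/the/file/my_file.v001.ext")
assert info["filename"] == "my_file.v001.ext"
assert info["extension"] == "ext"
```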
#### File: config/hooks/tk-nuke_actions.py
```python
import sgtk
import os
HookBaseClass = sgtk.get_hook_baseclass()
class NukeActions(HookBaseClass):
"""
Shotgun Panel Actions for Nuke
"""
def generate_actions(self, sg_data, actions, ui_area):
"""
Returns a list of action instances for a particular object.
The data returned from this hook will be used to populate the
actions menu.
        The mapping between Shotgun objects and actions is kept in a different place
(in the configuration) so at the point when this hook is called, the app
has already established *which* actions are appropriate for this object.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
The ui_area parameter is a string and indicates where the item is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
:param sg_data: Shotgun data dictionary.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
        :returns: List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Shotgun Data: %s" % (ui_area, actions, sg_data))
action_instances = []
try:
# call base class first
action_instances += HookBaseClass.generate_actions(self, sg_data, actions, ui_area)
        except AttributeError:
# base class doesn't have the method, so ignore and continue
pass
if "read_node" in actions:
action_instances.append( {"name": "read_node",
"params": None,
"caption": "Create Read Node",
"description": "This will add a read node to the current scene."} )
if "script_import" in actions:
action_instances.append( {"name": "script_import",
"params": None,
"caption": "Import Contents",
"description": "This will import all the nodes into the current scene."} )
if "open_project" in actions:
action_instances.append( {"name": "open_project",
"params": None,
"caption": "Open Project",
"description": "This will open the Nuke Studio project in the current session."} )
return action_instances
def execute_action(self, name, params, sg_data):
"""
        Execute a given action. The data sent to this method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_data: Shotgun data dictionary
:returns: No return value expected.
"""
app = self.parent
app.log_debug("Execute action called for action %s. "
"Parameters: %s. Shotgun Data: %s" % (name, params, sg_data))
if name == "read_node":
# resolve path - forward slashes on all platforms in Nuke
path = self.get_publish_path(sg_data).replace(os.path.sep, "/")
self._create_read_node(path, sg_data)
elif name == "script_import":
# resolve path - forward slashes on all platforms in Nuke
path = self.get_publish_path(sg_data).replace(os.path.sep, "/")
self._import_script(path, sg_data)
elif name == "open_project":
# resolve path - forward slashes on all platforms in Nuke
path = self.get_publish_path(sg_data).replace(os.path.sep, "/")
self._open_project(path, sg_data)
else:
try:
HookBaseClass.execute_action(self, name, params, sg_data)
            except AttributeError:
# base class doesn't have the method, so ignore and continue
pass
##############################################################################################################
# helper methods which can be subclassed in custom hooks to fine tune the behavior of things
def _import_script(self, path, sg_publish_data):
"""
Import contents of the given file into the scene.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
import nuke
if not os.path.exists(path):
raise Exception("File not found on disk - '%s'" % path)
nuke.nodePaste(path)
def _open_project(self, path, sg_publish_data):
"""
Open the nuke studio project.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
if not os.path.exists(path):
raise Exception("File not found on disk - '%s'" % path)
import nuke
if not nuke.env.get("studio"):
# can't import the project unless nuke studio is running
raise Exception("Nuke Studio is required to open the project.")
import hiero
hiero.core.openProject(path)
def _create_read_node(self, path, sg_publish_data):
"""
Create a read node representing the publish.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
import nuke
(_, ext) = os.path.splitext(path)
# If this is an Alembic cache, use a ReadGeo2 and we're done.
if ext.lower() == '.abc':
nuke.createNode('ReadGeo2', 'file {%s}' % path)
return
valid_extensions = [".png",
".jpg",
".jpeg",
".exr",
".cin",
".dpx",
".tiff",
".tif",
".mov",
".psd",
".tga",
".ari",
".gif",
".iff"]
if ext.lower() not in valid_extensions:
raise Exception("Unsupported file extension for '%s'!" % path)
# `nuke.createNode()` will extract the format and frame range from the
# file itself (if possible), whereas `nuke.nodes.Read()` won't. We'll
# also check to see if there's a matching template and override the
# frame range, but this should handle the zero config case. This will
# also automatically extract the format and frame range for movie files.
read_node = nuke.createNode("Read")
read_node["file"].fromUserText(path)
# find the sequence range if it has one:
seq_range = self._find_sequence_range(path)
if seq_range:
# override the detected frame range.
read_node["first"].setValue(seq_range[0])
read_node["last"].setValue(seq_range[1])
def _find_sequence_range(self, path):
"""
Helper method attempting to extract sequence information.
Using the toolkit template system, the path will be probed to
check if it is a sequence, and if so, frame information is
attempted to be extracted.
:param path: Path to file on disk.
:returns: None if no range could be determined, otherwise (min, max)
"""
# find a template that matches the path:
template = None
try:
template = self.parent.sgtk.template_from_path(path)
except sgtk.TankError:
pass
if not template:
return None
# get the fields and find all matching files:
fields = template.get_fields(path)
if not "SEQ" in fields:
return None
files = self.parent.sgtk.paths_from_template(template, fields, ["SEQ", "eye"])
# find frame numbers from these files:
frames = []
        for file_path in files:
            fields = template.get_fields(file_path)
            frame = fields.get("SEQ")
            if frame is not None:
                frames.append(frame)
if not frames:
return None
# return the range
return (min(frames), max(frames))
```
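The final step of `_find_sequence_range` reduces to collecting the `SEQ` field from each matched file and taking the min/max. A sketch with made-up field dictionaries standing in for `template.get_fields` results:
```python
def sequence_range(field_dicts):
    """Return (min, max) of the SEQ fields, or None if none are present."""
    frames = [f["SEQ"] for f in field_dicts if f.get("SEQ") is not None]
    if not frames:
        return None
    return (min(frames), max(frames))

# made-up field dictionaries for illustration
fields = [{"SEQ": 1001}, {"SEQ": 1002}, {"SEQ": 1010}, {"eye": "left"}]
assert sequence_range(fields) == (1001, 1010)
```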
#### File: tk-3dsmaxplus/v0.5.0/engine.py
```python
import os
import time
import math
import sgtk
import MaxPlus
class MaxEngine(sgtk.platform.Engine):
"""
The main Toolkit engine for 3ds Max
"""
@property
def host_info(self):
"""
:returns: A dictionary with information about the application hosting this engine.
The returned dictionary is of the following form on success:
Note that the version field refers to the release year.
{
"name": "3ds Max",
"version": "2018",
}
        The returned dictionary is of the following form on an error preventing
        the version identification:
        {
            "name": "3ds Max",
            "version": "unknown"
        }
References:
http://docs.autodesk.com/3DSMAX/16/ENU/3ds-Max-Python-API-Documentation/index.html
"""
host_info = {"name": "3ds Max", "version": "unknown"}
try:
host_info["version"] = str(self._max_version_to_year(self._get_max_version()))
        except Exception:
# Fallback to initialized values above
pass
return host_info
def __init__(self, *args, **kwargs):
"""
Engine Constructor
"""
# Add instance variables before calling our base class
# __init__() because the initialization may need those
# variables.
self._parent_to_max = True
self._on_menus_loaded_handler = None
self._dock_widgets = []
# proceed about your business
sgtk.platform.Engine.__init__(self, *args, **kwargs)
##########################################################################################
# properties
@property
def context_change_allowed(self):
"""
Tells the core API that context changes are allowed by this engine.
"""
return True
##########################################################################################
# init
def pre_app_init(self):
"""
Called before all apps have initialized
"""
from sgtk.platform.qt import QtCore
self.log_debug("%s: Initializing..." % self)
if self._get_max_version() > MaxEngine.MAXIMUM_SUPPORTED_VERSION:
# Untested max version
highest_supported_version = self._max_version_to_year(MaxEngine.MAXIMUM_SUPPORTED_VERSION)
msg = ("Shotgun Pipeline Toolkit!\n\n"
"The Shotgun Pipeline Toolkit has not yet been fully tested with 3ds Max versions greater than %s. "
"You can continue to use the Toolkit but you may experience bugs or instability. "
"Please report any issues you see to <EMAIL>" % highest_supported_version)
# Display warning dialog
max_year = self._max_version_to_year(self._get_max_version())
max_next_year = highest_supported_version + 1
if max_year >= self.get_setting("compatibility_dialog_min_version", max_next_year):
MaxPlus.Core.EvalMAXScript('messagebox "Warning - ' + msg + '" title: "Shotgun Warning"')
# and log the warning
self.log_warning(msg)
elif not self._is_at_least_max_2016():
# Unsupported max version
msg = ("Shotgun Pipeline Toolkit!\n\n"
"The Shotgun Pipeline Toolkit does not work with 3ds max versions prior to 2016.")
# Display warning dialog
MaxPlus.Core.EvalMAXScript('messagebox "Warning - ' + msg + '" title: "Shotgun Warning"')
# and log the warning
self.log_warning(msg)
self._safe_dialog = []
# Add image formats since max doesn't add the correct paths by default and jpeg won't be readable
maxpath = QtCore.QCoreApplication.applicationDirPath()
pluginsPath = os.path.join(maxpath, "plugins")
QtCore.QCoreApplication.addLibraryPath(pluginsPath)
# Window focus objects are used to enable proper keyboard handling by the window instead of 3dsMax's accelerators
engine = self
class DialogEvents(QtCore.QObject):
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.WindowActivate:
MaxPlus.CUI.DisableAccelerators()
elif event.type() == QtCore.QEvent.WindowDeactivate:
MaxPlus.CUI.EnableAccelerators()
# Remove from tracked dialogs
if event.type() == QtCore.QEvent.Close:
if obj in engine._safe_dialog:
engine._safe_dialog.remove(obj)
return False
self.dialogEvents = DialogEvents()
# set up a qt style sheet
# note! - try to be smart about this and only run
# the style setup once per session - it looks like
# 3dsmax slows down if this is executed every engine restart.
qt_app_obj = sgtk.platform.qt.QtCore.QCoreApplication.instance()
curr_stylesheet = qt_app_obj.styleSheet()
if "toolkit 3dsmax style extension" not in curr_stylesheet:
# If we're in pre-2017 Max then we need to handle our own styling. Otherwise
# we just inherit from Max.
if self._max_version_to_year(self._get_max_version()) < 2017:
self._initialize_dark_look_and_feel()
curr_stylesheet += "\n\n /* toolkit 3dsmax style extension */ \n\n"
curr_stylesheet += "\n\n QDialog#TankDialog > QWidget { background-color: #343434; }\n\n"
qt_app_obj.setStyleSheet(curr_stylesheet)
# This needs to be present for apps as it will be used in show_dialog when perforce asks for login
# info very early on.
self.tk_3dsmax = self.import_module("tk_3dsmaxplus")
# The "qss_watcher" setting causes us to monitor the engine's
# style.qss file and re-apply it on the fly when it changes
        # on disk. This is very useful for development work.
if self.get_setting("qss_watcher", False):
self._qss_watcher = QtCore.QFileSystemWatcher(
[os.path.join(self.disk_location, sgtk.platform.constants.BUNDLE_STYLESHEET_FILE)],
)
self._qss_watcher.fileChanged.connect(self.reload_qss)
def _add_shotgun_menu(self):
"""
Add Shotgun menu to the main menu bar.
"""
self.log_debug("Adding the shotgun menu to the main menu bar.")
self._menu_generator.create_menu()
self.tk_3dsmax.MaxScript.enable_menu()
def _remove_shotgun_menu(self):
"""
Remove Shotgun menu from the main menu bar.
"""
self.log_debug("Removing the shotgun menu from the main menu bar.")
self._menu_generator.destroy_menu()
def _on_menus_loaded(self, code):
"""
Called when receiving CuiMenusPostLoad from 3dsMax.
:param code: Notification code received
"""
self._add_shotgun_menu()
def post_app_init(self):
"""
Called when all apps have initialized
"""
# set up menu handler
self._menu_generator = self.tk_3dsmax.MenuGenerator(self)
self._add_shotgun_menu()
try:
# Listen to the CuiMenusPostLoad notification in order to add
# our shotgun menu after workspace reset/switch.
self._on_menus_loaded_handler = MaxPlus.NotificationManager.Register(
MaxPlus.NotificationCodes.CuiMenusPostLoad, self._on_menus_loaded)
except AttributeError:
self.log_debug("CuiMenusPostLoad notification code is not available in this version of MaxPlus.")
# Run a series of app instance commands at startup.
self._run_app_instance_commands()
def post_context_change(self, old_context, new_context):
"""
Handles necessary processing after a context change has been completed
successfully.
:param old_context: The previous context.
:param new_context: The current, new context.
"""
# Replacing the menu will cause the old one to be removed
# and the new one put into its place.
self._add_shotgun_menu()
def _run_app_instance_commands(self):
"""
Runs the series of app instance commands listed in the 'run_at_startup' setting
of the environment configuration yaml file.
"""
# Build a dictionary mapping app instance names to dictionaries of commands they registered with the engine.
app_instance_commands = {}
for (command_name, value) in self.commands.iteritems():
app_instance = value["properties"].get("app")
if app_instance:
# Add entry 'command name: command function' to the command dictionary of this app instance.
command_dict = app_instance_commands.setdefault(app_instance.instance_name, {})
command_dict[command_name] = value["callback"]
# Run the series of app instance commands listed in the 'run_at_startup' setting.
for app_setting_dict in self.get_setting("run_at_startup", []):
app_instance_name = app_setting_dict["app_instance"]
# Menu name of the command to run or '' to run all commands of the given app instance.
setting_command_name = app_setting_dict["name"]
# Retrieve the command dictionary of the given app instance.
command_dict = app_instance_commands.get(app_instance_name)
if command_dict is None:
self.log_warning(
"%s configuration setting 'run_at_startup' requests app '%s' that is not installed." %
(self.name, app_instance_name))
else:
if not setting_command_name:
# Run all commands of the given app instance.
for (command_name, command_function) in command_dict.iteritems():
self.log_debug("%s startup running app '%s' command '%s'." %
(self.name, app_instance_name, command_name))
command_function()
else:
# Run the command whose name is listed in the 'run_at_startup' setting.
command_function = command_dict.get(setting_command_name)
if command_function:
self.log_debug("%s startup running app '%s' command '%s'." %
(self.name, app_instance_name, setting_command_name))
command_function()
else:
known_commands = ', '.join("'%s'" % name for name in command_dict)
self.log_warning(
"%s configuration setting 'run_at_startup' requests app '%s' unknown command '%s'. "
"Known commands: %s" %
(self.name, app_instance_name, setting_command_name, known_commands))
def destroy_engine(self):
"""
Called when the engine is shutting down
"""
self.log_debug('%s: Destroying...' % self)
if self._on_menus_loaded_handler is not None:
MaxPlus.NotificationManager.Unregister(self._on_menus_loaded_handler)
self._remove_shotgun_menu()
def update_shotgun_menu(self):
"""
Rebuild the shotgun menu displayed in the main menu bar
"""
self._remove_shotgun_menu()
self._add_shotgun_menu()
##########################################################################################
# logging
    # Logging functions should only be called from the main thread, although output
    # to the listener is supposed to be thread-safe.
    # Note from the Max team: Python scripts run via MAXScript are not thread-safe.
# Python commands are always executed in the main 3ds Max thread.
# You should not attempt to spawn separate threads in your scripts
# (for example, by using the Python threading module).
def log_debug(self, msg):
"""
Debug logging.
:param msg: The message string to log
"""
if self.get_setting("debug_logging", False):
self.async_execute_in_main_thread(self._print_output, "Shotgun Debug: %s" % msg)
def log_info(self, msg):
"""
Info logging.
:param msg: The message string to log
"""
self.async_execute_in_main_thread(self._print_output, "Shotgun Info: %s" % msg)
def log_warning(self, msg):
"""
Warning logging.
:param msg: The message string to log
"""
self.async_execute_in_main_thread(self._print_output, "Shotgun Warning: %s" % msg)
def log_error(self, msg):
"""
Error logging.
:param msg: The message string to log
"""
self.async_execute_in_main_thread(self._print_output, "Shotgun Error: %s" % msg)
def _print_output(self, msg):
"""
Print the specified message to the maxscript listener
:param msg: The message string to print
"""
print "[%-13s] %s" % (str(time.time()), msg)
##########################################################################################
# Engine
def show_panel(self, panel_id, title, bundle, widget_class, *args, **kwargs):
"""
Docks an app widget in a 3dsmax panel.
:param panel_id: Unique identifier for the panel, as obtained by register_panel().
:param title: The title of the panel
:param bundle: The app, engine or framework object that is associated with this window
:param widget_class: The class of the UI to be constructed. This must derive from QWidget.
Additional parameters specified will be passed through to the widget_class constructor.
:returns: the created widget_class instance
"""
from sgtk.platform.qt import QtCore, QtGui
self.log_debug("Begin showing panel %s" % panel_id)
if self._max_version_to_year(self._get_max_version()) <= 2017:
# Qt docking is supported in version 2018 and later.
self.log_warning("Panel functionality not implemented. Falling back to showing "
"panel '%s' in a modeless dialog" % panel_id)
return super(MaxEngine, self).show_panel(panel_id, title, bundle, widget_class, *args, **kwargs)
dock_widget_id = "sgtk_dock_widget_" + panel_id
main_window = MaxPlus.GetQMaxMainWindow()
# Check if the dock widget wrapper already exists.
dock_widget = main_window.findChild(QtGui.QDockWidget, dock_widget_id)
if dock_widget is None:
# The dock widget wrapper cannot be found in the main window's
# children list so that means it has not been created yet, so create it.
widget_instance = widget_class(*args, **kwargs)
widget_instance.setParent(self._get_dialog_parent())
widget_instance.setObjectName(panel_id)
dock_widget = QtGui.QDockWidget(title, parent=main_window)
dock_widget.setObjectName(dock_widget_id)
dock_widget.setWidget(widget_instance)
self.log_debug("Created new dock widget %s" % dock_widget_id)
# Disable 3dsMax accelerators, in order for QTextEdit and QLineEdit
# widgets to work properly.
widget_instance.setProperty("NoMaxAccelerators", True)
else:
# The dock widget wrapper already exists, so just get the
# shotgun panel from it.
widget_instance = dock_widget.widget()
self.log_debug("Found existing dock widget %s" % dock_widget_id)
# apply external stylesheet
self._apply_external_stylesheet(bundle, widget_instance)
if not main_window.restoreDockWidget(dock_widget):
# The dock widget cannot be restored from the main window's state,
# so dock it to the right dock area and make it float by default.
main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock_widget)
dock_widget.setFloating(True)
dock_widget.show()
# Remember the dock widget, so we can delete it later.
self._dock_widgets.append(dock_widget)
return widget_instance
def close_windows(self):
"""
Closes the various windows (dialogs, panels, etc.) opened by the engine.
"""
# Make a copy of the list of Tank dialogs that have been created by the engine and
# are still opened since the original list will be updated when each dialog is closed.
opened_dialog_list = self.created_qt_dialogs[:]
# Loop through the list of opened Tank dialogs.
for dialog in opened_dialog_list:
dialog_window_title = dialog.windowTitle()
try:
# Close the dialog and let its close callback remove it from the original dialog list.
self.log_debug("Closing dialog %s." % dialog_window_title)
dialog.close()
except Exception, exception:
self.log_error("Cannot close dialog %s: %s" % (dialog_window_title, exception))
# Delete all dock widgets previously added.
for dock_widget in self._dock_widgets:
# Keep MaxPlus.GetQMaxMainWindow() inside for-loop
# This will be executed only in version > 2017
# which supports Qt-docking.
MaxPlus.GetQMaxMainWindow().removeDockWidget(dock_widget)
dock_widget.deleteLater()
def _create_dialog(self, title, bundle, widget, parent):
"""
Parent function override to install event filtering in order to allow proper events to
reach window dialogs (such as keyboard events).
"""
dialog = sgtk.platform.Engine._create_dialog(self, title, bundle, widget, parent)
# Attaching the dialog to Max is a matter of whether this is a new
# enough version of 3ds Max. Anything short of 2016 SP1 is going to
# fail here with an AttributeError, so we can just catch that and
# continue on without the new-style parenting.
previous_parent = dialog.parent()
if self._parent_to_max:
try:
self.log_debug("Attempting to attach dialog to 3ds Max...")
# widget must be parentless when calling MaxPlus.AttachQWidgetToMax
dialog.setParent(None)
MaxPlus.AttachQWidgetToMax(dialog)
self.log_debug("AttachQWidgetToMax successful.")
except AttributeError:
dialog.setParent(previous_parent)
self.log_debug("AttachQWidgetToMax not available in this version of 3ds Max.")
dialog.installEventFilter(self.dialogEvents)
# Add to tracked dialogs (will be removed in eventFilter)
self._safe_dialog.append(dialog)
# Apply the engine-level stylesheet.
self._apply_external_styleshet(self, dialog)
return dialog
def reload_qss(self):
"""
        Causes the style.qss file that comes with the tk-3dsmaxplus engine to
be re-applied to all dialogs that the engine has previously
launched.
"""
self.log_warning("Reloading engine QSS...")
for dialog in self.created_qt_dialogs:
self._apply_external_styleshet(self, dialog)
dialog.update()
def show_modal(self, title, bundle, widget_class, *args, **kwargs):
from sgtk.platform.qt import QtGui
if not self.has_ui:
self.log_error("Sorry, this environment does not support UI display! Cannot show "
"the requested window '%s'." % title)
return None
status = QtGui.QDialog.DialogCode.Rejected
try:
# Disable 'Shotgun' background menu while modals are there.
self.tk_3dsmax.MaxScript.disable_menu()
# create the dialog:
try:
self._parent_to_max = False
dialog, widget = self._create_dialog_with_widget(
title,
bundle,
widget_class,
*args, **kwargs
)
finally:
self._parent_to_max = True
# finally launch it, modal state
status = dialog.exec_()
except Exception:
import traceback
tb = traceback.format_exc()
self.log_error("Exception in modal window: %s" % tb)
finally:
# Re-enable 'Shotgun' background menu after modal has been closed
self.tk_3dsmax.MaxScript.enable_menu()
# lastly, return the instantiated widget
return (status, widget)
def safe_dialog_exec(self, func):
"""
If running a command from a dialog also creates a 3ds max window, this function tries to
ensure that the dialog will stay alive and that the max modal window becomes visible
and unobstructed.
        :param func: Function to execute (partial/lambda)
"""
        # Merge operations can cause Max dialogs to pop up, and closing the window results in a crash.
        # So keep our Qt windows alive but hidden while these operations are occurring.
from sgtk.platform.qt import QtGui
toggled = []
for dialog in self._safe_dialog:
needs_toggling = dialog.isVisible()
if needs_toggling:
self.log_debug("Toggling dialog off: %r" % dialog)
toggled.append(dialog)
dialog.hide()
dialog.lower()
QtGui.QApplication.processEvents()
else:
self.log_debug("Dialog is already hidden: %r" % dialog)
try:
func()
finally:
for dialog in toggled:
# Restore the window after the operation is completed
self.log_debug("Toggling dialog on: %r" % dialog)
dialog.show()
dialog.activateWindow() # for Windows
dialog.raise_() # for MacOS
##########################################################################################
# MaxPlus SDK Patching
# Version Id for 3dsmax 2016 Taken from Max Sdk (not currently available in maxplus)
MAX_RELEASE_R18 = 18000
# Latest supported max version
MAXIMUM_SUPPORTED_VERSION = 20000
def _max_version_to_year(self, version):
"""
Get the max year from the max release version.
        Note that while 17000 is 2015, 17900 would be a 2016 alpha.
"""
        # math.ceil returns a float, so cast to keep the year an integer.
        year = int(2000 + (math.ceil(version / 1000.0) - 2))
return year
def _get_max_version(self):
"""
Returns Version integer of max release number.
"""
# 3dsMax Version returns a number which contains max version, sdk version, etc...
version_id = MaxPlus.Application.Get3DSMAXVersion()
# Transform it to a version id
# (Macro to get 3ds max release from version id)
version_number = (version_id >> 16) & 0xffff
return version_number
def _is_at_least_max_2016(self):
"""
        Returns True if the current Max version is 3ds Max 2016 or above.
"""
return self._get_max_version() >= MaxEngine.MAX_RELEASE_R18
```
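The version arithmetic in `MaxEngine` is compact enough to verify in isolation: the release number sits in the top 16 bits of the packed version id, and the year is derived by rounding the release number up to the next thousand. A small self-contained check of both helpers:

```python
# Self-contained check of the version math used by MaxEngine above.
import math

def version_number_from_id(version_id):
    # The packed id holds several fields; the release number is the top 16 bits.
    return (version_id >> 16) & 0xffff

def max_version_to_year(version):
    # 18000 -> 2016, 20000 -> 2018; a 2016 alpha like 17900 also maps to 2016.
    return int(2000 + (math.ceil(version / 1000.0) - 2))

assert max_version_to_year(18000) == 2016   # MAX_RELEASE_R18
assert max_version_to_year(17900) == 2016   # alpha builds round up
assert max_version_to_year(20000) == 2018   # MAXIMUM_SUPPORTED_VERSION
```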
#### File: python/tk_3dsmaxplus/maxscript.py
```python
import hashlib
import MaxPlus
class MaxScript:
"""
MaxScript/Python Bridge Utilities
"""
@staticmethod
def add_to_menu(from_menu_var, to_menu_var, from_menu_name):
"""
Add given menu to another menu
:param from_menu_var: MaxScript variable name of menu to add from
:param to_menu_var: MaxScript variable name of menu to add to
:param from_menu_name: Name of menu item to give to MaxScript
"""
MaxPlus.Core.EvalMAXScript('''
sgtk_menu_sub_item = menuMan.createSubMenuItem "{from_menu_name}" {from_menu_var}
{to_menu_var}.addItem sgtk_menu_sub_item -1
'''.format(from_menu_var=from_menu_var, to_menu_var=to_menu_var, from_menu_name=from_menu_name))
@staticmethod
def create_menu(menu_name, menu_var):
"""
Create a menu
:param menu_name: String name of menu to create
:param menu_var: MaxScript variable name in which the menu will be created
"""
MaxPlus.Core.EvalMAXScript('''
-- clear the old menu
sgtk_oldMenu = menuMan.findMenu "{menu_name}"
if sgtk_oldMenu != undefined then menuMan.unregisterMenu sgtk_oldMenu
-- create the main menu
{menu_var} = menuMan.createMenu "{menu_name}"
'''.format(menu_var=menu_var, menu_name=menu_name))
@staticmethod
def add_separator(menu_var):
"""
Add separator to a menu
:param menu_var: MaxScript variable name of the menu to add separator into
"""
MaxPlus.Core.EvalMAXScript('''
sgtk_menu_separator = menuMan.createSeparatorItem()
{menu_var}.addItem sgtk_menu_separator -1
'''.format(menu_var=menu_var))
@staticmethod
def add_to_main_menu_bar(menu_var, menu_name):
"""
Add menu to 3ds max's main menu bar
:param menu_var: MaxScript variable name of menu to add to the main menu bar
:param menu_name: String name of the menu to add
"""
MaxPlus.Core.EvalMAXScript('''
-- Add main menu to Max, second to last which should be before Help
sgtk_main_menu_bar = menuMan.getMainMenuBar()
sgtk_sub_menu_index = sgtk_main_menu_bar.numItems() - 1
sgtk_sub_menu_item = menuMan.createSubMenuItem "{menu_name}" {menu_var}
sgtk_main_menu_bar.addItem sgtk_sub_menu_item sgtk_sub_menu_index
menuMan.updateMenuBar()
'''.format(menu_var=menu_var, menu_name=menu_name))
@staticmethod
def add_action_to_menu(callback, action_name, menu_var, engine):
"""
Add a menu item for this command to the given MaxScript menu variable name.
:param callback: Callback function to call with this action
:param action_name: Name of the action, as will appear to the user
:param menu_var: MaxScript menu variable name to add menu item to.
:param engine: Current engine where the action can be globally linked back to.
"""
obj = callback.im_self
method_name = callback.__name__
# Note that we're using the action name because we need these
# macros to reference things consistently across sessions. Sadly,
# if a second, concurrent, 3ds Max session is launched, Toolkit
# will build the Shotgun menu in that session and Max will write
# that updated menu layout to disk for the user, because it thinks
# that needs to persist across sessions. This causes us problems
# in the first session, then, because Max looks up what macro to
# run from the xml stored on disk when the action is triggered.
# This means that if we have anything referenced from the macro
# that is not available in the first session, the action will
# fail.
hash_name = action_name
# This won't be visible to the user, so we'll go the ugly route
# to resolve conflicts and just append underscores until we get
# a unique key. Ultimately, this is just covering what should be
# the very rare edge case of having two menu actions with the
# same name. That would be bad practice, in my opinion, but since
# it is possible we will handle it.
while hash_name in engine.maxscript_objects:
hash_name += "_"
engine.maxscript_objects[hash_name] = obj
"""
        Macro names must not contain any strange characters (spaces, dashes, etc.).
        These macro scripts will be saved as files by 3ds Max in the user folder.
        The name is therefore derived from the action name so as not to pollute the
        usermacro folder with a new macro for the same action every time Shotgun is reloaded.
        e.g. the 'Publish...' action will always re-use the same MacroScript.
"""
macro_name = 'sg_' + hashlib.md5(action_name).hexdigest()
# Creating python code separately as it needs to have no indentation in the macroscript
python_code = (
"import sgtk\n"
"engine = sgtk.platform.current_engine()\n"
"if '{hash_name}' in engine.maxscript_objects:\n"
" command_object = engine.maxscript_objects['{hash_name}']\n"
" command_object.{command_name}()\n"
"else:\n"
" engine.log_error('Shotgun Error: Failed to find Action command in MAXScript callback for action [{action_name}]!')\n"
).format(hash_name=hash_name, command_name=method_name, action_name=action_name)
MaxPlus.Core.EvalMAXScript('''
-- Create MacroScript that will callback to our python object
macroScript {macro_name}
category: "Shotgun Menu Actions"
tooltip: "{action_name}"
(
on execute do
(
/*
This is a workaround to prevent any menu item from being used while there is a modal window.
Calling any python code from maxscript while there is a modal window (even 'a = 1') results in
an exception.
Note: Keeping the indent is important here
*/
if (sgtk_main_menu_enabled != undefined and sgtk_main_menu_enabled == True) then
python.execute "{python_code}"
else
print "Shotgun Warning: You need to close the current window dialog before using any more commands."
)
)
-- Add menu item using previous MacroScript action
sgtk_menu_action = menuMan.createActionItem "{macro_name}" "Shotgun Menu Actions"
sgtk_menu_action.setUseCustomTitle true
sgtk_menu_action.setTitle("{action_name}")
{menu_var}.addItem sgtk_menu_action -1
'''.format(macro_name=macro_name, menu_var=menu_var, action_name=action_name, python_code=python_code))
@staticmethod
def disable_menu():
"""
        Sets a flag so that menu actions will not be called, since calling them while a modal
        window is open would throw exceptions. See add_action_to_menu's MacroScript comments for details.
This is used to disable actions while a modal window is opened.
"""
MaxPlus.Core.EvalMAXScript("sgtk_main_menu_enabled = False")
@staticmethod
def enable_menu():
"""
Sets a flag so that menu actions can be called.
"""
MaxPlus.Core.EvalMAXScript("sgtk_main_menu_enabled = True")
```
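The macro naming scheme in `add_action_to_menu` matters because 3ds Max persists MacroScripts to disk per user: a stable, digest-based name means the same action reuses the same file across sessions. Here is a sketch of the two naming steps in isolation (the `.encode()` is added so the snippet also runs under Python 3; the engine code above is Python 2):

```python
# Sketch of the naming scheme from add_action_to_menu above: a stable
# MacroScript name per action, plus underscore-suffixing for duplicates.
import hashlib

def macro_name_for(action_name):
    # Same action name -> same macro name, in every session.
    return "sg_" + hashlib.md5(action_name.encode("utf-8")).hexdigest()

maxscript_objects = {}

def unique_hash_name(action_name, obj):
    hash_name = action_name
    while hash_name in maxscript_objects:
        hash_name += "_"
    maxscript_objects[hash_name] = obj
    return hash_name

print(macro_name_for("Publish..."))
print(unique_hash_name("Publish...", object()))   # 'Publish...'
print(unique_hash_name("Publish...", object()))   # 'Publish..._'
```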
#### File: python/tk_3dsmaxplus/menu_generation.py
```python
import MaxPlus
import os
import sys
import traceback
import unicodedata
from sgtk.platform.qt import QtCore, QtGui
from .maxscript import MaxScript
MENU_LABEL = "Shotgun"
class MenuGenerator(object):
"""
Menu generation functionality for 3dsmax
Actual menu creation is done through MaxScript to prevent a crash with modal dialogs.
The crash happens if a modal dialog is open and a user clicks on a menu with action items
that directly call python code
"""
def __init__(self, engine):
"""
Initialize Menu Generator.
:param engine: Engine to get commands from.
"""
self._engine = engine
# Maxscript variable name for context menu
self._ctx_var = 'sgtk_menu_ctx'
        # MaxScript variable name for the Shotgun main menu
self._menu_var = 'sgtk_menu_main'
# Need a globally available object for maxscript action callbacks to be able to refer to python objects
self._engine.maxscript_objects = {}
def create_menu(self):
"""
Create the Shotgun Menu
"""
# Create the main menu
MaxScript.create_menu(MENU_LABEL, self._menu_var)
# enumerate all items and create menu objects for them
cmd_items = []
for (cmd_name, cmd_details) in self._engine.commands.items():
cmd_items.append(AppCommand(cmd_name, cmd_details))
# start with context menu
self._create_context_builder()
for cmd in cmd_items:
if cmd.get_type() == "context_menu":
cmd.add_to_menu(self._ctx_var, self._engine)
# now favourites
for fav in self._engine.get_setting("menu_favourites", []):
app_instance_name = fav["app_instance"]
menu_name = fav["name"]
# scan through all menu items
for cmd in cmd_items:
if cmd.get_app_instance_name() == app_instance_name and cmd.name == menu_name:
# found our match!
cmd.add_to_menu(self._menu_var, self._engine)
# mark as a favourite item
cmd.favourite = True
MaxScript.add_separator(self._menu_var)
# now go through all of the menu items.
# separate them out into various sections
commands_by_app = {}
for cmd in cmd_items:
if cmd.get_type() != "context_menu":
# normal menu
app_name = cmd.get_app_name()
if app_name is None:
# un-parented app
app_name = "Other Items"
if not app_name in commands_by_app:
commands_by_app[app_name] = []
commands_by_app[app_name].append(cmd)
# now add all apps to main menu
self._add_app_menu(commands_by_app)
MaxScript.add_to_main_menu_bar(self._menu_var, MENU_LABEL)
def destroy_menu(self):
if MaxPlus.MenuManager.MenuExists(MENU_LABEL):
MaxPlus.MenuManager.UnregisterMenu(MENU_LABEL)
def _create_context_builder(self):
"""
        Adds a context menu which displays the current context
"""
ctx = self._engine.context
ctx_name = str(ctx)
MaxScript.create_menu(ctx_name, self._ctx_var)
MaxScript.add_action_to_menu(self._jump_to_sg, 'Jump to Shotgun', self._ctx_var, self._engine)
# Add the menu item only when there are some file system locations.
if ctx.filesystem_locations:
MaxScript.add_action_to_menu(self._jump_to_fs, 'Jump to File System', self._ctx_var, self._engine)
MaxScript.add_separator(self._menu_var)
MaxScript.add_to_menu(self._ctx_var, self._menu_var, "ctx_builder")
def _jump_to_sg(self):
"""
Jump from context to Sg
"""
url = self._engine.context.shotgun_url
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
def _jump_to_fs(self):
"""
Jump from context to Fs
"""
# launch one window for each location on disk
paths = self._engine.context.filesystem_locations
for disk_location in paths:
# get the setting
system = sys.platform
# run the app
if system == "linux2":
cmd = 'xdg-open "%s"' % disk_location
elif system == "darwin":
cmd = 'open "%s"' % disk_location
elif system == "win32":
cmd = 'cmd.exe /C start "Folder" "%s"' % disk_location
else:
raise Exception("Platform '%s' is not supported." % system)
exit_code = os.system(cmd)
if exit_code != 0:
self._engine.log_error("Failed to launch '%s'!" % cmd)
def _add_app_menu(self, commands_by_app):
"""
Add all apps to the main menu, process them one by one.
:param commands_by_app: Dictionary of app name and commands related to the app, which
will be added to the menu builder
"""
for app_name in sorted(commands_by_app.keys()):
if len(commands_by_app[app_name]) > 1:
                # more than one menu entry for this app
# make a sub menu and put all items in the sub menu
menu_var = 'sgtk_menu_builder'
MaxScript.create_menu(app_name, menu_var)
for cmd in commands_by_app[app_name]:
cmd.add_to_menu(menu_var, self._engine)
MaxScript.add_to_menu(menu_var, self._menu_var, "ShotgunMenu")
else:
# this app only has a single entry.
# display that on the menu
cmd_obj = commands_by_app[app_name][0]
if not cmd_obj.favourite:
                    # skip favourites since they are already on the menu
cmd_obj.add_to_menu(self._menu_var, self._engine)
class AppCommand(object):
"""
Wraps around a single command that you get from engine.commands
"""
def __init__(self, name, command_dict):
"""
Initialize AppCommand object.
:param name: Command name
:param command_dict: Dictionary containing a 'callback' property to use as callback.
"""
self.name = name
self.properties = command_dict["properties"]
self.callback = command_dict["callback"]
self.favourite = False
def get_app_name(self):
"""
Returns the name of the app that this command belongs to
"""
if "app" in self.properties:
return self.properties["app"].display_name
return None
def get_app_instance_name(self):
"""
Returns the name of the app instance, as defined in the environment.
Returns None if not found.
"""
engine = self.get_engine()
if engine is None:
return None
if "app" not in self.properties:
return None
app_instance = self.properties["app"]
for (app_instance_name, app_instance_obj) in engine.apps.items():
if app_instance_obj == app_instance:
# found our app!
return app_instance_name
return None
def get_documentation_url_str(self):
"""
Returns the documentation as a str
"""
if "app" in self.properties:
app = self.properties["app"]
doc_url = app.documentation_url
            # deal with the host app's inability to handle unicode. #fail
if doc_url.__class__ == unicode:
doc_url = unicodedata.normalize('NFKD', doc_url).encode('ascii', 'ignore')
return doc_url
return None
def get_engine(self):
"""
Returns the engine from the App Instance
Returns None if not found
"""
if "app" not in self.properties:
return None
app_instance = self.properties["app"]
engine = app_instance.engine
return engine
def get_type(self):
"""
        Returns the command type, e.g. "context_menu" or "default".
"""
return self.properties.get("type", "default")
def execute(self):
"""
Delegate method for this command
"""
try:
self.callback()
except:
tb = traceback.format_exc()
engine = self.get_engine()
if engine is not None:
engine.log_error("Failed to call command '%s'. '%s'!" % (self.name, tb))
def add_to_menu(self, menu_var, engine):
"""
Add command to menu
:param menu_var: MaxScript menu variable name to add menu item to.
:param engine: Current engine where the action can be globally linked back to. (Not the App engine)
"""
MaxScript.add_action_to_menu(self.execute, self.name, menu_var, engine)
```
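`create_menu` buckets commands by owning app before building submenus; commands without an app fall under "Other Items" and context-menu commands are excluded. That grouping step is easy to isolate, sketched here against a stand-in command class rather than real `AppCommand` objects:

```python
# Sketch of the grouping step in MenuGenerator.create_menu() above.
def group_commands(cmd_items):
    commands_by_app = {}
    for cmd in cmd_items:
        if cmd.get_type() == "context_menu":
            continue  # context commands are handled separately
        app_name = cmd.get_app_name() or "Other Items"
        commands_by_app.setdefault(app_name, []).append(cmd)
    return commands_by_app

class FakeCmd(object):
    """Stand-in for AppCommand, just for this sketch."""
    def __init__(self, app_name, cmd_type="default"):
        self._app_name, self._type = app_name, cmd_type
    def get_app_name(self):
        return self._app_name
    def get_type(self):
        return self._type

grouped = group_commands([FakeCmd("Publish"), FakeCmd(None), FakeCmd("Publish")])
print(sorted(grouped.keys()))  # ['Other Items', 'Publish']
```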
#### File: python/tk_desktop/action_list_view.py
```python
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
class ActionListView(QtGui.QListView):
""" Subclass of QListView that handles special logic when the items launch actions on select """
def mousePressEvent(self, event):
# Ignore right click events
if (event.button() == QtCore.Qt.RightButton):
return
# pass through to QListView for all other mouse events
QtGui.QListView.mousePressEvent(self, event)
```
#### File: python/tk_desktop/communication_base.py
```python
from .rpc import RPCServerThread, RPCProxy
from sgtk import LogManager
logger = LogManager.get_logger(__name__)
class CommunicationBase(object):
"""
Communication channel base class.
"""
def __init__(self, engine):
"""
:param engine: Toolkit engine.
"""
self._engine = engine
self._msg_server = None
self._proxy = None
@property
def is_connected(self):
"""
Indicates whether there a live connection.
:rtype: bool
"""
return (
self._proxy is not None and
not self._proxy.is_closed()
)
def shut_down(self):
"""
Disconnects from the other process and shuts down the local server.
"""
logger.debug("Shutting down communication channel...")
# Be super careful when closing the proxy, because it can be in an inconsistent state and
# throw errors.
if self._proxy is not None:
try:
self._notify_proxy_closure()
except Exception:
logger.exception("Error while destroying app proxy:")
else:
logger.debug("Destroyed app proxy.")
self._destroy_proxy()
# close down our server thread
if self._msg_server is not None:
self._msg_server.close()
logger.debug("Closed message server.")
self._msg_server = None
def register_function(self, callable, function_name):
"""
Registers a function for the background process to call.
:param callable: Callable object to execute when the function is called from the background
process.
:param function_name: Name to register the callable under.
"""
self._msg_server.register_function(callable, function_name)
def call(self, name, *args, **kwargs):
"""
Calls a method on the background process and waits for the result.
:param name: Name of the method to call.
        :param args: Positional arguments for the call.
:param kwargs: Named arguments for the call.
"""
return self._proxy.call(name, *args, **kwargs)
def call_no_response(self, name, *args, **kwargs):
"""
Calls a method on the background process and does not wait for the result.
:param name: Name of the method to call.
:param args: Position arguments for the call.
:param kwargs: Named arguments for the call.
"""
return self._proxy.call_no_response(name, *args, **kwargs)
def _create_proxy(self, pipe, authkey):
"""
Connects to the other process's RPC server.
"""
logger.info("Connecting to gui pipe %s" % pipe)
self._proxy = RPCProxy(pipe, authkey)
logger.debug("Connected to the proxy server.")
def _create_server(self):
"""
Launches an RPC server.
"""
logger.debug("Starting RPC server")
self._msg_server = RPCServerThread(self._engine)
self._msg_server.start()
@property
def server_pipe(self):
"""
:returns: The server's pipe.
"""
return self._msg_server.pipe
@property
def server_authkey(self):
"""
:returns: The server's authorization key.
"""
return self._msg_server.authkey
def _destroy_proxy(self):
"""
Disconnects from the background process's RPC server. This method is actually invoked from the
background process to inform the Desktop app that it is about to shut down.
"""
# Notify clients that the background process requested a shut down.
if self._proxy is not None:
try:
self._proxy.close()
except Exception, e:
logger.warning("Error disconnecting from proxy: %s", e)
else:
logger.debug("Disconnected from the proxy.")
finally:
self._proxy = None
```
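`CommunicationBase` leaves connection setup to its subclasses: one side starts a local server with `_create_server()` and hands out `server_pipe`/`server_authkey`, while the other side connects with `_create_proxy()`. A hedged usage sketch follows; the subclass and method names here are illustrative only, not the actual tk-desktop classes.

```python
# Illustrative only -- the real tk-desktop subclasses differ in detail.
#
# class SiteCommunication(CommunicationBase):
#     def start(self):
#         self._create_server()                 # local RPC server
#         return self.server_pipe, self.server_authkey
#
#     def connect(self, pipe, authkey):
#         self._create_proxy(pipe, authkey)     # remote RPC proxy
#
# comm.register_function(lambda: "pong", "ping")
# if comm.is_connected:
#     print(comm.call("ping"))
```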
#### File: python/tk_desktop/console.py
```python
import sgtk
import logging
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
from .ui import resources_rc
settings = sgtk.platform.import_framework("tk-framework-shotgunutils", "settings")
COLOR_MAP = {
# colors from the Tomorrow Night Eighties theme
logging.CRITICAL: '#f2777a',
logging.ERROR: '#f2777a',
logging.WARNING: '#ffcc66',
logging.INFO: '#cccccc',
logging.DEBUG: '#999999'
}
class ConsoleLogHandler(logging.Handler):
# Dummy type to hold the log_message signal.
class LogSignaller(QtCore.QObject):
log_message = QtCore.Signal(str, bool)
def __init__(self, console):
logging.Handler.__init__(self)
self.__console = console
self.__formatter = logging.Formatter("%(asctime)s [%(levelname) 8s] %(message)s")
# Wrap the real message logging with a signal/slot,
# to ensure that the console is updated within the UI thread.
self.__signals = self.LogSignaller()
self.__signals.log_message.connect(self.__console.append_text)
def emit(self, record):
# Convert the record to pretty HTML
message = self.__formatter.format(record)
if record.levelno in COLOR_MAP:
color = COLOR_MAP[record.levelno]
message = "<font color=\"%s\">%s</font>" % (color, message)
message = "<pre>%s</pre>" % message
        # Update the console (possibly from a different thread than the current one).
        # Passing True as the second argument (force_show) would pop the console
        # open automatically, e.g. when record.levelno >= logging.ERROR.
self.__signals.log_message.emit(message, False)
class Console(QtGui.QDialog):
def __init__(self, parent=None):
super(Console, self).__init__(parent)
self.setWindowTitle('Shotgun Desktop Console')
self.setWindowIcon(QtGui.QIcon(":/tk-desktop/default_systray_icon.png"))
self.__logs = QtGui.QPlainTextEdit()
layout = QtGui.QHBoxLayout()
layout.addWidget(self.__logs)
self.setLayout(layout)
# configure the text widget
self.__logs.setReadOnly(True)
self.__logs.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.__logs.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.__logs.customContextMenuRequested.connect(self.on_logs_context_menu_request)
self.__logs.setStyleSheet("QPlainTextEdit:focus { border: none; }")
# load up previous size
self._settings_manager = settings.UserSettings(sgtk.platform.current_bundle())
pos = self._settings_manager.retrieve("console.pos", self.pos(), self._settings_manager.SCOPE_GLOBAL)
size = self._settings_manager.retrieve(
"console.size", QtCore.QSize(800, 400), self._settings_manager.SCOPE_GLOBAL)
self.move(pos)
self.resize(size)
def on_logs_context_menu_request(self, point):
menu = self.__logs.createStandardContextMenu()
clear_action = menu.addAction("Clear")
clear_action.triggered.connect(self.clear)
close_action = menu.addAction("Close")
close_action.triggered.connect(self.close)
menu.exec_(self.__logs.mapToGlobal(point))
def append_text(self, text, force_show=False):
self.__logs.appendHtml(text)
cursor = self.__logs.textCursor()
cursor.movePosition(cursor.End)
cursor.movePosition(cursor.StartOfLine)
self.__logs.setTextCursor(cursor)
self.__logs.ensureCursorVisible()
if force_show:
self.show_and_raise()
def clear(self):
self.__logs.setPlainText("")
def show_and_raise(self):
self.show()
self.raise_()
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
def closeEvent(self, event):
self._settings_manager.store("console.pos", self.pos(), self._settings_manager.SCOPE_GLOBAL)
self._settings_manager.store("console.size", self.size(), self._settings_manager.SCOPE_GLOBAL)
event.accept()
```
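The interesting part of `ConsoleLogHandler.emit` is the HTML it produces: the formatted record is wrapped in a level-dependent `<font>` tag and a `<pre>` block before being appended to the console. The same transformation, reduced to a standalone function:

```python
# Standalone sketch of the HTML colorizing in ConsoleLogHandler.emit() above.
import logging

COLORS = {logging.ERROR: "#f2777a", logging.WARNING: "#ffcc66"}

def to_html(levelno, message):
    color = COLORS.get(levelno)
    if color:
        message = '<font color="%s">%s</font>' % (color, message)
    return "<pre>%s</pre>" % message

print(to_html(logging.WARNING, "disk is almost full"))
# <pre><font color="#ffcc66">disk is almost full</font></pre>
```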
#### File: python/tk_desktop/__init__.py
```python
def get_engine_implementation(implementation_type):
if implementation_type == "site":
from .desktop_engine_site_implementation import DesktopEngineSiteImplementation
return DesktopEngineSiteImplementation
if implementation_type == "project":
from .desktop_engine_project_implementation import DesktopEngineProjectImplementation
return DesktopEngineProjectImplementation
raise RuntimeError("unknown implementation_type: '%s'" % implementation_type)
```
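The deferred imports in `get_engine_implementation` keep the unused implementation module from being loaded at all. A hedged usage sketch (assumes a Toolkit session where `engine` exists):

```python
# Illustrative usage -- "engine" is assumed to come from the Toolkit runtime.
#
# impl_class = get_engine_implementation("site")
# implementation = impl_class(engine)
```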
#### File: tk_desktop/notifications/desktop_notification.py
```python
from .notification import Notification
import sgtk
logger = sgtk.platform.get_logger(__name__)
class DesktopNotification(Notification):
"""
Notification that can be hard-coded in the tk-desktop engine.
"""
_DESKTOP_NOTIFICATIONS = "desktop.notifications"
def __init__(self, engine):
"""
:param engine: Toolkit engine.
"""
self._engine = engine
@classmethod
def create(cls, banner_settings, engine):
"""
Notification factory for the ``DesktopNotification`` class.
If the ``banner_id`` and ``banner_message`` settings for the engine are set,
an instance of this class will be returned. Otherwise, ``None`` will be returned.
Note that if this notification has been dismissed in the past, the method will also
return ``None``.
:param banner_settings: Dictionary of banner settings.
:param engine: Toolkit engine.
:returns: A :class:`DesktopNotification` instance, or ``None``.
"""
banner_id = engine.get_setting("banner_id")
banner_message = engine.get_setting("banner_message")
if not banner_id or not banner_message:
logger.debug(
"banner_id and/or banner_message not set."
)
return
if banner_settings.get(
cls._DESKTOP_NOTIFICATIONS, {}
).get(banner_id, False):
logger.debug(
"banner_id %s has already been dismissed.",
banner_id
)
return None
else:
logger.debug("Desktop notification is available.")
return DesktopNotification(engine)
@property
def message(self):
"""
Message to display.
"""
return self._engine.get_setting("banner_message")
@property
def unique_id(self):
"""
Returns the unique identifier of a notification.
"""
return self._DESKTOP_NOTIFICATIONS + self._engine.get_setting("banner_id")
def _dismiss(self, banner_settings):
"""
Updates the ``banner_settings`` so this notification does not come back in the future.
:param banner_settings: Dictionary of the banners settings.
"""
banner_settings.setdefault(
self._DESKTOP_NOTIFICATIONS, {}
)[self._engine.get_setting("banner_id")] = True
```
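`create` and `_dismiss` share an implicit contract about the shape of `banner_settings`: dismissed banner ids accumulate under the `"desktop.notifications"` key. That bookkeeping, extracted into two plain functions for reference:

```python
# Sketch of the banner_settings bookkeeping used by DesktopNotification above.
_KEY = "desktop.notifications"

def dismiss(banner_settings, banner_id):
    banner_settings.setdefault(_KEY, {})[banner_id] = True

def is_dismissed(banner_settings, banner_id):
    return banner_settings.get(_KEY, {}).get(banner_id, False)

settings = {}
assert not is_dismissed(settings, "maintenance")
dismiss(settings, "maintenance")
assert is_dismissed(settings, "maintenance")
```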
#### File: python/tk_desktop/setup_new_os.py
```python
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
from .ui import setup_new_os
class SetupNewOS(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = setup_new_os.Ui_SetupNewOS()
self.ui.setupUi(self)
self.ui.button.clicked.connect(self.launch_docs)
self._parent = parent
filter = ResizeEventFilter(self._parent)
filter.resized.connect(self._on_parent_resized)
self._parent.installEventFilter(filter)
self.setVisible(False)
def launch_docs(self):
url = "https://support.shotgunsoftware.com/entries/95443887#Multiple%20Operating%20Systems"
QtGui.QDesktopServices.openUrl(url)
def _on_parent_resized(self):
"""
Special slot hooked up to the event filter.
When associated widget is resized this slot is being called.
"""
# resize overlay
self.resize(self._parent.size())
class ResizeEventFilter(QtCore.QObject):
"""
Event filter which emits a resized signal whenever
the monitored widget resizes. This is so that the overlay wrapper
class can be informed whenever the Widget gets a resize event.
"""
resized = QtCore.Signal()
def eventFilter(self, obj, event):
# peek at the message
if event.type() == QtCore.QEvent.Resize:
# re-broadcast any resize events
self.resized.emit()
# pass it on!
return False
```
#### File: python/tk_desktop/thumb_widget.py
```python
from tank.platform.qt import QtGui
from tank.platform.qt import QtCore
from .ui import thumb_widget
class ThumbWidget(QtGui.QWidget):
    """Thumbnail widget to populate the projects list view."""
SIZER_WIDGET = None
def __init__(self, width=120, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setVisible(False)
self.ui = thumb_widget.Ui_ThumbWidget()
self.ui.setupUi(self)
# fix thumbnail size to full width minus margins
margins = self.ui.widget_frame_layout.contentsMargins()
self.thumb_size = width - margins.left() - margins.right()
self.ui.thumbnail.setFixedSize(self.thumb_size, self.thumb_size)
def set_thumbnail(self, pixmap):
""" Set a thumbnail given the current pixmap. """
# zoom to fit height, then crop to center
pixmap = pixmap.scaledToHeight(self.thumb_size, QtCore.Qt.SmoothTransformation)
if pixmap.width() > self.thumb_size:
extra = pixmap.width() - self.thumb_size
pixmap = pixmap.copy(extra/2, 0, self.thumb_size, self.thumb_size)
self.ui.thumbnail.setPixmap(pixmap)
@classmethod
def height_for_width(cls, width, text):
if cls.SIZER_WIDGET is None:
cls.SIZER_WIDGET = cls(width)
# figure out height for the given width
# top/bottom margins + thumbnail + spacing + label height
margins = cls.SIZER_WIDGET.ui.widget_frame_layout.contentsMargins()
spacing = cls.SIZER_WIDGET.ui.widget_frame_layout.spacing()
thumb_height = cls.SIZER_WIDGET.thumb_size
cls.SIZER_WIDGET.set_text(text)
label_height = cls.SIZER_WIDGET.ui.label.heightForWidth(width - margins.left() - margins.right())
return margins.top() + margins.bottom() + spacing + thumb_height + label_height
def set_text(self, label):
"""Populate the line of text in the widget """
self.ui.label.setText(label)
def set_selected(self, selected):
"""Adjust the style sheet to indicate selection or not"""
if selected:
p = QtGui.QPalette()
highlight_col = p.color(QtGui.QPalette.Active, QtGui.QPalette.Highlight)
border = "rgb(%s, %s, %s)" % (highlight_col.red(), highlight_col.green(), highlight_col.blue())
background = "rgba(%s, %s, %s, 25%%)" % (highlight_col.red(), highlight_col.green(), highlight_col.blue())
self.ui.widget_frame.setStyleSheet("""
#widget_frame {
border: 1px solid %s;
background-color: %s;
}
""" % (border, background))
else:
self.ui.widget_frame.setStyleSheet("""
#widget_frame {
border: 1px solid transparent;
}
""")
# force a refresh of the stylesheet
self.ui.widget_frame.style().unpolish(self.ui.widget_frame)
self.ui.widget_frame.style().polish(self.ui.widget_frame)
self.ui.widget_frame.update()
```
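Both `set_thumbnail` and `height_for_width` are plain geometry: scale to the target height, crop the excess width evenly, and stack margins, thumbnail, spacing, and label to get the total height. The crop offset in isolation:

```python
# Standalone sketch of the center-crop arithmetic in set_thumbnail() above:
# after scaling to thumb_size high, extra width is trimmed equally per side.
def crop_x_offset(scaled_width, thumb_size):
    if scaled_width <= thumb_size:
        return 0
    return (scaled_width - thumb_size) // 2

assert crop_x_offset(200, 120) == 40   # crop starts 40px in from the left
assert crop_x_offset(100, 120) == 0    # nothing to crop
```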
#### File: tk_desktop/ui/thumb_widget.py
```python
from sgtk.platform.qt import QtCore, QtGui
class Ui_ThumbWidget(object):
def setupUi(self, ThumbWidget):
ThumbWidget.setObjectName("ThumbWidget")
ThumbWidget.resize(542, 453)
self.verticalLayout_2 = QtGui.QVBoxLayout(ThumbWidget)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_frame = QtGui.QFrame(ThumbWidget)
self.widget_frame.setMouseTracking(True)
self.widget_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.widget_frame.setFrameShadow(QtGui.QFrame.Plain)
self.widget_frame.setLineWidth(0)
self.widget_frame.setObjectName("widget_frame")
self.widget_frame_layout = QtGui.QVBoxLayout(self.widget_frame)
self.widget_frame_layout.setSpacing(10)
self.widget_frame_layout.setContentsMargins(15, 15, 15, 15)
self.widget_frame_layout.setObjectName("widget_frame_layout")
self.thumbnail = QtGui.QLabel(self.widget_frame)
self.thumbnail.setMouseTracking(True)
self.thumbnail.setPixmap(QtGui.QPixmap(":/tk-desktop/loading_512x400.png"))
self.thumbnail.setScaledContents(False)
self.thumbnail.setAlignment(QtCore.Qt.AlignCenter)
self.thumbnail.setObjectName("thumbnail")
self.widget_frame_layout.addWidget(self.thumbnail)
self.label = QtGui.QLabel(self.widget_frame)
self.label.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.label.setWordWrap(True)
self.label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.label.setObjectName("label")
self.widget_frame_layout.addWidget(self.label)
self.verticalLayout_2.addWidget(self.widget_frame)
self.retranslateUi(ThumbWidget)
QtCore.QMetaObject.connectSlotsByName(ThumbWidget)
def retranslateUi(self, ThumbWidget):
ThumbWidget.setWindowTitle(QtGui.QApplication.translate("ThumbWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
ThumbWidget.setProperty("label", QtGui.QApplication.translate("ThumbWidget", "project_thumbnail", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
```
#### File: python/tk_desktop/wait_screen.py
```python
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
from .ui import wait_screen
class WaitScreen(QtGui.QDialog):
""" Simple wait dialog """
def __init__(self, header="", subheader="", parent=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowFlags(QtCore.Qt.Popup)
# setup the GUI
self.ui = wait_screen.Ui_WaitScreen()
self.ui.setupUi(self)
self.ui.header.setText(header)
self.ui.subheader.setText(subheader)
def set_header(self, header):
self.ui.header.setText(header)
def set_subheader(self, subheader):
self.ui.subheader.setText(subheader)
```
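`WaitScreen` has no event loop of its own, so callers show it, pump the Qt event queue once so it paints, and hide it in a `finally` block. This is exactly how `DefaultConfigPage.validatePage` further below uses it; the sketch assumes a running Qt application:

```python
# Usage pattern (assumes a running Qt application; see DefaultConfigPage below):
#
# wait = WaitScreen("Working,", "hold on...", parent=self)
# wait.show()
# QtGui.QApplication.instance().processEvents()
# try:
#     do_long_running_work()
# finally:
#     wait.hide()
```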
#### File: python/setup_project/base_page.py
```python
from sgtk.platform.qt import QtGui
from sgtk import TankError
import traceback
class BasePage(QtGui.QWizardPage):
""" Base page for all Shotgun pages to inherit from. """
# by default return the general setting up your project page
_HELP_URL = "https://support.shotgunsoftware.com/entries/95442748"
def __init__(self, parent=None):
""" Constructor """
QtGui.QWizardPage.__init__(self, parent)
self._page_id = None
self._next_page_id = None
self._error_field = None
def setup_ui(self, page_id, error_field=None):
"""
Setup page UI after the Wizard's UI has been setup from the uic.
:param page_id: Page id for current page.
:param error_field: QLabel object to use to display error messages.
These messages may be one-liners as well as full call stacks.
"""
self._page_id = page_id
self._error_field = error_field
def page_id(self):
""" Return the cached id of this page """
return self._page_id
def set_next_page(self, page):
""" Override which page comes next """
self._next_page_id = page.page_id()
def nextId(self):
""" Enhanced logic for non-linear wizards """
if self._next_page_id is None:
return QtGui.QWizardPage.nextId(self)
return self._next_page_id
def help_requested(self):
if self._HELP_URL:
QtGui.QDesktopServices.openUrl(self._HELP_URL)
def validatePage(self):
"""
Validate the current page.
The idea of having this in BasePage is that whatever the last page is
will be the one calling pre_setup_validation.
"""
state = True
try:
# Validate
if self.isCommitPage():
wiz = self.wizard()
wiz.core_wizard.pre_setup_validation()
except TankError, e:
if self._error_field:
self._error_field.setText(str(e))
state = False
except Exception, e:
if self._error_field:
self._error_field.setText(traceback.format_exc())
state = False
return state
```
#### File: python/setup_project/progress_page.py
```python
import sgtk.platform
from sgtk.platform.qt import QtCore
from .base_page import BasePage
class RunSetupThread(QtCore.QThread):
""" Simple thread to run the wizard in the background """
success = QtCore.Signal()
failure = QtCore.Signal(str)
def __init__(self, wizard, parent=None):
QtCore.QThread.__init__(self, parent)
self._wizard = wizard
def run(self):
try:
self._wizard.execute()
self.success.emit()
except Exception, e:
self.failure.emit(str(e))
class ProgressPage(BasePage):
""" Page to show the progress bar during configuration setup. """
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._thread_success = False
self._original_next_css = None
self._original_next_text = None
self.execute_thread = None
def setup_ui(self, page_id):
BasePage.setup_ui(self, page_id)
wiz = self.wizard()
wiz.ui.progress_output.hide()
wiz.ui.additional_details_button.pressed.connect(self.additional_details_pressed)
def initializePage(self):
# disable the cancel and back buttons
wiz = self.wizard()
wiz.button(wiz.NextButton).setEnabled(False)
self._original_next_text = wiz.buttonText(wiz.NextButton)
self._original_next_css = wiz.button(wiz.NextButton).styleSheet()
wiz.setButtonText(wiz.NextButton, "Running...")
wiz.button(wiz.NextButton).setStyleSheet("background-color: rgb(128, 128, 128);")
# setup for progress reporting
wiz.ui.progress.setValue(0)
wiz.core_wizard.set_progress_callback(self.progress_callback)
# run the thread
self.execute_thread = RunSetupThread(wiz.core_wizard)
self.execute_thread.success.connect(self._on_run_succeeded)
self.execute_thread.failure.connect(self._on_run_failed)
self.execute_thread.finished.connect(self._on_thread_finished)
self.execute_thread.start()
# can no longer cancel or hit back
wiz.setButtonLayout([wiz.HelpButton, wiz.Stretch, wiz.NextButton, wiz.FinishButton])
def additional_details_pressed(self):
# handle the additional details toggle being pressed
wiz = self.wizard()
# show the additional details and hide the additional details button
wiz.ui.progress_output.show()
wiz.ui.additional_details_button.hide()
def append_log_message(self, text):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self.__append_on_main_thread, text)
def __append_on_main_thread(self, text):
# append the log message to the end of the logging area
wiz = self.wizard()
wiz.ui.progress_output.appendHtml(text)
cursor = wiz.ui.progress_output.textCursor()
cursor.movePosition(cursor.End)
cursor.movePosition(cursor.StartOfLine)
wiz.ui.progress_output.setTextCursor(cursor)
wiz.ui.progress_output.ensureCursorVisible()
def progress_callback(self, chapter, progress):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self.__progress_on_main_thread, chapter, progress)
def __progress_on_main_thread(self, chapter, progress):
# update the progress display
if progress is not None:
wiz = self.wizard()
wiz.ui.message.setText(chapter)
wiz.ui.progress.setValue(progress)
def _on_run_finished(self):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self._on_run_finished_main_thread)
def _on_run_finished_main_thread(self):
# thread has finished
# clean up the page state
wiz = self.wizard()
wiz.button(wiz.NextButton).setEnabled(True)
def _on_run_succeeded(self):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self._on_run_succeeded_main_thread)
def _on_run_succeeded_main_thread(self):
# thread finished successfully
self._on_run_finished()
self._thread_success = True
wiz = self.wizard()
wiz.ui.progress.setValue(100)
wiz.ui.message.setText("Set up finished")
wiz.setButtonText(wiz.NextButton, self._original_next_text)
wiz.button(wiz.NextButton).setStyleSheet(self._original_next_css)
if wiz.ui.progress_output.isHidden():
# auto advance if details are not shown
wiz.next()
def _on_run_failed(self, message):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self._on_run_failed_main_thread, message)
def _on_run_failed_main_thread(self, message):
# thread failed
self._on_run_finished()
# show the failure icon and message
wiz = self.wizard()
wiz.button(wiz.CancelButton).setVisible(True)
wiz.setButtonText(wiz.NextButton, "Quit")
wiz.ui.complete_errors.setText(message)
def _on_thread_finished(self):
# since a thread could be calling this make sure we are doing GUI work on the main thread
engine = sgtk.platform.current_engine()
engine.execute_in_main_thread(self._on_thread_finished_main_thread)
def _on_thread_finished_main_thread(self):
# let the wizard know that our complete state has changed
self.completeChanged.emit()
def isComplete(self):
return self._thread_success
```
#### File: python/setup_project/project_model.py
```python
import sgtk
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunModel = shotgun_model.ShotgunModel
class ProjectModel(ShotgunModel):
""" Simple Project model to pull down Toolkit projects and their thumbnails """
DISPLAY_NAME_ROLE = QtCore.Qt.UserRole + 101
PROJECT_ID_ROLE = QtCore.Qt.UserRole + 102
def __init__(self, parent):
ShotgunModel.__init__(self, parent, download_thumbs=True)
# load the missing project thumbnail
self._missing_thumbnail_project = \
QtGui.QPixmap(":/tk-framework-adminui/setup_project/missing_thumbnail_project.png")
# and load the data from Shotgun
filters = [
["archived", "is_not", True],
["tank_name", "is_not", None],
["name", "is_not", "Template Project"],
]
ShotgunModel._load_data(
self,
entity_type="Project",
filters=filters,
hierarchy=["name"],
fields=["name", "id"],
order=[{"field_name": "name", "direction": "asc"}],
)
self._refresh_data()
def _populate_item(self, item, sg_data):
item.setData(sg_data.get("name") or "No Name", self.DISPLAY_NAME_ROLE)
item.setData(sg_data.get("id"), self.PROJECT_ID_ROLE)
def _populate_default_thumbnail(self, item):
item.setIcon(self._missing_thumbnail_project)
def _populate_thumbnail(self, item, field, path):
thumb = QtGui.QPixmap(path)
item.setIcon(thumb)
```
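The Shotgun filters passed to `_load_data` read as AND-ed predicates: skip archived projects, projects with no `tank_name`, and the "Template Project" placeholder. The same predicate over plain dictionaries, for reference:

```python
# Sketch of the filter semantics used by ProjectModel._load_data() above.
def is_listed_project(project):
    return (
        not project.get("archived")
        and project.get("tank_name") is not None
        and project.get("name") != "Template Project"
    )

assert is_listed_project({"name": "Alpha", "tank_name": "alpha", "archived": False})
assert not is_listed_project({"name": "Template Project", "tank_name": "tp"})
assert not is_listed_project({"name": "Beta", "tank_name": None})
```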
#### File: python/setup_project/setup_type_page.py
```python
from sgtk.platform.qt import QtGui
from .base_page import BasePage
class SetupTypePage(BasePage):
""" Page to choose what configuration type to use. """
STANDARD_ID = 0
PROJECT_ID = 1
GITHUB_ID = 2
DISK_ID = 3
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._disk_page_id = None
self._github_page_id = None
self._project_page_id = None
self._default_configs_page_id = None
def setup_ui(self, page_id):
BasePage.setup_ui(self, page_id)
# Setup buttongroup by hand since in PySide it breaks the ui compilation
wiz = self.wizard()
self._config_type_button_group = QtGui.QButtonGroup(self)
self._config_type_button_group.addButton(wiz.ui.select_standard, self.STANDARD_ID)
self._config_type_button_group.addButton(wiz.ui.select_project, self.PROJECT_ID)
self._config_type_button_group.addButton(wiz.ui.select_github, self.GITHUB_ID)
self._config_type_button_group.addButton(wiz.ui.select_disk, self.DISK_ID)
def set_project_page(self, page):
""" Set the page to switch to if project is selected. """
self._project_page_id = page.page_id()
def set_github_page(self, page):
""" Set the page to switch to if github url is selected. """
self._github_page_id = page.page_id()
def set_disk_page(self, page):
""" Set the page to switch to if disk location is selected. """
self._disk_page_id = page.page_id()
def set_default_configs_page(self, page):
""" Set the page to switch to if default config is selected. """
self._default_configs_page_id = page.page_id()
def nextId(self):
# return the appropriate id for the current selection
selection = self._config_type_button_group.checkedId()
        if selection == self.STANDARD_ID and self._default_configs_page_id is not None:
            return self._default_configs_page_id
        elif selection == self.PROJECT_ID and self._project_page_id is not None:
            return self._project_page_id
        elif selection == self.GITHUB_ID and self._github_page_id is not None:
            return self._github_page_id
        elif selection == self.DISK_ID and self._disk_page_id is not None:
            return self._disk_page_id
return BasePage.nextId(self)
```
#### File: python/setup_project/default_config_page.py
```python
from sgtk.platform.qt import QtGui
from .base_page import BasePage
from .wait_screen import WaitScreen
class DefaultConfigPage(BasePage):
""" Page to choose which default configuration to use. """
_HELP_URL = BasePage._HELP_URL + "#Default%20configuration%20templates"
DEFAULT_ID = 0
LEGACY_DEFAULT_ID = 1
SELECTION_ID_MAP = {
DEFAULT_ID: "tk-config-default2",
LEGACY_DEFAULT_ID: "tk-config-default",
}
def __init__(self, parent=None):
BasePage.__init__(self, parent)
def setup_ui(self, page_id):
BasePage.setup_ui(self, page_id)
        # Set up the button group by hand, since doing it in the .ui file breaks the ui compilation in PySide
wiz = self.wizard()
self._config_button_group = QtGui.QButtonGroup(self)
self._config_button_group.addButton(wiz.ui.select_default_config, self.DEFAULT_ID)
self._config_button_group.addButton(wiz.ui.select_legacy_default_config, self.LEGACY_DEFAULT_ID)
def validatePage(self):
selected_id = self._config_button_group.checkedId()
uri = self.SELECTION_ID_MAP[selected_id]
wiz = self.wizard()
wait = WaitScreen("Downloading Config,", "hold on...", parent=self)
wait.show()
QtGui.QApplication.instance().processEvents()
try:
wiz.set_config_uri(uri)
            wiz.ui.default_configs_errors.setText("")
except Exception, e:
wiz.ui.default_configs_errors.setText(str(e))
return False
finally:
wait.hide()
return True
```
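For reference, the dispatch in `validatePage` from the checked radio button to a config descriptor URI is a plain dictionary lookup. A minimal standalone sketch of that mapping (the ids and URIs mirror the constants above):
```python
# Standalone sketch of the SELECTION_ID_MAP dispatch used by validatePage().
DEFAULT_ID = 0
LEGACY_DEFAULT_ID = 1
SELECTION_ID_MAP = {
    DEFAULT_ID: "tk-config-default2",
    LEGACY_DEFAULT_ID: "tk-config-default",
}

selected_id = DEFAULT_ID  # stand-in for QButtonGroup.checkedId()
print(SELECTION_ID_MAP[selected_id])
```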
#### File: tk-framework-desktopserver/v1.3.1/framework.py
```python
import sgtk
import sys
import os
import struct
from sgtk.util import LocalFileStorageManager
class DesktopserverFramework(sgtk.platform.Framework):
"""
Provides browser integration.
"""
def __init__(self, *args, **kwargs):
super(DesktopserverFramework, self).__init__(*args, **kwargs)
self._server = None
self._settings = None
self._tk_framework_desktopserver = None
def can_run_server(self):
"""
Checks if we can use the framework to run the server.
:returns: ``True`` if we can, ``False`` otherwise.
"""
# Server requires 64-bit libraries to run.
return self.__is_64bit_python()
def add_different_user_requested_callback(self, cb):
"""
Registers a callback to know when a different user or site is making browser integration requests.
The caller is not waiting for the callback to return.
:param function cb: Callback of the form:
def callback(site, user_id):
'''
Called when the site or user is different than the current site or user.
:param str site: Url of the site the request is coming from.
:param int user_id: Id of the HumanUser who made the request.
'''
"""
# Lazy-init because engine is initialized after its frameworks, so QtCore is not initialized yet.
from sgtk.platform.qt import QtCore
if self._server:
self._server.notifier.different_user_requested.connect(cb, type=QtCore.Qt.QueuedConnection)
##########################################################################################
# init and destroy
def launch_desktop_server(self, host, user_id, parent=None):
"""
Initializes the desktop server.
        The server actually supports two protocols, named v1 and v2. v1 can be used to process
        requests from any user on any site, while v2 can only be used to process requests from
        the currently authenticated user.
:param str host: Host for which we desire to answer requests.
:param int user_id: Id of the user for which we desire to answer requests.
:param parent: Parent widget for any pop-ups to show during initialization.
:type parent: :class:`PySide.QtGui.QWidget`
"""
        # Twisted only runs on 64-bit Python.
        # Do not even attempt to import the framework, as it will cause 64-bit DLLs to be loaded.
if not self.__is_64bit_python():
self.logger.warning("The browser integration is only available with 64-bit versions of Python.")
self._integration_enabled = False
return
self._tk_framework_desktopserver = self.import_module("tk_framework_desktopserver")
# Read the browser integration settings from disk. By passing in location=None, the Toolkit API will be
# used to locate the settings instead of looking at a specific file.
self._settings = self._tk_framework_desktopserver.Settings(
location=None,
default_certificate_folder=os.path.join(
LocalFileStorageManager.get_global_root(
LocalFileStorageManager.CACHE, LocalFileStorageManager.CORE_V18
),
"desktop",
"config",
"certificates"
)
)
self._settings.dump(self.logger)
# Did the user disable it?
if not self._settings.integration_enabled:
self.logger.info("Browser integration has been disabled in the Toolkit settings.")
self._integration_enabled = False
else:
self._integration_enabled = True
if not self._integration_enabled:
return
try:
if self._site_supports_shotgunlocalhost():
self.__retrieve_certificates_from_shotgun()
keys_path = self._get_shotgunlocalhost_keys_folder()
encrypt = True
else:
self.__ensure_certificate_ready(regenerate_certs=False, parent=parent)
keys_path = self._settings.certificate_folder
encrypt = False
self._server = self._tk_framework_desktopserver.Server(
keys_path=keys_path,
encrypt=encrypt,
host=host,
user_id=user_id,
port=self._settings.port
)
self._server.start()
except Exception:
self.logger.exception("Could not start the browser integration:")
def _get_shotgunlocalhost_keys_folder(self):
"""
Retrieves the location where the shotgunlocalhost.com keys will be downloaded to.
:returns: Path to the folder where server.crt and server.key are.
"""
return os.path.join(self.cache_location, "keys")
def _write_cert(self, filename, cert):
"""
        Writes a certificate to disk, converting any literal "\\n" sequences into real
        newlines. This is required because certificates returned from Shotgun encode
        newlines as the two characters backslash-n rather than actual line breaks.
:param filename: Name of the file to save under the keys folder.
:param cert: Certificate taken from Shotgun.
"""
with open(os.path.join(self._get_shotgunlocalhost_keys_folder(), filename), "w") as fw:
fw.write("\n".join(cert.split("\\n")))
def _site_supports_shotgunlocalhost(self):
"""
Checks if the site supports encryption.
"""
return self.shotgun.server_info.get("shotgunlocalhost_browser_integration_enabled", False)
def can_regenerate_certificates(self):
"""
Indicates if we can regenerate certificates.
Certificates can only be regenerated when we're not using shotgunlocalhost.
:returns: True if certificates can be regenerated, False otherwise.
"""
return self._site_supports_shotgunlocalhost() is False
def regenerate_certificates(self, parent=None):
"""
Regenerates the certificates.
:param parent: Parent widget for any pop-ups to show during certificate generation.
:type parent: :class:`PySide.QtGui.QWidget`
"""
self.__ensure_certificate_ready(regenerate_certs=True, parent=parent)
def destroy_framework(self):
"""
Called on finalization of the framework.
Closes the websocket server.
"""
if self._server and self._server.is_running():
self._server.tear_down()
def __retrieve_certificates_from_shotgun(self):
"""
Retrieves certificates from Shotgun.
"""
self.logger.debug("Retrieving certificates from Shotgun")
certs = self.shotgun._call_rpc("sg_desktop_certificates", {})
sgtk.util.filesystem.ensure_folder_exists(self._get_shotgunlocalhost_keys_folder())
self._write_cert("server.crt", certs["sg_desktop_cert"])
self._write_cert("server.key", certs["sg_desktop_key"])
def __ensure_certificate_ready(self, regenerate_certs=False, parent=None):
"""
Ensures that the certificates are created and registered. If something is amiss, then the
certificates are regenerated.
:param bool regenerate_certs: If ``True``, certificates will be regenerated.
:param parent: Parent widget for any pop-ups to show during certificate generation.
:type parent: :class:`PySide.QtGui.QWidget`
"""
cert_handler = self._tk_framework_desktopserver.get_certificate_handler(
self._settings.certificate_folder
)
if regenerate_certs:
self.logger.info("Backing up current certificates files if they exist.")
cert_handler.backup_files()
# We only warn once.
warned = False
# Make sure the certificates exist.
if not cert_handler.exists():
self.logger.info("Certificate doesn't exist.")
            # Start by unregistering certificates from the keychains; this can happen
            # if the user wiped their shotgun/desktop/config/certificates folder.
if cert_handler.is_registered():
self.logger.info("Unregistering lingering certificate.")
# Warn once.
self.__warn_for_prompt(parent)
warned = True
cert_handler.unregister()
self.logger.info("Unregistered.")
# Create the certificate files
cert_handler.create()
self.logger.info("Certificate created.")
else:
self.logger.info("Certificate already exist.")
# Check if the certificates are registered with the keychain.
if not cert_handler.is_registered():
self.logger.info("Certificate not registered.")
# Only if we've never been warned before.
if not warned:
self.__warn_for_prompt(parent)
cert_handler.register()
self.logger.info("Certificate registered.")
else:
self.logger.info("Certificates already registered.")
def __get_certificate_prompt(self, keychain_name, action):
"""
Generates the text to use when alerting the user that we need to register the certificate.
:param keychain_name: Name of the keychain-like entity for a particular OS.
:param action: Description of what the user will need to do when the OS prompts the user.
:returns: String containing an error message formatted
"""
return ("The Shotgun Desktop needs to update the security certificate list from your %s before "
"it can turn on the browser integration.\n"
"\n"
"%s" % (keychain_name, action))
def __warn_for_prompt(self, parent):
"""
        Warn the user that they will be prompted by the OS to authorize the update.
"""
from sgtk.platform.qt import QtGui
if sys.platform == "darwin":
QtGui.QMessageBox.information(
parent,
"Shotgun browser integration",
self.__get_certificate_prompt(
"keychain",
"You will be prompted to enter your username and password by MacOS's keychain "
"manager in order to proceed with the updates."
)
)
elif sys.platform == "win32":
QtGui.QMessageBox.information(
parent,
"Shotgun browser integration",
self.__get_certificate_prompt(
"Windows certificate store",
"Windows will now prompt you to accept one or more updates to your certificate store."
)
)
# On Linux there's no need to prompt. It's all silent.
def __is_64bit_python(self):
"""
:returns: True if 64-bit Python, False otherwise.
"""
return struct.calcsize("P") == 8
```
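Two details in the framework above are easy to illustrate in isolation: `_write_cert` undoes Shotgun's newline encoding, and `__is_64bit_python` infers the interpreter's bitness from the packed size of a C pointer. A minimal sketch of both; the sample certificate text is made up:
```python
import struct

# A certificate string as Shotgun returns it, with literal backslash-n
# sequences instead of real line breaks (the content here is made up).
cert = "-----BEGIN CERTIFICATE-----\\nMIIB...\\n-----END CERTIFICATE-----"
decoded = "\n".join(cert.split("\\n"))
print(decoded)

# A C pointer ("P") packs to 8 bytes on a 64-bit interpreter, 4 on 32-bit.
print(struct.calcsize("P") == 8)
```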
#### File: python/tk_framework_desktopserver/command.py
```python
import os
import subprocess
from threading import Thread
from Queue import Queue
import tempfile
import sys
import traceback
from .logger import get_logger
logger = get_logger(__name__)
class ReadThread(Thread):
"""
Thread that reads a pipe.
"""
def __init__(self, p_out, target_queue):
"""
Constructor.
:param p_out: Pipe to read.
:param target_queue: Queue that will accumulate the pipe output.
"""
Thread.__init__(self)
self.pipe = p_out
self.target_queue = target_queue
def run(self):
"""
Reads the contents of the pipe and adds it to the queue until the pipe
is closed.
"""
while True:
line = self.pipe.readline() # blocking read
if line == '':
break
self.target_queue.put(line)
class Command(object):
@staticmethod
def _create_temp_file():
"""
:returns: Returns the path to a temporary file.
"""
handle, path = tempfile.mkstemp(prefix="desktop_server")
os.close(handle)
return path
@staticmethod
def call_cmd(args):
"""
Runs a command in a separate process.
:param args: Command line tokens.
:returns: A tuple containing (exit code, stdout, stderr).
"""
# The commands that are being run are probably being launched from Desktop, which would
# have a TANK_CURRENT_PC environment variable set to the site configuration. Since we
# preserve that value for subprocesses (which is usually the behavior we want), the DCCs
# being launched would try to run in the project environment and would get an error due
# to the conflict.
#
# Clean up the environment to prevent that from happening.
env = os.environ.copy()
vars_to_remove = ["TANK_CURRENT_PC"]
for var in vars_to_remove:
if var in env:
del env[var]
# Launch the child process
        # Due to discrepancies in how child file descriptors and shell=True are
# handled on Windows and Unix, we'll provide two implementations. See the Windows
# implementation for more details.
if sys.platform == "win32":
ret, stdout_lines, stderr_lines = Command._call_cmd_win32(args, env)
else:
ret, stdout_lines, stderr_lines = Command._call_cmd_unix(args, env)
out = ''.join(stdout_lines)
err = ''.join(stderr_lines)
return ret, out, err
@staticmethod
def _call_cmd_unix(args, env):
"""
Runs a command in a separate process. Implementation for Unix based OSes.
:param args: Command line tokens.
:param env: Environment variables to set for the subprocess.
:returns: A tuple containing (exit code, stdout, stderr).
"""
# Note: Tie stdin to a PIPE as well to avoid this python bug on windows
# http://bugs.python.org/issue3905
# Queue code taken from: http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
stdout_lines = []
stderr_lines = []
try:
process = subprocess.Popen(
args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env
)
process.stdin.close()
stdout_q = Queue()
stderr_q = Queue()
stdout_t = ReadThread(process.stdout, stdout_q)
stdout_t.setDaemon(True)
stdout_t.start()
stderr_t = ReadThread(process.stderr, stderr_q)
stderr_t.setDaemon(True)
stderr_t.start()
# Popen.communicate() doesn't play nicely if the stdin pipe is closed
# as it tries to flush it causing an 'I/O error on closed file' error
# when run from a terminal
#
# to avoid this, lets just poll the output from the process until
# it's finished
process.wait()
try:
process.stdout.flush()
process.stderr.flush()
except IOError:
# This fails on OSX 10.7, but it looks like there's no ill side effect
# from failing on that platform so we can ignore it.
logger.exception("Error while flushing file descriptor:")
stdout_t.join()
stderr_t.join()
while not stdout_q.empty():
stdout_lines.append(stdout_q.get())
while not stderr_q.empty():
stderr_lines.append(stderr_q.get())
ret = process.returncode
except StandardError:
# Do not log the command line, it might contain sensitive information!
logger.exception("Error running subprocess:")
ret = 1
            # keep line endings so the caller's ''.join() reconstructs the traceback
            stderr_lines = traceback.format_exc().splitlines(True)
stderr_lines.append("%s" % args)
return ret, stdout_lines, stderr_lines
@staticmethod
def _call_cmd_win32(args, env):
"""
Runs a command in a separate process. Implementation for Windows.
:param args: Command line tokens.
:param env: Environment variables to set for the subprocess.
:returns: A tuple containing (exit code, stdout, stderr).
"""
stdout_lines = []
stderr_lines = []
try:
stdout_path = Command._create_temp_file()
stderr_path = Command._create_temp_file()
# On Windows, file descriptors like sockets can be inherited by child
# process and are only closed when the main process and all child
# processes are closed. This is bad because it means that the port
# the websocket server uses will never be released as long as any DCCs
# or tank commands are running. Therefore, closing the Desktop and
# restarting it for example wouldn't free the port and would give the
# "port 9000 already in use" error we've seen before.
# To avoid this, close_fds needs to be specified when launching a child
# process. However, there's a catch. On Windows, specifying close_fds
# also means that you can't share stdout, stdin and stderr with the child
# process, which is required here because we want to capture the output
# of the process.
# Therefore on Windows we'll invoke the code in a shell environment. The
# output will be redirected to two temporary files which will be read
# when the child process is over.
# Ideally, we'd be using this implementation on Unix as well. After all,
# the syntax of the command line is the same. However, specifying shell=True
# on Unix means that the following ["ls", "-al"] would be invoked like this:
# ["/bin/sh", "-c", "ls", "-al"]. This means that only ls is sent to the
# shell and -al is considered to be an argument of the shell and not part
# of what needs to be launched. The naive solution would be to quote the
# argument list and pass ["\"ls -al \""] to Popen, but that would ignore
# the fact that there could already be quotes on that command line and
# they would need to be escaped as well. Python 2's only utility to
# escape strings for the command line is pipes.quote, which is deprecated.
# Because of these reasons, we'll keep both implementations for now.
args = args + ["1>", stdout_path, "2>", stderr_path]
# Prevents the cmd.exe dialog from appearing on Windows.
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(
args,
close_fds=True,
startupinfo=startupinfo,
env=env,
shell=True
)
process.wait()
# Read back the output from the two.
with open(stdout_path) as stdout_file:
stdout_lines = [l for l in stdout_file]
with open(stderr_path) as stderr_file:
stderr_lines = [l for l in stderr_file]
# Track the result code.
ret = process.returncode
except StandardError:
logger.exception("Error running subprocess:")
ret = 1
            # splitlines(True) keeps a flat list of lines so the caller's
            # ''.join() works; wrapping the split in a list would nest it.
            stderr_lines = traceback.format_exc().splitlines(True)
stderr_lines.append("%s" % args)
# Don't lose any sleep over temporary files that can't be deleted.
try:
os.remove(stdout_path)
except:
pass
try:
os.remove(stderr_path)
except:
pass
return ret, stdout_lines, stderr_lines
```
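A hedged usage sketch of `Command.call_cmd`: it echoes a string through a child Python process and collects the exit code and output. The import path is an assumption; it presumes the package above is importable:
```python
import sys

# Hypothetical import path; assumes the framework package above is available.
from tk_framework_desktopserver.command import Command

ret, out, err = Command.call_cmd([sys.executable, "-c", "print('hello')"])
print("exit code: %d, stdout: %r, stderr: %r" % (ret, out, err))
```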
#### File: python/tk_framework_desktopserver/errors.py
```python
class BrowserIntegrationError(Exception):
"""
Base class for all browser integration errors.
"""
pass
class MissingCertificateError(BrowserIntegrationError):
"""
    Raised when a required certificate cannot be found.
"""
pass
class PortBusyError(BrowserIntegrationError):
"""
Exception raised when the TCP port is busy.
"""
pass
class CertificateRegistrationError(BrowserIntegrationError):
"""
Exception raised when something goes wrong while registering or
unregistering a certificate.
"""
pass
class MissingConfigurationFileError(BrowserIntegrationError):
"""
Raised when the configuration file can't be found.
"""
def __init__(self, location):
"""
Constructor.
:params location: Path to the missing configuration file.
"""
BrowserIntegrationError.__init__(
self,
"The configuration file at '%s' could not be found!" % location
)
```
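Since every exception above derives from `BrowserIntegrationError`, one handler covers the whole family. A small sketch, assuming the module is importable under the package path used elsewhere in this framework:
```python
# Assumed import path for illustration only.
from tk_framework_desktopserver.errors import (
    BrowserIntegrationError,
    MissingConfigurationFileError,
)

try:
    raise MissingConfigurationFileError("/path/to/config.ini")
except BrowserIntegrationError as e:
    # catches MissingConfigurationFileError and any sibling error type
    print("browser integration failed: %s" % e)
```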
#### File: python/tk_framework_desktopserver/process_manager_win.py
```python
import os
from .process_manager import ProcessManager
class ProcessManagerWin(ProcessManager):
"""
Windows OS Interface for Shotgun Commands.
"""
platform_name = "windows"
def _get_toolkit_script_name(self):
return "shotgun.bat"
def _get_toolkit_fallback_script_name(self):
return "tank.bat"
def open(self, filepath):
"""
        Opens a file with the default OS association, or with a launcher found in the
        environment. Non-blocking.
        :param filepath: String file path (ex: "c:/file.mov")
        :returns: Bool. True if the operation was successful.
"""
self._verify_file_open(filepath)
launcher = self._get_launcher()
result = True
if launcher is None:
# Note: startfile is always async. As per docs, there is no way to retrieve exit code.
os.startfile(filepath)
else:
result = self._launch_process(launcher, filepath, "Could not open file.")
return result
def pick_file_or_directory(self, multi=False):
"""
Pop-up a file selection window.
:param multi: Boolean Allow selecting multiple elements.
:returns: List of files that were selected with file browser.
"""
        files = ProcessManager.pick_file_or_directory(self, multi)
        # Qt returns paths with forward slashes, while the javascript code expects
        # Windows paths to use backslashes.
        files = [f.replace("/", "\\") for f in files]
        return files
```
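The path normalization in `pick_file_or_directory` is just a separator swap. A standalone sketch with a made-up path:
```python
# Qt hands back forward-slash paths; the javascript side expects
# backslashes on Windows. The file path below is made up.
files = ["C:/projects/shot_010/plates/plate_v001.mov"]
files = [f.replace("/", "\\") for f in files]
print(files[0])  # C:\projects\shot_010\plates\plate_v001.mov
```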
#### File: python/mock_test_classes/mock_host.py
```python
class MockHost(object):
def __init__(self):
self._reply_data = None
@property
def reply_data(self):
return self._reply_data
def reply(self, reply_data):
self._reply_data = reply_data
```
#### File: activity_stream/ui/reply_dialog.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_ReplyDialog(object):
def setupUi(self, ReplyDialog):
ReplyDialog.setObjectName("ReplyDialog")
ReplyDialog.resize(416, 128)
ReplyDialog.setModal(True)
self.verticalLayout = QtGui.QVBoxLayout(ReplyDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(ReplyDialog)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.note_widget = NoteInputWidget(ReplyDialog)
self.note_widget.setMinimumSize(QtCore.QSize(0, 40))
self.note_widget.setFocusPolicy(QtCore.Qt.NoFocus)
self.note_widget.setObjectName("note_widget")
self.verticalLayout.addWidget(self.note_widget)
self.retranslateUi(ReplyDialog)
QtCore.QMetaObject.connectSlotsByName(ReplyDialog)
def retranslateUi(self, ReplyDialog):
ReplyDialog.setWindowTitle(QtGui.QApplication.translate("ReplyDialog", "Reply", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("ReplyDialog", "<big>Please enter a Reply:</big>", None, QtGui.QApplication.UnicodeUTF8))
from ..qtwidgets import NoteInputWidget
```
#### File: python/activity_stream/widget_new_item.py
```python
from sgtk.platform.qt import QtCore, QtGui
from .widget_activity_stream_base import ActivityStreamBaseWidget
from .ui.new_item_widget import Ui_NewItemWidget
from .ui.simple_new_item_widget import Ui_SimpleNewItemWidget
from .data_manager import ActivityStreamDataHandler
from . import utils
class NewItemWidget(ActivityStreamBaseWidget):
"""
Activity stream widget that shows a UI representing a newly
created object, for example a version or a publish.
"""
def __init__(self, parent):
"""
:param parent: QT parent object
:type parent: :class:`PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
ActivityStreamBaseWidget.__init__(self, parent)
# now load in the UI that was created in the UI designer
self.ui = Ui_NewItemWidget()
self.ui.setupUi(self)
self._interactive = True
# thumbnails are hidden by default, only to appear
# for created objects that have them set
self.ui.details_thumb.setVisible(False)
# make sure that click on hyperlinks bubble up
self.ui.footer.linkActivated.connect(self._entity_request_from_url)
self.ui.header_left.linkActivated.connect(self._entity_request_from_url)
self.ui.details_thumb.playback_clicked.connect(
lambda sg_data: self.playback_requested.emit(sg_data)
)
self.ui.user_thumb.entity_requested.connect(
lambda entity_type, entity_id: self.entity_requested.emit(
entity_type,
entity_id,
)
)
##############################################################################
# properties
@property
def user_thumb(self):
"""
The user thumbnail widget.
"""
return self.ui.user_thumb
def _get_interactive(self):
"""
Whether the new item label is interactive, showing a play icon.
"""
return self._interactive
def _set_interactive(self, state):
self._interactive = bool(state)
self.ui.details_thumb.interactive = self._interactive
if self._interactive:
self.user_thumb.setCursor(QtCore.Qt.PointingHandCursor)
else:
self.user_thumb.setCursor(QtCore.Qt.ArrowCursor)
interactive = QtCore.Property(
bool,
_get_interactive,
_set_interactive,
)
##############################################################################
# public interface
def set_info(self, data):
"""
Populate text fields for this widget.
Example of data:
{'created_at': 1437322777.0,
'created_by': {'id': 38,
'image': '',
'name': '<NAME>',
'status': 'act',
'type': 'HumanUser'},
'id': 116,
'meta': {'entity_id': 6007, 'entity_type': 'Version', 'type': 'new_entity'},
'primary_entity': {'description': 'testing testing\n\n1\n\n2\n\n3',
'id': 6007,
'image': '',
'name': 'note_addressing',
'sg_uploaded_movie': {'content_type': 'video/quicktime',
'id': 180,
'link_type': 'upload',
'name': 'note_addressing.mov',
'type': 'Attachment',
'url': ''},
'status': 'rev',
'type': 'Version'},
'read': False,
'update_type': 'create'}
:param data: data dictionary with activity stream info.
"""
# call base class
ActivityStreamBaseWidget.set_info(self, data)
# make the user icon clickable
self.ui.user_thumb.set_shotgun_data(data["created_by"])
# set standard date and header fields
self._set_timestamp(data, self.ui.date)
primary_entity = data["primary_entity"]
entity_url = self._generate_entity_url(primary_entity, this_syntax=False)
header = "%s was created" % entity_url
# add link if there is a link field that is populated
if "entity" in primary_entity and primary_entity["entity"]:
link_url = self._generate_entity_url(primary_entity["entity"])
header += " on %s" % link_url
self.ui.header_left.setText(header)
# set the footer area to contain the description
if primary_entity.get("description"):
self.ui.footer.setText("%s" % primary_entity.get("description"))
else:
# hide footer fields
self.ui.footer.setVisible(False)
if primary_entity.get("image"):
# there is a thumbnail. Show thumbnail.
self.ui.details_thumb.setVisible(True)
self.ui.details_thumb.set_shotgun_data(primary_entity)
def apply_thumbnail(self, data):
"""
Populate the UI with the given thumbnail
:param image: QImage with thumbnail data
:param thumbnail_type: thumbnail enum constant:
ActivityStreamDataHandler.THUMBNAIL_CREATED_BY
ActivityStreamDataHandler.THUMBNAIL_ENTITY
ActivityStreamDataHandler.THUMBNAIL_ATTACHMENT
"""
activity_id = data["activity_id"]
if activity_id != self.activity_id:
return
thumbnail_type = data["thumbnail_type"]
image = data["image"]
if thumbnail_type == ActivityStreamDataHandler.THUMBNAIL_CREATED_BY:
thumb = utils.create_round_thumbnail(image)
self.ui.user_thumb.setPixmap(thumb)
elif thumbnail_type == ActivityStreamDataHandler.THUMBNAIL_ENTITY:
thumb = utils.create_rectangular_256x144_thumbnail(image)
self.ui.details_thumb.setPixmap(thumb)
class SimpleNewItemWidget(ActivityStreamBaseWidget):
"""
Similar to the NewItemWidget, but a smaller version of it.
This is used for 'less important' newly created items such
as tasks. The visual representation is smaller and without
a thumbnail, with a smaller user icon.
"""
def __init__(self, parent):
"""
:param parent: QT parent object
:type parent: :class:`PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
ActivityStreamBaseWidget.__init__(self, parent)
# now load in the UI that was created in the UI designer
self.ui = Ui_SimpleNewItemWidget()
self.ui.setupUi(self)
# make sure that click on hyperlinks bubble up
self.ui.header_left.linkActivated.connect(self._entity_request_from_url)
self.ui.user_thumb.entity_requested.connect(lambda entity_type, entity_id: self.entity_requested.emit(entity_type, entity_id))
##############################################################################
# public interface
def set_info(self, data):
"""
Populate text fields for this widget.
Example of data:
{'created_at': 1437322777.0,
'created_by': {'id': 38,
'image': '',
'name': '<NAME>',
'status': 'act',
'type': 'HumanUser'},
'id': 116,
'meta': {'entity_id': 6007, 'entity_type': 'Version', 'type': 'new_entity'},
'primary_entity': {'description': 'testing testing\n\n1\n\n2\n\n3',
'id': 6007,
'image': '',
'name': 'note_addressing',
'sg_uploaded_movie': {'content_type': 'video/quicktime',
'id': 180,
'link_type': 'upload',
'name': 'note_addressing.mov',
'type': 'Attachment',
'url': ''},
'status': 'rev',
'type': 'Version'},
'read': False,
'update_type': 'create'}
:param data: data dictionary with activity stream info.
"""
# call base class
ActivityStreamBaseWidget.set_info(self, data)
# make the user icon clickable
self.ui.user_thumb.set_shotgun_data(data["created_by"])
# set standard date and header fields
self._set_timestamp(data, self.ui.date)
primary_entity = data["primary_entity"]
entity_url = self._generate_entity_url(primary_entity, this_syntax=False)
header = "%s was created" % entity_url
# add link if there is a link field that is populated
if "entity" in primary_entity and primary_entity["entity"]:
link_url = self._generate_entity_url(primary_entity["entity"])
header += " on %s" % link_url
self.ui.header_left.setText(header)
def apply_thumbnail(self, data):
"""
Populate the UI with the given thumbnail
:param image: QImage with thumbnail data
:param thumbnail_type: thumbnail enum constant:
ActivityStreamDataHandler.THUMBNAIL_CREATED_BY
ActivityStreamDataHandler.THUMBNAIL_ENTITY
ActivityStreamDataHandler.THUMBNAIL_ATTACHMENT
"""
activity_id = data["activity_id"]
if activity_id != self.activity_id:
return
thumbnail_type = data["thumbnail_type"]
image = data["image"]
if thumbnail_type == ActivityStreamDataHandler.THUMBNAIL_CREATED_BY:
thumb = utils.create_round_thumbnail(image)
self.ui.user_thumb.setPixmap(thumb)
```
#### File: python/elided_label/elided_label.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
utils = sgtk.platform.import_framework("tk-framework-shotgunutils", "utils")
class ElidedLabel(QtGui.QLabel):
"""
Label that gracefully handles when the text doesn't fit
within the given space.
"""
def __init__(self, parent=None):
"""
:param parent: The parent QWidget
:type parent: :class:`~PySide.QtGui.QWidget`
"""
QtGui.QLabel.__init__(self, parent)
self._elide_mode = QtCore.Qt.ElideRight
self._actual_text = ""
self._line_width = 0
self._ideal_width = None
self.setSizePolicy(
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred,
)
def sizeHint(self):
base_size_hint = super(ElidedLabel, self).sizeHint()
return QtCore.QSize(
self._get_width_hint(),
base_size_hint.height()
)
def _get_width_hint(self):
if not self._ideal_width:
doc = QtGui.QTextDocument()
try:
# add the extra space to buffer the width a bit
doc.setHtml(self._actual_text + " ")
doc.setDefaultFont(self.font())
width = doc.idealWidth()
except Exception:
width = self.width()
finally:
utils.safe_delete_later(doc)
self._ideal_width = width
return self._ideal_width
def _get_elide_mode(self):
"""
Returns current elide mode
:returns: The current elide mode, either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
"""
return self._elide_mode
def _set_elide_mode(self, value):
"""
Set the current elide mode.
:param value: The elide mode to use - must be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
"""
if (value != QtCore.Qt.ElideLeft
and value != QtCore.Qt.ElideRight):
raise ValueError("elide_mode must be set to either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight")
self._elide_mode = value
self._update_elided_text()
#: Property to get or set the elide mode. The value provided
#: should be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
elide_mode = property(_get_elide_mode, _set_elide_mode)
def text(self):
"""
Overridden base method to return the original unmodified text
:returns: The original unmodified text
"""
return self._actual_text
def setText(self, text):
"""
Overridden base method to set the text on the label
:param text: The text to set on the label
"""
# clear out the ideal width so that the widget can recalculate based on
# the new text
self._ideal_width = None
self._actual_text = text
self._update_elided_text()
# if we're elided, make the tooltip show the full text
if super(ElidedLabel, self).text() != self._actual_text:
# wrap the actual text in a paragraph so that it wraps nicely
self.setToolTip("<p>%s</p>" % (self._actual_text,))
else:
self.setToolTip("")
def resizeEvent(self, event):
"""
Overridden base method called when the widget is resized.
:param event: The resize event
"""
self._update_elided_text()
def _update_elided_text(self):
"""
Update the elided text on the label
"""
text = self._elide_text(self._actual_text, self._elide_mode)
QtGui.QLabel.setText(self, text)
def _elide_text(self, text, elide_mode):
"""
Elide the specified text using the specified mode
:param text: The text to elide
:param elide_mode: The elide mode to use
:returns: The elided text.
"""
# target width is the label width:
target_width = self.width()
# Use a QTextDocument to measure html/richtext width
doc = QtGui.QTextDocument()
try:
doc.setHtml(text)
doc.setDefaultFont(self.font())
# if line width is already less than the target width then great!
line_width = doc.idealWidth()
if line_width <= target_width:
self._line_width = line_width
return text
# depending on the elide mode, insert ellipses in the correct place
cursor = QtGui.QTextCursor(doc)
ellipses = ""
if elide_mode != QtCore.Qt.ElideNone:
# add the ellipses in the correct place:
ellipses = "..."
if elide_mode == QtCore.Qt.ElideLeft:
cursor.setPosition(0)
elif elide_mode == QtCore.Qt.ElideRight:
char_count = doc.characterCount()
cursor.setPosition(char_count-1)
cursor.insertText(ellipses)
ellipses_len = len(ellipses)
# remove characters until the text fits within the target width:
while line_width > target_width:
start_line_width = line_width
# if string is less than the ellipses length then just return
# an empty string
char_count = doc.characterCount()
if char_count <= ellipses_len:
self._line_width = 0
return ""
# calculate the number of characters to remove - should always remove at least 1
# to be sure the text gets shorter!
line_width = doc.idealWidth()
p = target_width/line_width
# play it safe and remove a couple less than the calculated amount
chars_to_delete = max(1, char_count - int(float(char_count) * p)-2)
# remove the characters:
if elide_mode == QtCore.Qt.ElideLeft:
start = ellipses_len
end = chars_to_delete + ellipses_len
else:
# default is to elide right
start = max(0, char_count - chars_to_delete - ellipses_len - 1)
end = max(0, char_count - ellipses_len - 1)
cursor.setPosition(start)
cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
cursor.removeSelectedText()
# update line width:
line_width = doc.idealWidth()
if line_width == start_line_width:
break
self._line_width = line_width
return doc.toHtml()
finally:
utils.safe_delete_later(doc)
@property
def line_width(self):
"""
(:obj:`int`) width of the line of text in pixels
"""
return self._line_width
```
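A minimal usage sketch for `ElidedLabel`; it assumes a running QApplication and that the sgtk Qt abstraction resolves to PySide/PyQt as in the module above:
```python
# Usage sketch only; requires an existing QApplication and the imports
# from the module above (ElidedLabel, QtCore).
label = ElidedLabel()
label.elide_mode = QtCore.Qt.ElideLeft
label.setText("a very long string that will not fit in a narrow label")
label.resize(120, 24)
label.show()
# text() still returns the full, unelided string
print(label.text())
```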
#### File: python/overlay_widget/shotgun_model_overlay_widget.py
```python
from sgtk.platform.qt import QtCore, QtGui
# load resources
from .ui import resources_rc
from .shotgun_overlay_widget import ShotgunOverlayWidget
class ShotgunModelOverlayWidget(ShotgunOverlayWidget):
"""
A convenience class specifically designed to work with a
:class:`~tk-framework-shotgunutils:shotgun_model.ShotgunModel`.
By using this class, multiple overlay widgets can be
easily created and connected to the same shotgun model.
"""
def __init__(self, sg_model, parent=None):
"""
:param sg_model: Shotgun model that this widget should connect to
:type sg_model: :class:`~tk-framework-shotgunutils:shotgun_model.ShotgunModel`
:param parent: Widget to attach the overlay to
:type parent: :class:`PySide.QtGui.QWidget`
"""
ShotgunOverlayWidget.__init__(self, parent)
# connect up to signals being emitted from Shotgun model:
self._model = None
self._connect_to_model(sg_model)
def set_model(self, sg_model):
"""
Set the model this widget should be connected to
:param sg_model: Shotgun model that this widget should connect to
:type sg_model: :class:`~tk-framework-shotgunutils:shotgun_model.ShotgunModel`
"""
self.hide()
self._connect_to_model(sg_model)
def _connect_to_model(self, sg_model):
"""
Connect to the signals emitted by the specified model
:param sg_model: Shotgun model that this widget should connect to
:type sg_model: :class:`~tk-framework-shotgunutils:shotgun_model.ShotgunModel`
"""
if sg_model == self._model:
# already connected!
return
if self._model:
# disconnect from the previous model:
self._model.query_changed.disconnect(self._model_query_changed)
self._model.data_refreshing.disconnect(self._model_refreshing)
self._model.data_refreshed.disconnect(self._model_refreshed)
self._model.data_refresh_fail.disconnect(self._model_refresh_failed)
self._model = None
self.hide(hide_errors=True)
if sg_model:
# connect to the new model:
self._model = sg_model
self._model.query_changed.connect(self._model_query_changed)
self._model.data_refreshing.connect(self._model_refreshing)
self._model.data_refreshed.connect(self._model_refreshed)
self._model.data_refresh_fail.connect(self._model_refresh_failed)
def _model_query_changed(self):
"""
Slot signaled when the query changes on the connected Shotgun model
"""
self.hide(hide_errors=True)
def _model_refreshing(self):
"""
Slot signaled when the connected Shotgun model starts refreshing
"""
if not self._model.is_data_cached():
self.start_spin()
def _model_refreshed(self, data_changed):
"""
Slot signaled when the data from the connected Shotgun model has
been refreshed
:param data_changed: True if the refresh resulted in the data changing
"""
self.hide(hide_errors=True)
def _model_refresh_failed(self, msg):
"""
Slot signaled when the connected Shotgun model refresh fails
:param msg: The reason the refresh failed
"""
self.show_error_message(msg)
```
#### File: python/overlay_widget/shotgun_spinning_widget.py
```python
import math
from tank.platform.qt import QtCore, QtGui
# load resources
from .ui import resources_rc # noqa
class ShotgunSpinningWidget(QtGui.QWidget):
"""
Overlay widget that can be placed on top over any QT widget.
Once you have placed the overlay widget, you can use it to
display a spinner or report progress in the form of an arc that goes
from 0 to 360 degrees.
"""
MODE_OFF = 0
MODE_SPIN = 1
MODE_PROGRESS = 2
    # Number of times per second the spinner updates; at 25 this means one tick every 40ms.
_UPDATES_PER_SECOND = 25
def __init__(self, parent):
"""
:param parent: Widget to attach the overlay to
:type parent: :class:`PySide.QtGui.QWidget`
"""
QtGui.QWidget.__init__(self, parent)
# turn off the widget
self.setVisible(False)
self._mode = self.MODE_OFF
# setup spinner timer
self._timer = QtCore.QTimer(self)
self._timer.timeout.connect(self._on_animation)
# This is the current spin angle
self._spin_angle = 0
        # This is the angle we need to spin to.
        self._spin_angle_to = 0
        # This is the target angle from the previous progress report.
        self._previous_spin_angle_to = 0
# This counts how many times we've ticked in the last second to know how big the heartbeat
# needs to be.
self._heartbeat = 0
self._sg_icon = QtGui.QPixmap(":/tk_framework_qtwidgets.overlay_widget/sg_logo.png")
############################################################################################
# public interface
def start_spin(self):
"""
Enables the overlay and shows an animated spinner.
If you want to stop the spinning, call :meth:`hide`.
"""
self._timer.start(40)
self.setVisible(True)
self._mode = self.MODE_SPIN
def start_progress(self):
"""
Enables the overlay and shows an animated progress arc.
If you want to stop the progress, call :meth:`hide`.
"""
self.setVisible(True)
self._timer.start(1000 / self._UPDATES_PER_SECOND)
self._mode = self.MODE_PROGRESS
self._spin_angle_to = 0
self._spin_angle = 0
def report_progress(self, current):
"""
Updates the widget current progress value.
:param current: New value for the progress arc. Must be between 0.0 (nothing) and 1.0 (complete).
:type current: float
"""
# We're about to ask the cursor to reach another point. Make sure that
# we are at least caught up with where we were requested to be last time.
self._spin_angle = max(self._previous_spin_angle_to, self._spin_angle)
self._spin_angle_to = 360 * current
self.repaint()
def hide(self):
"""
Hides the overlay.
"""
self._timer.stop()
self._mode = self.MODE_OFF
self.setVisible(False)
############################################################################################
# internal methods
def _on_animation(self):
"""
Async callback to help animate the widget.
"""
if self._mode == self.MODE_SPIN:
self._spin_angle += 1
if self._spin_angle == 90:
self._spin_angle = 0
elif self._mode == self.MODE_PROGRESS:
# If the current spin angle has not reached the destination yet,
# increment it, but not past where we are supposed to end at.
# The progress tries to give a smooth impression of the progress. Instead of jumping straight
# to the requested value, it will slide over to that value. Sliding from 0 to 1 however is done in
# a single second, so the sliding is still quick to the eye. If there are more than
# _UPDATES_PER_SECOND steps, this sliding effect is actually not visible since individual increments
# between steps will be smaller than 1 / _UPDATES_PER_SECOND of the circumference.
self._spin_angle = min(self._spin_angle_to, self._spin_angle + 360 / self._UPDATES_PER_SECOND)
            self._heartbeat = (self._heartbeat + 1) % self._UPDATES_PER_SECOND
self.repaint()
def _draw_opened_circle(self, painter, start_angle, span_angle):
"""
Draws an arc around the SG logo.
:param painter: Painter object we will draw with.
:param start_angle: Angle at which we will start drawing the arc.
:param span_angle: Degrees the arc covers.
"""
# show the spinner
painter.translate((painter.device().width() / 2) - 40,
(painter.device().height() / 2) - 40)
pen = QtGui.QPen(QtGui.QColor("#424141"))
pen.setWidth(3)
painter.setPen(pen)
painter.drawPixmap(QtCore.QPoint(8, 24), self._sg_icon)
r = QtCore.QRectF(0.0, 0.0, 80.0, 80.0)
# drawArc accepts 1/16th on angles.
painter.drawArc(r, start_angle * 16, span_angle * 16)
def paintEvent(self, event):
"""
Render the UI.
:param event: Qt Paint event.
"""
if self._mode == self.MODE_OFF:
return
painter = QtGui.QPainter()
painter.begin(self)
try:
# set up semi transparent backdrop
painter.setRenderHint(QtGui.QPainter.Antialiasing)
# now draw different things depending on mode
if self._mode == self.MODE_SPIN:
self._draw_opened_circle(
painter,
self._spin_angle * 4,
340
)
elif self._mode == self.MODE_PROGRESS:
self._draw_opened_circle(
painter,
# Start at noon
90,
# Go clockwise
-self._spin_angle
)
self._draw_heartbeat(painter)
finally:
painter.end()
def _draw_heartbeat(self, painter):
"""
Draws the heartbeat of the progress reporter so it doesn't look like
the UI has frozen when progress is not updated in a while.
:param painter: Painter object that will be used to render.
"""
        # The heartbeat cycles once per second. The amplitude is largest at the
        # start and end of the cycle and shrinks to zero at the halfway point.
half_update = self._UPDATES_PER_SECOND / 2.0
amplitude = (math.fabs(self._heartbeat - half_update) / half_update) * 6
        # Progress reporting starts at -90 degrees, which is (0, -1) in Cartesian
        # coordinates; with Qt's y-axis pointing down, that is the top of the circle.
angle = self._spin_angle - 90
y = math.sin(math.radians(angle))
x = math.cos(math.radians(angle))
pen = QtGui.QPen(QtGui.QColor("#424141"))
brush = QtGui.QBrush(QtGui.QColor("#424141"))
pen.setWidth(1)
painter.setPen(pen)
painter.setBrush(brush)
# Draws the ellipse around the head of the arc.
painter.drawEllipse(
QtCore.QRectF(
x * 40 + 40 - amplitude / 2,
y * 40 + 40 - amplitude / 2,
amplitude,
amplitude,
)
)
```
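The heartbeat above rides on the head of the progress arc; its position is plain trigonometry. A standalone sketch with an arbitrary 180-degree sweep, mirroring `_draw_heartbeat`:
```python
import math

# Drawing starts at noon, i.e. -90 degrees, so offset the sweep by that.
spin_angle = 180.0  # arbitrary example sweep
angle = spin_angle - 90
x = math.cos(math.radians(angle))
y = math.sin(math.radians(angle))
print("head of arc at unit position (%.2f, %.2f)" % (x, y))
```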
#### File: python/search_completer/global_search_result_delegate.py
```python
import sgtk
from sgtk.platform.qt import QtCore
from .search_result_delegate import SearchResultDelegate
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
views = sgtk.platform.current_bundle().import_module("views")
class GlobalSearchResultDelegate(SearchResultDelegate):
"""
Delegate which renders search match entries in the global
search completer.
"""
def _render_result(self, widget, model_index):
"""
Renders a result from the model into the provided widget.
:param widget: Widget used to render the result.
:type widget: ``SearchResultWidget``
:param model_index: Index of the item to render.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
from .global_search_completer import GlobalSearchCompleter
icon = shotgun_model.get_sanitized_data(model_index, QtCore.Qt.DecorationRole)
if icon:
thumb = icon.pixmap(512)
widget.set_thumbnail(thumb)
else:
# probably won't hit here, but just in case, use default/empty
# thumbnail
widget.set_thumbnail(self._pixmaps.no_thumbnail)
data = shotgun_model.get_sanitized_data(model_index, GlobalSearchCompleter.SG_DATA_ROLE)
# Example of data stored in the data role:
# {'status': 'vwd',
# 'name': 'bunny_010_0050_comp_v001',
# 'links': ['Shot', 'bunny_010_0050'],
# 'image': 'https://xxx',
# 'project_id': 65,
# 'type': 'Version',
# 'id': 99}
entity_type_display_name = shotgun_globals.get_type_display_name(data["type"])
content = ""
et_url = shotgun_globals.get_entity_type_icon_url(data["type"])
underlined_name = self._underline_search_term(data["name"])
if et_url:
# present thumbnail icon and name
content += "<img src='%s'/> <b style='color: rgb(48, 167, 227)';>%s</b>" % (
et_url, underlined_name
)
else:
            # no icon available; just present the name
content += "%s" % underlined_name
content += "<br>%s" % entity_type_display_name
links = data["links"]
        # note: user entities return unusual link data, so skip those.
if links and links[0] != "" and links[0] != "HumanUser" and links[0] != "ClientUser":
underlined_link = self._underline_search_term(links[1])
# there is a referenced entity
et_url = shotgun_globals.get_entity_type_icon_url(links[0])
if et_url:
# present thumbnail icon and name
content += " on <img align=absmiddle src='%s'/> %s" % (et_url, underlined_link)
else:
                # no icon available; present the type display name and the link name
link_entity_type = links[0]
content += " on %s %s" % (shotgun_globals.get_type_display_name(link_entity_type), underlined_link)
widget.set_text(content)
```
#### File: python/search_widget/search_widget.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.search_widget import Ui_SearchWidget
class SearchWidget(QtGui.QWidget):
"""
Search widget class
"""
    # emitted when the search field text is being edited
    search_edited = QtCore.Signal(object)  # search text
    # emitted when the search field text has been changed (e.g. after hitting enter)
    search_changed = QtCore.Signal(object)  # search text
def __init__(self, parent=None):
"""
Construction
:param parent: The parent widget
"""
QtGui.QWidget.__init__(self, parent)
# set up the UI
self._ui = Ui_SearchWidget()
self._ui.setupUi(self)
self.set_placeholder_text("Search...")
# dynamically create the clear button so that we can place it over the
# edit widget:
self._clear_btn = QtGui.QPushButton(self._ui.search_edit)
self._clear_btn.setFocusPolicy(QtCore.Qt.StrongFocus)
self._clear_btn.setFlat(True)
self._clear_btn.setCursor(QtCore.Qt.ArrowCursor)
style = ("QPushButton {"
+ "border: 0px solid;"
+ "image: url(:/tk-framework-qtwidgets/search_widget/clear_search.png);"
+ "width: 16;"
+ "height: 16;"
+ "}"
+ "QPushButton::hover {"
+ "image: url(:/tk-framework-qtwidgets/search_widget/clear_search_hover.png);"
+ "}")
self._clear_btn.setStyleSheet(style)
self._clear_btn.hide()
h_layout = QtGui.QHBoxLayout(self._ui.search_edit)
h_layout.addStretch()
h_layout.addWidget(self._clear_btn)
h_layout.setContentsMargins(3, 0, 3, 0)
h_layout.setSpacing(0)
self._ui.search_edit.setLayout(h_layout)
# hook up the signals:
self._ui.search_edit.textEdited.connect(self._on_text_edited)
self._ui.search_edit.returnPressed.connect(self._on_return_pressed)
self._clear_btn.clicked.connect(self._on_clear_clicked)
# @property
def _get_search_text(self):
"""
get the search text from the widget
"""
text = self._ui.search_edit.text()
return self._safe_to_string(text)
# @search_text.setter
def _set_search_text(self, value):
"""
set the search text on the widget
"""
self._ui.search_edit.setText(value)
self._clear_btn.setVisible(bool(value))
search_text = property(_get_search_text, _set_search_text)
def set_placeholder_text(self, text):
"""
Set the placeholder text for the widget
:param text: The text to use
"""
# Note, setPlaceholderText is only available in recent versions of Qt.
if hasattr(self._ui.search_edit, "setPlaceholderText"):
self._ui.search_edit.setPlaceholderText(text)
def clear(self):
"""
"""
self._ui.search_edit.setText("")
self._clear_btn.hide()
def _on_clear_clicked(self):
"""
Slot triggered when the clear button is clicked - clears the text
and emits the relevant signals.
"""
self.clear()
self.search_changed.emit("")
self.search_edited.emit("")
def _on_text_edited(self):
"""
Slot triggered when the text has been edited
"""
text = self.search_text
self._clear_btn.setVisible(bool(text))
self.search_edited.emit(text)
def _on_return_pressed(self):
"""
Slot triggered when return has been pressed
"""
self.search_changed.emit(self.search_text)
def _safe_to_string(self, value):
"""
Safely convert the value to a string - handles
unicode and QtCore.QString if using PyQt
:param value: The value to convert to a string
:returns: utf8 encoded string of the input value
"""
if isinstance(value, str):
# it's a string anyway so just return
return value
if isinstance(value, unicode):
# convert to utf-8
return value.encode("utf8")
if hasattr(QtCore, "QString"):
# running PyQt!
if isinstance(value, QtCore.QString):
# QtCore.QString inherits from str but supports
# unicode, go figure! Lets play safe and return
# a utf-8 string
return str(value.toUtf8())
# For everything else, just return as string
return str(value)
```
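A standalone sketch of the conversion rules `_safe_to_string` applies, assuming Python 2 semantics to match the module above (the PyQt QString branch is omitted):
```python
# -*- coding: utf-8 -*-
# Python 2 semantics assumed, matching the module above.
value = u"caf\xe9"
if isinstance(value, str):
    result = value                     # already a byte string
elif isinstance(value, unicode):
    result = value.encode("utf8")      # encode unicode to utf-8 bytes
else:
    result = str(value)                # fall back to plain str()
print(result)
```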
#### File: search_widget/ui/search_widget.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_SearchWidget(object):
def setupUi(self, SearchWidget):
SearchWidget.setObjectName("SearchWidget")
SearchWidget.resize(312, 24)
self.horizontalLayout = QtGui.QHBoxLayout(SearchWidget)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.search_edit = QtGui.QLineEdit(SearchWidget)
self.search_edit.setMinimumSize(QtCore.QSize(0, 24))
self.search_edit.setStyleSheet("#search_edit {\n"
"background-image: url(:/tk-framework-qtwidgets/search_widget/search.png);\n"
"background-repeat: no-repeat;\n"
"background-position: center left;\n"
"border-radius: 5px;\n"
"padding-left:20px;\n"
"padding-right:20px;\n"
"}")
self.search_edit.setObjectName("search_edit")
self.horizontalLayout.addWidget(self.search_edit)
self.horizontalLayout.setStretch(0, 1)
self.retranslateUi(SearchWidget)
QtCore.QMetaObject.connectSlotsByName(SearchWidget)
def retranslateUi(self, SearchWidget):
SearchWidget.setWindowTitle(QtGui.QApplication.translate("SearchWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
```
#### File: python/shotgun_fields/date_widget.py
```python
import datetime
import sgtk
from sgtk.platform.qt import QtGui, QtCore
from .label_base_widget import LabelBaseWidget
from .shotgun_field_meta import ShotgunFieldMeta
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
class DateWidget(LabelBaseWidget):
"""
Display a ``date`` field value as returned by the Shotgun API.
"""
__metaclass__ = ShotgunFieldMeta
_DISPLAY_TYPE = "date"
def _display_value(self, value):
"""
Set the value displayed by the widget.
:param value: The value returned by the Shotgun API to be displayed
"""
self.setText(self._string_value(value))
self.setToolTip(self._tooltip_value(value))
def _ensure_date(self, value):
"""
Ensures the supplied value is a python date object.
"""
if not isinstance(value, datetime.date):
value = datetime.datetime.strptime(value, "%Y-%m-%d").date()
return value
def _string_value(self, value):
"""
Convert the Shotgun value for this field into a string
:param value: The value to convert into a string
:type value: A String representing the date in YYYY-MM-DD form
"""
date = self._ensure_date(value)
return shotgun_globals.create_human_readable_date(date)
def _tooltip_value(self, value):
"""
Convert the Shotgun value for this field into a tooltip string
:param value: The value to convert into a string
:type value: A String representing the date in YYYY-MM-DD form
"""
date = self._ensure_date(value)
return date.strftime("%x")
class DateEditorWidget(QtGui.QDateEdit):
"""
Allows editing of a ``date`` field value as returned by the Shotgun API.
Pressing ``Enter`` or ``Return`` when the widget has focus will cause the
value to be applied and the ``value_changed`` signal to be emitted.
"""
__metaclass__ = ShotgunFieldMeta
_EDITOR_TYPE = "date"
def get_value(self):
"""
:return: The internal value being displayed by the widget.
"""
value = self.date()
if hasattr(QtCore, "QVariant"):
# pyqt
return value.toPyDate()
else:
# pyside
return value.toPython()
def keyPressEvent(self, event):
"""
Provides shortcuts for applying modified values.
:param event: The key press event object
:type event: :class:`~PySide.QtGui.QKeyEvent`
"""
if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
self.value_changed.emit()
else:
super(DateEditorWidget, self).keyPressEvent(event)
def setup_widget(self):
"""
Prepare the widget for display.
Called by the metaclass during initialization.
"""
self.setCalendarPopup(True)
self.setMinimumWidth(100)
def _display_default(self):
"""
Display the default value of the widget.
"""
self.clear()
def _display_value(self, value):
"""
Set the value displayed by the widget.
:param value: The value returned by the Shotgun API to be displayed
"""
        # the value may arrive as a datetime.date object or as a YYYY-MM-DD
        # string from the Shotgun API, so normalize strings here
if not isinstance(value, datetime.date):
value = datetime.datetime.strptime(value, "%Y-%m-%d")
self.setDate(value)
```
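A standalone sketch of the normalization `_ensure_date` performs, with an arbitrary sample date:
```python
import datetime

# The widget accepts either a datetime.date or a "YYYY-MM-DD" string.
value = "2016-09-01"  # arbitrary sample value
if not isinstance(value, datetime.date):
    value = datetime.datetime.strptime(value, "%Y-%m-%d").date()
print(value.strftime("%x"))  # locale-formatted, as the tooltip shows it
```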
#### File: python/shotgun_fields/multi_entity_widget.py
```python
import sgtk
from sgtk.platform.qt import QtGui, QtCore
from .bubble_widget import BubbleEditWidget, BubbleWidget
from .entity_widget import EntityWidget
from .shotgun_field_meta import ShotgunFieldMeta
from .util import check_project_search_supported
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
global_search_completer = sgtk.platform.current_bundle().import_module("global_search_completer")
class MultiEntityWidget(EntityWidget):
"""
Display a ``multi_entity`` field value as returned by the Shotgun API.
"""
_DISPLAY_TYPE = "multi_entity"
def _string_value(self, value):
"""
Convert the Shotgun value for this field into a string
:param value: The value to convert into a string
:type value: A List of Shotgun entity dictionaries, each with keys for at
least type, id, and name
"""
return ", ".join([self._entity_dict_to_html(entity) for entity in value])
class MultiEntityEditorWidget(BubbleEditWidget):
"""
Allows editing of a ``multi_entity`` field value as returned by the Shotgun API.
"""
__metaclass__ = ShotgunFieldMeta
_EDITOR_TYPE = "multi_entity"
def add_entity(self, entity_dict):
"""
Add an entity bubble to the widget.
:param dict entity_dict: A dictionary of information about the entity
:return: (int) unique id for the added entity
The ``entity_dict`` must include the following fields::
{
"type": "Asset",
"id": 12345,
"name": "Teapot",
}
"""
# get a list of the current entity bubbles to see if the entity being
# added is already in the list. if it is, remove it and re-add it to the
# end of the list
bubbles = self.get_bubbles()
for bubble in bubbles:
bubble_entity_dict = bubble.get_data()
# see if the bubble matches the supplied entity dict
if (bubble_entity_dict["type"] == entity_dict["type"] and
bubble_entity_dict["id"] == entity_dict["id"]):
                # move the bubble to the end and return its new id
                self.remove_bubble(bubble.id)
                return self.add_entity(bubble_entity_dict)
# get an icon to display for the entity type
entity_icon_url = shotgun_globals.get_entity_type_icon_url(entity_dict["type"])
# truncate the display name of the entity if necessary
name = entity_dict["name"]
display_name = name[0:22]
if len(name) > 22:
display_name += "..."
# create a bubble widget to display the entity
entity_bubble = BubbleWidget()
entity_bubble.set_data(entity_dict)
entity_bubble.set_image(entity_icon_url)
entity_bubble.set_text(display_name)
# return the unique id for the added bubble
return self.add_bubble(entity_bubble)
def focusInEvent(self, event):
"""
Show the completer when the widget receives focus.
:param event: The focus in event object
:type event: :class:`~PySide.QtGui.QEvent`
"""
# "remind" the completer what widget it operates on
# apparently this is needed - see
# http://doc.qt.io/qt-4.8/qt-tools-customcompleter-example.html
self._completer.setWidget(self)
if not self._completer.popup().isVisible():
self._show_completer()
super(MultiEntityEditorWidget, self).focusInEvent(event)
def get_value(self):
"""
        Return a list of entity dictionaries for the entity bubbles in the widget.
:returns: A list of :obj:`dict` objects.
:rtype: :obj:`list`
"""
return [b.get_data() for b in self.get_bubbles()]
def hideEvent(self, event):
"""
Make sure the completer is hidden when the widget is.
:param event: The hide event object
:type event: :class:`~PySide.QtGui.QEvent`
"""
self._hide_completer()
super(MultiEntityEditorWidget, self).hideEvent(event)
def keyPressEvent(self, event):
"""
Handles user interaction with the widget via keyboard.
- Ctrl+Enter and Ctrl+Return will trigger the ``value_changed`` signal to be emitted
- Enter, Return, and Tab will attempt to add the current completer item
:param event: The key press event.
:type event: :class:`~PySide.QtGui.QEvent`
"""
if event.key() in [
QtCore.Qt.Key_Enter,
QtCore.Qt.Key_Return
] and event.modifiers() & QtCore.Qt.ControlModifier:
self.value_changed.emit()
event.ignore()
return
elif event.key() in [
QtCore.Qt.Key_Enter,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Tab,
]:
entity_dict = self._completer.get_current_result()
if not entity_dict:
# nothing current, get the first result
entity_dict = self._completer.get_first_result()
if entity_dict:
self.add_entity(entity_dict)
self.clear_typed_text()
event.ignore()
return
super(MultiEntityEditorWidget, self).keyPressEvent(event)
def setup_widget(self):
"""
Prepare the widget for display.
Called by the metaclass during initialization. Sets up the completer and
valid types accepted by the widget.
"""
sg_connection = self._bundle.sgtk.shotgun
# TODO: remove this check and backward compatibility layer. added 09/16
self._project_search_supported = check_project_search_supported(sg_connection)
valid_types = {}
# get the valid entity types for this field from the schema
for entity_type in shotgun_globals.get_valid_types(self._entity_type, self._field_name):
if entity_type == "Project" and not self._project_search_supported:
# there is an issue querying Project entities via text_search
# with older versions of SG. for now, exclude Project from the
# editor's searchable entity types
continue
else:
valid_types[entity_type] = []
self._completer = global_search_completer.GlobalSearchCompleter()
self._completer.set_bg_task_manager(self._bg_task_manager)
self._completer.set_searchable_entity_types(valid_types)
self._completer.setWidget(self)
# connect the signals.
self.textChanged.connect(self._on_text_changed)
self._completer.entity_activated.connect(self._on_entity_activated)
def _display_default(self):
"""
Display the default value of the widget.
"""
self.clear()
def _display_value(self, value):
"""
Set the value displayed by the widget.
:param value: The value returned by the Shotgun API to be displayed
"""
self.clear()
for entity_dict in value:
self.add_entity(entity_dict)
def _hide_completer(self):
"""
Convenience wrapper for hiding the completer popup.
"""
self._completer.popup().hide()
def _on_entity_activated(self, type, id, name):
"""
When an entity is activated via the completer, add it to the widget.
:param str type: The entity type
:param int id: The entity's id
:param str name: The name of the entity.
"""
entity_dict = {"type": type, "id": id, "name": name}
self._completer.popup().hide()
self._completer.clear()
self.clear_typed_text()
self.add_entity(entity_dict)
def _on_text_changed(self):
"""
Show the completer as text is changing in the widget.
"""
self._show_completer()
def _show_completer(self):
"""
Handles displaying the completer in the proper location relative to the cursor.
"""
typed_text = self.get_typed_text()
if self.isVisible() and typed_text:
rect = self.cursorRect()
rect.setWidth(300)
rect.moveLeft(self.rect().left())
rect.moveTop(rect.top() + 6)
self._completer.setCompletionPrefix(typed_text)
self._completer.complete(rect)
self._completer.search(typed_text)
```
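The editor above is normally instantiated by the ``ShotgunFieldMeta`` machinery, but its construction keyword arguments mirror the ones the delegate code further down passes when it builds field widgets. A minimal usage sketch, assuming a parent ``QWidget`` and a background task manager already exist (all names below are illustrative, not part of the module):

```python
# Hedged sketch: the constructor kwargs are assumed from the delegate's
# _create_widget() call later in this document; the entity type and
# field name are placeholders.
editor = MultiEntityEditorWidget(
    parent=parent_widget,          # assumed: an existing QWidget
    entity_type="Shot",            # assumed entity type / field pairing
    field_name="assets",
    bg_task_manager=task_manager,  # assumed: a BackgroundTaskManager
)

# Pre-populate the widget with an existing entity link...
editor.add_entity({"type": "Asset", "id": 12345, "name": "Teapot"})

# ...and read the current list of entity dicts back, e.g. on save.
current_links = editor.get_value()
```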
#### File: python/activity_stream/activity_stream.py
```python
import os
import sys
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.activity_stream_widget import Ui_ActivityStreamWidget
from .widget_new_item import NewItemWidget, SimpleNewItemWidget
from .widget_note import NoteWidget
from .widget_value_update import ValueUpdateWidget
from .dialog_reply import ReplyDialog
from .data_manager import ActivityStreamDataHandler
from .overlaywidget import SmallOverlayWidget
note_input_widget = sgtk.platform.current_bundle().import_module("note_input_widget")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
utils = sgtk.platform.import_framework("tk-framework-shotgunutils", "utils")
class ActivityStreamWidget(QtGui.QWidget):
"""
QT Widget that displays the Shotgun activity stream for an entity.
:signal entity_requested(str, int): Fires when someone clicks an entity inside
the activity stream. The returned parameters are entity type and entity id.
:signal playback_requested(dict): Fires when someone clicks the playback url
on a version. Returns a shotgun dictionary with information
about the version.
:signal entity_created(object): Fires when a Note or Reply entity is created by
an underlying widget within the activity stream. Returns a Shotgun dictionary
with information about the new Entity.
:ivar reply_dialog: When a ReplyDialog is active it can be accessed here. If there
is no ReplyDialog active, then this will be set to None.
:vartype reply_dialog: .dialog_reply.ReplyDialog or None
"""
# max number of items to show in the activity stream.
MAX_STREAM_LENGTH = 20
# Activity attributes that we do not want displayed.
_SKIP_ACTIVITY_ATTRIBUTES = ["viewed_by_current_user"]
entity_requested = QtCore.Signal(str, int)
playback_requested = QtCore.Signal(dict)
# The int is the Note entity id that was selected or deselected.
note_selected = QtCore.Signal(int)
note_deselected = QtCore.Signal(int)
note_arrived = QtCore.Signal(int)
# Emitted when a Note or Reply entity is created. The
# entity type as a string and id as an int will be
# provided.
#
# dict(entity_type="Note", id=1234)
entity_created = QtCore.Signal(object)
def __init__(self, parent):
"""
:param parent: QT parent object
:type parent: :class:`~PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
QtGui.QWidget.__init__(self, parent)
self._bundle = sgtk.platform.current_bundle()
# now load in the UI that was created in the UI designer
self.ui = Ui_ActivityStreamWidget()
self.ui.setupUi(self)
# The note widget will be turned on when an entity is loaded
# if the entity is of an appropriate type.
self.ui.note_widget.hide()
# customizations
self._allow_screenshots = True
self._show_sg_stream_button = True
self._version_items_playable = True
self._clickable_user_icons = True
self._show_note_links = True
self._highlight_new_arrivals = True
self._notes_are_selectable = False
self._attachments_filter = None
# apply styling
self._load_stylesheet()
# keep an overlay for loading
overlay_module = self._bundle.import_module("overlay_widget")
self.__overlay = overlay_module.ShotgunOverlayWidget(self)
self.__small_overlay = SmallOverlayWidget(self)
# set insertion order into list to be bottom-up
self.ui.activity_stream_layout.setDirection(QtGui.QBoxLayout.BottomToTop)
# create a data manager to handle backend
self._data_manager = ActivityStreamDataHandler(self)
# set up signals
self._data_manager.note_arrived.connect(self._process_new_note)
self._data_manager.update_arrived.connect(self._process_new_data)
self._data_manager.thumbnail_arrived.connect(self._process_thumbnail)
self.ui.note_widget.entity_created.connect(self._on_entity_created)
self.ui.note_widget.data_updated.connect(self.rescan)
# keep handles to all widgets to be nice to the GC
self._loading_widget = None
self._activity_stream_static_widgets = []
self._activity_stream_data_widgets = {}
# state management
self._task_manager = None
self._sg_entity_dict = None
self._entity_type = None
self._entity_id = None
self._select_on_arrival = dict()
# We'll be keeping a persistent reply dialog available because
# we need to connect to a signal that it's emitting. It's easiest
# to do that if we're dealing with an object that persists.
self.reply_dialog = ReplyDialog(
self,
self._task_manager,
note_id=None,
allow_screenshots=self._allow_screenshots,
)
# We'll allow for a pre-note-creation callback. This is for additional
# pre-processing that needs to occur before a Note or Reply is created
# in Shotgun. This makes sure that the activity stream data coming down
# during the rescan after submission contains anything like additional
# attachments that this widget didn't explicitly handle itself prior to
# submission.
self._pre_submit_callback = None
self.reply_dialog.note_widget.entity_created.connect(self._on_entity_created)
def set_bg_task_manager(self, task_manager):
"""
Specify the background task manager to use to pull
data in the background. Data calls
to Shotgun will be dispatched via this object.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self._task_manager = task_manager
self._data_manager.set_bg_task_manager(task_manager)
self.ui.note_widget.set_bg_task_manager(task_manager)
def destroy(self):
"""
Should be called before the widget is closed
"""
self._data_manager.destroy()
self._task_manager = None
############################################################################
# properties
@property
def note_threads(self):
"""
The currently loaded note threads, keyed by Note entity id and
containing a list of Shotgun entity dictionaries. All note threads
currently displayed by the activity stream widget will be returned.
Example structure containing a Note, a Reply, and an attachment::
6040: [
{
'addressings_cc': [],
'addressings_to': [],
'client_note': False,
'content': 'This is a test note.',
'created_at': 1466477744.0,
'created_by': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'id': 6040,
'note_links': [
{
'id': 1167,
'name': '123',
'type': 'Shot'
},
{
'id': 6023,
'name': 'Scene_v030_123',
'type': 'Version'
}
],
'read_by_current_user': 'read',
'subject': "Jeff's Note on Scene_v030_123, 123",
'tasks': [
{
'id': 2118,
'name': 'Comp',
'type': 'Task'
}
],
'type': 'Note',
'user': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'user.ApiUser.image': None,
'user.ClientUser.image': None,
'user.HumanUser.image': 'https://url_to_file'
},
{
'content': 'test reply',
'created_at': 1469221928.0,
'id': 23,
'type': 'Reply',
'user': {
'id': 39,
'image': 'https://url_to_file',
'name': '<NAME>',
'type': 'HumanUser'
}
},
{
'attachment_links': [
{
'id': 6051,
'name': "Jeff's Note on Scene_v030_123, 123 - testing.",
'type': 'Note'
}
],
'created_at': 1469484693.0,
'created_by': {
'id': 39,
'name': '<NAME>',
'type': 'HumanUser'
},
'id': 601,
'image': 'https://url_to_file',
'this_file': {
'content_type': 'image/png',
'id': 601,
'link_type': 'upload',
'name': 'screencapture_vrviim.png',
'type': 'Attachment',
'url': 'https://url_to_file'
},
'type': 'Attachment'
},
]
"""
return self._data_manager.note_threads
@property
def note_widget(self):
"""
Returns the :class:`~note_input_widget.NoteInputWidget` contained within
the ActivityStreamWidget. Note that this is the widget used for NEW note
input and not Note replies. To get the NoteInputWidget used for Note
replies, access can be found via :meth:`ReplyDialog.note_widget`.
"""
return self.ui.note_widget
def _get_clickable_user_icons(self):
"""
Whether user icons in the activity stream display as clickable.
If True, a pointing hand cursor will be shown when the mouse is
hovered over the icons, otherwise the default arrow cursor will be
used.
"""
return self._clickable_user_icons
def _set_clickable_user_icons(self, state):
self._clickable_user_icons = bool(state)
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget):
if state:
widget.set_user_thumb_cursor(QtCore.Qt.PointingHandCursor)
else:
widget.set_user_thumb_cursor(QtCore.Qt.ArrowCursor)
clickable_user_icons = property(
_get_clickable_user_icons,
_set_clickable_user_icons,
)
def _get_pre_submit_callback(self):
"""
The pre-submit callback. This is None if one is not set, or a Python
callable if it is. This callable is run prior to submission of a new
Note or Reply. Note that the first (and only) argument passed to the
callback will be the calling :class:`NoteInputWidget`.
:returns: Python callable or None
"""
return self._pre_submit_callback
def _set_pre_submit_callback(self, callback):
self._pre_submit_callback = callback
self.reply_dialog.note_widget.pre_submit_callback = callback
self.note_widget.pre_submit_callback = callback
pre_submit_callback = property(
_get_pre_submit_callback,
_set_pre_submit_callback,
)
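# Illustrative example (not part of the original module): per the
# property docstring above, the pre-submit callback receives the
# calling NoteInputWidget as its only argument, so a host application
# could hook in extra processing like this:
#
#     def my_pre_submit(note_input_widget):
#         # e.g. attach extra data before the Note/Reply is created
#         ...
#
#     activity_stream_widget.pre_submit_callback = my_pre_submit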
def _get_allow_screenshots(self):
"""
Whether this activity stream is allowed to give the user access to a
button that performs screenshot operations.
"""
return self._allow_screenshots
def _set_allow_screenshots(self, state):
self._allow_screenshots = bool(state)
self.ui.note_widget.allow_screenshots(self._allow_screenshots)
allow_screenshots = property(
_get_allow_screenshots,
_set_allow_screenshots,
)
def _get_show_sg_stream_button(self):
"""
Whether the button to navigate to Shotgun is shown in the stream.
"""
return self._show_sg_stream_button
def _set_show_sg_stream_button(self, state):
"""
Sets whether to show the button to navigate to Shotgun.
:param state: True or False
"""
self._show_sg_stream_button = bool(state)
show_sg_stream_button = property(
_get_show_sg_stream_button,
_set_show_sg_stream_button,
)
def _get_version_items_playable(self):
"""
Whether the label representing a created Version entity is shown
as being "playable" within the UI. If True, then a play icon is
visible over the thumbnail image, and no icon overlay is shown
when False.
"""
return self._version_items_playable
def _set_version_items_playable(self, state):
self._version_items_playable = bool(state)
version_items_playable = property(
_get_version_items_playable,
_set_version_items_playable,
)
def _get_show_note_links(self):
"""
If True, lists out the parent entity as a list of clickable
items for each Note entity that is represented in the activity
stream.
"""
return self._show_note_links
def _set_show_note_links(self, state):
self._show_note_links = bool(state)
show_note_links = property(
_get_show_note_links,
_set_show_note_links,
)
def _get_highlight_new_arrivals(self):
"""
If True, highlights items in the activity stream that are new
since the last time data was loaded.
"""
return self._highlight_new_arrivals
def _set_highlight_new_arrivals(self, state):
self._highlight_new_arrivals = bool(state)
highlight_new_arrivals = property(
_get_highlight_new_arrivals,
_set_highlight_new_arrivals,
)
def _get_notes_are_selectable(self):
return self._notes_are_selectable
def _set_notes_are_selectable(self, state):
self._notes_are_selectable = bool(state)
notes_are_selectable = property(
_get_notes_are_selectable,
_set_notes_are_selectable,
)
def _get_attachments_filter(self):
"""
If set to a compiled regular expression, attachment file names that match
will be filtered OUT and NOT shown.
.. note:: An re.match() is used, which means the regular expression must
match from the start of the attachment file's basename. See Python's
"re" module documentation for Python 2.x for more information and
examples.
Example to match only ".gif" extensions::
re.compile(r"\w+[.]gif$")
"""
return self._attachments_filter
def _set_attachments_filter(self, regex):
self._attachments_filter = regex
attachments_filter = property(_get_attachments_filter, _set_attachments_filter)
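# Illustrative example (not part of the original module): filter out
# GIF attachments using the compiled regex from the docstring above:
#
#     import re
#     activity_stream_widget.attachments_filter = re.compile(r"\w+[.]gif$")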
############################################################################
# public interface
def select_note(self, note_id):
selectedWidget = None
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget):
match = widget.note_id == note_id
if match and not widget.selected:
self._note_selected_changed(True, widget.note_id)
selectedWidget = widget
widget.set_selected(match)
if selectedWidget is not None:
self.ui.activity_stream_scroll_area.ensureWidgetVisible(selectedWidget)
def deselect_note(self):
"""
If a note is currently selected, it will be deselected. This will NOT
trigger a note_deselected signal to be emitted, as that is only emitted
when the user triggers the deselection and not via procedural means.
"""
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget) and widget.selected:
widget.set_selected(False)
# We only support single selection right now, so we don't
# need to continue on once we've hit a note that's selected.
return
def get_note_attachments(self, note_id):
"""
Gets the Attachment entities associated with the given Note
entity. Only attachments from Notes currently loaded by the
activity stream widget will be returned.
.. note:: It is possible for attachments to be added to a Note
entity after the activity stream data has been cached.
In this situation, those attachments will NOT be returned,
as Shotgun will not be requeried for that new data unless
specifically requested to do so.
:param int note_id: The Note entity id.
"""
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget) and widget.note_id == note_id:
return widget.attachments
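# Illustrative example (not part of the original module). Note that the
# method above implicitly returns None when the Note isn't loaded:
#
#     attachments = stream.get_note_attachments(note_id=6040)
#     if attachments is None:
#         ...  # the Note isn't currently shown in the stream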
def load_data(self, sg_entity_dict):
"""
Reset the state of the widget and then load up the data
for a given entity.
:param dict sg_entity_dict: Dictionary with keys type and id
"""
self._bundle.log_debug("Setting up activity stream for entity %s" % sg_entity_dict)
# clean up everything first
self._clear()
# change the state
self._sg_entity_dict = sg_entity_dict
self._entity_type = self._sg_entity_dict["type"]
self._entity_id = self._sg_entity_dict["id"]
# tell our "new note" widget which entity it should link up against
self.ui.note_widget.set_current_entity(self._entity_type,
self._entity_id)
# to mimic the behavior in shotgun - which seems quite strange and
# inconsistent for users - we need to disable the note dialog in
# these cases
# note - this may return [] if shotgun globals aren't yet cached
schema_fields = shotgun_globals.get_entity_fields(self._entity_type)
is_non_project_entity_type = len(schema_fields) > 0 and "project" not in schema_fields
# if the project context is None, the entity is a non project entity and it's NOT
# the project entity itself, we don't have access to any project state.
no_project_available = (
self._bundle.context.project is None and
is_non_project_entity_type and
self._entity_type != "Project"
)
# also disable note creation in the case we have a site context
# and a non-project entity
if self._entity_type in ["ApiUser", "HumanUser", "ClientUser"]:
self.ui.note_widget.setVisible(False)
elif no_project_available:
# we don't have any project to hang these notes off, so disable
# the note integration
self.ui.note_widget.setVisible(False)
else:
self.ui.note_widget.setVisible(True)
# now load cached data for the given entity
self._bundle.log_debug("Setting up db manager....")
ids_to_process = self._data_manager.load_activity_data(self._entity_type,
self._entity_id,
self.MAX_STREAM_LENGTH)
if len(ids_to_process) == 0:
# nothing cached - show spinner!
# NOTE!!!! - cannot use the actual spinning animation because
# this triggers the GIL bug where signals from threads
# will deadlock the GIL
self.__overlay.show_message("Loading Shotgun Data...")
all_reply_users = []
attachment_requests = []
###############################################################
# Phase 1 - render the UI.
# before we begin widget operations, turn off visibility
# of the whole widget in order to avoid recomputes
self._bundle.log_debug("Start building widgets based on cached data...")
self.setVisible(False)
try:
# we are building the widgets bottom up.
# first of all, insert a widget that will expand so that
# it consumes all unused space. This is to keep other
# widgets from growing when there are only a few widgets
# available in the scroll area.
self._bundle.log_debug("Adding expanding base widget...")
expanding_widget = QtGui.QLabel(self)
self.ui.activity_stream_layout.addWidget(expanding_widget)
self.ui.activity_stream_layout.setStretchFactor(expanding_widget, 1)
self._activity_stream_static_widgets.append(expanding_widget)
if self.show_sg_stream_button:
sg_stream_button = QtGui.QPushButton(self)
sg_stream_button.setText("Click here to see the Activity stream in Shotgun.")
sg_stream_button.setObjectName("full_shotgun_stream_button")
sg_stream_button.setCursor(QtCore.Qt.PointingHandCursor)
sg_stream_button.setFocusPolicy(QtCore.Qt.NoFocus)
sg_stream_button.clicked.connect(self._load_shotgun_activity_stream)
self.ui.activity_stream_layout.addWidget(sg_stream_button)
self._activity_stream_static_widgets.append(sg_stream_button)
# ids are returned in async order. Now pop them onto the activity stream,
# old items first order...
self._bundle.log_debug("Adding activity widgets...")
for activity_id in ids_to_process:
w = self._create_activity_widget(activity_id)
# note that not all activity data entries generate
# a widget in our factory method.
if w:
# a widget was generated! Insert it into
# the widget layouts etc.
self._activity_stream_data_widgets[activity_id] = w
self.ui.activity_stream_layout.addWidget(w)
# run extra init for notes
# this is to fetch the actual note payload -
# content, replies, attachments etc.
if isinstance(w, NoteWidget):
data = self._data_manager.get_activity_data(activity_id)
note_id = data["primary_entity"]["id"]
(note_reply_users, note_attachment_requests) = self._populate_note_widget(w, activity_id, note_id)
# extend user and attachment requests to our full list
# so that we can request thumbnails for these later...
all_reply_users.extend(note_reply_users)
attachment_requests.extend(note_attachment_requests)
# last, create "loading" widget
# to put at the top of the list.
#
# We add this into the scroll area so that it scrolls with the
# rest of the items in the list.
#
self._loading_widget = QtGui.QLabel(self)
self._loading_widget.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self._loading_widget.setText("Loading data from Shotgun...")
self._loading_widget.setObjectName("loading_widget")
self.ui.activity_stream_layout.addWidget(self._loading_widget)
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
self._bundle.log_debug("...UI building complete!")
###############################################################
# Phase 2 - request additional data.
# note that we don't interleave these requests with building
# the ui - this is to minimise the risk of GIL signal issues
# request thumbs
self._bundle.log_debug("Request thumbnails...")
for activity_id in ids_to_process:
self._data_manager.request_activity_thumbnails(activity_id)
for attachment_req in attachment_requests:
self._data_manager.request_attachment_thumbnail(attachment_req["activity_id"],
attachment_req["attachment_group_id"],
attachment_req["attachment_data"])
# now request thumbnails for all users who have replied, but
# only once per user
reply_users_dup_check = []
for reply_user in all_reply_users:
unique_user = (reply_user["type"], reply_user["id"])
if unique_user not in reply_users_dup_check:
reply_users_dup_check.append(unique_user)
self._data_manager.request_user_thumbnail(reply_user["type"],
reply_user["id"],
reply_user["image"])
self._bundle.log_debug("...done")
# and now request an update check
self._bundle.log_debug("Ask db manager to ask shotgun for updates...")
self._data_manager.rescan()
self._bundle.log_debug("...done")
def show_new_note_dialog(self, modal=True):
"""
Shows a dialog that allows the user to input a new note.
.. note:: The return value of the new note dialog is not provided,
as the activity stream widget will emit an entity_created
signal if the user successfully creates a new Note entity.
:param bool modal: Whether the dialog should be shown modally or not.
"""
if self._entity_id is None:
self._bundle.log_debug("Skipping New Note Dialog - No entity loaded.")
return
note_dialog = note_input_widget.NoteInputDialog(parent=self)
note_dialog.entity_created.connect(self._on_entity_created)
note_dialog.data_updated.connect(self.rescan)
note_dialog.set_bg_task_manager(self._task_manager)
note_dialog.set_current_entity(self._entity_type, self._entity_id)
if modal:
note_dialog.exec_()
else:
note_dialog.show()
def rescan(self, force_activity_stream_update=False):
"""
Triggers a rescan of the current activity stream data.
:param force_activity_stream_update: If True, will force a requery
of activity stream data, even
if it is already cached.
:type force_activity_stream_update: bool
"""
# kick the data manager to rescan for changes
self._data_manager.rescan(
force_activity_stream_update=force_activity_stream_update,
)
############################################################################
# internals
def _load_stylesheet(self):
"""
Loads in a stylesheet from disk
"""
qss_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "style.qss")
# use a context manager so the file handle is closed even if
# reading the stylesheet fails partway through
with open(qss_file, "rt") as f:
    qss_data = f.read()
# apply to widget (and all its children)
self.setStyleSheet(qss_data)
def _clear(self):
"""
Clear the widget. This will remove all items from the UI.
"""
self._bundle.log_debug("Clearing UI...")
# before we begin widget operations, turn off visibility
# of the whole widget in order to avoid recomputes
self.setVisible(False)
# scroll to top
self.ui.activity_stream_scroll_area.verticalScrollBar().setValue(0)
try:
self._bundle.log_debug("Clear loading widget")
self._clear_loading_widget()
self._bundle.log_debug("Removing all widget items")
for x in self._activity_stream_data_widgets.values():
# remove widget from layout:
self.ui.activity_stream_layout.removeWidget(x)
# set its parent to None so that it is removed from the widget hierarchy
x.setParent(None)
utils.safe_delete_later(x)
self._bundle.log_debug("Clearing python data structures")
self._activity_stream_data_widgets = {}
self._bundle.log_debug("Removing expanding widget")
for w in self._activity_stream_static_widgets:
self.ui.activity_stream_layout.removeWidget(w)
w.setParent(None)
utils.safe_delete_later(w)
self._activity_stream_static_widgets = []
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
# Since we have no entity loaded, we don't need to show
# the note widget.
self.ui.note_widget.setVisible(False)
self.ui.note_widget.clear()
def _clear_loading_widget(self):
"""
Remove the loading widget from the widget list
"""
if self._loading_widget:
self._bundle.log_debug("Clearing the loading widget")
self.ui.activity_stream_layout.removeWidget(self._loading_widget)
self._loading_widget.setParent(None)
utils.safe_delete_later(self._loading_widget)
self._loading_widget = None
self._bundle.log_debug("...done")
def _populate_note_widget(self, note_widget, activity_id, note_id):
"""
Load note content and replies into a note widget
:param note_widget: Note widget to populate with replies and
attachments.
:param activity_id: Activity stream id to load
:param note_id: Note id to load
:returns: (reply_users, attachment_requests) where reply_users is a
    list of users (dict with type, id, name and image) for each
    of the replies and attachment_requests is a list of
    attachment request dictionaries
"""
# set note content
note_thread_data = self._data_manager.get_note(note_id)
attachment_requests = []
reply_users = []
if note_thread_data:
# we have cached note data
note_data = note_thread_data[0]
replies_and_attachments = note_thread_data[1:]
# set up the note data first
note_widget.set_note_info(note_data)
# now add replies
note_widget.add_replies(replies_and_attachments)
# add a reply button and connect it
reply_button = note_widget.add_reply_button()
reply_button.clicked.connect(lambda : self._on_reply_clicked(note_id))
# get list of users who have replied
for item in replies_and_attachments:
if item["type"] == "Reply":
# note that the reply data structure is special:
# the 'user' key is not a normal sg link dict,
# but contains an additional image field to describe
# the thumbnail:
#
# {'content': 'Reply content...',
# 'created_at': 1438649419.0,
# 'type': 'Reply',
# 'id': 73,
# 'user': {'image': '...',
# 'type': 'HumanUser',
# 'id': 38,
# 'name': '<NAME>'}}]
reply_users.append(item["user"])
# get all attachment data
# can request thumbnails post UI build
for attachment_group_id in note_widget.get_attachment_group_widget_ids():
agw = note_widget.get_attachment_group_widget(attachment_group_id)
for attachment_data in agw.get_data():
ag_request = {"attachment_group_id": attachment_group_id,
"activity_id": activity_id,
"attachment_data": attachment_data}
attachment_requests.append(ag_request)
return (reply_users, attachment_requests)
def _create_activity_widget(self, activity_id):
"""
Create a widget for a given activity id
If the activity id is not supported by the implementation,
returns None. This can for example happen if the type of
data returned by the activity stream doesn't have a
suitable widget implemented.
:returns: Activity widget object or None
"""
data = self._data_manager.get_activity_data(activity_id)
widget = None
# factory logic
if data["update_type"] == "create":
if data["primary_entity"]["type"] in ["Version", "PublishedFile", "TankPublishedFile"]:
# full on 'new item' widget with thumbnail, description etc.
widget = NewItemWidget(self)
widget.interactive = self.version_items_playable
elif data["primary_entity"]["type"] == "Note":
# new note
widget = NoteWidget(data["primary_entity"]["id"], self)
widget.show_note_links = self.show_note_links
widget.attachments_filter = self.attachments_filter
# If it's been requested that we select this Note entity's widget when it's
# constructed, then we do so here. This is the situation where the user has
# created a new Note, which upon completion we want to have autoselected.
if data["primary_entity"]["id"] == self._select_on_arrival.get("id"):
# First we need to deselect whatever is selected, because we're going
# to be selecting our new widget right after.
for w in self._activity_stream_data_widgets.values():
if isinstance(w, NoteWidget):
if w.selected:
w.set_selected(False)
self._note_selected_changed(False, w.note_id)
widget.set_selected(True)
self._note_selected_changed(True, widget.note_id)
self._select_on_arrival = dict()
else:
# minimalistic 'new' widget for all other cases
widget = SimpleNewItemWidget(self)
elif data["update_type"] == "create_reply":
# new note widget
widget = NoteWidget(data["primary_entity"]["id"], self)
widget.show_note_links = self.show_note_links
widget.attachments_filter = self.attachments_filter
elif data["update_type"] == "update":
# 37660: We're going to ignore "viewed by" activity for the time being.
# According to the review team these entries shouldn't have been returned
# as part of the stream anyway, but we have existing data that might
# contain these entries that we need to handle elegantly.
if data.get("meta", {}).get("attribute_name") not in self._SKIP_ACTIVITY_ATTRIBUTES:
widget = ValueUpdateWidget(self)
else:
self._bundle.log_debug("Activity type not supported and will not be "
"rendered: %s" % data["update_type"])
# initialize the widget
if widget:
widget.set_host_entity(self._entity_type, self._entity_id)
widget.set_info(data)
widget.entity_requested.connect(lambda entity_type, entity_id: self.entity_requested.emit(entity_type, entity_id))
widget.playback_requested.connect(lambda sg_data: self.playback_requested.emit(sg_data))
# If we're not wanting the user icons to display as clickable, then
# we need to set their cursor to be the default arrow cursor. Otherwise
# we don't need to do anything because they default to the clickable
# finger-pointing cursor.
if not self.clickable_user_icons and isinstance(widget, NoteWidget):
widget.set_user_thumb_cursor(QtCore.Qt.ArrowCursor)
return widget
def _note_selected_changed(self, selected, note_id):
"""
Handles a change in selection state for a given Note entity id.
:param bool selected: The new selection state of the Note.
:param int note_id: The Note entity id.
"""
if selected:
self.note_selected.emit(note_id)
else:
self.note_deselected.emit(note_id)
def _process_new_data(self, activity_ids):
"""
Process new activity ids as they arrive from
the data manager.
:param activity_ids: List of activity ids
"""
self._bundle.log_debug(
"Process new data called for %s activity events" % len(activity_ids)
)
# keep track of new note widgets created
note_widgets_added = []
# remove the "loading please wait .... widget
self._clear_loading_widget()
# note! For an item which hasn't previously been cached or
# hasn't been visited for some time, there may be many more than
# MAX_STREAM_LENGTH updates. In this case, truncate the stream.
# this will result in a UI where you may have a maximum of
# MAX_STREAM_LENGTH * 2 items (already loaded + new) and there
# may be gaps in activity data because we always want to show
# the latest data, so when we cull, it happens in the 'middle'
# of the stream, resulting in existing data, then a potential gap,
# and then MAX_STREAM_LENGTH items.
#
# Note that this is in the UI only, so a refresh of the page
# would immediately rectify the discrepancy.
# load in the new data
# the list of ids is delivered in ascending order
# and we pop them on to the widget
if len(activity_ids) > self.MAX_STREAM_LENGTH:
self._bundle.log_debug("Capping the %s new activity items down to "
"%s items" % (len(activity_ids), self.MAX_STREAM_LENGTH))
# transform [10,11,12,13,14,15,16,17] -> [15,16,17]
activity_ids = activity_ids[-self.MAX_STREAM_LENGTH:]
for activity_id in activity_ids:
self._bundle.log_debug("Creating new widget...")
w = self._create_activity_widget(activity_id)
if w:
self._activity_stream_data_widgets[activity_id] = w
self._bundle.log_debug("Adding %s to layout" % w)
self.ui.activity_stream_layout.addWidget(w)
# add special blue border to indicate that this is a new arrival
if self.highlight_new_arrivals:
w.setStyleSheet("QFrame#frame{ border: 1px solid rgba(48, 167, 227, 50%); }")
# register if it is a note so we can post process
if isinstance(w, NoteWidget):
note_widgets_added.append(w)
# when everything is loaded in, load the thumbs
self._bundle.log_debug("Requesting thumbnails")
for activity_id in activity_ids:
self._data_manager.request_activity_thumbnails(activity_id)
self._bundle.log_debug("Process new data complete.")
# now cull out any activity items that are duplicated in the list
# this may be the case if a note has been replied to - in this case
# the note already exists in the list
note_ids_added = [widget.note_id for widget in note_widgets_added]
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget) and widget not in note_widgets_added:
if widget.note_id in note_ids_added:
widget.hide()
# turn off the overlay in case it is spinning
# (which only happens on a full load)
self.__overlay.hide()
def _process_thumbnail(self, data):
"""
New thumbnail has arrived from the data manager
"""
# broadcast to all activity widgets
for widget in self._activity_stream_data_widgets.values():
widget.apply_thumbnail(data)
def _process_new_note(self, activity_id, note_id):
"""
A new note has arrived from the data manager
"""
if activity_id in self._activity_stream_data_widgets:
widget = self._activity_stream_data_widgets[activity_id]
(reply_users, attachment_requests) = self._populate_note_widget(widget, activity_id, note_id)
# request thumbs
for attachment_req in attachment_requests:
self._data_manager.request_attachment_thumbnail(attachment_req["activity_id"],
attachment_req["attachment_group_id"],
attachment_req["attachment_data"])
for reply_user in reply_users:
self._data_manager.request_user_thumbnail(reply_user["type"],
reply_user["id"],
reply_user["image"])
self.note_arrived.emit(note_id)
else:
self.note_arrived.emit(note_id)
def _on_entity_created(self, entity):
"""
Callback when an entity is created by an underlying widget.
:param entity: The Shotgun entity that was created.
"""
if entity["type"] == "Note":
try:
from sgtk.util.metrics import EventMetric
fields = [] # reserved for future use
annotations = {} # reserved for future use
properties = {
"Source": "Activity Stream",
"Linked Entity Type": entity.get("type", "Unknown"),
"Field Used": fields,
"Annotations": annotations
}
EventMetric.log(
EventMetric.GROUP_MEDIA,
"Created Note",
properties=properties,
bundle=self._bundle
)
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
if self.notes_are_selectable:
self._select_on_arrival = entity
self.entity_created.emit(entity)
def _on_reply_clicked(self, note_id):
"""
Callback when someone clicks reply on a given note
:param note_id: The id of the Shotgun Note entity being replied to.
"""
self.reply_dialog.note_id = note_id
# Position the reply modal dialog above the activity stream scroll area.
pos = self.mapToGlobal(self.ui.activity_stream_scroll_area.pos())
x_pos = pos.x() + (self.ui.activity_stream_scroll_area.width() / 2) - (self.reply_dialog.width() / 2) - 10
y_pos = pos.y() + (self.ui.activity_stream_scroll_area.height() / 2) - (self.reply_dialog.height() / 2) - 20
self.reply_dialog.move(x_pos, y_pos)
# and pop it
try:
self.__small_overlay.show()
if self.reply_dialog.exec_() == QtGui.QDialog.Accepted:
self.load_data(self._sg_entity_dict)
try:
from sgtk.util.metrics import EventMetric
properties = {
"Source": "Activity Stream",
}
EventMetric.log(
EventMetric.GROUP_MEDIA,
"Created Reply",
properties=properties,
bundle=self._bundle
)
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
finally:
self.__small_overlay.hide()
def _load_shotgun_activity_stream(self):
"""
Called when someone clicks 'show activity stream in shotgun'
"""
url = "%s/detail/%s/%s" % (self._bundle.sgtk.shotgun_url, self._entity_type, self._entity_id)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
############################################################################
# events
def mousePressEvent(self, event):
"""
Overrides the default event handler in Qt.
"""
if not self.notes_are_selectable:
return
# If they clicked on a note, select it. Any notes that were not
# clicked on will be deselected.
position = event.globalPos()
for widget in self._activity_stream_data_widgets.values():
if isinstance(widget, NoteWidget):
selected = widget.underMouse()
if selected != widget.selected:
widget.set_selected(selected)
self._note_selected_changed(selected, widget.note_id)
```
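Taken together, the public interface above boils down to: hand the widget a task manager, connect to its signals, then call `load_data()`. A minimal wiring sketch for a host application, assuming a parent widget and a `BackgroundTaskManager` are already set up (names are illustrative, not part of the module):

```python
# Hedged sketch of host-application wiring for ActivityStreamWidget.
stream = ActivityStreamWidget(parent_widget)
stream.set_bg_task_manager(task_manager)  # assumed: BackgroundTaskManager

# React to clicks on entities and version playback requests via the
# documented signals.
def on_entity_requested(entity_type, entity_id):
    print("Navigate to %s %s" % (entity_type, entity_id))

stream.entity_requested.connect(on_entity_requested)
stream.playback_requested.connect(lambda sg_data: print(sg_data))

# Optional behavior tweaks via the exposed properties.
stream.show_sg_stream_button = False
stream.notes_are_selectable = True

# Load cached data for an entity; a background rescan follows.
stream.load_data({"type": "Shot", "id": 1167})

# Force a requery of activity data later if needed.
stream.rescan(force_activity_stream_update=True)
```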
#### File: python/activity_stream/reply_list.py
```python
import sgtk
import os
import sys
from sgtk.platform.qt import QtCore, QtGui
from .dialog_reply import ReplyDialog
from .ui.reply_list_widget import Ui_ReplyListWidget
from .label_widgets import ClickableLabel
from .data_manager import ActivityStreamDataHandler
from .widget_attachment_group import AttachmentGroupWidget
from .widget_reply import ReplyWidget
from .overlaywidget import SmallOverlayWidget
utils = sgtk.platform.import_framework("tk-framework-shotgunutils", "utils")
class ReplyListWidget(QtGui.QWidget):
"""
QT Widget that displays a note conversation,
including attachments and the ability to reply.
This will first render the body of the note, including the attachments,
and then subsequent replies. This widget uses the same
widgets, data backend and visual components as the
activity stream.
:signal entity_requested(str, int): Fires when someone clicks an entity inside
the activity stream. The returned parameters are entity type and entity id.
"""
# when someone clicks a link or similar
entity_requested = QtCore.Signal(str, int)
def __init__(self, parent):
"""
:param parent: QT parent object
:type parent: :class:`~PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
QtGui.QWidget.__init__(self, parent)
# now load in the UI that was created in the UI designer
self.ui = Ui_ReplyListWidget()
self.ui.setupUi(self)
self._note_id = None
self._sg_entity_dict = None
self._task_manager = None
self._general_widgets = []
self._reply_widgets = []
self._attachment_group_widgets = {}
self._bundle = sgtk.platform.current_bundle()
# apply styling
self._load_stylesheet()
# small overlay
self.__small_overlay = SmallOverlayWidget(self)
# create a data manager to handle backend
self._data_manager = ActivityStreamDataHandler(self)
self._data_manager.thumbnail_arrived.connect(self._process_thumbnail)
self._data_manager.note_arrived.connect(self._process_note)
def set_bg_task_manager(self, task_manager):
"""
Specify the background task manager to use to pull
data in the background. Data calls
to Shotgun will be dispatched via this object.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self._data_manager.set_bg_task_manager(task_manager)
self._task_manager = task_manager
def destroy(self):
"""
Should be called before the widget is closed
"""
self._data_manager.destroy()
self._task_manager = None
##########################################################################################
# public interface
def load_data(self, sg_entity_dict):
"""
Load replies for a given entity.
:param sg_entity_dict: Shotgun link dictionary with keys type and id.
"""
self._bundle.log_debug("Loading replies for %s" % sg_entity_dict)
if sg_entity_dict["type"] != "Note":
self._bundle.log_error("Can only show replies for Notes.")
return
# first ask the data manager to load up cached
# information about our note
self._sg_entity_dict = sg_entity_dict
note_id = self._sg_entity_dict["id"]
self._data_manager.load_note_data(note_id)
# now attempt to render the note based on cached data
self._process_note(activity_id=None, note_id=note_id)
# and read in any updates in the background
self._data_manager.rescan()
##########################################################################################
# internal methods
def _process_note(self, activity_id, note_id):
"""
Callback that gets executed when note data arrives from
the data manager.
:param activity_id: Activity stream id that this note is
associated with. Note that in this case,
when we have requested a note outside
the context of the activity stream, this
value is undefined.
:param note_id: Note id for the note for which data is available
in the data manager.
"""
self._bundle.log_debug("Retrieved new data notification for "
"activity id %s, note id %s" % (activity_id, note_id))
# set note content
note_thread_data = self._data_manager.get_note(note_id)
if note_thread_data:
self._build_replies(note_thread_data)
def _build_replies(self, note_thread_data):
# before we begin widget operations, turn off visibility
# of the whole widget in order to avoid recomputes
self.setVisible(False)
try:
###############################################################
# Phase 1 - render the UI.
self._clear()
note_id = self._sg_entity_dict["id"]
attachment_requests = []
# first display the content of the note
note_data = note_thread_data[0]
note_content = note_data.get("content") or \
    "This note does not have any content associated with it."
content_widget = QtGui.QLabel(self)
content_widget.setWordWrap(True)
content_widget.setText(note_content)
content_widget.setObjectName("note_content_label")
self.ui.reply_layout.addWidget(content_widget)
self._general_widgets.append(content_widget)
# we have cached note data
replies_and_attachments = note_thread_data[1:]
# now add replies
self._add_replies_and_attachments(replies_and_attachments)
# add a reply button and connect it
reply_button = self._add_reply_button()
reply_button.clicked.connect(lambda : self._on_reply_clicked(note_id))
# add a proxy widget that should expand to fill all white
# space available
expanding_widget = QtGui.QLabel(self)
self.ui.reply_layout.addWidget(expanding_widget)
self.ui.reply_layout.setStretchFactor(expanding_widget, 1)
self._general_widgets.append(expanding_widget)
###############################################################
# Phase 2 - request additional data.
# note that we don't interleave these requests with building
# the ui - this is to minimise the risk of GIL signal issues
# get all attachment data
# can request thumbnails post UI build
for attachment_group_id in self._attachment_group_widgets.keys():
agw = self._attachment_group_widgets[attachment_group_id]
for attachment_data in agw.get_data():
ag_request = {"attachment_group_id": attachment_group_id,
"attachment_data": attachment_data}
attachment_requests.append(ag_request)
self._bundle.log_debug("Request thumbnails...")
for attachment_req in attachment_requests:
self._data_manager.request_attachment_thumbnail(-1,
attachment_req["attachment_group_id"],
attachment_req["attachment_data"])
# now go through the shotgun data
# for each reply, request a thumbnail.
requested_items = []
for item in replies_and_attachments:
if item["type"] == "Reply":
# note that the reply data structure is special:
# the 'user' key is not a normal sg link dict,
# but contains an additional image field to describe
# the thumbnail:
#
# {'content': 'Reply content...',
# 'created_at': 1438649419.0,
# 'type': 'Reply',
# 'id': 73,
# 'user': {'image': '...',
# 'type': 'HumanUser',
# 'id': 38,
# 'name': '<NAME>'}}]
reply_author = item["user"]
uniqueness_key = (reply_author["type"], reply_author["id"])
if uniqueness_key not in requested_items:
# this thumbnail has not been requested yet
if reply_author.get("image"):
# there is a thumbnail for this user!
requested_items.append(uniqueness_key)
self._data_manager.request_user_thumbnail(reply_author["type"],
reply_author["id"],
reply_author["image"])
finally:
# make the window visible again and trigger a redraw
self.setVisible(True)
self._bundle.log_debug("...done")
def _clear(self):
"""
Clear the widget. This will remove all items from the UI
"""
self._bundle.log_debug("Clearing UI...")
for x in self._general_widgets + self._reply_widgets + self._attachment_group_widgets.values():
# remove widget from layout:
self.ui.reply_layout.removeWidget(x)
# set its parent to None so that it is removed from the widget hierarchy
x.setParent(None)
utils.safe_delete_later(x)
self._general_widgets = []
self._reply_widgets = []
self._attachment_group_widgets = {}
def _load_stylesheet(self):
"""
Loads in a stylesheet from disk
"""
qss_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "style.qss")
# use a context manager so the file handle is closed even if
# reading the stylesheet fails partway through
with open(qss_file, "rt") as f:
    qss_data = f.read()
# apply to widget (and all its children)
self.setStyleSheet(qss_data)
def _add_reply_button(self):
"""
Add a reply button to the stream of widgets
"""
reply_button = ClickableLabel(self)
reply_button.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop)
reply_button.setText("Reply to this Note")
reply_button.setObjectName("reply_button")
self.ui.reply_layout.addWidget(reply_button)
self._general_widgets.append(reply_button)
return reply_button
def _add_attachment_group(self, attachments, after_note):
"""
Add an attachment group to the stream of widgets
"""
curr_attachment_group_widget_id = len(self._attachment_group_widgets)
attachment_group = AttachmentGroupWidget(self, attachments)
# show an 'ATTACHMENTS' header
attachment_group.show_attachments_label(True)
offset = attachment_group.OFFSET_NONE if after_note else attachment_group.OFFSET_LARGE_THUMB
attachment_group.adjust_left_offset(offset)
self.ui.reply_layout.addWidget(attachment_group)
# add it to our mapping dict and increment the counter
self._attachment_group_widgets[curr_attachment_group_widget_id] = attachment_group
def _add_replies_and_attachments(self, replies_and_attachments):
"""
Add replies and attachment widgets to the stream of widgets
:param replies_and_attachments: List of Shotgun data dictionaries.
    These are either Reply entities or Attachment entities.
"""
current_attachments = []
attachment_is_directly_after_note = True
for item in replies_and_attachments:
if item["type"] == "Reply":
# first, wrap up attachments
if len(current_attachments) > 0:
self._add_attachment_group(current_attachments, attachment_is_directly_after_note)
current_attachments = []
w = ReplyWidget(self)
w.adjust_thumb_style(w.LARGE_USER_THUMB)
self.ui.reply_layout.addWidget(w)
w.set_info(item)
self._reply_widgets.append(w)
# ensure navigation requests from replies bubble up
w.entity_requested.connect(self.entity_requested.emit)
# next bunch of attachments will be after a reply
# rather than directly under the note
# (this affects the visual style)
attachment_is_directly_after_note = False
if item["type"] == "Attachment" and item["this_file"]["link_type"] == "upload":
current_attachments.append(item)
# see if there are still open attachments
if len(current_attachments) > 0:
self._add_attachment_group(current_attachments, attachment_is_directly_after_note)
current_attachments = []
def _process_thumbnail(self, data):
"""
Callback that gets called when a new thumbnail is available.
Populate the UI with the given thumbnail
:param data: dictionary with keys:
    - thumbnail_type: thumbnail enum constant:
      ActivityStreamDataHandler.THUMBNAIL_CREATED_BY
      ActivityStreamDataHandler.THUMBNAIL_ENTITY
      ActivityStreamDataHandler.THUMBNAIL_ATTACHMENT
      ActivityStreamDataHandler.THUMBNAIL_USER
    - activity_id: Activity stream id that this update relates
      to. Note requests (which don't have an associated
      activity id) will use -1 to indicate this.
    - image: QImage with the thumbnail data
"""
thumbnail_type = data["thumbnail_type"]
activity_id = data["activity_id"]
image = data["image"]
if thumbnail_type == ActivityStreamDataHandler.THUMBNAIL_ATTACHMENT and activity_id == -1:
group_id = data["attachment_group_id"]
attachment_group = self._attachment_group_widgets[group_id]
attachment_group.apply_thumbnail(data)
elif thumbnail_type == ActivityStreamDataHandler.THUMBNAIL_USER:
# a thumbnail for a user possibly for one of our replies
for reply_widget in self._reply_widgets:
if reply_widget.thumbnail_populated:
# already set
continue
if data["entity"] == reply_widget.created_by:
reply_widget.set_thumbnail(image)
def _on_reply_clicked(self, note_id):
"""
Callback when someone clicks reply to note
:param note_id: Note id to reply to
"""
# TODO - refactor to avoid having this code in two places
# create reply dialog window
reply_dialog = ReplyDialog(self, self._task_manager, note_id)
# position the reply modal dialog above the activity stream scroll area
pos = self.mapToGlobal(self.ui.reply_scroll_area.pos())
x_pos = pos.x() + (self.ui.reply_scroll_area.width() / 2) - (reply_dialog.width() / 2) - 10
y_pos = pos.y() + (self.ui.reply_scroll_area.height() / 2) - (reply_dialog.height() / 2) - 20
reply_dialog.move(x_pos, y_pos)
# show the dialog, and while it's showing,
# enable a transparent overlay on top of the existing replies
# in order to make the reply window stand out.
try:
self.__small_overlay.show()
if reply_dialog.exec_() == QtGui.QDialog.Accepted:
self._data_manager.rescan()
try:
from sgtk.util.metrics import EventMetric
properties = {
"Source": "Reply List",
}
EventMetric.log(
EventMetric.GROUP_MEDIA,
"Created Reply",
properties=properties,
bundle=self._bundle
)
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
finally:
self.__small_overlay.hide()
```
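`ReplyListWidget` follows the same lifecycle as the activity stream widget: set a task manager, load a Note, and call `destroy()` before closing. A short sketch under the same assumptions (names are illustrative):

```python
# Hedged sketch: per load_data() above, only Note entities are accepted;
# other entity types log an error and return.
reply_list = ReplyListWidget(parent_widget)
reply_list.set_bg_task_manager(task_manager)  # assumed task manager

def on_entity_requested(entity_type, entity_id):
    print("Navigate to %s %s" % (entity_type, entity_id))

reply_list.entity_requested.connect(on_entity_requested)
reply_list.load_data({"type": "Note", "id": 6040})

# Clean up before the hosting dialog closes.
reply_list.destroy()
```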
#### File: python/shotgun_fields/shotgun_field_delegate.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
views = sgtk.platform.current_bundle().import_module("views")
shotgun_globals = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_globals"
)
shotgun_model = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_model"
)
class ShotgunFieldDelegateGeneric(views.WidgetDelegate):
"""
A generic, model-agnostic, shotgun field widget delegate.
This class is designed to be used with any model that represents data that
can be stored in Shotgun fields.
The included subclass, ``ShotgunFieldDelegate``, is designed to work
specifically with ``ShotgunModel`` instances. For other model types use this
class and supply a ``field_data_role`` to this class constructor. The
default is ``QtCore.Qt.EditRole``.
"""
def __init__(self, sg_entity_type, field_name, display_class, editor_class,
view, bg_task_manager=None,
field_data_role=QtCore.Qt.EditRole):
"""
Constructor
:param sg_entity_type: Shotgun entity type
:type sg_entity_type: String
:param field_name: Shotgun field name
:type field_name: String
:param display_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
display the field info
:param editor_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
edit the field info
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:param bg_task_manager: Optional Task manager. If this is not passed in
one will be created when the delegate widget is created.
:type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
:param int field_data_role: The data role that stores SG field data in
the model where this delegate is to be used.
"""
views.WidgetDelegate.__init__(self, view)
# The model role used to get/set values for editing the field widget
self._field_data_role = field_data_role
self._entity_type = sg_entity_type
self._field_name = field_name
self._display_class = display_class
self._editor_class = editor_class
self._bg_task_manager = bg_task_manager
@property
def field_data_role(self):
"""
The item role used to get and set data associated with the fields being
represented by this delegate.
"""
return self._field_data_role
def paint(self, painter, style_options, model_index):
"""
Paint method to handle all cells that are not being currently edited.
:param painter: The painter instance to use when painting
:param style_options: The style options to use when painting
:param model_index: The index in the data model that needs to be painted
"""
# let the base class do all the heavy lifting
super(ShotgunFieldDelegateGeneric, self).paint(
painter,
style_options,
model_index
)
# clear out the paint widget's contents to prevent it from showing in
# other places in the view (since the widget is shared)
widget = self._get_painter_widget(model_index, self.view)
widget.set_value(None)
def _create_widget(self, parent):
"""
Creates a widget to use for the delegate.
:param parent: QWidget to parent the widget to
:type parent: :class:`~PySide.QtGui.QWidget`
:returns: QWidget that will be used to paint grid cells in the view.
:rtype: :class:`~PySide.QtGui.QWidget`
"""
widget = self._display_class(
parent=parent,
entity_type=self._entity_type,
field_name=self._field_name,
bg_task_manager=self._bg_task_manager,
delegate=True,
)
if self._display_class == self._editor_class:
# display and edit classes are the same. we need to make sure
# we disable the editing so that the delegate isn't drawn in its
# edit state.
widget.enable_editing(False)
return widget
def sizeHint(self, style_options, model_index):
"""
Returns the size needed by the delegate to display the item specified by
``model_index``, taking into account the style information provided by
``style_options``.
Reimplemented from ``QStyledItemDelegate.sizeHint``
:param style_options: Style information for the item.
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param model_index: The index of the item to return the size of.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:returns: size required by the delegate
:rtype: :class:`~PySide.QtCore.QSize`
"""
if not model_index.isValid():
return QtCore.QSize()
size_hint = QtCore.QSize()
painter_widget = self._get_painter_widget(model_index, self.view)
if painter_widget:
size_hint = painter_widget.size()
return size_hint
def _create_editor_widget(self, model_index, style_options, parent):
"""
Create an editor widget for the supplied model index.
:param model_index: The index of the item in the model to return a
widget for
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:param style_options: Specifies the current Qt style options for this
index
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param parent: The parent view that the widget should be parented to
:type parent: :class:`~PySide.QtGui.QWidget`
:returns: A QWidget to be used for editing the current index
:rtype: :class:`~PySide.QtGui.QWidget`
"""
# ensure the field is editable
if not shotgun_globals.field_is_editable(self._entity_type,
self._field_name):
return None
if not model_index.isValid():
return None
if not self._editor_class:
return None
widget = self._editor_class(
parent=parent,
entity_type=self._entity_type,
field_name=self._field_name,
bg_task_manager=self._bg_task_manager,
delegate=True,
)
if self._display_class == self._editor_class:
# display and edit classes are the same. we need to make sure
# we enable the editing
widget.enable_editing(True)
# auto fill the background color so that the display widget doesn't show
# behind.
widget.setAutoFillBackground(True)
return widget
def _on_before_paint(self, widget, model_index, style_options):
"""
Update the display widget with the value stored in the supplied model
index. The value is retrieved for the role supplied to the
``field_data_role`` argument supplied to the constructor.
:param widget: The QWidget (constructed in _create_widget()) which will
be used to paint the cell.
:param model_index: object representing the data of the object that is
about to be drawn.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:param style_options: Object containing specifics about the
view related state of the cell.
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
"""
# make sure the display widget is populated with the correct data
self._set_widget_value(widget, model_index)
def setEditorData(self, editor, model_index):
"""
Sets the data to be displayed and edited by the editor from the data
model item specified by the model index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model_index: The index of the model to be edited.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
# make sure the editor widget is populated with the correct data
self._set_widget_value(editor, model_index)
def setModelData(self, editor, model, index):
"""
Gets data from the editor widget and stores it in the specified model at
the item index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# compare the new/old values to see if there is a change
new_value = editor.get_value()
cur_value = src_index.data(self.field_data_role)
if cur_value == new_value:
# value didn't change. nothing to do here.
return
# attempt to set the new value in the model
successful = src_index.model().setData(
src_index, new_value, self.field_data_role)
if not successful:
bundle = sgtk.platform.current_bundle()
bundle.log_error(
"Unable to set model data for widget delegate: %s, %s" %
(self._entity_type, self._field_name)
)
def editorEvent(self, event, model, option, index):
"""
Handles mouse events on the editor.
:param event: The event that occurred.
:type event: :class:`~PySide.QtCore.QEvent`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param option: Options for rendering the item.
:type option: :class:`~PySide.QtQui.QStyleOptionViewItem`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
:return: ``True``, if the event was handled, ``False`` otherwise.
:rtype: ``bool``
"""
# The primary use for this is labels displaying clickable links (entity,
# multi-entity, etc). By default, they're painted into the view via the
# delegate, so you can't interact with them. There were some suggestions
# online for working around this that seemed really complex. This is a
# solution Rob suggested which I tried and it seems to work... and is
# much simpler! Basically, detect a mouse click (release is all we have
# really) in the delegate, populate the underlying widget with the data
# from the index, then forward the event to the widget. The result is a
# simulation of clicking on the actual widget.
# Forward mouse clicks to the underlying display widget. This only kicks
# in if the editor widget isn't displayed or doesn't process a mouse
# event for some reason. If you're having trouble with editors
# disappearing, it may be because they can't receive focus or aren't
# handling a mouse click.
if event.type() == QtCore.QEvent.MouseButtonRelease:
self._forward_mouse_event(event, index)
return True
return False
def _forward_mouse_event(self, mouse_event, index):
"""
Forward the mouse event to the display widget to simulate
interacting with the widget. This is necessary since the delegate only
paints the widget in the view rather than being an actual widget
instance.
:param mouse_event: The event that occurred on the delegate.
:type mouse_event: :class:`~PySide.QtCore.QEvent`
:param index: The model index that was acted on.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
# get the widget used to paint this index, populate it with the
# value for this index
widget = self._get_painter_widget(index, self.view)
self._set_widget_value(widget, index)
item_rect = self.view.visualRect(index)
# get the rect of the item in the view
widget.resize(item_rect.size())
# move the widget to 0, 0 so we know exactly where it is
widget.move(0, 0)
# map global mouse position to within item_rect
view_pos = self.view.viewport().mapFromGlobal(QtGui.QCursor.pos())
# calculate the offset from the item rect
widget_x = view_pos.x() - item_rect.x()
widget_y = view_pos.y() - item_rect.y()
# forward the mouse event to the display widget
forward_event = QtGui.QMouseEvent(
mouse_event.type(),
QtCore.QPoint(widget_x, widget_y),
mouse_event.button(),
mouse_event.buttons(),
mouse_event.modifiers(),
)
QtGui.QApplication.sendEvent(widget, forward_event)
def _set_widget_value(self, widget, model_index):
"""
Updates the supplied widget with data from the supplied model index.
:param widget: The widget to set the value for
:param model_index: The index of the model where the data comes from
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(model_index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
value = src_index.data(self.field_data_role)
widget.set_value(shotgun_model.sanitize_qt(value))
class ShotgunFieldDelegate(ShotgunFieldDelegateGeneric):
"""
A delegate for a given type of Shotgun field. This delegate is designed to
work with indexes from a ``ShotgunModel`` where the value of the field is
stored in the ``SG_ASSOCIATED_FIELD_ROLE`` role.
"""
def __init__(self, sg_entity_type, field_name, display_class, editor_class,
view, bg_task_manager=None):
"""
Constructor
:param sg_entity_type: Shotgun entity type
:type sg_entity_type: String
:param field_name: Shotgun field name
:type field_name: String
:param display_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
display the field info
:param editor_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
edit the field info
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:param bg_task_manager: Optional Task manager. If this is not passed in
one will be created when the delegate widget is created.
:type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
"""
field_data_role = shotgun_model.ShotgunModel.SG_ASSOCIATED_FIELD_ROLE
super(ShotgunFieldDelegate, self).__init__(
sg_entity_type, field_name, display_class, editor_class, view,
bg_task_manager=bg_task_manager, field_data_role=field_data_role
)
def setModelData(self, editor, model, index):
"""
Gets data from the editor widget and stores it in the specified model at
the item index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# compare the new/old values to see if there is a change
new_value = editor.get_value()
cur_value = src_index.data(self.field_data_role)
if cur_value == new_value:
# value didn't change. nothing to do here.
return
bundle = sgtk.platform.current_bundle()
# special case for image fields in the ShotgunModel. The SG model stores
# the image field in the first column. If the value has changed, set the
# icon value there.
if editor.get_field_name() == "image":
primary_item = src_index.model().item(src_index.row(), 0)
try:
if new_value:
# update the value locally in the model
primary_item.setIcon(QtGui.QIcon(new_value))
else:
primary_item.setIcon(QtGui.QIcon())
except Exception as e:
bundle.log_error(
"Unable to set icon for widget delegate: %s" % (e,))
return
successful = src_index.model().setData(
src_index,
new_value,
self.field_data_role
)
if not successful:
bundle.log_error(
"Unable to set model data for widget delegate: %s, %s" %
(self._entity_type, self._field_name)
)
def _set_widget_value(self, widget, model_index):
"""
Updates the supplied widget with data from the supplied model index.
:param widget: The widget to set the value for
:param model_index: The index of the model where the data comes from
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(model_index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# special case for image fields in the ShotgunModel. The SG model has
# the ability to pre-query thumbnails for entities for efficiency. If
# this is the image field for an entity in the SG model, we can make use
# of the potentially pre-queried image available in the first column.
if widget.get_field_name() == "image":
primary_item = src_index.model().item(src_index.row(), 0)
icon = primary_item.icon()
if icon:
widget.set_value(icon.pixmap(QtCore.QSize(256, 256)))
return
value = src_index.data(self.field_data_role)
widget.set_value(shotgun_model.sanitize_qt(value))
def _map_to_source(idx, recursive=True):
"""
Map the specified index to its source model. This can be done recursively
to map back through a chain of proxy models to the source model at the
beginning of the chain
:param idx: The index to map from
:param recursive: If true then the function will recurse up the model chain
until it finds an index belonging to a model that doesn't derive from
QAbstractProxyModel. If false then it will just return the index from
the immediate parent model.
:returns: QModelIndex in the source model or the first model in the chain
that isn't a proxy model if recursive is True.
"""
src_idx = idx
while src_idx.isValid() and isinstance(
src_idx.model(), QtGui.QAbstractProxyModel):
src_idx = src_idx.model().mapToSource(src_idx)
if not recursive:
break
return src_idx
```
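The ``_map_to_source`` helper above is the generic recipe for walking a view index back through stacked proxy models. Below is a minimal, self-contained sketch of the same pattern using only stock Qt classes (assuming a Qt4-era PySide, as imported throughout these files); the model contents and names are illustrative only.

```python
from PySide.QtGui import (QApplication, QAbstractProxyModel,
                          QSortFilterProxyModel, QStandardItem,
                          QStandardItemModel)

app = QApplication([])

# a trivial source model with one item
source = QStandardItemModel()
source.appendRow(QStandardItem("hello"))

# two stacked proxies, as might sit between a view and its source model
proxy_a = QSortFilterProxyModel()
proxy_a.setSourceModel(source)
proxy_b = QSortFilterProxyModel()
proxy_b.setSourceModel(proxy_a)

idx = proxy_b.index(0, 0)
# walk back through the chain, exactly as _map_to_source does
while idx.isValid() and isinstance(idx.model(), QAbstractProxyModel):
    idx = idx.model().mapToSource(idx)

assert idx.model() is source and idx.data() == "hello"
```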
#### File: python/shotgun_fields/shotgun_field_manager.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .shotgun_field_delegate import ShotgunFieldDelegateGeneric, ShotgunFieldDelegate
from .shotgun_field_editable import ShotgunFieldEditable, ShotgunFieldNotEditable
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
class ShotgunFieldManager(QtCore.QObject):
"""
Inheriting from :class:`~PySide.QtCore.QObject`, this class acts as a factory
for the set of widgets that can display values from Shotgun in a way appropriate
to their field type.
:signals:
``initialized()`` - Fires when the manager has finished running all the background tasks
it needs for its functionality
:enum: ``DISPLAY, EDITOR, EDITABLE`` - Enumeration for widget types managed and
provided by the class
"""
# dictionary that keeps the mapping from Shotgun data type to widget class
__WIDGET_TYPE_CLS_MAP = {}
# dictionary that keeps the mapping from Shotgun data type to widget class.
# similar to ``__WIDGET_TYPE_CLS_MAP``, but this lookup stores widget classes
# for specific entity+field combinations
__ENTITY_FIELD_WIDGET_TYPE_CLS_MAP = {}
# fires when we are ready to manage the widgets
initialized = QtCore.Signal()
# widget types enumeration
_WIDGET_TYPES = (DISPLAY, EDITOR, EDITABLE) = ("display", "editor", "editable")
############################################################################
# class methods
@classmethod
def get_class(cls, sg_entity_type, field_name, widget_type=DISPLAY):
"""
Returns the registered class associated with the field name for the
supplied entity and widget type.
``widget_type`` must be one of the enum values ``DISPLAY``, ``EDITOR``, or
``EDITABLE`` defined by the manager class. The default is ``DISPLAY``.
This method typically doesn't need to be called. Use :meth:`.create_widget`
to get an instance of a registered class.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param str widget_type: The type of widget class to return
:returns: :class:`~PySide.QtGui.QWidget` class or ``None`` if the field
type has no display widget
:raises: :class:`ValueError` if the supplied ``widget_type`` is not known.
"""
if widget_type not in cls._WIDGET_TYPES:
raise ValueError(
"ShotgunFieldManager unable to retrieve fields of type: %s " %
(widget_type,)
)
# see if there's a widget class registered for this specific entity type
# and field combination.
entity_field_widget_cls = cls.__ENTITY_FIELD_WIDGET_TYPE_CLS_MAP.\
get(sg_entity_type, {}).get(field_name, {}).get(widget_type)
if entity_field_widget_cls:
# found a widget class for the specific entity+field+type combo
return entity_field_widget_cls
# fall back to the widget class for this field's data type
data_type = shotgun_globals.get_data_type(sg_entity_type, field_name)
return cls.__WIDGET_TYPE_CLS_MAP.get(data_type, {}).get(widget_type)
@classmethod
def register_class(cls, field_type, widget_class, widget_type):
"""
Register a widget class for the given Shotgun field type.
``widget_type`` must be one of the enum values ``DISPLAY``, ``EDITOR``, or
``EDITABLE`` defined by the manager class.
This method usually does not need to be called. Widget classes are
typically registered as they are imported (when using the
:class:`.ShotgunFieldMeta` class).
If you wish to override widgets at a global level (across all entity types),
you can call this method manually. To override a widget for a specific
entity and field type combination, call the ``register_entity_field_class()``
method.
:param str field_type: The data type of the field to associate with a type of widget
:param widget_class: The display widget class to associate with the given field type
:type widget_class: :class:`PySide.QtGui.QWidget`
:param str widget_type: The type of widget class to register.
:raises: :class:`ValueError` if the supplied ``widget_type`` is not known.
"""
if widget_type not in cls._WIDGET_TYPES:
raise ValueError(
"ShotgunFieldManager unable to register unrecognized widget type: %s " %
(widget_type,)
)
cls.__WIDGET_TYPE_CLS_MAP.setdefault(field_type, {})[widget_type] = widget_class
@classmethod
def register_entity_field_class(cls, entity_type, field_name, widget_class, widget_type):
"""
Similar to the ``register_class`` method, but registers a widget to be used
with a specific entity type and field. This is provided to allow very specific
widget customizations for displaying and editing fields when the default
widgets are insufficient.
Example usage includes ``checkbox`` fields (boolean values) where you may want
to display an icon (or not) based on the field value rather than a standard
``QtGui.QCheckbox`` based widget.
:param str entity_type: The entity type to register the widget class for
:param str field_name: The name of the field to register the widget class for
:param widget_class: The class of the widget to register for the entity type/field_name
:type widget_class: :class:`~PySide.QtGui.QWidget`
:param str widget_type: The type of widget to register.
"""
if widget_type not in cls._WIDGET_TYPES:
raise ValueError(
"ShotgunFieldManager unable to register unrecognized widgets type: %s " %
(widget_type,)
)
# register with a separate lookup specific to entity+field combo
field_map = cls.__ENTITY_FIELD_WIDGET_TYPE_CLS_MAP.setdefault(entity_type, {})
field_map.setdefault(field_name, {})[widget_type] = widget_class
############################################################################
# special methods
def __init__(self, parent, bg_task_manager=None):
"""
Initialize the field manager factory.
:param parent: Parent object
:type parent: :class:`~PySide.QtGui.QWidget`
:param bg_task_manager: Optional Task manager. If this is not passed in one will be created
when the object is initialized.
:type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
"""
QtCore.QObject.__init__(self, parent)
self._task_manager = bg_task_manager
self._initialized = False
def __del__(self):
"""
Destructor.
Unregisters the field manager's background task manager.
"""
if self._initialized:
shotgun_globals.unregister_bg_task_manager(self._task_manager)
############################################################################
# public methods
def create_delegate(self, sg_entity_type, field_name, view):
"""
Returns a delegate that can be used in the given view to show data from the given
field from the given entity type. This delegate is designed to be used by items
from a shotgun_model's additional columns. It assumes that the value for the field
will be stored in the ``SG_ASSOCIATED_FIELD_ROLE``
(via the :class:`~tk-framework-shotgunutils:shotgun_model.ShotgunModel`) role of
its current index.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:returns: A :class:`ShotgunFieldDelegate` configured to represent the given field
"""
display_class = self.get_class(sg_entity_type, field_name)
if not display_class:
from .label_base_widget import LabelBaseWidget
display_class = LabelBaseWidget
editor_class = self.get_class(sg_entity_type, field_name, self.EDITOR)
return ShotgunFieldDelegate(
sg_entity_type,
field_name,
display_class,
editor_class,
view,
bg_task_manager=self._task_manager
)
def create_generic_delegate(self, sg_entity_type, field_name, view, field_data_role=QtCore.Qt.EditRole):
"""
Returns a delegate that can be used in the given view to show data from
the given field from the given entity type. Unlike ``create_delegate``,
this method returns a delegate that can be used with any model
representing SG field data. The additional ``field_data_role`` parameter
is supplied to tell the delegate which role in the model will store the
field data to edit/display.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:param int field_data_role: The data role that stores SG field data in
the model where this delegate is to be used. The default value is
``QtCore.Qt.EditRole``.
:returns: A :class:``ShotgunFieldDelegateGeneric`` configured to
represent the given field
"""
display_class = self.get_class(sg_entity_type, field_name)
if not display_class:
from .label_base_widget import LabelBaseWidget
display_class = LabelBaseWidget
editor_class = self.get_class(sg_entity_type, field_name, self.EDITOR)
return ShotgunFieldDelegateGeneric(
sg_entity_type,
field_name,
display_class,
editor_class,
view,
bg_task_manager=self._task_manager,
field_data_role=field_data_role
)
def create_label(self, sg_entity_type, field_name, prefix=None, postfix=None):
"""
Returns a widget that can be used as a label for the given field.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param str prefix: Additional text to prefix the field label.
:param str postfix: Additional text to postfix the field label.
:returns: :class:`~PySide.QtGui.QLabel`
"""
display_name = shotgun_globals.get_field_display_name(sg_entity_type, field_name)
if prefix:
display_name = prefix + display_name
if postfix:
display_name += postfix
return QtGui.QLabel(display_name)
def create_widget(self, sg_entity_type, field_name, widget_type=EDITABLE, entity=None, parent=None, **kwargs):
"""
Returns a widget associated with the entity and field type if a
corresponding widget class has been registered.
``widget_type`` must be one of the enum values ``DISPLAY``, ``EDITOR``, or
``EDITABLE`` defined by the manager class.
If the entity is passed in and has the value for the requested field
then the initial contents of the widget will display that value.
Any keyword args other than those below will be passed to the
constructor of whatever ``QWidget`` the field widget wraps.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param str widget_type: The type of widget to return.
:param dict entity: The Shotgun entity dictionary to pull the field value from.
:param parent: Parent widget
:type parent: :class:`PySide.QtGui.QWidget`
:returns: :class:`~PySide.QtGui.QWidget` or ``None`` if the field type has no display widget
"""
if widget_type is self.EDITABLE:
widget = self._create_editable_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
elif widget_type is self.EDITOR:
widget = self._create_editor_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
elif widget_type is self.DISPLAY:
widget = self._create_display_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
else:
raise TypeError(
"Unknown widget type supplied to ShotgunFieldManager."
"create_widget: %s" % (widget_type,)
)
return widget
def initialize(self):
"""
Initialize the task manager.
When initialization is complete the initialized signal will be emitted.
"""
if self._initialized:
# already initialized
return
if self._task_manager is None:
# create our own task manager if one wasn't passed in
task_manager = sgtk.platform.import_framework("tk-framework-shotgunutils", "task_manager")
self._task_manager = task_manager.BackgroundTaskManager(
parent=self,
max_threads=1,
start_processing=True
)
# let shotgun globals start loading the schema
shotgun_globals.register_bg_task_manager(self._task_manager)
shotgun_globals.run_on_schema_loaded(self.__schema_loaded)
self._initialized = True
def supported_fields(self, sg_entity_type, field_names, widget_type=None):
"""
Returns the subset of fields from field_names that have an associated widget class.
The ``field_names`` may be in "bubbled" notation, for example "sg_task.Task.assignee".
``widget_type`` must be one of the enum values ``DISPLAY``, ``EDITOR``, or
``EDITABLE`` defined by the manager class or ``None``.
If ``widget_type`` is ``None``, ``DISPLAY`` will be assumed.
The default is to return a list of field names that have an associated
display widget registered.
:param str sg_entity_type: Shotgun entity type
:param list field_names: A list of (:obj:`str`) Shotgun field names
:param str widget_type: The type of widget class to check for support.
:returns: The subset of ``field_names`` that have associated widget classes.
"""
supported_fields = []
widget_type = widget_type or self.DISPLAY
# go through each of the supplied field names to see if widgets are defined for them
for field_name in field_names:
# handle bubbled field syntax
if "." in field_name:
(resolved_entity_type, resolved_field_name) = field_name.split(".")[-2:]
else:
(resolved_entity_type, resolved_field_name) = (sg_entity_type, field_name)
# see if this entity+field+type combo has a widget registered
widget_cls = self.get_class(resolved_entity_type, resolved_field_name, widget_type)
if widget_cls:
supported_fields.append(field_name)
continue
# if we're here, then no direct widget for the supplied entity+field
# or data type. the only other possibility is if this is an editable
# widget type request. if so, then the field may be supported by
# the default editable widget combining a display & editor. see if
# those exist for this entity+field
if widget_type == self.EDITABLE:
display_cls = self.get_class(
resolved_entity_type, resolved_field_name, widget_type=self.DISPLAY)
editor_cls = self.get_class(
resolved_entity_type, resolved_field_name, widget_type=self.EDITOR)
if display_cls and editor_cls:
supported_fields.append(field_name)
return supported_fields
############################################################################
# protected methods
def _create_display_widget(self, sg_entity_type, field_name, entity=None, parent=None, **kwargs):
"""
Returns an instance of the display widget registered for the supplied field type.
If the entity is passed in and has the value for the requested field in it then the
initial contents of the widget will display that value.
Any keyword args other than those below will be passed to the constructor of whatever
``QWidget`` the field widget wraps.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param entity: The Shotgun entity dictionary to pull the field value from.
:type entity: Whatever is returned by the Shotgun API for this field
:param parent: Parent widget
:type parent: :class:`PySide.QtGui.QWidget`
:returns: :class:`~PySide.QtGui.QWidget` or ``None`` if the field type has no display widget
"""
display_cls = self.get_class(sg_entity_type, field_name)
widget = None
if display_cls:
# instantiate the widget
widget = display_cls(
parent=parent,
entity_type=sg_entity_type,
field_name=field_name,
entity=entity,
bg_task_manager=self._task_manager,
**kwargs
)
# registered classes can act as both display and editor. check to
# see if the classes match, and if so, disable editing since only
# display was requested.
editor_cls = self.get_class(sg_entity_type, field_name, self.EDITOR)
if editor_cls == display_cls:
widget.enable_editing(False)
return widget
def _create_editor_widget(self, sg_entity_type, field_name, entity=None, parent=None, **kwargs):
"""
Returns an instance of the editor widget registered for the supplied field type.
If the entity is passed in and has the value for the requested field in it then the
initial contents of the widget will edit that value.
Any keyword args other than those below will be passed to the constructor of whatever
``QWidget`` the field widget wraps.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param entity: The Shotgun entity dictionary to pull the field value from.
:type entity: Whatever is returned by the Shotgun API for this field
:param parent: Parent widget
:type parent: :class:`PySide.QtGui.QWidget`
:returns: :class:`~PySide.QtGui.QWidget` or ``None`` if the field type has no editor widget
"""
# check to make sure the field is editable. if it is not, return a
# wrapped version of the display widget that indicates that the field
# is not editable.
if not shotgun_globals.field_is_editable(sg_entity_type, field_name):
display_widget = self._create_display_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
if display_widget:
return ShotgunFieldNotEditable(display_widget)
else:
# no guarantee that a display widget has been registered
return None
# the field is editable, try to get the editor class
editor_cls = self.get_class(sg_entity_type, field_name, self.EDITOR)
widget = None
if editor_cls:
# instantiate the widget
widget = editor_cls(
parent=parent,
entity_type=sg_entity_type,
field_name=field_name,
entity=entity,
bg_task_manager=self._task_manager,
**kwargs
)
# registered classes can act as both display and editor. check to
# see if the classes match, and if so, make sure the editor is enabled
display_cls = self.get_class(sg_entity_type, field_name)
if display_cls == editor_cls:
# display and edit classes are the same. we need to make sure
# we enable the editing
widget.enable_editing(True)
return widget
def _create_editable_widget(self, sg_entity_type, field_name, entity=None, parent=None, **kwargs):
"""
Returns an instance of the editable widget registered for the supplied field type.
If no editable widget is registered, a wrapped widget will be constructed
using the registered display and editor widgets.
If the entity is passed in and has the value for the requested field in it then the
initial contents of the widget will edit that value.
Any keyword args other than those below will be passed to the constructor of whatever
``QWidget`` the field widget wraps.
:param str sg_entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param entity: The Shotgun entity dictionary to pull the field value from.
:type entity: Whatever is returned by the Shotgun API for this field
:param parent: Parent widget
:type parent: :class:`PySide.QtGui.QWidget`
:returns: :class:`~PySide.QtGui.QWidget` or ``None`` if the field type
has no editable widget and one could not be constructed.
"""
editable_cls = self.get_class(sg_entity_type, field_name, self.EDITABLE)
if editable_cls:
# instantiate the widget
widget = editable_cls(
parent=parent,
entity_type=sg_entity_type,
field_name=field_name,
entity=entity,
bg_task_manager=self._task_manager,
**kwargs
)
return widget
# no registered editable widget. that's ok, we'll try to construct one
# with the registered display/editor classes using `ShotgunFieldEditable`
# as a wrapper (stacked widget)
display_cls = self.get_class(sg_entity_type, field_name)
if not display_cls:
# nothing to do if can't even display the field
return None
display_widget = self._create_display_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
# check to make sure the field is editable. if it is not, return a
# wrapped version of the display widget that indicates that the field
# is not editable.
if not shotgun_globals.field_is_editable(sg_entity_type, field_name):
return ShotgunFieldNotEditable(display_widget)
editor_cls = self.get_class(sg_entity_type, field_name, self.EDITOR)
if editor_cls and editor_cls == display_cls:
# if the editor and display are the same, just return the editing
# enabled version of the display widget.
display_widget.enable_editing(True)
return display_widget
if not editor_cls:
return ShotgunFieldNotEditable(display_widget)
editor_widget = self._create_editor_widget(
sg_entity_type, field_name, entity, parent, **kwargs)
# we should have both a display and editor widget; wrap them up and return
return ShotgunFieldEditable(display_widget, editor_widget, parent)
############################################################################
# private methods
def __schema_loaded(self):
"""
Internal method that will be called when the schema is available.
"""
self.initialized.emit()
# import the actual field types to give them a chance to register
from . import (
checkbox_widget,
currency_widget,
date_and_time_widget,
date_widget,
entity_widget,
file_link_widget,
float_widget,
footage_widget,
image_widget,
list_widget,
multi_entity_widget,
number_widget,
percent_widget,
status_list_widget,
tags_widget,
text_widget,
url_template_widget,
)
# TODO: wait to register timecode field until the fps associated with this field
# is available from the API
from . import timecode_widget
# TODO: wait to register duration field until display options for hours versus
# days and of hours in a day are available to the API
from . import duration_widget
```
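To tie the above together, here is a hedged sketch of driving the manager's public API: construct it, wait for the ``initialized`` signal, then request widgets. ``parent_widget`` and ``layout`` are placeholders for whatever your app provides, and the entity/field names are just examples.

```python
# usage sketch (assumes a running Toolkit engine and an existing Qt layout)
manager = ShotgunFieldManager(parent_widget)

def _on_ready():
    # widget_type is one of DISPLAY, EDITOR or EDITABLE (the default)
    widget = manager.create_widget(
        "Shot", "sg_status_list", widget_type=ShotgunFieldManager.DISPLAY
    )
    if widget:  # None if no display widget is registered for the field type
        layout.addWidget(widget)

manager.initialized.connect(_on_ready)
manager.initialize()
```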
#### File: python/shotgun_fields/tags_widget.py
```python
from sgtk.platform.qt import QtCore, QtGui
from .bubble_widget import BubbleEditWidget, BubbleWidget
from .label_base_widget import ElidedLabelBaseWidget
from .shotgun_field_meta import ShotgunFieldMeta
from .ui import resources_rc
class TagsWidget(ElidedLabelBaseWidget):
"""
Display a ``tag_list`` field value as returned by the Shotgun API.
"""
__metaclass__ = ShotgunFieldMeta
_DISPLAY_TYPE = "tag_list"
def _string_value(self, value):
"""
Convert the Shotgun value for this field into a string
:param list value: A list of tag name strings
"""
tag_strings = []
for tag in value:
tag_strings.append(
"<img src='%s'> %s" % (
":/qtwidgets-shotgun-fields/tag.png", tag
)
)
return " ".join(tag_strings)
class TagsEditorWidget(BubbleEditWidget):
# TODO: The python API does not currently support add/remove/edit of tags.
# Once the api supports tag updates, this class can be further fleshed out
# to mimic the editing capabilities available in the web interface.
# TODO: The following line is commented out so that the class is not
# registered as a tag editor. Uncomment when tags are supported.
#__metaclass__ = ShotgunFieldMeta
#_EDITOR_TYPE = "tag_list"
# TODO: some additional validation will need to happen to make sure a valid
# tag was entered and that the user can create a tag if one does not exist.
# A tag completer would also be useful if matching tag list could be queried
# or made available via the cached schema.
def add_tag(self, tag):
"""
Add a tag bubble to the widget.
:param str tag: The name of a tag to display
:return: unique id for the added tag
:rtype: :obj:`int`
"""
# get a list of the current tag bubbles to see if the tag being
# added is already in the list. if it is, remove it and re-add it to the
# end of the list
bubbles = self.get_bubbles()
for bubble in bubbles:
bubble_tag = bubble.get_data()
# see if the bubble matches the supplied tag
if tag == bubble_tag:
# move the bubble to the end
self.remove_bubble(bubble.id)
return self.add_tag(tag)
# create a bubble widget to display the tag
tag_bubble = BubbleWidget()
tag_bubble.set_data(tag)
tag_bubble.set_image(":/qtwidgets-shotgun-fields/tag.png")
tag_bubble.set_text(tag)
tag_bubble_id = self.add_bubble(tag_bubble)
# return the unique id for the added bubble
return tag_bubble_id
def get_value(self):
"""
Return a list of tag names for the entity bubbles in the widget.
:returns: A list of :obj:`str` objects.
:rtype: :obj:`list`
"""
return [b.get_data() for b in self.get_bubbles()]
def keyPressEvent(self, event):
"""
Handles user interaction with the widget via keyboard.
- Enter, Return, Tab, Comma, and Space will cause the currently typed tag to be added.
:param event: The key press event.
:type event: :class:`~PySide.QtGui.QEvent`
"""
if event.key() in [
QtCore.Qt.Key_Enter,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Tab,
QtCore.Qt.Key_Comma,
QtCore.Qt.Key_Space,
]:
tag = self.get_typed_text()
tag = tag.strip()
self.add_tag(tag)
self.clear_typed_text()
event.ignore()
return
super(TagsEditorWidget, self).keyPressEvent(event)
def remove_tag(self, tag):
"""
Removes the supplied tag bubble from the widget.
:param str tag: The tag to remove
"""
bubbles = self.get_bubbles()
for bubble in bubbles:
bubble_tag = bubble.get_data()
if tag == bubble_tag:
self.remove_bubble(bubble.id)
return
def _display_default(self):
"""
Display the default value of the widget.
"""
self.clear()
def _display_value(self, value):
"""
Set the value displayed by the widget.
:param value: The value returned by the Shotgun API to be displayed
"""
self.clear()
for tag in value:
self.add_tag(tag)
```
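A quick sketch of how the editor's bubble API behaves once instantiated. This assumes ``BubbleEditWidget`` takes the usual optional Qt parent, that a ``QApplication`` already exists, and that ``get_bubbles`` preserves display order; the tag names are made up.

```python
editor = TagsEditorWidget()          # parentless, for illustration
editor.add_tag("approved")
editor.add_tag("wip")
editor.add_tag("approved")           # re-adding moves the bubble to the end
print(editor.get_value())            # -> ["wip", "approved"]
editor.remove_tag("wip")
print(editor.get_value())            # -> ["approved"]
```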
#### File: python/shotgun_fields/text_widget.py
```python
from .label_base_widget import ElidedLabelBaseWidget
from .shotgun_field_meta import ShotgunFieldMeta
from sgtk.platform.qt import QtCore, QtGui
class TextWidget(ElidedLabelBaseWidget):
"""
Display a ``text`` field value as returned by the Shotgun API.
"""
__metaclass__ = ShotgunFieldMeta
_DISPLAY_TYPE = "text"
class TextEditorWidget(QtGui.QTextEdit):
"""
Allows editing of a ``text`` field value as returned by the Shotgun API.
"""
__metaclass__ = ShotgunFieldMeta
_EDITOR_TYPE = "text"
def get_value(self):
"""
:return: The internal value being displayed by the widget.
"""
return self._get_safe_str(self.toPlainText())
def keyPressEvent(self, event):
"""
Provides shortcuts for applying modified values.
:param event: The key press event object
:type event: :class:`~PySide.QtGui.QKeyEvent`
Ctrl+Enter or Ctrl+Return will trigger the emission of the ``value_changed``
signal.
"""
if event.key() in [
QtCore.Qt.Key_Enter,
QtCore.Qt.Key_Return
] and event.modifiers() & QtCore.Qt.ControlModifier:
self.value_changed.emit()
event.ignore()
return
super(TextEditorWidget, self).keyPressEvent(event)
def setup_widget(self):
"""
Prepare the widget for display.
Called by the metaclass during initialization.
"""
self.setSizePolicy(
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred
)
def _display_default(self):
"""
Display the default value of the widget.
"""
self.clear()
def _display_value(self, value):
"""
Set the value displayed by the widget.
:param value: The value returned by the Shotgun API to be displayed
"""
self.setText(self._string_value(value))
def _string_value(self, value):
"""
Ensure the value to be displayed is a string.
:param value: The value from Shotgun
"""
return str(value)
```
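The ``__metaclass__ = ShotgunFieldMeta`` assignments are what plug these classes into the manager: importing the module registers each class for the Shotgun data type named by ``_DISPLAY_TYPE``/``_EDITOR_TYPE``. A sketch of the equivalent manual calls follows; the real metaclass also mixes in signal and value plumbing, so this covers only the registration half.

```python
from .shotgun_field_manager import ShotgunFieldManager

# roughly what importing this module achieves via ShotgunFieldMeta
ShotgunFieldManager.register_class(
    "text", TextWidget, ShotgunFieldManager.DISPLAY)
ShotgunFieldManager.register_class(
    "text", TextEditorWidget, ShotgunFieldManager.EDITOR)
```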
#### File: python/shotgun_search_widget/shotgun_search_widget.py
```python
import os
from sgtk.platform.qt import QtCore, QtGui
class ShotgunSearchWidget(QtGui.QLineEdit):
"""
A Qt widget deriving from :class:`~PySide.QtGui.QLineEdit` that creates
a search input box with auto completion.
The derived classes are expected to provide a :class:`PySide.QtGui.QCompleter`
during initialization. The completer must have ``search(str)`` and ``destroy`` methods.
"""
def __init__(self, parent):
"""
:param parent: Qt parent object
:type parent: :class:`~PySide.QtGui.QWidget`
"""
# first, call the base class and let it do its thing.
super(ShotgunSearchWidget, self).__init__(parent)
# trigger the completer to popup as text changes
self.textEdited.connect(self._search_edited)
# Taken from https://wiki.qt.io/Delay_action_to_wait_for_user_interaction
self._delay_timer = QtCore.QTimer(self)
self._delay_timer.timeout.connect(self._typing_timeout)
self._delay_timer.setSingleShot(True)
# FIXME: The following was stolen from SearchWidget. We can't easily refactor that
# part of the code since the base classes for ShotgunSearchWidget and SearchWidget
# are not the same, but at least the ShotgunSearchWidget has feature parity.
self.set_placeholder_text("Search")
# dynamically create the clear button so that we can place it over the
# edit widget:
self._clear_btn = QtGui.QPushButton(self)
self._clear_btn.setFocusPolicy(QtCore.Qt.StrongFocus)
self._clear_btn.setFlat(True)
self._clear_btn.setCursor(QtCore.Qt.ArrowCursor)
# Loads the style sheet for the search button.
qss_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "search_style.qss")
with open(qss_file, "rt") as f:
# apply to widget (and all its children)
self._clear_btn.setStyleSheet(f.read())
self._clear_btn.hide()
h_layout = QtGui.QHBoxLayout(self)
h_layout.addStretch()
h_layout.addWidget(self._clear_btn)
h_layout.setContentsMargins(3, 0, 3, 0)
h_layout.setSpacing(0)
self.setLayout(h_layout)
self._clear_btn.clicked.connect(self._on_clear_clicked)
def set_placeholder_text(self, text):
"""
Set the placeholder text for the widget
:param text: The text to use
"""
# Note, setPlaceholderText is only available in recent versions of Qt.
if hasattr(self, "setPlaceholderText"):
self.setPlaceholderText(text)
def set_bg_task_manager(self, task_manager):
"""
Specify the background task manager to use to pull
data in the background. Data calls
to Shotgun will be dispatched via this object.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self.completer().set_bg_task_manager(task_manager)
def _search_edited(self, text):
"""
Called every time the user types something in the search box.
"""
# This will fire _typing_timeout after 300 ms. If the user types something before it fires,
# the timer restarts counting. This differs from the editingFinished event on a QLineEdit which
# fires only when the user presses enter. This fires when the user has finished typing for
# a short period of time.
self._clear_btn.setVisible(bool(text))
self._delay_timer.start(300)
def _typing_timeout(self):
"""
Launches the search in the completer.
"""
self.completer().search(self.text())
def destroy(self):
"""
Should be called before the widget is closed.
"""
self.completer().destroy()
def clear(self):
"""
Clears the search box.
"""
self.setText("")
self._clear_btn.hide()
def _on_clear_clicked(self):
"""
Slot triggered when the clear button is clicked - clears the text
and emits the relevant signals.
"""
self.clear()
def keyPressEvent(self, event):
"""
Clears the line edit when the user hits escape.
"""
if event.key() == QtCore.Qt.Key_Escape:
self.clear()
self.completer().popup().close()
else:
super(ShotgunSearchWidget, self).keyPressEvent(event)
```
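The single-shot ``QTimer`` restart in ``_search_edited`` is a general-purpose debounce: the timeout only fires once the user has paused typing. Here is a self-contained sketch of the same idea, stripped of the completer machinery; the 300 ms interval matches the widget above.

```python
from __future__ import print_function
from PySide.QtCore import QTimer
from PySide.QtGui import QApplication, QLineEdit

app = QApplication([])

line = QLineEdit()
timer = QTimer(line)
timer.setSingleShot(True)
# fires 300 ms after the *last* keystroke; each edit restarts the countdown
timer.timeout.connect(lambda: print("search for:", line.text()))
line.textEdited.connect(lambda _text: timer.start(300))

line.show()
app.exec_()
```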
#### File: shotgun_entities/ui/card_widget.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_ShotgunEntityCardWidget(object):
def setupUi(self, ShotgunEntityCardWidget):
ShotgunEntityCardWidget.setObjectName("ShotgunEntityCardWidget")
ShotgunEntityCardWidget.resize(355, 75)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ShotgunEntityCardWidget.sizePolicy().hasHeightForWidth())
ShotgunEntityCardWidget.setSizePolicy(sizePolicy)
ShotgunEntityCardWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.horizontalLayout_3 = QtGui.QHBoxLayout(ShotgunEntityCardWidget)
self.horizontalLayout_3.setSpacing(1)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.box = QtGui.QFrame(ShotgunEntityCardWidget)
self.box.setFrameShape(QtGui.QFrame.NoFrame)
self.box.setFrameShadow(QtGui.QFrame.Plain)
self.box.setObjectName("box")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.box)
self.horizontalLayout_2.setSpacing(5)
self.horizontalLayout_2.setContentsMargins(2, 2, 2, 2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.box_layout = QtGui.QHBoxLayout()
self.box_layout.setSpacing(10)
self.box_layout.setContentsMargins(0, -1, -1, -1)
self.box_layout.setObjectName("box_layout")
self.left_layout = QtGui.QVBoxLayout()
self.left_layout.setObjectName("left_layout")
self.box_layout.addLayout(self.left_layout)
self.right_layout = QtGui.QVBoxLayout()
self.right_layout.setSpacing(0)
self.right_layout.setContentsMargins(-1, 0, -1, 0)
self.right_layout.setObjectName("right_layout")
self.field_grid_layout = QtGui.QGridLayout()
self.field_grid_layout.setContentsMargins(-1, 4, -1, 4)
self.field_grid_layout.setHorizontalSpacing(5)
self.field_grid_layout.setVerticalSpacing(2)
self.field_grid_layout.setObjectName("field_grid_layout")
self.right_layout.addLayout(self.field_grid_layout)
self.box_layout.addLayout(self.right_layout)
self.horizontalLayout_2.addLayout(self.box_layout)
self.horizontalLayout_3.addWidget(self.box)
self.retranslateUi(ShotgunEntityCardWidget)
QtCore.QMetaObject.connectSlotsByName(ShotgunEntityCardWidget)
def retranslateUi(self, ShotgunEntityCardWidget):
ShotgunEntityCardWidget.setWindowTitle(QtGui.QApplication.translate("ShotgunEntityCardWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
```
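These ``Ui_*`` modules follow the standard ``pyside-uic`` output pattern: a plain object whose ``setupUi`` builds the widget tree onto an existing widget. A sketch of how a widget class typically consumes it; the framework's real ``ShotgunEntityCardWidget`` lives elsewhere, so the class name and import path here are illustrative.

```python
from tank.platform.qt import QtGui
from .ui.card_widget import Ui_ShotgunEntityCardWidget

class MyCardWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # instantiate the generated helper and let it build the layout
        self.ui = Ui_ShotgunEntityCardWidget()
        self.ui.setupUi(self)
        # generated children are then reachable as attributes, e.g.:
        # self.ui.field_grid_layout, self.ui.left_layout
```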
#### File: version_details/ui/version_details_widget.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_VersionDetailsWidget(object):
def setupUi(self, VersionDetailsWidget):
VersionDetailsWidget.setObjectName("VersionDetailsWidget")
VersionDetailsWidget.resize(390, 737)
self.verticalLayout_17 = QtGui.QVBoxLayout(VersionDetailsWidget)
self.verticalLayout_17.setSpacing(0)
self.verticalLayout_17.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.details_title_bar = QtGui.QFrame(VersionDetailsWidget)
self.details_title_bar.setMinimumSize(QtCore.QSize(0, 13))
self.details_title_bar.setMaximumSize(QtCore.QSize(16777215, 13))
self.details_title_bar.setFrameShape(QtGui.QFrame.NoFrame)
self.details_title_bar.setFrameShadow(QtGui.QFrame.Plain)
self.details_title_bar.setLineWidth(0)
self.details_title_bar.setObjectName("details_title_bar")
self.horizontalLayout_9 = QtGui.QHBoxLayout(self.details_title_bar)
self.horizontalLayout_9.setSpacing(3)
self.horizontalLayout_9.setContentsMargins(0, 5, 5, 0)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
spacerItem = QtGui.QSpacerItem(350, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem)
self.float_button = QtGui.QToolButton(self.details_title_bar)
self.float_button.setMaximumSize(QtCore.QSize(8, 8))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/version_details/dock.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
icon.addPixmap(QtGui.QPixmap(":/version_details/undock_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.Off)
icon.addPixmap(QtGui.QPixmap(":/version_details/undock.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
icon.addPixmap(QtGui.QPixmap(":/version_details/undock_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.Off)
icon.addPixmap(QtGui.QPixmap(":/version_details/undock_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
icon.addPixmap(QtGui.QPixmap(":/version_details/dock_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.On)
self.float_button.setIcon(icon)
self.float_button.setCheckable(True)
self.float_button.setAutoRaise(True)
self.float_button.setObjectName("float_button")
self.horizontalLayout_9.addWidget(self.float_button)
self.close_button = QtGui.QToolButton(self.details_title_bar)
self.close_button.setMaximumSize(QtCore.QSize(8, 8))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/version_details/close.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
icon1.addPixmap(QtGui.QPixmap(":/version_details/close_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.Off)
icon1.addPixmap(QtGui.QPixmap(":/version_details/close.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
icon1.addPixmap(QtGui.QPixmap(":/version_details/close_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.Off)
icon1.addPixmap(QtGui.QPixmap(":/version_details/close_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
icon1.addPixmap(QtGui.QPixmap(":/version_details/close_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.On)
self.close_button.setIcon(icon1)
self.close_button.setAutoRaise(True)
self.close_button.setObjectName("close_button")
self.horizontalLayout_9.addWidget(self.close_button)
self.verticalLayout_17.addWidget(self.details_title_bar)
self.pages = QtGui.QStackedWidget(VersionDetailsWidget)
self.pages.setObjectName("pages")
self.main_page = QtGui.QWidget()
self.main_page.setObjectName("main_page")
self.verticalLayout = QtGui.QVBoxLayout(self.main_page)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.entity_tab_widget = QtGui.QTabWidget(self.main_page)
self.entity_tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
self.entity_tab_widget.setStyleSheet("QTabWidget::tab-bar { alignment: center; border: none }")
self.entity_tab_widget.setObjectName("entity_tab_widget")
self.entity_note_tab = QtGui.QWidget()
self.entity_note_tab.setObjectName("entity_note_tab")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.entity_note_tab)
self.verticalLayout_3.setSpacing(2)
self.verticalLayout_3.setContentsMargins(8, 5, 8, 5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.info_layout = QtGui.QHBoxLayout()
self.info_layout.setSpacing(0)
self.info_layout.setContentsMargins(0, 0, -1, 0)
self.info_layout.setObjectName("info_layout")
self.widget = QtGui.QWidget(self.entity_note_tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName("widget")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setContentsMargins(10, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.current_version_card = ShotgunEntityCardWidget(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.current_version_card.sizePolicy().hasHeightForWidth())
self.current_version_card.setSizePolicy(sizePolicy)
self.current_version_card.setObjectName("current_version_card")
self.horizontalLayout_2.addWidget(self.current_version_card)
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setContentsMargins(-1, -1, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSpacing(4)
self.horizontalLayout.setContentsMargins(-1, 0, 2, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.shotgun_nav_button = QtGui.QToolButton(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.shotgun_nav_button.sizePolicy().hasHeightForWidth())
self.shotgun_nav_button.setSizePolicy(sizePolicy)
self.shotgun_nav_button.setMaximumSize(QtCore.QSize(15, 15))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/version_details/navigate_out_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.On)
icon2.addPixmap(QtGui.QPixmap(":/version_details/navigate_out.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
icon2.addPixmap(QtGui.QPixmap(":/version_details/navigate_out_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
self.shotgun_nav_button.setIcon(icon2)
self.shotgun_nav_button.setAutoRaise(True)
self.shotgun_nav_button.setObjectName("shotgun_nav_button")
self.horizontalLayout.addWidget(self.shotgun_nav_button)
self.pin_button = QtGui.QToolButton(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pin_button.sizePolicy().hasHeightForWidth())
self.pin_button.setSizePolicy(sizePolicy)
self.pin_button.setMaximumSize(QtCore.QSize(15, 15))
self.pin_button.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/version_details/tack_hover.png"), QtGui.QIcon.Active, QtGui.QIcon.On)
icon3.addPixmap(QtGui.QPixmap(":/version_details/tack_up.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
icon3.addPixmap(QtGui.QPixmap(":/version_details/tack_hover.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
self.pin_button.setIcon(icon3)
self.pin_button.setCheckable(True)
self.pin_button.setAutoRaise(True)
self.pin_button.setObjectName("pin_button")
self.horizontalLayout.addWidget(self.pin_button)
self.verticalLayout_4.addLayout(self.horizontalLayout)
spacerItem1 = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.horizontalLayout_2.addLayout(self.verticalLayout_4)
self.info_layout.addWidget(self.widget)
self.verticalLayout_3.addLayout(self.info_layout)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setSpacing(2)
self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
spacerItem2 = QtGui.QSpacerItem(40, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem2)
self.more_info_button = QtGui.QToolButton(self.entity_note_tab)
self.more_info_button.setStyleSheet("QToolButton { border: none; background: transparent; }")
self.more_info_button.setCheckable(True)
self.more_info_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
self.more_info_button.setArrowType(QtCore.Qt.NoArrow)
self.more_info_button.setObjectName("more_info_button")
self.horizontalLayout_4.addWidget(self.more_info_button)
self.more_fields_button = QtGui.QToolButton(self.entity_note_tab)
self.more_fields_button.setStyleSheet("QToolButton { border: none; background: transparent; }")
self.more_fields_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.more_fields_button.setObjectName("more_fields_button")
self.horizontalLayout_4.addWidget(self.more_fields_button)
spacerItem3 = QtGui.QSpacerItem(40, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.notes_tab_line = QtGui.QFrame(self.entity_note_tab)
self.notes_tab_line.setFrameShape(QtGui.QFrame.HLine)
self.notes_tab_line.setFrameShadow(QtGui.QFrame.Sunken)
self.notes_tab_line.setObjectName("notes_tab_line")
self.verticalLayout_3.addWidget(self.notes_tab_line)
self.note_stream_widget = ActivityStreamWidget(self.entity_note_tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.note_stream_widget.sizePolicy().hasHeightForWidth())
self.note_stream_widget.setSizePolicy(sizePolicy)
self.note_stream_widget.setStyleSheet("border: none")
self.note_stream_widget.setObjectName("note_stream_widget")
self.verticalLayout_3.addWidget(self.note_stream_widget)
self.entity_tab_widget.addTab(self.entity_note_tab, "")
self.entity_version_tab = QtGui.QWidget()
self.entity_version_tab.setObjectName("entity_version_tab")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.entity_version_tab)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setContentsMargins(5, 0, 5, 5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.version_top_layout = QtGui.QWidget(self.entity_version_tab)
self.version_top_layout.setObjectName("version_top_layout")
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.version_top_layout)
self.horizontalLayout_5.setContentsMargins(0, 2, 0, 2)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setSpacing(10)
self.horizontalLayout_3.setContentsMargins(-1, -1, 0, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setContentsMargins(0, -1, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.version_fields_button = QtGui.QToolButton(self.version_top_layout)
self.version_fields_button.setStyleSheet("QToolButton { border: none; background: transparent; }")
self.version_fields_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.version_fields_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.version_fields_button.setObjectName("version_fields_button")
self.horizontalLayout_6.addWidget(self.version_fields_button)
self.label = QtGui.QLabel(self.version_top_layout)
self.label.setMaximumSize(QtCore.QSize(8, 8))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap(":/version_details/arrow.png"))
self.label.setScaledContents(True)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.horizontalLayout_6.addWidget(self.label)
self.horizontalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.version_sort_button = QtGui.QToolButton(self.version_top_layout)
self.version_sort_button.setStyleSheet("QToolButton { border: none; background: transparent; }")
self.version_sort_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.version_sort_button.setObjectName("version_sort_button")
self.horizontalLayout_7.addWidget(self.version_sort_button)
self.label_2 = QtGui.QLabel(self.version_top_layout)
self.label_2.setMaximumSize(QtCore.QSize(8, 8))
self.label_2.setText("")
self.label_2.setPixmap(QtGui.QPixmap(":/version_details/arrow.png"))
self.label_2.setScaledContents(True)
self.label_2.setObjectName("label_2")
self.horizontalLayout_7.addWidget(self.label_2)
self.horizontalLayout_3.addLayout(self.horizontalLayout_7)
self.horizontalLayout_5.addLayout(self.horizontalLayout_3)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem4)
self.version_search = SearchWidget(self.version_top_layout)
self.version_search.setStyleSheet("background-color: rgb(50,50,50);")
self.version_search.setObjectName("version_search")
self.horizontalLayout_5.addWidget(self.version_search)
self.verticalLayout_2.addWidget(self.version_top_layout)
self.line = QtGui.QFrame(self.entity_version_tab)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.entity_version_view = QtGui.QListView(self.entity_version_tab)
self.entity_version_view.setFocusPolicy(QtCore.Qt.NoFocus)
self.entity_version_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.entity_version_view.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.entity_version_view.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.entity_version_view.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.entity_version_view.setObjectName("entity_version_view")
self.verticalLayout_2.addWidget(self.entity_version_view)
self.entity_tab_widget.addTab(self.entity_version_tab, "")
self.verticalLayout.addWidget(self.entity_tab_widget)
self.pages.addWidget(self.main_page)
self.empty_page = QtGui.QWidget()
self.empty_page.setObjectName("empty_page")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.empty_page)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.empty_label = QtGui.QLabel(self.empty_page)
self.empty_label.setText("")
self.empty_label.setPixmap(QtGui.QPixmap(":/version_details/panel_empty_background.png"))
self.empty_label.setAlignment(QtCore.Qt.AlignCenter)
self.empty_label.setObjectName("empty_label")
self.verticalLayout_5.addWidget(self.empty_label)
self.pages.addWidget(self.empty_page)
self.verticalLayout_17.addWidget(self.pages)
self.retranslateUi(VersionDetailsWidget)
self.pages.setCurrentIndex(0)
self.entity_tab_widget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(VersionDetailsWidget)
def retranslateUi(self, VersionDetailsWidget):
VersionDetailsWidget.setWindowTitle(QtGui.QApplication.translate("VersionDetailsWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.float_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "...", None, QtGui.QApplication.UnicodeUTF8))
self.close_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "...", None, QtGui.QApplication.UnicodeUTF8))
self.shotgun_nav_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "...", None, QtGui.QApplication.UnicodeUTF8))
self.more_info_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "More info", None, QtGui.QApplication.UnicodeUTF8))
self.more_fields_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "Fields...", None, QtGui.QApplication.UnicodeUTF8))
self.entity_tab_widget.setTabText(self.entity_tab_widget.indexOf(self.entity_note_tab), QtGui.QApplication.translate("VersionDetailsWidget", "NOTES", None, QtGui.QApplication.UnicodeUTF8))
self.version_fields_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "Fields", None, QtGui.QApplication.UnicodeUTF8))
self.version_sort_button.setText(QtGui.QApplication.translate("VersionDetailsWidget", "Sort", None, QtGui.QApplication.UnicodeUTF8))
self.entity_tab_widget.setTabText(self.entity_tab_widget.indexOf(self.entity_version_tab), QtGui.QApplication.translate("VersionDetailsWidget", "VERSIONS", None, QtGui.QApplication.UnicodeUTF8))
from ..qtwidgets import ActivityStreamWidget, SearchWidget, ShotgunEntityCardWidget
from . import resources_rc
```
#### File: python/views/edit_selected_widget_delegate.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .widget_delegate import WidgetDelegate
class EditSelectedWidgetDelegate(WidgetDelegate):
"""
Custom delegate that provides a simple mechanism where an actual widget (editor) is
presented for the selected item whilst all other items are simply drawn with a single
widget.
:ivar selection_model: The selection model of the delegate's parent view, if one
existed at the time of the delegate's initialization.
:vartype selection_model: QtGui.QItemSelectionModel
You use this class by subclassing it and implementing the methods:
- :meth:`_get_painter_widget()` - return the widget to be used to paint an index
- :meth:`_on_before_paint()` - set up the widget with the specific data ready to be painted
- :meth:`sizeHint()` - return the size of the widget to be used in the view
If you want to have an interactive widget (editor) for the selected item
then you will also need to implement:
- :meth:`_create_editor_widget()` - return a unique editor instance to be used for editing
- :meth:`_on_before_selection()` - set up the widget with the specific data ready for
interaction
.. note:: If you are using the same widget for all items then you can just implement
the :meth:`_create_widget()` method instead of the separate :meth:`_get_painter_widget()`
and :meth:`_create_editor_widget()` methods.
.. note:: In order for this class to handle selection correctly, it needs to be
attached to the view *after* the model has been attached. (This is
to ensure that it is able to obtain the view's selection model correctly.)
"""
def __init__(self, view):
"""
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
"""
WidgetDelegate.__init__(self, view)
# tracks the currently active cell
self.__current_editor_index = None
# note! Need to have a model connected to the view in order
# to have a selection model.
self.selection_model = view.selectionModel()
if self.selection_model:
self.selection_model.selectionChanged.connect(self._on_selection_changed)
########################################################################################
# implemented by deriving classes
def _on_before_selection(self, widget, model_index, style_options):
"""
This method is called just before a cell is selected. This method should
configure values on the widget (such as labels, thumbnails etc) based on the
data contained in the model index parameter which is being passed.
        :param widget: The QWidget (constructed in :meth:`_create_widget()` or
                       :meth:`_create_editor_widget()`) which will be used for
                       the selected cell.
        :type widget: :class:`~PySide.QtGui.QWidget`
:param model_index: QModelIndex object representing the data of the object that is
about to be drawn.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:param style_options: object containing specifics about the
view related state of the cell.
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
"""
pass
########################################################################################
# 'private' methods that are not meant to be subclassed or called by a deriving class.
def _on_selection_changed(self, selected, deselected):
"""
Signal triggered when someone changes the selection in the view.
:param selected: A list of the indexes in the model that were selected
:type selected: :class:`~PySide.QtGui.QItemSelection`
:param deselected: A list of the indexes in the model that were deselected
:type deselected: :class:`~PySide.QtGui.QItemSelection`
"""
# clean up
if self.__current_editor_index:
self.parent().closePersistentEditor(self.__current_editor_index)
self.__current_editor_index = None
selected_indexes = selected.indexes()
if len(selected_indexes) > 0:
# get the currently selected model index
model_index = selected_indexes[0]
# create an editor widget that we use for the selected item
self.__current_editor_index = model_index
# this will trigger the call to createEditor
self.parent().openPersistentEditor(model_index)
def createEditor(self, parent_widget, style_options, model_index):
"""
Subclassed implementation from QStyledItemDelegate which is
called when an "editor" is set up - the editor is set up
via the openPersistentEditor call and is created upon selection
of an item.
        Normally, for performance, when we draw hundreds of grid cells,
        we use the same QWidget as a brush and simply use it to paint.
        For the currently selected cell, however, we need to be able to interact
        with the widget (e.g. click a button) and therefore we need
        to have a real widget for this.
:param parent_widget: The parent widget to use for the new editor widget
:type parent_widget: :class:`~PySide.QtGui.QWidget`
:param style_options: The style options to use when creating the editor
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param model_index: The index in the data model that will be edited
using this editor
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:returns: An editor widget that will be used to edit this
index
:rtype: :class:`~PySide.QtGui.QWidget`
"""
# create the editor by calling the base method:
editor_widget = WidgetDelegate.createEditor(self, parent_widget, style_options, model_index)
# and set it up to operate on the index:
self._on_before_selection(editor_widget, model_index, style_options)
return editor_widget
def paint(self, painter, style_options, model_index):
"""
Paint method to handle all cells that are not being currently edited.
:param painter: The painter instance to use when painting
:param style_options: The style options to use when painting
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param model_index: The index in the data model that needs to be painted
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
if model_index == self.__current_editor_index:
# avoid painting the index twice!
return
WidgetDelegate.paint(self, painter, style_options, model_index)
```
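The delegate above is meant to be subclassed. As a hedged illustration (the `SimpleLabelDelegate` name, the QLabel widget and the fixed size hint are assumptions, not part of the module), a minimal concrete delegate using the `_create_widget()` shortcut mentioned in the docstring note might look like this:
```python
# Illustrative sketch only -- not part of the shipped module. Assumes the
# EditSelectedWidgetDelegate class defined above and a model whose data()
# returns a displayable value.
from sgtk.platform.qt import QtCore, QtGui

class SimpleLabelDelegate(EditSelectedWidgetDelegate):
    """
    Paints every cell with a shared QLabel and provides a live, interactive
    QLabel editor for the currently selected cell.
    """
    def _create_widget(self, parent):
        # one widget type serves as both the paint "brush" and the editor
        return QtGui.QLabel(parent)
    def _on_before_paint(self, widget, model_index, style_options):
        # copy the model data onto the shared paint widget before drawing
        widget.setText(str(model_index.data()))
    def _on_before_selection(self, widget, model_index, style_options):
        # the persistent editor for the selected cell receives the same data
        widget.setText(str(model_index.data()))
    def sizeHint(self, style_options, model_index):
        return QtCore.QSize(100, 20)

# Per the note in the class docstring, attach the delegate only *after*
# the model, so the delegate can obtain the view's selection model:
#
#     view.setModel(model)
#     view.setItemDelegate(SimpleLabelDelegate(view))
```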
#### File: views/grouped_list_view/group_widget_base.py
```python
import sgtk
from sgtk.platform.qt import QtGui, QtCore
class GroupWidgetBase(QtGui.QWidget):
"""
Base interface for a group widget that will be used in the
:class:`GroupedListView` custom view
:signal toggle_expanded(bool): Emitted when the group's expansion
state is toggled. Includes a boolean
to indicate if the group is expanded or not.
"""
# True if expanded, False if collapsed
toggle_expanded = QtCore.Signal(bool)
def set_item(self, model_idx):
"""
Set the item this widget should be associated with. This should be
implemented in derived classes
:param model_idx: The index of the item in the model
        :type model_idx: :class:`~PySide.QtCore.QModelIndex`
"""
raise NotImplementedError()
def set_expanded(self, expand=True):
"""
Set if this widget is expanded or not. This should be implemented
in derived classes
:param expand: True if the widget should be expanded, False if it
should be collapsed.
:type expand: bool
"""
raise NotImplementedError()
```
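As a hedged illustration of the interface above (the `SimpleGroupWidget` name and the QToolButton header layout are assumptions; only `set_item()`, `set_expanded()` and the `toggle_expanded` signal come from the base class), a minimal concrete group widget might look like this:
```python
# Illustrative sketch only -- not part of the module above.
from sgtk.platform.qt import QtGui

class SimpleGroupWidget(GroupWidgetBase):
    def __init__(self, parent=None):
        GroupWidgetBase.__init__(self, parent)
        self._button = QtGui.QToolButton(self)
        self._button.setCheckable(True)
        # a signal-to-signal connection relays the user's toggle through
        # the toggle_expanded signal declared on the base class
        self._button.toggled.connect(self.toggle_expanded)
        layout = QtGui.QHBoxLayout(self)
        layout.addWidget(self._button)
    def set_item(self, model_idx):
        # use the group's display data as the header label
        self._button.setText(str(model_idx.data()))
    def set_expanded(self, expand=True):
        # sync the check state without re-emitting toggle_expanded
        self._button.blockSignals(True)
        self._button.setChecked(expand)
        self._button.blockSignals(False)
```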
#### File: python/shotgun_data/shotgun_data_retriever.py
```python
import os
import glob
import urllib
import urlparse
import hashlib
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from sgtk import TankError
class ShotgunDataRetriever(QtCore.QObject):
"""
Asynchronous data retriever class which can be used to retrieve data and
thumbnails from Shotgun and from disk thumbnail cache. Uses the
:class:`~task_manager.BackgroundTaskManager` to run tasks in background
threads and emits signals when each query has either completed or failed.
    Requests are queued up using, for example, the :meth:`execute_find()` and
    :meth:`request_thumbnail()` methods.
Requests are executed in the following priority order:
    - First, any thumbnails that are already cached on disk are handled.
    - Next, Shotgun find() queries are handled.
    - Lastly, thumbnail downloads are handled.
The thread will emit work_completed and work_failure signals when
tasks are completed (or fail). The :meth:`clear()` method will
clear the current queue. The currently processing item will finish
processing and may send out signals even after a clear. Make sure you
call the :meth:`stop()` method prior to destruction in order for the
system to gracefully shut down.
:signal work_completed(uid, request_type, data_dict): Emitted every time
a requested task has completed. ``uid`` is a unique id which matches
the unique id returned by the corresponding request call.
``request_type`` is a string denoting the type of request this
event is associated with. ``data_dict`` is a dictionary containing
the payload of the request. It will be different depending on what
type of request it is.
:signal work_failure(uid, error_message): Emitted every time a requested
task has failed. ``uid`` is a unique id which matches the unique
id returned by the corresponding request call.
"""
# syntax: work_completed(uid, request_type, data_dict)
# - uid is a unique id which matches the unique id
# returned by the corresponding request call.
#
# - request_type is a string denoting the type of request
    #   this event is associated with. It can be, for example, "find",
    #   "find_one", "update", "create", "delete", "schema", "nav_expand"
    #   or "thumbnail".
#
# - data_dict is a dictionary containing the payload
# of the request. It will be different depending on
# what type of request it is.
#
    # For find() requests, the data_dict will be of the form
    # {"sg": data }, where data is the data returned by the sg API
    #
    # For thumbnail requests, the data dict will be of the form
    # {"thumb_path": path}, where path is a path to a location
    # on disk where the thumbnail can be accessed.
work_completed = QtCore.Signal(str, str, dict)
# syntax: work_failure(uid, error_message)
# - uid is a unique id which matches the unique id
# returned by the corresponding request call.
# - error message is an error message string.
work_failure = QtCore.Signal(str, str)
# Individual task priorities used when adding tasks to the task manager
# Note: a higher value means more important and will get run before lower
# priority tasks
    # Attachment checks and downloads are more important than thumbnails,
    # as access to that data is often required by the calling code rather
    # than merely a nice-to-have. As a result, these get a bit more priority.
_CHECK_ATTACHMENT_PRIORITY = 55
# thumbnail checks are local disk checks and very fast. These
# are always carried out before any shotgun calls
_CHECK_THUMB_PRIORITY = 50
# the shotgun schema is often useful to have as early on as possible,
# sometimes other shotgun operations also need the shotgun schema
# (and it's typically also cached) so this call has a higher priority
# than the rest of the shotgun calls
_SG_DOWNLOAD_SCHEMA_PRIORITY = 40
# next the priority for any other Shotgun calls (e.g. find, create,
# update, delete, etc.)
_SG_CALL_PRIORITY = 30
# Attachment downloads are not necessarily fast (but might be), but unlike
# thumbnails they will be required for functionality in the calling code.
# As such, we'll give these downloads a bit more priority.
_DOWNLOAD_ATTACHMENT_PRIORITY = 25
# thumbnails are downloaded last as they are considered low-priority
# and can take a relatively significant amount of time
_DOWNLOAD_THUMB_PRIORITY = 20
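    # For reference, the constants above yield this overall execution order,
    # highest priority first: attachment checks (55), thumbnail cache checks
    # (50), schema reads (40), general Shotgun calls (30), attachment
    # downloads (25) and finally thumbnail downloads (20).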
def __init__(self, parent=None, sg=None, bg_task_manager=None):
"""
:param parent: Parent object
:type parent: :class:`~PySide.QtGui.QWidget`
:param sg: Optional Shotgun API Instance
:param bg_task_manager: Optional Task manager
        :type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
"""
QtCore.QObject.__init__(self, parent)
self._bundle = sgtk.platform.current_bundle()
# set up the background task manager:
task_manager = self._bundle.import_module("task_manager")
self._task_manager = bg_task_manager or task_manager.BackgroundTaskManager(parent=self, max_threads=1)
self._owns_task_manager = (bg_task_manager is None)
self._bg_tasks_group = self._task_manager.next_group_id()
self._task_manager.task_completed.connect(self._on_task_completed)
self._task_manager.task_failed.connect(self._on_task_failed)
self._thumb_task_id_map = {}
self._attachment_task_id_map = {}
############################################################################################################
# Public methods
@staticmethod
def download_thumbnail(url, bundle):
"""
        Convenience and compatibility method for quick and easy synchronous thumbnail download.
This will retrieve a shotgun thumbnail given a url - if it already exists in the cache,
a path to it will be returned instantly. If not, it will be downloaded from Shotgun,
placed in the standard cache location on disk and its path will be returned.
This method returns the transcoded version of the thumbnail originally uploaded to
Shotgun. The image returned will always be a fixed-sized jpeg. To retrieve the thumbnail
file in its original format and resolution, use :meth:`ShotgunDataRetriever.download_thumbnail_source`
instead.
This is a helper method meant to make it easy to port over synchronous legacy
code - for a better solution, we recommend using the thumbnail retrieval
that runs in a background thread.
Because Shotgun thumbnail urls have an expiry time, make sure to only
pass urls to this method that have been very recently retrieved via a Shotgun find call.
:param url: The thumbnail url string that is associated with this thumbnail. This is
the field value as returned by a Shotgun query.
:param bundle: App, Framework or Engine object requesting the download.
:returns: A path to the thumbnail on disk.
"""
path_to_cached_thumb, thumb_exists = ShotgunDataRetriever._get_thumbnail_path(
url, bundle
)
if not thumb_exists:
# create folders on disk
bundle.ensure_folder_exists(os.path.dirname(path_to_cached_thumb))
# download using standard core method. This will ensure that
# proxy and connection settings as set in the SG API are used
try:
# Ask sgtk.util.download_url() to append the file type extension
# to the input path_to_cached_thumb to get the full path to the
# cache file.
full_path = sgtk.util.download_url(
bundle.shotgun, url, path_to_cached_thumb, True
)
path_to_cached_thumb = full_path
except TypeError:
# This may be raised if an older version of core is in use
# that doesn't have the final `use_url_extension` arg implemented
# in sgtk.util.download_url() (set to True above). Since the url
# is not being checked for an extension, also revert to the
# previous behavior of _get_thumbnail_path() which hard-coded a
# ".jpeg" extension to the thumbnail file path.
path_to_cached_thumb = "%s.jpeg" % path_to_cached_thumb
sgtk.util.download_url(bundle.shotgun, url, path_to_cached_thumb)
# modify the permissions of the file so it's writeable by others
old_umask = os.umask(0)
try:
os.chmod(path_to_cached_thumb, 0666)
finally:
os.umask(old_umask)
return path_to_cached_thumb
@staticmethod
def download_thumbnail_source(entity_type, entity_id, bundle):
"""
Convenience and compatibility method for quick and easy synchronous thumbnail download.
This will retrieve the source file for a thumbnail given a shotgun entity type and id.
If the resolved thumbnail source file has already been cached, a path to it will be
returned instantly. Otherwise, it will be downloaded from Shotgun and placed in the
standard cache location on disk. The full path to cached thumbnail is returned.
This method returns the thumbnail file in the original format and resolution it was
uploaded to Shotgun as, which should be considered arbitrary. To retrieve a transcoded
fixed-size jpeg version of the thumbnail, use :meth:`ShotgunDataRetriever.download_thumbnail`
instead.
This is a helper method meant to make it easy to port over synchronous legacy
code - for a better solution, we recommend using the thumbnail retrieval
that runs in a background thread.
:param str entity_type: Shotgun entity type with which the thumb is associated.
:param int entity_id: Shotgun entity id with which the thumb is associated.
:param bundle: App, Framework or Engine object requesting the download.
:returns: A path to the thumbnail on disk.
"""
thumb_source_url = urlparse.urlunparse((
bundle.shotgun.config.scheme, bundle.shotgun.config.server,
"/thumbnail/full/%s/%s" % (urllib.quote(str(entity_type)),
urllib.quote(str(entity_id))), None, None, None
))
path_to_cached_thumb, thumb_exists = ShotgunDataRetriever._get_thumbnail_path(
thumb_source_url, bundle
)
if not thumb_exists:
# create folders on disk
bundle.ensure_folder_exists(os.path.dirname(path_to_cached_thumb))
# download using standard core method. This will ensure that
# proxy and connection settings as set in the SG API are used.
# Allow the core method to determine the file type extension
# for the url about to be downloaded. Capture the full path to the
# thumbnail file as returned by sgtk.util.download_url().
try:
full_path = sgtk.util.download_url(
bundle.shotgun, thumb_source_url, path_to_cached_thumb, True
)
path_to_cached_thumb = full_path
except TypeError, e:
# This may be raised if an older version of core is in use
# that doesn't have the final `use_url_extension` arg implemented
# in sgtk.util.download_url() (set to True above). Since the source
# thumbnail url spec does not contain the file type extension, there
# is no way to determine the proper file name to download to.
# Raise a TankError indicating that a newer version of core must be
# used in conjunction with this method.
raise TankError(
"Caught error: \n%s\n"
"Unable to download source thumbnail URL '%s' because the "
"file type extension cannot be determined. Must update to a "
"newer version of core to use ShotgunDataRetriever."
"download_thumbnail_source()." % (e, thumb_source_url)
)
# modify the permissions of the file so it's writeable by others
old_umask = os.umask(0)
try:
os.chmod(path_to_cached_thumb, 0666)
finally:
os.umask(old_umask)
return path_to_cached_thumb
def start(self):
"""
Start the retriever thread.
:raises: TankError if there is no :class:`~task_manager.BackgroundTaskManager` associated with this instance
"""
if not self._task_manager:
raise TankError("Unable to start the ShotgunDataRetriever as it has no BackgroundTaskManager!")
self._task_manager.start_processing()
def stop(self):
"""
Gracefully stop the receiver.
Once stop() has been called, the object needs to be discarded.
This is a blocking call. It will synchronously wait
until any potential currently processing item has completed.
Note that once stopped the data retriever can't be restarted as the handle to the
:class:`~task_manager.BackgroundTaskManager` instance is released.
"""
if not self._task_manager:
return
if self._owns_task_manager:
# we own the task manager so we'll need to completely shut it down before
# returning
self._task_manager.shut_down()
self._task_manager = None
else:
# we don't own the task manager so just stop any tasks we might be running
# and disconnect from it:
self._task_manager.stop_task_group(self._bg_tasks_group)
# make sure we don't get exceptions trying to disconnect if the
# signals were never connected or somehow disconnected externally.
try:
self._task_manager.task_completed.disconnect(self._on_task_completed)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_warning(
"Could not disconnect '_on_task_completed' slot from the "
"task manager's 'task_completed' signal: %s" % (e,)
)
try:
self._task_manager.task_failed.disconnect(self._on_task_failed)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_debug(
"Could not disconnect '_on_task_failed' slot from the "
"task manager's 'task_failed' signal: %s" % (e,)
)
self._task_manager = None
def clear(self):
"""
Clears the queue.
Any currently processing item will complete without interruption, and signals will be
sent out for these items.
"""
if not self._task_manager:
return
# stop any tasks running in the task group:
self._task_manager.stop_task_group(self._bg_tasks_group)
def stop_work(self, task_id):
"""
Stop the specified task
:param task_id: The task to stop
"""
if not self._task_manager:
return
# stop the task:
self._task_manager.stop_task(task_id)
def get_schema(self, project_id=None):
"""
Execute the schema_read and schema_entity_read methods asynchronously
:param project_id: If specified, the schema listing returned will
be constrained by the schema settings for
the given project.
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_get_schema,
priority = ShotgunDataRetriever._SG_DOWNLOAD_SCHEMA_PRIORITY,
task_kwargs = {"project_id":project_id})
def execute_find(self, *args, **kwargs):
"""
Executes a Shotgun find query asynchronously.
This method takes the same parameters as the Shotgun find() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun find() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun find() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_find,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_find_one(self, *args, **kwargs):
"""
Executes a Shotgun find_one query asynchronously.
This method takes the same parameters as the Shotgun find_one() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun find_one() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun find_one() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_find_one,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_update(self, *args, **kwargs):
"""
Execute a Shotgun update call asynchronously
This method takes the same parameters as the Shotgun update() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun update() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun update() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_update,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_create(self, *args, **kwargs):
"""
Execute a Shotgun create call asynchronously
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
This method takes the same parameters as the Shotgun create() call.
:param ``*args``: args to be passed to the Shotgun create() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun create() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_create,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_delete(self, *args, **kwargs):
"""
Execute a Shotgun delete call asynchronously
This method takes the same parameters as the Shotgun delete() call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun delete() call
:param ``**kwargs``: Named parameters to be passed to the Shotgun delete() call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(self._task_execute_delete,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args = args,
task_kwargs = kwargs)
def execute_method(self, method, *args, **kwargs):
"""
        Executes an arbitrary method asynchronously. This is essentially a
wrapper for executing a task through the :class:`~task_manager.BackgroundTaskManager`.
        The specified method will be called in the following form::
            method(sg, data)
        where ``sg`` is a Shotgun API instance and ``data`` is typically
        a dictionary with specific data that the method needs.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param method: The method that should be executed.
:param ``*args``: args to be passed to the method
:param ``**kwargs``: Named parameters to be passed to the method
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
# note that as the 'task' is actually going to call through to another method, we
# encode the method name, args and kwargs in the task's kwargs dictionary as this
# keeps them nicely encapsulated.
task_kwargs = {"method":method, "method_args":args, "method_kwargs":kwargs}
return self._add_task(self._task_execute_method,
priority = ShotgunDataRetriever._SG_CALL_PRIORITY,
task_kwargs = task_kwargs)
def execute_text_search(self, *args, **kwargs):
"""
Executes a Shotgun ``text_search`` query asynchronously.
See the python api documentation here:
https://github.com/shotgunsoftware/python-api/wiki
This method takes the same parameters as the Shotgun ``text_search()`` call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun ``text_search()`` call
:param ``**kwargs``: Named parameters to be passed to the Shotgun ``text_search()`` call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(
self._task_execute_text_search,
priority=ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args=args,
task_kwargs=kwargs
)
def execute_nav_expand(self, *args, **kwargs):
"""
Executes a Shotgun ``nav_expand`` query asynchronously.
See the python api documentation here:
https://github.com/shotgunsoftware/python-api/wiki
This method takes the same parameters as the Shotgun ``nav_expand()`` call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun ``nav_expand()`` call
:param ``**kwargs``: Named parameters to be passed to the Shotgun ``nav_expand()`` call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(
self._task_execute_nav_expand,
priority=ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args=args,
task_kwargs=kwargs
)
def execute_nav_search_string(self, *args, **kwargs):
"""
Executes a Shotgun ``nav_search_string`` query asynchronously.
See the python api documentation here:
https://github.com/shotgunsoftware/python-api/wiki
This method takes the same parameters as the Shotgun ``nav_search_string()`` call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun ``nav_search_string()`` call
:param ``**kwargs``: Named parameters to be passed to the Shotgun ``nav_search_string()`` call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(
self._task_execute_nav_search_string,
priority=ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args=args,
task_kwargs=kwargs
)
def execute_nav_search_entity(self, *args, **kwargs):
"""
Executes a Shotgun ``nav_search_entity`` query asynchronously.
See the python api documentation here:
https://github.com/shotgunsoftware/python-api/wiki
This method takes the same parameters as the Shotgun ``nav_search_entity()`` call.
The query will be queued up and once processed, either a
work_completed or work_failure signal will be emitted.
:param ``*args``: args to be passed to the Shotgun ``nav_search_entity()`` call
:param ``**kwargs``: Named parameters to be passed to the Shotgun ``nav_search_entity()`` call
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
return self._add_task(
self._task_execute_nav_search_entity,
priority=ShotgunDataRetriever._SG_CALL_PRIORITY,
task_args=args,
task_kwargs=kwargs
)
def _add_task(self, task_cb, priority, task_args=None, task_kwargs=None):
"""
Simplified wrapper to add a task to the task manager. All tasks get added into
the same group (self._bg_tasks_group) and the returned task_id is cast to a string
to retain backwards compatibility (it used to return a uuid string).
:param task_cb: The function to execute for the task
:param priority: The priority the task should be run with
:param task_args: Arguments that should be passed to the task callback
:param task_kwargs: Named arguments that should be passed to the task callback
:returns: String representation of the task id
:raises: TankError if there is no task manager available to add the task to!
"""
if not self._task_manager:
raise TankError("Data retriever does not have a task manager to add the task to!")
task_id = self._task_manager.add_task(task_cb,
priority,
group = self._bg_tasks_group,
task_args = task_args,
task_kwargs = task_kwargs)
return str(task_id)
def request_attachment(self, attachment_entity):
"""
Downloads an attachment from Shotgun asynchronously or returns a cached
file path if found.
.. note:: The provided Attachment entity definition must contain, at a
minimum, the "this_file" substructure.
.. code-block:: python
{
"id": 597,
"this_file": {
"content_type": "image/png",
"id": 597,
"link_type": "upload",
"name": "test.png",
"type": "Attachment",
"url": "https://abc.shotgunstudio.com/file_serve/attachment/597"
},
"type": "Attachment"
}
:param dict attachment_entity: The Attachment entity to download data from.
:returns: A unique identifier representing this request.
"""
if not self._task_manager:
self._bundle.log_warning(
"No task manager has been associated with this data retriever. "
"Unable to request attachment."
)
return
# always add check for attachments already downloaded:
check_task_id = self._task_manager.add_task(
self._task_check_attachment,
priority=self._CHECK_ATTACHMENT_PRIORITY,
group=self._bg_tasks_group,
task_kwargs=dict(attachment_entity=attachment_entity),
)
# Add download thumbnail task. This is dependent on the check task above and will be passed
# the returned results from that task in addition to the kwargs specified below. This allows
# a task dependency chain to be created with different priorities for the separate tasks.
dl_task_id = self._task_manager.add_task(
self._task_download_attachment,
upstream_task_ids=[check_task_id],
priority=self._DOWNLOAD_ATTACHMENT_PRIORITY,
group=self._bg_tasks_group,
task_kwargs=dict(attachment_entity=attachment_entity),
)
        # all results for requesting an attachment should be returned with the same id so use
        # a mapping to track the 'primary' task id:
self._attachment_task_id_map[dl_task_id] = check_task_id
return str(check_task_id)
def request_thumbnail(self, url, entity_type, entity_id, field, load_image=False):
"""
Downloads a thumbnail from Shotgun asynchronously or returns a cached thumbnail
if found. Optionally loads the thumbnail into a QImage.
:param url: The thumbnail url string that is associated with this thumbnail. This is
the field value as returned by a Shotgun query.
:param entity_type: Shotgun entity type with which the thumb is associated.
:param entity_id: Shotgun entity id with which the thumb is associated.
:param field: Thumbnail field. Normally 'image' but could also for example be a deep
link field such as ``sg_sequence.Sequence.image``
:param load_image: If set to True, the return data structure will contain a QImage object
with the image data loaded.
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
if not self._task_manager:
self._bundle.log_warning(
"No task manager has been associated with this data retriever. "
"Unable to request thumbnail."
)
return
# always add check for thumbnail already downloaded:
check_task_id = self._task_manager.add_task(self._task_check_thumbnail,
priority = self._CHECK_THUMB_PRIORITY,
group = self._bg_tasks_group,
task_kwargs = {"url":url,
"load_image":load_image})
# Add download thumbnail task. This is dependent on the check task above and will be passed
# the returned results from that task in addition to the kwargs specified below. This allows
# a task dependency chain to be created with different priorities for the separate tasks.
dl_task_id = self._task_manager.add_task(self._task_download_thumbnail,
upstream_task_ids = [check_task_id],
priority = self._DOWNLOAD_THUMB_PRIORITY,
group = self._bg_tasks_group,
task_kwargs = {"url":url,
"entity_type":entity_type,
"entity_id":entity_id,
"field":field,
"load_image":load_image
#"thumb_path":<passed from check task>
#"image":<passed from check task>
})
# all results for requesting a thumbnail should be returned with the same id so use
# a mapping to track the 'primary' task id:
self._thumb_task_id_map[dl_task_id] = check_task_id
return str(check_task_id)
def request_thumbnail_source(self, entity_type, entity_id, load_image=False):
"""
Downloads a thumbnail from Shotgun asynchronously or returns a cached thumbnail
if found. Optionally loads the thumbnail into a QImage.
:param entity_type: Shotgun entity type with which the thumb is associated.
:param entity_id: Shotgun entity id with which the thumb is associated.
:param load_image: If set to True, the return data structure will contain a
QImage object with the image data loaded.
:returns: A unique identifier representing this request. This
identifier is also part of the payload sent via the
work_completed and work_failure signals, making it
possible to match them up.
"""
# construct the url that refers to the thumbnail's source image
thumb_source_url = urlparse.urlunparse((
self._bundle.shotgun.config.scheme, self._bundle.shotgun.config.server,
"/thumbnail/full/%s/%s" % (
urllib.quote(str(entity_type)),
urllib.quote(str(entity_id))
), None, None, None
))
return self.request_thumbnail(
thumb_source_url, entity_type, entity_id, None, load_image
)
# ------------------------------------------------------------------------------------------------
# Background task management and methods
def _download_url(self, file_path, url, entity_type, entity_id, field):
"""
Downloads a file located at the given url to the provided file path.
:param str file_path: The target path.
:param str url: The url location of the file to download.
:param str entity_type: The Shotgun entity type that the url is
associated with. In the event that the
provided url has expired, the entity
type and id provided will be used to query
a fresh url.
:param int entity_id: The Shotgun entity id that the url is
associated with. In the event that the
provided url has expired, the entity type and
id provided will be used to query a fresh url.
:param str field: The name of the field that contains the url. If
the url needs to be requeried, this field will be
where the fresh url is pulled from.
        :returns: Full path to the downloaded file. This value may be different
than the input `file_path` if the resolved url's extension
differed from what was specified.
"""
try:
# download using standard core method. This will ensure that
# proxy and connection settings as set in the SG API are used
try:
# Ask sgtk.util.download_url() to append the file type extension
# to the input file_path to get the full path to the cache file.
download_path = sgtk.util.download_url(
self._bundle.shotgun, url, file_path, True
)
file_path = download_path
except TypeError:
# This may be raised if an older version of core is in use
# that doesn't have the final `use_url_extension` arg implemented
# in sgtk.util.download_url() (set to True above). Since the url
# is not being checked for an extension, also revert to the
# previous behavior of _get_thumbnail_path() which hard-coded a
# ".jpeg" extension to the thumbnail file path.
file_path = "%s.jpeg" % file_path
sgtk.util.download_url(self._bundle.shotgun, url, file_path)
except TankError, e:
if field is not None:
sg_data = self._bundle.shotgun.find_one(
entity_type,
[["id", "is", entity_id]],
[field],
)
if sg_data is None or sg_data.get(field) is None:
# This means there's nothing in Shotgun for this field, which
# means we can't download anything.
raise IOError(
"Field %s does not contain data for %s (id=%s)." % (
field,
entity_type,
entity_id,
)
)
else:
# Again, download using standard core method. This will ensure that
# proxy and connection settings as set in the SG API are used.
url = sg_data[field]
try:
# Ask sgtk.util.download_url() to append the file type extension
# to the input file_path to get the full path to the cache file.
download_path = sgtk.util.download_url(
self._bundle.shotgun, url, file_path, True
)
file_path = download_path
except TypeError:
# This may be raised if an older version of core is in use
# that doesn't have the final `use_url_extension` arg implemented
# in sgtk.util.download_url() (set to True above). Since the url
# is not being checked for an extension, also revert to the
# previous behavior of _get_thumbnail_path() which hard-coded a
# ".jpeg" extension to the thumbnail file path.
file_path = "%s.jpeg" % file_path
sgtk.util.download_url(self._bundle.shotgun, url, file_path)
# now we have a thumbnail on disk, either via the direct download, or via the
# url-fresh-then-download approach. Because the file is downloaded with user-only
# permissions we have to modify the permissions so that it's writeable by others
old_umask = os.umask(0)
try:
os.chmod(file_path, 0666)
finally:
os.umask(old_umask)
return file_path
@staticmethod
def _get_attachment_path(attachment_entity, bundle):
"""
Returns the location on disk suitable for an attachment file.
:param dict attachment_entity: The Attachment entity definition.
:param bundle: App, Engine or Framework instance
:returns: Path as a string.
"""
url = attachment_entity["this_file"]["url"]
file_name = attachment_entity["this_file"]["name"]
directory_path, path_exists = ShotgunDataRetriever._get_thumbnail_path(
url,
bundle,
directory_only=True,
)
return os.path.join(directory_path, file_name)
@staticmethod
def _get_thumbnail_path(url, bundle, directory_only=False):
"""
Returns the location on disk suitable for a thumbnail given its url and
whether a cached file for the specified ``url`` already exists. Two cases
are handled:
Case A: ``directory_only`` is set to False and the ``url`` cache file does not exist:
>>> (path, cache_exists) = _get_thumbnail_path("https://foo/bar/baz.jpg")
Where return data ``(path, cache_exists) = ('/tmp/xx/yy/1245/6678', False)``
This will always return a file path without an extension. Since the cache
file does not exist, download it using sgtk.util.download_url(), setting
the ``use_url_extension`` arg to True, which will return the full path to the
cached file:
>>> full_path = sgtk.util.download_url(sg, "https://foo/bar/baz.jpg", path, True)
Where ``full_path`` now contains a file extension: /tmp/xx/yy/1245/6678.jpg
Case B: ``directory_only`` is set to False and the ``url`` cache file does exist:
>>> (path, cache_exists) = _get_thumbnail_path("https://foo/bar/baz.jpg")
Where return data ``(path, cache_exists) = ('/tmp/xx/yy/1245/6678.jpg', True)``
This will always return the full path to the cached file, so no need to
        do any additional work.
:param str url: Path to a thumbnail
:param bundle: App, Engine or Framework instance
:param bool directory_only: Whether to return a directory path or a
full file path. Default is False, which
indicates a full file path, including
file name, will be returned.
        :returns: Tuple (str, bool) of the directory or full file path as a
                  string and whether a cached thumbnail already exists on
                  disk. Returns ``None`` if no ``url`` was supplied.
"""
# If we don't have a URL, then we know we don't
# have a thumbnail to worry about.
if not url:
return None
# hash the path portion of the thumbnail url
url_obj = urlparse.urlparse(url)
url_hash = hashlib.md5()
url_hash.update(str(url_obj.path))
hash_str = url_hash.hexdigest()
# Now turn this hash into a tree structure. For a discussion about sensible
# sharding methodology, see
# http://stackoverflow.com/questions/13841931/using-guids-as-folder-names-splitting-up
#
# From the hash, generate paths on the form C1C2/C3C4/rest_of_hash
# (where C1 is the first character of the hash). For a million evenly distributed
# items, this means ~15 items per folder.
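        # Illustrative example (hash value is made up): a hash of
        # "a1b2c3d4..." yields the relative path a1/b2/c3d4..., and the
        # file type extension is appended later at download time.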
first_folder = hash_str[0:2]
second_folder = hash_str[2:4]
# Establish the cache path directory
cache_path_items = [
bundle.cache_location, "thumbs", first_folder, second_folder
]
cached_thumb_exists = False
# If we were only asked to give back a directory path then we can
# skip building and appending a file name.
if not directory_only:
# Look for an existing cache file. Use the glob module since
# we do not know what the file type of the cache file is.
path_base = hash_str[4:]
cache_base = os.path.join(*(cache_path_items + [path_base]))
# Attempt to match something that looks like:
# /bundle_cache_location/thumbs/C1C2/C3C4/rest_of_hash.*
cache_matches = glob.glob("%s.*" % cache_base)
if len(cache_matches):
if len(cache_matches) > 1:
# If somehow more than one cache file exists, the wrong icon may be displayed.
# Log some information about how to resolve this problem.
bundle.log_debug("More than one cached file found for url '%s':" % url)
                for cache_match in cache_matches:
                    bundle.log_debug("    %s" % cache_match)
bundle.log_debug("Using '%s'. "
"If this is incorrect, manually remove the undesired cache file." %
cache_matches[0]
)
# Cache file exists, so append the full file name (e.g. rest_of_hash.png)
cache_path_items.append(os.path.basename(cache_matches[0]))
cached_thumb_exists = True
else:
# Cache file does not exist, so only append the basename of the cached
# thumbnail that does NOT include the file type extension (e.g. rest_of_hash).
# The extension will be appended later by a call to sgtk.util.download_url()
cache_path_items.append(path_base)
# Join up the path cache items which result in either a directory like
# '/bundle_cache_location/thumbs/C1C2/C3C4' or a file path like
# '/bundle_cache_location/thumbs/C1C2/C3C4/rest_of_hash' if the cache file
# does not exist or '/bundle_cache_location/thumbs/C1C2/C3C4/rest_of_hash.ext'
# if it does.
path_to_cached_thumb = os.path.join(*cache_path_items)
return (path_to_cached_thumb, cached_thumb_exists)
def _task_get_schema(self, project_id):
"""
Method that gets executed in a background task/thread to retrieve the fields
and types schema from Shotgun
:param project_id: The id of the project to query the schema for or None to
retrieve for all projects
:returns: Dictionary containing the 'action' together with the schema
fields and types
"""
if project_id is not None:
project = {"type": "Project", "id": project_id}
else:
project = None
# read in details about all fields
sg_field_schema = self._bundle.shotgun.schema_read(project)
# and read in details about all entity types
sg_type_schema = self._bundle.shotgun.schema_entity_read(project)
# need to wrap it in a dict not to confuse pyqt's signals and type system
return {"action":"schema", "fields":sg_field_schema, "types":sg_type_schema}
def _task_execute_find(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
find query
:param ``*args``: Unnamed arguments to be passed to the find() call
:param ``**kwargs``: Named arguments to be passed to the find() call
:returns: Dictionary containing the 'action' together with result
returned by the find() call
"""
sg_res = self._bundle.shotgun.find(*args, **kwargs)
return {"action": "find", "sg_result": sg_res}
def _task_execute_find_one(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
find_one query
:param ``*args``: Unnamed arguments to be passed to the find_one() call
:param ``**kwargs``: Named arguments to be passed to the find_one() call
:returns: Dictionary containing the 'action' together with result
returned by the find_one() call
"""
sg_res = self._bundle.shotgun.find_one(*args, **kwargs)
return {"action": "find_one", "sg_result": sg_res}
def _task_execute_update(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
update call
:param ``*args``: Unnamed arguments to be passed to the update() call
:param ``**kwargs``: Named arguments to be passed to the update() call
:returns: Dictionary containing the 'action' together with result
returned by the update() call
"""
sg_res = self._bundle.shotgun.update(*args, **kwargs)
return {"action": "update", "sg_result": sg_res}
def _task_execute_create(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
create call
:param ``*args``: Unnamed arguments to be passed to the create() call
:param ``**kwargs``: Named arguments to be passed to the create() call
:returns: Dictionary containing the 'action' together with result
returned by the create() call
"""
sg_res = self._bundle.shotgun.create(*args, **kwargs)
return {"action": "create", "sg_result": sg_res}
def _task_execute_delete(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
delete call
:param ``*args``: Unnamed arguments to be passed to the delete() call
:param ``**kwargs``: Named arguments to be passed to the delete() call
:returns: Dictionary containing the 'action' together with result
returned by the delete() call
"""
sg_res = self._bundle.shotgun.delete(*args, **kwargs)
return {"action": "delete", "sg_result": sg_res}
def _task_execute_method(self, method, method_args, method_kwargs):
"""
Method that gets executed in a background task/thread to execute a method
with a thread-specific shotgun connection.
:param method: The method to be run asynchronously
:param method_args: Arguments to be passed to the method
:param method_kwargs: Named arguments to be passed to the method
:returns: Dictionary containing the 'action' together with the result
returned by the method
"""
res = method(self._bundle.shotgun, *method_args, **method_kwargs)
return {"action": "method", "result": res}
def _task_execute_text_search(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
``text_search`` query
:param ``*args``: Unnamed arguments to be passed to the ``text_search()`` call
:param ``**kwargs``: Named arguments to be passed to the ``text_search()`` call
:returns: Dictionary containing the 'action' together with result
                  returned by the text_search() call
"""
sg_res = self._bundle.shotgun.text_search(*args, **kwargs)
return {"action": "text_search", "sg_result": sg_res}
def _task_execute_nav_expand(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
``nav_expand`` query
:param ``*args``: Unnamed arguments to be passed to the ``nav_expand()`` call
:param ``**kwargs``: Named arguments to be passed to the ``nav_expand()`` call
:returns: Dictionary containing the 'action' together with result
                  returned by the nav_expand() call
"""
sg_res = self._bundle.shotgun.nav_expand(*args, **kwargs)
return {"action": "nav_expand", "sg_result": sg_res}
def _task_execute_nav_search_string(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
``nav_search_string`` query
:param ``*args``: Unnamed arguments to be passed to the ``nav_search_string()`` call
:param ``**kwargs``: Named arguments to be passed to the ``nav_search_string()`` call
:returns: Dictionary containing the 'action' together with result
                  returned by the nav_search_string() call
"""
try:
sg_res = self._bundle.shotgun.nav_search_string(*args, **kwargs)
except AttributeError:
# running an older core which doesn't come with a
# sg API which has a nav_search_string() method
sg_res = []
return {"action": "nav_search_string", "sg_result": sg_res}
def _task_execute_nav_search_entity(self, *args, **kwargs):
"""
Method that gets executed in a background task/thread to perform a Shotgun
``nav_search_entity`` query
:param ``*args``: Unnamed arguments to be passed to the ``nav_search_entity()`` call
:param ``**kwargs``: Named arguments to be passed to the ``nav_search_entity()`` call
:returns: Dictionary containing the 'action' together with result
                  returned by the nav_search_entity() call
"""
# FIXME: Project can't be resolved with the API right now due to a bug on the Shotgun-side.
# Mock the call instead.
if args[1]["type"] == "Project":
project_id = args[1]["id"]
sg_data = self._bundle.shotgun.find_one("Project", [["id", "is", project_id]], ["name"])
sg_res = [
{
"incremental_path": ["/Project/%d" % project_id],
"label": sg_data["name"],
"path_label": "",
"project_id": project_id,
"ref": sg_data
}
]
else:
try:
sg_res = self._bundle.shotgun.nav_search_entity(*args, **kwargs)
except AttributeError:
                # running an older core which doesn't come with a
                # sg API which has a nav_search_entity() method
sg_res = []
return {"action": "nav_search_entity", "sg_result": sg_res}
def _task_check_attachment(self, attachment_entity):
"""
Check to see if an attachment file exists for the specified Attachment
entity.
:param dict attachment_entity: The Attachment entity definition.
:returns: A dictionary containing the cached path for the specified
Attachment entity.
"""
url = attachment_entity["this_file"]["url"]
file_name = attachment_entity["this_file"]["name"]
data = dict(action="check_attachment", file_path=None)
if not url or not file_name:
return data
file_path = self._get_attachment_path(
attachment_entity,
self._bundle,
)
if file_path and os.path.exists(file_path):
data["file_path"] = file_path
return data
def _task_check_thumbnail(self, url, load_image):
"""
Check to see if a thumbnail exists for the specified url. If it does then it is returned.
:param url: The url to return the cached path for
:param load_image: If True then if the thumbnail is found in the cache then the file will
be loaded into a QImage
:returns: A dictionary containing the cached path for the specified url and a QImage
if load_image is True and the thumbnail exists in the cache.
"""
# If there's no URL then we definitely won't be finding
# a thumbnail.
if not url:
return {"action":"check_thumbnail", "thumb_path":None, "image":None}
# first look up the path in the cache:
thumb_path, thumb_exists = ShotgunDataRetriever._get_thumbnail_path(url, self._bundle)
thumb_image = None
if thumb_exists:
if load_image:
# load the thumbnail into a QImage:
thumb_image = QtGui.QImage()
thumb_image.load(thumb_path)
else:
thumb_path = None
return {"action":"check_thumbnail", "thumb_path":thumb_path, "image":thumb_image}
def _task_download_attachment(self, file_path, attachment_entity, **kwargs):
"""
Download the specified attachment. This downloads the file associated with
the provided Attachment entity into the framework's cache directory structure
and returns the cached path.
:param str file_path: The target file path to download to.
:param dict attachment_entity: The Attachment entity definition.
:returns: A dictionary containing the cached path for the specified
Attachment entity, as well as an action identifier that
marks the data as having come from a "download_attachment"
task.
"""
if file_path:
return {}
file_path = self._get_attachment_path(attachment_entity, self._bundle)
if not file_path:
return {}
self._bundle.ensure_folder_exists(os.path.dirname(file_path))
if not os.path.exists(file_path):
self._bundle.shotgun.download_attachment(
attachment=attachment_entity,
file_path=file_path,
)
return dict(
action="download_attachment",
file_path=file_path,
)
def _task_download_thumbnail(self, thumb_path, url, entity_type, entity_id, field, load_image, **kwargs):
"""
Download the thumbnail for the specified entity type, id and field. This downloads the
thumbnail into the thumbnail cache directory and returns the cached path.
        If thumb_path already contains a path then the thumbnail was already found
        by the upstream check task, so this method returns an empty result without
        further checking/work.
:param thumb_path: Path to an existing thumbnail or None.
:param url: The url for the thumbnail which may or may not still be valid!
:param entity_type: Type of the entity to retrieve the thumbnail for
:param entity_id: Id of the entity to retrieve the thumbnail for
:param field: The field on the entity that holds the url for the thumbnail to retrieve
:param load_image: If True then if the thumbnail is downloaded from Shotgun then the file will
be loaded into a QImage
:returns: A dictionary containing the cached path for the specified url and a QImage
if load_image is True and the thumbnail exists in the cache.
"""
if thumb_path:
# no need to do anything as the thumbnail was previously
# found when we ran the check!
return {}
# download the actual thumbnail. Because of S3, the url
# may have expired - in that case fall back, get a fresh url
# from shotgun and try again
thumb_path, thumb_exists = self._get_thumbnail_path(url, self._bundle)
# If we have no path, then there's no thumbnail that exists.
if not thumb_path:
return {}
        # there may be a case where another process has already downloaded the thumbnail for us, so
# make sure that we aren't doing any extra work :)
if not thumb_exists:
self._bundle.ensure_folder_exists(os.path.dirname(thumb_path))
# try to download based on the path we have
try:
thumb_path = self._download_url(thumb_path, url, entity_type, entity_id, field)
except IOError:
thumb_path = None
# finally, see if we should also load in the image
thumb_image = None
if thumb_path:
if load_image:
# load the thumbnail into a QImage:
thumb_image = QtGui.QImage()
thumb_image.load(thumb_path)
else:
thumb_path = None
return dict(
action="download_thumbnail",
thumb_path=thumb_path,
image=thumb_image,
)
def _on_task_completed(self, task_id, group, result):
"""
Slot triggered when a task is completed.
:param task_id: The id of the task that has completed
:param group: The group the task belongs to
:param result: The task result
"""
if group != self._bg_tasks_group:
            # ignore - it isn't our task! - this slot will receive signals for tasks started
# by other objects/instances so we need to make sure we filter them out here
return
action = result.get("action")
if action in [
"find", "find_one", "create", "delete", "update",
"nav_expand", "nav_search_string", "text_search"
]:
self.work_completed.emit(
str(task_id),
action,
{"sg": result["sg_result"]}
)
elif action == "schema":
self.work_completed.emit(
str(task_id),
"schema",
{"fields": result["fields"], "types": result["types"]}
)
elif action == "method":
self.work_completed.emit(
str(task_id),
"method",
{"return_value": result["result"]}
)
elif action == "check_thumbnail":
path = result.get("thumb_path", "")
if path:
# check found a thumbnail!
self.work_completed.emit(
str(task_id),
"check_thumbnail",
{"thumb_path": path, "image": result["image"]}
)
elif action == "download_thumbnail":
# look up the primary thumbnail task id in the map:
thumb_task_id = self._thumb_task_id_map.get(task_id)
if thumb_task_id is not None:
del self._thumb_task_id_map[task_id]
self.work_completed.emit(
str(thumb_task_id),
"download_thumbnail",
{"thumb_path": result["thumb_path"], "image": result["image"]}
)
elif action == "check_attachment":
path = result.get("file_path", "")
if path:
self.work_completed.emit(
str(task_id),
"check_attachment",
{"file_path": path},
)
elif action == "download_attachment":
attachment_task_id = self._attachment_task_id_map.get(task_id)
if attachment_task_id is not None:
del self._attachment_task_id_map[task_id]
self.work_completed.emit(
str(attachment_task_id),
"download_attachment",
{"file_path": result["file_path"]},
)
def _on_task_failed(self, task_id, group, msg, tb):
"""
Slot triggered when a task fails for some reason
:param task_id: The id of the task that failed
:param msg: The error/exception message for the failed task
:param tb: The stack trace of the exception raised by the failed task
"""
if group != self._bg_tasks_group:
            # ignore - it isn't our task - this slot will receive signals for tasks started
# by other objects/instances so we need to make sure we filter them out here
return
# remap task ids for thumbnails:
if task_id in self._thumb_task_id_map:
orig_task_id = task_id
task_id = self._thumb_task_id_map[task_id]
del self._thumb_task_id_map[orig_task_id]
# remap task ids for attachments:
if task_id in self._attachment_task_id_map:
orig_task_id = task_id
task_id = self._attachment_task_id_map[task_id]
del self._attachment_task_id_map[orig_task_id]
# emit failure signal:
self.work_failure.emit(str(task_id), msg)
```
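The slots above close the loop on the retriever's asynchronous round trip: a request method returns a task id immediately, and results come back later via the `work_completed` and `work_failure` signals. Below is a minimal usage sketch based only on the calls visible in this file (`execute_find`, `start`, `stop` and the two signals); the `bundle`, `parent`, `task_manager` and `project` objects are assumed to exist in the calling app.

```python
# Hypothetical wiring of a ShotgunDataRetriever (bundle/parent/task_manager assumed).
shotgun_data = bundle.import_module("shotgun_data")
retriever = shotgun_data.ShotgunDataRetriever(parent, bg_task_manager=task_manager)

def on_completed(uid, request_type, data):
    # for "find" requests the records arrive under the "sg" key
    # (see _on_task_completed above)
    print("request %s (%s) done: %s" % (uid, request_type, data))

def on_failed(uid, msg):
    print("request %s failed: %s" % (uid, msg))

retriever.work_completed.connect(on_completed)
retriever.work_failure.connect(on_failed)
retriever.start()

# queue an asynchronous find; a request id is returned straight away
request_id = retriever.execute_find("Shot", [["project", "is", project]], ["code"])

# ... later, during teardown:
retriever.stop()
```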
#### File: python/shotgun_globals/cached_schema.py
```python
from __future__ import with_statement
import os
import sgtk
from sgtk.platform.qt import QtCore, QtGui
import cPickle as pickle
class CachedShotgunSchema(QtCore.QObject):
"""
Wraps around the shotgun schema and caches it for fast lookups.
Singleton-style setup, so all access method happen via class methods:
- get_type_display_name - Display name for entity type
- get_field_display_name - Display name for field
- get_empty_phrase - String to denote 'no value' for item
- get_status_display_name - Display name for status code
This caches the shotgun schema to disk *once* and doesn't check for
further updates. If the cache fails to find a value, the technical
name rather than the display name is returned, so there is graceful
fallback.
:signal schema_loaded: Fires when the schema has been loaded
:signal status_loaded: Fires when the status list has been loaded
"""
__instance = None
# Both will be sent along with the project id.
schema_loaded = QtCore.Signal(int)
status_loaded = QtCore.Signal(int)
@classmethod
def __get_instance(cls):
"""
Singleton access
"""
if cls.__instance is None:
cls.__instance = CachedShotgunSchema()
return cls.__instance
def __init__(self):
"""
Constructor
"""
QtCore.QObject.__init__(self)
self._bundle = sgtk.platform.current_bundle()
self._field_schema = {}
self._type_schema = {}
self.__sg_data_retrievers = []
self._status_data = {}
self._sg_schema_query_ids = {}
self._sg_status_query_ids = {}
# load cached values from disk
self._load_cached_schema()
self._load_cached_status()
def _is_schema_loaded(self, project_id=None):
"""
Whether the schema has been loaded into memory.
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
:returns: bool
"""
project_id = project_id or self._get_current_project_id()
return (project_id in self._field_schema)
def _is_status_loaded(self, project_id=None):
"""
Whether statuses have been loaded into memory.
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
:returns: bool
"""
project_id = project_id or self._get_current_project_id()
return (project_id in self._status_data)
def _get_current_project_id(self):
"""
Return the id of the current project.
:returns: The project id associated with the current context, or ``None``
if operating in a site-level context.
:rtype: ``int`` or ``None``
"""
if self._bundle.tank.pipeline_configuration.is_site_configuration():
# site configuration (no project id). Return None which is
# consistent with core.
project_id = None
else:
project_id = self._bundle.tank.pipeline_configuration.get_project_id()
return project_id
def _get_cache_root_path(self, project_id=None):
"""
Gets the parent bundle's cache location.
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
:returns: str
"""
if project_id is None:
return self._bundle.cache_location
else:
# Backwards compatible here with pre-v0.18.1 tk-core. If we don't
# have access to the get_project_cache_location method on the bundle,
# then we just get the current project's cache location and suffer
# the minor consequences. This is unlikely to happen, because apps
# that make use of the project_id feature are almost certainly going
# to also require a modern version of core.
try:
return self._bundle.get_project_cache_location(project_id)
except AttributeError:
self._bundle.log_debug(
"Bundle.get_project_cache_location() is not available. "
"Falling back on Bundle.cache_location instead."
)
return self._bundle.cache_location
def _get_schema_cache_path(self, project_id=None):
"""
Gets the path to the schema cache file.
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
:returns: str
"""
return os.path.join(
self._get_cache_root_path(project_id),
"sg_schema.pickle",
)
def _get_status_cache_path(self, project_id=None):
"""
Gets the path to the status cache file.
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
:returns: str
"""
return os.path.join(
self._get_cache_root_path(project_id),
"sg_status.pickle",
)
def _load_cached_status(self, project_id=None):
"""
Load cached status from disk if it exists.
:param project_id: The project Entity id. If None, the current
context's project will be used.
:returns bool: True if loaded, False if not.
"""
project_id = project_id or self._get_current_project_id()
status_cache_path = self._get_status_cache_path(project_id)
if os.path.exists(status_cache_path):
try:
self._bundle.log_debug("Loading cached status from '%s'" % status_cache_path)
with open(status_cache_path, "rb") as fh:
status_data = pickle.load(fh)
# Check to make sure the structure of the data
# is what we expect. If it isn't then we don't
# accept the data which will force it to be
# recached.
if "statuses" in status_data and "status_order" in status_data:
self._status_data[project_id] = status_data
except Exception, e:
self._bundle.log_warning("Could not open cached status "
"file '%s': %s" % (status_cache_path, e))
else:
self.status_loaded.emit(project_id)
return True
return False
def _load_cached_schema(self, project_id=None):
"""
Load cached metaschema from disk if it exists.
:param project_id: The project Entity id. If None, the current
context's project will be used.
:returns bool: True if loaded, False if not.
"""
project_id = project_id or self._get_current_project_id()
schema_cache_path = self._get_schema_cache_path(project_id)
if os.path.exists(schema_cache_path):
try:
self._bundle.log_debug("Loading cached schema from '%s'" % schema_cache_path)
with open(schema_cache_path, "rb") as fh:
data = pickle.load(fh)
self._field_schema[project_id] = data["field_schema"]
self._type_schema[project_id] = data["type_schema"]
except Exception, e:
self._bundle.log_warning("Could not open cached schema "
"file '%s': %s" % (schema_cache_path, e))
else:
self.schema_loaded.emit(project_id)
return True
return False
def _check_schema_refresh(self, entity_type=None, field_name=None, project_id=None):
"""
Check and potentially trigger a cache refresh.
:param str entity_type: Shotgun entity type
:param str field_name: Shotgun field name
:param int project_id: The project Entity id. If None, the current
context's project will be used.
"""
current_project_id = self._get_current_project_id()
project_id = project_id or current_project_id
# TODO: currently, this only checks if there is a full cache in memory
# or not. Later on, when we have the ability to check the current
# metaschema generation via the shotgun API, this can be handled in a
# more graceful fashion.
if not self._is_schema_loaded(project_id) and project_id not in self._sg_schema_query_ids.values():
# schema is not requested and not loaded.
# If a schema was requested for a project that isn't the current project, then
# let's check to see if we can get it from disk before we resort to going to
# Shotgun.
if project_id != current_project_id:
if self._load_cached_schema(project_id=project_id):
# If we were able to load the cached schema from disk then we don't
# have anything else to do.
return
# so download it from shotgun!
self._bundle.log_debug("Starting to download new metaschema from Shotgun...")
if self.__sg_data_retrievers:
data_retriever = self.__sg_data_retrievers[0]["data_retriever"]
self._sg_schema_query_ids[data_retriever.get_schema(project_id)] = project_id
else:
self._bundle.log_warning(
"No data retrievers registered with this schema manager. "
"Cannot load shotgun schema."
)
def _check_status_refresh(self, project_id=None):
"""
Request status data from Shotgun.
:param int project_id: The project Entity id. If None, the current
context's project will be used.
"""
current_project_id = self._get_current_project_id()
project_id = project_id or current_project_id
if not self._is_status_loaded(project_id) and project_id not in self._sg_status_query_ids.values():
# If statuses were requested for a project that isn't the current project, then
# let's check to see if we can get it from disk before we resort to going to
# Shotgun.
if project_id != current_project_id:
if self._load_cached_status(project_id=project_id):
# If we were able to load the cached schema from disk then we don't
# have anything else to do.
return
fields = ["bg_color", "code", "name"]
self._bundle.log_debug("Starting to download status list from Shotgun...")
if self.__sg_data_retrievers:
# pick the first one
data_retriever = self.__sg_data_retrievers[0]["data_retriever"]
self._sg_status_query_ids[data_retriever.execute_find("Status", [], fields)] = project_id
else:
self._bundle.log_warning(
"No data retrievers registered with this schema manager. "
"Cannot load Shotgun statuses."
)
def _on_worker_failure(self, uid, msg):
"""
Asynchronous callback - the worker thread errored.
"""
shotgun_model = self._bundle.import_module("shotgun_model")
if uid in self._sg_schema_query_ids:
msg = shotgun_model.sanitize_qt(msg) # qstring on pyqt, str on pyside
self._bundle.log_warning("Could not load sg schema: %s" % msg)
del self._sg_schema_query_ids[uid]
elif uid in self._sg_status_query_ids:
msg = shotgun_model.sanitize_qt(msg) # qstring on pyqt, str on pyside
self._bundle.log_warning("Could not load sg status: %s" % msg)
del self._sg_status_query_ids[uid]
def _on_worker_signal(self, uid, request_type, data):
"""
Signaled whenever the worker completes something.
This method will dispatch the work to different methods
depending on what async task has completed.
"""
shotgun_model = self._bundle.import_module("shotgun_model")
uid = shotgun_model.sanitize_qt(uid) # qstring on pyqt, str on pyside
data = shotgun_model.sanitize_qt(data)
if uid in self._sg_schema_query_ids:
self._bundle.log_debug("Metaschema arrived from Shotgun...")
project_id = self._sg_schema_query_ids[uid]
# store the schema in memory
self._field_schema[project_id] = data["fields"]
self._type_schema[project_id] = data["types"]
# job done!
del self._sg_schema_query_ids[uid]
self.schema_loaded.emit(project_id)
# and write out the data to disk
self._bundle.log_debug(
"Saving schema to '%s'..." % self._get_schema_cache_path(project_id)
)
try:
with open(self._get_schema_cache_path(project_id), "wb") as fh:
data = dict(
field_schema=self._field_schema[project_id],
type_schema=self._type_schema[project_id],
)
pickle.dump(data, fh)
self._bundle.log_debug("...done")
except Exception, e:
self._bundle.log_warning(
"Could not write schema "
"file '%s': %s" % (self._get_schema_cache_path(project_id), e)
)
elif uid in self._sg_status_query_ids:
self._bundle.log_debug("Status list arrived from Shotgun...")
project_id = self._sg_status_query_ids[uid]
# store status in memory
self._status_data[project_id] = dict(
status_order=[],
statuses={},
)
for x in data["sg"]:
self._status_data[project_id]["statuses"][x["code"]] = x
self._status_data[project_id]["status_order"].append(x["code"])
# job done!
del self._sg_status_query_ids[uid]
self.status_loaded.emit(project_id)
# and write out the data to disk
self._bundle.log_debug(
"Saving status to '%s'..." % self._get_status_cache_path(project_id)
)
try:
with open(self._get_status_cache_path(project_id), "wb") as fh:
pickle.dump(self._status_data[project_id], fh)
self._bundle.log_debug("...done")
except Exception, e:
self._bundle.log_warning(
"Could not write status "
"file '%s': %s" % (self._get_status_cache_path(project_id), e)
)
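    # For reference, the pickled cache files written above have the following
    # shapes (reconstructed from the dump/load calls in this class):
    #
    #   sg_schema.pickle: {"field_schema": {...}, "type_schema": {...}}
    #   sg_status.pickle: {"statuses": {"ip": {...}, ...}, "status_order": ["ip", ...]}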
##########################################################################################
# public methods
@classmethod
def register_bg_task_manager(cls, task_manager):
"""
Register a background task manager with the singleton.
Once a background task manager has been registered, the schema
singleton can refresh its cache.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self = cls.__get_instance()
# create a data retriever
shotgun_data = self._bundle.import_module("shotgun_data")
data_retriever = shotgun_data.ShotgunDataRetriever(self, bg_task_manager=task_manager)
data_retriever.start()
data_retriever.work_completed.connect(self._on_worker_signal)
data_retriever.work_failure.connect(self._on_worker_failure)
dr = {"data_retriever": data_retriever, "task_manager": task_manager}
self.__sg_data_retrievers.append(dr)
@classmethod
def unregister_bg_task_manager(cls, task_manager):
"""
Unregister a previously registered data retriever with the singleton.
:param task_manager: Background task manager to use
:type task_manager: :class:`~tk-framework-shotgunutils:task_manager.BackgroundTaskManager`
"""
self = cls.__get_instance()
culled_retrievers = []
for dr in self.__sg_data_retrievers:
if dr["task_manager"] == task_manager:
self._bundle.log_debug("Unregistering %r from schema manager" % task_manager)
data_retriever = dr["data_retriever"]
data_retriever.stop()
# make sure we don't get exceptions trying to disconnect if the
# signals were never connected or somehow disconnected externally
try:
data_retriever.work_completed.disconnect(self._on_worker_signal)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_warning(
"Could not disconnect '_on_worker_signal' slot from "
"the task manager's 'work_completed' signal: %s" % (e,)
)
try:
data_retriever.work_failure.disconnect(self._on_worker_failure)
except (TypeError, RuntimeError), e: # was never connected
self._bundle.log_warning(
"Could not disconnect '_on_worker_failure' slot from "
"the task manager's 'work_failure' signal: %s" % (e,)
)
else:
culled_retrievers.append(dr)
self.__sg_data_retrievers = culled_retrievers
@classmethod
def run_on_schema_loaded(cls, callback, project_id=None):
"""
Run the given callback once the schema is loaded.
:param callback: Method with no argument to run when the schema is loaded
:param project_id: The id of the project entity to load the schema for. If
None, the current context's project will be used.
"""
self = cls.__get_instance()
if self._is_schema_loaded(project_id=project_id):
callback()
else:
self.schema_loaded.connect(callback)
# kick off full schema loading
self._check_schema_refresh(project_id=project_id)
@classmethod
def get_entity_fields(cls, sg_entity_type, project_id=None):
"""
Returns the fields for a Shotgun entity type.
:param sg_entity_type: Shotgun entity type
:param project_id: The id of the project entity to get fields from.
If None, the current context's project will be used.
:returns: List of field names
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_schema_refresh(sg_entity_type, project_id=project_id)
if project_id in self._field_schema and sg_entity_type in self._field_schema[project_id]:
return self._field_schema[project_id][sg_entity_type].keys()
else:
return []
@classmethod
def get_type_display_name(cls, sg_entity_type, project_id=None):
"""
Returns the display name for a Shotgun entity type.
If no display name is known for this object, the system
        name is returned, i.e. the same value that is passed in
via the sg_entity_type parameter.
If the data is not present locally, a cache reload
will be triggered, meaning that subsequent cache requests may
return valid data.
:param sg_entity_type: Shotgun entity type
:param project_id: The id of the project entity to get a name from.
If None, the current context's project will be used.
:returns: Entity type display name
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_schema_refresh(sg_entity_type, project_id=project_id)
if project_id in self._type_schema and sg_entity_type in self._type_schema[project_id]:
# cache contains our item
data = self._type_schema[project_id][sg_entity_type]
display_name = data["name"]["value"]
else:
display_name = sg_entity_type
return display_name
@classmethod
def get_field_display_name(cls, sg_entity_type, field_name, project_id=None):
"""
Returns the display name for a given Shotgun field. If the field
cannot be found or the value is not yet cached, the system name
for the field is returned.
If the data is not present locally, a cache reload
will be triggered, meaning that subsequent cache requests may
return valid data.
:param sg_entity_type: Shotgun entity type
:param field_name: Shotgun field name
:param project_id: The id of the project entity to get a name from.
If None, the current context's project will be used.
:returns: Field display name
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
self._check_schema_refresh(
sg_entity_type,
field_name,
project_id=project_id,
)
if field_name == "type":
# type doesn't seem to exist in the schema
# so treat as a special case
return "Type"
elif project_id in self._type_schema and sg_entity_type in self._type_schema[project_id]:
if field_name in self._field_schema[project_id][sg_entity_type]:
data = self._field_schema[project_id][sg_entity_type][field_name]
return data["name"]["value"]
return field_name
@classmethod
def get_empty_phrase(cls, sg_entity_type, field_name, project_id=None):
"""
Get an appropriate phrase to describe the fact that
a given Shotgun field is empty. The phrase will differ depending on
the data type of the field.
:param sg_entity_type: Shotgun entity type
:param field_name: Shotgun field name
:param project_id: The id of the project entity to get a phrase from.
If None, the current context's project will be used.
:returns: Empty phrase string
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
self._check_schema_refresh(
sg_entity_type,
field_name,
project_id=project_id,
)
empty_value = "Not set"
try:
data_type = cls.get_data_type(
sg_entity_type,
field_name,
project_id=project_id,
)
if data_type == "Entity":
empty_value = "Not set"
except Exception:
pass
return empty_value
@classmethod
def get_data_type(cls, sg_entity_type, field_name, project_id=None):
"""
Return the data type for the given Shotgun field.
:param sg_entity_type: Shotgun entity type
:param field_name: Shotgun field name
:param project_id: The id of the project entity to get a type from.
If None, the current context's project will be used.
:returns: Data type string
"""
source_field_name = field_name
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
self._check_schema_refresh(
sg_entity_type,
field_name,
project_id=project_id,
)
if project_id in self._type_schema and sg_entity_type in self._type_schema[project_id]:
if field_name in self._field_schema[project_id][sg_entity_type]:
data = self._field_schema[project_id][sg_entity_type][field_name]
return data["data_type"]["value"]
raise ValueError("Could not find the schema for %s.%s" % (
sg_entity_type, source_field_name))
@classmethod
def get_valid_types(cls, sg_entity_type, field_name, project_id=None):
"""
Return the valid entity types that the given Shotgun field can link to.
:param sg_entity_type: Shotgun entity type
:param field_name: Shotgun field name
:param project_id: The id of the project entity to get types from.
If None, the current context's project will be used.
:returns: List of entity types
"""
source_field_name = field_name
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
self._check_schema_refresh(
sg_entity_type,
field_name,
project_id=project_id,
)
if project_id in self._type_schema and sg_entity_type in self._type_schema[project_id]:
if field_name in self._field_schema[project_id][sg_entity_type]:
data = self._field_schema[project_id][sg_entity_type][field_name]
valid_types = data.get("properties", {}).get("valid_types", {}).get("value")
if valid_types is None:
raise ValueError(
"The data type for %s.%s does not have valid types" % (
sg_entity_type,
source_field_name
)
)
return valid_types
raise ValueError("Could not find the schema for %s.%s" % (
sg_entity_type, source_field_name))
@classmethod
def get_valid_values(cls, sg_entity_type, field_name, project_id=None):
"""
Returns valid values for fields with a list of choices.
:param str sg_entity_type: The entity type.
:param str field_name: The name of the field on the entity
:param project_id: The id of the project entity to get a name from.
If None, the current context's project will be used.
:return: A :obj:`list` of valid values defined by the schema
:raises: ``ValueError`` if the field has no valid values.
"""
source_field_name = field_name
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
self._check_schema_refresh(
sg_entity_type,
field_name,
project_id=project_id,
)
if project_id in self._type_schema and project_id in self._field_schema:
if sg_entity_type in self._type_schema[project_id]:
if field_name in self._field_schema[project_id][sg_entity_type]:
data = self._field_schema[project_id][sg_entity_type][field_name]
valid_values = data.get("properties", {}).get("valid_values", {}).get("value")
if valid_values is None:
raise ValueError(
"The data type for %s.%s does not have valid values" % (
sg_entity_type,
source_field_name
)
)
return valid_values
raise ValueError("Could not find the schema for %s.%s" % (
sg_entity_type, source_field_name))
@classmethod
def get_status_display_name(cls, status_code, project_id=None):
"""
Returns the display name for a given status code.
        If the status code cannot be found or the statuses have not been loaded,
the status code is returned back.
If the data is not present locally, a cache reload
will be triggered, meaning that subsequent cache requests may
return valid data.
        :param status_code: Status short code (e.g. 'ip')
:param project_id: The id of the project entity to get a name from.
If None, the current context's project will be used.
:returns: string with descriptive status name
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_status_refresh(project_id=project_id)
display_name = status_code
if project_id in self._status_data and status_code in self._status_data[project_id]["statuses"]:
data = self._status_data[project_id]["statuses"][status_code]
display_name = data.get("name") or status_code
return display_name
@classmethod
def get_status_color(cls, status_code, project_id=None):
"""
Returns the color for a given status code.
        If the status code cannot be found or the statuses have not been loaded,
None is returned.
If the data is not present locally, a cache reload
will be triggered, meaning that subsequent cache requests may
return valid data.
        :param status_code: Status short code (e.g. 'ip')
:param project_id: The id of the project entity to get a color from.
If None, the current context's project will be used.
:returns: string with r,g,b values, e.g. ``"123,255,10"``
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_status_refresh(project_id=project_id)
status_color = None
if project_id in self._status_data and status_code in self._status_data[project_id]["statuses"]:
data = self._status_data[project_id]["statuses"][status_code]
# color is in the form of "123,255,10"
status_color = data.get("bg_color")
return status_color
@classmethod
def field_is_editable(cls, sg_entity_type, field_name, project_id=None):
"""
Returns a boolean identifying the editability of the entity's field.
:param str sg_entity_type: the entity type
        :param str field_name: the field name to check editability
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
The ``field_name`` may be in "bubble" notation. This method will account
for it and return the editability setting for the evaluated entity type
        and field defined in the bubble notation. For example, if the field is
defined as "sg_sequence.Sequence.code", this method will return the
editability of the `code` field on the `Sequence` entity.
        :returns: ``True`` if the field is editable, ``False`` otherwise.
"""
self = cls.__get_instance()
source_field_name = field_name
project_id = project_id or self._get_current_project_id()
self._check_schema_refresh(sg_entity_type, field_name, project_id=project_id)
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
        # make sure the project id is found in each of the type and field schemas
# and that the entity type and field name are found in their respective
# project caches
if (project_id in self._type_schema and
project_id in self._field_schema and
sg_entity_type in self._type_schema[project_id] and
field_name in self._field_schema[project_id][sg_entity_type]):
data = self._field_schema[project_id][sg_entity_type][field_name]
try:
return data["editable"]["value"]
except KeyError:
raise ValueError("Could not determine editability from the schema.")
raise ValueError("Could not find the schema for %s.%s" % (
sg_entity_type, source_field_name))
@classmethod
def field_is_visible(cls, sg_entity_type, field_name, project_id=None):
"""
Returns a boolean identifying the visibility of the entity's field.
:param sg_entity_type: the entity type
:param field_name: the field name to check visibility
:param project_id: The project Entity id. If None, the current
context's project will be used, or the "site"
cache location will be returned if the current
context does not have an associated project.
The ``field_name`` may be in "bubble" notation. This method will account
for it and return the visibility setting for the evaluated entity type
        and field defined in the bubble notation. For example, if the field is
defined as "sg_sequence.Sequence.code", this method will return the
visibility of the `code` field on the `Sequence` entity.
:returns: ``True`` if the field is visible, ``False`` otherwise.
"""
source_field_name = field_name
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_schema_refresh(sg_entity_type, field_name, project_id=project_id)
(sg_entity_type, field_name) = _account_for_bubble_fields(
sg_entity_type, field_name)
        # make sure the project id is found in each of the type and field schemas
# and that the entity type and field name are found in their respective
# project caches
if (project_id in self._type_schema and
project_id in self._field_schema and
sg_entity_type in self._type_schema[project_id] and
field_name in self._field_schema[project_id][sg_entity_type]):
data = self._field_schema[project_id][sg_entity_type][field_name]
try:
return data["visible"]["value"]
except KeyError:
raise ValueError("Could not determine visibility from the schema.")
raise ValueError("Could not find the schema for %s.%s" % (
sg_entity_type, source_field_name))
@classmethod
def get_ordered_status_list(cls, display_names=False, project_id=None):
"""
Returns a list of statuses in their order as defined by the
Shotgun site preferences.
If the data is not present locally, a cache reload
will be triggered, meaning that subsequent cache requests may
return valid data.
:param display_names: If True, returns status display names. If
False, status codes are returned. Default is
False.
:param project_id: The id of the project entity to get statuses from.
If None, the current context's project will be used.
:returns: list of string display names in order
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
self._check_status_refresh(project_id=project_id)
if project_id not in self._status_data:
raise ValueError("Could not find the statuses for project %i" % (project_id))
if display_names:
            return [cls.get_status_display_name(s, project_id) for s in self._status_data[project_id]["status_order"]]
else:
return self._status_data[project_id]["status_order"]
@classmethod
def clear_cached_data(cls, project_id=None):
"""
Remove both the schema and status cache files from disk for
the specified project_id. If no project_id is specified, then
use the current context project.
:param project_id: The id of the project entity to remove
schema and status cache files for. If
None, the current context's project will
be used.
"""
self = cls.__get_instance()
project_id = project_id or self._get_current_project_id()
schema_cache = self._get_schema_cache_path(project_id)
if os.path.isfile(schema_cache):
self._bundle.log_debug("Removing schema cache file : %s" % schema_cache)
try:
os.remove(schema_cache)
except Exception, e:
self._bundle.log_error(
"Caught error attempting to remove schema cache file [%s] :\n%s" %
(schema_cache, e)
)
raise
status_cache = self._get_status_cache_path(project_id)
if os.path.isfile(status_cache):
self._bundle.log_debug("Removing status cache file : %s" % status_cache)
try:
os.remove(status_cache)
except Exception, e:
self._bundle.log_error(
"Caught error attempting to remove status cache file [%s] :\n%s" %
(status_cache, e)
)
raise
def _account_for_bubble_fields(sg_entity_type, field_name):
"""Detect bubble fields and return the proper entity type and field name.
    :param str sg_entity_type: The input entity type name. If the field name
        is in bubble notation, this value will be replaced by the
parsed entity type in the field string.
:param str field_name: The name of the field. This may be in "bubble"
notation: "sg_sequence.Sequence.code"
If field_name is in bubble notation (example: "sg_sequence.Sequence.code")
this method will return "code" as the field name and "Sequence" as the
entity type.
If the field name is not in bubble notation, this method simply returns a
tuple containing the supplied arguments.
:returns: A tuple (str, str) where the first item is the evaluated entity
type name and the second is the evaluated field name.
:rtype: tuple
"""
if "." in field_name:
(sg_entity_type, field_name) = field_name.split(".")[-2:]
return (sg_entity_type, field_name)
```
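Since all access goes through class methods, typical usage is just a matter of registering a background task manager once and then calling the lookups. A small sketch; the `task_manager` object is assumed, and the entity, field and status names are illustrative:

```python
# Hypothetical usage of the singleton-style schema cache defined above.
CachedShotgunSchema.register_bg_task_manager(task_manager)

# lookups fall back gracefully to the technical name while the cache warms up
print(CachedShotgunSchema.get_type_display_name("Shot"))
print(CachedShotgunSchema.get_field_display_name("Shot", "sg_status_list"))
print(CachedShotgunSchema.get_status_display_name("ip"))

# bubble notation resolves to the linked entity's field, here Sequence.code
print(CachedShotgunSchema.get_field_display_name("Shot", "sg_sequence.Sequence.code"))
```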
#### File: python/shotgun_globals/icon.py
```python
from sgtk.platform.qt import QtCore, QtGui
from .ui import resources_rc
# list of all entity types for which an icon exists
_entity_types_with_icons = ["Asset",
"ClientUser",
"EventLogEntry",
"Group",
"HumanUser",
"PublishedFile",
"TankPublishedFile",
"Note",
"Playlist",
"Project",
"Sequence",
"Shot",
"Tag",
"Task",
"Ticket",
"Version",
]
_cached_entity_icons = {}
def get_entity_type_icon_url(entity_type):
"""
Retrieve the icon resource path for the specified entity type if available.
This is useful if you want to include an icon in a ``QLabel`` using
an ``<img>`` html tag.
:param entity_type: The entity type to retrieve the icon for
:returns: A string url with a qt resource path
"""
if entity_type in _entity_types_with_icons:
return ":/tk-framework-shotgunutils/icon_%s_dark.png" % entity_type
else:
return None
def get_entity_type_icon(entity_type):
"""
Retrieve the icon for the specified entity type if available.
:param entity_type: The entity type to retrieve the icon for
:returns: A QIcon if an icon was found for the specified entity
type, otherwise None.
"""
global _cached_entity_icons
if entity_type not in _cached_entity_icons:
# not yet cached
icon = None
url = get_entity_type_icon_url(entity_type)
if url:
# create a QIcon for it
icon = QtGui.QIcon(QtGui.QPixmap(url))
# cache it
_cached_entity_icons[entity_type] = icon
# we've previously asked for the icon
return _cached_entity_icons[entity_type]
```
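Both helpers are plain module functions, so usage is direct; embedding the resource url in rich text via an ``<img>`` tag is the case the docstring calls out. A short sketch (the label text is illustrative):

```python
# Hypothetical usage of the icon helpers above inside a Qt widget.
from sgtk.platform.qt import QtGui

icon = get_entity_type_icon("Shot")     # QIcon, or None if no icon exists
url = get_entity_type_icon_url("Shot")  # ":/tk-framework-shotgunutils/icon_Shot_dark.png"

label = QtGui.QLabel()
if url:
    label.setText("<img src='%s'/> Shot" % url)
```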
#### File: python/shotgun_model/data_handler_nav.py
```python
import copy
import gc
from .data_handler import ShotgunDataHandler, log_timing
from .errors import ShotgunModelDataError
from .data_item import ShotgunItemData
from .data_handler_cache import ShotgunDataHandlerCache
class ShotgunNavDataHandler(ShotgunDataHandler):
"""
Shotgun Model low level data storage for use
with the Shotgun Hierarchy Model.
This implements a data storage where a series of
nav_expand queries are stringed together into a single
cache file on disk.
"""
# constant values to refer to the fields where the paths are stored in the
# returned navigation data.
_SG_PATH_FIELD = "path"
_SG_PARENT_PATH_FIELD = "parent_path"
def __init__(self, root_path, seed_entity_field, entity_fields, cache_path, include_root=None):
"""
:param str root_path: The path to the root of the hierarchy to display.
This corresponds to the ``path`` argument of the
:meth:`~shotgun-api3:shotgun_api3.Shotgun.nav_expand()`
api method. For example, ``/Project/65`` would correspond to a
            project on your shotgun site with id of ``65``.
:param str seed_entity_field: This is a string that corresponds to the
field on an entity used to seed the hierarchy. For example, a value
of ``Version.entity`` would cause the model to display a hierarchy
where the leaves match the entity value of Version entities.
:param dict entity_fields: A dictionary that identifies what fields to
include on returned entities. Since the hierarchy can include any
entity structure, this argument allows for specification of
additional fields to include as these entities are returned. The
dict's keys correspond to the entity type and the value is a list
of field names to return.
:param str cache_path: Path to cache file location.
:param str include_root: Defines the name of an additional, top-level
model item that represents the root. In views, this item will appear
as a sibling to top-level children of the root. This allows for
UX whereby a user can select an item representing the root without
having a UI that shows a single, top-level item. An example would
be displaying published file entity hierarchy with top level items:
"Assets", "Shots", and "Project Publishes". In this example, the
supplied arg would look like: ``include_root="Project Publishes"``.
If ``include_root`` is ``None``, no root item will be added.
"""
super(ShotgunNavDataHandler, self).__init__(cache_path)
self.__root_path = root_path
self.__seed_entity_field = seed_entity_field
self.__entity_fields = entity_fields
self.__include_root = include_root
def generate_data_request(self, data_retriever, path):
"""
Generate a data request for a data retriever.
Once the data has arrived, the caller is expected to
        call :meth:`update_data` and pass in the received
data payload for processing.
:param data_retriever: :class:`~tk-framework-shotgunutils:shotgun_data.ShotgunDataRetriever` instance.
:returns: Request id or None if no work is needed
"""
self._log_debug("generate_data_request for path %s" % path)
worker_id = data_retriever.execute_nav_expand(
path,
self.__seed_entity_field,
self.__entity_fields
)
return worker_id
@log_timing
def update_data(self, sg_data):
"""
The counterpart to :meth:`generate_data_request`. When the data
request has been carried out, this method should be called by the calling
class and the data payload from Shotgun should be provided via the
sg_data parameter.
The shotgun nav data is compared against an existing part of the tree and
a list of differences is returned, indicating which nodes were
        added, deleted and modified, in the following form::
[
{
"data": ShotgunItemData instance,
"mode": self.UPDATED|ADDED|DELETED
},
{
"data": ShotgunItemData instance,
"mode": self.UPDATED|ADDED|DELETED
},
...
]
:param sg_data: list, resulting from a Shotgun nav_expand query
:returns: list of updates. see above
:raises: :class:`ShotgunModelDataError` if no cache is loaded into memory
"""
if self._cache is None:
raise ShotgunModelDataError("No data currently loaded in memory!")
self._log_debug("Updating %s with %s shotgun records." % (self, len(sg_data)))
item_path = sg_data.get(self._SG_PATH_FIELD, None)
self._log_debug("Got hierarchy data for path: %s" % (item_path,))
if not item_path:
raise ShotgunModelDataError(
"Unexpected error occurred. Could not determine the path"
"from the queried hierarchy item."
)
if self._cache.size == 0:
self._log_debug("In-memory cache is empty.")
# ensure the data is clean
self._log_debug("sanitizing data...")
sg_data = self._sg_clean_data(sg_data)
self._log_debug("...done!")
self._log_debug("Generating new tree in memory...")
# create a brand new tree rather than trying to be clever
# about how we cull intermediate nodes for deleted items
diff_list = []
num_adds = 0
num_deletes = 0
num_modifications = 0
new_uids = set()
# a list of sg data dicts to display items for
child_data = []
if item_path == self.__root_path:
self._log_debug("This is the root of the tree.")
parent_uid = None
if self.__include_root:
# the calling code has requested that the root of the tree be
# displayed as a top-level sibling (to make it easy to discover
# hierarchy targets attached to the root entity). To do this, we
# simply make a copy of the root item dictionary (sans children)
# and add it as an additional child to display. We also set the
# label to the supplied value.
root_item = copy.deepcopy(sg_data)
root_item["label"] = self.__include_root
# get rid of child data since it'll be displayed as a sibling
root_item["has_children"] = False
del root_item["children"]
# add the root dict to the list of data to build items for
child_data.append(root_item)
else:
parent_uid = item_path
previous_uids = set(self._cache.get_child_uids(parent_uid))
# process all the children
child_data.extend(sg_data["children"])
# analyze the incoming shotgun data
for sg_item in child_data:
if self._SG_PATH_FIELD not in sg_item:
# note: leaf nodes of kind 'empty' don't have a path
unique_field_value = "%s/%s" % (parent_uid, sg_item["label"])
else:
unique_field_value = sg_item.get(self._SG_PATH_FIELD)
new_uids.add(unique_field_value)
# check if item already exists
already_exists = self._cache.item_exists(unique_field_value)
# insert the change into the data set directly.
# if the item already existed and was updated,
# this returns true
updated = self._cache.add_item(
parent_uid=parent_uid,
sg_data=sg_item,
field_name=None,
is_leaf=not sg_item["has_children"],
uid=unique_field_value
)
if not already_exists:
# item was added
diff_list.append({
"data": self._cache.get_entry_by_uid(unique_field_value),
"mode": self.ADDED
})
num_adds += 1
elif updated:
# item existed but was updated
diff_list.append({
"data": self._cache.get_entry_by_uid(unique_field_value),
"mode": self.UPDATED
})
num_modifications += 1
# now figure out if anything has been removed
for deleted_uid in previous_uids.difference(new_uids):
item = self._cache.take_item(deleted_uid)
diff_list.append({
"data": item,
"mode": self.DELETED
})
num_deletes += 1
self._log_debug("Shotgun data (%d records) received and processed. " % len(sg_data))
self._log_debug(" The new tree is %d records." % self._cache.size)
self._log_debug(" There were %d diffs from in-memory cache:" % len(diff_list))
self._log_debug(" Number of new records: %d" % num_adds)
self._log_debug(" Number of deleted records: %d" % num_deletes)
self._log_debug(" Number of modified records: %d" % num_modifications)
return diff_list
```
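Putting the two halves together: a caller asks the handler to generate a request, hands the resulting nav_expand payload back to :meth:`update_data`, and then applies the returned diff list to its model. A sketch under the assumption that a `data_retriever` exists and the handler's cache has already been loaded into memory; the paths and fields are illustrative:

```python
# Hypothetical round trip through the nav data handler above.
handler = ShotgunNavDataHandler(
    root_path="/Project/65",             # as in the docstring example
    seed_entity_field="Version.entity",
    entity_fields={"Version": ["code"]},
    cache_path="/tmp/nav_cache.pickle",  # illustrative location
)

request_id = handler.generate_data_request(data_retriever, "/Project/65")

# ... once the retriever delivers the nav_expand payload as sg_data:
for diff in handler.update_data(sg_data):
    item = diff["data"]  # a ShotgunItemData instance
    if diff["mode"] == handler.ADDED:
        pass  # create a model item for the new node
    elif diff["mode"] == handler.UPDATED:
        pass  # refresh the existing item
    elif diff["mode"] == handler.DELETED:
        pass  # remove the item from the model
```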
#### File: python/utils/qt.py
```python
def safe_delete_later(widget):
"""
Will call the deleteLater method on the given widget, but only if
running in a Qt4 environment. This allows us to proactively delete
widgets in Qt4, but protects us from garbage collection issues
associated with doing the same in PySide2/Qt5.
:param widget: The widget to potentially call deleteLater on.
"""
from sgtk.platform.qt import QtCore
if QtCore.__version__.startswith("4."):
widget.deleteLater()
```
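Usage is a drop-in replacement for calling ``deleteLater()`` directly:

```python
# Under Qt4 this schedules the widget for deletion; under PySide2/Qt5 it
# is deliberately a no-op. 'my_dialog' is any QWidget instance.
safe_delete_later(my_dialog)
```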
#### File: bundles/test_engine/engine.py
```python
from tank.platform import Engine
import tank
import sys
class TestEngine(Engine):
def init_engine(self):
pass
##########################################################################################
# logging interfaces
def log_debug(self, msg):
if self.get_setting("debug_logging", False):
sys.stdout.write("DEBUG: %s\n" % msg)
def log_info(self, msg):
sys.stdout.write("%s\n" % msg)
def log_warning(self, msg):
sys.stdout.write("WARNING: %s\n" % msg)
def log_error(self, msg):
sys.stdout.write("ERROR: %s\n" % msg)
```
#### File: tests/python/base_test.py
```python
import os
from tank_test.tank_test_base import *
import sgtk
class TestShotgunUtilsFramework(TankTestBase):
"""
Baseclass for all Shotgun Utils unit tests.
This sets up the fixtures, starts an engine and provides
the following members:
- self.framework_root: The path on disk to the framework bundle
- self.engine: The test engine running
- self.app: The test app running
- self.framework: The shotgun utils fw running
In your test classes, import module functionality like this::
self.shotgun_model = self.framework.import_module("shotgun_model")
"""
def setUp(self):
"""
Fixtures setup
"""
super(TestShotgunUtilsFramework, self).setUp()
self.setup_fixtures()
# set up an environment variable that points to the root of the
# framework so we can specify its location in the environment fixture
self.framework_root = os.path.abspath(os.path.join( os.path.dirname(__file__), "..", ".."))
os.environ["FRAMEWORK_ROOT"] = self.framework_root
# Add these to mocked shotgun
self.add_to_sg_mock_db([self.project])
# run folder creation for the shot
self.tk.create_filesystem_structure(self.project["type"], self.project["id"])
# now make a context
context = self.tk.context_from_entity(self.project["type"], self.project["id"])
# and start the engine
self.engine = sgtk.platform.start_engine("test_engine", self.tk, context)
self.app = self.engine.apps["test_app"]
self.framework = self.app.frameworks['tk-framework-shotgunutils']
def tearDown(self):
"""
Fixtures teardown
"""
# engine is held as global, so must be destroyed.
cur_engine = sgtk.platform.current_engine()
if cur_engine:
cur_engine.destroy()
# important to call base class so it can clean up memory
super(TestShotgunUtilsFramework, self).tearDown()
```
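A concrete test case then only needs to subclass the base and pull in framework modules, following the import pattern shown in the class docstring. A minimal sketch; the test body is illustrative:

```python
# Hypothetical test built on the base class above.
class TestFrameworkImports(TestShotgunUtilsFramework):
    def test_can_import_shotgun_model(self):
        shotgun_model = self.framework.import_module("shotgun_model")
        # the exact module contents depend on the framework version,
        # so just verify the import succeeded
        self.assertTrue(shotgun_model is not None)
```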
#### File: pyside120_py26_qt484_win64/PySide/__init__.py
```python
__all__ = ['QtCore', 'QtGui', 'QtNetwork', 'QtOpenGL', 'QtSql', 'QtSvg', 'QtTest', 'QtWebKit', 'QtScript']
__version__ = "1.2.0"
__version_info__ = (1, 2, 0, "final", 0)
def _setupQtDirectories():
import sys
import os
from . import _utils
pysideDir = _utils.get_pyside_dir()
# On Windows add the PySide\openssl folder (if it exists) to the
# PATH so the SSL DLLs can be found when Qt tries to dynamically
# load them. Tell Qt to load them and then reset the PATH.
if sys.platform == 'win32':
opensslDir = os.path.join(pysideDir, 'openssl')
if os.path.exists(opensslDir):
path = os.environ['PATH']
try:
os.environ['PATH'] = opensslDir + os.pathsep + path
try:
from . import QtNetwork
except ImportError:
pass
else:
QtNetwork.QSslSocket.supportsSsl()
finally:
os.environ['PATH'] = path
# Tell Qt to look for plugins in the PySide package, if the
# plugins folder exists there, instead of just the default of
# looking only in Qt's install or build folder.
try:
from . import QtCore
except ImportError:
pass
else:
pluginsDir = os.path.join(pysideDir, 'plugins')
if os.path.exists(pluginsDir) and \
pluginsDir not in QtCore.QCoreApplication.libraryPaths():
QtCore.QCoreApplication.addLibraryPath(pluginsDir)
# Tell Qt to look for qml imports in the PySide package, if the
# imports folder exists there.
importsDir = os.path.join(pysideDir, 'imports')
if os.path.exists(importsDir):
if 'QML_IMPORT_PATH' in os.environ:
qml_import_path = os.environ['QML_IMPORT_PATH']
os.environ['QML_IMPORT_PATH'] = importsDir + os.pathsep + qml_import_path
else:
os.environ['QML_IMPORT_PATH'] = importsDir
_setupQtDirectories()
```
#### File: browser_widget/ui_pyside/header.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_Header(object):
def setupUi(self, Header):
Header.setObjectName("Header")
Header.resize(389, 37)
Header.setMinimumSize(QtCore.QSize(0, 0))
Header.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.verticalLayout = QtGui.QVBoxLayout(Header)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setContentsMargins(3, 12, 3, 3)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(Header)
self.label.setStyleSheet("")
self.label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.line = QtGui.QFrame(Header)
self.line.setStyleSheet("")
self.line.setFrameShadow(QtGui.QFrame.Plain)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.retranslateUi(Header)
QtCore.QMetaObject.connectSlotsByName(Header)
def retranslateUi(self, Header):
Header.setWindowTitle(QtGui.QApplication.translate("Header", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Header", "<big>Header</big>", None, QtGui.QApplication.UnicodeUTF8))
```
#### File: python/tk_hiero_export/base.py
```python
import os
import sys
import shutil
import time
import hiero.core
from hiero.exporters import FnShotExporter
from hiero.exporters import FnShotProcessor
from hiero.exporters import FnTranscodeExporter
import tank
from tank.platform.qt import QtGui, QtCore
class ShotgunHieroObjectBase(object):
"""Base class to make the Hiero classes app aware."""
_app = None
@classmethod
def setApp(cls, app):
cls._app = app
@property
def app(self):
return self._app
def _formatTkVersionString(self, hiero_version_str):
"""Reformat the Hiero version string to the tk format.
"""
try:
version_number = int(hiero_version_str[1:])
except ValueError:
# Version is sometimes a glob expression (when building tracks for example)
# in these cases, return the original string without the leading 'v'
return hiero_version_str[1:]
version_template = self.app.get_template('template_version')
tk_version_str = version_template.apply_fields({'version': version_number})
return tk_version_str
def _upload_thumbnail_to_sg(self, sg_entity, thumb_qimage):
"""
Updates the thumbnail for an entity in Shotgun
"""
import tempfile
import uuid
thumbdir = tempfile.mkdtemp(prefix='hiero_process_thumbnail_')
try:
path = "%s.png" % os.path.join(thumbdir, sg_entity.get('name', 'thumbnail'))
# scale it down to 600px wide
thumb_qimage_scaled = thumb_qimage.scaledToWidth(600, QtCore.Qt.SmoothTransformation)
thumb_qimage_scaled.save(path)
self.app.log_debug("Uploading thumbnail for %s %s..." % (sg_entity['type'], sg_entity['id']))
self.app.shotgun.upload_thumbnail(sg_entity['type'], sg_entity['id'], path)
except Exception, e:
self.app.log_info("Thumbnail for %s %s (#%s) was not refreshed in Shotgun: %s" % (sg_entity['type'], sg_entity.get('name'), sg_entity['id'], e))
finally:
# Sometimes Windows holds on to the temporary thumbnail file longer than expected which
# can cause an exception here. If we wait a second and try again, this usually solves
# the issue.
try:
shutil.rmtree(thumbdir)
except Exception:
                self.app.log_error("Error removing temporary thumbnail file, trying again.")
time.sleep(1.0)
shutil.rmtree(thumbdir)
def _cutsSupported(self):
"""Returns True if the site has Cut support, False otherwise."""
return self.app.shotgun.server_caps.version >= (7, 0, 0)
```
#### File: python/tk_hiero_export/shot_updater.py
```python
import hiero.core
from hiero.exporters import FnShotExporter
from .base import ShotgunHieroObjectBase
from .collating_exporter import CollatingExporter
class ShotgunShotUpdater(ShotgunHieroObjectBase, FnShotExporter.ShotTask, CollatingExporter):
"""
Ensures that Shots and Sequences exist in Shotgun
"""
def __init__(self, initDict):
FnShotExporter.ShotTask.__init__(self, initDict)
CollatingExporter.__init__(self)
self._cut_order = None
def get_cut_item_data(self):
"""
Return some computed values for use when creating cut items.
The values correspond to the exported version created on disk.
"""
(head_in, tail_out) = self.collatedOutputRange(clampToSource=False)
handles = self._cutHandles if self._cutHandles is not None else 0
in_handle = handles
out_handle = handles
# get the frame offset specified in the export options
startFrame = self._startFrame or 0
# these are the source in/out frames. we'll use them to determine if we
# have enough frames to account for the handles. versions of
# hiero/nukestudio handle missing handles differently
source_in = int(self._item.sourceIn())
source_out = int(self._item.sourceOut())
if self._has_nuke_backend() and source_in < in_handle:
            # newer versions of hiero/nukestudio. no black frames will be
# written to disk for the head when not enough source for the in
# handle. the in/out should be correct. but the start handle is
# limited by the in value. the source in point is within the
# specified handles.
in_handle = source_in
# NOTE: even new versions of hiero/nukestudio will write black
            # frames for insufficient tail handles. so we don't need to account
# for that case here.
# "cut_length" is a boolean set on the updater by the shot processor.
# it signifies whether the transcode task will write the cut length
# to disk (True) or if it will write the full source to disk (False)
if self.is_cut_length_export():
cut_in = head_in + in_handle
cut_out = tail_out - out_handle
else:
cut_in = source_in
cut_out = source_out
# account for any custom start frame
cut_in += startFrame
cut_out += startFrame
# get the edit in/out points from the timeline
edit_in = self._item.timelineIn()
edit_out = self._item.timelineOut()
# account for custom start code in the hiero timeline
seq = self._item.sequence()
edit_in += seq.timecodeStart()
edit_out += seq.timecodeStart()
cut_duration = cut_out - cut_in + 1
edit_duration = edit_out - edit_in + 1
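        # worked example (illustrative numbers): cut_in=1001, cut_out=1049
        # gives cut_duration = 1049 - 1001 + 1 = 49 frames; the edit range is
        # inclusive in the same way, so equal durations mean no retime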
if cut_duration != edit_duration:
self.app.log_warning(
"It looks like the shot %s has a retime applied. SG cuts do "
"not support retimes." % (self.clipName(),)
)
working_duration = tail_out - head_in + 1
if not self._has_nuke_backend() and self.isCollated():
# undo the offset that is automatically added when collating.
# this is only required in older versions of hiero
head_in -= self.HEAD_ROOM_OFFSET
tail_out -= self.HEAD_ROOM_OFFSET
# return the computed cut information
return {
"cut_item_in": cut_in,
"cut_item_out": cut_out,
"cut_item_duration": cut_duration,
"edit_in": edit_in,
"edit_out": edit_out,
"edit_duration": edit_duration,
"head_in": head_in,
"tail_out": tail_out,
"working_duration": working_duration,
}
def taskStep(self):
"""
Execution payload.
"""
# Only process actual shots... so uncollated items and hero collated items
if self.isCollated() and not self.isHero():
return False
# execute base class
FnShotExporter.ShotTask.taskStep(self)
# call the preprocess hook to get extra values
if self.app.shot_count == 0:
self.app.preprocess_data = {}
sg_shot = self.app.execute_hook("hook_get_shot", task=self, item=self._item, data=self.app.preprocess_data)
# clean up the dict
shot_id = sg_shot['id']
del sg_shot['id']
shot_type = sg_shot['type']
del sg_shot['type']
# The cut order may have been set by the processor. Otherwise keep old behavior.
cut_order = self.app.shot_count + 1
if self._cut_order:
cut_order = self._cut_order
# update the frame range
sg_shot["sg_cut_order"] = cut_order
# get cut info
cut_info = self.get_cut_item_data()
head_in = cut_info["head_in"]
tail_out = cut_info["tail_out"]
cut_in = cut_info["cut_item_in"]
cut_out = cut_info["cut_item_out"]
cut_duration = cut_info["cut_item_duration"]
working_duration = cut_info["working_duration"]
self.app.log_debug("Head/Tail from Hiero: %s, %s" % (head_in, tail_out))
if self.isCollated():
if self.is_cut_length_export():
# nothing to do here. the default calculation above is enough.
self.app.log_debug("Exporting... collated, cut length.")
# Log cut length collate metric
try:
self.app.log_metric("Collate/Cut Length", log_version=True)
except:
                    # ignore any errors. ex: metrics logging not supported
pass
else:
self.app.log_debug("Exporting... collated, clip length.")
# NOTE: Hiero crashes when trying to collate with a
# custom start frame. so this will only work for source start
# frame.
# the head/in out values should be the first and last frames of
# the source, but they're not. they're actually the values we
# expect for the cut in/out.
cut_in = head_in
cut_out = tail_out
# ensure head/tail match the entire clip (clip length export)
head_in = 0
tail_out = self._clip.duration() - 1
# get the frame offset specified in the export options
start_frame = self._startFrame or 0
# account for a custom start frame if/when clip length collate
# works on custom start frame.
head_in += start_frame
tail_out += start_frame
cut_in += start_frame
cut_out += start_frame
# since we've set the head/tail, recalculate the working
# duration to make sure it is correct
working_duration = tail_out - head_in + 1
# since we've set the cut in/out, recalculate the cut duration
# to make sure it is correct
cut_duration = cut_out - cut_in + 1
# Log clip length collate metric
try:
self.app.log_metric("Collate/Clip Length", log_version=True)
except:
                    # ignore any errors. ex: metrics logging not supported
pass
else:
# regular export. values we have are good. just log it
if self.is_cut_length_export():
self.app.log_debug("Exporting... cut length.")
else:
# the cut in/out should already be correct here. just log
self.app.log_debug("Exporting... clip length.")
# update the frame range
sg_shot["sg_head_in"] = head_in
sg_shot["sg_cut_in"] = cut_in
sg_shot["sg_cut_out"] = cut_out
sg_shot["sg_tail_out"] = tail_out
sg_shot["sg_cut_duration"] = cut_duration
sg_shot["sg_working_duration"] = working_duration
# get status from the hiero tags
status = None
status_map = dict(self._preset.properties()["sg_status_hiero_tags"])
for tag in self._item.tags():
if tag.name() in status_map:
status = status_map[tag.name()]
break
if status:
sg_shot['sg_status_list'] = status
# get task template from the tags
template = None
template_map = dict(self._preset.properties()["task_template_map"])
for tag in self._item.tags():
if tag.name() in template_map:
template = self.app.tank.shotgun.find_one('TaskTemplate',
[['entity_type', 'is', shot_type],
['code', 'is', template_map[tag.name()]]])
break
        # if there is no associated template, assign the default template...
if template is None:
default_template = self.app.get_setting('default_task_template')
if default_template:
template = self.app.tank.shotgun.find_one('TaskTemplate',
[['entity_type', 'is', shot_type], ['code', 'is', default_template]])
if template is not None:
sg_shot['task_template'] = template
# commit the changes and update the thumbnail
self.app.log_debug("Updating info for %s %s: %s" % (shot_type, shot_id, str(sg_shot)))
self.app.tank.shotgun.update(shot_type, shot_id, sg_shot)
# create the directory structure
self.app.log_debug("Creating file system structure for %s %s..." % (shot_type, shot_id))
self.app.tank.create_filesystem_structure(shot_type, [shot_id])
# return without error
self.app.log_info("Updated %s %s" % (shot_type, self.shotName()))
# keep shot count
self.app.shot_count += 1
cut = None
# create the CutItem with the data populated by the shot processor
if hasattr(self, "_cut_item_data"):
cut_item_data = self._cut_item_data
cut_item = self.app.tank.shotgun.create("CutItem", cut_item_data)
self.app.log_info("Created CutItem in Shotgun: %s" % (cut_item,))
# update the object's cut item data to include the new info
self._cut_item_data.update(cut_item)
cut = cut_item["cut"]
# see if this task has been designated to update the Cut thumbnail
if cut and hasattr(self, "_create_cut_thumbnail"):
hiero_sequence = self._item.sequence()
try:
# see if we can find a poster frame for the sequence
thumbnail = hiero_sequence.thumbnail(hiero_sequence.posterFrame())
except Exception:
self.app.log_debug("No thumbnail found for the 'Cut'.")
else:
                # found one, upload it to Shotgun for the cut
self._upload_thumbnail_to_sg(cut, thumbnail)
# return false to indicate success
return False
def is_cut_length_export(self):
"""
Returns ``True`` if this task has the "Cut Length" option checked.
This is set by the shot processor.
"""
return hasattr(self, "_cut_length") and self._cut_length
class ShotgunShotUpdaterPreset(ShotgunHieroObjectBase, hiero.core.TaskPresetBase):
"""
Settings preset
"""
def __init__(self, name, properties):
hiero.core.TaskPresetBase.__init__(self, ShotgunShotUpdater, name)
self.properties().update(properties)
def supportedItems(self):
return hiero.core.TaskPresetBase.kAllItems
```
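The frame-range bookkeeping in `taskStep` above is easy to get wrong by one. Below is a minimal sketch (not part of the exporter; all values are hypothetical) of the inclusive-range arithmetic it relies on.
```python
# Minimal sketch of the inclusive frame-range arithmetic used by taskStep.
# All values here are hypothetical.
head_in, tail_out = 1001, 1120  # full working range, inclusive
cut_in, cut_out = 1009, 1112    # portion of the range used in the edit

working_duration = tail_out - head_in + 1  # 120 frames
cut_duration = cut_out - cut_in + 1        # 104 frames

assert head_in <= cut_in <= cut_out <= tail_out
```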
#### File: tk-houdini-alembicnode/v0.3.0/app.py
```python
import sgtk
class TkAlembicNodeApp(sgtk.platform.Application):
"""The Alembic Output Node."""
def init_app(self):
"""Initialize the app."""
tk_houdini_alembic = self.import_module("tk_houdini_alembicnode")
self.handler = tk_houdini_alembic.TkAlembicNodeHandler(self)
def convert_to_regular_alembic_nodes(self):
"""Convert Toolkit Alembic nodes to regular Alembic nodes.
Convert all Toolkit Alembic nodes found in the current script to
regular Alembic nodes. Additional Toolkit information will be stored in
user data named 'tk_*'
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-alembicnode"]
>>> app.convert_to_regular_alembic_nodes()
"""
self.log_debug(
"Converting Toolkit Alembic nodes to built-in Alembic nodes.")
tk_houdini_alembic = self.import_module("tk_houdini_alembicnode")
tk_houdini_alembic.TkAlembicNodeHandler.\
convert_to_regular_alembic_nodes(self)
def convert_back_to_tk_alembic_nodes(self):
"""Convert regular Alembic nodes back to Tooklit Alembic nodes.
Convert any regular Alembic nodes that were previously converted
from Tooklit Alembic nodes back into Toolkit Alembic nodes.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-alembicnode"]
>>> app.convert_back_to_tk_alembic_nodes()
"""
self.log_debug(
"Converting built-in Alembic nodes back to Toolkit Alembic nodes.")
tk_houdini_alembic = self.import_module("tk_houdini_alembicnode")
tk_houdini_alembic.TkAlembicNodeHandler.\
convert_back_to_tk_alembic_nodes(self)
def get_nodes(self):
"""
Returns a list of hou.node objects for each tk alembic node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-alembicnode"]
>>> tk_alembic_nodes = app.get_nodes()
"""
self.log_debug("Retrieving tk-houdini-alembic nodes...")
tk_houdini_alembic = self.import_module("tk_houdini_alembicnode")
nodes = tk_houdini_alembic.TkAlembicNodeHandler.\
get_all_tk_alembic_nodes()
self.log_debug("Found %s tk-houdini-alembic nodes." % (len(nodes),))
return nodes
def get_output_path(self, node):
"""
Returns the evaluated output path for the supplied node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-alembicnode"]
>>> output_path = app.get_output_path(tk_alembic_node)
"""
self.log_debug("Retrieving output path for %s" % (node,))
tk_houdini_alembic = self.import_module("tk_houdini_alembicnode")
output_path = tk_houdini_alembic.TkAlembicNodeHandler.\
get_output_path(node)
self.log_debug("Retrieved output path: %s" % (output_path,))
return output_path
def get_work_file_template(self):
"""
Returns the configured work file template for the app.
"""
return self.get_template("work_file_template")
```
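The methods above compose naturally. Here is a minimal sketch (assuming the app is installed in the current engine) that lists the evaluated output path of every Toolkit Alembic node in the session:
```python
# Minimal sketch: list output paths for all Toolkit Alembic nodes.
# Assumes a running tk-houdini engine with this app installed.
import sgtk

engine = sgtk.platform.current_engine()
app = engine.apps.get("tk-houdini-alembicnode")
if app:
    for node in app.get_nodes():
        print "%s -> %s" % (node.path(), app.get_output_path(node))
```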
#### File: tk-houdini-mantranode/v0.3.0/app.py
```python
import sgtk
class TkMantraNodeApp(sgtk.platform.Application):
"""The Mantra Output Node."""
def init_app(self):
"""Initialize the app."""
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
self.handler = tk_houdini_mantra.TkMantraNodeHandler(self)
def convert_to_regular_mantra_nodes(self):
"""Convert Toolkit Mantra nodes to regular Mantra nodes.
        Convert all Toolkit Mantra nodes found in the current script to
regular Mantra nodes. Additional Toolkit information will be stored in
user data named 'tk_*'
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_to_regular_mantra_nodes()
"""
self.log_debug(
"Converting Toolkit Mantra nodes to built-in Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_to_regular_mantra_nodes(self)
def convert_back_to_tk_mantra_nodes(self):
"""Convert regular Mantra nodes back to Toolkit Mantra nodes.
Convert any regular Mantra nodes that were previously converted
from Toolkit Mantra nodes back into Toolkit Mantra nodes.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_back_to_tk_mantra_nodes()
"""
self.log_debug(
"Converting built-in Mantra nodes back to Toolkit Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_back_to_tk_mantra_nodes(self)
def get_nodes(self):
"""
Returns a list of hou.node objects for each tk mantra node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> tk_mantra_nodes = app.get_nodes()
"""
self.log_debug("Retrieving tk-houdini-mantra nodes...")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
nodes = tk_houdini_mantra.TkMantraNodeHandler.\
get_all_tk_mantra_nodes()
self.log_debug("Found %s tk-houdini-mantra nodes." % (len(nodes),))
return nodes
def get_output_path(self, node):
"""
Returns the evaluated output path for the supplied node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> output_path = app.get_output_path(tk_mantra_node)
"""
self.log_debug("Retrieving output path for %s" % (node,))
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
output_path = tk_houdini_mantra.TkMantraNodeHandler.\
get_output_path(node)
self.log_debug("Retrieved output path: %s" % (output_path,))
return output_path
def get_work_file_template(self):
"""
Returns the configured work file template for the app.
"""
return self.get_template("work_file_template")
```
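A common reason for the conversion methods is handing a hip file to someone without Toolkit. A minimal sketch of that round trip (the save path is hypothetical):
```python
# Minimal sketch: convert to built-in nodes, save a Toolkit-free copy,
# then restore the Toolkit nodes. The save path is hypothetical.
import hou
import sgtk

engine = sgtk.platform.current_engine()
app = engine.apps.get("tk-houdini-mantranode")
if app:
    app.convert_to_regular_mantra_nodes()
    hou.hipFile.save("/tmp/handoff.hip")  # opens cleanly without Toolkit
    app.convert_back_to_tk_mantra_nodes()
```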
#### File: tk-multi-publish2/basic/collector.py
```python
import os
import hou
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
# A dict of dicts organized by category, type and output file parm
_HOUDINI_OUTPUTS = {
# rops
hou.ropNodeTypeCategory(): {
"alembic": "filename", # alembic cache
"comp": "copoutput", # composite
"ifd": "vm_picture", # mantra render node
"opengl": "picture", # opengl render
"wren": "wr_picture", # wren wireframe
},
}
class HoudiniSessionCollector(HookBaseClass):
"""
Collector that operates on the current houdini session. Should inherit from
the basic collector hook.
"""
@property
def settings(self):
"""
Dictionary defining the settings that this collector expects to receive
through the settings parameter in the process_current_session and
process_file methods.
        A dictionary of the following form::
            {
                "Settings Name": {
                    "type": "settings_type",
                    "default": "default_value",
                    "description": "One line description of the setting"
                }
            }
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
# grab any base class settings
collector_settings = super(HoudiniSessionCollector, self).settings or {}
# settings specific to this collector
houdini_session_settings = {
"Work Template": {
"type": "template",
"default": None,
"description": "Template path for artist work files. Should "
"correspond to a template defined in "
"templates.yml. If configured, is made available"
"to publish plugins via the collected item's "
"properties. ",
},
}
# update the base settings with these settings
collector_settings.update(houdini_session_settings)
return collector_settings
def process_current_session(self, settings, parent_item):
"""
Analyzes the current Houdini session and parents a subtree of items
under the parent_item passed in.
:param dict settings: Configured settings for this collector
:param parent_item: Root item instance
"""
# create an item representing the current houdini session
item = self.collect_current_houdini_session(settings, parent_item)
# remember if we collect any alembic/mantra nodes
self._alembic_nodes_collected = False
self._mantra_nodes_collected = False
# methods to collect tk alembic/mantra nodes if the app is installed
self.collect_tk_alembicnodes(item)
self.collect_tk_mantranodes(item)
# collect other, non-toolkit outputs to present for publishing
self.collect_node_outputs(item)
def collect_current_houdini_session(self, settings, parent_item):
"""
Creates an item that represents the current houdini session.
:param dict settings: Configured settings for this collector
:param parent_item: Parent Item instance
:returns: Item of type houdini.session
"""
publisher = self.parent
# get the path to the current file
path = hou.hipFile.path()
# determine the display name for the item
if path:
file_info = publisher.util.get_file_path_components(path)
display_name = file_info["filename"]
else:
display_name = "Current Houdini Session"
# create the session item for the publish hierarchy
session_item = parent_item.create_item(
"houdini.session",
"Houdini File",
display_name
)
# get the icon path to display for this item
icon_path = os.path.join(
self.disk_location,
os.pardir,
"icons",
"houdini.png"
)
session_item.set_icon_from_path(icon_path)
# if a work template is defined, add it to the item properties so that
# it can be used by attached publish plugins
work_template_setting = settings.get("Work Template")
if work_template_setting:
work_template = publisher.engine.get_template_by_name(
work_template_setting.value)
# store the template on the item for use by publish plugins. we
# can't evaluate the fields here because there's no guarantee the
# current session path won't change once the item has been created.
# the attached publish plugins will need to resolve the fields at
# execution time.
session_item.properties["work_template"] = work_template
self.logger.debug(
"Work template defined for Houdini collection.")
self.logger.info("Collected current Houdini session")
return session_item
def collect_node_outputs(self, parent_item):
"""
Creates items for known output nodes
:param parent_item: Parent Item instance
"""
for node_category in _HOUDINI_OUTPUTS:
for node_type in _HOUDINI_OUTPUTS[node_category]:
if node_type == "alembic" and self._alembic_nodes_collected:
self.logger.debug(
"Skipping regular alembic node collection since tk "
"alembic nodes were collected. "
)
continue
if node_type == "ifd" and self._mantra_nodes_collected:
self.logger.debug(
"Skipping regular mantra node collection since tk "
"mantra nodes were collected. "
)
continue
path_parm_name = _HOUDINI_OUTPUTS[node_category][node_type]
# get all the nodes for the category and type
nodes = hou.nodeType(node_category, node_type).instances()
# iterate over each node
for node in nodes:
# get the evaluated path parm value
path = node.parm(path_parm_name).eval()
# ensure the output path exists
if not os.path.exists(path):
continue
self.logger.info(
"Processing %s node: %s" % (node_type, node.path()))
# allow the base class to collect and create the item. it
# should know how to handle the output path
item = super(HoudiniSessionCollector, self)._collect_file(
parent_item,
path,
frame_sequence=True
)
# the item has been created. update the display name to
# include the node path to make it clear to the user how it
# was collected within the current session.
item.name = "%s (%s)" % (item.name, node.path())
def collect_tk_alembicnodes(self, parent_item):
"""
Checks for an installed `tk-houdini-alembicnode` app. If installed, will
search for instances of the node in the current session and create an
item for each one with an output on disk.
:param parent_item: The item to parent new items to.
"""
publisher = self.parent
engine = publisher.engine
alembicnode_app = engine.apps.get("tk-houdini-alembicnode")
if not alembicnode_app:
self.logger.debug(
"The tk-houdini-alembicnode app is not installed. "
"Will not attempt to collect those nodes."
)
return
try:
tk_alembic_nodes = alembicnode_app.get_nodes()
except AttributeError, e:
self.logger.warning(
"Unable to query the session for tk-houdini-alembicnode "
"instances. It looks like perhaps an older version of the "
"app is in use which does not support querying the nodes. "
"Consider updating the app to allow publishing their outputs."
)
return
# retrieve the work file template defined by the app. we'll set this
# on the collected alembicnode items for use during publishing.
work_template = alembicnode_app.get_work_file_template()
for node in tk_alembic_nodes:
out_path = alembicnode_app.get_output_path(node)
if not os.path.exists(out_path):
continue
self.logger.info(
"Processing sgtk_alembic node: %s" % (node.path(),))
# allow the base class to collect and create the item. it
# should know how to handle the output path
item = super(HoudiniSessionCollector, self)._collect_file(
parent_item, out_path)
# the item has been created. update the display name to
# include the node path to make it clear to the user how it
# was collected within the current session.
item.name = "%s (%s)" % (item.name, node.path())
if work_template:
item.properties["work_template"] = work_template
self._alembic_nodes_collected = True
def collect_tk_mantranodes(self, parent_item):
"""
Checks for an installed `tk-houdini-mantranode` app. If installed, will
search for instances of the node in the current session and create an
item for each one with an output on disk.
:param parent_item: The item to parent new items to.
"""
publisher = self.parent
engine = publisher.engine
mantranode_app = engine.apps.get("tk-houdini-mantranode")
if not mantranode_app:
self.logger.debug(
"The tk-houdini-mantranode app is not installed. "
"Will not attempt to collect those nodes."
)
return
try:
tk_mantra_nodes = mantranode_app.get_nodes()
except AttributeError, e:
self.logger.warning(
"Unable to query the session for tk-houdini-mantranode "
"instances. It looks like perhaps an older version of the "
"app is in use which does not support querying the nodes. "
"Consider updating the app to allow publishing their outputs."
)
return
# retrieve the work file template defined by the app. we'll set this
        # on the collected mantranode items for use during publishing.
work_template = mantranode_app.get_work_file_template()
for node in tk_mantra_nodes:
out_path = mantranode_app.get_output_path(node)
if not os.path.exists(out_path):
continue
self.logger.info(
"Processing sgtk_mantra node: %s" % (node.path(),))
# allow the base class to collect and create the item. it
# should know how to handle the output path
item = super(HoudiniSessionCollector, self)._collect_file(
parent_item,
out_path,
frame_sequence=True
)
# the item has been created. update the display name to
# include the node path to make it clear to the user how it
# was collected within the current session.
item.name = "%s (%s)" % (item.name, node.path())
if work_template:
item.properties["work_template"] = work_template
self._mantra_nodes_collected = True
```
#### File: python/tk_houdini_basic/plugin_bootstrap.py
```python
import os
import sys
def bootstrap(plugin_root_path):
"""
Entry point for toolkit bootstrap in houdini.
Called by the basic/startup/pythonX.Xlibs/pythonrc.py file.
:param str plugin_root_path: Path to the root folder of the plugin
"""
# --- Import Core ---
#
# - If we are running the plugin built as a stand-alone unit,
# try to retrieve the path to sgtk core and add that to the pythonpath.
# When the plugin has been built, there is a sgtk_plugin_basic_houdini
# module which we can use to retrieve the location of core and add it
# to the pythonpath.
# - If we are running toolkit as part of a larger zero config workflow
# and not from a standalone workflow, we are running the plugin code
# directly from the engine folder without a bundle cache and with this
# configuration, core already exists in the pythonpath.
# now see if we are running stand alone or in situ
try:
from sgtk_plugin_basic_houdini import manifest
running_stand_alone = True
except ImportError:
manifest = None
running_stand_alone = False
if running_stand_alone:
# running stand alone. import core from the manifest's core path and
# extract the plugin info from the manifest
# Retrieve the Shotgun toolkit core included with the plug-in and
# prepend its python package path to the python module search path.
# this will allow us to import sgtk
tk_core_python_path = manifest.get_sgtk_pythonpath(plugin_root_path)
sys.path.insert(0, tk_core_python_path)
# plugin info from the manifest
plugin_id = manifest.plugin_id
base_config = manifest.base_configuration
# get the path to the built plugin's bundle cache
bundle_cache = os.path.join(plugin_root_path, "bundle_cache")
else:
# running in situ as part of zero config. sgtk has already added sgtk
# to the python path. need to extract the plugin info from info.yml
# import the yaml parser
from tank_vendor import yaml
# build the path to the info.yml file
plugin_info_yml = os.path.join(plugin_root_path, "info.yml")
# open the yaml file and read the data
with open(plugin_info_yml, "r") as plugin_info_fh:
plugin_info = yaml.load(plugin_info_fh)
base_config = plugin_info["base_configuration"]
plugin_id = plugin_info["plugin_id"]
# no bundle cache in in situ mode
bundle_cache = None
# ---- now we have everything needed to bootstrap. finish initializing the
# manager and logger, authenticate, then bootstrap the engine.
import sgtk
# start logging to log file
sgtk.LogManager().initialize_base_file_handler("tk-houdini")
# get a logger for the plugin
sgtk_logger = sgtk.LogManager.get_logger("plugin")
sgtk_logger.debug("Booting up toolkit plugin.")
try:
# When the user is not yet authenticated, pop up the Shotgun login
# dialog to get the user's credentials, otherwise, get the cached user's
# credentials.
user = sgtk.authentication.ShotgunAuthenticator().get_user()
except sgtk.authentication.AuthenticationCancelled:
# TODO: show a "Shotgun > Login" menu in houdini
sgtk_logger.info("Shotgun login was cancelled by the user.")
return
    # Create a bootstrap manager for the logged in user with the plug-in
# configuration data.
toolkit_mgr = sgtk.bootstrap.ToolkitManager(user)
toolkit_mgr.base_configuration = base_config
toolkit_mgr.plugin_id = plugin_id
# include the bundle cache as a fallback if supplied
if bundle_cache:
toolkit_mgr.bundle_cache_fallback_paths = [bundle_cache]
# Retrieve the Shotgun entity type and id when they exist in the
# environment. These are passed down through the app launcher when running
# in zero config
entity = toolkit_mgr.get_entity_from_environment()
sgtk_logger.debug("Will launch the engine with entity: %s" % entity)
# set up a simple progress reporter
toolkit_mgr.progress_callback = bootstrap_progress_callback
# start engine
sgtk_logger.info("Bootstrapping the Shotgun engine for Houdini...")
toolkit_mgr.bootstrap_engine("tk-houdini", entity)
sgtk_logger.debug("Bootstrap complete.")
def bootstrap_progress_callback(progress_value, message):
"""
Called whenever toolkit reports progress.
:param float progress_value: The current progress value. Values will be
reported in incremental order and always in the range 0.0 to 1.0
:param str message: Progress message string
"""
print "Bootstrap progress %s%%: %s" % (int(progress_value * 100), message)
```
#### File: python/tk_houdini/menu_action.py
```python
import sys
import hou
import tank.platform.engine
def error(msg):
if hou.isUIAvailable():
hou.ui.displayMessage(msg)
else:
print msg
cmd_id = sys.argv[1]
engine = tank.platform.engine.current_engine()
if engine is None or not hasattr(engine, 'launch_command'):
error("Shotgun: Houdini engine is not loaded")
else:
engine.launch_command(cmd_id)
```
#### File: tk-houdini/v1.3.0/startup.py
```python
import os
import sys
import sgtk
from sgtk.platform import SoftwareLauncher, SoftwareVersion, LaunchInformation
class HoudiniLauncher(SoftwareLauncher):
"""
Handles launching Houdini executables. Automatically starts up a tk-houdini
engine with the current context in the new session of Houdini.
"""
# A lookup to map an executable name to a product. This is critical for
# windows and linux where the product does not show up in the path.
EXECUTABLE_TO_PRODUCT = {
"houdini": "Houdini",
"hescape": "Houdini",
"happrentice": "Houdini Apprentice",
"houdinicore": "Houdini Core",
"houdinifx": "Houdini FX",
"hindie": "Houdini Indie",
}
# Named regex strings to insert into the executable template paths when
# matching against supplied versions and products. Similar to the glob
# strings, these allow us to alter the regex matching for any of the
# variable components of the path in one place
COMPONENT_REGEX_LOOKUP = {
"version": "[\d.]+",
"product": "[\w\s]+",
"executable": "[\w]+",
"version_back": "[\d.]+",
}
# This dictionary defines a list of executable template strings for each
# of the supported operating systems. The templates are used for both
# globbing and regex matches by replacing the named format placeholders
    # with an appropriate glob or regex string. As Side FX adds or modifies the
# install path on a given OS for a new release, a new template will need
# to be added here.
EXECUTABLE_TEMPLATES = {
"darwin": [
# /Applications/Houdini 15.5.565/Houdini.app
"/Applications/Houdini {version}/{product}.app",
# /Applications/Houdini/Houdini16.0.504.20/Houdini Core 16.0.504.20.app
"/Applications/Houdini/Houdini{version}/{product} {version_back}.app",
],
"win32": [
# C:\Program Files\Side Effects Software\Houdini 15.5.565\bin\houdinifx.exe
"C:/Program Files/Side Effects Software/Houdini {version}/bin/{executable}.exe",
],
"linux2": [
# example path: /opt/hfs14.0.444/bin/houdinifx
"/opt/hfs{version}/bin/{executable}",
]
}
@property
def minimum_supported_version(self):
"""The minimum supported Houdini version."""
return "14.0"
def prepare_launch(self, exec_path, args, file_to_open=None):
"""
Prepares the given software for launch
:param str exec_path: Path to DCC executable to launch
:param str args: Command line arguments as strings
:param str file_to_open: (optional) Full path name of a file to open on
launch
:returns: :class:`LaunchInformation` instance
"""
# construct the path to the engine's python directory and add it to sys
# path. this provides us access to the bootstrap module which contains
# helper methods for constructing the proper environment based on the
        # bootstrap scenario.
tk_houdini_python_path = os.path.join(
self.disk_location,
"python",
)
sys.path.insert(0, tk_houdini_python_path)
from tk_houdini import bootstrap
# Check the engine settings to see whether any plugins have been
# specified to load.
launch_plugins = self.get_setting("launch_builtin_plugins")
if launch_plugins:
# Prepare the launch environment with variables required by the
# plugin bootstrap.
self.logger.debug("Launch plugins: %s" % (launch_plugins,))
required_env = bootstrap.get_plugin_startup_env(launch_plugins)
# Add context and site info
required_env.update(self.get_standard_plugin_environment())
else:
# pull the env var names from the bootstrap module
engine_env = bootstrap.g_sgtk_engine_env
context_env = bootstrap.g_sgtk_context_env
# Prepare the launch environment with variables required by the
# classic bootstrap.
required_env = bootstrap.get_classic_startup_env()
required_env[engine_env] = self.engine_name
required_env[context_env] = sgtk.context.serialize(self.context)
# populate the file to open env. Note this env variable name existed
# pre software launch setup.
if file_to_open:
file_to_open_env = bootstrap.g_sgtk_file_to_open_env
required_env[file_to_open_env] = file_to_open
self.logger.debug("Launch environment: %s" % (required_env,))
return LaunchInformation(exec_path, args, required_env)
def scan_software(self):
"""
Scan the filesystem for houdini executables.
:return: A list of :class:`SoftwareVersion` objects.
"""
self.logger.debug("Scanning for Houdini executables...")
supported_sw_versions = []
for sw_version in self._find_software():
(supported, reason) = self._is_supported(sw_version)
if supported:
supported_sw_versions.append(sw_version)
else:
self.logger.debug(
"SoftwareVersion %s is not supported: %s" %
(sw_version, reason)
)
return supported_sw_versions
def _find_software(self):
# use the bundled engine icon
icon_path = os.path.join(
self.disk_location,
"icon_256.png"
)
self.logger.debug("Using icon path: %s" % (icon_path,))
# all the executable templates for the current OS
executable_templates = self.EXECUTABLE_TEMPLATES.get(sys.platform, [])
# all the discovered executables
sw_versions = []
for executable_template in executable_templates:
self.logger.debug("Processing template %s.", executable_template)
executable_matches = self._glob_and_match(
executable_template,
self.COMPONENT_REGEX_LOOKUP
)
# Extract all products from that executable.
for (executable_path, key_dict) in executable_matches:
# extract the matched keys form the key_dict (default to None if
# not included)
executable_version = key_dict.get("version")
executable_product = key_dict.get("product")
executable_name = key_dict.get("executable")
# we need a product to match against. If that isn't provided,
# then an executable name should be available. We can map that
# to the proper product.
if not executable_product:
executable_product = \
self.EXECUTABLE_TO_PRODUCT.get(executable_name)
# only include the products that are covered in the EXECUTABLE_TO_PRODUCT dict
if executable_product is None or executable_product not in self.EXECUTABLE_TO_PRODUCT.values():
self.logger.debug(
"Product '%s' is unrecognized. Skipping." %
(executable_product,)
)
continue
sw_versions.append(
SoftwareVersion(
executable_version,
executable_product,
executable_path,
icon_path
)
)
return sw_versions
```
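The interaction between `EXECUTABLE_TEMPLATES` and `COMPONENT_REGEX_LOOKUP` is handled by `_glob_and_match` in the `SoftwareLauncher` base class. The sketch below only illustrates the underlying idea of turning a template into a regex with named groups:
```python
# Illustrative only: how a template plus the regex lookup could yield
# named matches. The real implementation lives in SoftwareLauncher.
import re

template = "/opt/hfs{version}/bin/{executable}"
lookup = {"version": r"[\d.]+", "executable": r"[\w]+"}

# replace each {key} placeholder with a named capture group
pattern = template
for key, expr in lookup.items():
    pattern = pattern.replace("{%s}" % key, "(?P<%s>%s)" % (key, expr))

match = re.match(pattern, "/opt/hfs16.0.504/bin/houdinifx")
if match:
    print match.groupdict()  # {'version': '16.0.504', 'executable': 'houdinifx'}
```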
#### File: v1.2.0/hooks/get_project_creation_args.py
```python
import sgtk
from sgtk import TankError
import mari
HookBaseClass = sgtk.get_hook_baseclass()
class GetArgsHook(HookBaseClass):
def get_project_creation_args(self, sg_publish_data):
"""
Get the arguments to use when creating a new project from a selection
of Published geometry files.
Further details about these arguments can be found in the Mari api
documentation (Help->SDK->Python->Documentation from the Mari menu)
:param sg_publish_data: A list of the Shotgun publish records that will
be loaded to initialize the new project.
:returns: A dictionary of creation args that should contain
any of the following entries:
'channels_to_create'
- Details of any channels that should be created
in the new project.
'channels_to_import'
- Details of any channels to be imported into the
new project
'project_meta_options'
- Options to use when importing geometry from the
published files
'objects_to_load'
- Specific objects to be loaded from the published
files.
"""
creation_args = {}
# lets use default channels:
creation_args["channels_to_create"] = []
creation_args["channels_to_import"] = []
# define the options to be used for the geometry import:
#
project_meta_options = {}
        # prefer UVs (UDIM) over ptex
project_meta_options["MappingScheme"] = mari.projects.UV_OR_PTEX
# create selection sets from face groups based on shader assignments
project_meta_options["CreateSelectionSets"] = mari.geo.SELECTION_GROUPS_CREATE_FROM_FACE_GROUPS
# merge nodes within file but not all geometry into a single mesh
project_meta_options["MergeType"] = mari.geo.MERGETYPE_JUST_MERGE_NODES
creation_args["project_meta_options"] = project_meta_options
# specific objects to load from within geometry files - default (None)
# will load everything
creation_args["objects_to_load"] = None
return creation_args
```
#### File: python/tk_mari_projectmanager/project_manager.py
```python
from .async_worker import AsyncWorker
import sgtk
from sgtk import TankError
from sgtk.platform.qt import QtGui
import mari
from new_project_form import NewProjectForm
class ProjectManager(object):
"""
Handle all Mari project management
"""
def __init__(self, app):
"""
Construction
:param app: The Application instance that created this instance
"""
self._app = app
self.__new_project_publishes = []
self.__project_name_template = self._app.get_template("template_new_project_name")
def create_new_project(self, name_part, sg_publish_data):
"""
Create a new project in the current Toolkit context and seed it
with the specified geometry
        :param name_part: The name to use in the project_name template when
generating the project name
:param sg_publish_data: List of the initial geometry publishes to load for
into the new project. Each entry in the list is a
Shotgun entity dictionary
:returns: The new Mari project instance if successful or None
if not
:raises: TankError if something went wrong at any stage!
"""
# create the project name:
name_result = self._generate_new_project_name(name_part)
project_name = name_result.get("project_name")
if not project_name:
raise TankError("Failed to determine the project name: %s" % name_result.get("message"))
# use a hook to retrieve the project creation settings to use:
hook_res = {}
try:
hook_res = self._app.execute_hook_method("get_project_creation_args_hook",
"get_project_creation_args",
sg_publish_data = sg_publish_data)
if hook_res == None:
hook_res = {}
elif not isinstance(hook_res, dict):
raise TankError("get_project_creation_args_hook returned unexpected type!")
except TankError, e:
raise TankError("Failed to get project creation args from hook: %s" % e)
except Exception, e:
self._app.log_exception("Failed to get project creation args from hook!")
raise TankError("Failed to get project creation args from hook: %s" % e)
# extract the options from the hook result:
channels_to_create = hook_res.get("channels_to_create", [])
channels_to_import = hook_res.get("channels_to_import", [])
project_meta_options = hook_res.get("project_meta_options", {})
objects_to_load = hook_res.get("objects_to_load", [])
# and create the project using the tk-mari engine helper method:
new_project = self._app.engine.create_project(project_name,
sg_publish_data,
channels_to_create = channels_to_create,
channels_to_import = channels_to_import,
project_meta_options = project_meta_options,
objects_to_load = objects_to_load)
try:
hook_res = self._app.execute_hook_method("post_project_creation_hook",
"post_project_creation",
sg_publish_data = sg_publish_data)
if hook_res == None:
hook_res = {}
elif not isinstance(hook_res, dict):
raise TankError("post_project_creation_hook returned unexpected type!")
except TankError, e:
raise TankError("Failed to post project creation from hook: %s" % e)
except Exception, e:
self._app.log_exception("Failed to post project creation from hook!")
raise TankError("Failed to post project creation from hook: %s" % e)
return new_project
def show_new_project_dialog(self):
"""
Show the new project dialog
"""
self.__new_project_publishes = []
default_name = self._app.get_setting("default_project_name")
# create a background worker that will be responsible for updating
# the project name preview as the user enters a name.
worker_cb = lambda name: self._generate_new_project_name(name)
preview_updater = AsyncWorker(worker_cb)
try:
preview_updater.start()
# show modal dialog:
res, new_project_form = self._app.engine.show_modal("New Project", self._app, NewProjectForm,
self._app, self._init_new_project_form,
preview_updater, default_name)
finally:
# wait for the background thread to finish!
preview_updater.stop()
def _generate_new_project_name(self, name):
"""
Generate the new project name using the current context, the provided name and the
project name template defined for the app.
:param name: The name the user entered
:returns: Dictionary containing "message" and/or "project_name". If the project
name can't be determined then the message should be populated with
the reason why
"""
if not name:
return {"message":"Please enter a name!"}
if not self.__project_name_template.keys["name"].validate(name):
return {"message":"Your name contains illegal characters!"}
project_name = None
try:
            # get fields from the current context:
fields = self._app.context.as_template_fields(self.__project_name_template)
# add in the name:
fields["name"] = name
# try to create the project name:
project_name = self.__project_name_template.apply_fields(fields)
except TankError, e:
return {"message":"Failed to create project name!"}
if project_name in mari.projects.names():
return {"message":"A project with this name already exists!"}
return {"project_name":project_name}
def _init_new_project_form(self, new_project_form):
"""
Initialise the new project form after it's been created
:param new_project_form: The new project form to initialise
"""
# connect to signals:
new_project_form.create_project.connect(self._on_create_new_project)
new_project_form.browse_publishes.connect(self._on_browse_for_publishes)
new_project_form.remove_publishes.connect(self._on_remove_publishes)
def _on_remove_publishes(self, new_project_form, publish_ids):
"""
Called when user interaction has requested that publishes be removed
from the publish list:
:param new_project_form: The new project form to initialise
:param publish_ids: List of publish ids to remove
"""
# remove publishes from the list:
publishes = []
for publish in self.__new_project_publishes:
if publish["id"] in publish_ids:
continue
publishes.append(publish)
self.__new_project_publishes = publishes
# update the list to reflect changes:
new_project_form.update_publishes(self.__new_project_publishes)
def _on_browse_for_publishes(self, new_project_form):
"""
Called when the user clicks the 'Add Publishes' button in the new
project form. Opens the loader so that the user can select a publish
to be loaded into the new project.
:param new_project_form: The new project form that the button was
clicked in
"""
loader_app = self._app.engine.apps.get("tk-multi-loader2")
if not loader_app:
raise TankError("The tk-multi-loader2 app needs to be available to browse for publishes!")
# browse for publishes:
publish_types = self._app.get_setting("publish_types")
selected_publishes = loader_app.open_publish("Select Published Geometry", "Select", publish_types)
# make sure we keep this list of publishes unique:
current_ids = set([p["id"] for p in self.__new_project_publishes])
for sg_publish in selected_publishes:
publish_id = sg_publish.get("id")
if publish_id != None and publish_id not in current_ids:
current_ids.add(publish_id)
self.__new_project_publishes.append(sg_publish)
# update new project form with selected geometry:
new_project_form.update_publishes(self.__new_project_publishes)
def _on_create_new_project(self, new_project_form):
"""
Called when the user clicks the 'Create Project' button in the new project
form. This will create the project and close the form if successful, otherwise
it will display a message box with the reason the project wasn't created.
:param new_project_form: The new project form that the button was clicked
in
"""
try:
name = new_project_form.project_name
if self.create_new_project(name, self.__new_project_publishes):
new_project_form.close()
except TankError, e:
QtGui.QMessageBox.information(new_project_form, "Failed to create new project!", "%s" % e)
except Exception, e:
QtGui.QMessageBox.information(new_project_form, "Failed to create new project!", "%s" % e)
self._app.log_exception("Failed to create new project!")
```
#### File: python/tk_mari_projectmanager/publish_list_view.py
```python
import os
from datetime import datetime, timedelta
import sgtk
from sgtk.platform.qt import QtCore, QtGui
browser_widget = sgtk.platform.import_framework("tk-framework-widget", "browser_widget")
class PublishListView(browser_widget.BrowserWidget):
"""
UI for displaying a list of snapshot items
"""
# signal emitted when the user is requesting that publishes be removed from the list
remove_publishes = QtCore.Signal(list)
def __init__(self, parent=None):
"""
Construction
:param parent: The parent QWidget
"""
browser_widget.BrowserWidget.__init__(self, parent)
# tweak style
self.title_style = "none"
self.enable_search(False)
self.enable_multi_select(True)
self.set_label("")
self.ui.browser_header.setVisible(False)
# cache of publish images that we've looked up from Shotgun. Used to
# avoid unnecessary lookups.
self.__publish_images = {}
# add right-click menu to remove items:
self.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
remove_action = QtGui.QAction("Remove Selected Publishes", self)
self.addAction(remove_action)
remove_action.triggered[()].connect(self._on_remove_selected_publishes)
def keyPressEvent(self, event):
"""
Executed when a key is pressed by the user whilst the list view has focus
:param event: The key press event details
"""
if event.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
self._on_remove_selected_publishes()
else:
# call the base class implementation:
browser_widget.BrowserWidget.keyPressEvent(self, event)
def _on_remove_selected_publishes(self):
"""
Called when something has requested that selected publishes be removed
from the list.
"""
publish_ids = []
for list_item in self.get_selected_items():
publish_ids.append(list_item.publish_id)
if publish_ids:
self.remove_publishes.emit(publish_ids)
def get_data(self, data):
"""
Threaded - retrieve the data that will be used to populate the
list view
:param data: Information that can be used to retrieve the data
"""
if data:
            # re-retrieve the thumbnails for the publishes as the ones linked
# from the publish data might have expired!
publishes = dict([(p["id"], p) for p in data])
# build a list of publish ids that we need to fetch details for:
ids_to_fetch = []
for id, publish in publishes.iteritems():
if id in self.__publish_images:
if self.__publish_images[id]:
publish["image"] = self.__publish_images[id]
else:
ids_to_fetch.append(id)
if ids_to_fetch:
# query Shotgun for the publish details:
pf_type = data[0]["type"]
filters = [["id", "in", ids_to_fetch]]
fields = ["image"]
try:
sg_res = self._app.shotgun.find(pf_type, filters, fields)
# update publishes:
for id, image in [(r["id"], r.get("image")) for r in sg_res]:
self.__publish_images[id] = image
if not image:
continue
publishes[id]["image"] = image
except:
pass
return data
def process_result(self, result):
"""
Process worker result on main thread - can create list items here.
:param result: The result passed through from the get_data method
"""
for sg_publish in result:
list_item = self.add_item(browser_widget.ListItem)
list_item.publish_id = sg_publish["id"]
thumbnail_path = sg_publish.get("image")
name = sg_publish.get("name")
version = sg_publish.get("version_number")
entity_type = sg_publish.get("entity", {}).get("type")
entity_name = sg_publish.get("entity", {}).get("name")
task_name = sg_publish.get("task.Task.content")
if thumbnail_path:
list_item.set_thumbnail(thumbnail_path)
line_1 = "<b>%s v%03d</b>" % (name, version)
line_2 = "%s %s, %s" % (entity_type, entity_name, task_name)
list_item.set_details("<br>".join([line_1, line_2]))
```
#### File: python/tk_mari/geometry.py
```python
import sgtk
from sgtk import TankError
import os
import mari
from .metadata import MetadataManager
from .utils import update_publish_records, get_publish_type_field
class GeometryManager(object):
"""
Provides various utility methods that deal with Mari geometry
"""
def __init__(self):
"""
Construction
"""
self.__md_mgr = MetadataManager()
def find_geometry_for_publish(self, sg_publish):
"""
Find the geometry and version instances for the specified publish if it exists in
the current project
:param sg_publish: The Shotgun publish to find geo for. This should be a Shotgun
entity dictionary containing at least the entity "type" and "id".
:returns: Tuple containing the geo and version that match the publish
if found.
"""
engine = sgtk.platform.current_bundle()
# ensure that sg_publish contains the information we need:
publish_type_field = get_publish_type_field()
update_publish_records([sg_publish], ["project", "entity", "task", "name", publish_type_field])
publish_entity = sg_publish["entity"]
publish_task = sg_publish["task"]
# enumerate through all geometry in project that has Shotgun metadata:
found_geo = None
sg_publish_version_ids = None
for geo, entity, task in [(g.get("geo"), g.get("entity"), g.get("task")) for g in self.list_geometry()]:
if not geo:
continue
# we can rule out the entire geo if it doesn't match the entity and/or task:
if publish_entity and entity:
if entity["type"] != publish_entity["type"] or entity["id"] != publish_entity["id"]:
continue
if publish_task and task:
if task["id"] != publish_task["id"]:
continue
# enumerate through all versions of this geo that have Shotgun metadata:
matches_geo = False
for version_item in self.list_geometry_versions(geo):
geo_version = version_item.get("geo_version")
if not geo_version:
continue
geo_version_publish_id = version_item.get("publish_id")
if geo_version_publish_id == None:
# can't do much without a publish id!
continue
if geo_version_publish_id == sg_publish["id"]:
# perfect match!
return (geo, geo_version)
elif not matches_geo:
# didn't find an exact match so lets see if this is
# is a different version of the publish
if sg_publish_version_ids == None:
# get a list of publish versions from Shotgun that match sg_publish:
filters = [["project", "is", sg_publish["project"]]]
if publish_entity:
filters.append(["entity", "is", publish_entity])
if publish_task:
filters.append(["task", "is", publish_task])
if sg_publish["name"]:
filters.append(["name", "is", sg_publish["name"]])
if sg_publish[publish_type_field]:
filters.append([publish_type_field, "is", sg_publish[publish_type_field]])
sg_res = []
try:
sg_res = engine.shotgun.find(sg_publish["type"], filters, [])
except Exception, e:
raise TankError("Failed to query publish versions for publish '%s': %s"
% (sg_publish["name"], e))
sg_publish_version_ids = set([res["id"] for res in sg_res])
if geo_version_publish_id in sg_publish_version_ids:
# found a matching publish that just differs by version
matches_geo = True
if matches_geo:
# didn't find an exact match for the version but did find the geo
# so return that instead:
return (geo, None)
# didn't find a match :(
return (None, None)
def list_geometry(self):
"""
Find all Shotgun aware geometry in the scene. Any non-Shotgun aware geometry is ignored!
:returns: A list of dictionaries containing the geo together with any Shotgun metadata
that was found on it
"""
all_geo = []
for geo in mari.geo.list():
metadata = self.__md_mgr.get_geo_metadata(geo)
if not metadata:
continue
metadata["geo"] = geo
all_geo.append(metadata)
return all_geo
def list_geometry_versions(self, geo):
"""
Find all Shotgun aware versions for the specified geometry. Any non-Shotgun aware versions are
ignored!
:param geo: The Mari GeoEntity to find all versions for
:returns: A list of dictionaries containing the geo_version together with any Shotgun metadata
that was found on it
"""
all_geo_versions = []
for geo_version in geo.versionList():
metadata = self.__md_mgr.get_geo_version_metadata(geo_version)
if not metadata:
continue
metadata["geo_version"] = geo_version
all_geo_versions.append(metadata)
return all_geo_versions
def load_geometry(self, sg_publish, options, objects_to_load):
"""
Wraps the Mari GeoManager.load() method and additionally tags newly loaded geometry with Shotgun
specific metadata. See Mari API documentation for more information on GeoManager.load().
:param sg_publish: The shotgun publish to load. This should be a Shotgun entity
dictionary containing at least the entity "type" and "id".
:param options: [Mari arg] - Options to be passed to the file loader when loading the geometry
:param objects_to_load: [Mari arg] - A list of objects to load from the file
:returns: A list of the loaded GeoEntity instances that were created
"""
# ensure that sg_publish contains the information we need:
update_publish_records([sg_publish])
# extract the file path for the publish
publish_path = self.__get_publish_path(sg_publish)
if not publish_path or not os.path.exists(publish_path):
raise TankError("Publish '%s' couldn't be found on disk!" % publish_path)
# load everything:
new_geo = []
try:
# (AD) Note - passing options as a named parameter (e.g. options=options) seems to
# stop any channels specified in the options list from being created so just pass
# as indexed parameters instead!
new_geo = mari.geo.load(publish_path,
options,
objects_to_load)
except Exception, e:
raise TankError("Failed to load published geometry from '%s': %s" % (publish_path, e))
# and initialize all new geo:
for geo in new_geo:
self.initialise_new_geometry(geo, publish_path, sg_publish)
return new_geo
def add_geometry_version(self, geo, sg_publish, options):
"""
Wraps the Mari GeoEntity.addVersion() method and additionally tags newly loaded geometry versions
with Shotgun specific metadata. See Mari API documentation for more information on
GeoEntity.addVersion().
:param geo: The Mari GeoEntity to add a version to
:param sg_publish: The publish to load as a new version. This should be a Shotgun entity
dictionary containing at least the entity "type" and "id".
:param options: [Mari arg] - Options to be passed to the file loader when loading the geometry. The
options will default to the options that were used to load the current version if
not specified.
:returns: The new GeoEntityVersion instance
"""
# ensure that sg_publish contains the information we need:
update_publish_records([sg_publish], min_fields = ["id", "path", "version_number"])
# extract the file path for the publish
publish_path = self.__get_publish_path(sg_publish)
if not publish_path or not os.path.exists(publish_path):
raise TankError("Publish '%s' couldn't be found on disk!" % publish_path)
# determine the name of the new version:
version = sg_publish.get("version_number")
version_name = "v%03d" % (version or 0)
if version_name in geo.versionNames():
raise TankError("Unable to add version as '%s' already exists for '%s'!"
% (version_name, geo.name()))
# add the version
try:
geo.addVersion(publish_path,
version_name,
options)
except Exception, e:
raise TankError("Failed to load published geometry version from '%s': %s" % (publish_path, e))
# Make sure the version was successfully added:
if version_name not in geo.versionNames():
raise TankError("Failed to add geometry version '%s' to '%s'!"
% (version_name, geo.name()))
geo_version = geo.version(version_name)
# initialise the version:
self.initialise_new_geometry_version(geo_version, publish_path, sg_publish)
return geo_version
def initialise_new_geometry(self, geo, publish_path, sg_publish):
"""
Initialise a new geometry. This sets the name and updates the Shotgun metadata.
:param geo: The geometry to initialise
:param publish_path: The path of the publish this geometry was loaded from
:param sg_publish: The Shotgun publish record for this geometry. This should be a Shotgun
entity dictionary containing at least the entity "type" and "id".
"""
# determine the name to use:
publish_name = sg_publish.get("name")
scene_name = os.path.basename(publish_path).split(".")[0]
geo_name = publish_name or scene_name
# look at the current name and see if it's merged or an individual entity:
current_name = geo.name()
if not current_name.startswith(publish_name) and not current_name.startswith(scene_name):
# geo name should include the existing name as it's not merged!
geo_name = "%s_%s" % (geo_name, current_name)
if geo_name != current_name:
# make sure the name is unique:
test_name = geo_name
test_index = 1
existing_names = mari.geo.names()
while True:
if test_name not in existing_names:
# have unique name!
break
test_name = "%s_%d" % (geo_name, test_index)
test_index += 1
geo_name = test_name
# set the geo name:
if geo_name != current_name:
geo.setName(geo_name)
# set the geo metadata:
sg_project = sg_publish.get("project")
sg_entity = sg_publish.get("entity")
sg_task = sg_publish.get("task")
self.__md_mgr.set_geo_metadata(geo, sg_project, sg_entity, sg_task)
# there should be a single version for the geo:
geo_versions = geo.versionList()
if len(geo_versions) != 1:
raise TankError("Invalid number of versions found for geometry "
"- expected 1 but found %d!" % len(geo_versions))
# finally, initialize the geometry version:
self.initialise_new_geometry_version(geo_versions[0], publish_path, sg_publish)
def initialise_new_geometry_version(self, geo_version, publish_path, sg_publish):
"""
Initialise a new geometry version. This sets the name and updates the Shotgun metadata.
:param geo_version: The geometry version to initialise
:param publish_path: The path of the publish this geometry was loaded from
:param sg_publish: The Shotgun publish record for this geometry version. This should be a
Shotgun entity dictionary containing at least the entity "type" and "id".
"""
sg_publish_id = sg_publish.get("id")
sg_version = sg_publish.get("version_number")
# set geo_version name if needed:
geo_version_name = "v%03d" % (sg_version or 0)
if geo_version.name() != geo_version_name:
geo_version.setName(geo_version_name)
# and store metadata:
self.__md_mgr.set_geo_version_metadata(geo_version, publish_path, sg_publish_id, sg_version)
def __get_publish_path(self, sg_publish):
"""
Get the publish path from a Shotgun publish record.
# (TODO) - move this to use a centralized method in core
:param sg_publish: The publish to extract the path from.
:returns: The path if found or None
"""
return sg_publish.get("path", {}).get("local_path")
```
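A loader-style action might use `GeometryManager` as below. This is a sketch only, assuming `sg_publish` is a Shotgun entity dictionary with at least "type" and "id":
```python
# Minimal sketch of the decision flow a loader action might follow.
# `sg_publish` is assumed to be a Shotgun entity dict with "type" and "id".
mgr = GeometryManager()
geo, geo_version = mgr.find_geometry_for_publish(sg_publish)
if geo and geo_version:
    pass  # this exact publish version is already in the project
elif geo:
    # same publish, different version - add it to the existing geo
    mgr.add_geometry_version(geo, sg_publish, options=None)
else:
    # not in the project at all - load it fresh
    mgr.load_geometry(sg_publish, options=None, objects_to_load=None)
```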
#### File: v0.9.1/startup/userSetup.py
```python
import os
import maya.OpenMaya as OpenMaya
import maya.cmds as cmds
def start_toolkit_classic():
"""
    Parse environment variables for an engine name and
serialized Context to use to startup Toolkit and
the tk-maya engine and environment.
"""
import sgtk
logger = sgtk.LogManager.get_logger(__name__)
logger.debug("Launching toolkit in classic mode.")
    # Get the name of the engine to start from the environment
env_engine = os.environ.get("SGTK_ENGINE")
if not env_engine:
OpenMaya.MGlobal.displayError(
"Shotgun: Missing required environment variable SGTK_ENGINE."
)
return
    # Get the context to load from the environment.
env_context = os.environ.get("SGTK_CONTEXT")
if not env_context:
OpenMaya.MGlobal.displayError(
"Shotgun: Missing required environment variable SGTK_CONTEXT."
)
return
try:
# Deserialize the environment context
context = sgtk.context.deserialize(env_context)
except Exception, e:
OpenMaya.MGlobal.displayError(
"Shotgun: Could not create context! Shotgun Pipeline Toolkit will "
"be disabled. Details: %s" % e
)
return
try:
# Start up the toolkit engine from the environment data
logger.debug("Launching engine instance '%s' for context %s" % (env_engine, env_context))
engine = sgtk.platform.start_engine(env_engine, context.sgtk, context)
except Exception, e:
OpenMaya.MGlobal.displayError(
"Shotgun: Could not start engine: %s" % e
)
return
def start_toolkit_with_plugins():
"""
Parse environment variables for a list of plugins to load that will
ultimately startup Toolkit and the tk-maya engine and environment.
"""
import sgtk
logger = sgtk.LogManager.get_logger(__name__)
logger.debug("Launching maya in plugin mode")
for plugin_path in os.environ["SGTK_LOAD_MAYA_PLUGINS"].split(os.pathsep):
# Find the appropriate "plugin" sub directory. Maya will not be
# able to find any plugins under the base directory without this.
if os.path.isdir(os.path.join(plugin_path, "plug-ins")):
load_path = os.path.join(plugin_path, "plug-ins")
elif os.path.isdir(os.path.join(plugin_path, "plugins")):
load_path = os.path.join(plugin_path, "plugins")
else:
load_path = plugin_path
# Load the plugins from the resolved path individually, as the
# loadPlugin Maya command has difficulties loading all (*) plugins
# from a path that contains a string in the form of 'v#.#.#':
# loadPlugin "/shotgun/site/project/install/app_store/tk-maya/v0.7.10/plugins/basic/plug-ins/*";
# // Error: line 1: Plug-in, "/shotgun/site/project/install/app_store/tk-maya/v0.7.10/plugins/basic/plug-ins/*", was not found on MAYA_PLUG_IN_PATH. //
# loadPlugin "/shotgun/site/project/install/app_store/tk-maya-no_version/plugins/basic/plug-ins/*";
# // Result: shotgun //
for plugin_filename in os.listdir(load_path):
if not plugin_filename.endswith(".py"):
# Skip files/directories that are not plugins
continue
# Construct the OS agnostic full path to the plugin
# and attempt to load the plugin. Note that the loadPlugin
# command always returns a list, even when loading a single plugin.
full_plugin_path = os.path.join(load_path, plugin_filename)
logger.debug("Loading plugin %s" % full_plugin_path)
loaded_plugins = cmds.loadPlugin(full_plugin_path)
# note: loadPlugin returns a list of the loaded plugins
if not loaded_plugins:
OpenMaya.MGlobal.displayWarning(
"Shotgun: Could not load plugin: %s" % full_plugin_path
)
continue
def start_toolkit():
"""
Import Toolkit and start up a tk-maya engine based on
environment variables.
"""
# Verify sgtk can be loaded.
try:
import sgtk
except Exception, e:
OpenMaya.MGlobal.displayError(
"Shotgun: Could not import sgtk! Disabling for now: %s" % e
)
return
# start up toolkit logging to file
sgtk.LogManager().initialize_base_file_handler("tk-maya")
if os.environ.get("SGTK_LOAD_MAYA_PLUGINS"):
        # Plugins will take care of initializing everything
start_toolkit_with_plugins()
else:
        # Rely on the classic bootstrapping method
start_toolkit_classic()
# Check if a file was specified to open and open it.
file_to_open = os.environ.get("SGTK_FILE_TO_OPEN")
if file_to_open:
OpenMaya.MGlobal.displayInfo(
"Shotgun: Opening '%s'..." % file_to_open
)
cmds.file(file_to_open, force=True, open=True)
# Clean up temp env variables.
del_vars = [
"SGTK_ENGINE", "SGTK_CONTEXT", "SGTK_FILE_TO_OPEN",
"SGTK_LOAD_MAYA_PLUGINS",
]
for var in del_vars:
if var in os.environ:
del os.environ[var]
# Fire up Toolkit and the environment engine when there's time.
cmds.evalDeferred("start_toolkit()")
```
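For the classic path, the launching process is expected to seed the environment before Maya starts. A minimal sketch of that, where `context` is assumed to be a previously obtained `sgtk.Context` and `maya` is assumed to be on the PATH:
```python
# Minimal sketch of a launcher preparing the classic-bootstrap environment.
# `context` is assumed to be a previously obtained sgtk.Context instance.
import os
import subprocess
import sgtk

os.environ["SGTK_ENGINE"] = "tk-maya"
os.environ["SGTK_CONTEXT"] = sgtk.context.serialize(context)
subprocess.Popen(["maya"])  # userSetup.py above picks these up on launch
```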
#### File: tk-multi-publish2/basic/collector.py
```python
import os
import sgtk
from pyfbsdk import FBApplication
mb_app = FBApplication()
HookBaseClass = sgtk.get_hook_baseclass()
class MotionBuilderSessionCollector(HookBaseClass):
"""
Collector that operates on the motion builder session. Should inherit from the basic
collector hook.
"""
@property
def settings(self):
"""
Dictionary defining the settings that this collector expects to receive
through the settings parameter in the process_current_session and
process_file methods.
A dictionary on the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
                }
            }
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
# grab any base class settings
collector_settings = super(MotionBuilderSessionCollector, self).settings or {}
# settings specific to this collector
motionbuilder_session_settings = {
"Work Template": {
"type": "template",
"default": None,
"description": "Template path for artist work files. Should "
"correspond to a template defined in "
"templates.yml. If configured, is made available"
"to publish plugins via the collected item's "
"properties. ",
},
}
# update the base settings with these settings
collector_settings.update(motionbuilder_session_settings)
return collector_settings
def process_current_session(self, settings, parent_item):
"""
Analyzes the current session open in Motion Builder and parents a subtree of
items under the parent_item passed in.
        :param dict settings: Configured settings for this collector
        :param parent_item: Root item instance
"""
# create an item representing the current motion builder session
item = self.collect_current_motion_builder_session(settings, parent_item)
def collect_current_motion_builder_session(self, settings, parent_item):
"""
Creates an item that represents the current motion builder session.
        :param dict settings: Configured settings for this collector
        :param parent_item: Parent Item instance
:returns: Item of type motionbuilder.session
"""
publisher = self.parent
# get the path to the current file
path = mb_app.FBXFileName
# determine the display name for the item
if path:
file_info = publisher.util.get_file_path_components(path)
display_name = file_info["filename"]
else:
display_name = "Current Motion Builder Session"
# create the session item for the publish hierarchy
session_item = parent_item.create_item(
"motionbuilder.fbx",
"Motion Builder FBX",
display_name
)
# get the icon path to display for this item
icon_path = os.path.join(
self.disk_location,
os.pardir,
"icons",
"motionbuilder.png"
)
session_item.set_icon_from_path(icon_path)
# discover the project root which helps in discovery of other
# publishable items
project_root = path
session_item.properties["project_root"] = project_root
# if a work template is defined, add it to the item properties so
# that it can be used by attached publish plugins
work_template_setting = settings.get("Work Template")
if work_template_setting:
work_template = publisher.engine.get_template_by_name(
work_template_setting.value)
# store the template on the item for use by publish plugins. we
# can't evaluate the fields here because there's no guarantee the
# current session path won't change once the item has been created.
# the attached publish plugins will need to resolve the fields at
# execution time.
session_item.properties["work_template"] = work_template
self.logger.debug("Work template defined for Motion Builder collection.")
self.logger.info("Collected current Motion Builder scene")
return session_item
```
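A tiny illustration of the settings-merge precedence used in the collector above: the subclass starts from the base-class settings and overlays its own keys. The "File Types" entry is a hypothetical base setting, used here only to show the merge.
```python
# Hypothetical base-class settings, for illustration only.
base_settings = {
    "File Types": {"type": "list", "default": [], "description": "..."},
}
session_settings = {
    "Work Template": {"type": "template", "default": None, "description": "..."},
}

merged = dict(base_settings)
merged.update(session_settings)  # subclass keys win on collision
print(sorted(merged))  # -> ['File Types', 'Work Template']
```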
#### File: python/tk_multi_about/context_browser.py
```python
import tank
import os
import sys
import datetime
import threading
from tank.platform.qt import QtCore, QtGui
browser_widget = tank.platform.import_framework("tk-framework-widget", "browser_widget")
class ContextBrowserWidget(browser_widget.BrowserWidget):
def __init__(self, parent=None):
browser_widget.BrowserWidget.__init__(self, parent)
def get_data(self, data):
data = {}
ctx = self._app.context
if ctx.project:
# get project data
data["project"] = self._app.shotgun.find_one("Project",
[ ["id", "is", ctx.project["id"]] ],
["name", "sg_description", "image"])
if ctx.entity:
# get entity data
data["entity"] = self._app.shotgun.find_one(ctx.entity["type"],
[ ["id", "is", ctx.entity["id"]] ],
["code", "description", "image"])
if ctx.step:
# get step data
data["step"] = self._app.shotgun.find_one("Step",
[ ["id", "is", ctx.step["id"]] ],
["code", "description"])
if ctx.task:
# get task data
data["task"] = self._app.shotgun.find_one("Task",
[ ["id", "is", ctx.task["id"]] ],
["content", "image", "task_assignees", "sg_status_list"])
data["additional"] = []
for ae in ctx.additional_entities:
# additional entity data
d = self._app.shotgun.find_one(ae["type"],
[ ["id", "is", ae["id"]] ],
["code", "description", "image"])
data["additional"].append(d)
return data
def process_result(self, result):
if result.get("project"):
d = result["project"]
i = self.add_item(browser_widget.ListItem)
details = []
details.append("<b>Project %s</b>" % d.get("name"))
details.append( d.get("sg_description") if d.get("sg_description") else "No Description" )
i.set_details("<br>".join(details))
i.sg_data = d
i.setToolTip("Double click to see more details in Shotgun.")
if d.get("image"):
i.set_thumbnail(d.get("image"))
if result.get("entity"):
d = result["entity"]
i = self.add_item(browser_widget.ListItem)
details = []
nice_name = tank.util.get_entity_type_display_name(self._app.tank, d.get("type"))
details.append("<b>%s %s</b>" % (nice_name, d.get("code")))
details.append( d.get("description") if d.get("description") else "No Description" )
i.set_details("<br>".join(details))
i.sg_data = d
i.setToolTip("Double click to see more details in Shotgun.")
if d.get("image"):
i.set_thumbnail(d.get("image"))
for d in result["additional"]:
i = self.add_item(browser_widget.ListItem)
details = []
nice_name = tank.util.get_entity_type_display_name(self._app.tank, d.get("type"))
details.append("<b>%s %s</b>" % (nice_name, d.get("code")))
details.append( d.get("description") if d.get("description") else "No Description" )
i.set_details("<br>".join(details))
i.sg_data = d
i.setToolTip("Double click to see more details in Shotgun.")
if d.get("image"):
i.set_thumbnail(d.get("image"))
if result.get("step"):
d = result["step"]
i = self.add_item(browser_widget.ListItem)
details = []
details.append("<b>Pipeline Step %s</b>" % d.get("code", ""))
details.append( d.get("description") if d.get("description") else "No Description" )
i.set_details("<br>".join(details))
i.sg_data = d
i.setToolTip("Double click to see more details in Shotgun.")
i.set_thumbnail(":/res/pipeline_step.png")
if result.get("task"):
d = result["task"]
i = self.add_item(browser_widget.ListItem)
details = []
details.append("<b>Task %s</b>" % d.get("content"))
details.append("Status: %s" % d.get("sg_status_list"))
names = [ x.get("name") for x in d.get("task_assignees", []) ]
names_str = ", ".join(names)
details.append("Assigned to: %s" % names_str)
i.set_details("<br>".join(details))
i.sg_data = d
i.setToolTip("Double click to see more details in Shotgun.")
if d.get("image"):
i.set_thumbnail(d.get("image"))
```
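The get_data method above follows one pattern per context member: a Shotgun find_one keyed into a result dict. A reduced sketch of that pattern, assuming `sg` is a shotgun_api3-style connection and `ctx` a Toolkit context (only two of the lookups are shown):
```python
def fetch_context_details(sg, ctx):
    """Collect display data for selected members of a Toolkit context."""
    data = {}
    if ctx.project:
        data["project"] = sg.find_one(
            "Project", [["id", "is", ctx.project["id"]]],
            ["name", "sg_description", "image"])
    if ctx.task:
        data["task"] = sg.find_one(
            "Task", [["id", "is", ctx.task["id"]]],
            ["content", "image", "task_assignees", "sg_status_list"])
    return data
```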
#### File: python/tk_multi_about/dialog.py
```python
import tank
import unicodedata
import os
import sys
import threading
from tank.platform.qt import QtCore, QtGui
from tank.platform import restart
from .ui.dialog import Ui_Dialog
class AppDialog(QtGui.QWidget):
def __init__(self, app):
QtGui.QWidget.__init__(self)
# set up the UI
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self._app = app
# set up the browsers
self.ui.context_browser.set_app(self._app)
self.ui.context_browser.set_label("Your Current Work Context")
self.ui.context_browser.enable_search(False)
self.ui.context_browser.action_requested.connect( self.show_in_sg )
self.ui.app_browser.set_app(self._app)
self.ui.app_browser.set_label("Currently Running Apps")
self.ui.app_browser.action_requested.connect( self.show_app_in_app_store )
self.ui.environment_browser.set_app(self._app)
self.ui.environment_browser.set_label("The Current Environment")
self.ui.environment_browser.enable_search(False)
self.ui.environment_browser.action_requested.connect( self.show_engine_in_app_store )
self.ui.jump_to_fs.clicked.connect( self.show_in_fs )
self.ui.support.clicked.connect( self.open_helpdesk )
self.ui.reload_apps.clicked.connect( self.reload )
self.ui.close.clicked.connect( self.close )
# load data from shotgun
self.setup_context_list()
self.setup_apps_list()
self.setup_environment_list()
        # When there are no file system locations, hide the "Jump to the File System" button.
if not self._app.context.filesystem_locations:
self.ui.jump_to_fs.setVisible(False)
########################################################################################
# make sure we trap when the dialog is closed so that we can shut down
# our threads. Nuke does not do proper cleanup on exit.
def closeEvent(self, event):
self.ui.context_browser.destroy()
self.ui.app_browser.destroy()
self.ui.environment_browser.destroy()
# okay to close!
event.accept()
########################################################################################
# basic business logic
def setup_context_list(self):
self.ui.context_browser.clear()
self.ui.context_browser.load({})
def setup_apps_list(self):
self.ui.app_browser.clear()
self.ui.app_browser.load({})
def setup_environment_list(self):
self.ui.environment_browser.clear()
self.ui.environment_browser.load({})
def open_helpdesk(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl("http://support.shotgunsoftware.com"))
def reload(self):
"""
Reload templates and restart engine.
"""
restart()
def show_in_fs(self):
"""
Jump from context to FS
"""
# launch one window for each location on disk
paths = self._app.context.filesystem_locations
for disk_location in paths:
# get the setting
system = sys.platform
# run the app
if system == "linux2":
cmd = 'xdg-open "%s"' % disk_location
elif system == "darwin":
cmd = 'open "%s"' % disk_location
elif system == "win32":
cmd = 'cmd.exe /C start "Folder" "%s"' % disk_location
else:
raise Exception("Platform '%s' is not supported." % system)
exit_code = os.system(cmd)
if exit_code != 0:
self._app.log_error("Failed to launch '%s'!" % cmd)
def show_in_sg(self):
"""
Jump to shotgun
"""
curr_selection = self.ui.context_browser.get_selected_item()
if curr_selection is None:
return
data = curr_selection.sg_data
# steps do not have detail pages in shotgun so omit those
if data["type"] == "Step":
return
sg_url = "%s/detail/%s/%d" % (self._app.shotgun.base_url, data["type"], data["id"])
QtGui.QDesktopServices.openUrl(QtCore.QUrl(sg_url))
def show_app_in_app_store(self):
"""
Jump to app store
"""
curr_selection = self.ui.app_browser.get_selected_item()
if curr_selection is None:
return
doc_url = curr_selection.data.get("documentation_url")
if doc_url is None:
QtGui.QMessageBox.critical(self,
"No Documentation found!",
"Sorry, this app does not have any associated documentation!")
else:
QtGui.QDesktopServices.openUrl(QtCore.QUrl(doc_url))
def show_engine_in_app_store(self):
"""
Jump to app store
"""
curr_selection = self.ui.environment_browser.get_selected_item()
if curr_selection is None:
return
doc_url = curr_selection.data.get("documentation_url")
if doc_url:
QtGui.QDesktopServices.openUrl(QtCore.QUrl(doc_url))
```
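The per-platform branching in show_in_fs can be read as a standalone helper; a minimal sketch follows, with the same commands and exit-code convention. Note it accepts any `linux*` platform string, a small generalization over the `linux2` check above.
```python
import os
import sys

def open_in_file_browser(disk_location):
    """Open a folder in the OS file browser; returns the shell exit code."""
    if sys.platform.startswith("linux"):
        cmd = 'xdg-open "%s"' % disk_location
    elif sys.platform == "darwin":
        cmd = 'open "%s"' % disk_location
    elif sys.platform == "win32":
        cmd = 'cmd.exe /C start "Folder" "%s"' % disk_location
    else:
        raise Exception("Platform '%s' is not supported." % sys.platform)
    return os.system(cmd)
```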
#### File: python/tk_multi_breakdown/scene_browser.py
```python
import tank
import os
import sys
import datetime
import threading
from tank.platform.qt import QtCore, QtGui
from . import breakdown
browser_widget = tank.platform.import_framework("tk-framework-widget", "browser_widget")
from .breakdown_list_item import BreakdownListItem
class SceneBrowserWidget(browser_widget.BrowserWidget):
def __init__(self, parent=None):
browser_widget.BrowserWidget.__init__(self, parent)
def get_data(self, data):
items = breakdown.get_breakdown_items()
return {"items": items, "show_red": data["show_red"], "show_green": data["show_green"] }
def _make_row(self, first, second):
return "<tr><td><b>%s</b> </td><td>%s</td></tr>" % (first, second)
def process_result(self, result):
if len(result.get("items")) == 0:
self.set_message("No versioned data in your scene!")
return
################################################################################
# PASS 1 - grouping
# group these items into various buckets first based on type, and asset type
groups = {}
for d in result["items"]:
if d.get("sg_data"):
# publish in shotgun!
sg_data = d["sg_data"]
entity = sg_data.get("entity")
if entity is None:
entity_type = "Unknown Type"
else:
entity_type = entity["type"]
asset_type = sg_data["entity.Asset.sg_asset_type"]
if asset_type:
group = "%ss" % asset_type # eg. Characters
else:
group = "%ss" % entity_type # eg. Shots
# it is an asset, so group by asset type
if group not in groups:
groups[group] = []
groups[group].append(d)
else:
# everything not in shotgun goes into the other bucket
OTHER_ITEMS = "Unpublished Items"
if OTHER_ITEMS not in groups:
groups[OTHER_ITEMS] = []
groups[OTHER_ITEMS].append(d)
################################################################################
# PASS 2 - display the content of all groups
if tank.util.get_published_file_entity_type(self._app.tank) == "PublishedFile":
published_file_type_field = "published_file_type"
        else:  # == "TankPublishedFile"
published_file_type_field = "tank_type"
# now iterate through the groups
for group in sorted(groups.keys()):
i = self.add_item(browser_widget.ListHeader)
i.set_title(group)
for d in groups[group]:
# item has a publish in sg
i = self.add_item(BreakdownListItem)
# provide a limited amount of data for receivers via the
# data dictionary on
# the item object
i.data = {"node_name": d["node_name"],
"node_type": d["node_type"],
"template": d["template"],
"fields": d["fields"] }
# populate the description
details = []
if d.get("sg_data"):
sg_data = d["sg_data"]
details.append( self._make_row("Item", "%s, Version %d" % (sg_data["name"], sg_data["version_number"]) ) )
# see if this publish is associated with an entity
linked_entity = sg_data.get("entity")
if linked_entity:
details.append( self._make_row(linked_entity["type"], linked_entity["name"]) )
# does it have a tank type ?
if sg_data.get(published_file_type_field):
details.append( self._make_row("Type", sg_data.get(published_file_type_field).get("name")))
details.append( self._make_row("Node", d["node_name"]))
else:
details.append(self._make_row("Version", d["fields"]["version"] ))
# display some key fields in the widget
# todo: make this more generic?
relevant_fields = ["Shot", "Asset", "Step", "Sequence", "name"]
for (k,v) in d["fields"].items():
# only show relevant fields - a bit of a hack
if k in relevant_fields:
details.append( self._make_row(k,v) )
details.append( self._make_row("Node", d["node_name"]))
inner = "".join(details)
i.set_details("<table>%s</table>" % inner)
# finally, ask the node to calculate its red-green status
# this will happen asynchronously.
i.calculate_status(d["template"],
d["fields"],
result["show_red"],
result["show_green"],
d.get("sg_data"))
```
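The PASS 1 grouping above reduces to a small pure function over plain dictionaries; a sketch:
```python
def group_breakdown_items(items):
    """Bucket breakdown items by asset type, entity type, or 'Unpublished Items'."""
    groups = {}
    for d in items:
        sg_data = d.get("sg_data")
        if sg_data:
            entity = sg_data.get("entity")
            entity_type = entity["type"] if entity else "Unknown Type"
            asset_type = sg_data.get("entity.Asset.sg_asset_type")
            group = "%ss" % (asset_type or entity_type)  # e.g. "Characters", "Shots"
        else:
            group = "Unpublished Items"
        groups.setdefault(group, []).append(d)
    return groups
```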
#### File: tk_multi_breakdown/ui/thumbnail_label.py
```python
import os
import sys
from tank.platform.qt import QtCore, QtGui
class ThumbnailLabel(QtGui.QLabel):
def __init__(self, parent=None):
QtGui.QLabel.__init__(self, parent)
def setPixmap(self, pixmap):
# scale the pixmap down to fit
if pixmap.height() > 40 or pixmap.width() > 60:
            # scale it down to 60x40
pixmap = pixmap.scaled( QtCore.QSize(60,40), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
            # now slap it on top of a 60x40 transparent canvas
rendered_pixmap = QtGui.QPixmap(60, 40)
rendered_pixmap.fill(QtCore.Qt.transparent)
w_offset = (60 - pixmap.width()) / 2
h_offset = (40 - pixmap.height()) / 2
painter = QtGui.QPainter(rendered_pixmap)
painter.drawPixmap(w_offset, h_offset, pixmap)
painter.end()
# and finally assign it
QtGui.QLabel.setPixmap(self, rendered_pixmap)
```
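The letterboxing math in setPixmap, restated without Qt: a sketch that computes the scaled size and centring offsets for the 60x40 canvas.
```python
def letterbox_offsets(width, height, canvas_w=60, canvas_h=40):
    """Return (w_offset, h_offset) for centring a scaled image on the canvas."""
    scale = min(float(canvas_w) / width, float(canvas_h) / height, 1.0)
    scaled_w = int(width * scale)
    scaled_h = int(height * scale)
    return (canvas_w - scaled_w) // 2, (canvas_h - scaled_h) // 2

print(letterbox_offsets(120, 80))  # wide source scales to 60x40 -> (0, 0)
print(letterbox_offsets(40, 40))   # square source stays 40x40  -> (10, 0)
```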
#### File: photoshop/startup/userSetup.py
```python
import os
import sys
def msgbox(msg):
if sys.platform == "win32":
import ctypes
MessageBox = ctypes.windll.user32.MessageBoxA
MessageBox(None, msg, "Shotgun", 0)
elif sys.platform == "darwin":
os.system("""osascript -e 'tell app "System Events" to activate""")
os.system("""osascript -e 'tell app "System Events" to display dialog "%s"'""" % msg)
def bootstrap_tank():
try:
import tank
    except Exception as e:
msgbox("Shotgun: Could not import sgtk! Disabling for now: %s" % e)
return
if not "TANK_ENGINE" in os.environ:
msgbox("Shotgun: Missing required environment variable TANK_ENGINE.")
return
engine_name = os.environ.get("TANK_ENGINE")
try:
context = tank.context.deserialize(os.environ.get("TANK_CONTEXT"))
    except Exception as e:
msgbox("Shotgun: Could not create context! Shotgun Pipeline Toolkit will be disabled. Details: %s" % e)
return
try:
engine = tank.platform.start_engine(engine_name, context.tank, context)
    except Exception as e:
msgbox("Shotgun: Could not start Photoshop engine: %s" % e)
return
file_to_open = os.environ.get("TANK_FILE_TO_OPEN")
if file_to_open:
import photoshop
f = photoshop.RemoteObject("flash.filesystem::File", file_to_open)
photoshop.app.load(f)
# clean up temp env vars
for var in ["TANK_ENGINE", "TANK_CONTEXT", "TANK_FILE_TO_OPEN"]:
if var in os.environ:
del os.environ[var]
bootstrap_tank()
```
#### File: python/tk_multi_launchapp/base_launcher.py
```python
import os
import sys
from distutils.version import LooseVersion
import sgtk
from sgtk import TankError
from .util import apply_version_to_setting, get_clean_version_string
from .util import clear_dll_directory, restore_dll_directory
from .prepare_apps import prepare_launch_for_engine
class BaseLauncher(object):
"""
Functionality to register engine commands that launch DCC
applications, as well as the business logic to perform the launch.
Subclasses of this class are responsible for parsing the
information required to launch an application from a variety
of sources.
"""
def __init__(self):
"""
Initialize members
"""
# Retrieve the TK Application from the current bundle
self._tk_app = sgtk.platform.current_bundle()
# Store the current platform value
self._platform_name = {
"linux2": "linux", "darwin": "mac", "win32": "windows"
}[sys.platform]
def _register_launch_command(
self,
app_menu_name,
app_icon,
app_engine,
app_path,
app_args,
version=None,
group=None,
group_default=True,
):
"""
Register a launch command with the current engine.
Also handles replacement of {version} tokens.
:param str app_menu_name: Menu name to display to launch this DCC. This is also
used to construct the associated command name.
:param str app_icon: Icon to display for this DCC
:param str app_engine: The TK engine associated with the DCC to be launched
:param str app_path: Full path name to the DCC. This may contain environment
variables and/or the locally supported {version}, {v0},
{v1}, ... variables
:param str app_args: Args string to pass to the DCC at launch time
:param str version: (Optional) Specific version of DCC to use.
:param str group: (Optional) Group name this command belongs to. This value is
interpreted by the engine the command is registered with.
:param bool group_default: (Optional) If this command is one of a group of commands,
indicate whether to launch this command if the group is
selected instead of an individual command. This value is
also interpreted by the engine the command is registered with.
"""
# do the {version} replacement if needed
icon = apply_version_to_setting(app_icon, version)
menu_name = apply_version_to_setting(app_menu_name, version)
# Resolve any env variables in the specified path to the application to launch.
app_path = os.path.expandvars(
apply_version_to_setting(app_path, version)
)
# the command name mustn't contain spaces and funny chars, so sanitize it.
# Also, should be nice for the shell engine.
# "Launch NukeX..." -> launch_nukex
command_name = menu_name.lower().replace(" ", "_")
if command_name.endswith("..."):
command_name = command_name[:-3]
# special case! @todo: fix this.
# this is to allow this app to be loaded for sg entities of
# type publish but not show up on the Shotgun menu. The
# launch_from_path() and launch_from_path_and_context()
# methods for this app should be used for these environments
# instead. These methods are normally accessed via a hook.
skip_environments = [
"shotgun_tankpublishedfile",
"shotgun_publishedfile",
"shotgun_version",
]
if self._tk_app.engine.environment.get("name") not in skip_environments:
properties = {
"title": menu_name,
"short_name": command_name,
"description": "Launches and initializes an application environment.",
"icon": icon,
"group": group,
"group_default": group_default,
"engine_name": app_engine,
}
def launch_version(*args, **kwargs):
self._launch_callback(
menu_name,
app_engine,
app_path,
app_args,
version,
*args, **kwargs
)
self._tk_app.log_debug(
"Registering command %s to launch %s with args %s for engine %s" %
(command_name, app_path, app_args, app_engine)
)
self._tk_app.engine.register_command(
command_name, launch_version, properties
)
def _launch_app(
self, menu_name, app_engine, app_path, app_args, context,
version=None, file_to_open=None
):
"""
Launches an application. No environment variable change is
leaked to the outside world.
:param menu_name: Menu name to display to launch this DCC. This is
also used to construct the associated command name.
:param app_engine: The TK engine associated with the DCC to be launched
:param app_path: Full path name to the DCC. This may contain environment
variables and/or the locally supported {version}, {v0},
{v1}, ... variables
:param app_args: Args string to pass to the DCC at launch time
:param context: Toolkit context to open the app in.
:param version: (Optional) Version of the app to launch. Specifying
None means no {version} substitutions will take place.
:param file_to_open: (Optional) File to open when the app launches.
"""
try:
# Clone the environment variables
environ_clone = os.environ.copy()
sys_path_clone = list(sys.path)
# Get the executable path path and args. Adjust according to
# the relevant engine.
app_path = apply_version_to_setting(app_path, version)
app_args = apply_version_to_setting(app_args, version)
if app_engine:
(prepped_path, prepped_args) = prepare_launch_for_engine(
app_engine, app_path, app_args, context, file_to_open
)
# QUESTION: Since *some* of the "prep" methods may modify
# the app_path and app_args values (e.g. _prepare_flame_flare_launch),
# should they be reset here like this?
# (This is not what it does currently)
app_path = prepped_path or app_path
app_args = prepped_args or app_args
version_string = get_clean_version_string(version)
# run before launch hook
self._tk_app.log_debug("Running before app launch hook...")
self._tk_app.execute_hook(
"hook_before_app_launch",
app_path=app_path,
app_args=app_args,
version=version_string,
engine_name=app_engine,
)
# Ticket 26741: Avoid having odd DLL loading issues on windows
# Desktop PySide sets an explicit DLL path, which is getting
# inherited by subprocess. The following undoes that to make
# sure that apps depending on not having a DLL path are set
# to work properly
dll_directory_cache = clear_dll_directory()
try:
# Launch the application
self._tk_app.log_debug(
"Launching executable '%s' with args '%s'" %
(app_path, app_args)
)
result = self._tk_app.execute_hook(
"hook_app_launch",
app_path=app_path,
app_args=app_args,
version=version_string,
engine_name=app_engine,
)
launch_cmd = result.get("command")
return_code = result.get("return_code")
finally:
restore_dll_directory(dll_directory_cache)
self._tk_app.log_debug("Hook tried to launch '%s'" % launch_cmd)
if return_code != 0:
# some special logic here to decide how to display failure feedback
if app_engine == "tk-shotgun":
# for the shotgun engine, use the log info in order to
# get the proper html formatting
self._tk_app.log_info(
"<b>Failed to launch application!</b> "
"This is most likely because the path is not set correctly."
"The command that was used to attempt to launch is '%s'. "
"<br><br><a href='%s' target=_new>Click here</a> to learn "
"more about how to setup your app launch configuration." %
(launch_cmd, self._tk_app.HELP_DOC_URL)
)
elif self._tk_app.engine.has_ui:
# got UI support. Launch dialog with nice message
from ..not_found_dialog import show_path_error_dialog
show_path_error_dialog(self._tk_app, launch_cmd)
else:
# traditional non-ui environment without any html support.
self._tk_app.log_error(
"Failed to launch application! This is most likely because "
"the path is not set correctly. The command that was used "
"to attempt to launch is '%s'. To learn more about how to "
"set up your app launch configuration, see the following "
"documentation: %s" % (launch_cmd, self._tk_app.HELP_DOC_URL)
)
else:
# Emit a launched software metric
try:
# Dedicated try/except block: we wouldn't want a metric-related
# exception to prevent execution of the remaining code.
engine = sgtk.platform.current_engine()
engine.log_metric("Launched Software")
except Exception as e:
pass
# Write an event log entry
self._register_event_log(menu_name, app_engine, context, launch_cmd)
finally:
# Clear the original structures and add into them so
# that users who did from os import environ and from
# sys import path get the restored values.
os.environ.clear()
os.environ.update(environ_clone)
del sys.path[:]
sys.path.extend(sys_path_clone)
def _register_event_log(self, menu_name, app_engine, ctx, command_executed):
"""
Writes an event log entry to the shotgun event log, informing
about the app launch
:param menu_name: Menu name displayed to launch a DCC.
:param app_engine: The TK engine associated with the launched DCC.
:param ctx: TK context DCC was launched with
:param command_executed: Command (including args) that was used to
launch the DCC.
"""
meta = {}
meta["core"] = self._tk_app.sgtk.version
meta["engine"] = "%s %s" % (self._tk_app.engine.name, self._tk_app.engine.version)
meta["app"] = "%s %s" % (self._tk_app.name, self._tk_app.version)
meta["launched_engine"] = app_engine
meta["command"] = command_executed
meta["platform"] = sys.platform
if ctx.task:
meta["task"] = ctx.task["id"]
desc = "%s %s: %s" % (self._tk_app.name, self._tk_app.version, menu_name)
sgtk.util.create_event_log_entry(
self._tk_app.sgtk, ctx, "Toolkit_App_Startup", desc, meta
)
def _launch_callback(self, menu_name, app_engine, app_path, app_args, version=None, file_to_open=None):
"""
Default method to launch DCC application command based on the current context.
:param menu_name: Menu name displayed to launch this DCC.
:param app_engine: The TK engine associated with the DCC to be launched.
:param app_path: Full path to the DCC. May contain environment variables
and/or the locally supported {version}, {v0}, {v1}, ...
variables.
:param app_args: Args string to pass to the DCC at launch time.
:param version: (Optional) Specific version of DCC to launch. Used to
                       parse {version}, {v0}, {v1}, ... information from.
        :param file_to_open: (Optional) File to open when the app launches.
"""
# Verify a Project is defined in the context.
if self._tk_app.context.project is None:
raise TankError(
"Your context does not have a project defined. Cannot continue."
)
# Extract an entity type and id from the context.
entity_type = self._tk_app.context.project["type"]
entity_id = self._tk_app.context.project["id"]
# if there is an entity then that takes precedence
if self._tk_app.context.entity:
entity_type = self._tk_app.context.entity["type"]
entity_id = self._tk_app.context.entity["id"]
# and if there is a task that is even better
if self._tk_app.context.task:
entity_type = self._tk_app.context.task["type"]
entity_id = self._tk_app.context.task["id"]
if len(self._tk_app.sgtk.roots) == 0:
# configuration doesn't have any filesystem roots defined
self._tk_app.log_debug(
"Configuration does not have any filesystem roots defined. "
"Skipping folder creation."
)
else:
# Do the folder creation. If there is a specific defer keyword,
# this takes precedence. Otherwise, use the engine name for the
# DCC application by default.
defer_keyword = self._tk_app.get_setting("defer_keyword") or app_engine
try:
self._tk_app.log_debug(
"Creating folders for %s %s. Defer keyword: '%s'" %
(entity_type, entity_id, defer_keyword)
)
self._tk_app.sgtk.create_filesystem_structure(
entity_type, entity_id, engine=defer_keyword
)
            except sgtk.TankError as err:
raise TankError(
"Could not create folders on disk. Error reported: %s" % err
)
# Launch the DCC
self._launch_app(
menu_name,
app_engine,
app_path,
app_args,
self._tk_app.context,
version,
file_to_open,
)
def register_launch_commands(self):
"""
Abstract method implemented by derived classes to
        invoke _register_launch_command()
"""
raise NotImplementedError
def launch_from_path(self, path, version=None):
"""
Abstract method that can optionally be implemented by
derived classes
:param path: File path DCC should open after launch.
:param version: (optional) Specific version of DCC
to launch.
"""
raise NotImplementedError
def launch_from_path_and_context(self, path, context, version=None):
"""
Abstract method that can optionally be implemented by derived classes
:param path: File path DCC should open after launch.
:param context: Specific context to launch DCC with.
:param version: (Optional) Specific version of DCC to launch.
"""
raise NotImplementedError
def _sort_versions(self, versions):
"""
Uses standard python modules to determine how to sort arbitrary version numbers.
A version number consists of a series of numbers, separated by either periods or
strings of letters. When comparing version numbers, the numeric components will
be compared numerically, and the alphabetic components lexically. For example:
1.1 < 1.2 < 1.3
1.2 < 1.2a < 1.2ab < 1.2b
The input list of versions is not modified.
:param list versions: List of version "numbers" (may be strings)
:returns: List of sorted versions in descending order. The highest version is
at index 0.
"""
# Cast the incoming version strings as LooseVersion instances to sort using
# the LooseVersion.__cmp__ method.
sort_versions = [LooseVersion(version) for version in versions]
sort_versions.sort(reverse=True)
# Convert the LooseVersions back to strings on return.
return [str(version) for version in sort_versions]
```
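The ordering promised by the _sort_versions docstring can be checked directly with LooseVersion; the values below mirror the docstring's own examples:
```python
from distutils.version import LooseVersion

versions = ["1.1", "1.3", "1.2", "1.2a", "1.2b"]
ordered = sorted((LooseVersion(v) for v in versions), reverse=True)
print([str(v) for v in ordered])
# -> ['1.3', '1.2b', '1.2a', '1.2', '1.1']  (highest version at index 0)
```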
#### File: tk-multi-loader2/v1.18.0/app.py
```python
from sgtk.platform.qt import QtCore, QtGui
import sgtk
import sys
import os
class MultiLoader(sgtk.platform.Application):
def init_app(self):
"""
Called as the application is being initialized
"""
# We won't be able to do anything if there's no UI. The import
# of our tk-multi-loader module below required some Qt components,
# and will likely blow up.
if not self.engine.has_ui:
return
tk_multi_loader = self.import_module("tk_multi_loader")
# register command
        cb = lambda: tk_multi_loader.show_dialog(self)
menu_caption = "%s..." % self.get_setting("menu_name")
menu_options = {
"short_name": self.get_setting("menu_name").replace(" ", "_"),
# dark themed icon for engines that recognize this format
"icons": {
"dark": {
"png": os.path.join(
os.path.dirname(__file__),
"resources",
"load_menu_icon.png",
),
}
}
}
self.engine.register_command(menu_caption, cb, menu_options)
@property
def context_change_allowed(self):
"""
Specifies that context changes are allowed.
"""
return True
    def open_publish(self, title="Open Publish", action="Open", publish_types=[]):
"""
Display the loader UI in an open-file style where a publish can be selected and the
artist can then click the action button. This will then return the selected publish.
:param title: The title to be used for the dialog
:param action: The label to use for the action button
:param publish_types: If specified then the UI will only show publishes
that matches these types - this overrides the setting
from the configuration.
:returns: A list of Shotgun publish records for the publish(es)
that were selected in the UI. Each record in the list
                  is guaranteed to have a type and id but will usually
contain a much more complete list of fields from the
Shotgun PublishedFile entity
"""
tk_multi_loader = self.import_module("tk_multi_loader")
return tk_multi_loader.open_publish_browser(self, title, action, publish_types)
```
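A hedged usage sketch of open_publish from another Toolkit app. "tk-multi-loader2" is this app's conventional instance name and "Maya Rig" a hypothetical publish type; both depend on your environment configuration.
```python
import sgtk

engine = sgtk.platform.current_engine()
loader_app = engine.apps.get("tk-multi-loader2")  # instance name is config-dependent
if loader_app:
    selection = loader_app.open_publish(
        title="Select a Rig",        # dialog title
        action="Load",               # action button label
        publish_types=["Maya Rig"],  # hypothetical publish type
    )
    for publish in selection:
        print(publish["type"], publish["id"])
```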
#### File: v1.18.0/hooks/tk-houdini_actions.py
```python
import os
import re
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class HoudiniActions(HookBaseClass):
##############################################################################################################
# public interface - to be overridden by deriving classes
def generate_actions(self, sg_publish_data, actions, ui_area):
"""
Returns a list of action instances for a particular publish.
This method is called each time a user clicks a publish somewhere in the UI.
The data returned from this hook will be used to populate the actions menu for a publish.
The mapping between Publish types and actions are kept in a different place
(in the configuration) so at the point when this hook is called, the loader app
has already established *which* actions are appropriate for this object.
The hook should return at least one action for each item passed in via the
actions parameter.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
Because you are operating on a particular publish, you may tailor the output
(caption, tooltip etc) to contain custom information suitable for this publish.
The ui_area parameter is a string and indicates where the publish is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
- If it will be shown in the history area, "history" is passed.
Please note that it is perfectly possible to create more than one action "instance" for
an action! You can for example do scene introspection - if the action passed in
is "character_attachment" you may for example scan the scene, figure out all the nodes
where this object can be attached and return a list of action instances:
"attach to left hand", "attach to right hand" etc. In this case, when more than
one object is returned for an action, use the params key to pass additional
data into the run_action hook.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
        :returns: List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Publish Data: %s" % (ui_area, actions, sg_publish_data))
action_instances = []
if "merge" in actions:
action_instances.append( {"name": "merge",
"params": None,
"caption": "Merge",
"description": "This will merge the item into the scene."} )
if "import" in actions:
action_instances.append({
"name": "import",
"params": None,
"caption": "Import",
"description": "Import the Alembic cache file into a geometry network.",
})
if "file_cop" in actions:
action_instances.append({
"name": "file_cop",
"params": None,
"caption": "File COP",
"description": "Load an image or image sequence via File COP.",
})
return action_instances
def execute_multiple_actions(self, actions):
"""
Executes the specified action on a list of items.
The default implementation dispatches each item from ``actions`` to
the ``execute_action`` method.
The ``actions`` is a list of dictionaries holding all the actions to execute.
Each entry will have the following values:
name: Name of the action to execute
sg_publish_data: Publish information coming from Shotgun
params: Parameters passed down from the generate_actions hook.
.. note::
This is the default entry point for the hook. It reuses the ``execute_action``
method for backward compatibility with hooks written for the previous
version of the loader.
.. note::
The hook will stop applying the actions on the selection if an error
is raised midway through.
:param list actions: Action dictionaries.
"""
for single_action in actions:
name = single_action["name"]
sg_publish_data = single_action["sg_publish_data"]
params = single_action["params"]
self.execute_action(name, params, sg_publish_data)
def execute_action(self, name, params, sg_publish_data):
"""
        Execute a given action. The data sent to this method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:returns: No return value expected.
"""
app = self.parent
app.log_debug("Execute action called for action %s. "
"Parameters: %s. Publish Data: %s" % (name, params, sg_publish_data))
# resolve path
path = self.get_publish_path(sg_publish_data)
if name == "merge":
self._merge(path, sg_publish_data)
if name == "import":
self._import(path, sg_publish_data)
if name == "file_cop":
self._file_cop(path, sg_publish_data)
##############################################################################################################
# helper methods which can be subclassed in custom hooks to fine tune the behaviour of things
def _merge(self, path, sg_publish_data):
"""
Merge a published hip file into the working hip file with
the default settings Houdini would use if you did it in the UI.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
import hou
if not os.path.exists(path):
raise Exception("File not found on disk - '%s'" % path)
# use the default settings, which tries to merge all nodes
# and is conservative about overwriting and errors
hou.hipFile.merge(path,
node_pattern="*",
overwrite_on_conflict=False,
ignore_load_warnings=False)
##############################################################################################################
def _import(self, path, sg_publish_data):
"""Import the supplied path as a geo/alembic sop.
:param str path: The path to the file to import.
:param dict sg_publish_data: The publish data for the supplied path.
"""
import hou
app = self.parent
name = sg_publish_data.get("name", "alembic")
path = self.get_publish_path(sg_publish_data)
# houdini doesn't like UNC paths.
path = path.replace("\\", "/")
obj_context = _get_current_context("/obj")
try:
geo_node = obj_context.createNode("geo", name)
except hou.OperationFailed:
# failed to create the node in this context, create at top-level
obj_context = hou.node("/obj")
geo_node = obj_context.createNode("geo", name)
app.log_debug("Created geo node: %s" % (geo_node.path(),))
# delete the default nodes created in the geo
for child in geo_node.children():
child.destroy()
alembic_sop = geo_node.createNode("alembic", name)
alembic_sop.parm("fileName").set(path)
app.log_debug(
"Creating alembic sop: %s\n path: '%s' " %
(alembic_sop.path(), path)
)
alembic_sop.parm("reload").pressButton()
_show_node(alembic_sop)
##############################################################################################################
def _file_cop(self, path, sg_publish_data):
"""Read the supplied path as a file COP.
:param str path: The path to the file to import.
:param dict sg_publish_data: The publish data for the supplied path.
"""
import hou
app = self.parent
publish_name = sg_publish_data.get("name", "published_file")
# we'll use the publish name for the file cop node name, but we need to
# remove non alphanumeric characers from the string (houdini node names
# must be alphanumeric). first, build a regex to match non alpha-numeric
# characters. Then use it to replace any matches with an underscore
pattern = re.compile('[\W_]+', re.UNICODE)
publish_name = pattern.sub('_', publish_name)
# get the publish path
path = self.get_publish_path(sg_publish_data)
# houdini doesn't like UNC paths.
path = path.replace("\\", "/")
img_context = _get_current_context("/img")
try:
file_cop = img_context.createNode("file", publish_name)
except hou.OperationFailed:
# failed to create the node in the current context.
img_context = hou.node("/img")
comps = [c for c in img_context.children()
if c.type().name() == "img"]
if comps:
# if there are comp networks, just pick the first one
img_network = comps[0]
else:
# if not, create one at the /img and then add the file cop
img_network = img_context.createNode("img", "comp1")
file_cop = img_network.createNode("file", publish_name)
# replace any %0#d format string with the corresponding houdini frame
# env variable. example %04d => $F4
        frame_pattern = re.compile(r"(%0(\d)d)")
frame_match = re.search(frame_pattern, path)
if frame_match:
full_frame_spec = frame_match.group(1)
padding = frame_match.group(2)
path = path.replace(full_frame_spec, "$F%s" % (padding,))
file_cop.parm("filename1").set(path)
app.log_debug(
"Created file COP: %s\n path: '%s' " % (file_cop.path(), path))
file_cop.parm("reload").pressButton()
_show_node(file_cop)
##############################################################################################################
def _get_current_context(context_type):
"""Attempts to return the current node context.
:param str context_type: Return a full context under this context type.
Example: "/obj"
Looks for a current network pane tab displaying the supplied context type.
Returns the full context being displayed in that network editor.
"""
import hou
# default to the top level context type
context = hou.node(context_type)
network_tab = _get_current_network_panetab(context_type)
if network_tab:
context = network_tab.pwd()
return context
##############################################################################################################
def _get_current_network_panetab(context_type):
"""Attempt to retrieve the current network pane tab.
:param str context_type: Search for a network pane showing this context
type. Example: "/obj"
"""
import hou
network_tab = None
# there doesn't seem to be a way to know the current context "type" since
# there could be multiple network panels open with different contexts
# displayed. so for now, loop over pane tabs and find a network editor in
# the specified context type that is the current tab in its pane. hopefully
# that's the one the user is looking at.
for panetab in hou.ui.paneTabs():
if (isinstance(panetab, hou.NetworkEditor) and
panetab.pwd().path().startswith(context_type) and
panetab.isCurrentTab()):
network_tab = panetab
break
return network_tab
##############################################################################################################
def _show_node(node):
"""Frame the supplied node in the current network pane.
:param hou.Node node: The node to frame in the current network pane.
"""
context_type = "/" + node.path().split("/")[0]
network_tab = _get_current_network_panetab(context_type)
if not network_tab:
return
# select the node and frame it
node.setSelected(True, clear_all_selected=True)
network_tab.cd(node.parent().path())
network_tab.frameSelection()
```
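The frame-spec rewrite in _file_cop (printf-style %0#d to Houdini's $F# frame variable) works standalone; a small demo:
```python
import re

frame_pattern = re.compile(r"(%0(\d)d)")

def to_houdini_frame_spec(path):
    """Rewrite a %0#d token to the matching $F# Houdini frame variable."""
    match = frame_pattern.search(path)
    if match:
        full_frame_spec = match.group(1)  # e.g. "%04d"
        padding = match.group(2)          # e.g. "4"
        path = path.replace(full_frame_spec, "$F%s" % padding)
    return path

print(to_houdini_frame_spec("/renders/shot/comp.%04d.exr"))
# -> /renders/shot/comp.$F4.exr
```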
#### File: v1.18.0/hooks/tk-shell_actions.py
```python
import pprint
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class ShellActions(HookBaseClass):
"""
Stub implementation of the shell actions, used for testing.
"""
def generate_actions(self, sg_publish_data, actions, ui_area):
"""
Return a list of action instances for a particular publish.
This method is called each time a user clicks a publish somewhere in the UI.
The data returned from this hook will be used to populate the actions menu for a publish.
The mapping between Publish types and actions are kept in a different place
(in the configuration) so at the point when this hook is called, the loader app
has already established *which* actions are appropriate for this object.
The hook should return at least one action for each item passed in via the
actions parameter.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
Because you are operating on a particular publish, you may tailor the output
(caption, tooltip etc) to contain custom information suitable for this publish.
The ui_area parameter is a string and indicates where the publish is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
- If it will be shown in the history area, "history" is passed.
Please note that it is perfectly possible to create more than one action "instance" for
an action! You can for example do scene introspection - if the action passed in
is "character_attachment" you may for example scan the scene, figure out all the nodes
where this object can be attached and return a list of action instances:
"attach to left hand", "attach to right hand" etc. In this case, when more than
one object is returned for an action, use the params key to pass additional
data into the run_action hook.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
        :returns: List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Publish Data: %s" % (ui_area, actions, sg_publish_data))
action_instances = []
# For the sake of easy test, we'll reuse Maya publish types.
if "debug_action_1" in actions:
action_instances.append({"name": "debug_action_1",
"params": "Debug Action 1 'params'",
"caption": "Debug Action 1",
"description": "Executes Debug Action 1."})
if "debug_action_2" in actions:
action_instances.append({"name": "debug_action_2",
"params": "Debug Action 2 'params'",
"caption": "Debug Action 2",
"description": "Executes Debug Action 2."})
if "debug_action_3" in actions:
action_instances.append({"name": "debug_action_3",
"params": "Debug Action 3 'params'",
"caption": "Debug Action 3",
"description": "Executes Debug Action 3."})
if "debug_action_4" in actions:
action_instances.append({"name": "debug_action_4",
"params": "Debug Action 4 'params'",
"caption": "Debug Action 4",
"description": "Executes Debug Action 4."})
return action_instances
def execute_multiple_actions(self, actions):
"""
Executes the specified action on a list of items.
The default implementation dispatches each item from ``actions`` to
the ``execute_action`` method.
The ``actions`` is a list of dictionaries holding all the actions to execute.
Each entry will have the following values:
name: Name of the action to execute
sg_publish_data: Publish information coming from Shotgun
params: Parameters passed down from the generate_actions hook.
.. note::
This is the default entry point for the hook. It reuses the ``execute_action``
method for backward compatibility with hooks written for the previous
version of the loader.
.. note::
The hook will stop applying the actions on the selection if an error
is raised midway through.
:param list actions: Action dictionaries.
"""
app = self.parent
app.log_info("Executing action '%s' on the selection")
# Helps to visually scope selections
# Execute each action.
for single_action in actions:
name = single_action["name"]
sg_publish_data = single_action["sg_publish_data"]
params = single_action["params"]
self.execute_action(name, params, sg_publish_data)
def execute_action(self, name, params, sg_publish_data):
"""
        Print out all actions. The data sent to this method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:returns: No return value expected.
"""
app = self.parent
app.log_info("Action Name: %s" % name)
app.log_info("Parameters:")
for l in pprint.pformat(params, indent=4).split("\n"):
app.log_info(l)
app.log_info("Publish data:")
for l in pprint.pformat(sg_publish_data, indent=4).split("\n"):
app.log_info(l)
app.log_info("=" * 20)
```
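For reference, the shape of the action dictionaries execute_multiple_actions expects, as described in its docstring; the publish data below is a made-up stand-in:
```python
# Hypothetical action list in the documented form.
actions = [
    {
        "name": "debug_action_1",
        "params": "Debug Action 1 'params'",
        "sg_publish_data": {"type": "PublishedFile", "id": 123},  # stand-in
    },
]
# hook.execute_multiple_actions(actions) dispatches each entry to
# hook.execute_action(name, params, sg_publish_data) in order.
```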
#### File: python/tk_multi_loader/banner.py
```python
from sgtk.platform.qt import QtCore, QtGui
import time
class Banner(QtGui.QLabel):
"""
Banner that can be shown and then dismissed with an animation. Will always
be shown for at least 3 seconds even if a request to hide it is done before
the time is up. The banner will always be displayed at the top of the parent
widget's window.
"""
# Height of the widget.
_HEIGHT = 32
def __init__(self, parent):
"""
:param parent: Parent widget.
"""
super(Banner, self).__init__(parent)
# Sets the style sheet for the widget.
self.setStyleSheet("""
background-color: rgb(67, 131, 168);
color: rgb(255, 255, 255);
border-bottom-left-radius: 10px;
border-bottom-right-radius: 10px;
""")
# Hide the widget by default.
self.hide()
self._banner_animation = QtCore.QSequentialAnimationGroup(self)
self._show_time = 0
def show_banner(self, message):
"""
Shows the banner at the top of the widget's dialog.
:param message: Message to display in the banner.
"""
# Widget is created originally in the widget of the loader. Then Toolkit
# adds that widget to a dialog. We want to be displayed at the top of
# that dialog, so we'll reparent ourselves on first call to show_banner.
if self.parentWidget() != self.window():
self.setParent(self.window())
# Make sure any currently running animations are cleared, we want to be
# displayed at the top
self._banner_animation.clear()
# Displays the widget at the top of the dialog
self.setGeometry(self._calc_expanded_pos())
self.setText(message)
self.show()
# Take note of the time at which we displayed the banner so it remains
# visible at least 3 seconds.
self._show_time = time.time()
def hide_banner(self):
"""
Hides the banner with a scrolling animation.
"""
elapsed = (time.time() - self._show_time) * 1000
# We'll pause if required.
self._banner_animation.addPause(max(3000 - elapsed, 0))
# Compute the fully expanded and folded positions.
expanded_pos = self._calc_expanded_pos()
folded_pos = expanded_pos.translated(0, -self._HEIGHT)
# Animate the banner sliding out of the dialog.
sliding_out = QtCore.QPropertyAnimation(self, "geometry")
sliding_out.setDuration(250)
sliding_out.setStartValue(expanded_pos)
sliding_out.setEndValue(folded_pos)
self._banner_animation.addAnimation(sliding_out)
# Launch the sliding out!
self._banner_animation.start()
def _calc_expanded_pos(self):
"""
Calculates the position of the banner in the parent window. The banner
is centered and its top-side is flush with the dialog's top-side.
:returns: The rectangle in which the banner will be displayed.
:rtype: :class:`PySide.QtCore.QRect`
"""
window_size = self.window().size()
banner_width = window_size.width() * 0.5
return QtCore.QRect(
(window_size.width() - banner_width) / 2,
0,
banner_width,
self._HEIGHT
)
```
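The minimum-display-time logic in hide_banner is easy to isolate; a sketch of the pause computation:
```python
import time

def pause_before_hide(show_time, min_visible_ms=3000):
    """Milliseconds the banner must still stay visible before sliding out."""
    elapsed_ms = (time.time() - show_time) * 1000
    return max(min_visible_ms - elapsed_ms, 0)

# Shown 1.2s ago -> pause roughly 1800ms more before the hide animation.
print(pause_before_hide(time.time() - 1.2))
```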
#### File: python/tk_multi_loader/model_entity.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
# import the shotgun_model module from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunModel = shotgun_model.ShotgunModel
class SgEntityModel(ShotgunModel):
"""
This model represents the data which is displayed inside one of the treeview tabs
on the left hand side.
"""
def __init__(self, parent, entity_type, filters, hierarchy, bg_task_manager):
"""
Constructor
"""
# folder icon
self._default_icon = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Folder.png"))
# shotgun entity icons
self._entity_icons = {}
self._entity_icons["Shot"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Shot_dark.png"))
self._entity_icons["Asset"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Asset_dark.png"))
self._entity_icons["EventLogEntry"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_EventLogEntry_dark.png"))
self._entity_icons["Group"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Group_dark.png"))
self._entity_icons["HumanUser"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_HumanUser_dark.png"))
self._entity_icons["Note"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Note_dark.png"))
self._entity_icons["Project"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Project_dark.png"))
self._entity_icons["Sequence"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Sequence_dark.png"))
self._entity_icons["Task"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Task_dark.png"))
self._entity_icons["Ticket"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Ticket_dark.png"))
self._entity_icons["Version"] = QtGui.QIcon(QtGui.QPixmap(":/res/icon_Version_dark.png"))
ShotgunModel.__init__(self,
parent,
download_thumbs=False,
schema_generation=4,
bg_load_thumbs=True,
bg_task_manager=bg_task_manager)
fields=["image", "sg_status_list", "description"]
self._load_data(entity_type, filters, hierarchy, fields)
############################################################################################
# public methods
def async_refresh(self):
"""
Trigger an asynchronous refresh of the model
"""
self._refresh_data()
############################################################################################
# subclassed methods
def _populate_default_thumbnail(self, item):
"""
        Whenever an item is constructed, this method is called. It allows subclasses to intercept
        the construction of a QStandardItem and add additional metadata or make other changes
        that may be useful. Nothing needs to be returned.
        :param item: QStandardItem that is about to be added to the model. This has been primed
                     with the standard settings that the ShotgunModel handles.
"""
found_icon = False
# get the associated field data with this node
field_data = shotgun_model.get_sanitized_data(item, self.SG_ASSOCIATED_FIELD_ROLE)
# get the full sg data for this node (leafs only)
sg_data = shotgun_model.get_sg_data(item)
# {'name': 'sg_sequence', 'value': {'type': 'Sequence', 'id': 11, 'name': 'bunny_080'}}
field_value = field_data["value"]
if isinstance(field_value, dict) and "name" in field_value and "type" in field_value:
# this is an intermediate node which is an entity type link
if field_value.get("type") in self._entity_icons:
# use sg icon!
item.setIcon(self._entity_icons[ field_value.get("type") ])
found_icon = True
elif sg_data:
# this is a leaf node!
if sg_data.get("type") in self._entity_icons:
# use sg icon!
item.setIcon(self._entity_icons[ sg_data.get("type") ])
found_icon = True
# for all items where we didn't find the icon, fall back onto the default
if not found_icon:
item.setIcon(self._default_icon)
```
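The icon-resolution order in _populate_default_thumbnail, restated without Qt: entity-link intermediate nodes first, then leaf nodes, then the folder default. A sketch:
```python
def resolve_icon(field_value, sg_data, entity_icons, default_icon):
    """Mirror the lookup order used above, minus the Qt types."""
    if isinstance(field_value, dict) and "name" in field_value and "type" in field_value:
        # intermediate node that links to an entity
        return entity_icons.get(field_value["type"], default_icon)
    if sg_data:
        # leaf node with full Shotgun data
        return entity_icons.get(sg_data.get("type"), default_icon)
    return default_icon
```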
#### File: python/tk_multi_loader/model_publishhistory.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from . import utils, constants
# import the shotgun_model module from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunModel = shotgun_model.ShotgunModel
class SgPublishHistoryModel(ShotgunModel):
"""
This model represents the version history for a publish.
"""
USER_THUMB_ROLE = QtCore.Qt.UserRole + 101
PUBLISH_THUMB_ROLE = QtCore.Qt.UserRole + 102
def __init__(self, parent, bg_task_manager):
"""
Constructor
"""
# folder icon
self._loading_icon = QtGui.QPixmap(":/res/loading_100x100.png")
app = sgtk.platform.current_bundle()
ShotgunModel.__init__(self,
parent,
download_thumbs=app.get_setting("download_thumbnails"),
schema_generation=2,
bg_load_thumbs=True,
bg_task_manager=bg_task_manager)
############################################################################################
# public interface
def load_data(self, sg_data):
"""
Load the details for the shotgun publish entity described by sg_data.
:param sg_data: dictionary describing a publish in shotgun, including all the common
publish fields.
"""
app = sgtk.platform.current_bundle()
publish_entity_type = sgtk.util.get_published_file_entity_type(app.sgtk)
if publish_entity_type == "PublishedFile":
publish_type_field = "published_file_type"
else:
publish_type_field = "tank_type"
# fields to pull down
fields = [publish_type_field] + constants.PUBLISHED_FILES_FIELDS
# when we filter out which other publishes are associated with this one,
# to effectively get the "version history", we look for items
        # which have the same project, same entity association, same name, same type
# and the same task.
filters = [ ["project", "is", sg_data["project"] ],
["name", "is", sg_data["name"] ],
["task", "is", sg_data["task"] ],
["entity", "is", sg_data["entity"] ],
[publish_type_field, "is", sg_data[publish_type_field] ],
]
# add external filters from config
app = sgtk.platform.current_bundle()
pub_filters = app.get_setting("publish_filters", [])
filters.extend(pub_filters)
ShotgunModel._load_data(self,
entity_type=publish_entity_type,
filters=filters,
hierarchy=["version_number"],
fields=fields)
self._refresh_data()
def async_refresh(self):
"""
Refresh the current data set
"""
self._refresh_data()
############################################################################################
# subclassed methods
def _populate_item(self, item, sg_data):
"""
        Whenever an item is constructed, this method is called. It allows subclasses to intercept
the construction of a QStandardItem and add additional metadata or make other changes
that may be useful. Nothing needs to be returned.
:param item: QStandardItem that is about to be added to the model. This has been primed
with the standard settings that the ShotgunModel handles.
:param sg_data: Shotgun data dictionary that was received from Shotgun given the fields
and other settings specified in load_data()
"""
        # note that when the sg model creates the name field for each item,
        # it creates a string value. In our case, we use the version number as the
        # name and rely on it for automatic sorting, meaning that Qt would sort
        # "1", "10", "2" etc. instead of using proper integer ordering. Avoid this
        # by force setting the name field to a zero-padded three-digit string that
        # reflects how the values are displayed.
        # Also note that since we use a delegate for data display, this value is
        # only used for sorting, not for display.
if sg_data.get("version_number"):
item.setText("%03d" % sg_data.get("version_number"))
# see if we can get a thumbnail for the user
if sg_data.get("created_by.HumanUser.image"):
# get the thumbnail - store the unique id we get back from
# the data retrieve in a dict for fast lookup later
self._request_thumbnail_download(item,
"created_by.HumanUser.image",
sg_data["created_by.HumanUser.image"],
sg_data["created_by"]["type"],
sg_data["created_by"]["id"])
def _before_data_processing(self, sg_data_list):
"""
Called just after data has been retrieved from Shotgun but before any processing
takes place. This makes it possible for deriving classes to perform summaries,
calculations and other manipulations of the data before it is passed on to the model
class.
:param sg_data_list: list of shotgun dictionaries, as returned by the find() call.
        :returns: should return a list of shotgun dictionaries, in the same form as the input.
"""
app = sgtk.platform.current_bundle()
return utils.filter_publishes(app, sg_data_list)
def _populate_default_thumbnail(self, item):
"""
Called whenever an item needs to get a default thumbnail attached to a node.
When thumbnails are loaded, this will be called first, when an object is
either created from scratch or when it has been loaded from a cache, then later
on a call to _populate_thumbnail will follow where the subclassing implementation
can populate the real image.
"""
# set up publishes with a "thumbnail loading" icon
item.setData(self._loading_icon, SgPublishHistoryModel.PUBLISH_THUMB_ROLE)
thumb = utils.create_overlayed_user_publish_thumbnail(item.data(SgPublishHistoryModel.PUBLISH_THUMB_ROLE),
None)
item.setIcon(QtGui.QIcon(thumb))
def _populate_thumbnail_image(self, item, field, image, path):
"""
Called whenever a thumbnail for an item has arrived on disk. In the case of
an already cached thumbnail, this may be called very soon after data has been
loaded, in cases when the thumbs are downloaded from Shotgun, it may happen later.
This method will be called only if the model has been instantiated with the
download_thumbs flag set to be true. It will be called for items which are
associated with shotgun entities (in a tree data layout, this is typically
leaf nodes).
This method makes it possible to control how the thumbnail is applied and associated
        with the item. The default implementation will simply set the thumbnail to be the icon
of the item, but this can be altered by subclassing this method.
Any thumbnails requested via the _request_thumbnail_download() method will also
resurface via this callback method.
        :param item: QStandardItem which is associated with the given thumbnail
        :param field: The Shotgun field which the thumbnail is associated with.
        :param image: QImage containing the loaded thumbnail data.
        :param path: A path on disk to the thumbnail. This is a file in jpeg format.
"""
if field == "image":
thumb = QtGui.QPixmap.fromImage(image)
item.setData(thumb, SgPublishHistoryModel.PUBLISH_THUMB_ROLE)
else:
thumb = QtGui.QPixmap.fromImage(image)
item.setData(thumb, SgPublishHistoryModel.USER_THUMB_ROLE)
# composite the user thumbnail and the publish thumb into a single image
thumb = utils.create_overlayed_user_publish_thumbnail(item.data(SgPublishHistoryModel.PUBLISH_THUMB_ROLE),
item.data(SgPublishHistoryModel.USER_THUMB_ROLE))
item.setIcon(QtGui.QIcon(thumb))
```
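A hedged usage sketch for the history model above. The parent widget, the background task manager and the publish dictionary are assumptions; the task manager would typically come from the `task_manager` module of tk-framework-shotgunutils:
```python
# hypothetical wiring; assumes `parent_widget` (a QWidget) and
# `sg_publish_data` (a publish entity dict) are available
import sgtk

task_manager = sgtk.platform.import_framework(
    "tk-framework-shotgunutils", "task_manager")
bg_manager = task_manager.BackgroundTaskManager(parent_widget)

history_model = SgPublishHistoryModel(parent_widget, bg_manager)
# load the version history for the given publish
history_model.load_data(sg_publish_data)
# later, e.g. from a refresh button:
history_model.async_refresh()
```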
#### File: python/tk_multi_loader/open_publish_form.py
```python
import sgtk
from sgtk import TankError
from sgtk.platform.qt import QtCore, QtGui
from .dialog import AppDialog
from .ui.open_publish_form import Ui_OpenPublishForm
from .open_publish_action_manager import OpenPublishActionManager
def open_publish_browser(app, title, action, publish_types=None):
"""
Display the loader UI in an open-file style where a publish can be selected and the
artist can then click the action button. This will then return the selected publish.
:param app: The app this is being called from.
:param title: The title to be used for the dialog
:param action: The label to use for the action button
    :param publish_types: If specified then the UI will only show publishes
                          that match these types - this overrides the setting
                          from the configuration.
:returns: A list of Shotgun publish records for the publish(es)
that were selected in the UI. Each record in the list
is guaranteed to have a type and id but will usually
contain a much more complete list of fields from the
Shotgun PublishedFile entity
"""
from .open_publish_form import OpenPublishForm
res, widget = app.engine.show_modal(title, app, OpenPublishForm, action, publish_types)
if res == QtGui.QDialog.Accepted:
return widget.selected_publishes
return []
class OpenPublishForm(QtGui.QWidget):
"""
An 'open-file' style UI that wraps the regular loader widget.
"""
def __init__(self, action, publish_types, parent=None):
"""
Construction
:param action: A String representing the 'open' action. This is used as
the label on the 'open' button.
:param publish_types: A list of published file types to show. This list is used to pre-filter
the normal list of type filters presented in the UI.
:param parent: The QWidget this instance should be parented to
"""
QtGui.QWidget.__init__(self, parent)
self.__exit_code = QtGui.QDialog.Rejected
self.__selected_publishes = []
# create an action manager specific to the open dialog. This
# is more limited than the regular action manager to avoid
# the user being able to perform actions outside the scope of
# opening a file!
action_manager = OpenPublishActionManager(publish_types)
action_manager.default_action_triggered.connect(self._on_do_default_action)
# set up the UI
self.__ui = Ui_OpenPublishForm()
self.__ui.setupUi(self)
# now replace the placeholder loader form with the actual one. We
# do it this way so that we can set our own arguments to pass in to
# the constructor.
#
# find the placeholder form and remove it from the parent and parent
# layout, then mark it to be deleted.
placeholder_loader_form = self.__ui.loader_form
placeholder_parent = placeholder_loader_form.parent()
placeholder_parent.layout().removeWidget(placeholder_loader_form)
placeholder_loader_form.setParent(None)
placeholder_loader_form.deleteLater()
# create the new loader form with our custom action manager:
loader_form = AppDialog(action_manager, placeholder_parent)
# and finally, add it to the parent layout:
placeholder_parent.layout().insertWidget(0, loader_form, 1)
self.__ui.loader_form = loader_form
# connect all controls up:
self.__ui.open_btn.setText(action)
self.__ui.open_btn.clicked.connect(self._on_open_clicked)
self.__ui.cancel_btn.clicked.connect(self._on_cancel_clicked)
self.__ui.loader_form.selection_changed.connect(self._on_selection_changed)
@property
def exit_code(self):
"""
        Used to pass the exit code back through the sgtk dialog
:returns: The dialog exit code
"""
return self.__exit_code
@property
def selected_publishes(self):
"""
Access the currently selected publishes in the UI.
:returns: A list of Shotgun publish records for the publish(es) that were selected in the
UI. Each record in the list is guaranteed to have a type and id but will usually
contain a much more complete list of fields from the Shotgun PublishedFile entity
"""
return self.__selected_publishes
def closeEvent(self, event):
"""
Called when the widget is being closed.
:param event: The close event
"""
        # disconnect from the loader form so we don't receive any more signals:
self.__ui.loader_form.selection_changed.disconnect(self._on_selection_changed)
        # make sure we clean up the loader form with all its threads and resources!
self.__ui.loader_form.close()
def _on_open_clicked(self):
"""
Called when the 'open' button is clicked.
"""
self.__exit_code = QtGui.QDialog.Accepted
self.close()
def _on_cancel_clicked(self):
"""
Called when the 'cancel' button is clicked.
"""
self.__exit_code = QtGui.QDialog.Rejected
self.close()
def _on_selection_changed(self):
"""
Called when the selection in the UI changes.
"""
# cache the selected publishes as we won't have access
# to these once the UI has been closed!
self.__selected_publishes = self.__ui.loader_form.selected_publishes
def _on_do_default_action(self, sg_data):
"""
Called when the default action is triggered for a publish in the loader
form.
:param sg_data: The Shotgun entity dictionary for the Publish that the
default action was triggered for
"""
if not sg_data:
return
# keep track of the publish:
self.__selected_publishes = [sg_data]
# and close the dialog returning the accepted exit code.
self.__exit_code = QtGui.QDialog.Accepted
self.close()
```
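For context, this is how an app might invoke the browser defined above; the publish type name is an assumption and `app` stands for the current sgtk app instance:
```python
# hypothetical call site inside an app
publishes = open_publish_browser(
    app,
    title="Open a Published File",
    action="Open",
    publish_types=["Maya Scene"],  # assumed type; omit to use the config setting
)
for sg_publish in publishes:
    # each record is guaranteed to contain at least "type" and "id"
    print sg_publish["type"], sg_publish["id"]
```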
#### File: tk_multi_loader/ui/open_publish_form.py
```python
from sgtk.platform.qt import QtCore, QtGui
class Ui_OpenPublishForm(object):
def setupUi(self, OpenPublishForm):
OpenPublishForm.setObjectName("OpenPublishForm")
OpenPublishForm.resize(1228, 818)
self.verticalLayout = QtGui.QVBoxLayout(OpenPublishForm)
self.verticalLayout.setSpacing(4)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.loader_form = QtGui.QWidget(OpenPublishForm)
self.loader_form.setStyleSheet("#loader_form {\n"
"background-color: rgb(255, 128, 0);\n"
"}")
self.loader_form.setObjectName("loader_form")
self.verticalLayout.addWidget(self.loader_form)
self.break_line = QtGui.QFrame(OpenPublishForm)
self.break_line.setFrameShape(QtGui.QFrame.HLine)
self.break_line.setFrameShadow(QtGui.QFrame.Sunken)
self.break_line.setObjectName("break_line")
self.verticalLayout.addWidget(self.break_line)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(12, 8, 12, 12)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtGui.QSpacerItem(0, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.cancel_btn = QtGui.QPushButton(OpenPublishForm)
self.cancel_btn.setMinimumSize(QtCore.QSize(90, 0))
self.cancel_btn.setObjectName("cancel_btn")
self.horizontalLayout_3.addWidget(self.cancel_btn)
self.open_btn = QtGui.QPushButton(OpenPublishForm)
self.open_btn.setMinimumSize(QtCore.QSize(90, 0))
self.open_btn.setDefault(True)
self.open_btn.setObjectName("open_btn")
self.horizontalLayout_3.addWidget(self.open_btn)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.verticalLayout.setStretch(0, 1)
self.retranslateUi(OpenPublishForm)
QtCore.QMetaObject.connectSlotsByName(OpenPublishForm)
def retranslateUi(self, OpenPublishForm):
OpenPublishForm.setWindowTitle(QtGui.QApplication.translate("OpenPublishForm", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.cancel_btn.setText(QtGui.QApplication.translate("OpenPublishForm", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
self.open_btn.setText(QtGui.QApplication.translate("OpenPublishForm", "Open", None, QtGui.QApplication.UnicodeUTF8))
```
#### File: tk_multi_loader/ui/search_widget.py
```python
from sgtk.platform.qt import QtCore, QtGui
class Ui_SearchWidget(object):
def setupUi(self, SearchWidget):
SearchWidget.setObjectName("SearchWidget")
SearchWidget.resize(161, 50)
self.horizontalLayout = QtGui.QHBoxLayout(SearchWidget)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.group = QtGui.QGroupBox(SearchWidget)
self.group.setTitle("")
self.group.setObjectName("group")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.group)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setContentsMargins(4, 15, 4, 2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.search = QtGui.QLineEdit(self.group)
self.search.setObjectName("search")
self.horizontalLayout_2.addWidget(self.search)
self.horizontalLayout.addWidget(self.group)
self.retranslateUi(SearchWidget)
QtCore.QMetaObject.connectSlotsByName(SearchWidget)
def retranslateUi(self, SearchWidget):
SearchWidget.setWindowTitle(QtGui.QApplication.translate("SearchWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.search.setToolTip(QtGui.QApplication.translate("SearchWidget", "Enter some text to filter the publishes shown in the view below.<br>\n"
"Click the magnifying glass icon above to disable the filter.", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
```
#### File: python/tk_multi_loader/proxymodel_latestpublish.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .model_latestpublish import SgLatestPublishModel
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
class SgLatestPublishProxyModel(QtGui.QSortFilterProxyModel):
"""
Filter model to be used in conjunction with SgLatestPublishModel
"""
# signal which is emitted whenever a filter changes
filter_changed = QtCore.Signal()
def __init__(self, parent):
QtGui.QSortFilterProxyModel.__init__(self, parent)
self._valid_type_ids = None
self._show_folders = True
self._search_filter = ""
def set_search_query(self, search_filter):
"""
Specify a filter to use for searching
:param search_filter: search filter string
"""
self._search_filter = search_filter
self.invalidateFilter()
self.filter_changed.emit()
def set_filter_by_type_ids(self, type_ids, show_folders):
"""
Specify which type ids the publish model should allow through
"""
self._valid_type_ids = type_ids
self._show_folders = show_folders
        # tell the proxy model to re-evaluate the filter
self.invalidateFilter()
self.filter_changed.emit()
def filterAcceptsRow(self, source_row, source_parent_idx):
"""
Overridden from base class.
This will check each row as it is passing through the proxy
model and see if we should let it pass or not.
"""
if self._valid_type_ids is None:
# accept all!
return True
model = self.sourceModel()
current_item = model.invisibleRootItem().child(source_row) # assume non-tree structure
# first analyze any search filtering
if self._search_filter:
# there is a search filter entered
field_data = shotgun_model.get_sanitized_data(current_item, SgLatestPublishModel.SEARCHABLE_NAME)
            # all input we are getting from pyside is unicode objects;
            # all data from shotgun is utf-8. By converting to utf-8,
            # filtering on items containing unicode text also works.
search_str = self._search_filter.encode("UTF-8")
if search_str.lower() not in field_data.lower():
# item text is not matching search filter
return False
# now check if folders should be shown
is_folder = current_item.data(SgLatestPublishModel.IS_FOLDER_ROLE)
if is_folder:
return self._show_folders
# lastly, check out type filter checkboxes
sg_type_id = current_item.data(SgLatestPublishModel.TYPE_ID_ROLE)
if sg_type_id is None:
# no type. So always show.
return True
elif sg_type_id in self._valid_type_ids:
return True
else:
return False
```
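A sketch of how the proxy might sit between the publish model and a view. The source model instance, the view and the type ids are assumptions:
```python
# hypothetical wiring; `publish_model` (an SgLatestPublishModel) and
# `publish_view` (a QListView or QTreeView) are assumed to exist
proxy_model = SgLatestPublishProxyModel(parent_widget)
proxy_model.setSourceModel(publish_model)
publish_view.setModel(proxy_model)

# show folders plus publishes matching the given (assumed) type ids
proxy_model.set_filter_by_type_ids([4, 7], show_folders=True)

# narrow down further with a text search; this emits filter_changed
proxy_model.set_search_query("char_hero")
```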
#### File: tk_multi_publish2/publish_tree_widget/tree_node_summary.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .custom_widget_summary import CustomTreeWidgetSummary
logger = sgtk.platform.get_logger(__name__)
from .tree_node_base import TreeNodeBase
class TreeNodeSummary(TreeNodeBase):
"""
    Tree item representing the publish summary node
"""
def __init__(self, parent):
"""
        :param parent: The parent QWidget for this control
"""
super(TreeNodeSummary, self).__init__(parent)
self.setFlags(
QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable
)
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
# create an item widget and associate it with this QTreeWidgetItem
widget = CustomTreeWidgetSummary(self, parent)
return widget
def __repr__(self):
return "<TreeNodeSummary>"
def validate(self, standalone):
"""
Perform validation
"""
return True
def publish(self):
"""
Perform publish
"""
return True
def finalize(self):
"""
Perform finalize
"""
return True
```
#### File: tk_multi_publish2/publish_tree_widget/tree_node_item.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .custom_widget_item import CustomTreeWidgetItem
logger = sgtk.platform.get_logger(__name__)
from .tree_node_base import TreeNodeBase
from .tree_node_task import TreeNodeTask
class TreeNodeItem(TreeNodeBase):
"""
Tree item for a publish item
"""
def __init__(self, item, parent):
"""
        :param item: The publish item that this tree node represents
:param parent: The parent QWidget for this control
"""
self._item = item
super(TreeNodeItem, self).__init__(parent)
self.setFlags(self.flags() | QtCore.Qt.ItemIsSelectable)
# go ahead and keep a handle on these so they can be reused
self._expanded_icon = QtGui.QIcon(":/tk_multi_publish2/down_arrow.png")
self._collapsed_icon = QtGui.QIcon(":/tk_multi_publish2/right_arrow.png")
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
# create an item widget and associate it with this QTreeWidgetItem
widget = CustomTreeWidgetItem(self, parent)
# update with any saved state
widget.set_header("<b>%s</b><br>%s" % (self._item.name, self._item.display_type))
widget.set_icon(self._item.icon)
widget.set_checkbox_value(self.data(0, self.CHECKBOX_ROLE))
# connect the collapse/expand tool button to the toggle callback
widget.expand_indicator.clicked.connect(
lambda: self.setExpanded(not self.isExpanded()))
return widget
def __repr__(self):
return "<TreeNodeItem %s>" % str(self)
def __str__(self):
return "%s %s" % (self._item.display_type, self._item.name)
def create_summary(self):
"""
Creates summary of actions
:returns: List of strings
"""
if self.enabled:
items_summaries = []
task_summaries = []
for child_index in xrange(self.childCount()):
child_item = self.child(child_index)
if isinstance(child_item, TreeNodeTask):
task_summaries.extend(child_item.create_summary())
else:
# sub-items
items_summaries.extend(child_item.create_summary())
summary = []
if len(task_summaries) > 0:
summary_str = "<b>%s</b><br>" % self.item.name
summary_str += "<br>".join(["– %s" % task_summary for task_summary in task_summaries])
summary.append(summary_str)
summary.extend(items_summaries)
return summary
else:
return []
@property
def item(self):
"""
Associated item instance
"""
return self._item
def get_publish_instance(self):
"""
Returns the low level item or task instance
that this object represents
:returns: task or item instance
"""
return self.item
def setExpanded(self, expand):
"""
Expands the item if expand is true, otherwise collapses the item.
Overrides the default implementation to display the custom
expand/collapse toggle tool button properly.
:param bool expand: True if item should be expanded, False otherwise
"""
super(TreeNodeItem, self).setExpanded(expand)
self._check_expand_state()
def double_clicked(self, column):
"""Called when the item is double clicked
:param int column: The model column that was double clicked on the item.
"""
# ensure the expand/collapse indicator is properly displayed. this is
# called just before the expansion state is toggled. so we show the
# opposite icon
if self.isExpanded():
icon = self._collapsed_icon
else:
icon = self._expanded_icon
self._embedded_widget.expand_indicator.setIcon(icon)
def _check_expand_state(self):
"""
Sets the expand indicator based on the expand state of the item
"""
if self.isExpanded():
icon = self._expanded_icon
else:
icon = self._collapsed_icon
self._embedded_widget.expand_indicator.setIcon(icon)
class TopLevelTreeNodeItem(TreeNodeItem):
"""
    Tree item for a top-level publish item
"""
def __init__(self, item, parent):
"""
        :param item: The publish item that this tree node represents
:param parent: The parent QWidget for this control
"""
super(TopLevelTreeNodeItem, self).__init__(item, parent)
# ensure items that allow context change are draggable
if self.item.context_change_allowed:
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsDragEnabled
else:
flags = QtCore.Qt.ItemIsSelectable
self.setFlags(self.flags() | flags)
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
widget = super(TopLevelTreeNodeItem, self)._create_widget(parent)
# show the proper drag handle
widget.show_drag_handle(self.item.context_change_allowed)
return widget
def synchronize_context(self):
"""
Updates the context for the underlying item given the
current position in the tree
"""
# our parent node is always a context node
self.item.context = self.parent().context
```
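To illustrate `create_summary`, a hedged sketch of how a dialog might aggregate the per-item summaries from the top-level nodes of a tree widget; the `tree` instance is an assumption:
```python
# hypothetical aggregation over the top-level items of a QTreeWidget `tree`
summary_lines = []
for index in xrange(tree.topLevelItemCount()):
    node = tree.topLevelItem(index)
    if isinstance(node, TreeNodeItem):
        summary_lines.extend(node.create_summary())
# each entry is an html fragment, so join them for display in a label
summary_html = "<br><br>".join(summary_lines)
```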
#### File: publish_tree_widget/ui/task_widget.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_TaskWidget(object):
def setupUi(self, TaskWidget):
TaskWidget.setObjectName("TaskWidget")
TaskWidget.resize(338, 36)
self.verticalLayout = QtGui.QVBoxLayout(TaskWidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QtGui.QFrame(TaskWidget)
self.frame.setMinimumSize(QtCore.QSize(0, 32))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout = QtGui.QHBoxLayout(self.frame)
self.horizontalLayout.setSpacing(8)
self.horizontalLayout.setContentsMargins(8, 2, 2, 2)
self.horizontalLayout.setObjectName("horizontalLayout")
self.icon = QtGui.QLabel(self.frame)
self.icon.setMinimumSize(QtCore.QSize(18, 18))
self.icon.setMaximumSize(QtCore.QSize(18, 18))
self.icon.setPixmap(QtGui.QPixmap(":/tk_multi_publish2/shotgun.png"))
self.icon.setScaledContents(True)
self.icon.setAlignment(QtCore.Qt.AlignCenter)
self.icon.setObjectName("icon")
self.horizontalLayout.addWidget(self.icon)
self.header = QtGui.QLabel(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.header.sizePolicy().hasHeightForWidth())
self.header.setSizePolicy(sizePolicy)
self.header.setObjectName("header")
self.horizontalLayout.addWidget(self.header)
self.status = QtGui.QToolButton(self.frame)
self.status.setMinimumSize(QtCore.QSize(30, 22))
self.status.setMaximumSize(QtCore.QSize(30, 22))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/tk_multi_publish2/status_publish.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.status.setIcon(icon)
self.status.setIconSize(QtCore.QSize(16, 16))
self.status.setObjectName("status")
self.horizontalLayout.addWidget(self.status)
self.checkbox = QtGui.QCheckBox(self.frame)
self.checkbox.setObjectName("checkbox")
self.horizontalLayout.addWidget(self.checkbox)
self.verticalLayout.addWidget(self.frame)
self.retranslateUi(TaskWidget)
QtCore.QMetaObject.connectSlotsByName(TaskWidget)
def retranslateUi(self, TaskWidget):
TaskWidget.setWindowTitle(QtGui.QApplication.translate("TaskWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.header.setText(QtGui.QApplication.translate("TaskWidget", "<big>Alembic Caches</big>", None, QtGui.QApplication.UnicodeUTF8))
self.status.setToolTip(QtGui.QApplication.translate("TaskWidget", "Click for more details", None, QtGui.QApplication.UnicodeUTF8))
self.status.setText(QtGui.QApplication.translate("TaskWidget", "...", None, QtGui.QApplication.UnicodeUTF8))
self.checkbox.setToolTip(QtGui.QApplication.translate("TaskWidget", "hint: shift-click to toggle all items of this type", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
```
#### File: v0.10.7/hooks/primary_publish.py
```python
import os
import uuid
import tempfile
import tank
from tank import Hook
from tank import TankError
class PrimaryPublishHook(Hook):
"""
Single hook that implements publish of the primary task
"""
def execute(
self, task, work_template, comment, thumbnail_path, sg_task, progress_cb,
user_data, **kwargs
):
"""
Main hook entry point
:param task: Primary task to be published. This is a
dictionary containing the following keys:
{
item: Dictionary
This is the item returned by the scan hook
{
name: String
description: String
type: String
other_params: Dictionary
}
output: Dictionary
This is the output as defined in the configuration - the
primary output will always be named 'primary'
{
name: String
publish_template: template
tank_type: String
}
}
:param work_template: template
This is the template defined in the config that
represents the current work file
:param comment: String
The comment provided for the publish
        :param thumbnail_path: Path string
The default thumbnail provided for the publish
:param sg_task: Dictionary (shotgun entity description)
The shotgun task to use for the publish
:param progress_cb: Function
A progress callback to log progress during pre-publish. Call:
progress_cb(percentage, msg)
to report progress to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: Path String
Hook should return the path of the primary publish so that it
can be passed as a dependency to all secondary publishes
:raises: Hook should raise a TankError if publish of the
primary task fails
"""
# get the engine name from the parent object (app/engine/etc.)
engine = self.parent.engine
engine_name = engine.name
args = [
task,
work_template,
comment,
thumbnail_path,
sg_task,
progress_cb,
user_data,
]
# depending on engine:
if engine_name == "tk-maya":
return self._do_maya_publish(*args)
elif engine_name == "tk-motionbuilder":
return self._do_motionbuilder_publish(*args)
elif engine_name == "tk-hiero":
return self._do_hiero_publish(*args)
elif engine_name == "tk-nuke":
return self._do_nuke_publish(*args)
elif engine_name == "tk-3dsmax":
return self._do_3dsmax_publish(*args)
elif engine_name == "tk-3dsmaxplus":
return self._do_3dsmaxplus_publish(*args)
elif engine_name == "tk-houdini":
return self._do_houdini_publish(*args)
elif engine_name == "tk-softimage":
return self._do_softimage_publish(*args)
elif engine_name == "tk-photoshopcc":
return self._do_photoshop_publish(*args)
elif engine_name == "tk-photoshop":
return self._do_legacy_photoshop_publish(*args)
elif engine_name == "tk-mari":
return self._do_mari_publish(*args)
else:
raise TankError("Unable to perform publish for unhandled engine %s" % engine_name)
def _do_maya_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Maya scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import maya.cmds as cmds
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._maya_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(cmds.file(query=True, sn=True))
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
cmds.file(save=True, force=True)
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _maya_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
import maya.cmds as cmds
# default implementation looks for references and
# textures (file nodes) and returns any paths that
# match a template defined in the configuration
ref_paths = set()
# first let's look at maya references
ref_nodes = cmds.ls(references=True)
for ref_node in ref_nodes:
# get the path:
ref_path = cmds.referenceQuery(ref_node, filename=True)
            # make the path platform-specific
            # (maya uses C:/style/paths)
ref_path = ref_path.replace("/", os.path.sep)
if ref_path:
ref_paths.add(ref_path)
# now look at file texture nodes
for file_node in cmds.ls(l=True, type="file"):
# ensure this is actually part of this scene and not referenced
if cmds.referenceQuery(file_node, isNodeReferenced=True):
# this is embedded in another reference, so don't include it in the
# breakdown
continue
            # get the path and make it platform-specific
            # (maya uses C:/style/paths)
texture_path = cmds.getAttr("%s.fileTextureName" % file_node).replace("/", os.path.sep)
if texture_path:
ref_paths.add(texture_path)
# now, for each reference found, build a list of the ones
# that resolve against a template:
dependency_paths = []
for ref_path in ref_paths:
# see if there is a template that is valid for this path:
for template in self.parent.tank.templates.values():
if template.validate(ref_path):
dependency_paths.append(ref_path)
break
return dependency_paths
def _do_motionbuilder_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Motion Builder scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
from pyfbsdk import FBApplication
mb_app = FBApplication()
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._motionbuilder_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(mb_app.FBXFileName)
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
mb_app.FileSave(scene_path)
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _motionbuilder_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# initial implementation does nothing!
return []
def _do_3dsmax_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main 3ds Max scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
from Py3dsMax import mxs
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._3dsmax_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(os.path.join(mxs.maxFilePath, mxs.maxFileName))
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
mxs.saveMaxFile(scene_path)
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _3dsmax_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# default implementation does nothing!
return []
def _do_3dsmaxplus_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main 3ds Max scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import MaxPlus
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._3dsmax_plus_find_additional_scene_dependencies()
# get scene path
scene_path = MaxPlus.FileManager.GetFileNameAndPath()
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
MaxPlus.FileManager.Save(scene_path)
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _3dsmax_plus_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# default implementation does nothing!
return []
def _do_nukestudio_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
        Publish the currently selected Nuke Studio project.
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
# The routine, out of the box, is the same as in Hiero, so
# we can just call through to that.
return self._do_hiero_publish(
task,
work_template,
comment,
thumbnail_path,
sg_task,
progress_cb,
user_data,
)
def _do_hiero_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
        Publish the currently selected Hiero project.
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import hiero.core
        # first find out which project is current. Hiero is a multi-project
        # environment, so we can ask the engine which project was clicked in
        # order to launch this publish.
selection = self.parent.engine.get_menu_selection()
# these values should in theory already be validated, but just in case...
if len(selection) != 1:
raise TankError("Please select a single Project!")
if not isinstance(selection[0], hiero.core.Bin):
raise TankError("Please select a Hiero Project!")
project = selection[0].project()
if project is None:
# apparently bins can be without projects (child bins I think)
raise TankError("Please select a Hiero Project!")
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._hiero_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(project.path().replace("/", os.path.sep))
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
project.save()
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _hiero_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# default implementation does nothing!
return []
def _do_nuke_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Nuke script
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
# If we're in Nuke Studio or Hiero, run those publish routines.
engine = self.parent.engine
if hasattr(engine, "studio_enabled") and engine.studio_enabled:
return self._do_nukestudio_publish(
task,
work_template,
comment,
thumbnail_path,
sg_task,
progress_cb,
user_data,
)
elif hasattr(engine, "hiero_enabled") and engine.hiero_enabled:
return self._do_hiero_publish(
task,
work_template,
comment,
thumbnail_path,
sg_task,
progress_cb,
user_data,
)
import nuke
progress_cb(0.0, "Finding dependencies", task)
dependencies = self._nuke_find_script_dependencies()
# get scene path
script_path = nuke.root().name().replace("/", os.path.sep)
if script_path == "Root":
script_path = ""
script_path = os.path.abspath(script_path)
if not work_template.validate(script_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % script_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(script_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(25.0, "Saving the script")
self.parent.log_debug("Saving the Script...")
nuke.scriptSave()
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (script_path, publish_path))
self.parent.copy_file(script_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (script_path, publish_path, e))
# work out name for publish:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _nuke_find_script_dependencies(self):
"""
Find all dependencies for the current nuke script
"""
import nuke
# figure out all the inputs to the scene and pass them as dependency candidates
dependency_paths = []
for read_node in nuke.allNodes("Read"):
# make sure we have a file path and normalize it
# file knobs set to "" in Python will evaluate to None. This is different than
# if you set file to an empty string in the UI, which will evaluate to ""!
file_name = read_node.knob("file").evaluate()
if not file_name:
continue
file_name = file_name.replace('/', os.path.sep)
# validate against all our templates
for template in self.parent.tank.templates.values():
if template.validate(file_name):
fields = template.get_fields(file_name)
# translate into a form that represents the general
# tank write node path.
fields["SEQ"] = "FORMAT: %d"
fields["eye"] = "%V"
dependency_paths.append(template.apply_fields(fields))
break
return dependency_paths
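    # For illustration (hypothetical paths): a Read node evaluating to
    #   /proj/sh010/comp/v003/sh010_comp.0001.exr
    # whose matching template defines {SEQ} (and optionally {eye}) keys is
    # registered as the whole sequence, e.g.
    #   /proj/sh010/comp/v003/sh010_comp.%04d.exr
    # rather than as the single frame the file knob happened to evaluate to.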
def _do_houdini_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Houdini scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import hou
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._houdini_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(hou.hipFile.name())
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
hou.hipFile.save()
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _houdini_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# initial implementation does nothing!
return []
def _do_softimage_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Softimage scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import win32com
from win32com.client import Dispatch, constants
from pywintypes import com_error
Application = Dispatch("XSI.Application").Application
progress_cb(0.0, "Finding scene dependencies", task)
dependencies = self._softimage_find_additional_scene_dependencies()
# get scene path
scene_path = os.path.abspath(Application.ActiveProject.ActiveScene.filename.value)
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(10.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
Application.SaveScene()
# copy the file:
progress_cb(50.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(75.0, "Registering the publish")
self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependencies)
progress_cb(100)
return publish_path
def _softimage_find_additional_scene_dependencies(self):
"""
Find additional dependencies from the scene
"""
# initial implementation does nothing!
return []
def _do_photoshop_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Photoshop scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
:raises: TankError on failure.
"""
adobe = self.parent.engine.adobe
try:
doc = adobe.app.activeDocument
except RuntimeError:
raise TankError("There is no active document!")
# get scene path
try:
scene_path = doc.fullName.fsName
except RuntimeError:
raise TankError("The active document has not been saved!")
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(0.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
adobe.save_as(doc, scene_path)
# copy the file:
progress_cb(25.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
        except Exception as e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(50.0, "Registering the publish")
tank_publish = self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependency_paths=[])
#################################################################################
# create a version!
try:
# The export_as_jpeg method was not available in early releases
# of the tk-photoshopcc engine. It is not possible to specify a minimum
# release for an engine in a multi app, so check if the method is
# available and issue a warning if not.
if not hasattr(self.parent.engine, "export_as_jpeg"):
raise UserWarning(
"A more recent release than %s %s is needed to generate a Jpeg Version." % (
self.parent.engine.name,
self.parent.engine.version,
))
# Export a Jpeg image
jpeg_pub_path = self.parent.engine.export_as_jpeg()
# Then register version
progress_cb(60.0, "Creating Version...")
ctx = self.parent.context
data = {
"user": ctx.user,
"description": comment,
"sg_first_frame": 1,
"frame_count": 1,
"frame_range": "1-1",
"sg_last_frame": 1,
"entity": ctx.entity,
"sg_path_to_frames": publish_path,
"project": ctx.project,
"sg_task": sg_task,
"code": tank_publish["code"],
"created_by": ctx.user,
}
if tank.util.get_published_file_entity_type(self.parent.tank) == "PublishedFile":
data["published_files"] = [tank_publish]
else: # == "TankPublishedFile"
data["tank_published_file"] = tank_publish
version = self.parent.shotgun.create("Version", data)
# upload jpeg
progress_cb(70.0, "Uploading to Shotgun...")
self.parent.shotgun.upload(
"Version",
version['id'],
jpeg_pub_path,
"sg_uploaded_movie"
)
try:
os.remove(jpeg_pub_path)
            except Exception as e:
                # Catch the error if unable to remove the temp file, but log the
                # error for debug purposes.
self.parent.log_debug(
"Failed to remove tmp jpeg file %s: %s" % (jpeg_pub_path, e)
)
        except Exception as e:
            # Do not prevent publishing from completing if an error happened
            # while creating a Version.
self.parent.log_warning(
"Unable to create a Version in SG because of the following error:"
)
self.parent.log_exception(e)
progress_cb(100)
return publish_path
def _do_legacy_photoshop_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Publish the main Photoshop scene
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import photoshop
doc = photoshop.app.activeDocument
if doc is None:
raise TankError("There is no currently active document!")
# get scene path
scene_path = doc.fullName.nativePath
if not work_template.validate(scene_path):
raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path)
# use templates to convert to publish path:
output = task["output"]
fields = work_template.get_fields(scene_path)
fields["TankType"] = output["tank_type"]
publish_template = output["publish_template"]
publish_path = publish_template.apply_fields(fields)
if os.path.exists(publish_path):
raise TankError("The published file named '%s' already exists!" % publish_path)
# save the scene:
progress_cb(0.0, "Saving the scene")
self.parent.log_debug("Saving the scene...")
photoshop.save_as(doc, scene_path)
# copy the file:
progress_cb(25.0, "Copying the file")
try:
publish_folder = os.path.dirname(publish_path)
self.parent.ensure_folder_exists(publish_folder)
self.parent.log_debug("Copying %s --> %s..." % (scene_path, publish_path))
self.parent.copy_file(scene_path, publish_path, task)
except Exception, e:
raise TankError("Failed to copy file from %s to %s - %s" % (scene_path, publish_path, e))
# work out publish name:
publish_name = self._get_publish_name(publish_path, publish_template, fields)
# finally, register the publish:
progress_cb(50.0, "Registering the publish")
tank_publish = self._register_publish(publish_path,
publish_name,
sg_task,
fields["version"],
output["tank_type"],
comment,
thumbnail_path,
dependency_paths=[])
#################################################################################
# create a version!
jpeg_pub_path = os.path.join(tempfile.gettempdir(), "%s_sgtk.jpg" % uuid.uuid4().hex)
jpeg_file = photoshop.RemoteObject('flash.filesystem::File', jpeg_pub_path)
jpeg_options = photoshop.RemoteObject('com.adobe.photoshop::JPEGSaveOptions')
jpeg_options.quality = 12
# save as a copy
photoshop.app.activeDocument.saveAs(jpeg_file, jpeg_options, True)
# then register version
progress_cb(60.0, "Creating Version...")
ctx = self.parent.context
data = {
"user": ctx.user,
"description": comment,
"sg_first_frame": 1,
"frame_count": 1,
"frame_range": "1-1",
"sg_last_frame": 1,
"entity": ctx.entity,
"sg_path_to_frames": publish_path,
"project": ctx.project,
"sg_task": sg_task,
"code": tank_publish["code"],
"created_by": ctx.user,
}
if tank.util.get_published_file_entity_type(self.parent.tank) == "PublishedFile":
data["published_files"] = [tank_publish]
else: # == "TankPublishedFile"
data["tank_published_file"] = tank_publish
version = self.parent.shotgun.create("Version", data)
# upload jpeg
progress_cb(70.0, "Uploading to Shotgun...")
self.parent.shotgun.upload("Version", version['id'], jpeg_pub_path, "sg_uploaded_movie")
try:
os.remove(jpeg_pub_path)
except:
pass
progress_cb(100)
return publish_path
def _do_mari_publish(
self, task, work_template, comment, thumbnail_path, sg_task,
progress_cb, user_data
):
"""
Perform the primary publish for Mari
:param task: The primary task to publish
:param work_template: The primary work template to use
:param comment: The publish description/comment
:param thumbnail_path: The path to the thumbnail to associate with the published file
:param sg_task: The Shotgun task that this publish should be associated with
:param progress_cb: A callback to use when reporting any progress
to the UI
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: The path to the file that has been published
"""
import mari
# Currently there is no primary publish for Mari so just save the current
# project to ensure nothing is lost if something goes wrong!
progress_cb(0, "Saving the current project", task)
proj = mari.projects.current()
if proj:
proj.save()
progress_cb(100)
def _get_publish_name(self, path, template, fields=None):
"""
Return the 'name' to be used for the file - if possible
this will return a 'versionless' name
"""
# first, extract the fields from the path using the template:
fields = fields.copy() if fields else template.get_fields(path)
if "name" in fields and fields["name"]:
# well, that was easy!
name = fields["name"]
else:
# find out if version is used in the file name:
template_name, _ = os.path.splitext(os.path.basename(template.definition))
version_in_name = "{version}" in template_name
# extract the file name from the path:
name, _ = os.path.splitext(os.path.basename(path))
delims_str = "_-. "
if version_in_name:
# looks like version is part of the file name so we
# need to isolate it so that we can remove it safely.
# First, find a dummy version whose string representation
# doesn't exist in the name string
version_key = template.keys["version"]
dummy_version = 9876
while True:
test_str = version_key.str_from_value(dummy_version)
if test_str not in name:
break
dummy_version += 1
# now use this dummy version and rebuild the path
fields["version"] = dummy_version
path = template.apply_fields(fields)
name, _ = os.path.splitext(os.path.basename(path))
# we can now locate the version in the name and remove it
dummy_version_str = version_key.str_from_value(dummy_version)
v_pos = name.find(dummy_version_str)
# remove any preceding 'v'
pre_v_str = name[:v_pos].rstrip("v")
post_v_str = name[v_pos + len(dummy_version_str):]
if (pre_v_str and post_v_str
and pre_v_str[-1] in delims_str
and post_v_str[0] in delims_str):
# only want one delimiter - strip the second one:
post_v_str = post_v_str.lstrip(delims_str)
versionless_name = pre_v_str + post_v_str
versionless_name = versionless_name.strip(delims_str)
if versionless_name:
# great - lets use this!
name = versionless_name
else:
# likely that version is only thing in the name so
# instead, replace the dummy version with #'s:
zero_version_str = version_key.str_from_value(0)
new_version_str = "#" * len(zero_version_str)
name = name.replace(dummy_version_str, new_version_str)
return name
def _register_publish(self, path, name, sg_task, publish_version, tank_type, comment, thumbnail_path,
dependency_paths):
"""
Helper method to register publish using the
specified publish info.
"""
# construct args:
args = {
"tk": self.parent.tank,
"context": self.parent.context,
"comment": comment,
"path": path,
"name": name,
"version_number": publish_version,
"thumbnail_path": thumbnail_path,
"task": sg_task,
"dependency_paths": dependency_paths,
"published_file_type": tank_type,
}
self.parent.log_debug("Register publish in shotgun: %s" % str(args))
# register publish:
sg_data = tank.util.register_publish(**args)
return sg_data
```
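The version-stripping logic in `_get_publish_name` above is the subtlest part of this hook. Below is a minimal standalone sketch of just the strip-and-collapse step; it assumes the version token has already been located unambiguously, which is exactly why the real hook first rebuilds the name with a dummy version that cannot collide with other digits in the string.
```python
import os

# A minimal sketch of the name cleanup performed by _get_publish_name:
# strip a known version token out of a file name and collapse the
# delimiter that surrounded it. The version string handling here is an
# assumption - the real hook derives the token from the template key.
DELIMS = "_-. "

def versionless_name(file_name, version_str):
    name, _ = os.path.splitext(os.path.basename(file_name))
    v_pos = name.find(version_str)
    if v_pos < 0:
        return name
    pre = name[:v_pos].rstrip("v")            # drop a preceding 'v' prefix
    post = name[v_pos + len(version_str):]
    if pre and post and pre[-1] in DELIMS and post[0] in DELIMS:
        post = post.lstrip(DELIMS)            # keep only one delimiter
    return (pre + post).strip(DELIMS)

# e.g. versionless_name("shot010_v003_comp.nk", "003") -> "shot010_comp"
```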
#### File: python/tk_multi_publish/publish_result_form.py
```python
import tank
from tank.platform.qt import QtCore, QtGui
class PublishResultForm(QtGui.QWidget):
"""
Implementation of the main publish UI
"""
close = QtCore.Signal()
def __init__(self, parent=None):
"""
Construction
"""
QtGui.QWidget.__init__(self, parent)
self._status = True
self._errors = []
# set up the UI
from .ui.publish_result_form import Ui_PublishResultForm
self._ui = Ui_PublishResultForm()
self._ui.setupUi(self)
self._ui.close_btn.clicked.connect(self._on_close)
self._update_ui()
# @property
def __get_status(self):
return self._status
# @status.setter
def __set_status(self, value):
self._status = value
self._update_ui()
status=property(__get_status, __set_status)
# @property
def __get_errors(self):
return self._errors
# @errors.setter
def __set_errors(self, value):
self._errors = value
self._update_ui()
errors=property(__get_errors, __set_errors)
def _on_close(self):
self.close.emit()
def _update_ui(self):
self._ui.status_icon.setPixmap(QtGui.QPixmap([":/res/failure.png", ":/res/success.png"][self._status]))
self._ui.status_title.setText(["Failure!", "Success"][self._status])
details = ""
if self._status:
details = ("Your Publish has successfully completed. Your "
"work has been shared, your scene has been "
"versioned up and your mates have been notified!")
else:
details = "\n\n".join(self._errors)
self._ui.status_details.setText(details)
```
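The `__get_status`/`property(...)` spelling in `PublishResultForm` predates the decorator-with-setter syntax hinted at by the commented `# @property` lines. For comparison only, a modern equivalent of the `status` property might look like this sketch:
```python
class Example(object):
    """Sketch only: decorator form of the status property above."""

    def __init__(self):
        self._status = True

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value
        # the real form also calls self._update_ui() here
```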
#### File: tk_multi_publish/ui/item_list.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_ItemList(object):
def setupUi(self, ItemList):
ItemList.setObjectName("ItemList")
ItemList.resize(397, 265)
self.horizontalLayout_2 = QtGui.QHBoxLayout(ItemList)
self.horizontalLayout_2.setContentsMargins(12, 2, 2, 2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.main_frame = QtGui.QFrame(ItemList)
self.main_frame.setStyleSheet("#main_frame {\n"
"border-style: solid;\n"
"border-width: 1;\n"
"border-radius: 2px;\n"
"}")
self.main_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.main_frame.setFrameShadow(QtGui.QFrame.Raised)
self.main_frame.setObjectName("main_frame")
self.verticalLayout = QtGui.QVBoxLayout(self.main_frame)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.section_label = QtGui.QLabel(self.main_frame)
self.section_label.setStyleSheet("#section_label {\n"
"font-size: 10pt\n"
"}")
self.section_label.setIndent(4)
self.section_label.setObjectName("section_label")
self.horizontalLayout.addWidget(self.section_label)
self.expand_label = QtGui.QLabel(self.main_frame)
self.expand_label.setMinimumSize(QtCore.QSize(20, 20))
self.expand_label.setBaseSize(QtCore.QSize(20, 20))
self.expand_label.setText("")
self.expand_label.setPixmap(QtGui.QPixmap(":/res/group_expand.png"))
self.expand_label.setScaledContents(False)
self.expand_label.setAlignment(QtCore.Qt.AlignCenter)
self.expand_label.setObjectName("expand_label")
self.horizontalLayout.addWidget(self.expand_label)
self.horizontalLayout.setStretch(0, 1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.line = QtGui.QFrame(self.main_frame)
self.line.setFrameShadow(QtGui.QFrame.Plain)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.item_frame = QtGui.QFrame(self.main_frame)
self.item_frame.setStyleSheet("#item_frame {\n"
"border-style: none;\n"
"}")
self.item_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.item_frame.setFrameShadow(QtGui.QFrame.Raised)
self.item_frame.setObjectName("item_frame")
self.verticalLayout.addWidget(self.item_frame)
self.verticalLayout.setStretch(2, 1)
self.horizontalLayout_2.addWidget(self.main_frame)
self.retranslateUi(ItemList)
QtCore.QMetaObject.connectSlotsByName(ItemList)
def retranslateUi(self, ItemList):
ItemList.setWindowTitle(QtGui.QApplication.translate("ItemList", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.section_label.setText(QtGui.QApplication.translate("ItemList", "<b>n items available</b>, <i>expand to turn individual items on and off</i>", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
```
#### File: v0.10.9/hooks/scan_scene_tk-3dsmaxplus.py
```python
import os
import MaxPlus
import sgtk
from sgtk import Hook
from sgtk import TankError
class ScanSceneHook(Hook):
"""
Hook to scan scene for items to publish
"""
def execute(self, **kwargs):
"""
Main hook entry point
:returns: A list of any items that were found to be published.
Each item in the list should be a dictionary containing
the following keys:
{
type: String
This should match a scene_item_type defined in
one of the outputs in the configuration and is
used to determine the outputs that should be
published for the item
name: String
Name to use for the item in the UI
description: String
Description of the item to use in the UI
selected: Bool
Initial selected state of item in the UI.
Items are selected by default.
required: Bool
Required state of item in the UI. If True then
item will not be deselectable. Items are not
required by default.
other_params: Dictionary
Optional dictionary that will be passed to the
pre-publish and publish hooks
}
"""
items = []
# get the main scene:
filename = MaxPlus.FileManager.GetFileName()
if not filename:
raise TankError("Please Save your file before Publishing")
# create the primary item - 'type' should match the 'primary_scene_item_type':
items.append({"type": "work_file", "name": filename})
# If there are objects in the scene, then we will register
# a geometry item. This is a bit simplistic in its approach
# to determining whether there's exportable data in the scene
# that's useful when exported as an Alembic cache, but it will
# work in most cases.
if list(MaxPlus.Core.GetRootNode().Children):
items.append({"type": "geometry", "name": "All Scene Geometry"})
return items
```
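For reference, here is a hypothetical, slightly richer items list that a scan scene hook of this shape could return. Only `type` and `name` are required; the remaining keys are the optional ones described in the docstring, and the values shown are illustrative.
```python
items = [
    # primary item - 'type' matches the configured primary_scene_item_type
    {"type": "work_file", "name": "shot010_anim_v012.max"},
    # secondary item with the optional keys filled in
    {
        "type": "geometry",
        "name": "All Scene Geometry",
        "description": "Everything under the scene root node",
        "selected": True,
        "required": False,
        "other_params": {"root": "Scene Root"},  # passed through to publish hooks
    },
]
```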
#### File: v0.10.9/hooks/secondary_publish_tk-nuke.py
```python
import os
import shutil
import nuke
import tank
from tank import Hook
from tank import TankError
class PublishHook(Hook):
"""
Single hook that implements publish functionality for secondary tasks
"""
def __init__(self, *args, **kwargs):
"""
Construction
"""
# call base init
Hook.__init__(self, *args, **kwargs)
# cache a couple of apps that we may need later on:
self.__write_node_app = self.parent.engine.apps.get("tk-nuke-writenode")
self.__review_submission_app = self.parent.engine.apps.get("tk-multi-reviewsubmission")
def execute(self, *args, **kwargs):
"""
Main hook entry point
:param tasks: List of secondary tasks to be published. Each task is a
dictionary containing the following keys:
{
item: Dictionary
This is the item returned by the scan hook
{
name: String
description: String
type: String
other_params: Dictionary
}
output: Dictionary
This is the output as defined in the configuration - the
primary output will always be named 'primary'
{
name: String
publish_template: template
tank_type: String
}
}
:param work_template: template
This is the template defined in the config that
represents the current work file
:param comment: String
The comment provided for the publish
:param thumbnail: Path string
The default thumbnail provided for the publish
:param sg_task: Dictionary (shotgun entity description)
The shotgun task to use for the publish
:param primary_publish_path: Path string
This is the path of the primary published file as returned
by the primary publish hook
:param progress_cb: Function
A progress callback to log progress during pre-publish. Call:
progress_cb(percentage, msg)
to report progress to the UI
:param primary_task: The primary task that was published by the primary publish hook. Passed
in here for reference. This is a dictionary in the same format as the
secondary tasks above.
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: A list of any tasks that had problems that need to be reported
in the UI. Each item in the list should be a dictionary containing
the following keys:
{
task: Dictionary
This is the task that was passed into the hook and
should not be modified
{
item:...
output:...
}
errors: List
A list of error messages (strings) to report
}
"""
engine = self.parent.engine
if hasattr(engine, "hiero_enabled") and engine.hiero_enabled:
return self._hiero_execute(*args, **kwargs)
elif hasattr(engine, "studio_enabled") and engine.studio_enabled:
return self._studio_execute(*args, **kwargs)
else:
return self._nuke_execute(*args, **kwargs)
def _studio_execute(
self, tasks, work_template, comment, thumbnail_path, sg_task,
primary_task, primary_publish_path, progress_cb, **kwargs
):
"""
The Nuke Studio specific secondary publish routine.
"""
# We treat Nuke Studio the same as Hiero, so call through.
return self._hiero_execute(
tasks,
work_template,
comment,
thumbnail_path,
sg_task,
primary_task,
primary_publish_path,
progress_cb,
**kwargs
)
def _hiero_execute(
self, tasks, work_template, comment, thumbnail_path, sg_task,
primary_task, primary_publish_path, progress_cb, **kwargs
):
"""
The Hiero specific secondary publish routine.
"""
results = []
# publish all tasks:
for task in tasks:
item = task["item"]
output = task["output"]
errors = []
# report progress:
progress_cb(0, "Publishing", task)
# publish item here, e.g.
#if output["name"] == "foo":
# ...
#else:
# don't know how to publish this output type!
errors.append("Don't know how to publish this item!")
# if there is anything to report then add to result
if len(errors) > 0:
# add result:
results.append({"task":task, "errors":errors})
progress_cb(100)
return results
def _nuke_execute(
self, tasks, work_template, comment, thumbnail_path, sg_task,
primary_task, primary_publish_path, progress_cb, **kwargs
):
"""
The Nuke specific secondary publish routine.
"""
results = []
# it's important that tasks for render output are processed
# before tasks for quicktime output, so let's group the
# task list by output. This can be controlled through the
# configuration but we shouldn't rely on that being set up
# correctly!
output_order = ["render", "quicktime"]
tasks_by_output = {}
for task in tasks:
output_name = task["output"]["name"]
tasks_by_output.setdefault(output_name, list()).append(task)
if output_name not in output_order:
output_order.append(output_name)
# make sure we have any apps required by the publish process:
if "render" in tasks_by_output or "quicktime" in tasks_by_output:
# we will need the write node app if we have any render outputs to validate
if not self.__write_node_app:
raise TankError("Unable to publish Shotgun Write Nodes without the tk-nuke-writenode app!")
if "quicktime" in tasks_by_output:
# If we have the tk-multi-reviewsubmission app we can create versions
if not self.__review_submission_app:
raise TankError("Unable to publish Review Versions without the tk-multi-reviewsubmission app!")
# Keep track of what has been published in shotgun
# this is needed as input into the review creation code...
render_publishes = {}
# process outputs in order:
for output_name in output_order:
# process each task for this output:
for task in tasks_by_output.get(output_name, []):
# keep track of our errors for this task
errors = []
# report progress:
progress_cb(0.0, "Publishing", task)
if output_name == "render":
# Publish the rendered output for a Shotgun Write Node
# each publish task is connected to a nuke write node
# this value was populated via the scan scene hook
write_node = task["item"].get("other_params", dict()).get("node")
if not write_node:
raise TankError("Could not determine nuke write node for item '%s'!" % str(task))
# publish write-node rendered sequence
try:
(sg_publish, thumbnail_path) = self._publish_write_node_render(task,
write_node,
primary_publish_path,
sg_task,
comment,
progress_cb)
# keep track of our publish data so that we can pick it up later in review
render_publishes[ write_node.name() ] = (sg_publish, thumbnail_path)
except Exception, e:
errors.append("Publish failed - %s" % e)
elif output_name == "quicktime":
# Publish the reviewable quicktime movie for a Shotgun Write Node
# each publish task is connected to a nuke write node
# this value was populated via the scan scene hook
write_node = task["item"].get("other_params", dict()).get("node")
if not write_node:
raise TankError("Could not determine nuke write node for item '%s'!" % str(task))
# Submit published sequence to Screening Room
try:
# pick up sg data from the render dict we are maintaining
# note: we assume that the rendering tasks always happen
# before the review tasks inside the publish...
(sg_publish, thumbnail_path) = render_publishes[ write_node.name() ]
self._send_to_screening_room (
write_node,
sg_publish,
sg_task,
comment,
thumbnail_path,
progress_cb
)
except Exception, e:
errors.append("Submit to Screening Room failed - %s" % e)
else:
# unhandled output type!
errors.append("Don't know how to publish this item!")
# if there is anything to report then add to result
if len(errors) > 0:
# add result:
results.append({"task":task, "errors":errors})
# task is finished
progress_cb(100)
return results
def _send_to_screening_room(self, write_node, sg_publish, sg_task, comment, thumbnail_path, progress_cb):
"""
Take a write node's published files and run them through the review_submission app
to get a movie and Shotgun Version.
:param write_node: The Shotgun Write node to submit a review version for
:param sg_publish: The Shotgun publish entity dictionary to link the version with
:param sg_task: The Shotgun task entity dictionary for the publish
:param comment: The publish comment
:param thumbnail_path: The path to a thumbnail for the publish
:param progress_cb: A callback to use to report any progress
"""
render_path = self.__write_node_app.get_node_render_path(write_node)
render_template = self.__write_node_app.get_node_render_template(write_node)
publish_template = self.__write_node_app.get_node_publish_template(write_node)
render_path_fields = render_template.get_fields(render_path)
if hasattr(self.__review_submission_app, "render_and_submit_version"):
# this is a recent version of the review submission app that contains
# the new method that also accepts a colorspace argument.
colorspace = self._get_node_colorspace(write_node)
self.__review_submission_app.render_and_submit_version(
publish_template,
render_path_fields,
int(nuke.root()["first_frame"].value()),
int(nuke.root()["last_frame"].value()),
[sg_publish],
sg_task,
comment,
thumbnail_path,
progress_cb,
colorspace
)
else:
# This is an older version of the app so fall back to the legacy
# method - this may mean the colorspace of the rendered movie is
# inconsistent/wrong!
self.__review_submission_app.render_and_submit(
publish_template,
render_path_fields,
int(nuke.root()["first_frame"].value()),
int(nuke.root()["last_frame"].value()),
[sg_publish],
sg_task,
comment,
thumbnail_path,
progress_cb
)
def _get_node_colorspace(self, node):
"""
Get the colorspace for the specified nuke node
:param node: The nuke node to find the colorspace for
:returns: The string representing the colorspace for the node
"""
cs_knob = node.knob("colorspace")
if not cs_knob:
return
cs = cs_knob.value()
# handle default value where cs would be something like: 'default (linear)'
if cs.startswith("default (") and cs.endswith(")"):
cs = cs[9:-1]
return cs
def _publish_write_node_render(self, task, write_node, published_script_path, sg_task, comment, progress_cb):
"""
Publish render output for write node
"""
if self.__write_node_app.is_node_render_path_locked(write_node):
# this is a fatal error as publishing would result in inconsistent paths for the rendered files!
raise TankError("The render path is currently locked and does not match match the current Work Area.")
progress_cb(10, "Finding renders")
# get info we need in order to do the publish:
render_path = self.__write_node_app.get_node_render_path(write_node)
render_files = self.__write_node_app.get_node_render_files(write_node)
render_template = self.__write_node_app.get_node_render_template(write_node)
publish_template = self.__write_node_app.get_node_publish_template(write_node)
tank_type = self.__write_node_app.get_node_tank_type(write_node)
# publish (copy files):
progress_cb(25, "Copying files")
for fi, rf in enumerate(render_files):
# report copy progress scaled between 25% and 75%
progress_cb(25 + 50.0 * (fi + 1) / len(render_files))
# construct the publish path:
fields = render_template.get_fields(rf)
fields["TankType"] = tank_type
target_path = publish_template.apply_fields(fields)
# copy the file
try:
target_folder = os.path.dirname(target_path)
self.parent.ensure_folder_exists(target_folder)
self.parent.copy_file(rf, target_path, task)
except Exception, e:
raise TankError("Failed to copy file from %s to %s - %s" % (rf, target_path, e))
progress_cb(40, "Publishing to Shotgun")
# use the render path to work out the publish 'file' and name:
render_path_fields = render_template.get_fields(render_path)
render_path_fields["TankType"] = tank_type
publish_path = publish_template.apply_fields(render_path_fields)
# construct publish name:
publish_name = ""
rp_name = render_path_fields.get("name")
rp_channel = render_path_fields.get("channel")
if not rp_name and not rp_channel:
publish_name = "Publish"
elif not rp_name:
publish_name = "Channel %s" % rp_channel
elif not rp_channel:
publish_name = rp_name
else:
publish_name = "%s, Channel %s" % (rp_name, rp_channel)
publish_version = render_path_fields["version"]
# get/generate thumbnail:
thumbnail_path = self.__write_node_app.generate_node_thumbnail(write_node)
# register the publish:
sg_publish = self._register_publish(publish_path,
publish_name,
sg_task,
publish_version,
tank_type,
comment,
thumbnail_path,
[published_script_path])
return sg_publish, thumbnail_path
def _register_publish(self, path, name, sg_task, publish_version, tank_type, comment, thumbnail_path, dependency_paths):
"""
Helper method to register publish using the
specified publish info.
"""
# construct args:
args = {
"tk": self.parent.tank,
"context": self.parent.context,
"comment": comment,
"path": path,
"name": name,
"version_number": publish_version,
"thumbnail_path": thumbnail_path,
"task": sg_task,
"dependency_paths": dependency_paths,
"published_file_type":tank_type,
}
# register publish;
sg_data = tank.util.register_publish(**args)
return sg_data
```
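The `cs[9:-1]` slice in `_get_node_colorspace` is easy to misread, so here is the same parsing step in isolation: Nuke reports defaults as `default (<actual>)`, and the slice removes the nine-character `default (` prefix and the trailing `)`.
```python
def parse_colorspace(cs):
    # "default (linear)" -> "linear"; anything else passes through
    if cs.startswith("default (") and cs.endswith(")"):
        cs = cs[9:-1]
    return cs

assert parse_colorspace("default (linear)") == "linear"
assert parse_colorspace("sRGB") == "sRGB"
```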
#### File: v0.10.9/hooks/secondary_publish_tk-softimage.py
```python
import os
import shutil
import tank
from tank import Hook
from tank import TankError
import win32com
from win32com.client import Dispatch, constants
from pywintypes import com_error
Application = Dispatch("XSI.Application").Application
class PublishHook(Hook):
"""
Single hook that implements publish functionality for secondary tasks
"""
def execute(
self, tasks, work_template, comment, thumbnail_path, sg_task, primary_task,
primary_publish_path, progress_cb, user_data, **kwargs):
"""
Main hook entry point
:param tasks: List of secondary tasks to be published. Each task is a
dictionary containing the following keys:
{
item: Dictionary
This is the item returned by the scan hook
{
name: String
description: String
type: String
other_params: Dictionary
}
output: Dictionary
This is the output as defined in the configuration - the
primary output will always be named 'primary'
{
name: String
publish_template: template
tank_type: String
}
}
:param work_template: template
This is the template defined in the config that
represents the current work file
:param comment: String
The comment provided for the publish
:param thumbnail: Path string
The default thumbnail provided for the publish
:param sg_task: Dictionary (shotgun entity description)
The shotgun task to use for the publish
:param primary_publish_path: Path string
This is the path of the primary published file as returned
by the primary publish hook
:param progress_cb: Function
A progress callback to log progress during pre-publish. Call:
progress_cb(percentage, msg)
to report progress to the UI
:param primary_task: The primary task that was published by the primary publish hook. Passed
in here for reference. This is a dictionary in the same format as the
secondary tasks above.
:param user_data: A dictionary containing any data shared by other hooks run prior to
this hook. Additional data may be added to this dictionary that will
then be accessible from user_data in any hooks run after this one.
:returns: A list of any tasks that had problems that need to be reported
in the UI. Each item in the list should be a dictionary containing
the following keys:
{
task: Dictionary
This is the task that was passed into the hook and
should not be modified
{
item:...
output:...
}
errors: List
A list of error messages (strings) to report
}
"""
results = []
# publish all tasks:
for task in tasks:
item = task["item"]
output = task["output"]
errors = []
# report progress:
progress_cb(0, "Publishing", task)
# publish item here, e.g.
#if output["name"] == "foo":
# ...
#else:
# don't know how to publish this output type!
errors.append("Don't know how to publish this item!")
# if there is anything to report then add to result
if len(errors) > 0:
# add result:
results.append({"task":task, "errors":errors})
progress_cb(100)
return results
```
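Like the other secondary publish hooks, this stub reports failures back per task. A hypothetical example of the return value when one task could not be handled, following the structure documented in the docstring above:
```python
results = [
    {
        # the original task dictionary, passed back unmodified
        "task": {
            "item": {"name": "All Scene Geometry", "type": "geometry"},
            "output": {"name": "alembic_cache"},
        },
        "errors": ["Don't know how to publish this item!"],
    },
]
```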
#### File: python/tk_multi_publish/publish.py
```python
import os
import tempfile
import tank
from tank import TankError
from tank.platform.qt import QtCore, QtGui
from .progress import TaskProgressReporter
from .output import PublishOutput
from .item import Item
from .task import Task
logger = tank.platform.get_logger(__name__)
class PublishHandler(object):
"""
Main publish handler
"""
def __init__(self, app):
"""
Construction
"""
self._app = app
# load outputs from configuration:
self.build_outputs()
# validate the secondary outputs:
unique_names = []
for output in self._secondary_outputs:
# secondary output name can't be primary
if output.name == PublishOutput.PRIMARY_NAME:
raise TankError("Secondary output name cannot be '%s'" % PublishOutput.PRIMARY_NAME)
# output names must be unique:
if output.name in unique_names:
raise TankError("Multiple secondary outputs found with the name '%s'" % output.name)
unique_names.append(output.name)
# secondary output scene item type can't be the same as the primary scene
# item type (the interface doesn't allow it!)
# TODO: This may be a redundant requirement but need to confirm
# before removing
if output.scene_item_type == self._primary_output.scene_item_type:
raise TankError("Secondary output is defined with the same scene_item_type (%s) as the primary output - this is not allowed"
% self._primary_output.scene_item_type)
@property
def work_template(self):
"""
The current work file template as sourced from the parent app.
"""
return self._app.get_template("template_work")
def build_outputs(self):
"""
Rebuilds the primary and secondary outputs based on the parent app's
current settings.
"""
# ---- load primary outputs from configuration:
primary_output_dict = {}
primary_output_dict["scene_item_type"] = self._app.get_setting("primary_scene_item_type")
primary_output_dict["display_name"] = self._app.get_setting("primary_display_name")
primary_output_dict["description"] = self._app.get_setting("primary_description")
primary_output_dict["icon"] = self._app.get_setting("primary_icon")
primary_output_dict["tank_type"] = self._app.get_setting("primary_tank_type")
primary_output_dict["publish_template"] = self._app.get_setting("primary_publish_template")
logger.debug("Primary Output: %s" % (primary_output_dict,))
self._primary_output = PublishOutput(
self._app,
primary_output_dict,
name=PublishOutput.PRIMARY_NAME,
selected=True,
required=True,
)
# ---- load secondary outputs from configuration:
self._secondary_outputs = []
for output in self._app.get_setting("secondary_outputs"):
logger.debug("Secondary Output: %s" % (output,))
self._secondary_outputs.append(
PublishOutput(self._app, output)
)
def rebuild_primary_output(self):
"""
Deprecated in favor of `build_outputs()`. Left to ensure backward
compatibility for any client code that may be calling this.
:return:
"""
# call the full output build
self.build_outputs()
def show_publish_dlg(self):
"""
Displays the publish dialog
"""
try:
# create new multi-publish dialog instance
from .publish_form import PublishForm
display_name = self._app.get_setting("display_name")
form = self._app.engine.show_dialog(
display_name,
self._app,
PublishForm,
self._app,
self,
)
form.publish.connect(lambda f = form: self._on_publish(f))
except TankError, e:
QtGui.QMessageBox.information(None, "Unable To Publish!", "%s" % e)
except Exception:
import traceback
QtGui.QMessageBox.information(
None,
"Unable To Publish!",
traceback.format_exc(),
)
def get_publish_tasks(self):
"""
Get the list of tasks that can be published
"""
# scan scene for items
items = self._scan_scene()
# build task list:
tasks = self._build_task_list(items)
return tasks
def get_shotgun_tasks(self):
"""
Pull a list of tasks from shotgun based on the current context
"""
filters = []
if self._app.context.entity is None:
# looks like we have a project only context!
# in this case, show tasks associated with the project
filters.append( ["entity", "is", self._app.context.project] )
else:
# std entity based context
filters.append( ["entity", "is", self._app.context.entity] )
if self._app.context.step:
filters.append( ["step", "is", self._app.context.step] )
order = [{"field_name":"step", "direction":"asc"}, {"field_name":"content", "direction":"asc"}]
fields = ["step", "content"]
sg_tasks = self._app.shotgun.find("Task", filters=filters, fields=fields, order=order)
return sg_tasks
def get_initial_thumbnail(self):
"""
Get the initial thumbnail to use for the publish.
:returns: A :class:`QtGui.QPixmap` instance or None if the thumbnail
generation failed.
"""
try:
thumb_path = self._app.execute_hook("hook_thumbnail")
# If the hook didn't return anything, don't try to build a QPixmap
# from it.
if thumb_path:
pixmap = QtGui.QPixmap(thumb_path)
if pixmap.isNull():
# Log debug information: sometimes the jpeg format is not
# available from PySide, which can explain why no thumbnail
# appears in the UI.
self._app.log_debug(
"Unable to build a pixmap from %s" % thumb_path
)
self._app.log_debug(
"Supported formats are %s" % QtGui.QImageReader.supportedImageFormats()
)
return pixmap
except Exception, e:
logger.warning(
"Unable to generate initial thumbnail because of the following error:"
)
# Log the exception + traceback for debug purpose.
logger.exception(e)
return None
def _on_publish(self, publish_form):
"""
Slot called when publish signal is emitted from the UI
"""
# get list of tasks from UI:
selected_tasks = publish_form.selected_tasks
# stop if can't actually do the publish!
if not selected_tasks:
# TODO - replace with tank dialog
QtGui.QMessageBox.information(publish_form, "Publish", "Nothing selected to publish - unable to continue!")
return
# split tasks into primary and secondary:
primary_task=None
secondary_tasks=[]
for ti, task in enumerate(selected_tasks):
if task.output == self._primary_output:
if primary_task:
raise TankError("Found multiple primary tasks to publish!")
primary_task = task
secondary_tasks = selected_tasks[:ti] + selected_tasks[(ti+1):]
if not primary_task:
raise TankError("Couldn't find primary task to publish!")
# pull rest of info from UI
sg_task = publish_form.shotgun_task
thumbnail = publish_form.thumbnail
comment = publish_form.comment
# create progress reporter and connect to UI:
progress = TaskProgressReporter(selected_tasks)
publish_form.set_progress_reporter(progress)
# show pre-publish progress:
publish_form.show_publish_progress("Doing Pre-Publish")
progress.reset()
# make dialog modal whilst we're doing work:
"""
(AD) - whilst this almost works, returning from modal state seems to
completely mess up the window parenting in Maya so may need to have another
way to do this or (more likely) move it to a separate dialog!
geom = publish_form.window().geometry()
publish_form.window().setWindowModality(QtCore.Qt.ApplicationModal)
publish_form.window().hide()
publish_form.window().show()
publish_form.window().setGeometry(geom)
"""
# We're going to pass a dict through the hooks that will allow
# data to be passed from one hook down the line to the rest.
user_data = dict()
# do pre-publish:
try:
self._do_pre_publish(
primary_task,
secondary_tasks,
progress.report,
user_data=user_data,
)
except TankError, e:
QtGui.QMessageBox.information(publish_form, "Pre-publish Failed",
"Pre-Publish Failed!\n\n%s" % e)
publish_form.show_publish_details()
return
except Exception, e:
self._app.log_exception("Pre-publish Failed")
publish_form.show_publish_details()
return
finally:
"""
# restore window to be modeless
publish_form.window().setWindowModality(QtCore.Qt.NonModal)
publish_form.window().hide()
publish_form.window().show()
publish_form.window().setGeometry(geom)
QtGui.QApplication.processEvents()
"""
# We have cases where the DCC's window is brought to foreground
# when certain operations are performed, so after each phase of
# the publish process is complete we'll make sure our window is
# still on top.
publish_form.window().raise_()
# check that we can continue:
num_errors = 0
for task in selected_tasks:
num_errors += len(task.pre_publish_errors)
if num_errors > 0:
publish_form.show_publish_details()
# TODO: replace with Tank dialog
res = QtGui.QMessageBox.warning(publish_form,
"Pre-publish Messages",
("Pre-publish checks returned some messages for "
"your attention. \n\nWould you like to go back and review "
"these prior to publish?"),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if res == QtGui.QMessageBox.Yes:
return
# show publish progress:
publish_form.show_publish_progress("Publishing")
progress.reset()
# save the thumbnail to a temporary location:
thumbnail_path = ""
try:
if thumbnail and not thumbnail.isNull():
# have a thumbnail so save it to a temporary file:
temp_file, thumbnail_path = tempfile.mkstemp(suffix=".png", prefix="tanktmp")
if temp_file:
os.close(temp_file)
thumbnail.save(thumbnail_path)
# do the publish
publish_errors = []
do_post_publish = False
try:
# do primary publish:
primary_path = self._do_primary_publish(
primary_task,
sg_task,
thumbnail_path,
comment,
progress.report,
user_data=user_data,
)
do_post_publish = True
# We have cases where the DCC's window is brought to foreground
# when certain operations are performed, so after each phase of
# the publish process is complete we'll make sure our window is
# still on top.
publish_form.window().raise_()
# do secondary publishes:
self._do_secondary_publish(
secondary_tasks,
primary_task,
primary_path,
sg_task,
thumbnail_path,
comment,
progress.report,
user_data=user_data,
)
except TankError, e:
self._app.log_exception("Publish Failed")
publish_errors.append("%s" % e)
except Exception, e:
self._app.log_exception("Publish Failed")
publish_errors.append("%s" % e)
finally:
# We have cases where the DCC's window is brought to foreground
# when certain operations are performed, so after each phase of
# the publish process is complete we'll make sure our window is
# still on top.
publish_form.window().raise_()
# delete temporary thumbnail file:
if thumbnail_path:
os.remove(thumbnail_path)
# check for any other publish errors:
for task in secondary_tasks:
for error in task.publish_errors:
publish_errors.append("%s, %s: %s" % (task.output.display_name, task.item.name, error))
# if publish didn't fail then do post publish:
if do_post_publish:
publish_form.show_publish_progress("Doing Post-Publish")
progress.reset(1)
try:
self._do_post_publish(
primary_task,
secondary_tasks,
progress.report,
user_data=user_data,
)
# Log the toolkit "Published" metric
try:
self._app.log_metric("Published")
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
except TankError, e:
self._app.log_exception("Post-publish Failed")
publish_errors.append("Post-publish: %s" % e)
except Exception, e:
self._app.log_exception("Post-publish Failed")
publish_errors.append("Post-publish: %s" % e)
finally:
# We have cases where the DCC's window is brought to foreground
# when certain operations are performed, so after each phase of
# the publish process is complete we'll make sure our window is
# still on top.
publish_form.window().raise_()
else:
# inform that post-publish didn't run
publish_errors.append("Post-publish was not run due to previous errors!")
# show publish result:
publish_form.show_publish_result(not publish_errors, publish_errors)
def _build_task_list(self, items):
"""
Takes a list of items and builds a list of tasks containing
each item and it's corresponding output in output-centric
order
"""
# need single list of all outputs:
all_outputs = [self._primary_output] + self._secondary_outputs
# First, validate that all items specify a known scene item type. Any
# that don't are skipped and won't be published by the app.
valid_items = []
output_scene_item_types = set([output.scene_item_type for output in all_outputs])
for item in items:
if item.scene_item_type in output_scene_item_types:
valid_items.append(item)
else:
self._app.log_debug("Skipping item '%s' as it has an unrecognised scene item type %s"
% (item.name, item.scene_item_type))
# Now loop through all outputs and build list of tasks.
# Note: this is deliberately output-centric to allow control
# of the order through the configuration (order of secondary
# outputs)
tasks = []
for output in all_outputs:
for item in valid_items:
if item.scene_item_type == output.scene_item_type:
tasks.append(Task(item, output))
return tasks
def _scan_scene(self):
"""
Find the list of 'items' to publish
"""
# find the items:
items = [Item(item) for item in self._app.execute_hook("hook_scan_scene")]
# validate that only one matches the primary type
# and that all items are valid:
primary_type = self._primary_output.scene_item_type
primary_item = None
for item in items:
item.validate()
item_type = item.scene_item_type
if item_type == primary_type:
if primary_item:
raise TankError("Scan scene returned multiple items for the primary output type '%s' which is not allowed"
% primary_type)
else:
primary_item = item
if not primary_item:
raise TankError("Scan scene didn't return a primary item to publish!")
return items
def _do_pre_publish(self, primary_task, secondary_tasks, progress_cb, user_data):
"""
Do pre-publish pass on tasks using the pre-publish hook
"""
# do pre-publish of primary task:
primary_task.pre_publish_errors = self._app.execute_hook(
"hook_primary_pre_publish",
task=primary_task.as_dictionary(),
work_template=self.work_template,
progress_cb=progress_cb,
user_data=user_data,
)
# do pre-publish of secondary tasks:
hook_tasks = [task.as_dictionary() for task in secondary_tasks]
pp_results = self._app.execute_hook(
"hook_secondary_pre_publish",
tasks=hook_tasks,
work_template=self.work_template,
progress_cb=progress_cb,
user_data=user_data,
)
# push any errors back to tasks:
result_index = {}
for result in pp_results:
try:
errors = result.get("errors")
if not errors:
continue
item_name = result["task"]["item"]["name"]
output_name = result["task"]["output"]["name"]
result_index[(item_name, output_name)] = result
except:
raise TankError("Badly formed result returned from hook: %s" % result)
for task in secondary_tasks:
result = result_index.get((task.item.name, task.output.name))
if result:
task.pre_publish_errors = result["errors"]
else:
task.pre_publish_errors = []
def _do_primary_publish(
self, primary_task, sg_task, thumbnail_path, comment, progress_cb,
user_data
):
"""
Do publish of primary task with the primary publish hook
"""
primary_path = self._app.execute_hook(
"hook_primary_publish",
task=primary_task.as_dictionary(),
work_template=self.work_template,
comment=comment,
thumbnail_path=thumbnail_path,
sg_task=sg_task,
progress_cb=progress_cb,
user_data=user_data,
)
return primary_path
def _do_secondary_publish(
self, secondary_tasks, primary_task, primary_publish_path, sg_task,
thumbnail_path, comment, progress_cb, user_data
):
"""
Do publish of secondary tasks using the secondary publish hook
"""
# do publish of secondary tasks:
hook_tasks = [task.as_dictionary() for task in secondary_tasks]
p_results = self._app.execute_hook(
"hook_secondary_publish",
tasks=hook_tasks,
work_template=self.work_template,
comment=comment,
thumbnail_path=thumbnail_path,
sg_task=sg_task,
primary_task=primary_task.as_dictionary(),
primary_publish_path=primary_publish_path,
progress_cb=progress_cb,
user_data=user_data,
)
# push any errors back to tasks:
result_index = {}
for result in p_results:
try:
errors = result.get("errors")
if not errors:
continue
item_name = result["task"]["item"]["name"]
output_name = result["task"]["output"]["name"]
result_index[(item_name, output_name)] = result
except:
raise TankError("Badly formed result returned from hook: %s" % result)
for task in secondary_tasks:
result = result_index.get((task.item.name, task.output.name))
if result:
task.publish_errors = result["errors"]
else:
task.publish_errors = []
def _do_post_publish(self, primary_task, secondary_tasks, progress_cb, user_data):
"""
Do post-publish using the post-publish hook
"""
# do post-publish using post-publish hook:
primary_hook_task = primary_task.as_dictionary()
secondary_hook_tasks = [task.as_dictionary() for task in secondary_tasks]
self._app.execute_hook(
"hook_post_publish",
work_template=self.work_template,
primary_task=primary_hook_task,
secondary_tasks=secondary_hook_tasks,
progress_cb=progress_cb,
user_data=user_data,
)
```
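Both `_do_pre_publish` and `_do_secondary_publish` map hook results back onto `Task` objects by the `(item name, output name)` pair. A standalone sketch of that matching step, using a hypothetical `index_results` helper:
```python
def index_results(results):
    # build {(item_name, output_name): errors} from the hook's result list
    index = {}
    for result in results:
        if result.get("errors"):
            key = (result["task"]["item"]["name"],
                   result["task"]["output"]["name"])
            index[key] = result["errors"]
    return index

# usage sketch:
# errors_by_task = index_results(hook_results)
# for task in secondary_tasks:
#     task.publish_errors = errors_by_task.get(
#         (task.item.name, task.output.name), [])
```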
#### File: python/tk_multi_reviewsubmission/submitter.py
```python
import sgtk
import os
from sgtk.platform.qt import QtCore
class Submitter(object):
def __init__(self):
"""
Construction
"""
self.__app = sgtk.platform.current_bundle()
def submit_version(self, path_to_frames, path_to_movie, thumbnail_path, sg_publishes,
sg_task, comment, store_on_disk, first_frame, last_frame,
upload_to_shotgun):
"""
Create a version in Shotgun for this path and linked to this publish.
"""
# get current shotgun user
current_user = sgtk.util.get_current_user(self.__app.sgtk)
# create a name for the version based on the file name
# grab the file name, strip off extension
name = os.path.splitext(os.path.basename(path_to_movie))[0]
# do some replacements
name = name.replace("_", " ")
# and capitalize
name = name.capitalize()
# Create the version in Shotgun
ctx = self.__app.context
data = {
"code": name,
"sg_status_list": self.__app.get_setting("new_version_status"),
"entity": ctx.entity,
"sg_task": sg_task,
"sg_first_frame": first_frame,
"sg_last_frame": last_frame,
"frame_count": (last_frame-first_frame+1),
"frame_range": "%s-%s" % (first_frame, last_frame),
"sg_frames_have_slate": False,
"created_by": current_user,
"user": current_user,
"description": comment,
"sg_path_to_frames": path_to_frames,
"sg_movie_has_slate": True,
"project": ctx.project,
}
if sgtk.util.get_published_file_entity_type(self.__app.sgtk) == "PublishedFile":
data["published_files"] = sg_publishes
else: # == "TankPublishedFile"
if len(sg_publishes) > 0:
if len(sg_publishes) > 1:
self.__app.log_warning("Only the first publish of %d can be registered for the new version!" % len(sg_publishes))
data["tank_published_file"] = sg_publishes[0]
if store_on_disk:
data["sg_path_to_movie"] = path_to_movie
sg_version = self.__app.sgtk.shotgun.create("Version", data)
self.__app.log_debug("Created version in shotgun: %s" % str(data))
# upload files:
self._upload_files(sg_version, path_to_movie, thumbnail_path, upload_to_shotgun)
return sg_version
def _upload_files(self, sg_version, output_path, thumbnail_path, upload_to_shotgun):
"""
"""
# Upload in a new thread and make our own event loop to wait for the
# thread to finish.
event_loop = QtCore.QEventLoop()
thread = UploaderThread(self.__app, sg_version, output_path, thumbnail_path, upload_to_shotgun)
thread.finished.connect(event_loop.quit)
thread.start()
event_loop.exec_()
# log any errors generated in the thread
for e in thread.get_errors():
self.__app.log_error(e)
class UploaderThread(QtCore.QThread):
"""
Simple worker thread that encapsulates uploading to shotgun.
Broken out of the main loop so that the UI can remain responsive
even though an upload is happening
"""
def __init__(self, app, version, path_to_movie, thumbnail_path, upload_to_shotgun):
QtCore.QThread.__init__(self)
self._app = app
self._version = version
self._path_to_movie = path_to_movie
self._thumbnail_path = thumbnail_path
self._upload_to_shotgun = upload_to_shotgun
self._errors = []
def get_errors(self):
"""
can be called after execution to retrieve a list of errors
"""
return self._errors
def run(self):
"""
Thread loop
"""
upload_error = False
if self._upload_to_shotgun:
try:
self._app.sgtk.shotgun.upload("Version", self._version["id"], self._path_to_movie, "sg_uploaded_movie")
except Exception, e:
self._errors.append("Movie upload to Shotgun failed: %s" % e)
upload_error = True
if not self._upload_to_shotgun or upload_error:
try:
self._app.sgtk.shotgun.upload_thumbnail("Version", self._version["id"], self._thumbnail_path)
except Exception, e:
self._errors.append("Thumbnail upload to Shotgun failed: %s" % e)
```
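`_upload_files` blocks on the worker thread without freezing the UI by spinning a local event loop until the thread's `finished` signal fires. A minimal sketch of that wait pattern, assuming the same Qt4-era API imported in the file:
```python
from sgtk.platform.qt import QtCore

class Worker(QtCore.QThread):
    def run(self):
        pass  # long-running work (e.g. an upload) goes here

def run_and_wait(worker):
    # make our own event loop and quit it when the thread finishes
    loop = QtCore.QEventLoop()
    worker.finished.connect(loop.quit)
    worker.start()
    loop.exec_()  # returns once 'finished' is emitted
```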
#### File: v1.4.8/hooks/tk-3dsmaxplus_actions.py
```python
import os
import sgtk
import MaxPlus
HookBaseClass = sgtk.get_hook_baseclass()
class MaxActions(HookBaseClass):
"""
Shotgun Panel Actions for 3dsMax
"""
def generate_actions(self, sg_data, actions, ui_area):
"""
Returns a list of action instances for a particular object.
The data returned from this hook will be used to populate the
actions menu.
The mapping between Shotgun objects and actions are kept in a different place
(in the configuration) so at the point when this hook is called, the app
has already established *which* actions are appropriate for this object.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
The ui_area parameter is a string and indicates where the item is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
:param sg_data: Shotgun data dictionary with all the standard shotgun fields.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
:returns: List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Shotgun Data: %s" % (ui_area, actions, sg_data))
action_instances = []
try:
# call base class first
action_instances += HookBaseClass.generate_actions(self, sg_data, actions, ui_area)
except AttributeError, e:
# base class doesn't have the method, so ignore and continue
pass
if "import" in actions:
action_instances.append( {"name": "merge",
"params": None,
"caption": "Merge",
"description": "This will merge the contents of this file into the current scene."} )
if "reference" in actions:
action_instances.append( {"name": "xref_scene",
"params": None,
"caption": "XRef Reference",
"description": "This will insert a reference to this file into the current scene."} )
if "texture_node" in actions:
action_instances.append( {"name": "texture_node",
"params": None,
"caption": "Create Texture Node",
"description": "Creates a file texture node for the selected item."} )
return action_instances
def execute_action(self, name, params, sg_data):
"""
Execute a given action. The data sent to this be method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_data: Shotgun data dictionary
:returns: No return value expected.
"""
app = self.parent
app.log_debug("Execute action called for action %s. "
"Parameters: %s. Shotgun Data: %s" % (name, params, sg_data))
# resolve path
path = self.get_publish_path(sg_data)
# If this is an Alembic cache, then we can import that.
if path.lower().endswith(".abc"):
# Note that native Alembic support is only available in Max 2016+.
if app.engine._max_version_to_year(
app.engine._get_max_version()) >= 2016:
self._import_alembic(path)
else:
app.log_warning(
"Alembic imports are not available in Max 2015, skipping.")
elif name == "merge":
self._merge(path, sg_data)
elif name == "xref_scene":
self._xref_scene(path, sg_data)
elif name == "texture_node":
self._create_texture_node(path, sg_data)
else:
try:
HookBaseClass.execute_action(self, name, params, sg_data)
except AttributeError, e:
# base class doesn't have the method, so ignore and continue
pass
##############################################################################################################
# helper methods which can be subclassed in custom hooks to fine tune the behaviour of things
def _import_alembic(self, path):
"""
Imports the given Alembic cache into the scene.
:param path: Path to .abc file.
"""
# Note that this is assuming Z-Up data. That means this will
# work properly for .abc files exported from 3ds Max, but
# will likely NOT be correct when importing .abc files from
# DCC applications that operate in a Y-Up coordinate system.
# The fix for that would be to set AlembicImport.ZUp to false
# via maxscript prior to running the importFile.
self.parent.engine.safe_dialog_exec(
lambda: MaxPlus.Core.EvalMAXScript(
"importFile @\"%s\" #noPrompt" % path
)
)
def _merge(self, path, sg_publish_data):
"""
Merge contents of the given file into the scene.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
if not os.path.exists(path):
raise Exception("File not found on disk - '%s'" % path)
(_, ext) = os.path.splitext(path)
supported_file_exts = [".max"]
if ext.lower() not in supported_file_exts:
raise Exception("Unsupported file extension for '%s'. "
"Supported file extensions are: %s" % (
path, supported_file_exts))
app = self.parent
# Note: MaxPlus.FileManager.Merge() is not equivalent as it opens a dialog.
app.engine.safe_dialog_exec(lambda: MaxPlus.Core.EvalMAXScript(
'mergeMAXFile(\"' + path.replace('\\', '/') + '\")'))
def _xref_scene(self, path, sg_publish_data):
"""
Insert a reference to the given external file into the current scene.
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
"""
if not os.path.exists(path):
raise Exception("File not found on disk - '%s'" % path)
(_, ext) = os.path.splitext(path)
supported_file_exts = [".max"]
if ext.lower() not in supported_file_exts:
raise Exception("Unsupported file extension for '%s'. "
"Supported file extensions are: %s" % (
path, supported_file_exts))
app = self.parent
# No direct equivalent found in MaxPlus. Would potentially need to get scene root node (INode) and use addNewXRef on that otherwise.
app.engine.safe_dialog_exec(lambda: MaxPlus.Core.EvalMAXScript(
'xrefs.addNewXRefFile(\"' + path.replace('\\', '/') + '\")'))
def _create_texture_node(self, path, sg_publish_data):
"""
Create a file texture node for a texture
:param path: Path to file.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:returns: The newly created file node
"""
max_script = CREATE_TEXTURE_NODE_MAXSCRIPT % (path,)
MaxPlus.Core.EvalMAXScript(max_script)
# This maxscript creates a bitmap texture node and attaches it to a standard
# material.
CREATE_TEXTURE_NODE_MAXSCRIPT = """
--opens material editor
actionMan.executeAction 0 "50048"
--creates a bitmap texture node
bmap = Bitmaptexture fileName:"%s"
bmap.alphaSource = 2
--creates a standard max material node
mat = Standardmaterial ()
mat.diffuseMap = bmap
--assigns it slot of the compact material editor
meditMaterials[1] = mat
"""
```
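`_merge` and `_xref_scene` build MAXScript source as Python strings, swapping backslashes for forward slashes so Windows paths survive MAXScript string parsing. A small sketch of that escaping step, with a hypothetical `merge_script` helper:
```python
def merge_script(path):
    # forward slashes are valid path separators in MAXScript and avoid
    # backslash escape sequences inside the quoted literal
    return 'mergeMAXFile("%s")' % path.replace("\\", "/")

# e.g. merge_script("C:\\projects\\shot010\\asset.max")
# -> 'mergeMAXFile("C:/projects/shot010/asset.max")'
```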
#### File: python/app/model_publish_history.py
```python
from sgtk.platform.qt import QtCore, QtGui
import sgtk
# import the shotgun_model module from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_data = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_data")
ShotgunModel = shotgun_model.ShotgunModel
from .model_entity_listing import SgEntityListingModel
class SgPublishHistoryListingModel(SgEntityListingModel):
"""
Model that shows the version history for a publish.
The data fetching pass in this model has a two-pass
setup: First, the details for the given publish are fetched:
version number, type, task etc. Once we have those fields,
the shotgun model is updated to retrieve all associated
publishes.
"""
def __init__(self, entity_type, parent, bg_task_manager):
"""
Constructor.
:param entity_type: The entity type that should be loaded into this model.
Needs to be a PublishedFile or TankPublishedFile.
:param parent: QT parent object
:param bg_task_manager: task manager used to process data
"""
# current publish we have loaded
self._sg_location = None
# the version number for the current publish
self._current_version = None
# tracking the background task
self._sg_query_id = None
# overlay for reporting errors
self._overlay = None
# init base class
SgEntityListingModel.__init__(self, entity_type, parent, bg_task_manager)
self._app = sgtk.platform.current_bundle()
self.__sg_data_retriever = shotgun_data.ShotgunDataRetriever(self,
bg_task_manager=bg_task_manager)
self.__sg_data_retriever.start()
self.__sg_data_retriever.work_completed.connect(self.__on_worker_signal)
self.__sg_data_retriever.work_failure.connect(self.__on_worker_failure)
def set_overlay(self, overlay):
"""
Specify a overlay object for progress reporting
:param overlay: Overlay object
:type overlay: :class:`~tk-framework-qtwidgets:overlay_widget.ShotgunOverlayWidget`
"""
self._overlay = overlay
############################################################################################
# slots
def __on_worker_failure(self, uid, msg):
"""
Asynchronous callback - the worker thread errored.
"""
uid = shotgun_model.sanitize_qt(uid) # qstring on pyqt, str on pyside
msg = shotgun_model.sanitize_qt(msg)
if uid == self._sg_query_id:
self._app.log_warning("History model query error: %s" % msg)
full_msg = "Error retrieving data from Shotgun: %s" % msg
if self._overlay:
self._overlay.show_error_message(full_msg)
def __on_worker_signal(self, uid, request_type, data):
"""
Signaled whenever the worker completes something.
This method will dispatch the work to different methods
depending on what async task has completed.
"""
uid = shotgun_model.sanitize_qt(uid) # qstring on pyqt, str on pyside
data = shotgun_model.sanitize_qt(data)
if self._sg_query_id == uid:
# hide spinner
if self._overlay:
self._overlay.hide()
# process the data
sg_records = data["sg"]
            if len(sg_records) != 1:
                if self._overlay:
                    self._overlay.show_error_message("Publish could not be found!")
                return
            sg_data = sg_records[0]
# figure out which publish type we are after
if self._sg_formatter.entity_type == "PublishedFile":
publish_type_field = "published_file_type"
else:
publish_type_field = "tank_type"
# when we filter out which other publishes are associated with this one,
# to effectively get the "version history", we look for items
            # which have the same project, same entity association, same name, same type
# and the same task.
filters = [ ["project", "is", sg_data["project"] ],
["name", "is", sg_data["name"] ],
["task", "is", sg_data["task"] ],
["entity", "is", sg_data["entity"] ],
[publish_type_field, "is", sg_data[publish_type_field] ],
]
# the proxy model that is sorting this model will
# sort based on id (pk), meaning that more recently
            # committed transactions will appear later in the list.
# This ensures that publishes with no version number defined
# (yes, these exist) are also sorted correctly.
hierarchy = ["created_at"]
self._current_version = sg_data["version_number"]
ShotgunModel._load_data(
self,
self._sg_formatter.entity_type,
filters,
hierarchy,
self._sg_formatter.fields
)
self._refresh_data()
############################################################################################
# public interface
def load_data(self, sg_location):
"""
Clears the model and sets it up for a particular entity.
Loads any cached data that exists.
:param sg_location: Location object representing the *associated*
object for which items should be loaded. For this class,
the location should always represent a published file.
"""
self._sg_location = sg_location
self._current_version = None
self.__sg_data_retriever.clear()
# figure out which publish type we are after
if self._sg_formatter.entity_type == "PublishedFile":
publish_type_field = "published_file_type"
else:
publish_type_field = "tank_type"
filters = [["id", "is", sg_location.entity_id]]
fields = ["name",
"version_number",
"task",
"entity",
"project",
publish_type_field]
# get publish details async
self._sg_query_id = self.__sg_data_retriever.execute_find(self._sg_formatter.entity_type,
filters,
fields)
def is_highlighted(self, model_index):
"""
Compute if a model index belonging to this model
should be highlighted.
In the case of this model, the current version is highlighted
"""
# see if the model tracks a concept of a current version.
# this is used for version histories, when we want to highlight
# a particular item in a history
sg_data = shotgun_model.get_sg_data(model_index)
if sg_data.get("version_number") == self._current_version:
return True
else:
return False
```
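To make the two-pass flow above concrete, here is a minimal usage sketch; `parent_widget`, `bg_task_manager`, `overlay_widget` and `location` are assumed to be supplied by the surrounding app and are not defined in the original file:
```python
model = SgPublishHistoryListingModel("PublishedFile", parent_widget, bg_task_manager)
model.set_overlay(overlay_widget)  # optional overlay for progress/errors
model.load_data(location)          # pass 1: async fetch of the publish details
# pass 2 runs inside __on_worker_signal: once name/task/entity/type are known,
# the model re-queries Shotgun for all sibling publishes (the version history)
```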
#### File: python/app/model_version_listing.py
```python
from sgtk.platform.qt import QtCore, QtGui
import sgtk
from .model_entity_listing import SgEntityListingModel
class SgVersionModel(SgEntityListingModel):
"""
Special model for versions so that we can control
how to display items with different review status.
"""
def __init__(self, entity_type, parent, bg_task_manager):
"""
Constructor.
        :param entity_type: The entity type that should be loaded into this model.
                            Typically this will be Version.
:param parent: QT parent object
"""
self._show_pending_only = False
# init base class
SgEntityListingModel.__init__(self, entity_type, parent, bg_task_manager)
def _get_filters(self):
"""
Return the filter to be used for the current query
"""
# get base class filters
filters = SgEntityListingModel._get_filters(self)
if self._show_pending_only:
# limit based on status
filters.append(["sg_status_list", "is", "rev"])
return filters
############################################################################################
# public interface
def load_data(self, sg_location, show_pending_only):
"""
Clears the model and sets it up for a particular entity.
:param sg_location: Location object representing the *associated*
object for which items should be loaded.
        :param show_pending_only: If true, the listing will be culled so that
            only items pending review are shown.
"""
# figure out our current entity type
self._show_pending_only = show_pending_only
# make sure that we include the status regardless of how the
# ui is configured - this is so we can do a status comparison
# later in the _get_filters method.
SgEntityListingModel.load_data(
self,
sg_location,
additional_fields=["sg_status_list"],
sort_field="id"
)
```
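A short sketch of how the pending-only mode is driven; `parent_widget`, `bg_task_manager` and `location` are again assumed to come from the hosting app:
```python
model = SgVersionModel("Version", parent_widget, bg_task_manager)
model.load_data(location, show_pending_only=True)
# _get_filters() now appends ["sg_status_list", "is", "rev"] to the base
# class filters, so only versions pending review are fetched
```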
#### File: python/app/note_updater.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
shotgun_data = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_data")
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
class NoteUpdater(QtCore.QObject):
"""
Class that operates asynchronously on notes.
"""
def __init__(self, task_manager, parent):
"""
Constructor
        :param task_manager: Task manager to use for background work
:param parent: QT parent object
"""
QtCore.QObject.__init__(self, parent)
self._guids = []
self._app = sgtk.platform.current_bundle()
self.__sg_data_retriever = shotgun_data.ShotgunDataRetriever(self,
bg_task_manager=task_manager)
self.__sg_data_retriever.start()
self.__sg_data_retriever.work_completed.connect(self.__on_worker_signal)
self.__sg_data_retriever.work_failure.connect(self.__on_worker_failure)
def __on_worker_failure(self, uid, msg):
"""
Asynchronous callback - the worker thread errored.
:param uid: Unique id for request that failed
:param msg: Error message
"""
uid = shotgun_model.sanitize_qt(uid) # qstring on pyqt, str on pyside
msg = shotgun_model.sanitize_qt(msg)
if uid in self._guids:
self._app.log_warning("Could not update note: %s" % msg)
self._guids.remove(uid)
def __on_worker_signal(self, uid, request_type, data):
"""
Signaled whenever the worker completes something.
This method will dispatch the work to different methods
depending on what async task has completed.
:param uid: Unique id for request
:param request_type: String identifying the request class
:param data: the data that was returned
"""
uid = shotgun_model.sanitize_qt(uid) # qstring on pyqt, str on pyside
data = shotgun_model.sanitize_qt(data)
if uid in self._guids:
self._app.log_debug("Note update complete: %s" % data)
self._guids.remove(uid)
def mark_note_as_read(self, note_id):
"""
Mark the note as read if it's unread.
:param note_id: Shotgun note id to operate on
"""
data = {"note_id": note_id }
uid = self.__sg_data_retriever.execute_method(self._mark_note_as_read, data)
self._guids.append(uid)
def _mark_note_as_read(self, sg, data):
"""
Async callback called by the data retriever.
Sets the note read status to read in case the status is set to unread.
"""
note_id = data["note_id"]
sg_data = sg.find_one("Note", [["id", "is", note_id]], ["read_by_current_user"])
if sg_data and sg_data["read_by_current_user"] == "unread":
sg.update("Note", note_id, {"read_by_current_user": "read"})
```
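Usage sketch for the updater, assuming a `task_manager` and `parent_widget` from the hosting app and a hypothetical note id; the Shotgun round-trip runs on the background task manager, so the call returns immediately:
```python
updater = NoteUpdater(task_manager, parent_widget)
updater.mark_note_as_read(1234)  # hypothetical Note id; no-op if already read
```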
#### File: python/app/widget_list_item.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.list_item_widget import Ui_ListItemWidget
from .work_area_button import FloatingWorkAreaButton
class ListItemWidget(QtGui.QWidget):
"""
Widget that is used to display entries in all the item listings.
This widget goes together with the list item delegate and is always
manufactured by the list item delegate.
"""
def __init__(self, parent):
"""
Constructor
:param parent: QT parent object
"""
QtGui.QWidget.__init__(self, parent)
# make sure this widget isn't shown
self.setVisible(False)
# set up the UI
self.ui = Ui_ListItemWidget()
self.ui.setupUi(self)
# the property stylesheet syntax seems brittle and hacky so
# keeping the style sheet modifications local here rather
# than in global css
# todo: figure out a better way to do this!
self._css_decorated = """
#box { border-width: 2px;
border-radius: 4px;
border-color: rgb(48, 167, 227);
border-style: solid;
}
"""
self._css_selected = """
#box { border-width: 2px;
border-radius: 4px;
border-color: rgb(48, 167, 227);
border-style: solid;
background-color: rgba(48, 167, 227, 25%);
}
"""
self._no_style = """
#box { border-width: 2px;
border-radius: 4px;
border-color: rgba(0, 0, 0, 0%);
border-style: solid;
}
"""
# set up action menu. parent it to the button to prevent cases where it
# shows up elsewhere on screen (as in Houdini)
self._menu = QtGui.QMenu(self.ui.button)
self._actions = []
self.ui.button.setMenu(self._menu)
self.ui.button.setVisible(False)
# this forces the menu to be right aligned with the button. This is
# preferable since many DCCs show the embed panel on the far right. In
# houdini at least, before forcing this layout direction, the menu was
# showing up partially offscreen.
self.ui.button.setLayoutDirection(QtCore.Qt.RightToLeft)
# add work area button
self._work_area_button = FloatingWorkAreaButton(self.ui.box)
@property
def work_area_button(self):
"""
The special button which controls the work area
"""
return self._work_area_button
def set_selected(self, selected):
"""
Adjust the style sheet to indicate selection or not
:param selected: True if selected, false if not
"""
if selected:
self.ui.box.setStyleSheet(self._css_selected)
def set_highlighted(self, highlighted):
"""
Adjust the style sheet to indicate that an object is highlighted
        :param highlighted: True if highlighted, false if not
"""
if highlighted:
self.ui.box.setStyleSheet(self._css_decorated)
else:
self.ui.box.setStyleSheet(self._no_style)
def set_actions(self, actions):
"""
Adds a list of QActions to add to the actions menu for this widget.
:param actions: List of QActions to add
"""
if len(actions) == 0:
self.ui.button.setVisible(False)
else:
self.ui.button.setVisible(True)
self._actions = actions
for a in self._actions:
self._menu.addAction(a)
def set_up_work_area(self, entity_type, entity_id):
"""
Sets up the set work area button
:param entity_type: shotgun type to set up work area for
:param entity_id: Shotgun id to set up work area for
"""
self._work_area_button.set_up(entity_type, entity_id)
def set_thumbnail(self, pixmap):
"""
Set a thumbnail given the current pixmap.
The pixmap must be 100x100 or it will appear squeezed
:param pixmap: pixmap object to use
"""
self.ui.thumbnail.setPixmap(pixmap)
def set_text(self, header_left, header_right, body):
"""
Populate the lines of text in the widget
        :param header_left: Left header text as string
        :param header_right: Right header text as string
        :param body: Body text as string
"""
self.ui.list_item_top_left.setText(header_left)
self.ui.list_item_top_right.setText(header_right)
self.ui.list_item_body.setText(body)
@staticmethod
def calculate_size():
"""
Calculates and returns a suitable size for this widget.
:returns: Size of the widget
"""
return QtCore.QSize(300, 102)
```
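A sketch of how a delegate might populate this widget; the pixmap, actions and ids below are placeholders, not values from the original code:
```python
widget = ListItemWidget(parent_view)
widget.set_thumbnail(thumbnail_pixmap)             # expects roughly 100x100
widget.set_text("Bunny Asset", "v012", "Latest lighting publish")
widget.set_actions([open_action, publish_action])  # QAction instances
widget.set_up_work_area("Task", 4567)              # hypothetical Task id
widget.set_selected(True)
```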
#### File: python/app/work_area_dialog.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.work_area_dialog import Ui_WorkAreaDialog
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
class WorkAreaDialog(QtGui.QDialog):
"""
Task selector and creator dialog
"""
ENTITY_TYPE_ROLE = QtCore.Qt.UserRole + 1001
ENTITY_ID_ROLE = QtCore.Qt.UserRole + 1002
def __init__(self, entity_type, entity_id, parent):
"""
:param entity_type: Entity type to display tasks for
:param entity_id: Entity id to display tasks for
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(WorkAreaDialog, self).__init__(parent)
# now load in the UI that was created in the UI designer
self.ui = Ui_WorkAreaDialog()
self.ui.setupUi(self)
# double clicking an item in the list closes the dialog
self.ui.task_list.itemDoubleClicked.connect(self.accept)
self._bundle = sgtk.platform.current_bundle()
# find information about the main item
main_item = self._bundle.shotgun.find_one(
entity_type,
[["id", "is", entity_id]],
["code", "description"]
)
if main_item.get("code"):
entity_name = "%s %s" % (shotgun_globals.get_type_display_name(entity_type), main_item.get("code"))
else:
entity_name = "Unnamed %s" % shotgun_globals.get_type_display_name(entity_type)
# # insert main item
# self._main_item = QtGui.QListWidgetItem(entity_name, self.ui.task_list)
# self._main_item.setToolTip(main_item.get("description") or "No description found.")
# self._main_item.setData(self.ENTITY_TYPE_ROLE, entity_type)
# self._main_item.setData(self.ENTITY_ID_ROLE, entity_id)
#
# # make this selected by default
# self._main_item.setSelected(True)
# now get all tasks from Shotgun
tasks = self._bundle.shotgun.find(
"Task",
[["entity", "is", {"type": entity_type, "id": entity_id}]],
["content", "step", "sg_status_list", "task_assignees"]
)
# insert into list
for task in tasks:
task_name = "Task %s on %s" % (task["content"], entity_name)
# indicate users assigned
if task["task_assignees"]:
task_name += " (%s)" % ", ".join([x["name"] for x in task["task_assignees"]])
task_item = QtGui.QListWidgetItem(task_name, self.ui.task_list)
task_item.setData(self.ENTITY_TYPE_ROLE, task["type"])
task_item.setData(self.ENTITY_ID_ROLE, task["id"])
# as the last item, create the "create new task widget"
# embedded into a list widget
self.new_task = QtGui.QWidget(self)
self.new_task.setObjectName("new_task")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.new_task)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.task_name = QtGui.QLineEdit(self.new_task)
self.task_name.setObjectName("task_name")
self.horizontalLayout_2.addWidget(self.task_name)
self.step_combo = QtGui.QComboBox(self.new_task)
self.step_combo.setObjectName("step_combo")
self.horizontalLayout_2.addWidget(self.step_combo)
self.task_name.setPlaceholderText("Create new task...")
self._new_item = QtGui.QListWidgetItem(self.ui.task_list)
self.ui.task_list.setItemWidget(self._new_item, self.new_task)
# find the steps for this entity type
steps = self._bundle.shotgun.find(
"Step",
[["entity_type", "is", entity_type]],
["code", "id"]
)
# populate combo box
for step in steps:
self.step_combo.addItem(step["code"], step["id"])
# install filter so that when the task name is clicked
# the list widget is selected
self.task_name.installEventFilter(self)
@property
def is_new_task(self):
"""
Returns true if the selected object is a new task
"""
return self._new_item.isSelected()
@property
def new_task_name(self):
"""
The new task name for new tasks or "" if not set
"""
return self.task_name.text()
@property
def new_step_id(self):
"""
        Step id for the new task or None if not set
"""
return self.step_combo.itemData(self.step_combo.currentIndex())
@property
def selected_entity(self):
"""
The selected (entity_type, entity_id) or
(None, None) if a new task is selected
"""
if self.is_new_task:
return None, None
else:
current_item = self.ui.task_list.currentItem()
return (
current_item.data(self.ENTITY_TYPE_ROLE),
current_item.data(self.ENTITY_ID_ROLE)
)
def eventFilter(self, obj, event):
"""
Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
Will select the "new item" listitem if someone
clicks on the task name widget.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
"""
# peek at the message
if event.type() == QtCore.QEvent.FocusIn:
# re-broadcast any resize events
self._new_item.setSelected(True)
# pass it on!
return False
```
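A sketch of driving the dialog and reading the result back; the entity type and id are placeholders:
```python
dialog = WorkAreaDialog("Shot", 1001, parent_widget)
if dialog.exec_() == QtGui.QDialog.Accepted:
    if dialog.is_new_task:
        print "Create task '%s' with step id %s" % (dialog.new_task_name, dialog.new_step_id)
    else:
        entity_type, entity_id = dialog.selected_entity
        print "Selected work area: %s %s" % (entity_type, entity_id)
```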
#### File: python/app/action_manager.py
```python
import sgtk
import datetime
import os
import sys
from sgtk.platform.qt import QtCore, QtGui
from tank_vendor import shotgun_api3
from sgtk import TankError
class ActionManager(QtCore.QObject):
"""
Manager class that is used to generate action menus and dispatch action
execution into the various action hooks. This provides an interface between
the action hooks, action defs in the config, and the rest of the app.
"""
# emitted when the user requests a refresh via the actions system
refresh_request = QtCore.Signal()
# the area of the UI that an action is being requested/run for.
UI_AREA_MAIN = 0x1
UI_AREA_DETAILS = 0x2
def __init__(self, parent):
"""
Constructor
"""
QtCore.QObject.__init__(self, parent)
self._app = sgtk.platform.current_bundle()
def get_actions(self, sg_data, ui_area):
"""
Returns a list of actions for an entity
:param sg_data: Shotgun data for a publish
:param ui_area: Indicates which part of the UI the request is coming from.
Currently one of UI_AREA_MAIN, UI_AREA_DETAILS and UI_AREA_HISTORY
:returns: List of QAction objects, ready to be parented to some QT Widgetry.
"""
if sg_data is None:
return []
# check if we have logic configured to handle this
action_defs = []
all_mappings = self._app.get_setting("action_mappings")
if all_mappings.get(sg_data["type"]):
mappings = all_mappings[ sg_data["type"] ]
            # this is now a list of items, each a dictionary
            # with keys 'filters' and 'actions'
# [{'filters': {}, 'actions': ['assign_task']}]
# now cull out actions that don't match our filters
actions_to_evaluate = []
for mapping in mappings:
actions_def = mapping["actions"]
filters_def = mapping["filters"]
if filters_def is None or len(filters_def) == 0:
# no filters to consider
actions_to_evaluate.extend(actions_def)
else:
# filters are on the form
# field_name: value
for (field_name, field_value) in filters_def.iteritems():
# resolve linked fields into a string value
sg_value = sg_data.get(field_name)
if isinstance(sg_value, dict):
sg_value = sg_value.get("name")
# check if the filter is valid
if sg_value == field_value:
actions_to_evaluate.extend(actions_def)
            if len(actions_to_evaluate) > 0:
                # cool, so we have one or more actions.
                # call out to the hook to give us the specifics.
# resolve UI area
if ui_area == self.UI_AREA_DETAILS:
ui_area_str = "details"
elif ui_area == self.UI_AREA_MAIN:
ui_area_str = "main"
else:
raise TankError("Unsupported UI_AREA. Contact support.")
# convert created_at unix time stamp to shotgun std time stamp
unix_timestamp = sg_data.get("created_at")
if unix_timestamp:
sg_timestamp = datetime.datetime.fromtimestamp(unix_timestamp,
shotgun_api3.sg_timezone.LocalTimezone())
sg_data["created_at"] = sg_timestamp
action_defs = []
try:
action_defs = self._app.execute_hook_method("actions_hook",
"generate_actions",
sg_data=sg_data,
actions=actions_to_evaluate,
ui_area=ui_area_str)
except Exception:
self._app.log_exception("Could not execute generate_actions hook.")
# create QActions
actions = []
for action_def in action_defs:
name = action_def["name"]
caption = action_def["caption"]
params = action_def["params"]
description = action_def["description"]
a = QtGui.QAction(caption, None)
a.setToolTip(description)
a.triggered[()].connect(lambda n=name, sg=sg_data, p=params: self._execute_hook(n, sg, p))
actions.append(a)
if ui_area == self.UI_AREA_DETAILS:
actions = self._get_default_detail_actions(sg_data) + actions
return actions
def _get_default_detail_actions(self, sg_data):
"""
Returns a list of default actions for the detail area
        :param sg_data: Shotgun data dictionary
"""
refresh = QtGui.QAction("Refresh", None)
refresh.triggered[()].connect(lambda f=sg_data: self._refresh(f))
view_in_sg = QtGui.QAction("View in Shotgun", None)
view_in_sg.triggered[()].connect(lambda f=sg_data: self._show_in_sg(f))
copy_url = QtGui.QAction("Copy Shotgun url to clipboard", None)
copy_url.triggered[()].connect(lambda f=sg_data: self._copy_to_clipboard(f))
show_docs = QtGui.QAction("Documentation", None)
show_docs.triggered[()].connect(self._show_docs)
separator = QtGui.QAction(None)
separator.setSeparator(True)
return [refresh, view_in_sg, copy_url, show_docs, separator]
########################################################################################
# callbacks
def _execute_hook(self, action_name, sg_data, params):
"""
callback - executes a hook
:param action_name: Name of action to execute
:param sg_data: Shotgun data dictionary
:param params: action parameters passed in from the hook
"""
self._app.log_debug("Calling action hook for %s. "
"Params: %s. Sg data: %s" % (action_name, params, sg_data))
try:
self._app.execute_hook_method("actions_hook",
"execute_action",
name=action_name,
params=params,
sg_data=sg_data)
# refresh UI
self.refresh_request.emit()
except Exception, e:
self._app.log_exception("Could not execute execute_action hook.")
QtGui.QMessageBox.critical(None, "Action Error", "Error: %s" % e)
else:
self._app._log_metric_launched_action(action_name)
def _show_docs(self):
"""
Internal action callback - Launch app documentation
"""
self._app.log_debug("Opening url %s..." % self._app.documentation_url)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(self._app.documentation_url))
def _refresh(self, entity):
"""
Internal action callback - refreshes the main dialog UI
:param entity: std sg entity dict with keys type, id and name
"""
self.refresh_request.emit()
def _show_in_sg(self, entity):
"""
Internal action callback - Shows a shotgun entity in the web browser
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
def _copy_to_clipboard(self, entity):
"""
Internal action callback - copy shotgun url to clipboard
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
app = QtCore.QCoreApplication.instance()
app.clipboard().setText(url)
```
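A sketch of turning the manager's actions into a context menu for a publish shown in the details area; `sg_data` is whatever Shotgun dictionary the view holds for the selected item:
```python
action_manager = ActionManager(parent_widget)
actions = action_manager.get_actions(sg_data, ActionManager.UI_AREA_DETAILS)
menu = QtGui.QMenu(parent_widget)
for action in actions:
    menu.addAction(action)
menu.exec_(QtGui.QCursor.pos())  # show at the mouse position
```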
#### File: tk-multi-snapshot/v0.7.3/app.py
```python
import os
import tank
from tank import TankError
class MultiSnapshot(tank.platform.Application):
def init_app(self):
"""
Called as the application is being initialized
"""
self.tk_multi_snapshot = self.import_module("tk_multi_snapshot")
# ensure snapshot template has at least one of increment or timestamp:
snapshot_template = self.get_template("template_snapshot")
        if ("timestamp" not in snapshot_template.keys
                and "increment" not in snapshot_template.keys):
self.log_error("'template_snapshot' must contain at least one of 'timestamp' or 'increment'")
return
# register commands:
self.engine.register_command(
"Snapshot...",
self.show_snapshot_dlg,
{
# dark themed icon for engines that recognize this format
"icons": {
"dark": {
"png": os.path.join(
os.path.dirname(__file__),
"resources",
"snapshot_menu_icon.png"
)
}
}
}
)
self.engine.register_command(
"Snapshot History...",
self.show_snapshot_history_dlg,
{
# dark themed icon for engines that recognize this format
"icons": {
"dark": {
"png": os.path.join(
os.path.dirname(__file__),
"resources",
"snapshot_history_menu_icon.png"
)
}
}
}
)
@property
def context_change_allowed(self):
"""
Specifies that context changes are allowed.
"""
return True
def destroy_app(self):
self.tk_multi_snapshot = None
self.log_debug("Destroying tk-multi-snapshot")
def show_snapshot_dlg(self):
"""
Shows the Snapshot Dialog.
"""
return self.tk_multi_snapshot.Snapshot(self).show_snapshot_dlg()
def show_snapshot_history_dlg(self):
"""
Shows the Snapshot History Dialog.
"""
self.tk_multi_snapshot.Snapshot(self).show_snapshot_history_dlg()
def can_snapshot(self, work_path=None):
"""
Helper method to determine if a snapshot can be made with work_path.
"""
return self.tk_multi_snapshot.Snapshot(self).can_snapshot(work_path)
def snapshot(self, comment=None, thumbnail=None):
"""
Snapshots the current scene without any UI
"""
handler = self.tk_multi_snapshot.Snapshot(self)
work_path = handler.get_current_file_path()
return handler.do_snapshot(work_path, thumbnail, comment)
```
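A sketch of driving the snapshot app programmatically from another bundle, bypassing the dialogs; `engine` is assumed to be the current Toolkit engine:
```python
snapshot_app = engine.apps.get("tk-multi-snapshot")
if snapshot_app and snapshot_app.can_snapshot():
    snapshot_app.snapshot(comment="before lighting tweak")
```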
#### File: v0.7.3/hooks/scene_operation_tk-houdini.py
```python
import os
import hou
import tank
from tank import Hook
from tank import TankError
class SceneOperation(Hook):
"""
Hook called to perform an operation with the current scene
"""
def execute(self, operation, file_path, **kwargs):
"""
Main hook entry point
:operation: String
Scene operation to perform
:file_path: String
File path to use if the operation
requires it (e.g. open)
:returns: Depends on operation:
'current_path' - Return the current scene
file path as a String
all others - None
"""
if operation == "current_path":
return str(hou.hipFile.name())
elif operation == "open":
# give houdini forward slashes
file_path = file_path.replace(os.path.sep, '/')
hou.hipFile.load(str(file_path))
elif operation == "save":
hou.hipFile.save()
```
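For orientation, a sketch of how an app typically invokes this hook; the setting name `hook_scene_operation` follows the snapshot app's convention and should be treated as an assumption here:
```python
current_path = app.execute_hook("hook_scene_operation",
                                operation="current_path", file_path=None)
app.execute_hook("hook_scene_operation", operation="save", file_path=None)
```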
#### File: v0.7.3/hooks/scene_operation_tk-nuke.py
```python
import os
import nuke
import tank
from tank import Hook
from tank import TankError
class SceneOperation(Hook):
"""
Hook called to perform an operation with the
current scene
"""
def execute(self, *args, **kwargs):
"""
Main hook entry point
:operation: String
Scene operation to perform
:file_path: String
File path to use if the operation
requires it (e.g. open)
:returns: Depends on operation:
'current_path' - Return the current scene
file path as a String
all others - None
"""
engine = self.parent.engine
if hasattr(engine, "hiero_enabled") and engine.hiero_enabled:
return self._hiero_execute(*args, **kwargs)
elif hasattr(engine, "studio_enabled") and engine.studio_enabled:
return self._studio_execute(*args, **kwargs)
else:
return self._nuke_execute(*args, **kwargs)
def _studio_execute(self, operation, file_path, **kwargs):
"""
The Nuke Studio specific scene operations.
"""
# Out of the box, we treat Nuke Studio just like Hiero, so we
# can just call through here.
return self._hiero_execute(operation, file_path, **kwargs)
def _hiero_execute(self, operation, file_path, **kwargs):
"""
The Hiero specific scene operations.
"""
import hiero.core
if operation == "current_path":
# return the current script path
project = self._get_current_project()
curr_path = project.path().replace("/", os.path.sep)
return curr_path
elif operation == "open":
# first close the current project then open the specified file
project = self._get_current_project()
project.close()
# manually fire signal since Hiero doesn't fire this when loading
# from the tk file manager
hiero.core.events.sendEvent("kBeforeProjectLoad", None)
hiero.core.openProject(file_path.replace(os.path.sep, "/"))
elif operation == "save":
# save the current script:
project = self._get_current_project()
project.save()
def _nuke_execute(self, operation, file_path, **kwargs):
"""
The Nuke specific scene operations.
"""
if file_path:
file_path = file_path.replace("/", os.path.sep)
if operation == "current_path":
# return the current script path
return nuke.root().name().replace("/", os.path.sep)
elif operation == "open":
# open the specified script into the current window
if nuke.root().modified():
raise TankError("Script is modified!")
nuke.scriptClear()
nuke.scriptOpen(file_path)
elif operation == "save":
# save the current script:
nuke.scriptSave()
def _get_current_project(self):
"""
Returns the current project based on where in the UI the user clicked
"""
import hiero.core
# get the menu selection from hiero engine
selection = self.parent.engine.get_menu_selection()
if len(selection) != 1:
raise TankError("Please select a single Project!")
        if not isinstance(selection[0], hiero.core.Bin):
raise TankError("Please select a Hiero Project!")
project = selection[0].project()
if project is None:
# apparently bins can be without projects (child bins I think)
raise TankError("Please select a Hiero Project!")
return project
```
#### File: python/tk_multi_snapshot/string_utils.py
```python
from tank.platform.qt import QtCore
def safe_to_string(value):
"""
Safely convert the value to a string - handles
unicode and QtCore.QString if using PyQt
:param value: The value to convert to a string
:returns str: utf8 encoded string of the input value
"""
if isinstance(value, str):
# it's a string anyway so just return
return value
if isinstance(value, unicode):
# convert to utf-8
return value.encode("utf8")
if hasattr(QtCore, "QString"):
# running PyQt!
if isinstance(value, QtCore.QString):
# QtCore.QString inherits from str but supports
# unicode, go figure! Lets play safe and return
# a utf-8 string
return str(value.toUtf8())
# For everything else, just return as string
return str(value)
```
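The three input flavours the helper normalises, as a small Python 2 sketch:
```python
assert safe_to_string("plain") == "plain"           # str passes through
assert safe_to_string(u"caf\xe9") == "caf\xc3\xa9"  # unicode -> utf-8 bytes
assert safe_to_string(42) == "42"                   # everything else via str()
```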
#### File: tk_multi_workfiles/actions/interactive_open_action.py
```python
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .open_file_action import OpenFileAction
from ..wrapper_dialog import WrapperDialog
from ..open_options_form import OpenOptionsForm
from ..user_cache import g_user_cache
class InteractiveOpenAction(OpenFileAction):
def __init__(self, file, file_versions, environment, workfiles_visible, publishes_visible):
"""
"""
OpenFileAction.__init__(self, "Open", file, file_versions, environment)
self._workfiles_visible = workfiles_visible
self._publishes_visible = publishes_visible
def execute(self, parent_ui):
"""
"""
if not self.file:
return False
# this is the smart action where all the logic tries to decide what the actual
# action should be!
#print "Opening file '%s' which is in user sandbox '%s'" % (self.file.path, self.environment.context.user["name"])
# get information about the max local & publish versions:
local_versions = [v for v, f in self.file_versions.iteritems() if f.is_local]
publish_versions = [v for v, f in self.file_versions.iteritems() if f.is_published]
max_local_version = max(local_versions) if local_versions else None
max_publish_version = max(publish_versions) if publish_versions else None
max_version = max(max_local_version, max_publish_version)
if (self._publishes_visible and self.file.is_published
and (not self._workfiles_visible or not self.file.is_local)):
# opening a publish and either not showing work files or the file isn't local
if self.file.version < max_publish_version:
# opening an old version of a publish!
return self._open_previous_publish(self.file, self.environment, parent_ui)
else:
# opening the most recent version of a publish!
latest_work_file = None
if max_local_version != None:
latest_work_file = self.file_versions[max_local_version]
return self._open_publish_with_check(self.file, latest_work_file, self.environment, max_version+1, parent_ui)
elif (self._workfiles_visible and self.file.is_local):
# opening a workfile and either not showing publishes or the file hasn't been published
# OR
# opening a file that is both local and published and both are visible in the view!
# (is this the right thing to do when a file is both local and a publish??)
if self.file.version < max_local_version:
# opening an old version of work file:
return self._open_previous_workfile(self.file, self.environment, parent_ui)
else:
# opening the most recent version of a work file!
latest_publish = None
if max_publish_version != None:
latest_publish = self.file_versions[max_publish_version]
return self._open_workfile_with_check(self.file, latest_publish, self.environment, max_version+1, parent_ui)
else:
# this shouldn't happen and is in here primarily for debug purposes!
raise NotImplementedError("Unsure what action to take when opening this file!")
# didn't do anything!
return False
def _open_workfile_with_check(self, work_file, publish_file, env, next_version, parent_ui):
"""
Function called when user clicks Open for a file
in the Work Area
"""
# different options depending if the publish file is more
# recent or not:
open_mode = OpenOptionsForm.OPEN_WORKFILE
if publish_file and work_file.compare_with_publish(publish_file) < 0:
# options are different if the publish and work files are the same path as there
# doesn't need to be the option of opening the publish read-only.
publish_requires_copy = True
if env.publish_template == env.work_template:
if "version" not in env.publish_template.keys:
publish_requires_copy = False
form = OpenOptionsForm(None, self._app, work_file, publish_file, OpenOptionsForm.OPEN_WORKFILE_MODE,
next_version, publish_requires_copy)
open_mode = WrapperDialog.show_modal(form, parent_ui, "Found a More Recent Publish!")
if open_mode == OpenOptionsForm.OPEN_WORKFILE:
# open the work file:
if not self._open_workfile(work_file, env, parent_ui):
return False
elif open_mode == OpenOptionsForm.OPEN_PUBLISH:
# open the published file instead:
if not self._open_publish_as_workfile(publish_file, env, next_version, parent_ui):
return False
else:
# cancelled so stop!
return False
return True
def _open_previous_workfile(self, file, env, parent_ui):
"""
Open a previous version of a work file - this just opens
it directly without any file copying or validation
"""
# Confirm how the previous work file should be opened:
# (TODO) expand this out to allow opening directly or option to continue
# working as the next work file
answer = QtGui.QMessageBox.question(parent_ui, "Open Previous Work File?",
("Continue opening the old work file\n\n %s (v%d)\n\n"
"from the work area?" % (file.name, file.version)),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer != QtGui.QMessageBox.Yes:
return False
return self._do_copy_and_open(None, file.path, file.version, False, env.context, parent_ui)
def _open_publish_with_check(self, publish_file, work_file, env, next_version, parent_ui):
"""
Function called when user clicks Open for a file
in the Publish Area
"""
# options are different if the publish and work files are the same path as there
# doesn't need to be the option of opening the publish read-only.
publish_requires_copy = True
if env.publish_template == env.work_template:
if "version" not in env.publish_template.keys:
publish_requires_copy = False
# different options depending if the work file is more
# recent or not:
dlg_title = ""
if work_file and work_file.compare_with_publish(publish_file) > 0:
dlg_title = "Found a More Recent Work File!"
else:
dlg_title = "Open Publish"
work_file = None
if work_file or publish_requires_copy:
# show dialog with options to user:
open_mode = OpenOptionsForm.OPEN_PUBLISH
mode = OpenOptionsForm.OPEN_PUBLISH_MODE if publish_requires_copy else OpenOptionsForm.OPEN_PUBLISH_NO_READONLY_MODE
form = OpenOptionsForm(None, self._app, work_file, publish_file, mode, next_version, publish_requires_copy)
open_mode = WrapperDialog.show_modal(form, parent_ui, dlg_title)
if open_mode == OpenOptionsForm.OPEN_WORKFILE:
# open the work file:
return self._open_workfile(work_file, env, parent_ui)
elif open_mode == OpenOptionsForm.OPEN_PUBLISH:
# open the published file instead:
return self._open_publish_as_workfile(publish_file, env, next_version, parent_ui)
elif open_mode == OpenOptionsForm.OPEN_PUBLISH_READONLY:
# open the published file read-only instead:
return self._open_publish_read_only(publish_file, env, parent_ui)
else:
return False
else:
# just open the published file:
return self._open_publish_as_workfile(publish_file, env, next_version, parent_ui)
def _open_workfile(self, file, env, parent_ui):
"""
Handles opening a work file - this checks to see if the file
is in another users sandbox before opening
"""
if not file.editable:
answer = QtGui.QMessageBox.question(parent_ui, "Open file read-only?",
("The work file you are opening: '%s', is "
"read-only:\n\n%s.\n\nWould you like to continue?"
% (file.name, file.not_editable_reason)),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return False
# trying to open a work file...
src_path = None
work_path = file.path
# construct a context for this path to determine if it's in
# a user sandbox or not:
if env.context.user:
current_user = g_user_cache.current_user
if current_user and current_user["id"] != env.context.user["id"]:
# file is in a user sandbox - construct path
# for the current user's sandbox:
try:
# get fields from work path:
fields = env.work_template.get_fields(work_path)
# add in the fields from the context with the current user:
local_ctx = env.context.create_copy_for_user(current_user)
ctx_fields = local_ctx.as_template_fields(env.work_template)
fields.update(ctx_fields)
# construct the local path from these fields:
local_path = env.work_template.apply_fields(fields)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to resolve file path",
("Failed to resolve the user sandbox file path:\n\n%s\n\nto the local "
"path:\n\n%s\n\nUnable to open file!" % (work_path, e)))
self._app.log_exception("Failed to resolve user sandbox file path %s" % work_path)
return False
if local_path != work_path:
# more than just an open so prompt user to confirm:
answer = QtGui.QMessageBox.question(parent_ui, "Open file from another user?",
("The work file you are opening:\n\n%s\n\n"
"is in a user sandbox belonging to %s. Would "
"you like to copy the file to your sandbox and open it?"
% (work_path, env.context.user["name"])),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
if answer == QtGui.QMessageBox.Cancel:
return False
src_path = work_path
work_path = local_path
try:
self._app.log_metric("Open workfile")
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
return self._do_copy_and_open(src_path, work_path, None, not file.editable, env.context, parent_ui)
def _open_previous_publish(self, file, env, parent_ui):
"""
Open a previous version of a publish file from the publish area
"""
# confirm how the previous published file should be opened:
# (TODO) expand this out to allow opening directly or option to continue
# working as the next work file
answer = QtGui.QMessageBox.question(parent_ui, "Open Previous Publish?",
("Continue opening the old published file\n\n %s (v%d)\n\n"
"from the publish area?" % (file.name, file.version)),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer != QtGui.QMessageBox.Yes:
return False
return self._do_copy_and_open(src_path = None,
dst_path = file.publish_path,
version = file.version,
read_only = True,
new_ctx = env.context,
parent_ui = parent_ui)
def _open_publish_read_only(self, file, env, parent_ui):
"""
Open a previous version of a publish file from the publish
area - this just opens it directly without any file copying
or validation
"""
return self._do_copy_and_open(src_path = None,
dst_path = file.publish_path,
version = file.version,
read_only = True,
new_ctx = env.context,
parent_ui = parent_ui)
def _open_publish_as_workfile(self, file, env, new_version, parent_ui):
"""
Open the published file - this will construct a new work path from the
work template and the publish fields before copying it and opening it
as a new work file
"""
if not file or not file.is_published:
return False
if not file.editable:
answer = QtGui.QMessageBox.question(parent_ui, "Open file read-only?",
("The published file you are opening: '%s', is "
"read-only:\n\n%s.\n\nWould you like to continue?"
% (file.name, file.not_editable_reason)),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return False
# trying to open a publish:
work_path = None
src_path = file.publish_path
# early check to see if the publish path & work path will actually be different:
if env.publish_template == env.work_template and "version" not in env.publish_template.keys:
            # assume that the work and publish paths will actually be the same!
work_path = src_path
else:
# get the work path for the publish:
try:
# get fields for the path:
fields = env.publish_template.get_fields(src_path)
# construct a context for the path:
sp_ctx = self._app.sgtk.context_from_path(src_path, env.context)
# if current user is defined, update fields to use this:
current_user = g_user_cache.current_user
if current_user and sp_ctx.user and sp_ctx.user["id"] != current_user["id"]:
sp_ctx = sp_ctx.create_copy_for_user(current_user)
# finally, use context to populate additional fields:
ctx_fields = sp_ctx.as_template_fields(env.work_template)
fields.update(ctx_fields)
# add next version to fields:
fields["version"] = new_version
# construct work path:
work_path = env.work_template.apply_fields(fields)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to get work file path",
("Failed to resolve work file path from publish path:\n\n%s\n\n%s\n\n"
"Unable to open file!" % (src_path, e)))
self._app.log_exception("Failed to resolve work file path from publish path: %s" % src_path)
return False
try:
self._app.log_metric("Open published file")
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
return self._do_copy_and_open(src_path, work_path, None, not file.editable, env.context, parent_ui)
```
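The version bookkeeping at the top of `execute()` in isolation, using a toy stand-in for the real file records (which come from the workfiles app):
```python
class _FakeFile(object):
    def __init__(self, is_local, is_published):
        self.is_local, self.is_published = is_local, is_published

file_versions = {1: _FakeFile(True, True), 2: _FakeFile(True, False), 3: _FakeFile(False, True)}
local_versions = [v for v, f in file_versions.iteritems() if f.is_local]
publish_versions = [v for v, f in file_versions.iteritems() if f.is_published]
print max(local_versions), max(publish_versions)  # 2 3 -> next version would be 4
```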
#### File: python/tk_multi_workfiles/crash_dbg_form.py
```python
import sgtk
from sgtk.platform.qt import QtGui
from .ui.crash_dbg_form import Ui_CrashDbgForm
import threading
import random
import time
class SgRunner(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self._lock = threading.Lock()
self._run = True
self._sg_searches = [
{"entity_type":"Task",
"filters":[['project', 'is', {'type': 'Project', 'name': 'Another Demo Project', 'id': 67}], ['entity', 'type_is', 'Asset']],
"fields":['project', 'code', 'description', 'image', 'entity.Asset.sg_asset_type', 'entity', 'content', 'step', 'sg_status_list', 'task_assignees', 'name'],
"order":[]},
{"entity_type":"Task",
"filters":[['project', 'is', {'type': 'Project', 'name': 'Another Demo Project', 'id': 67}], ['entity', 'type_is', 'Shot']],
"fields":['project', 'entity.Shot.sg_sequence', 'code', 'description', 'image', 'entity', 'content', 'step', 'sg_status_list', 'task_assignees', 'name'],
"order":[]}
]
self._thread_local = threading.local()
@property
def _shotgun(self):
self._lock.acquire()
try:
if not hasattr(self._thread_local, "sg"):
self._thread_local.sg = sgtk.util.shotgun.create_sg_connection()
return self._thread_local.sg
finally:
self._lock.release()
def stop(self):
self._lock.acquire()
try:
self._run = False
finally:
self._lock.release()
def run(self):
res = {}
while True:
self._lock.acquire()
try:
if not self._run:
break
finally:
self._lock.release()
"""
s = []
for tick in range(512):
time.sleep(0.001)
multiplier = random.randint(1, 8)
for i in range(8*multiplier):
s.append(tick*i)
time.sleep(2)
res = dict((i, c) for i, c in enumerate(s))
"""
sg_search = self._sg_searches[random.randint(0, len(self._sg_searches)-1)]
res = self._shotgun.find(sg_search["entity_type"],
sg_search["filters"],
sg_search["fields"],
sg_search["order"])
print len(res)
class CrashDbgForm(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._ui = Ui_CrashDbgForm()
self._ui.setupUi(self)
refresh_action = QtGui.QAction("Refresh", self)
refresh_action.setShortcut(QtGui.QKeySequence(QtGui.QKeySequence.Refresh))
refresh_action.triggered.connect(self._on_refresh_triggered)
self.addAction(refresh_action)
# create model:
self._model = QtGui.QStandardItemModel()
self._ui.tree_view.setModel(self._model)
self._ui.list_view.setModel(self._model)
# create sg query threads:
self._sg_runner_threads = []
self._sg_runner_threads.append(SgRunner())
self._sg_runner_threads.append(SgRunner())
self._sg_runner_threads.append(SgRunner())
for thread in self._sg_runner_threads:
thread.start()
def closeEvent(self, event):
"""
"""
for thread in self._sg_runner_threads:
print "Stopping sg runner thread..."
thread.stop()
thread.join()
print " > Stopped!"
if self._model:
self._model.deleteLater()
self._model = None
return QtGui.QWidget.closeEvent(self, event)
def _on_refresh_triggered(self):
"""
"""
#time.sleep(0.1)
self._model.clear()
self._repopulate_model()
self._repopulate_model()
self._repopulate_model()
def _update_groups(self, group_names):
"""
"""
new_items = []
for name in group_names:
group_item = QtGui.QStandardItem(name)
new_items.append(group_item)
if new_items:
self._model.invisibleRootItem().appendRows(new_items)
def _add_group(self, group_name):
"""
"""
group_item = QtGui.QStandardItem(group_name)
self._model.invisibleRootItem().appendRow(group_item)
return group_item
def _add_files(self, group_item, file_names):
"""
"""
new_items = []
for name in file_names:
item = QtGui.QStandardItem(name)
new_items.append(item)
if new_items:
group_item.appendRows(new_items)
def _repopulate_model(self):
"""
"""
search_id = random.randint(0, 19)
if search_id == 0:
self._update_groups(["Sequence 01"])
elif search_id == 1:
self._update_groups(["123", "Anm - Animation"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 2:
self._update_groups(["Anm", "Anm - Animation"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 3:
self._update_groups(["Animation"])
grp = self._add_group("Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 4:
self._update_groups(["shot_010", "Anm - Animation", "Comp - MoreComp", "FX - Effects", "FX - More FX", "Light - EvenMoreLighting", "Light - Lighting", "Light - MoreLighting", "Light - StillMoreLighting", "Light - YetMoreLighting", "More Anim - MoreAnim", "Roto - Roto"])
grp = self._add_group("Comp - MoreComp")
self._add_files(grp, ["nopublishes"])
grp = self._add_group("Light - EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 5:
self._update_groups(["Anm", "Anm - Animation"])
elif search_id == 6:
self._update_groups(["Animation"])
elif search_id == 7:
self._update_groups(["Comp", "Comp - MoreComp"])
grp = self._add_group("Comp - MoreComp")
self._add_files(grp, ["nopublishes"])
elif search_id == 8:
self._update_groups(["FX", "FX - Effects", "FX - More FX"])
elif search_id == 9:
self._update_groups(["Light", "Light - EvenMoreLighting", "Light - Lighting", "Light - MoreLighting", "Light - StillMoreLighting", "Light - YetMoreLighting"])
grp = self._add_group("Light - EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 10:
self._update_groups(["EvenMoreLighting"])
grp = self._add_group("EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 11:
self._update_groups(["Lighting"])
grp = self._add_group("Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 12:
self._update_groups(["MoreLighting"])
grp = self._add_group("MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 13:
self._update_groups(["StillMoreLighting"])
grp = self._add_group("StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 14:
self._update_groups(["YetMoreLighting"])
grp = self._add_group("YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 15:
self._update_groups(["More Anim", "More Anim - MoreAnim"])
elif search_id == 16:
self._update_groups(["Roto", "Roto - Roto"])
elif search_id == 17:
self._update_groups(["shot_020", "Light - Lighting"])
elif search_id == 18:
self._update_groups(["Light", "Light - Lighting"])
elif search_id == 19:
self._update_groups(["The End", "Anm - Animation", "Anm - Animation B", "Comp - Finalize"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
grp = self._add_group("Anm - Animation B")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
grp = self._add_group("Comp - Finalize")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
```
#### File: tk_multi_workfiles/entity_tree/entity_tree_form.py
```python
import weakref
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from ..ui.entity_tree_form import Ui_EntityTreeForm
from .entity_tree_proxy_model import EntityTreeProxyModel
from ..framework_qtwidgets import Breadcrumb
from ..util import get_model_str, map_to_source, get_source_model, monitor_qobject_lifetime
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunEntityModel = shotgun_model.ShotgunEntityModel
class EntityTreeForm(QtGui.QWidget):
"""
Entity tree widget class
"""
class _EntityBreadcrumb(Breadcrumb):
"""
Breadcrumb for a single model item.
"""
def __init__(self, label, entity):
"""
Constructor.
            :param label: Text label for the breadcrumb.
:param entity: Entity associated with this breadcrumb.
"""
Breadcrumb.__init__(self, label)
self.entity = entity
# Signal emitted when an entity is selected in the tree.
entity_selected = QtCore.Signal(object, object)# selection details, breadcrumbs
# Signal emitted when the 'New Task' button is clicked.
create_new_task = QtCore.Signal(object, object)# entity, step
def __init__(self, entity_model, search_label, allow_task_creation, extra_fields, parent):
"""
Construction
:param entity_model: The Shotgun Model this widget should connect to
:param search_label: The hint label to be displayed on the search control
:param allow_task_creation: Indicates if the form is allowed by the app settings to show the
create task button.
:param extra_fields: Extra fields to use when comparing model entries.
:param parent: The parent QWidget for this control
"""
QtGui.QWidget.__init__(self, parent)
# control if step->tasks in the entity hierarchy should be collapsed when building
# the search details.
self._collapse_steps_with_tasks = True
# keep track of the entity to select when the model is updated:
self._entity_to_select = None
# keep track of the currently selected item:
self._current_item_ref = None
# keep track of expanded items as items in the tree are expanded/collapsed. We
# also want to auto-expand root items the first time they appear so track them
# as well
self._expanded_items = set()
self._auto_expanded_root_items = set()
# load the setting that states whether the first level of the tree should be auto expanded
app = sgtk.platform.current_bundle()
self._auto_expand_tree = app.get_setting("auto_expand_tree")
# set up the UI
self._ui = Ui_EntityTreeForm()
self._ui.setupUi(self)
self._ui.search_ctrl.set_placeholder_text("Search %s" % search_label)
self._ui.search_ctrl.setToolTip("Press enter to complete the search")
# enable/hide the my-tasks-only button if we are showing tasks:
have_tasks = (entity_model and entity_model.get_entity_type() == "Task")
if not have_tasks:
self._ui.my_tasks_cb.hide()
# enable/hide the new task button if we have tasks and task creation is allowed:
if have_tasks and allow_task_creation:
# enable and connect the new task button
self._ui.new_task_btn.clicked.connect(self._on_new_task)
self._ui.new_task_btn.setEnabled(False)
else:
self._ui.new_task_btn.hide()
self._ui.entity_tree.expanded.connect(self._on_item_expanded)
self._ui.entity_tree.collapsed.connect(self._on_item_collapsed)
        self._is_resetting_model = False
        if entity_model:
            # only connect the model signals when a model was actually supplied -
            # connecting on a None model would raise an AttributeError:
            entity_model.modelAboutToBeReset.connect(self._model_about_to_reset)
            entity_model.modelReset.connect(self._model_reset)
            # Every time the model is refreshed with data from Shotgun, we'll need to re-expand nodes
            # that were expanded and reapply the current selection.
            entity_model.data_refreshed.connect(self._on_data_refreshed)
        if True:  # always take the proxy-model path; the else branch appears to be a debug toggle
# create a filter proxy model between the source model and the task tree view:
filter_model = EntityTreeProxyModel(self, ["content", {"entity": "name"}] + extra_fields)
monitor_qobject_lifetime(filter_model, "%s entity filter model" % search_label)
filter_model.setSourceModel(entity_model)
self._ui.entity_tree.setModel(filter_model)
# connect up the filter controls:
self._ui.search_ctrl.search_changed.connect(self._on_search_changed)
self._ui.my_tasks_cb.toggled.connect(self._on_my_tasks_only_toggled)
else:
self._ui.entity_tree.setModel(entity_model)
self._expand_root_rows()
# connect to the selection model for the tree view:
selection_model = self._ui.entity_tree.selectionModel()
if selection_model:
selection_model.selectionChanged.connect(self._on_selection_changed)
def _model_about_to_reset(self):
# Catch the currently selected item and convert it to dictionary form
# so we can pick it back after the model is reset.
if self._current_item_ref:
item = self._current_item_ref()
if item:
idx = item.index()
self._entity_to_select = idx.model().get_entity(item)
self._is_resetting_model = True
def _model_reset(self):
self._is_resetting_model = False
def shut_down(self):
"""
Clean up as much as we can to help the gc once the widget is finished with.
"""
signals_blocked = self.blockSignals(True)
try:
# clear any references:
self._entity_to_select = None
self._expanded_items = set()
self._auto_expanded_root_items = set()
# clear the selection:
if self._ui.entity_tree.selectionModel():
self._ui.entity_tree.selectionModel().clear()
# detach the filter model from the view:
view_model = self._ui.entity_tree.model()
if view_model:
self._ui.entity_tree.setModel(None)
if isinstance(view_model, EntityTreeProxyModel):
view_model.setSourceModel(None)
finally:
self.blockSignals(signals_blocked)
def select_entity(self, entity_type, entity_id):
"""
Select the specified entity in the tree. If the tree is still being populated then the selection
will happen when an item representing the entity appears in the model.
Note that this doesn't emit an entity_selected signal.
:param entity_type: The type of the entity to select
:param entity_id: The id of the entity to select
"""
# track the selected entity - this allows the entity to be selected when
# it appears in the model even if the model hasn't been fully populated yet:
self._entity_to_select = {"type":entity_type, "id":entity_id}
# reset the current selection without emitting a signal:
prev_selected_item = self._reset_selection()
self._current_item_ref = None
self._update_ui()
# try to update the selection to reflect the change:
self._update_selection(prev_selected_item)
def get_selection(self):
"""
Get the currently selected item as well as the breadcrumb trail that represents
the path for the selection.
:returns: A Tuple containing the details and breadcrumb trail of the current selection:
(selection_details, breadcrumb_trail)
- selection_details is a dictionary containing:
{"label":label, "entity":entity, "children":[children]}
- breadcrumb_trail is a list of Breadcrumb instances
"""
selection_details = {}
breadcrumb_trail = []
# get the currently selected index:
selected_indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
if len(selected_indexes) == 1:
selection_details = self._get_entity_details(selected_indexes[0])
breadcrumb_trail = self._build_breadcrumb_trail(selected_indexes[0])
return (selection_details, breadcrumb_trail)
def navigate_to(self, breadcrumb_trail):
"""
Update the selection to match the specified breadcrumb trail
:param breadcrumb_trail: A list of Breadcrumb instances that represent
an item in the tree.
"""
tree_model = self._ui.entity_tree.model()
entity_model = get_source_model(tree_model)
if not entity_model:
return
# figure out the item in the tree to select from the breadcrumb trail:
current_item = entity_model.invisibleRootItem()
for crumb in breadcrumb_trail:
# look for an item under the current item that this breadcrumb represents:
found_item = None
if isinstance(crumb, EntityTreeForm._EntityBreadcrumb):
# look for a child item that represents the entity:
for row in range(current_item.rowCount()):
child_item = current_item.child(row)
sg_entity = entity_model.get_entity(child_item)
if (sg_entity["type"] == crumb.entity["type"]
and sg_entity["id"] == crumb.entity["id"]):
found_item = child_item
break
else:
# look for a child item that has the same label:
for row in range(current_item.rowCount()):
child_item = current_item.child(row)
if get_model_str(child_item) == crumb.label:
found_item = child_item
break
if not found_item:
# stop traversal!
break
if isinstance(tree_model, QtGui.QAbstractProxyModel):
# check to see if the item is visible in the current filtered model:
filtered_idx = tree_model.mapFromSource(found_item.index())
if not filtered_idx.isValid():
# stop traversal as the item isn't in the filtered model!
break
# iterate down to the next level:
current_item = found_item
# finally, select the item in the tree:
idx_to_select = current_item.index()
if isinstance(tree_model, QtGui.QAbstractProxyModel):
idx_to_select = tree_model.mapFromSource(current_item.index())
self._ui.entity_tree.selectionModel().setCurrentIndex(idx_to_select, QtGui.QItemSelectionModel.SelectCurrent)
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
def _get_selected_item(self):
"""
Get the currently selected item.
:returns: The currently selected model item if any
"""
item = None
indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
if len(indexes) == 1:
item = self._item_from_index(indexes[0])
return item
def _reset_selection(self):
"""
Reset the current selection, returning the currently selected item if any. This
doesn't result in any signals being emitted by the current selection model.
:returns: The selected item before the selection was reset if any
"""
prev_selected_item = self._get_selected_item()
# reset the current selection without emitting any signals:
self._ui.entity_tree.selectionModel().reset()
self._update_ui()
return prev_selected_item
def _get_entity_details(self, idx):
"""
Get entity details for the specified model index. If steps are being collapsed into tasks
then these details will reflect that and will not be a 1-1 representation of the tree itself.
:param idx: The QModelIndex of the item to get the entity details for.
:returns: A dictionary containing entity information about the specified index containing the
following information:
{"label":label, "entity":entity, "children":[children]}
- label: The label of the corresponding item
- entity: The entity dictionary for the corresponding item
- children: A list of immediate children for the corresponding item - each item in
the list is a dictionary containing 'label' and 'entity'.
"""
if not idx.isValid():
return {}
# first, ensure that all child data has been loaded
idx.model().ensure_data_is_loaded(idx)
item = self._item_from_index(idx)
entity_model = get_source_model(idx.model())
if not item or not entity_model:
return {}
# get details for this item:
label = get_model_str(item)
entity = entity_model.get_entity(item)
# get details for children:
children = []
collapsed_children = []
view_model = self._ui.entity_tree.model()
for row in range(view_model.rowCount(idx)):
child_idx = view_model.index(row, 0, idx)
child_item = self._item_from_index(child_idx)
if not child_item:
continue
child_label = get_model_str(child_item)
child_entity = entity_model.get_entity(child_item)
children.append({"label":child_label, "entity":child_entity})
if self._collapse_steps_with_tasks and child_entity and child_entity["type"] == "Step":
# see if grand-child is actually a task:
for child_row in range(view_model.rowCount(child_idx)):
grandchild_idx = view_model.index(child_row, 0, child_idx)
grandchild_item = self._item_from_index(grandchild_idx)
if not grandchild_item:
continue
grandchild_label = get_model_str(grandchild_item)
grandchild_entity = entity_model.get_entity(grandchild_item)
if grandchild_entity and grandchild_entity["type"] == "Task":
# found a task under a step so we can safely collapse tasks to steps!
collapsed_child_label = "%s - %s" % (child_label, grandchild_label)
collapsed_children.append({"label":collapsed_child_label, "entity":grandchild_entity})
if collapsed_children:
# prefer collapsed children instead of children if we have them
children = collapsed_children
elif self._collapse_steps_with_tasks and entity and entity["type"] == "Step":
# it's possible that entity is actually a Step and the Children are all tasks - if this is
# the case then update the child entities to be 'collapsed' and clear the entity on the Step
# item:
for child in children:
child_label = child["label"]
child_entity = child["entity"]
if child_entity and child_entity["type"] == "Task":
collapsed_child_label = "%s - %s" % (label, child_label)
collapsed_children.append({"label":collapsed_child_label, "entity":child_entity})
if collapsed_children:
entity = None
children = collapsed_children
return {"label":label, "entity":entity, "children":children}
def _on_search_changed(self, search_text):
"""
Slot triggered when the search text has been changed.
:param search_text: The new search text
"""
# reset the current selection without emitting any signals:
prev_selected_item = self._reset_selection()
try:
# update the proxy filter search text:
filter_reg_exp = QtCore.QRegExp(search_text, QtCore.Qt.CaseInsensitive, QtCore.QRegExp.FixedString)
self._ui.entity_tree.model().setFilterRegExp(filter_reg_exp)
finally:
# and update the selection - this will restore the original selection if possible.
self._update_selection(prev_selected_item)
self._fix_expanded_rows()
def _on_my_tasks_only_toggled(self, checked):
"""
Slot triggered when the show-my-tasks checkbox is toggled
:param checked: True if the checkbox has been checked, otherwise False
"""
# reset the current selection without emitting any signals:
prev_selected_item = self._reset_selection()
try:
self._ui.entity_tree.model().only_show_my_tasks = checked
finally:
# and update the selection - this will restore the original selection if possible.
self._update_selection(prev_selected_item)
self._fix_expanded_rows()
def _update_selection(self, prev_selected_item):
"""
Update the selection to either the to-be-selected entity if set or the current item if known. The
current item is the item that was last selected but which may no longer be visible in the view due
to filtering. This allows it to be tracked so that the selection state is correctly restored when
        it becomes visible again.
        :param prev_selected_item: The item that was selected before this update; if the final
                                   selection differs, an entity_selected signal is emitted.
        """
entity_model = get_source_model(self._ui.entity_tree.model())
if not entity_model:
return
# we want to make sure we don't emit any signals whilst we are
# manipulating the selection:
signals_blocked = self.blockSignals(True)
try:
# try to get the item to select:
item = None
if self._entity_to_select:
# we know about an entity we should try to select:
if entity_model.get_entity_type() == self._entity_to_select["type"]:
item = entity_model.item_from_entity(self._entity_to_select["type"], self._entity_to_select["id"])
elif self._current_item_ref:
# no item to select but we do know about a current item:
item = self._current_item_ref()
if item:
idx = item.index()
if isinstance(self._ui.entity_tree.model(), QtGui.QAbstractProxyModel):
# map the index to the proxy model:
idx = self._ui.entity_tree.model().mapFromSource(idx)
if idx.isValid():
# make sure the item is expanded and visible in the tree:
self._ui.entity_tree.scrollTo(idx)
# select the item:
self._ui.entity_tree.selectionModel().setCurrentIndex(idx, QtGui.QItemSelectionModel.SelectCurrent)
finally:
self.blockSignals(signals_blocked)
# if the selection is different to the previously selected item then we
# will emit an entity_selected signal:
selected_item = self._get_selected_item()
if id(selected_item) != id(prev_selected_item):
# get the selected entity details:
selection_details, breadcrumbs = self.get_selection()
# emit a selection changed signal:
self.entity_selected.emit(selection_details, breadcrumbs)
def _update_ui(self):
"""
Update the UI to reflect the current selection, etc.
"""
enable_new_tasks = False
selected_indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
if len(selected_indexes) == 1:
item = self._item_from_index(selected_indexes[0])
entity_model = get_source_model(selected_indexes[0].model())
if item and entity_model:
entity = entity_model.get_entity(item)
if entity and entity["type"] != "Step":
if entity["type"] == "Task":
if entity.get("entity"):
enable_new_tasks = True
else:
enable_new_tasks = True
self._ui.new_task_btn.setEnabled(enable_new_tasks)
def _on_selection_changed(self, selected, deselected):
"""
Slot triggered when the selection changes due to user action
:param selected: QItemSelection containing any newly selected indexes
:param deselected: QItemSelection containing any newly deselected indexes
"""
# As the model is being reset, the selection is getting updated constantly,
# so ignore these selection changes.
if self._is_resetting_model:
return
# our tree is single-selection so extract the newly selected item from the
# list of indexes:
selection_details = {}
breadcrumbs = []
item = None
selected_indexes = selected.indexes()
if len(selected_indexes) == 1:
selection_details = self._get_entity_details(selected_indexes[0])
breadcrumbs = self._build_breadcrumb_trail(selected_indexes[0])
item = self._item_from_index(selected_indexes[0])
# update the UI
self._update_ui()
# keep track of the current item:
self._current_item_ref = weakref.ref(item) if item else None
if self._current_item_ref:
# clear the entity-to-select as the current item now takes precedence
self._entity_to_select = None
# emit selection_changed signal:
self.entity_selected.emit(selection_details, breadcrumbs)
def _on_data_refreshed(self, modifications_made):
"""
Slot triggered when new rows are inserted into the filter model. When this happens
we just make sure that any new root rows are expanded.
:param parent_idx: The parent model index of the rows that were inserted
:param first: The first row id inserted
:param last: The last row id inserted
"""
if not modifications_made:
return
# expand any new root rows:
self._expand_root_rows()
# try to select the current entity from the new items in the model:
prev_selected_item = self._reset_selection()
self._update_selection(prev_selected_item)
def _expand_root_rows(self):
"""
Expand all root rows in the Tree if they have never been expanded
"""
view_model = self._ui.entity_tree.model()
if not view_model:
return
# check if we should automatically expand the root level of the tree
if not self._auto_expand_tree:
return
# disable widget paint updates whilst we update the expanded state of the tree:
self._ui.entity_tree.setUpdatesEnabled(False)
# and block signals so that the expanded signal doesn't fire during item expansion!
signals_blocked = self._ui.entity_tree.blockSignals(True)
try:
for row in range(view_model.rowCount()):
idx = view_model.index(row, 0)
item = self._item_from_index(idx)
if not item:
continue
ref = weakref.ref(item)
if ref in self._auto_expanded_root_items:
# we already processed this item
continue
# expand item:
self._ui.entity_tree.expand(idx)
self._auto_expanded_root_items.add(ref)
self._expanded_items.add(ref)
finally:
self._ui.entity_tree.blockSignals(signals_blocked)
# re-enable updates to allow painting to continue
self._ui.entity_tree.setUpdatesEnabled(True)
def _fix_expanded_rows(self):
"""
Update all items that have previously been expanded to be expanded. Filtering resets
the expanded state of items so this is used to reset them correctly.
"""
view_model = self._ui.entity_tree.model()
if not view_model:
return
# disable widget paint updates whilst we update the expanded state of the tree:
self._ui.entity_tree.setUpdatesEnabled(False)
# and block signals so that the expanded signal doesn't fire during item expansion!
signals_blocked = self._ui.entity_tree.blockSignals(True)
try:
valid_expanded_items = set()
for item_ref in self._expanded_items:
if not item_ref() or not item_ref().model():
# bad ref!
continue
src_idx = item_ref().index()
if not src_idx.isValid():
# item doesn't exist in the source model!
continue
valid_expanded_items.add(item_ref)
# map from the source index to the proxy index if needed:
filtered_idx = src_idx
if isinstance(view_model, QtGui.QAbstractProxyModel):
filtered_idx = self._ui.entity_tree.model().mapFromSource(src_idx)
if not filtered_idx.isValid():
continue
# and if the item isn't expanded then expand it:
if not self._ui.entity_tree.isExpanded(filtered_idx):
self._ui.entity_tree.expand(filtered_idx)
# update expanded item list with valid item refs:
self._expanded_items = valid_expanded_items
finally:
self._ui.entity_tree.blockSignals(signals_blocked)
# re-enable updates to allow painting to continue
self._ui.entity_tree.setUpdatesEnabled(True)
def _item_from_index(self, idx):
"""
Find the corresponding model item from the specified index. This handles
the indirection introduced by the filter model.
:param idx: The model index to find the item for
:returns: The item in the model represented by the index
"""
src_idx = map_to_source(idx)
return src_idx.model().itemFromIndex(src_idx)
def _on_item_expanded(self, idx):
"""
Slot triggered when an item in the tree is expanded - used to track expanded
state for all items.
:param idx: The index of the item in the tree being expanded
"""
item = self._item_from_index(idx)
if not item:
return
self._expanded_items.add(weakref.ref(item))
def _on_item_collapsed(self, idx):
"""
Slot triggered when an item in the tree is collapsed - used to track expanded
state for all items.
:param idx: The index of the item in the tree being collapsed
"""
item = self._item_from_index(idx)
if not item:
return
self._expanded_items.discard(weakref.ref(item))
def _on_new_task(self):
"""
        Slot triggered when the new task button is clicked. Extracts the necessary
        information from the widget and emits a signal for the containing code to handle.
"""
# get the currently selected index:
selected_indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
if len(selected_indexes) != 1:
return
# extract the selected model index from the selection:
src_index = map_to_source(selected_indexes[0])
# determine the currently selected entity:
entity_model = src_index.model()
entity_item = entity_model.itemFromIndex(src_index)
entity = entity_model.get_entity(entity_item)
if not entity:
return
if entity["type"] == "Step":
# can't create tasks on steps as we don't have an entity!
return
step = None
if entity["type"] == "Task":
step = entity.get("step")
entity = entity.get("entity")
if not entity:
return
# and emit the signal for this entity:
self.create_new_task.emit(entity, step)
def _build_breadcrumb_trail(self, idx):
"""
Builds the breadcrumb trail for the selected model index.
:param idx: Index of an item in the selection model.
:returns: List of _EntityBreadcrumb for each item in the hierarchy.
"""
if not idx.isValid():
return []
# walk up the tree starting with the specified index:
breadcrumbs = []
src_index = map_to_source(idx)
entity_model = src_index.model()
while src_index.isValid():
entity = entity_model.get_entity(entity_model.itemFromIndex(src_index))
if entity:
name_token = "content" if entity["type"] == "Task" else "name"
label = "<b>%s</b> %s" % (entity["type"], entity.get(name_token))
breadcrumbs.append(EntityTreeForm._EntityBreadcrumb(label, entity))
else:
label = get_model_str(src_index)
breadcrumbs.append(Breadcrumb(label))
src_index = src_index.parent()
# return reversed list:
return breadcrumbs[::-1]
```
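A minimal wiring sketch for the form above; `entity_model`, `parent_widget` and the slot are hypothetical stand-ins (in the app the model is a ShotgunEntityModel built by the dialog that hosts the form):
```python
# Hypothetical usage sketch - not part of the file above. Assumes entity_model
# is a ShotgunEntityModel (or None) and parent_widget is an existing QWidget.
form = EntityTreeForm(entity_model, "Tasks", True, [], parent_widget)

def on_entity_selected(selection_details, breadcrumbs):
    # selection_details is {"label": ..., "entity": ..., "children": [...]}
    print(selection_details.get("label"))

form.entity_selected.connect(on_entity_selected)
# selects the task once an item for it appears in the model:
form.select_entity("Task", 123)
```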
#### File: python/tk_multi_workfiles/file_search_cache.py
```python
import sgtk
from .util import Threaded
class FileSearchCache(Threaded):
"""
Implementation of FileSearchCache class
"""
class _CachedFileInfo(object):
"""
        Storage for file versions - encapsulates a dictionary of files indexed
        by their version
"""
def __init__(self):
"""
Construction
"""
            self.versions = {}  # version:FileItem()
class _CacheEntry(object):
"""
A single cache entry - stores the work area the files were found in together with the
list of files indexed by the unique file key.
"""
def __init__(self):
"""
Construction
"""
self.work_area = None
self.is_dirty = True
            self.file_info = {}  # FileItem.key:_CachedFileInfo()
def __init__(self):
"""
Construction
"""
Threaded.__init__(self)
self._cache = {}
@Threaded.exclusive
def add(self, work_area, files, is_dirty=None):
"""
Add the specified files to the cache along with the work area they were found in
:param work_area: A WorkArea instance containing information about the work area the
files were found in
:param files: A list of the FileItem's representing the files found in the specified
work area
:param is_dirty: True if this cache entry should be marked as dirty, False if not. If
is_dirty is None then the previous value will be used or True if there
is no previous value.
"""
# find the current entry if there is one - this also returns the cache key:
key, current_entry = self._find_entry(work_area)
if is_dirty is None:
if current_entry:
# use the current value for the dirty flag:
is_dirty = current_entry.is_dirty
else:
# default dirty to True
is_dirty = True
# build the new cache entry from the list of files:
new_entry = FileSearchCache._CacheEntry()
new_entry.work_area = work_area
new_entry.is_dirty = is_dirty
for file_item in files:
new_entry.file_info.setdefault(file_item.key,
FileSearchCache._CachedFileInfo()).versions[file_item.version] = file_item
# add the new entry to the cache:
self._cache[key] = new_entry
@Threaded.exclusive
def find_file_versions(self, work_area, file_key, clean_only=False):
"""
Find all file versions for the specified file key and context.
:param work_area: The work area to find the file version for
:param file_key: A unique file key that can be used to locate all versions of a single file
:param clean_only: If False then dirty cache entries will be included in the returned results. If
True then they will be omitted. Defaults to False.
:returns: A dictionary {version:FileItem} of all file versions found.
"""
_, entry = self._find_entry(work_area)
if not entry:
# return None as we don't have a cached result for this context!
return None
if clean_only and entry.is_dirty:
return None
file_info = entry.file_info.get(file_key)
if not file_info:
# although we have a cache entry, we don't have any files for the key!
return {}
# return a dictionary of version:FileItem entries:
        return dict(file_info.versions)
@Threaded.exclusive
def find(self, entity, user=None):
"""
Find the list of files and work area for the specified entity and user.
:param entity: The entity to return files for
:param user: The user to return files for. If user is None then the user for the current
context will be used
        :returns: Tuple containing (list(FileItem), WorkArea) or None if an entry isn't found
"""
key = self._construct_key(entity, user)
entry = self._cache.get(key)
if not entry:
return None
files = []
for file_info in entry.file_info.values():
files.extend([f for f in file_info.versions.values()])
return (files, entry.work_area)
@Threaded.exclusive
def set_dirty(self, entity, user=None, is_dirty=True):
"""
Mark the cache entry for the specified entity and user as being dirty.
:param entity: The entity to set the cache entry dirty for
:param user: The user to set the cache entry dirty for. If user is None then the user for
the current context will be used.
:param is_dirty: True if the entry should be marked as dirty, otherwise False
"""
key = self._construct_key(entity, user)
entry = self._cache.get(key)
if not entry:
return None
entry.is_dirty = is_dirty
@Threaded.exclusive
def set_work_area_dirty(self, work_area, dirty=True):
"""
Mark the cache entry for the specified work area as being dirty.
:param work_area: The work area to update
:param dirty: True if the entry should be marked as dirty, otherwise False
"""
_, entry = self._find_entry(work_area)
if not entry:
return
entry.is_dirty = dirty
@Threaded.exclusive
def clear(self):
"""
Clear the cache
"""
self._cache = {}
def _find_entry(self, work_area):
"""
Find the current entry for the specified work area if there is one
:param work_area: The work area to find the cache entry for
:returns: Tuple containing (key, entry) where key is the key into the cache
and entry is the cache entry
"""
if not work_area or not work_area.context:
return (None, None)
ctx = work_area.context
key_entity = ctx.task or ctx.step or ctx.entity or ctx.project
key = self._construct_key(key_entity, ctx.user)
entry = self._cache.get(key)
return (key, entry)
def _construct_key(self, entity, user):
"""
Construct a cache key from the specified entity and user.
:param entity: The entity to construct the cache key with
:param user: The user to construct the cache key with
:returns: A unique key which can be used to locate the entry in the cache
for the specified entity and user
"""
if not user:
# use the current user from the app context:
app = sgtk.platform.current_bundle()
user = app.context.user
key_parts = []
key_parts.append((entity["type"], entity["id"]) if entity else None)
key_parts.append((user["type"], user["id"]) if user else None)
# key needs to be hashable to return a tuple of the key parts:
return tuple(key_parts)
```
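A sketch of the cache's add/find cycle. The stand-in file class below mocks only the two attributes the cache reads (`key` and `version`); `work_area` is assumed to be a real WorkArea with a fully configured context:
```python
# Sketch only - _StubFile mocks just the attributes FileSearchCache touches.
class _StubFile(object):
    def __init__(self, key, version):
        self.key = key
        self.version = version

cache = FileSearchCache()
cache.add(work_area, [_StubFile("shot_010/comp", 1), _StubFile("shot_010/comp", 2)])
# all cached versions for the key - {1: <_StubFile>, 2: <_StubFile>}:
versions = cache.find_file_versions(work_area, "shot_010/comp")
# entries added without an explicit is_dirty flag default to dirty, so a
# clean-only query returns None until the entry is marked clean:
assert cache.find_file_versions(work_area, "shot_010/comp", clean_only=True) is None
cache.set_work_area_dirty(work_area, dirty=False)
assert cache.find_file_versions(work_area, "shot_010/comp", clean_only=True) is not None
```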
#### File: tk_multi_workfiles/my_tasks/my_task_item_delegate.py
```python
import weakref
import sgtk
from sgtk.platform.qt import QtCore, QtGui
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunEntityModel = shotgun_model.ShotgunEntityModel
from .task_widget import TaskWidget
from ..framework_qtwidgets import WidgetDelegate
from ..util import map_to_source
class MyTaskItemDelegate(WidgetDelegate):
"""
"""
def __init__(self, extra_display_fields, view):
"""
"""
WidgetDelegate.__init__(self, view)
self._extra_display_fields = extra_display_fields
view.setRootIsDecorated(False)
self._paint_widget = None
self._widget_sz = None
# ------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------
def _get_painter_widget(self, model_index, parent):
"""
"""
if not model_index.isValid():
return None
if not self._paint_widget or not self._paint_widget():
paint_widget = TaskWidget(parent)
self._paint_widget = weakref.ref(paint_widget)
self._widget_sz = paint_widget.size()
return self._paint_widget()
def _create_editor_widget(self, model_index, style_options, parent):
"""
"""
if not model_index.isValid():
return None
widget = TaskWidget(parent)
# setup the widget to operate on this item:
style_options.state = style_options.state | QtGui.QStyle.State_Selected
self._setup_widget(widget, model_index, style_options)
return widget
def sizeHint(self, style_options, model_index):
"""
"""
if not model_index.isValid():
return QtCore.QSize()
if self._widget_sz is None:
# the size is set when we first create the painter widget
self._get_painter_widget(model_index, self.view)
return self._widget_sz or QtCore.QSize()
def _on_before_paint(self, widget, model_index, style_options):
"""
"""
self._setup_widget(widget, model_index, style_options)
def _setup_widget(self, widget, model_index, style_options):
"""
"""
src_index = map_to_source(model_index)
if not src_index or not src_index.isValid():
return
model = src_index.model()
if not model:
return
item = model.itemFromIndex(src_index)
if not item:
return
sg_data = item.get_sg_data()
# set the thumbnail to the icon for the item:
widget.set_thumbnail(item.icon())
        # set entity info (a task may not be linked to an entity):
        entity = sg_data.get("entity") or {}
        entity_name = entity.get("name")
        entity_type = entity.get("type")
entity_type_icon = model.get_entity_icon(entity_type) if entity_type else None
widget.set_entity(entity_name, entity_type, entity_type_icon)
# set task info:
task_name = sg_data.get("content")
task_type_icon = model.get_entity_icon("Task")
widget.set_task(task_name, task_type_icon)
# set 'other' info:
other_data = [str(sg_data.get(field)) for field in self._extra_display_fields]
other_text = ", ".join(other_data)
widget.set_other(other_text)
# finally, update the selected state of the widget:
widget.set_selected((style_options.state & QtGui.QStyle.State_Selected) == QtGui.QStyle.State_Selected)
```
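Attaching the delegate follows the standard Qt pattern; `my_tasks_view` and the field list below are assumptions for illustration:
```python
# Hypothetical setup - my_tasks_view is a QTreeView backed by the tasks model.
delegate = MyTaskItemDelegate(["due_date", "sg_status_list"], my_tasks_view)
my_tasks_view.setItemDelegate(delegate)
```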
#### File: python/tk_multi_workfiles/util.py
```python
import threading
import sgtk
from sgtk.platform.qt import QtCore, QtGui
class Threaded(object):
"""
Threaded base class that contains a threading.Lock member and an
'exclusive' function decorator that implements exclusive access
to the contained code using the lock
"""
def __init__(self):
"""
Construction
"""
self._lock = threading.Lock()
@staticmethod
def exclusive(func):
"""
Static method intended to be used as a function decorator in derived
classes. Use it by doing:
@Threaded.exclusive
def my_method(self, ...):
...
:param func: Function to decorate/wrap
:returns: Wrapper function that executes the function inside the acquired lock
"""
def wrapper(self, *args, **kwargs):
"""
Internal wrapper method that executes the function with the specified arguments
inside the acquired lock
:param *args: The function parameters
:param **kwargs: The function named parameters
:returns: The result of the function call
"""
self._lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self._lock.release()
return wrapper
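# Example (sketch, not used elsewhere in this module): derive from Threaded and
# decorate any method that must hold the lock while it runs, e.g.
#
#     class Counter(Threaded):
#         def __init__(self):
#             Threaded.__init__(self)
#             self._count = 0
#         @Threaded.exclusive
#         def increment(self):
#             self._count += 1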
def value_to_str(value):
    """
    Safely convert the value to a string - handles QtCore.QString if using PyQt
    :param value: The value to convert to a Python str
    :returns: A Python string representing the value
    """
    if value is None:
        return ""
# handle PyQt.QVariant
if hasattr(QtCore, "QVariant") and isinstance(value, QtCore.QVariant):
value = value.toPyObject()
if isinstance(value, unicode):
# encode to str utf-8
return value.encode("utf-8")
elif isinstance(value, str):
# it's a string anyway so just return
return value
elif hasattr(QtCore, "QString") and isinstance(value, QtCore.QString):
# running PyQt!
# QtCore.QString inherits from str but supports
# unicode, go figure! Lets play safe and return
# a utf-8 string
return str(value.toUtf8())
else:
# For everything else, just return as string
return str(value)
def get_model_data(item_or_index, role=QtCore.Qt.DisplayRole):
"""
Safely get the Qt model data for the specified item or index. This handles QVariant
types returned when using PyQt instead of PySide.
:param item_or_index: The QStandardModelItem or QModelIndex to retrieve data for
:param role: The Qt data role to return data for
:returns: The data for the specified item or index.
"""
data = item_or_index.data(role)
if hasattr(QtCore, "QVariant") and isinstance(data, QtCore.QVariant):
# handle PyQt!
data = data.toPyObject()
return data
def get_model_str(item_or_index, role=QtCore.Qt.DisplayRole):
"""
Safely get the Qt model data as a Python string for the specified item or index. This
handles QVariant types returned when using PyQt instead of PySide.
:param item_or_index: The QStandardModelItem or QModelIndex to retrieve a string for
:param role: The Qt data role to return as a string
:returns: A Python string representing the data for the specified item
or index.
"""
data = get_model_data(item_or_index, role)
return value_to_str(data)
def map_to_source(idx, recursive=True):
"""
    Map the specified index to its source model. This can be done recursively to map
back through a chain of proxy models to the source model at the beginning of the chain
:param idx: The index to map from
:param recursive: If true then the function will recurse up the model chain until it
finds an index belonging to a model that doesn't derive from
QAbstractProxyModel. If false then it will just return the index
                      from the immediate parent model.
:returns: QModelIndex in the source model or the first model in the chain that
isn't a proxy model if recursive is True.
"""
src_idx = idx
while src_idx.isValid() and isinstance(src_idx.model(), QtGui.QAbstractProxyModel):
src_idx = src_idx.model().mapToSource(src_idx)
if not recursive:
break
return src_idx
def get_source_model(model, recursive=True):
"""
Return the source model for the specified model. If recursive is True then this will return
the first model in the model chain that isn't a proxy model.
:param model: The model to get the source model from
:param recursive: If True then recurse up the model chain until we find a model that isn't
derived from QAbstractProxyModel. If false then just return the immediate
parent model.
:returns: The source model or the first non-proxy model if recursive is True
"""
src_model = model
while src_model and isinstance(src_model, QtGui.QAbstractProxyModel):
src_model = src_model.sourceModel()
if not recursive:
break
return src_model
def set_widget_property(widget, property_name, property_value, refresh_style=True, refresh_children=False):
"""
Set a Qt property on a widget and if requested, also ensure that the style
sheet is refreshed
:param widget: The widget to set the property on
:param property_name: The name of the property to set
:param property_value: The value to set the property to
:param refresh_style: If True then the widgets style will be refreshed
:param refresh_children: If True and refresh_style is also True then the style
of any child widgets will also be refreshed
"""
# set the property:
widget.setProperty(property_name, property_value)
# and if needed, refresh the style:
if refresh_style:
refresh_widget_style_r(widget, refresh_children)
def refresh_widget_style_r(widget, refresh_children=False):
"""
    Recursively refresh the style sheet of the widget and optionally its children
by unpolishing and repolishing the widgets style.
:param widget: The widget to refresh the style of
:param refresh_children: If True then the style of any child widgets will also
be refreshed
"""
widget.style().unpolish(widget)
widget.ensurePolished()
if not refresh_children:
return
for child in widget.children():
if not isinstance(child, QtGui.QWidget):
continue
refresh_widget_style_r(child, refresh_children)
# storage for any tracked qobjects
_g_monitored_qobjects = {}
def monitor_qobject_lifetime(obj, name=""):
"""
Debug method to help track the lifetime of a QObject derived instance. Hooks into
the instances destroyed signal to report when the QObject has been destroyed.
:param obj: The QObject instance to monitor
:param name: An optional name to be appended to the debug output, useful for identifying
a specific instance of a class.
"""
msg = type(obj).__name__
if name:
msg = "%s [%s]" % (msg, name)
global _g_monitored_qobjects
uid = len(_g_monitored_qobjects)
_g_monitored_qobjects[uid] = msg
obj.destroyed.connect(lambda m=msg, u=uid: _on_qobject_destroyed(m, u))
def _on_qobject_destroyed(name, uid):
"""
Slot triggered whenever a monitored qobject is destroyed - reports to debug that the object
was destroyed.
:param name: Name of the instance that was destroyed
:param uid: Unique id of the QObject used to look it up in the monitored list
"""
app = sgtk.platform.current_bundle()
app.log_debug("%s destroyed" % name)
global _g_monitored_qobjects
if uid in _g_monitored_qobjects:
del _g_monitored_qobjects[uid]
def report_non_destroyed_qobjects(clear_list=True):
"""
Report any monitored QObjects that have not yet been destroyed. Care should be taken to
account for QObjects that are pending destruction via deleteLater signals that may be
pending.
:param clear_list: If true then the list of monitored QObjects will be cleared after
this function has reported them.
"""
app = sgtk.platform.current_bundle()
global _g_monitored_qobjects
app.log_debug("%d monitored QObjects have not been destroyed!" % len(_g_monitored_qobjects))
for msg in _g_monitored_qobjects.values():
app.log_debug(" - %s" % msg)
if clear_list:
_g_monitored_qobjects = {}
def get_template_user_keys(template):
"""
Finds the keys in a template that relate to the HumanUser entity.
:param template: Template to look for HumanUser related keys.
:returns: A list of key names.
"""
# find all 'user' keys in the template:
user_keys = set()
if "HumanUser" in template.keys:
user_keys.add("HumanUser")
for key in template.keys.values():
if key.shotgun_entity_type == "HumanUser":
user_keys.add(key.name)
return user_keys
def resolve_filters(filters):
"""
When passed a list of filters, it will resolve strings found in the filters using the context
example: '{context.user}' could get resolved to {'type': 'HumanUser', 'id': 86, 'name': '<NAME>'}
:param filters: a list of filters as found in the info.yml config
should be in the format: [[task_assignees, is, '{context.user}'],[sg_status_list, not_in, [fin,omt]]]
:return: A List of filters for use with the shotgun api
"""
app = sgtk.platform.current_bundle()
resolved_filters = []
for filter in filters:
if type(filter) is dict:
resolved_filter = {
"filter_operator": filter["filter_operator"],
"filters": resolve_filters(filter["filters"])}
else:
resolved_filter = []
for field in filter:
if field == "{context.entity}":
field = app.context.entity
elif field == "{context.step}":
field = app.context.step
elif field == "{context.task}":
field = app.context.task
elif field == "{context.user}":
field = app.context.user
resolved_filter.append(field)
resolved_filters.append(resolved_filter)
return resolved_filters
```
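To make the resolution above concrete, a sketch using the docstring's own example tokens (the resolved user dictionary depends on the running context):
```python
# Sketch: with an app context whose user resolves to
# {"type": "HumanUser", "id": 86, "name": "<NAME>"}, then:
filters = [
    ["task_assignees", "is", "{context.user}"],
    ["sg_status_list", "not_in", ["fin", "omt"]],
]
resolved = resolve_filters(filters)
# resolved == [
#     ["task_assignees", "is", {"type": "HumanUser", "id": 86, "name": "<NAME>"}],
#     ["sg_status_list", "not_in", ["fin", "omt"]],
# ]
```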
#### File: python/tk_multi_workfiles/work_files.py
```python
import sys
import gc
import sgtk
from sgtk.platform.qt import QtCore
from .util import report_non_destroyed_qobjects
def dbg_info(func):
"""
Decorator function used to track memory and other useful debug information around the file-open
and file-save modal dialog calls. If debug is enabled, this will print out a list of monitored
QObject's that aren't destroyed correctly together with some Python memory/object stats.
Note that the list of QObjects is misleading if the QApplication is set to close when the last
window is closed and the dialog is the last window.
"""
def wrapper(*args, **kwargs):
"""
"""
# grab the pre-run memory info:
num_objects_before = len(gc.get_objects())
bytes_before = 0
        if sys.platform == "darwin":
import resource
bytes_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
# run the function:
res = func(*args, **kwargs)
# report any non-destroyed QObjects:
# Note, this will usually run before the main objects have been destroyed by the
# event loop so it's important to cross-check the output with subsequent lines.
report_non_destroyed_qobjects()
# cleanup and grab the post-run memory info:
gc.collect()
bytes_after = 0
        if sys.platform == "darwin":
bytes_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
num_objects_after = len(gc.get_objects())
# and report any difference in memory usage:
bytes_diff = bytes_after - bytes_before
obj_diff = num_objects_after - num_objects_before
msg = ("Memory before: %0.2fMb, current: %0.2fMb, leaked: %0.2fMb (%d new Python objects)"
% (bytes_before, bytes_after, bytes_diff, obj_diff))
app = sgtk.platform.current_bundle()
app.log_debug(msg)
# return the result:
return res
return wrapper
class WorkFiles(object):
"""
Main entry point for all commands in the app.
"""
def __init__(self):
"""
Constructor.
"""
app = sgtk.platform.current_bundle()
app.log_debug("Synchronizing remote path cache...")
app.sgtk.synchronize_filesystem_structure()
app.log_debug("Path cache up to date!")
# If the user wants to debug the dialog, show it modally and wrap it
# with memory leak-detection code.
if app.use_debug_dialog:
self._dialog_launcher = dbg_info(app.engine.show_modal)
else:
self._dialog_launcher = app.engine.show_dialog
@staticmethod
def show_file_open_dlg():
"""
Show the file open dialog
"""
handler = WorkFiles()
from .file_open_form import FileOpenForm
handler._show_file_dlg("File Open", FileOpenForm)
@staticmethod
def show_file_save_dlg():
"""
Show the file save dialog
"""
handler = WorkFiles()
from .file_save_form import FileSaveForm
handler._show_file_dlg("File Save", FileSaveForm)
def _show_file_dlg(self, dlg_name, form):
"""
Shows the file dialog modally or not depending on the current DCC and settings.
:param dlg_name: Title of the dialog.
:param form: Factory for the dialog class.
"""
app = sgtk.platform.current_bundle()
try:
self._dialog_launcher(dlg_name, app, form)
except:
app.log_exception("Failed to create %s dialog!" % dlg_name)
```
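The decorator above can wrap any callable that shows a dialog; a sketch (the wrapped function is a stand-in, and a running Toolkit bundle is required for the debug logging):
```python
# Hypothetical: wrap a launcher so leaks and memory deltas are logged around it.
def show_some_dialog():
    pass  # stand-in for app.engine.show_modal(...)

monitored_launcher = dbg_info(show_some_dialog)
monitored_launcher()  # reports non-destroyed QObjects, then logs the memory delta
```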
#### File: tk_multi_workfiles/actions/new_task_action.py
```python
import sgtk
from sgtk.platform.qt import QtGui
from .action import Action
from ..new_task_form import NewTaskForm
from ..user_cache import g_user_cache
class NewTaskAction(Action):
"""
This action creates a new task for a given entity.
"""
def __init__(self, entity, step):
"""
Constructor.
:param entity: Entity for which a task needs to be created.
:param step: Default pipeline step for the new task.
"""
Action.__init__(self, "Create New Task")
self._entity = entity
self._step = step
def execute(self, parent_ui):
"""
Shows the task creation form and creates the task.
:param parent_ui: Parent widget for the dialog.
        :returns: True if task creation completed, False otherwise.
"""
if not self._entity:
return False
# show new task dialog:
app = sgtk.platform.current_bundle()
res, new_task_form = app.engine.show_modal("Create New Task", app, NewTaskForm, self._entity, self._step,
g_user_cache.current_user, parent_ui)
if res != QtGui.QDialog.Accepted:
return False
try:
from sgtk.util.metrics import EventMetric
pipeline_step = new_task_form._get_pipeline_step()
            properties = {
                "Linked Entity Type": pipeline_step.get("type", "Unknown"),
                "Method": "Form",  # created from the Qt widget rather than an API call
                "Task Name": pipeline_step.get("code", "unknown"),
            }
            # Log a usage metric for the newly created task.
EventMetric.log(
EventMetric.GROUP_TASKS,
"Created Task",
properties=properties,
bundle=app
)
        except ImportError:
# ignore all errors. ex: using a core that doesn't support metrics
pass
return True
```
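A sketch of invoking the action; the entity/step dictionaries and parent widget are invented stand-ins:
```python
# Hypothetical invocation - requires a running engine to show the modal form.
action = NewTaskAction({"type": "Shot", "id": 123}, {"type": "Step", "id": 7})
if action.execute(parent_ui=some_widget):
    print("task created")
```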
#### File: tk_multi_workfiles/actions/open_file_action.py
```python
import os
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from sgtk import TankError
from .file_action import FileAction
from ..scene_operation import reset_current_scene, open_file, OPEN_FILE_ACTION
from ..work_area import WorkArea
from ..file_item import FileItem
from ..file_finder import FileFinder
from ..user_cache import g_user_cache
class OpenFileAction(FileAction):
"""
"""
def _copy_file(self, source_path, target_path):
"""
Use hook to copy a file from source to target path
"""
self._app.log_debug("Copying file '%s' to '%s' via hook" % (source_path, target_path))
self._app.execute_hook("hook_copy_file",
source_path=source_path,
target_path=target_path)
def _do_copy_and_open(self, src_path, dst_path, version, read_only, new_ctx, parent_ui):
"""
Copies src_path to dst_path, creates folders, restarts the engine and then opens
the file from dst_path
:param src_path: The path of the file to copy
:param dst_path: The destination file path to open
:param version: The version of the work file to be opened
:param read_only: True if the work file should be opened read-only
        :param new_ctx: The context that the work file should be opened in
        :param parent_ui: The parent QWidget to use when showing message dialogs
        :returns: True if the source file is copied and successfully opened
"""
if not dst_path or not new_ctx:
# can't do anything!
return False
if src_path and src_path != dst_path:
# check that the source path exists:
if not os.path.exists(src_path):
QtGui.QMessageBox.critical(parent_ui, "File doesn't exist!",
"The file\n\n%s\n\nCould not be found to open!" % src_path)
return False
if new_ctx != self._app.context:
# ensure folders exist. This serves the
# dual purpose of populating the path
# cache and ensuring we can copy the file
# if we need to
try:
FileAction.create_folders(new_ctx)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to create folders!",
"Failed to create folders:\n\n%s!" % e)
self._app.log_exception("Failed to create folders")
return False
# reset the current scene:
try:
if not reset_current_scene(self._app, OPEN_FILE_ACTION, new_ctx):
self._app.log_debug("Failed to reset the current scene!")
return False
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to reset the scene",
"Failed to reset the scene:\n\n%s\n\nUnable to continue!" % e)
self._app.log_exception("Failed to reset the scene!")
return False
# if need to, copy the file
if src_path and src_path != dst_path:
# check that local path doesn't already exist:
if os.path.exists(dst_path):
#TODO: replace with Toolkit dialog
answer = QtGui.QMessageBox.question(parent_ui, "Overwrite file?",
"The file\n\n%s\n\nalready exists. Would you like to overwrite it?" % (dst_path),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
if answer == QtGui.QMessageBox.Cancel:
return False
try:
# make sure that the folder exists - this will handle any leaf folders that aren't
# created by Toolkit (e.g. a dynamic static folder that isn't part of the schema)
dst_dir = os.path.dirname(dst_path)
self._app.ensure_folder_exists(dst_dir)
# copy file:
self._copy_file(src_path, dst_path)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Copy file failed!",
"Copy of file failed!\n\n%s!" % e)
self._app.log_exception("Copy file failed")
return False
# switch context:
previous_context = self._app.context
        if new_ctx != self._app.context:
try:
# Change the curent context.
FileAction.change_context(new_ctx)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to change the work area",
"Failed to change the work area to '%s':\n\n%s\n\nUnable to continue!" % (new_ctx, e))
self._app.log_exception("Failed to change the work area to %s!" % new_ctx)
return False
# open file
try:
is_file_opened = open_file(self._app, OPEN_FILE_ACTION, new_ctx, dst_path, version, read_only)
except Exception, e:
QtGui.QMessageBox.critical(parent_ui, "Failed to open file",
"Failed to open file\n\n%s\n\n%s" % (dst_path, e))
self._app.log_exception("Failed to open file %s!" % dst_path)
FileAction.restore_context(parent_ui, previous_context)
return False
# Test specifically for False. Legacy open hooks return None, which means success.
if is_file_opened is False:
FileAction.restore_context(parent_ui, previous_context)
return False
try:
self._app.log_metric("Opened Workfile")
except:
# ignore all errors. ex: using a core that doesn't support metrics
pass
return True
class CopyAndOpenInCurrentWorkAreaAction(OpenFileAction):
"""
"""
def _open_in_current_work_area(self, src_path, src_template, file, src_work_area, parent_ui):
"""
"""
# get info about the current work area:
app = sgtk.platform.current_bundle()
# no need to try/except this WorkArea object creation, since if we're here it means the
# context is fully configured.
dst_work_area = WorkArea(app.context)
if not dst_work_area.work_template:
# should never happen!
app.log_error("Unable to copy the file '%s' to the current work area as no valid "
"work template could be found" % src_path)
return False
# determine the set of fields for the destination file in the current work area:
#
# get fields from file path using the source work template:
fields = src_template.get_fields(src_path)
# get the template fields for the current context using the current work template:
context_fields = dst_work_area.context.as_template_fields(dst_work_area.work_template)
        # this will override any context fields obtained from the source path:
fields.update(context_fields)
# get the sandbox user name if there is one:
sandbox_user_name = None
if (src_work_area and src_work_area.contains_user_sandboxes
and src_work_area.context and src_work_area.context.user and g_user_cache.current_user
and src_work_area.context.user["id"] != g_user_cache.current_user["id"]):
sandbox_user_name = src_work_area.context.user.get("name", "Unknown")
src_version = None
dst_version = None
if "version" in dst_work_area.work_template.keys:
# need to figure out the next version:
src_version = fields["version"]
# build a file key from the fields:
file_key = FileItem.build_file_key(fields,
dst_work_area.work_template,
dst_work_area.version_compare_ignore_fields)
# look for all files that match this key:
finder = FileFinder()
found_files = finder.find_files(dst_work_area.work_template,
dst_work_area.publish_template,
dst_work_area.context,
file_key)
# get the max version:
versions = [file.version for file in found_files]
dst_version = (max(versions or [0]) + 1)
fields["version"] = dst_version
# confirm we should copy and open the file:
msg = "'%s" % file.name
if src_version:
msg += ", v%03d" % src_version
msg += "'"
if sandbox_user_name is not None:
msg += " is in %s's Work Area (%s)." % (sandbox_user_name, src_work_area.context)
else:
msg += " is in a different Work Area (%s)." % (src_work_area.context)
msg += ("\n\nWould you like to copy the file to your current Work Area (%s)" % (dst_work_area.context))
if dst_version:
msg += " as version v%03d" % dst_version
msg += " and open it from there?"
answer = QtGui.QMessageBox.question(parent_ui, "Open file in current Work Area?", msg,
QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
if answer != QtGui.QMessageBox.Yes:
return False
# build the destination path from the fields:
dst_file_path = ""
try:
dst_file_path = dst_work_area.work_template.apply_fields(fields)
except TankError, e:
app.log_error("Unable to copy the file '%s' to the current work area as Toolkit is "
"unable to build the destination file path: %s" % (src_path, e))
return False
# copy and open the file:
        return self._do_copy_and_open(src_path,
                                      dst_file_path,
                                      version=None,
                                      read_only=False,
                                      new_ctx=dst_work_area.context,
                                      parent_ui=parent_ui)
class ContinueFromFileAction(OpenFileAction):
"""
"""
def __init__(self, label, file, file_versions, environment):
"""
"""
# Q. should the next version include the current version?
all_versions = [v for v, f in file_versions.iteritems()] + [file.version]
max_version = max(all_versions)
        self._version = max_version + 1
label = "%s (as v%03d)" % (label, self._version)
OpenFileAction.__init__(self, label, file, file_versions, environment)
def _continue_from(self, src_path, src_template, parent_ui):
"""
"""
# get the destination work area for the current user:
dst_work_area = self.environment.create_copy_for_user(g_user_cache.current_user)
app = sgtk.platform.current_bundle()
if not dst_work_area.work_template:
# should never happen!
app.log_error("Unable to copy the file '%s' to the current work area as no valid "
"work template could be found" % src_path)
return False
# build dst path for the next version of this file:
fields = src_template.get_fields(src_path)
# get the template fields for the current context using the current work template:
context_fields = dst_work_area.context.as_template_fields(dst_work_area.work_template)
        # this will override any context fields obtained from the source path:
fields.update(context_fields)
# update version:
fields["version"] = self._version
# build the destination path:
dst_path = dst_work_area.work_template.apply_fields(fields)
# copy and open the file:
return self._do_copy_and_open(src_path, dst_path, None, not self.file.editable,
dst_work_area.context, parent_ui)
```
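Both actions above compute the next version number the same way: gather the existing versions and take the maximum plus one. In isolation (plain ints stand in for FileItem.version):
```python
def next_version(existing_versions):
    # mirrors the "max(versions or [0]) + 1" expression used in the actions above;
    # an empty list yields version 1.
    return max(existing_versions or [0]) + 1

assert next_version([]) == 1
assert next_version([1, 2, 5]) == 6
```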
#### File: v0.7.4/hooks/scene_operation_tk-nuke.py
```python
import os
import nuke
import tank
from tank import Hook
from tank import TankError
from tank.platform.qt import QtGui
class SceneOperation(Hook):
"""
Hook called to perform an operation with the current scene.
"""
def execute(self, *args, **kwargs):
"""
Main hook entry point
:param operation: String
Scene operation to perform
:param file_path: String
File path to use if the operation
requires it (e.g. open)
:param context: Context
The context the file operation is being
performed in.
:param parent_action: This is the action that this scene operation is
being executed for. This can be one of:
- open_file
- new_file
- save_file_as
- version_up
:param file_version: The version/revision of the file to be opened. If this is 'None'
then the latest version should be opened.
:param read_only: Specifies if the file should be opened read-only or not
:returns: Depends on operation:
'current_path' - Return the current scene
file path as a String
'reset' - True if scene was reset to an empty
state, otherwise False
all others - None
"""
engine = self.parent.engine
if hasattr(engine, "hiero_enabled") and engine.hiero_enabled:
return self._hiero_execute(*args, **kwargs)
elif hasattr(engine, "studio_enabled") and engine.studio_enabled:
return self._studio_execute(*args, **kwargs)
else:
return self._nuke_execute(*args, **kwargs)
def _studio_execute(self, operation, file_path, context, parent_action, file_version, read_only, **kwargs):
"""
The Nuke Studio specific scene operations.
"""
# Out of the box, we treat Nuke Studio just like Hiero, so we
# can just call through.
return self._hiero_execute(
operation,
file_path,
context,
parent_action,
file_version,
read_only,
**kwargs
)
def _hiero_execute(self, operation, file_path, context, parent_action, file_version, read_only, **kwargs):
"""
The Hiero specific scene operations.
"""
import hiero
if operation == "current_path":
# return the current script path
project = self._get_current_project()
curr_path = project.path().replace("/", os.path.sep)
return curr_path
elif operation == "open":
# Manually fire the kBeforeProjectLoad event in order to work around a bug in Hiero.
# The Foundry has logged this bug as:
# Bug 40413 - Python API - kBeforeProjectLoad event type is not triggered
# when calling hiero.core.openProject() (only triggered through UI)
# It exists in all versions of Hiero through (at least) v1.9v1b12.
#
# Once this bug is fixed, a version check will need to be added here in order to
# prevent accidentally firing this event twice. The following commented-out code
# is just an example, and will need to be updated when the bug is fixed to catch the
# correct versions.
            # if (hiero.core.env['VersionMajor'] < 1 or
            #         hiero.core.env['VersionMajor'] == 1 and hiero.core.env['VersionMinor'] < 10):
hiero.core.events.sendEvent("kBeforeProjectLoad", None)
# open the specified script
hiero.core.openProject(file_path.replace(os.path.sep, "/"))
elif operation == "save":
# save the current script:
project = self._get_current_project()
project.save()
elif operation == "save_as":
project = self._get_current_project()
project.saveAs(file_path.replace(os.path.sep, "/"))
elif operation == "reset":
# do nothing and indicate scene was reset to empty
return True
elif operation == "prepare_new":
# add a new project to hiero
hiero.core.newProject()
def _nuke_execute(self, operation, file_path, context, parent_action, file_version, read_only, **kwargs):
"""
The Nuke specific scene operations.
"""
if file_path:
file_path = file_path.replace("/", os.path.sep)
if operation == "current_path":
# return the current script path
return nuke.root().name().replace("/", os.path.sep)
elif operation == "open":
# open the specified script
nuke.scriptOpen(file_path)
# reset any write node render paths:
if self._reset_write_node_render_paths():
# something changed so make sure to save the script again:
nuke.scriptSave()
elif operation == "save":
# save the current script:
nuke.scriptSave()
elif operation == "save_as":
old_path = nuke.root()["name"].value()
try:
# rename script:
nuke.root()["name"].setValue(file_path)
# reset all write nodes:
self._reset_write_node_render_paths()
# save script:
nuke.scriptSaveAs(file_path, -1)
except Exception, e:
# something went wrong so reset to old path:
nuke.root()["name"].setValue(old_path)
raise TankError("Failed to save scene %s", e)
elif operation == "reset":
"""
Reset the scene to an empty state
"""
while nuke.root().modified():
# changes have been made to the scene
res = QtGui.QMessageBox.question(None,
"Save your script?",
"Your script has unsaved changes. Save before proceeding?",
QtGui.QMessageBox.Yes|QtGui.QMessageBox.No|QtGui.QMessageBox.Cancel)
if res == QtGui.QMessageBox.Cancel:
return False
elif res == QtGui.QMessageBox.No:
break
else:
nuke.scriptSave()
# now clear the script:
nuke.scriptClear()
return True
def _get_current_project(self):
"""
Returns the current project based on where in the UI the user clicked
"""
import hiero
# get the menu selection from hiero engine
selection = self.parent.engine.get_menu_selection()
if len(selection) != 1:
raise TankError("Please select a single Project!")
        if not isinstance(selection[0], hiero.core.Bin):
raise TankError("Please select a Hiero Project!")
project = selection[0].project()
if project is None:
# apparently bins can be without projects (child bins I think)
raise TankError("Please select a Hiero Project!")
return project
def _reset_write_node_render_paths(self):
"""
Use the tk-nuke-writenode app interface to find and reset
the render path of any Tank write nodes in the current script
"""
write_node_app = self.parent.engine.apps.get("tk-nuke-writenode")
if not write_node_app:
return False
        # only need to forcibly reset the write node render paths if the app version
# is less than or equal to v0.1.11
from distutils.version import LooseVersion
if (write_node_app.version == "Undefined"
or LooseVersion(write_node_app.version) > LooseVersion("v0.1.11")):
return False
write_nodes = write_node_app.get_write_nodes()
for write_node in write_nodes:
write_node_app.reset_node_render_path(write_node)
return len(write_nodes) > 0
```
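The hook above follows a simple contract spelled out in its docstring: `"current_path"` returns a string, `"reset"` returns a bool, and every other operation returns None. A pure-Python toy that honours the same contract (no Nuke required; the class and paths are invented for illustration):
```python
# Toy stand-in honouring the scene-operation contract described above:
# "current_path" -> str, "reset" -> bool, everything else -> None.
class ToySceneOperation(object):
    def __init__(self):
        self._path = "/tmp/shot_v001.nk"  # invented path

    def execute(self, operation, file_path=None, **kwargs):
        if operation == "current_path":
            return self._path
        elif operation in ("open", "save_as"):
            self._path = file_path
        elif operation == "reset":
            self._path = ""
            return True

op = ToySceneOperation()
print(op.execute("current_path"))                     # /tmp/shot_v001.nk
op.execute("save_as", file_path="/tmp/shot_v002.nk")
print(op.execute("current_path"))                     # /tmp/shot_v002.nk
print(op.execute("reset"))                            # True
```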
#### File: python/tk_multi_workfiles/change_version_form.py
```python
import tank
from tank.platform.qt import QtCore, QtGui
class ChangeVersionForm(QtGui.QWidget):
"""
UI for changing the version of the current work file
"""
@property
def exit_code(self):
return self._exit_code
def __init__(self, current_version, new_version, parent = None):
"""
Construction
"""
QtGui.QWidget.__init__(self, parent)
self._exit_code = QtGui.QDialog.Rejected
# set up the UI
from .ui.change_version_form import Ui_ChangeVersionForm
self._ui = Ui_ChangeVersionForm()
self._ui.setupUi(self)
self._ui.cancel_btn.clicked.connect(self._on_cancel)
self._ui.change_version_btn.clicked.connect(self._on_change_version)
self._ui.new_version_edit.setValidator(QtGui.QIntValidator(0, 99999, self))
self._ui.current_version_label.setText("v%03d" % current_version)
self._ui.new_version_edit.setText("%d" % new_version)
self._ui.new_version_edit.selectAll()
# initialize line to be plain and the same colour as the text:
self._ui.break_line.setFrameShadow(QtGui.QFrame.Plain)
clr = QtGui.QApplication.palette().text().color()
self._ui.break_line.setStyleSheet("#break_line{color: rgb(%d,%d,%d);}" % (clr.red() * 0.75, clr.green() * 0.75, clr.blue() * 0.75))
self.setFocusProxy(self._ui.new_version_edit)
@property
def new_version(self):
"""
Get the new version
"""
return self._get_new_version()
def _on_cancel(self):
"""
Called when the cancel button is clicked
"""
self._exit_code = QtGui.QDialog.Rejected
self.close()
def _on_change_version(self):
"""
Called when the change version button is clicked
"""
self._exit_code = QtGui.QDialog.Accepted
self.close()
def _get_new_version(self):
"""
Get the new version from the UI
"""
new_version = -1
try:
new_version = int(self._ui.new_version_edit.text())
except ValueError:
pass
return new_version
```
#### File: python/tk_multi_workfiles/file_list_view.py
```python
from operator import itemgetter
from datetime import datetime, timedelta
from pprint import pprint
import tank
from tank.platform.qt import QtCore, QtGui
browser_widget = tank.platform.import_framework("tk-framework-widget", "browser_widget")
from .file_item_form import FileItemForm
from .file_item import FileItem
from .file_filter import FileFilter
class FileListView(browser_widget.BrowserWidget):
# signals - note, 'object' is used to avoid
# issues with PyQt when None is passed as PyQt
# doesn't allow None to be passed to classes
# other than object (an exception is raised)
open_previous_workfile = QtCore.Signal(object)#FileItem
open_previous_publish = QtCore.Signal(object)#FileItem
view_in_shotgun = QtCore.Signal(object)#FileItem
NO_TASK_NAME = "No Task"
def __init__(self, parent=None):
"""
Construction
"""
browser_widget.BrowserWidget.__init__(self, parent)
self._current_filter = {}
# tweak style
self.title_style = "none"
self._update_title()
@property
def selected_published_file(self):
selected_item = self.get_selected_item()
if selected_item:
return selected_item.published_file
return None
@property
def selected_work_file(self):
selected_item = self.get_selected_item()
if selected_item:
return selected_item.work_file
return None
# Enable to force all work to be done in the main thread
# which can help when debugging
# IMPORTANT - set this to False before releasing!!!
    DEBUG_GET_DATA_IN_MAIN_THREAD = False
def get_data(self, data):
"""
Called by browser widget in worker thread to query the list
of files to display for the specified context
"""
if FileListView.DEBUG_GET_DATA_IN_MAIN_THREAD:
# debug only - _get_data will be called first in
# process_result which runs in the main thread
return data
else:
return self._get_data(data)
def _get_data(self, data):
"""
Retrieve the list of files to display as well as the various display and grouping options required
to build the file list.
:param data: Dictionary containing:
handler - A 'WorkFiles' instance containing the main application business logic
filter - The current 'FileFilter' instance being applied to the list
:returns: Dictionary containing the various display & grouping options required to build the
file list as well as the list of files organised by task.
"""
result = {"task_groups":{}, "task_name_order":{}}
handler = data["handler"]
filter = data.get("filter")
mode = filter.mode
# get some additional info from the handler:
ctx = handler.get_current_work_area()
result["can_do_new_file"] = handler.can_do_new_file()
result["have_valid_workarea"] = (ctx and (ctx.entity or ctx.project))
result["have_valid_configuration"] = handler.have_valid_configuration_for_work_area()
result["current_task_name"] = ctx.task.get("name") if ctx and ctx.task else None
result["can_change_work_area"] = handler.can_change_work_area()
result["filter"] = filter
result["task_order"] = []
if result["have_valid_workarea"] and result["have_valid_configuration"]:
# get the list of files from the handler:
files = handler.find_files(filter)
# re-pivot this list of files ready to display:
#
# builds the following structure
# { task_name : { (file)name : { "files" : { 1:file,2:file, ... }, "thumbnail" : path, ... } } }
task_groups = {}
for file in files:
# first level is task group
task_name = file.task.get("name") if file.task else FileListView.NO_TASK_NAME
task_group = task_groups.setdefault(task_name, dict())
# next level is name:
name_group = task_group.setdefault(file.name, dict())
# finally, add file to files:
file_versions = name_group.setdefault("files", dict())
file_versions[file.version] = file
# do some pre-processing of file groups:
filtered_task_groups = {}
task_modified_pairs = []
task_name_order = {}
for task, name_groups in task_groups.iteritems():
name_modified_pairs = []
filtered_name_groups = {}
for name, details in name_groups.iteritems():
files_versions = details["files"]
# find highest version info:
local_versions = [f.version for f in files_versions.values() if f.is_local]
if mode == FileFilter.WORKFILES_MODE and not local_versions:
# don't have a version of this file to display!
continue
publish_versions = [f.version for f in files_versions.values() if f.is_published]
if mode == FileFilter.PUBLISHES_MODE and not publish_versions:
# don't have a version of this file to display!
continue
highest_local_version = -1
if local_versions:
highest_local_version = max(local_versions)
details["highest_local_file"] = files_versions[highest_local_version]
highest_publish_version = -1
if publish_versions:
highest_publish_version = max(publish_versions)
details["highest_publish_file"] = files_versions[highest_publish_version]
# find thumbnail to use:
sorted_versions = sorted(files_versions.keys(), reverse=True)
thumbnail = None
for version in sorted_versions:
# skip any versions that are greater than the one we are looking for
# Note: we shouldn't choose a thumbnail for versions that aren't
# going to be displayed so filter these out
if ((mode == FileFilter.WORKFILES_MODE and version > highest_local_version)
or (mode == FileFilter.PUBLISHES_MODE and version > highest_publish_version)):
continue
thumbnail = files_versions[version].thumbnail
if thumbnail:
# special case - update the thumbnail!
if mode == FileFilter.WORKFILES_MODE and version < highest_local_version:
files_versions[highest_local_version].set_thumbnail(thumbnail)
break
details["thumbnail"] = thumbnail
# update group with details:
filtered_name_groups[name] = details
# determine when this file was last updated (modified or published)
# this is used to sort the files in the list:
last_updated = None
if mode == FileFilter.WORKFILES_MODE and highest_local_version >= 0:
last_updated = files_versions[highest_local_version].modified_at
if highest_publish_version >= 0:
published_at = files_versions[highest_publish_version].published_at
last_updated = max(last_updated, published_at) if last_updated else published_at
name_modified_pairs.append((name, last_updated))
if not filtered_name_groups:
# everything in this group was filtered out!
continue
filtered_task_groups[task] = filtered_name_groups
# sort names in reverse order of modified date:
name_modified_pairs.sort(key=itemgetter(1), reverse=True)
task_name_order[task] = [n for (n, _) in name_modified_pairs]
task_modified_pairs.append((task, max([m for (_, m) in name_modified_pairs])))
# sort tasks in reverse order of modified date:
task_modified_pairs.sort(key=itemgetter(1), reverse=True)
task_order = [n for (n, _) in task_modified_pairs]
result["task_groups"] = filtered_task_groups
result["task_name_order"] = task_name_order
result["task_order"] = task_order
return result
def process_result(self, result):
"""
Process list of tasks retrieved by get_data on the main thread
:param result: Dictionary containing the various display & grouping options required to build the
file list as well as the list of files organised by task.
"""
if FileListView.DEBUG_GET_DATA_IN_MAIN_THREAD:
# gathering of data was not done in the get_data stage so we
# should do it here instead - this method gets called in the
# main thread
result = self._get_data(result)
task_groups = result["task_groups"]
task_name_order = result["task_name_order"]
task_order = result["task_order"]
current_task_name = result["current_task_name"]
self._current_filter = result["filter"]
self._update_title()
if not task_groups:
# build a useful error message using the info we have available:
msg = ""
if not result["can_change_work_area"]:
if not result["have_valid_workarea"]:
msg = "The current Work Area is not valid!"
elif not result["have_valid_configuration"]:
msg = ("Shotgun File Manager has not been configured for the environment "
"being used by the selected Work Area!")
elif not result["can_do_new_file"]:
msg = "Couldn't find any files in this Work Area!"
else:
msg = "Couldn't find any files!\nClick the New file button to start work."
else:
if not result["have_valid_workarea"]:
msg = "The current Work Area is not valid!"
elif not result["have_valid_configuration"]:
msg = ("Shotgun File Manager has not been configured for the environment "
"being used by the selected Work Area!\n"
"Please choose a different Work Area to continue.")
elif not result["can_do_new_file"]:
msg = "Couldn't find any files in this Work Area!\nTry selecting a different Work Area."
else:
msg = "Couldn't find any files!\nClick the New file button to start work."
self.set_message(msg)
return
for task_name in task_order:
name_groups = task_groups[task_name]
if (len(task_groups) > 1
or (task_name != current_task_name
and task_name != FileListView.NO_TASK_NAME
                    and current_task_name is None)):
# add header for task:
h = self.add_item(browser_widget.ListHeader)
h.set_title("%s" % (task_name))
ordered_names = task_name_order[task_name]
for name in ordered_names:
details = name_groups[name]
files = details["files"]
highest_local_file = details.get("highest_local_file")
highest_publish_file = details.get("highest_publish_file")
thumbnail = details["thumbnail"]
# add new item to list:
item = self._add_file_item(highest_publish_file, highest_local_file)
if not item:
continue
# set thumbnail if have one:
if thumbnail:
item.set_thumbnail(thumbnail)
# add context menu:
item.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
# if it's a publish then add 'View In Shotgun' item:
if highest_publish_file:
action = QtGui.QAction("View latest Publish in Shotgun", item)
# (AD) - the '[()]' syntax in action.triggered[()].connect looks weird right!
# 'triggered' is a QtCore.SignalInstance which actually defines multiple
# signals: triggered() & triggered(bool). PySide will correctly determine which
# one to use but PyQt gets confused and calls the (bool) version instead which
# causes problems for us... Luckily, Qt lets us use the argument list () to
# index into the SignalInstance object to force the use of the non-bool version - yay!
action.triggered[()].connect(lambda f=highest_publish_file: self._on_show_in_shotgun_action_triggered(f))
item.addAction(action)
# build context menu for all publish versions:
published_versions = [f.version for f in files.values() if f.is_published and isinstance(f.version, int)]
if published_versions:
published_versions.sort(reverse=True)
publishes_action = QtGui.QAction("Open Publish Read-Only", item)
publishes_sm = QtGui.QMenu(item)
publishes_action.setMenu(publishes_sm)
item.addAction(publishes_action)
for v in published_versions[:20]:
f = files[v]
msg = ("v%03d" % f.version)
action = QtGui.QAction(msg, publishes_sm)
# see above for explanation of [()] syntax in action.triggered[()].connect...
action.triggered[()].connect(lambda f=f: self._on_open_publish_action_triggered(f))
publishes_sm.addAction(action)
# build context menu for all work files:
wf_versions = [f.version for f in files.values() if f.is_local and isinstance(f.version, int)]
if wf_versions:
wf_versions.sort(reverse=True)
wf_action = QtGui.QAction("Open Work File", item)
wf_sm = QtGui.QMenu(item)
wf_action.setMenu(wf_sm)
item.addAction(wf_action)
for v in wf_versions[:20]:
f = files[v]
msg = ("v%03d" % f.version)
action = QtGui.QAction(msg, wf_sm)
# see above for explanation of [()] syntax in action.triggered[()].connect...
action.triggered[()].connect(lambda f=f: self._on_open_workfile_action_triggered(f))
wf_sm.addAction(action)
def _update_title(self):
"""
Update the list title depending on the mode
"""
if not self._current_filter:
return
self.set_label(self._current_filter.list_title)
def _add_file_item(self, latest_published_file, latest_work_file):
"""
Add an item to the file list given the latest publish & work files
:param latest_published_file: The latest published version of the file to be added
:param latest_work_file: The latest work/local version of the file to be added
"""
details = ""
tooltip = ""
# colours for item titles:
red = "rgb(200, 84, 74)"
green = "rgb(145, 206, 95)"
current_mode = self._current_filter.mode
file = None
editable = True
not_editable_reason = ""
if current_mode == FileFilter.WORKFILES_MODE:
file = latest_work_file
title_colour = None
if latest_published_file:
if file.compare_with_publish(latest_published_file) >= 0:
# work file is most recent
title_colour = green
tooltip += "This is the latest version of this file"
else:
# published file is most recent
title_colour = red
tooltip += "<b>A more recent published version of this file is available:</b>"
tooltip += "<br>"
tooltip += ("<br><b>Version v%03d</b>" % latest_published_file.version)
tooltip += "<br>" + latest_published_file.format_published_by_details()
tooltip += "<br>"
tooltip += "<br><b>Description:</b>"
tooltip += "<br>" + latest_published_file.format_publish_description()
else:
tooltip += "This file has never been published"
if file.version is not None:
details = "<b>%s, v%03d</b>" % (file.name, file.version)
else:
details = "<b>%s</b>" % (file.name)
if title_colour:
details = "<span style='color:%s'>%s</span>" % (title_colour, details)
details += "<br>" + file.format_modified_by_details()
editable = file.editable
not_editable_reason = file.not_editable_reason
elif current_mode == FileFilter.PUBLISHES_MODE:
file = latest_published_file
title_colour = None
tooltip += "<b>Description:</b>"
tooltip += "<br>" + file.format_publish_description()
tooltip += "<hr>"
if latest_work_file:
if latest_work_file.compare_with_publish(file) <= 0:
# published file is most recent
title_colour = green
tooltip += "This is the latest version of this file"
else:
# work file is most recent
#title_colour = red
tooltip += "<b>A more recent version of this file was found in your work area:</b>"
tooltip += "<br>"
#tooltip += "<br><b>Details:</b>"
tooltip += ("<br><b>Version v%03d</b>" % latest_work_file.version)
tooltip += "<br>" + latest_work_file.format_modified_by_details()
else:
title_colour = green
tooltip += "This is the latest version of this file"
details = "<b>%s, v%03d</b>" % (file.name, file.version)
if title_colour:
details = "<span style='color:%s'>%s</span>" % (title_colour, details)
details += "<br>" + file.format_published_by_details()
editable = file.editable
not_editable_reason = file.not_editable_reason
else:
raise TankError("Display mode is not recognised!")
# update editable info on the tooltip
if not editable:
tooltip += "<hr>"
tooltip += "Read-only: " + not_editable_reason
# add item:
item = self.add_item(FileItemForm)
item.published_file = latest_published_file
item.work_file = latest_work_file
# set tool tip
item.setToolTip(tooltip)
# build and set details string:
item.set_details(details)
item.set_is_editable(editable, not_editable_reason)
return item
def _on_open_workfile_action_triggered(self, file):
"""
Open action triggered from context menu
"""
self.open_previous_workfile.emit(file)
def _on_open_publish_action_triggered(self, file):
"""
Open action triggered from context menu
"""
self.open_previous_publish.emit(file)
def _on_show_in_shotgun_action_triggered(self, file):
"""
Show in Shotgun action triggered from context menu
"""
self.view_in_shotgun.emit(file)
```
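The heart of `_get_data()` above is the re-pivot from a flat file list into `{task_name: {file_name: {"files": {version: file}}}}`, from which the highest local version per name is then picked. A standalone sketch of that structure with toy data in place of the real `FileItem` objects:
```python
# Standalone sketch of the re-pivot performed in _get_data() above.
from collections import namedtuple

File = namedtuple("File", "task name version is_local")

files = [
    File("Comp", "sceneA", 1, True),
    File("Comp", "sceneA", 2, True),
    File("Light", "sceneB", 1, True),
]

# first level is task, next level is name, then versioned files:
task_groups = {}
for f in files:
    name_group = task_groups.setdefault(f.task, {}).setdefault(f.name, {})
    name_group.setdefault("files", {})[f.version] = f

# pick the highest local version per name, as the view does:
for task, name_groups in task_groups.items():
    for name, details in name_groups.items():
        versions = [f.version for f in details["files"].values() if f.is_local]
        details["highest_local_file"] = details["files"][max(versions)]

print(task_groups["Comp"]["sceneA"]["highest_local_file"].version)  # 2
```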
#### File: python/tk_multi_workfiles/save_as.py
```python
import os
from itertools import chain
import tank
from tank.platform.qt import QtCore, QtGui
from tank import TankError
from .async_worker import AsyncWorker
from .scene_operation import get_current_path, save_file, SAVE_FILE_AS_ACTION
from .find_files import FileFinder
from .file_item import FileItem
class SaveAs(object):
"""
Functionality for performing Shotgun Save-As operations on the current scene. This contains
commands that will show the Save-As UI as well as the commands that can be used to perform
the save operation.
"""
@staticmethod
def show_save_as_dlg(app):
"""
Show the save-as dialog
:param app: The instance of the workfiles app that this method is called from/for
"""
handler = SaveAs(app)
handler._show_save_as_dlg()
def __init__(self, app):
"""
Construction
"""
self._app = app
self._work_template = self._app.get_template("template_work")
self._publish_template = self._app.get_template("template_publish")
self._cached_files = None
# cache any fields that should be ignored when looking for work files:
self.__version_compare_ignore_fields = self._app.get_setting("version_compare_ignore_fields", [])
def _show_save_as_dlg(self):
"""
Show the save as dialog
"""
# get the current file path:
try:
current_path = get_current_path(self._app, SAVE_FILE_AS_ACTION, self._app.context)
except Exception, e:
msg = ("Failed to get the current file path:\n\n"
"%s\n\n"
"Unable to continue!" % e)
QtGui.QMessageBox.critical(None, "Save As Error!", msg)
self._app.log_exception("Failed to get the current file path")
return
# determine if this is a publish path or not:
is_publish = self._publish_template.validate(current_path) and self._publish_template != self._work_template
# see if name is used in the work template:
name_is_used = "name" in self._work_template.keys
name_is_optional = name_is_used and self._work_template.is_optional("name")
# see if version is used in the work template:
version_is_used = "version" in self._work_template.keys
# update some initial info:
title = "Save to Work Area" if is_publish else "Shotgun Save As"
name = ""
if name_is_used:
if is_publish:
fields = self._publish_template.get_fields(current_path)
name = fields.get("name")
else:
# get the default name from settings:
default_name = self._app.get_setting("saveas_default_name")
if not default_name and not name_is_optional:
# name isn't optional so we should use something:
default_name = "scene"
# determine the initial name depending on the current path:
fields = {}
if self._work_template.validate(current_path):
fields = self._work_template.get_fields(current_path)
name = fields.get("name")
if not name and not name_is_optional:
name = default_name
else:
fields = self._app.context.as_template_fields(self._work_template)
name = default_name
# see if versioning up is preferred - if it is then the version will be incremented instead
# of appending/incrementing a number as a suffix on the name if a file with the same name
# already exists.
prefer_version_up = version_is_used and self._app.get_setting("saveas_prefer_version_up")
if name and not prefer_version_up:
# default is to not version-up so lets make sure we
# at least start with a unique name!
try:
# make sure the work file name doesn't already exist:
# note, this could potentially be slow so for now lets
# limit it:
# split name into alpha and numeric parts so that we can
# increment the numeric part in order to find a unique name
name_alpha = name.rstrip("0123456789")
name_num_str = name[len(name_alpha):] or "0"
name_num = int(name_num_str)
name_format_str = "%s%%0%dd" % (name_alpha, len(name_num_str))
counter_limit = 10
for counter in range(0, counter_limit):
test_name = name
if counter > 0:
# build new name
test_name = name_format_str % (name_num+counter)
test_fields = fields.copy()
test_fields["name"] = test_name
existing_files = self._app.tank.paths_from_template(self._work_template, test_fields, ["version"])
if not existing_files:
name = test_name
break
except TankError, e:
# this shouldn't be fatal so just log a debug message:
self._app.log_debug("Warning - failed to find a default name for Shotgun Save-As: %s" % e)
worker_cb = (lambda details, wp=current_path, ip=is_publish:
self.generate_new_work_file_path(wp, ip, details.get("name"), details.get("reset_version")))
try:
preview_updater = AsyncWorker(worker_cb)
preview_updater.start()
while True:
# reset cached files just in case something has changed:
self._cached_files = None
# show modal dialog:
from .save_as_form import SaveAsForm
(res, form) = self._app.engine.show_modal(title, self._app, SaveAsForm, preview_updater,
is_publish, name_is_used, name, version_is_used)
if res == QtGui.QDialog.Accepted:
# get details from UI:
name = form.name
reset_version = form.reset_version
try:
details = self.generate_new_work_file_path(current_path, is_publish, name, reset_version, require_path=True)
except TankError, e:
QtGui.QMessageBox.critical(None, "Failed to save file!", "Failed to save file:\n\n%s" % str(e))
self._app.log_exception("Something went wrong while saving!")
continue
new_path = details.get("path")
msg = details.get("message")
if not new_path:
# something went wrong!
QtGui.QMessageBox.information(None, "Unable to Save", "Unable to Save!\n\n%s" % msg)
continue
# ok, so do save-as:
try:
self.save_as(new_path)
except Exception, e:
QtGui.QMessageBox.critical(None, "Failed to save file!", "Failed to save file:\n\n%s" % msg)
self._app.log_exception("Something went wrong while saving!")
# ok, all done or cancelled
break
finally:
preview_updater.stop()
def save_as(self, new_path):
"""
Do actual save-as of the current scene as the new path - assumes all validity checking has already
been done
:param new_path: The new path to save the current script/scene to
"""
# we used to always create folders but this seems unnecessary as the folders should have been
# created when the work area was set - either as part of the launch process or when switching
# work area within the app.
# To be on the safe side though, we'll check if the directory that the file is being saved in
# to exists and run create folders if it doesn't - this covers any potential edge cases where
# the Work area has been set without folder creation being run correctly.
dir = os.path.dirname(new_path)
if not dir or not os.path.exists(dir):
# work files always operates in some sort of context, either project, entity or task
ctx_entity = self._app.context.task or self._app.context.entity or self._app.context.project
self._app.log_debug("Creating folders for context %s" % self._app.context)
self._app.tank.create_filesystem_structure(ctx_entity.get("type"), ctx_entity.get("id"))
# finally, make sure that the folder exists - this will handle any leaf folders that aren't
# created above (e.g. a dynamic static folder that isn't part of the schema)
self._app.ensure_folder_exists(dir)
# and save the current file as the new path:
save_file(self._app, SAVE_FILE_AS_ACTION, self._app.context, new_path)
def generate_new_work_file_path(self, current_path, current_is_publish, new_name, reset_version, require_path=False):
"""
Generate a new work file path from the current path taking into
account existing work files and publishes.
"""
new_work_path = ""
msg = None
can_reset_version = False
has_name_field = "name" in self._work_template.keys
has_version_field = "version" in self._work_template.keys
# validate name:
if has_name_field:
if not self._work_template.is_optional("name") and not new_name:
msg = "You must enter a name!"
return {"message":msg}
if new_name and not self._work_template.keys["name"].validate(new_name):
msg = "Your filename contains illegal characters!"
return {"message":msg}
# build fields dictionary to use for the new path:
fields = {}
# start with fields from context:
fields = self._app.context.as_template_fields(self._work_template)
# add in any additional fields from current path:
base_template = self._publish_template if current_is_publish else self._work_template
if base_template.validate(current_path):
template_fields = base_template.get_fields(current_path)
fields = dict(chain(template_fields.iteritems(), fields.iteritems()))
else:
if has_version_field:
# just make sure there is a version
fields["version"] = 1
# keep track of the current name:
current_name = fields.get("name")
# update name field:
if new_name:
fields["name"] = new_name
else:
# clear the current name:
if "name" in fields:
del fields["name"]
# if we haven't cached the file list already, do it now:
if not self._cached_files:
finder = FileFinder(self._app)
self._cached_files = finder.find_files(self._work_template, self._publish_template, self._app.context, require_path=require_path)
# construct a file key that represents all versions of this publish/work file:
file_key = FileItem.build_file_key(fields, self._work_template,
self.__version_compare_ignore_fields + ["version"])
# find the max work file and publish versions:
work_versions = [f.version for f in self._cached_files if f.is_local and f.key == file_key]
max_work_version = max(work_versions) if work_versions else 0
publish_versions = [f.version for f in self._cached_files if f.is_published and f.key == file_key]
max_publish_version = max(publish_versions) if publish_versions else 0
max_version = max(max_work_version, max_publish_version)
if has_version_field:
# get the current version:
current_version = fields.get("version")
# now depending on what the source was
# and if the name has been changed:
new_version = None
if current_is_publish and ((not has_name_field) or new_name == current_name):
# we're ok to just copy publish across and version up
can_reset_version = False
new_version = max_version + 1 if max_version else 1
msg = None
else:
if max_version:
# already have a publish and/or work file
can_reset_version = False
new_version = max_version + 1
if max_work_version > max_publish_version:
if has_name_field:
msg = ("A work file with this name already exists. If you proceed, your file "
"will use the next available version number.")
else:
msg = ("A previous version of this work file already exists. If you proceed, "
"your file will use the next available version number.")
else:
if has_name_field:
msg = ("A publish file with this name already exists. If you proceed, your file "
"will use the next available version number.")
else:
msg = ("A published version of this file already exists. If you proceed, "
"your file will use the next available version number.")
else:
# don't have an existing version
can_reset_version = True
msg = ""
if reset_version:
new_version = 1
if new_version:
fields["version"] = new_version
else:
# handle when version isn't in the work template:
if max_work_version > 0 and max_work_version >= max_publish_version:
msg = "A file with this name already exists. If you proceed, the existing file will be overwritten."
elif max_publish_version:
msg = "A published version of this file already exists."
# create the new path
new_work_path = self._work_template.apply_fields(fields)
return {"path":new_work_path, "message":msg, "can_reset_version":can_reset_version}
```
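The unique-name search in `_show_save_as_dlg()` above is worth isolating: the candidate name is split into an alpha prefix and a zero-padded numeric suffix, and the suffix is incremented until a free name is found. A standalone sketch, with a plain set standing in for the `paths_from_template()` lookup:
```python
# Standalone sketch of the unique-name search above; "existing" is a toy
# stand-in for the paths_from_template() lookup.
def find_unique_name(name, existing, counter_limit=10):
    name_alpha = name.rstrip("0123456789")
    name_num_str = name[len(name_alpha):] or "0"
    name_num = int(name_num_str)
    fmt = "%s%%0%dd" % (name_alpha, len(name_num_str))  # e.g. "scene%02d"
    for counter in range(counter_limit):
        test_name = name if counter == 0 else fmt % (name_num + counter)
        if test_name not in existing:
            return test_name
    return name  # give up after counter_limit tries, mirroring the hook

print(find_unique_name("scene01", {"scene01", "scene02"}))  # scene03
```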
#### File: tk_multi_workfiles/ui/new_task.py
```python
from tank.platform.qt import QtCore, QtGui
class Ui_NewTask(object):
def setupUi(self, NewTask):
NewTask.setObjectName("NewTask")
NewTask.resize(451, 289)
self.verticalLayout = QtGui.QVBoxLayout(NewTask)
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtGui.QLabel(NewTask)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setHorizontalSpacing(20)
self.gridLayout.setObjectName("gridLayout")
self.label_4 = QtGui.QLabel(NewTask)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
self.entity = QtGui.QLabel(NewTask)
self.entity.setObjectName("entity")
self.gridLayout.addWidget(self.entity, 2, 1, 1, 1)
self.label_6 = QtGui.QLabel(NewTask)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 3, 0, 1, 1)
self.assigned_to = QtGui.QLabel(NewTask)
self.assigned_to.setObjectName("assigned_to")
self.gridLayout.addWidget(self.assigned_to, 3, 1, 1, 1)
self.label = QtGui.QLabel(NewTask)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 4, 0, 1, 1)
self.pipeline_step = QtGui.QComboBox(NewTask)
self.pipeline_step.setObjectName("pipeline_step")
self.gridLayout.addWidget(self.pipeline_step, 4, 1, 1, 1)
self.label_2 = QtGui.QLabel(NewTask)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 5, 0, 1, 1)
self.task_name = QtGui.QLineEdit(NewTask)
self.task_name.setObjectName("task_name")
self.gridLayout.addWidget(self.task_name, 5, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 19, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(NewTask)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(NewTask)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), NewTask.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), NewTask.reject)
QtCore.QMetaObject.connectSlotsByName(NewTask)
def retranslateUi(self, NewTask):
NewTask.setWindowTitle(QtGui.QApplication.translate("NewTask", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("NewTask", "<b><big>Create a new Task</big></b>\n"
"<br><br>\n"
"Type in a Task Name and select a Pipeline Step below.\n"
"<br><br>", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("NewTask", "Entity", None, QtGui.QApplication.UnicodeUTF8))
self.entity.setText(QtGui.QApplication.translate("NewTask", "Shot ABC 123", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("NewTask", "Assigned to", None, QtGui.QApplication.UnicodeUTF8))
self.assigned_to.setText(QtGui.QApplication.translate("NewTask", "Mr <NAME>", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("NewTask", "Pipeline Step", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("NewTask", "Task Name", None, QtGui.QApplication.UnicodeUTF8))
```
#### File: tk_multi_workfiles/ui/thumbnail_label.py
```python
import tank
from tank.platform.qt import QtGui, QtCore
class ThumbnailLabel(QtGui.QLabel):
"""
    Special case label that resizes any pixmap that gets set on it to a specific size. This
is duplicated from the tk-framework-widget browser_widget control
"""
def __init__(self, parent=None):
QtGui.QLabel.__init__(self, parent)
def setPixmap(self, pixmap):
# scale the pixmap down to fit
if pixmap.height() > 55 or pixmap.width() > 80:
            # scale it down to fit within 80x55
pixmap = pixmap.scaled( QtCore.QSize(80,55),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation)
QtGui.QLabel.setPixmap(self, pixmap)
```
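For reference, the aspect-preserving shrink that `Qt.KeepAspectRatio` performs in the override above reduces to a single scale factor. A Qt-free sketch of the same arithmetic for the 80x55 box:
```python
# Pure-Python sketch of the scaling done by setPixmap() above: shrink to fit
# inside an 80x55 box, preserving aspect ratio, never upscaling.
def fit_within(w, h, max_w=80, max_h=55):
    scale = min(max_w / float(w), max_h / float(h), 1.0)
    return int(w * scale), int(h * scale)

print(fit_within(160, 110))  # (80, 55)
print(fit_within(200, 55))   # (80, 22)
```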
#### File: python/tk_multi_workfiles/users.py
```python
import os
import sys
class UserCache(object):
"""
A cache of user information retrieved from Shotgun as needed
"""
def __init__(self, app):
"""
Construction
"""
self.__app = app
self.__user_details_by_login = {}
self.__user_details_by_id = {}
self.__sg_fields = ["id", "type", "email", "login", "name", "image"]
def get_user_details_for_id(self, id):
"""
Get the user details for the specified user entity id.
:param id: The entity id of the user whose details should be returned
:returns: A Shotgun entity dictionary for the user if found, otherwise {}
"""
return self.get_user_details_for_ids([id]).get(id)
def get_user_details_for_ids(self, ids):
"""
Get the user details for all users represented by the list of supplied entity ids
:param ids: The entity ids of the users whose details should be returned
:returns: A dictionary of id->Shotgun entity dictionary containing one entry
for each user requested. An empty dictionary will be returned for users
that couldn't be found!
"""
if not ids:
# nothing to look for!
return {}
# first, check for cached user info for ids:
user_details = {}
users_to_fetch = set()
for user_id in ids:
details = self.__user_details_by_id.get(user_id)
if details:
                user_details[user_id] = details
            elif details is None:
# never looked user up before so add to list to find:
users_to_fetch.add(user_id)
if users_to_fetch:
# get user details from shotgun:
sg_users = []
try:
sg_users = self.__app.shotgun.find("HumanUser", [["id", "in"] + list(users_to_fetch)], self.__sg_fields)
except:
sg_users = []
# add found users to look-ups:
users_found = set()
for sg_user in sg_users:
user_id = sg_user.get("id")
if user_id not in users_to_fetch:
continue
self.__user_details_by_id[user_id] = sg_user
self.__user_details_by_login[sg_user["login"]] = sg_user
users_found.add(user_id)
user_details[user_id] = sg_user
# and fill in any blanks so we don't bother searching again:
            for user_id in users_to_fetch:
                if user_id not in users_found:
                    # store empty dictionary to differentiate from 'None'
self.__user_details_by_id[user_id] = {}
user_details[user_id] = {}
return user_details
def get_file_last_modified_user(self, path):
"""
Get the user details of the last person to modify the specified file. Note, this currently
doesn't work on Windows as Windows doesn't provide this information as standard
:param path: The path to find the last modified user for
:returns: A Shotgun entity dictionary for the HumanUser that last modified the path
"""
login_name = None
if sys.platform == "win32":
# TODO: add windows support..
pass
else:
try:
from pwd import getpwuid
login_name = getpwuid(os.stat(path).st_uid).pw_name
except:
pass
if login_name:
return self.__get_user_details_for_login(login_name)
return None
def __get_user_details_for_login(self, login_name):
"""
Get the shotgun HumanUser entry for the specified login name
:param login_name: The login name of the user to find
:returns: A Shotgun entity dictionary for the HumanUser entity found
"""
# first look to see if we've already found it:
sg_user = self.__user_details_by_login.get(login_name)
if not sg_user:
try:
sg_user = self.__app.shotgun.find_one("HumanUser", [["login", "is", login_name]], self.__sg_fields)
except:
sg_user = {}
self.__user_details_by_login[login_name] = sg_user
if sg_user:
self.__user_details_by_id[sg_user["id"]] = sg_user
return sg_user
```
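The owner lookup in `get_file_last_modified_user()` boils down to mapping a file's `st_uid` through the pwd database, which only exists on POSIX platforms. A minimal standalone sketch with the same Windows and failure fallbacks:
```python
# Minimal POSIX-only sketch of the file-owner lookup used above: map the
# file's st_uid to a login name via the pwd database. Returns None on
# Windows or on any failure, as the cache method does.
import os
import sys

def file_owner_login(path):
    if sys.platform == "win32":
        return None  # st_uid is not meaningful on Windows
    try:
        from pwd import getpwuid
        return getpwuid(os.stat(path).st_uid).pw_name
    except Exception:
        return None

print(file_owner_login(__file__))
```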
#### File: classic_startup/restart/menu.py
```python
import nuke
import os
import sys
def handle_new_tank_session():
import tk_nuke
# Ensure the engine has been restarted before registering callback since
# callbacks registration depend on engine setting.
tk_nuke.tank_startup_node_callback()
tk_nuke.tank_ensure_callbacks_registered()
if not nuke.env.get("hiero"):
# now we need to add our callback module to the pythonpath manually.
# note! __file__ does not work for this file, so the engine passes
# down the engine's python folder location to us via an env var.
path = os.environ.get("TANK_NUKE_ENGINE_MOD_PATH")
if path:
sys.path.append(path)
handle_new_tank_session()
else:
nuke.error("Shotgun could not find the environment variable TANK_NUKE_ENGINE_MOD_PATH!")
```
#### File: tk-multi-publish2/basic/collector.py
```python
import os
import nuke
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
# A look up of node types to parameters for finding outputs to publish
_NUKE_OUTPUTS = {
"Write": "file",
"WriteGeo": "file",
}
class NukeSessionCollector(HookBaseClass):
"""
Collector that operates on the current nuke/nukestudio session. Should
inherit from the basic collector hook.
"""
@property
def settings(self):
"""
Dictionary defining the settings that this collector expects to receive
through the settings parameter in the process_current_session and
process_file methods.
        A dictionary of the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
                }
            }
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
# grab any base class settings
collector_settings = super(NukeSessionCollector, self).settings or {}
# settings specific to this collector
nuke_session_settings = {
"Work Template": {
"type": "template",
"default": None,
"description": "Template path for artist work files. Should "
"correspond to a template defined in "
"templates.yml. If configured, is made available"
"to publish plugins via the collected item's "
"properties. ",
},
}
# update the base settings with these settings
collector_settings.update(nuke_session_settings)
return collector_settings
def process_current_session(self, settings, parent_item):
"""
Analyzes the current session open in Nuke/NukeStudio and parents a
subtree of items under the parent_item passed in.
:param dict settings: Configured settings for this collector
:param parent_item: Root item instance
"""
publisher = self.parent
engine = publisher.engine
if ((hasattr(engine, "studio_enabled") and engine.studio_enabled) or
(hasattr(engine, "hiero_enabled") and engine.hiero_enabled)):
# running nuke studio or hiero
self.collect_current_nukestudio_session(settings, parent_item)
# since we're in NS, any additional collected outputs will be
# parented under the root item
project_item = parent_item
else:
# running nuke. ensure additional collected outputs are parented
# under the session
project_item = self.collect_current_nuke_session(settings,
parent_item)
# run node collection if not in hiero
if hasattr(engine, "hiero_enabled") and not engine.hiero_enabled:
self.collect_sg_writenodes(project_item)
self.collect_node_outputs(project_item)
def collect_current_nuke_session(self, settings, parent_item):
"""
Analyzes the current session open in Nuke and parents a subtree of items
under the parent_item passed in.
:param dict settings: Configured settings for this collector
:param parent_item: Root item instance
"""
publisher = self.parent
# get the current path
path = _session_path()
# determine the display name for the item
if path:
file_info = publisher.util.get_file_path_components(path)
display_name = file_info["filename"]
else:
display_name = "Current Nuke Session"
# create the session item for the publish hierarchy
session_item = parent_item.create_item(
"nuke.session",
"Nuke Script",
display_name
)
# get the icon path to display for this item
icon_path = os.path.join(
self.disk_location,
os.pardir,
"icons",
"nuke.png"
)
session_item.set_icon_from_path(icon_path)
# if a work template is defined, add it to the item properties so
# that it can be used by attached publish plugins
work_template_setting = settings.get("Work Template")
if work_template_setting:
work_template = publisher.engine.get_template_by_name(
work_template_setting.value)
# store the template on the item for use by publish plugins. we
# can't evaluate the fields here because there's no guarantee the
# current session path won't change once the item has been created.
# the attached publish plugins will need to resolve the fields at
# execution time.
session_item.properties["work_template"] = work_template
self.logger.debug("Work template defined for Nuke collection.")
self.logger.info("Collected current Nuke script")
return session_item
def collect_current_nukestudio_session(self, settings, parent_item):
"""
Analyzes the current session open in NukeStudio and parents a subtree of
items under the parent_item passed in.
:param dict settings: Configured settings for this collector
:param parent_item: Root item instance
"""
# import here since the hooks are imported into nuke and nukestudio.
# hiero module is only available in later versions of nuke
        import hiero.core
        import hiero.ui
publisher = self.parent
# go ahead and build the path to the icon for use by any projects
icon_path = os.path.join(
self.disk_location,
os.pardir,
"icons",
"nukestudio.png"
)
if hiero.ui.activeSequence():
active_project = hiero.ui.activeSequence().project()
else:
active_project = None
        # attempt to retrieve a configured work template. we can attach
# it to the collected project items
work_template_setting = settings.get("Work Template")
work_template = None
if work_template_setting:
work_template = publisher.engine.get_template_by_name(
work_template_setting.value)
# FIXME: begin temporary workaround
# we use different logic here only because we don't have proper support
# for multi context workflows when templates are in play. So if we have
# a work template configured, for now we'll only collect the current,
# active document. Once we have proper multi context support, we can
# remove this.
if work_template:
# same logic as the loop below but only processing the active doc
if not active_project:
return
project_item = parent_item.create_item(
"nukestudio.project",
"NukeStudio Project",
active_project.name()
)
self.logger.info(
"Collected Nuke Studio project: %s" % (active_project.name(),))
project_item.set_icon_from_path(icon_path)
project_item.properties["project"] = active_project
project_item.properties["work_template"] = work_template
self.logger.debug(
"Work template defined for NukeStudio collection.")
return
# FIXME: end temporary workaround
for project in hiero.core.projects():
# create the session item for the publish hierarchy
project_item = parent_item.create_item(
"nukestudio.project",
"NukeStudio Project",
project.name()
)
project_item.set_icon_from_path(icon_path)
# add the project object to the properties so that the publish
# plugins know which open project to associate with this item
project_item.properties["project"] = project
self.logger.info(
"Collected Nuke Studio project: %s" % (project.name(),))
# enable the active project and expand it. other projects are
# collapsed and disabled.
if active_project and active_project.guid() == project.guid():
project_item.expanded = True
project_item.checked = True
elif active_project:
# there is an active project, but this isn't it. collapse and
# disable this item
project_item.expanded = False
project_item.checked = False
# store the template on the item for use by publish plugins. we
# can't evaluate the fields here because there's no guarantee the
# current session path won't change once the item has been created.
# the attached publish plugins will need to resolve the fields at
# execution time.
if work_template:
project_item.properties["work_template"] = work_template
self.logger.debug(
"Work template defined for NukeStudio collection.")
def collect_node_outputs(self, parent_item):
"""
Scan known output node types in the session and see if they reference
files that have been written to disk.
:param parent_item: The parent item for any nodes collected
"""
# iterate over all the known output types
for node_type in _NUKE_OUTPUTS:
# get all the instances of the node type
all_nodes_of_type = [n for n in nuke.allNodes()
if n.Class() == node_type]
# iterate over each instance
for node in all_nodes_of_type:
param_name = _NUKE_OUTPUTS[node_type]
# evaluate the output path parameter which may include frame
# expressions/format
file_path = node[param_name].evaluate()
if not file_path or not os.path.exists(file_path):
# no file or file does not exist, nothing to do
continue
self.logger.info(
"Processing %s node: %s" % (node_type, node.name()))
# file exists, let the basic collector handle it
item = super(NukeSessionCollector, self)._collect_file(
parent_item,
file_path,
frame_sequence=True
)
# the item has been created. update the display name to include
# the nuke node to make it clear to the user how it was
# collected within the current session.
item.name = "%s (%s)" % (item.name, node.name())
def collect_sg_writenodes(self, parent_item):
"""
Collect any rendered sg write nodes in the session.
:param parent_item: The parent item for any sg write nodes collected
"""
publisher = self.parent
engine = publisher.engine
sg_writenode_app = engine.apps.get("tk-nuke-writenode")
if not sg_writenode_app:
self.logger.debug(
"The tk-nuke-writenode app is not installed. "
"Will not attempt to collect those nodes."
)
return
first_frame = int(nuke.root()["first_frame"].value())
last_frame = int(nuke.root()["last_frame"].value())
for node in sg_writenode_app.get_write_nodes():
# see if any frames have been rendered for this write node
rendered_files = sg_writenode_app.get_node_render_files(node)
if not rendered_files:
continue
# some files rendered, use first frame to get some publish item info
path = rendered_files[0]
item_info = super(NukeSessionCollector, self)._get_item_info(path)
# item_info will be for the single file. we'll update the type and
# display to represent a sequence. This is the same pattern used by
# the base collector for image sequences. We're not using the base
# collector to create the publish item though since we already have
# the sequence path, template knowledge provided by the
# tk-nuke-writenode app. The base collector makes some "zero config"
            # assumptions about the path that we don't need to make here.
item_type = "%s.sequence" % (item_info["item_type"],)
type_display = "%s Sequence" % (item_info["type_display"],)
# we'll publish the path with the frame/eye spec (%V, %04d)
publish_path = sg_writenode_app.get_node_render_path(node)
# construct publish name:
render_template = sg_writenode_app.get_node_render_template(node)
render_path_fields = render_template.get_fields(publish_path)
rp_name = render_path_fields.get("name")
rp_channel = render_path_fields.get("channel")
if not rp_name and not rp_channel:
publish_name = "Publish"
elif not rp_name:
publish_name = "Channel %s" % rp_channel
elif not rp_channel:
publish_name = rp_name
else:
publish_name = "%s, Channel %s" % (rp_name, rp_channel)
# get the version number from the render path
version_number = render_path_fields.get("version")
# use the path basename and nuke writenode name for display
(_, filename) = os.path.split(publish_path)
display_name = "%s (%s)" % (publish_name, node.name())
# create and populate the item
item = parent_item.create_item(
item_type, type_display, display_name)
item.set_icon_from_path(item_info["icon_path"])
            # if the supplied path is an image, use the path as the thumbnail.
item.set_thumbnail_from_path(path)
# disable thumbnail creation since we get it for free
item.thumbnail_enabled = False
# all we know about the file is its path. set the path in its
# properties for the plugins to use for processing.
item.properties["path"] = publish_path
# include an indicator that this is an image sequence and the known
# file that belongs to this sequence
item.properties["sequence_paths"] = rendered_files
# store publish info on the item so that the base publish plugin
# doesn't fall back to zero config path parsing
item.properties["publish_name"] = publish_name
item.properties["publish_version"] = version_number
item.properties["publish_template"] = \
sg_writenode_app.get_node_publish_template(node)
item.properties["work_template"] = \
sg_writenode_app.get_node_render_template(node)
item.properties["color_space"] = self._get_node_colorspace(node)
item.properties["first_frame"] = first_frame
item.properties["last_frame"] = last_frame
# store the nuke writenode on the item as well. this can be used by
# secondary publish plugins
item.properties["sg_writenode"] = node
# we have a publish template so disable context change. This
# is a temporary measure until the publisher handles context
# switching natively.
item.context_change_allowed = False
self.logger.info("Collected file: %s" % (publish_path,))
def _get_node_colorspace(self, node):
"""
Get the colorspace for the specified nuke node
:param node: The nuke node to find the colorspace for
:returns: The string representing the colorspace for the node
"""
cs_knob = node.knob("colorspace")
if not cs_knob:
return
cs = cs_knob.value()
# handle default value where cs would be something like: 'default (linear)'
if cs.startswith("default (") and cs.endswith(")"):
cs = cs[9:-1]
return cs
def _session_path():
"""
Return the path to the current session
:return:
"""
root_name = nuke.root().name()
return None if root_name == "Root" else root_name
```
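Two small pieces of string handling in the collector above are easy to test in isolation: the publish-name construction from the render template's `name`/`channel` fields, and the `default (...)` unwrap in `_get_node_colorspace()`. A standalone sketch of both:
```python
# Standalone sketch of two helpers mirrored from the collector above.
def build_publish_name(rp_name, rp_channel):
    # same fallback ladder as collect_sg_writenodes()
    if not rp_name and not rp_channel:
        return "Publish"
    elif not rp_name:
        return "Channel %s" % rp_channel
    elif not rp_channel:
        return rp_name
    return "%s, Channel %s" % (rp_name, rp_channel)

def strip_default_colorspace(cs):
    # unwrap values like 'default (linear)' -> 'linear'
    if cs.startswith("default (") and cs.endswith(")"):
        return cs[len("default ("):-1]
    return cs

print(build_publish_name(None, None))                # Publish
print(build_publish_name("beauty", "rgba"))          # beauty, Channel rgba
print(strip_default_colorspace("default (linear)"))  # linear
```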
#### File: plugins/basic/menu.py
```python
import os
import sys
def plugin_startup():
"""
Initializes the Toolkit plugin for Nuke.
"""
# construct the path to the plugin root's folder.
# plugins/basic/menu.py
# -------------|
# this part ^
plugin_root_path = os.path.dirname(__file__)
# the plugin python path will be just below the root level. add it to
# sys.path
plugin_python_path = os.path.join(plugin_root_path, "Python")
sys.path.insert(0, plugin_python_path)
# now that the path is there, we can import the plugin bootstrap logic
try:
from tk_nuke_basic import plugin_bootstrap
plugin_bootstrap.bootstrap(plugin_root_path)
except Exception, e:
import traceback
stack_trace = traceback.format_exc()
message = "Shotgun Toolkit Error: %s" % (e,)
details = "Error stack trace:\n\n%s" % (stack_trace)
import nuke
nuke.error(message)
nuke.error(details)
# Invoked on startup while Nuke is walking NUKE_PATH.
plugin_startup()
```
#### File: python/tk_nuke/context.py
```python
import nuke
import tank
class PluginStudioContextSwitcher(object):
"""
A Toolkit context-switching manager.
This class provides a context switcher for non template based pipeline configurations.
As such, there is no way to find the context of a file by extracting entities from the
path. It is therefore an empty shell.
"""
def __init__(self, engine):
pass
def get_new_context(self, file_path):
return None
def destroy(self):
pass
class ClassicStudioContextSwitcher(object):
"""
A Toolkit context-switching manager.
This class operates by registering an event handler with Nuke Studio,
which allows it to detect when the user has changed from the top-level
"project" view to a Nuke node graph, and vice versa. When changing to
the "Nuke" portion of Nuke Studio, the .nk script being shown will be
checked against Shotgun Toolkit to determine whether it resides in a
known context, and if it does the tk-nuke engine will switch to that
on the fly. When the user comes out of the "Nuke" portion of Nuke Studio
and is once again at the project level, tk-nuke's context will again
be changed to match.
"""
def __init__(self, engine):
"""
Initializes a PluginStudioContextSwitcher object.
:param engine: The running sgtk.engine.Engine to associate the
context switcher with.
"""
self._event_desc = [
dict(
add=nuke.addOnCreate,
remove=nuke.removeOnCreate,
registrar=nuke.callbacks.onCreates,
function=self._startup_node_callback,
),
dict(
add=nuke.addOnScriptSave,
remove=nuke.removeOnScriptSave,
registrar=nuke.callbacks.onScriptSaves,
function=self._on_save_callback,
),
]
self._context_cache = dict()
self._init_project_root = engine.tank.project_path
self._init_context = engine.context
self._is_in_nuke = False
self.register_events(reregister=True)
##########################################################################
# properties
@property
def context(self):
"""
The current sgtk.context.Context.
"""
        return self.engine.context
@property
def is_in_nuke(self):
"""
        Whether Nuke Studio is currently in "Nuke" mode or not.
"""
return self._is_in_nuke
@property
def engine(self):
"""
The current engine that is running.
"""
return tank.platform.current_engine()
@property
def init_context(self):
"""
The sgtk.context.Context that was used at initialization time.
"""
return self._init_context
@property
def init_project_root(self):
"""
The project root directory path at initialization time.
"""
return self._init_project_root
##########################################################################
# private
def _check_if_registered(self, func, registrar):
"""
Checks if a callback is already registered with Nuke Studio.
"""
# The test is made by comparing the name of the functions.
# see: http://docs.thefoundry.co.uk/nuke/90/pythondevguide/callbacks.html
for nodeClass_category in registrar.values():
for (function, args, kwargs, nodeClass) in nodeClass_category:
if func.__name__ == function.__name__:
return True
return False
def _eventHandler(self, event):
"""
Event handler for context switching events in Nuke Studio.
:param event: The Nuke Studio event that was triggered.
"""
# Testing if we actually changed context or if the event got fired without
# the user switching to the node graph. Early exit if it's still the
# same context.
if self._is_in_nuke == event.focusInNuke:
return
# Set the current context to be remembered for the next context
# change.
self._is_in_nuke = event.focusInNuke
if self.is_in_nuke:
# We switched from the project timeline to a Nuke node graph.
try:
script_path = nuke.scriptName()
except Exception:
script_path = None
if script_path:
# Switched to nuke with a script open. We have a path and could try
# to figure out the sgtk context from that.
new_context = self.get_new_context(script_path)
if new_context is not None and new_context != self.engine.context:
self.change_context(new_context)
else:
# There is no script open in the node graph. Because of that, we
# will stay in the current context since we're essentially just in
# a non-special state of Nuke Studio where we're on the empty node
# graph tab.
return
else:
# This is a switch back to the project-level timeline,
# so change to that context based on that project file's
# path.
project_path = self._get_current_project()
if project_path:
new_context = self.get_new_context(project_path)
if new_context:
self.change_context(new_context)
return
# If all else fails here, then we just go back to the init
# context that we have cached. Since we know we're not in
# the Nuke node graph, then we should be fine to go ahead
# with what we had at launch.
self.change_context(self._init_context)
def _get_context_from_script(self, script):
"""
Returns an sgtk.context.Context object from the given script path.
:param script: The path to a script file on disk.
"""
tk = tank.tank_from_path(script)
context = tk.context_from_path(
script,
previous_context=self.engine.context,
)
if context.project is None:
raise tank.TankError(
"The Nuke engine needs at least a project "
"context in order to start! Your context: %s" % context
)
else:
return context
def _get_current_project(self):
"""
Returns the current project based on where in the UI the user clicked.
"""
import hiero.core
import hiero.ui
view = hiero.ui.activeView()
if isinstance(view, hiero.ui.TimelineEditor):
sequence = view.sequence()
if sequence:
bin_item = sequence.binItem()
if bin_item:
return bin_item.project().path()
return None
def _on_save_callback(self):
"""
Callback that fires every time a file is saved.
"""
try:
# Get the new file name.
file_name = nuke.root().name()
try:
# This file could be in another project altogether, so
# create a new Tank instance.
tk = tank.tank_from_path(file_name)
except tank.TankError as e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
return
# Extract a new context based on the file and change to that
# context.
new_context = tk.context_from_path(
file_name,
previous_context=self.context,
)
self.change_context(new_context)
except Exception:
self.engine.menu_generator.create_sgtk_error_menu()
def _startup_node_callback(self):
"""
Callback that fires every time a node gets created.
"""
try:
# Look for the root node. This is created only when a new or existing
# file is opened.
if nuke.thisNode() != nuke.root():
return
if nuke.root().name() == "Root":
# This is a file->new call, so base it on the context we
# stored from the previous session.
tk = tank.Tank(self.init_project_root)
if self.init_context:
new_ctx = self.init_context
else:
new_ctx = tk.context_empty()
else:
# This is a file->open call, so we can get the new context
# from the file path that was opened.
file_name = nuke.root().name()
try:
tk = tank.tank_from_path(file_name)
except tank.TankError as e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
return
new_ctx = tk.context_from_path(
file_name,
previous_context=self.context,
)
# Now change the context for the engine and apps.
self.change_context(new_ctx)
except Exception as e:
self.engine.menu_generator.create_sgtk_error_menu(e)
##########################################################################
# public
def change_context(self, new_context):
"""
Changes Toolkit's context, or creates a disabled menu item if
that is not possible.
:param new_context: The sgtk.context.Context to change to.
"""
if new_context == self.engine.context:
return
try:
tank.platform.change_context(new_context)
except tank.TankEngineInitError as e:
# Context was not sufficient!
self.engine.menu_generator.create_sgtk_disabled_menu(e)
def destroy(self):
"""
Tears down the context switcher by deregistering event handlers.
"""
self.unregister_events()
def get_new_context(self, script_path):
"""
Returns a new sgtk.context.Context for the given script path.
If the context exists in the in-memory cache, then that is returned,
otherwise a new Context object is constructed, cached, and returned.
:param script_path: The path to a script file on disk.
"""
context = self._context_cache.get(script_path)
if context:
return context
try:
context = self._get_context_from_script(script_path)
if context:
self._context_cache[script_path] = context
return context
else:
raise tank.TankError(
"Toolkit could not determine the context associated with this script."
)
except Exception as e:
self.engine.menu_generator.create_sgtk_disabled_menu(e)
self.engine.logger.debug(e)
return None
def register_events(self, reregister=False):
"""
Registers context-switching event handlers with Nuke Studio.
:param reregister: If True, previously-registered event handlers will
be removed and new instances of those handlers will
be reregistered with Nuke Studio. If False, any
event handler that has already been registered with
Nuke Studio will be skipped.
"""
import hiero.core
# Event for context switching from Hiero to Nuke.
hiero.core.events.registerInterest(
hiero.core.events.EventType.kContextChanged,
self._eventHandler,
)
for func_desc in self._event_desc:
# This is the variable that stores a dict of currently-registered
# callbacks.
registrar = func_desc.get('registrar')
# The function we wish to register.
function = func_desc.get('function')
# The function used to register the callback.
add = func_desc.get('add')
# Check if the callback is already registered.
if self._check_if_registered(function, registrar):
if reregister:
self._unregister_events(only=[func_desc])
else:
continue
add(function)
def unregister_events(self, only=None):
"""
Unregisters any event handlers that the context switcher
created during a register_events call.
:param only: A list of callback functions to unregister. If
not provided, all known event callbacks will be
unregistered.
"""
import hiero.core
hiero.core.events.unregisterInterest(
hiero.core.events.EventType.kContextChanged,
self._eventHandler,
)
func_descs = only or self._event_desc
for func_desc in func_descs:
registrar = func_desc.get('registrar')
# The function we wish to unregister.
function = func_desc.get('function')
# The function used to unregister the callback.
remove = func_desc.get('remove')
if self._check_if_registered(function, registrar):
remove(function)
```
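The two switcher classes above share an informal interface (`get_new_context`, `destroy`). A minimal wiring sketch, assuming a running `engine` object and a flag describing the pipeline configuration style; the real tk-nuke engine makes this choice internally:

```python
# Hypothetical factory, not part of the original module: pick the switcher
# that matches the pipeline configuration style.
def create_context_switcher(engine, is_plugin_config):
    if is_plugin_config:
        # Non-template configs cannot derive a context from a file path.
        return PluginStudioContextSwitcher(engine)
    # Template-based configs can watch Nuke Studio events and switch context.
    return ClassicStudioContextSwitcher(engine)

# switcher = create_context_switcher(engine, is_plugin_config=False)
# ... later, on engine teardown:
# switcher.destroy()
```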
#### File: tk-nuke-writenode/v1.2.0/app.py
```python
import os
import nuke
import tank
from tank import TankError
class NukeWriteNode(tank.platform.Application):
def init_app(self):
"""
Called as the application is being initialized
"""
# import module and create handler
tk_nuke_writenode = self.import_module("tk_nuke_writenode")
self.__write_node_handler = tk_nuke_writenode.TankWriteNodeHandler(self)
# patch handler onto nuke module for access in WriteNode knobs
nuke._shotgun_write_node_handler = self.__write_node_handler
# and for backwards compatibility!
nuke._tank_write_node_handler = self.__write_node_handler
# add WriteNodes to nuke menu
self.__add_write_node_commands()
# add callbacks:
self.__write_node_handler.add_callbacks()
@property
def context_change_allowed(self):
"""
Specifies that context changes are allowed.
"""
return True
def destroy_app(self):
"""
Called when the app is unloaded/destroyed
"""
self.log_debug("Destroying tk-nuke-writenode app")
# remove any callbacks that were registered by the handler:
self.__write_node_handler.remove_callbacks()
# clean up the nuke module:
if hasattr(nuke, "_shotgun_write_node_handler"):
del nuke._shotgun_write_node_handler
if hasattr(nuke, "_tank_write_node_handler"):
del nuke._tank_write_node_handler
def post_context_change(self, old_context, new_context):
"""
Handles refreshing the render paths of all Shotgun write nodes
after a context change has been completed.
:param old_context: The sgtk.context.Context being switched from.
:param new_context: The sgtk.context.Context being switched to.
"""
for node in self.get_write_nodes():
self.reset_node_render_path(node)
self.__write_node_handler.populate_profiles_from_settings()
self.__write_node_handler.populate_script_template()
self.__add_write_node_commands(new_context)
def process_placeholder_nodes(self):
"""
Convert any placeholder nodes to TK Write Nodes
"""
self.__write_node_handler.process_placeholder_nodes()
# interface for other apps to query write node info:
#
# access general information:
def get_write_nodes(self):
"""
Return list of all write nodes
"""
return self.__write_node_handler.get_nodes()
def get_node_name(self, node):
"""
Return the name for the specified node
"""
return self.__write_node_handler.get_node_name(node)
def get_node_profile_name(self, node):
"""
Return the name of the profile the specified node
is using
"""
return self.__write_node_handler.get_node_profile_name(node)
def get_node_tank_type(self, node):
"""
Return the tank type for the specified node
Note: Legacy version with old 'Tank Type' name - use
get_node_published_file_type instead!
"""
return self.__write_node_handler.get_node_tank_type(node)
def get_node_published_file_type(self, node):
"""
Return the published file type for the specified node
"""
return self.__write_node_handler.get_node_tank_type(node)
def is_node_render_path_locked(self, node):
"""
Determine if the render path for the specified node
is locked. The path will become locked if the cached
version of the path no longer matches the computed
path (using the appropriate render template). This
can happen if the file is moved on disk or if the template
is changed.
"""
return self.__write_node_handler.render_path_is_locked(node)
# access full-res render information:
def get_node_render_path(self, node):
"""
Return the render path for the specified node
"""
return self.__write_node_handler.compute_render_path(node)
def get_node_render_files(self, node):
"""
Return the list of rendered files for the node
"""
return self.__write_node_handler.get_files_on_disk(node)
def get_node_render_template(self, node):
"""
Return the render template for the specified node
"""
return self.__write_node_handler.get_render_template(node)
def get_node_publish_template(self, node):
"""
Return the publish template for the specified node
"""
return self.__write_node_handler.get_publish_template(node)
# access proxy-res render information:
def get_node_proxy_render_path(self, node):
"""
Return the proxy render path for the specified node
"""
return self.__write_node_handler.compute_proxy_path(node)
def get_node_proxy_render_files(self, node):
"""
Return the list of proxy rendered files for the node
"""
return self.__write_node_handler.get_proxy_files_on_disk(node)
def get_node_proxy_render_template(self, node):
"""
Return the proxy render template for the specified node
"""
return self.__write_node_handler.get_proxy_render_template(node)
def get_node_proxy_publish_template(self, node):
"""
Return the proxy publish template for the specified node
"""
return self.__write_node_handler.get_proxy_publish_template(node)
# useful utility functions:
def generate_node_thumbnail(self, node):
"""
Generate a thumbnail for the specified node
"""
return self.__write_node_handler.generate_thumbnail(node)
def reset_node_render_path(self, node):
"""
Reset the render path of the specified node. This
will force the render path to be updated based on
the current script path and configuration.
Note, this should really never be needed now that the
path is reset automatically when the user changes something.
"""
self.__write_node_handler.reset_render_path(node)
def convert_to_write_nodes(self):
"""
Convert all Shotgun write nodes found in the current Script to regular
Nuke Write nodes. Additional toolkit information will be stored on
additional user knobs named 'tk_*'
"""
self.__write_node_handler.convert_sg_to_nuke_write_nodes()
def convert_from_write_nodes(self):
"""
Convert all regular Nuke Write nodes that have previously been converted
from Shotgun Write nodes, back into Shotgun Write nodes.
"""
self.__write_node_handler.convert_nuke_to_sg_write_nodes()
# Private methods
#
def __add_write_node_commands(self, context=None):
"""
Creates write node menu entries for all write node configurations
"""
context = context or self.context
write_node_icon = os.path.join(self.disk_location, "resources", "tk2_write.png")
for profile_name in self.__write_node_handler.profile_names:
# add to toolbar menu
cb_fn = lambda pn=profile_name: self.__write_node_handler.create_new_node(pn)
self.engine.register_command(
"%s [Shotgun]" % profile_name,
cb_fn,
dict(
type="node",
icon=write_node_icon,
context=context,
)
)
```
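A short usage sketch for the public query interface above, assuming a running engine with the app registered under the instance name `tk-nuke-writenode`:

```python
import sgtk

engine = sgtk.platform.current_engine()
app = engine.apps.get("tk-nuke-writenode")

if app:
    for node in app.get_write_nodes():
        # All of these calls delegate to the write node handler.
        name = app.get_node_name(node)
        render_path = app.get_node_render_path(node)
        locked = app.is_node_render_path_locked(node)
        print("%s -> %s (path locked: %s)" % (name, render_path, locked))
```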
#### File: tk_photoshopcc/rpc/communicator.py
```python
import json
import threading
import sys
import os.path
import time
import logging
import contextlib
# Add our third-party packages to sys.path.
sys.path.append(os.path.join(os.path.dirname(__file__), "packages"))
import socketIO_client.exceptions
from socketIO_client import SocketIO
from .proxy import ProxyScope, ProxyWrapper, ClassInstanceProxyWrapper
class Communicator(object):
"""
A communication manager that owns a socket.io client. The
communicator offers access to a global scope provided by
a server that the communicator connects to at instantiation
time. Basic RPC calls are also implemented.
"""
_RESULTS = dict()
_UID = 0
_LOCK = threading.Lock()
_RPC_EXECUTE_COMMAND = "execute_command"
_REGISTRY = dict()
_COMMAND_REGISTRY = dict()
def __init__(self, port=8090, host="localhost", disconnect_callback=None, logger=None, network_debug=False, event_processor=None):
"""
Constructor. Rather than instantiating the Communicator directly,
it is advised to make use of the get_or_create() classmethod as
a factory constructor.
:param int port: The port number to connect to. Default is 8090.
:param str host: The host to connect to. Default is localhost.
:param disconnect_callback: A callback to call if a disconnect
message is received from the host.
:param logger: A standard Python logger to use for network debug
logging.
:param bool network_debug: Whether network debug logging is desired.
:param event_processor: A callable that will be called during each
iteration of the response wait loop. An
example would be passing in the
QtGui.QApplication.processEvents callable,
which will force an iteration of the Qt
event loop during response wait periods,
which will stop Qt widgets from being
blocked from repainting.
"""
self._port = port
self._host = host
self._network_debug = network_debug
self._logger = logger or logging.getLogger(__name__)
self._event_processor = event_processor
self._response_logging_silenced = False
self._io = SocketIO(host, port)
self._io.on("return", self._handle_response)
self._global_scope = None
self._disconnect_callback = disconnect_callback
if disconnect_callback:
self._io.on("disconnect", disconnect_callback)
self._get_global_scope()
##########################################################################################
# constructor
@classmethod
def get_or_create(cls, identifier, *args, **kwargs):
"""
A factory constructor that provides singleton instantiation
behavior based on a given unique identifier. If an instance
exists with the given identifier it will be returned,
otherwise a new instance is constructed and returned after
being recorded by the given identifier.
:param identifier: Some hashable identifier to associate
the instantiated communicator with.
:param int port: The port to connect to. Default is 8090.
:param str host: The host to connect to. Default is localhost.
:param disconnect_callback: A callback to call if a disconnect
message is received from the host.
"""
if identifier in cls._REGISTRY:
instance = cls._REGISTRY[identifier]
instance.logger.debug("Reusing Communicator by id '%s'" % identifier)
else:
instance = cls(*args, **kwargs)
instance._identifier = identifier
cls._REGISTRY[identifier] = instance
instance.logger.debug("New Communicator of id '%s'" % identifier)
return instance
##########################################################################################
# properties
@property
def event_processor(self):
"""
The callable event processor that will be called between iterations
of the RPC response wait loop.
"""
return self._event_processor
@event_processor.setter
def event_processor(self, processor):
self._event_processor = processor
@property
def host(self):
"""
The host that was connected to.
"""
return self._host
@property
def logger(self):
"""
The standard Python logger used by the communicator.
"""
return self._logger
@logger.setter
def logger(self, logger):
self._logger = logger
@property
def network_debug(self):
"""
Whether network debugging messages are logged.
"""
return self._network_debug
@network_debug.setter
def network_debug(self, state):
self._network_debug = bool(state)
@property
def port(self):
"""
The port number connected to.
"""
return self._port
##########################################################################################
# context managers
@contextlib.contextmanager
def response_logging_silenced(self):
"""
A context manager that will silence RPC command response logging
on enter, and enable it on exit. This is useful if you're emitting
an RPC command that you expect might fail, but you want to handle
that failure without alerting a user via logging.
"""
self._response_logging_silenced = True
yield
self._response_logging_silenced = False
##########################################################################################
# RPC
def disconnect(self):
"""
Disconnects from the socket.io server.
"""
self._io.disconnect()
del self._REGISTRY[self._identifier]
def ping(self):
"""
Pings the host, testing whether the connection is still live.
"""
self._io._ping()
def process_new_messages(self, wait=0.01, single_loop=False, process_events=True):
"""
Processes new messages that have arrived but that have not been
previously handled.
:param float wait: How long to poll for new messages, in seconds.
:param bool single_loop: If True, only a single check for messages
will be made and the timeout duration will
not be used. Default is False.
:param bool process_events: If True and an event processor callable
is registered with the communicator, it
will be called at the end of the wait
duration.
"""
self.log_network_debug("Processing new messages, wait is %s" % wait)
try:
self._io._heartbeat_thread.hurry()
self._io._transport.set_timeout(seconds=0.1)
start = time.time()
while wait >= (time.time() - start) or single_loop:
try:
self._io._process_packets()
except socketIO_client.exceptions.TimeoutError:
# Timeouts here are not a problem. It can be something
# as simple as the server being busy and not responding
# quickly enough, in which case subsequent attempts will
# go through without a problem.
self.log_network_debug(
"Timed out during _process_packets call. This is "
"likely not a problem if it only happens occasionally."
)
else:
if single_loop:
break
# Force an event loop iteration if we were provided with a
# callable event processor.
if self.event_processor and process_events:
self.event_processor()
finally:
self._io._heartbeat_thread.relax()
self._io._transport.set_timeout()
self.log_network_debug("New message processing complete.")
def rpc_call(self, proxy_object, params=None, parent=None):
"""
Executes a "call" RPC command.
:param proxy_object: The proxy object to call via RPC.
:param list params: The list of arguments to pass to the
callable when it is called.
:param parent: The parent proxy object, if any. If given, the
callable will be called as a method of the
parent object. If a parent is not given, it
will be called as a function of the global
scope.
:returns: The data returned by the callable when it is
called.
"""
self.log_network_debug("Sending a call message using rpc_call...")
# Copy to avoid mutating the caller's list (or a shared default).
params = list(params or [])
if parent:
params.insert(0, parent.uid)
self.log_network_debug("Parent given, UID is %s" % parent.uid)
else:
self.log_network_debug("No parent given.")
params.insert(0, None)
return self.__run_rpc_command(
method="call",
proxy_object=proxy_object,
params=params,
wrapper_class=ProxyWrapper,
)
def rpc_eval(self, command):
"""
Evaluates the given string command via RPC.
:param str command: The command to execute.
:returns: The data returned by the evaluated command.
"""
self.log_network_debug("Sending an eval message using rpc_eval...")
self.log_network_debug("Command is: %s" % command)
return self.__run_rpc_command(
method="eval",
proxy_object=None,
params=[command],
wrapper_class=ProxyWrapper,
)
def rpc_get(self, proxy_object, property_name):
"""
Gets the value of the given property for the given proxy object.
:param proxy_object: The proxy object to get the property
value from.
:param str property_name: The name of the property to get.
:returns: The value of the property of the remote object.
"""
self.log_network_debug("Sending a get message using rpc_get...")
self.log_network_debug(
"Getting property %s from object UID %s" % (
property_name,
proxy_object.uid
)
)
return self.__run_rpc_command(
method="get",
proxy_object=proxy_object,
params=[property_name],
wrapper_class=ProxyWrapper,
attach_parent=proxy_object,
)
def rpc_get_index(self, proxy_object, index):
"""
Gets the value at the given index of the given proxy object.
:param proxy_object: The proxy object to index into.
:param int index: The index to get the value of.
:returns: The value of the index of the remote object.
"""
self.log_network_debug("Sending a get_index message using rpc_get_index...")
self.log_network_debug(
"Getting index %s of object UID %s" % (
index,
proxy_object.uid
)
)
return self.__run_rpc_command(
method="get_index",
proxy_object=proxy_object,
params=[index],
wrapper_class=ProxyWrapper,
)
def rpc_new(self, class_name):
"""
Instantiates a new remote object of the given class name.
:param str class_name: The name of the class to instantiate.
:returns: A proxy object pointing to the instantiated
remote object.
"""
self.log_network_debug("Sending a 'new' message using rpc_new...")
self.log_network_debug("Instantiating class %s" % class_name)
return self.__run_rpc_command(
method="new",
proxy_object=None,
params=[class_name],
wrapper_class=ClassInstanceProxyWrapper,
)
def rpc_set(self, proxy_object, property_name, value):
"""
Sets the given property to the given value on the given proxy
object.
:param proxy_object: The proxy object to set the property of.
:param str property_name: The name of the property to set.
:param value: The value to set the property to.
"""
self.log_network_debug("Sending a set message using rpc_set...")
self.log_network_debug(
"Setting property %s to %s for object UID %s" % (
property_name,
value,
proxy_object.uid
)
)
return self.__run_rpc_command(
method="set",
proxy_object=proxy_object,
params=[property_name, value],
wrapper_class=ProxyWrapper,
)
def wait(self, timeout=0.1, single_loop=False, process_events=True):
"""
Triggers a wait and the processing of any messages already
queued up or that arrive during the wait period.
:param float timeout: The duration of time, in seconds, to
wait.
:param bool single_loop: If True, only a single check for messages
will be made and the timeout duration will
not be used. Default is False.
:param bool process_events: If True and an event processor callable
is registered with the communicator, it
will be called at the end of the wait
duration.
"""
self.log_network_debug("Triggering a wait of duration %s" % timeout)
self.log_network_debug("single_loop is %s" % single_loop)
self.log_network_debug("process_events is %s" % process_events)
self.process_new_messages(
wait=float(timeout),
single_loop=single_loop,
process_events=process_events,
)
##########################################################################################
# logging
def log_network_debug(self, msg):
"""
Logs a debug message if 'network_debug' is turned on.
:param str msg: The log message.
"""
if self.network_debug:
self.logger.debug(msg)
##########################################################################################
# internal methods
def _get_global_scope(self):
"""
Emits a message requesting that the remote global scope be
introspected, wrapped, and returned as JSON data.
"""
self.log_network_debug("Getting the remote global scope...")
payload = self._get_payload("get_global_scope")
self.log_network_debug("Payload: %s" % payload)
self._io.emit(self._RPC_EXECUTE_COMMAND, payload)
uid = payload["id"]
results = self._wait_for_response(uid)
self.log_network_debug("Raw data response: %s" % results)
self._global_scope = ProxyScope(results, self)
def _get_payload(self, method, proxy_object=None, params=[]):
"""
Builds the payload dictionary to be sent via RPC.
:param str method: The JSON-RPC method name to call.
:param proxy_object: The proxy object to be included in the
payload.
:param list params: The list of parameters to be packaged.
:returns: The payload dictionary, formatted for JSON-RPC
use.
"""
payload = dict(
id=self.__get_uid(),
method=method,
jsonrpc="2.0",
params=[],
)
if proxy_object:
payload["params"] = [proxy_object.serialized]
if params:
payload["params"].extend(self.__prepare_params(params))
else:
payload["params"] = self.__prepare_params(params)
self.log_network_debug("Payload constructed: %s" % payload)
return payload
def _handle_response(self, response, *args):
"""
Handles the response to an already-emitted message.
:param str response: The JSON encoded message response.
:returns: The decoded result data.
"""
self.log_network_debug("Handling RPC response...")
result = json.loads(response)
uid = result["id"]
self.log_network_debug("Response UID is %s" % uid)
try:
self._RESULTS[uid] = self._ensure_utf8(json.loads(result["result"]))
except (TypeError, ValueError):
self._RESULTS[uid] = self._ensure_utf8(result.get("result"))
except KeyError:
if not self._response_logging_silenced:
self.logger.error("RPC command (UID=%s) failed!" % uid)
self.logger.error("Failed command payload: %s" % self._COMMAND_REGISTRY[uid])
self.logger.debug("Failure raw response: %s" % response)
self.logger.debug("Failure results: %s" % result)
raise RuntimeError("RPC command (UID=%s) failed!" % uid)
self.log_network_debug(
"Processed response data: %s" % self._RESULTS[uid]
)
def _ensure_utf8(self, in_string):
if isinstance(in_string, unicode):
in_string = in_string.encode("utf-8")
return in_string
def _wait_for_response(self, uid):
"""
Waits for the results of an RPC call.
:param int uid: The unique id of the RPC call to wait for.
:returns: The raw returned results data.
"""
self.log_network_debug("Waiting for RPC response for UID %s..." % uid)
while uid not in self._RESULTS:
# If we were given an event processor, we can call that here. That
# will be something like QApplication.processEvents, which will
# force an iteration of the Qt event loop so that we're not
completely blocking the UI thread here, even though we're blocking Python.
if self.event_processor:
self.event_processor()
self.wait(single_loop=True, process_events=False)
results = self._RESULTS[uid]
del self._RESULTS[uid]
self.log_network_debug("Results arrived for UID %s" % uid)
return results
##########################################################################################
# private methods
def __get_uid(self):
"""
Gets the next available unique id number.
"""
with self._LOCK:
self._UID += 1
return self._UID
def __prepare_params(self, params):
"""
Prepares a list of parameters to be emitted as part of an
RPC call.
:param list params: The list of parameters to prepare.
:returns: The list of prepared parameters, fit for emission.
"""
processed = []
for param in params:
# TODO: Probably handle all iterables.
if isinstance(param, list):
processed.extend(self.__prepare_params(param))
elif isinstance(param, ProxyWrapper):
processed.append(param.data)
else:
if isinstance(param, basestring) and not isinstance(param, unicode):
# ensure the strings are unicode
param = param.decode("utf-8")
processed.append(param)
return processed
def __run_rpc_command(self, method, proxy_object, params, wrapper_class, attach_parent=None):
"""
Emits the requested JSON-RPC method via socket.io and handles
the returned result when it arrives.
:param str method: The JSON-RPC method name to call.
:param proxy_object: The proxy object to send.
:param list params: The list of parameters to emit.
:param wrapper_class: The class reference to use when
wrapping results.
:param attach_parent: An optional parent object to associate
the returned data to.
:returns: The wrapped results of the RPC call.
"""
payload = self._get_payload(
method=method,
proxy_object=proxy_object,
params=params,
)
self._COMMAND_REGISTRY[payload["id"]] = payload
self._io.emit(self._RPC_EXECUTE_COMMAND, payload)
results = self._wait_for_response(payload["id"])
return wrapper_class(results, self, parent=attach_parent)
##########################################################################################
# magic methods
def __getattr__(self, name):
try:
return getattr(self._global_scope, name)
except AttributeError:
# If we were asked for something that's not in the global
# scope, it's possible that it's a class that needs to be
# instantiated.
# TODO: This needs to be behavior that's custom to the given
# environment we're dealing with. Right now, this behavior here
# is handling a situation that arises in ExtendScript, but might
# not even be appropriate for other flavors/versions of JS.
# NOTE: I'm thinking we can do this sort of thing just with a
# subclass. The base Communicator class can define the simpler
# getattr, which assumes anything requested is available from
# the global scope object. For Adobe, we can implement an
# AdobeCommunicator subclass that reimplements getattr and
# adds the below logic.
instance = self.rpc_new(name)
if isinstance(instance, ProxyWrapper):
return instance
else:
raise
```
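A minimal client-side sketch, assuming a socket.io RPC server speaking the expected protocol is already listening on localhost:8090; only the calls themselves come from the class above:

```python
import logging

logger = logging.getLogger("rpc_demo")

# get_or_create ensures one Communicator per identifier.
comm = Communicator.get_or_create(
    "demo",
    port=8090,
    host="localhost",
    logger=logger,
    network_debug=True,
)

# Evaluate an expression in the remote global scope; the result comes
# back wrapped in a ProxyWrapper.
result = comm.rpc_eval("1 + 1")

# Attribute access resolves through the proxied global scope (__getattr__).
# app = comm.app

comm.disconnect()
```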
#### File: tk-multi-publish2/basic/upload_version.py
```python
import os
import pprint
import tempfile
import uuid
import sys
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class PhotoshopUploadVersionPlugin(HookBaseClass):
"""
Plugin for sending photoshop documents to shotgun for review.
"""
@property
def icon(self):
"""
Path to an png icon on disk
"""
# look for icon one level up from this hook's folder in "icons" folder
return os.path.join(
self.disk_location,
os.pardir,
"icons",
"review.png"
)
@property
def name(self):
"""
One line display name describing the plugin
"""
return "Upload for review"
@property
def description(self):
"""
Verbose, multi-line description of what the plugin does. This can
contain simple html for formatting.
"""
publisher = self.parent
shotgun_url = publisher.sgtk.shotgun_url
media_page_url = "%s/page/media_center" % (shotgun_url,)
review_url = "https://www.shotgunsoftware.com/features-review"
return """
Upload the file to Shotgun for review.<br><br>
A <b>Version</b> entry will be created in Shotgun and a transcoded
copy of the file will be attached to it. The file can then be reviewed
via the project's <a href='%s'>Media</a> page, <a href='%s'>RV</a>, or
the <a href='%s'>Shotgun Review</a> mobile app.
""" % (media_page_url, review_url, review_url)
@property
def settings(self):
"""
Dictionary defining the settings that this plugin expects to receive
through the settings parameter in the accept, validate, publish and
finalize methods.
A dictionary of the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
}
}
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
return {}
@property
def item_filters(self):
"""
List of item types that this plugin is interested in.
Only items matching entries in this list will be presented to the
accept() method. Strings can contain glob patterns such as *, for example
["maya.*", "file.maya"]
"""
# only documents created or opened in Photoshop are of interest.
return ["photoshop.document"]
def accept(self, settings, item):
"""
Method called by the publisher to determine if an item is of any
interest to this plugin. Only items matching the filters defined via the
item_filters property will be presented to this method.
A publish task will be generated for each item accepted here. Returns a
dictionary with the following booleans:
- accepted: Indicates if the plugin is interested in this value at
all. Required.
- enabled: If True, the plugin will be enabled in the UI, otherwise
it will be disabled. Optional, True by default.
- visible: If True, the plugin will be visible in the UI, otherwise
it will be hidden. Optional, True by default.
- checked: If True, the plugin will be checked in the UI, otherwise
it will be unchecked. Optional, True by default.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: dictionary with boolean keys accepted, enabled, visible and checked
"""
document = item.properties.get("document")
if not document:
self.logger.warn("Could not determine the document for item")
return {"accepted": False}
path = _document_path(document)
if not path:
# the document has not been saved before (no path determined).
# provide a save button. the document will need to be saved before
# validation will succeed.
self.logger.warn(
"The Photoshop document '%s' has not been saved." %
(document.name,),
extra=_get_save_as_action(document)
)
self.logger.info(
"Photoshop '%s' plugin accepted document: %s" %
(self.name, document.name)
)
return {
"accepted": True,
"checked": True
}
def validate(self, settings, item):
"""
Validates the given item to check that it is ok to publish.
Returns a boolean to indicate validity.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: True if item is valid, False otherwise.
"""
document = item.properties["document"]
path = _document_path(document)
if not path:
# the document still requires saving. provide a save button.
# validation fails.
error_msg = "The Photoshop document '%s' has not been saved." % \
(document.name,)
self.logger.error(
error_msg,
extra=_get_save_as_action(document)
)
raise Exception(error_msg)
return True
def publish(self, settings, item):
"""
Executes the publish logic for the given item and settings.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
engine = publisher.engine
document = item.properties["document"]
path = _document_path(document)
upload_path = path
file_info = publisher.util.get_file_path_components(path)
if file_info["extension"] in ["psd", "psb"]:
with engine.context_changes_disabled():
# remember the active document so that we can restore it.
previous_active_document = engine.adobe.app.activeDocument
# make the document being processed the active document
engine.adobe.app.activeDocument = document
# path to a temp jpg file
upload_path = os.path.join(
tempfile.gettempdir(),
"%s_sgtk.jpg" % uuid.uuid4().hex
)
# jpg file/options
jpg_file = engine.adobe.File(upload_path)
jpg_options = engine.adobe.JPEGSaveOptions
jpg_options.quality = 12
# mark the temp upload path for removal
item.properties["remove_upload"] = True
# save a jpg copy of the document
document.saveAs(jpg_file, jpg_options, True)
# restore the active document
engine.adobe.app.activeDocument = previous_active_document
# use the path's filename as the publish name
path_components = publisher.util.get_file_path_components(path)
publish_name = path_components["filename"]
# populate the version data to send to SG
self.logger.info("Creating Version...")
version_data = {
"project": item.context.project,
"code": publish_name,
"description": item.description,
"entity": self._get_version_entity(item),
"sg_task": item.context.task
}
publish_data = item.properties.get("sg_publish_data")
# if the file was published, add the publish data to the version
if publish_data:
version_data["published_files"] = [publish_data]
# log the version data for debugging
self.logger.debug(
"Populated Version data...",
extra={
"action_show_more_info": {
"label": "Version Data",
"tooltip": "Show the complete Version data dictionary",
"text": "<pre>%s</pre>" % (
pprint.pformat(version_data),)
}
}
)
# create the version
self.logger.info("Creating version for review...")
version = self.parent.shotgun.create("Version", version_data)
# stash the version info in the item just in case
item.properties["sg_version_data"] = version
# on windows, ensure the path is utf-8 encoded to avoid issues with
# the shotgun api
if sys.platform.startswith("win"):
upload_path = upload_path.decode("utf-8")
# upload the file to SG
self.logger.info("Uploading content...")
self.parent.shotgun.upload(
"Version",
version["id"],
upload_path,
"sg_uploaded_movie"
)
self.logger.info("Upload complete!")
# go ahead and update the publish thumbnail (if there was one)
if publish_data:
self.logger.info("Updating publish thumbnail...")
self.parent.shotgun.upload_thumbnail(
publish_data["type"],
publish_data["id"],
upload_path
)
self.logger.info("Publish thumbnail updated!")
item.properties["upload_path"] = upload_path
def finalize(self, settings, item):
"""
Execute the finalization pass. This pass executes once all the publish
tasks have completed, and can for example be used to version up files.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
version = item.properties["sg_version_data"]
self.logger.info(
"Version uploaded for Photoshop document",
extra={
"action_show_in_shotgun": {
"label": "Show Version",
"tooltip": "Reveal the version in Shotgun.",
"entity": version
}
}
)
upload_path = item.properties["upload_path"]
# remove the tmp file
if item.properties.get("remove_upload", False):
try:
os.remove(upload_path)
except Exception:
self.logger.warn(
"Unable to remove temp file: %s" % (upload_path,))
def _get_version_entity(self, item):
"""
Returns the best entity to link the version to.
"""
if item.context.entity:
return item.context.entity
elif item.context.project:
return item.context.project
else:
return None
def _get_save_as_action(document):
"""
Simple helper for returning a log action dict for saving the document
"""
engine = sgtk.platform.current_engine()
# default save callback
callback = lambda: engine.save_as(document)
# if workfiles2 is configured, use that for file save
if "tk-multi-workfiles2" in engine.apps:
app = engine.apps["tk-multi-workfiles2"]
if hasattr(app, "show_file_save_dlg"):
callback = app.show_file_save_dlg
return {
"action_button": {
"label": "Save As...",
"tooltip": "Save the current document",
"callback": callback
}
}
def _document_path(document):
"""
Returns the path on disk to the supplied document. May be ``None`` if the
document has not been saved.
"""
try:
path = document.fullName.fsName
except RuntimeError:
path = None
return path
```
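For orientation, a sketch of the order in which the publish2 framework drives a plugin like the one above; `plugin`, `settings`, and `item` stand in for objects the framework normally constructs:

```python
# Hypothetical driver, for illustration only.
def run_publish_task(plugin, settings, item):
    accept_data = plugin.accept(settings, item)
    if not accept_data.get("accepted"):
        return
    # validate() raises for unsaved documents.
    if plugin.validate(settings, item):
        plugin.publish(settings, item)    # creates and uploads the Version
        plugin.finalize(settings, item)   # logs the result, removes temp jpg
```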
#### File: python/tk_photoshopcc_basic/classic_init.py
```python
import sys
import os
from . import log
def toolkit_classic_bootstrap():
"""
Business logic for bootstrapping toolkit as a traditional setup.
"""
import sgtk
logger = sgtk.LogManager.get_logger(__name__)
# ---- setup logging
log_handler = log.get_sgtk_logger(sgtk)
logger.info("Launching Toolkit in classic mode.")
logger.debug("TANK_CONTEXT and TANK_ENGINE variables found.")
# Deserialize the Context object and use that when starting
# the engine.
context = sgtk.context.deserialize(os.environ["TANK_CONTEXT"])
engine_name = os.environ["TANK_ENGINE"]
logger.info("Starting %s using context %s..." % (engine_name, context))
engine = sgtk.platform.start_engine(engine_name, context.tank, context)
# ---- tear down logging
sgtk.LogManager().root_logger.removeHandler(log_handler)
logger.debug("Removed bootstrap log handler from root logger...")
logger.info("Toolkit Bootstrapped!")
```
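The launcher side of this handshake is worth sketching: the environment variables read above are typically written by whatever process starts the DCC. A minimal sketch, assuming `sgtk.context.serialize` as the counterpart of the `deserialize` call used above:

```python
import os
import sgtk

def prepare_classic_launch_env(context, engine_name="tk-photoshopcc"):
    # engine_name is an assumption; any installed engine name works here.
    os.environ["TANK_CONTEXT"] = sgtk.context.serialize(context)
    os.environ["TANK_ENGINE"] = engine_name
```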
#### File: python/tk_photoshopcc_basic/log.py
```python
import logging
class BootstrapLogHandler(logging.StreamHandler):
"""
Manually flushes emitted records for js to pickup.
"""
def emit(self, record):
"""
Forwards the record back to to js via the engine communicator.
:param record: The record to log.
"""
# can't use super here because in python 2.6, logging.StreamHandler is
# not a new style class.
logging.StreamHandler.emit(self, record)
# always flush to ensure its seen by the js process
self.flush()
def get_sgtk_logger(sgtk):
"""
Sets up a std log handler for toolkit
:param sgtk: An sgtk module reference.
:returns: A log handler.
"""
# add a custom handler to the root logger so that all toolkit log messages
# are forwarded back to python via the communicator
bootstrap_log_formatter = logging.Formatter("[%(levelname)s]: %(message)s")
bootstrap_log_handler = BootstrapLogHandler()
bootstrap_log_handler.setFormatter(bootstrap_log_formatter)
if sgtk.LogManager().global_debug:
bootstrap_log_handler.setLevel(logging.DEBUG)
else:
bootstrap_log_handler.setLevel(logging.INFO)
# now get a logger to use during bootstrap
sgtk.LogManager().initialize_custom_handler(bootstrap_log_handler)
# initializes the file where logging output will go
sgtk.LogManager().initialize_base_file_handler("tk-photoshopcc")
return bootstrap_log_handler
```
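A minimal usage sketch, mirroring how classic_init.py above consumes this helper: install the handler for the duration of bootstrap, then remove it:

```python
import sgtk

log_handler = get_sgtk_logger(sgtk)
try:
    sgtk.LogManager.get_logger(__name__).info("Bootstrapping...")
finally:
    # Mirror the teardown in toolkit_classic_bootstrap().
    sgtk.LogManager().root_logger.removeHandler(log_handler)
```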
#### File: python/tk_photoshopcc/adobe_bridge.py
```python
import os
import functools
import threading
import sgtk
from sgtk.platform.qt import QtCore
# use api json to cover py 2.5
from tank_vendor import shotgun_api3
json = shotgun_api3.shotgun.json
from .rpc import Communicator
##########################################################################################
# functions
def timeout(seconds=5.0, error_message="Timed out."):
"""
A timeout decorator. When the given amount of time has passed
after the decorated callable is called, if it has not completed
an RPCTimeoutError is raised.
:param float seconds: The timeout duration, in seconds.
:param str error_message: The error message to raise once timed out.
"""
def decorator(func):
def _handle_timeout():
raise RPCTimeoutError(error_message)
def wrapper(*args, **kwargs):
timer = threading.Timer(float(seconds), _handle_timeout)
try:
timer.start()
result = func(*args, **kwargs)
finally:
timer.cancel()
return result
return functools.wraps(func)(wrapper)
return decorator
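# Illustrative usage (not part of the original module): any callable can be
# wrapped so that a timer fires if it runs longer than the given duration.
#
#     @timeout(seconds=2.0, error_message="Remote call timed out.")
#     def fetch_state():
#         ...
#
# Note that the RPCTimeoutError is raised from the timer's worker thread;
# see AdobeBridge.ping and _wait_for_response below for the real consumers.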
##########################################################################################
# classes
class MessageEmitter(QtCore.QObject):
"""
Container QObject for Qt signals fired when messages requesting certain
actions take place in Python arrive from the remote process.
:signal logging_received(str, str): Fires when a logging call has been
received. The first string is the logging level (debug, info, warning,
or error) and the second string is the message.
:signal command_received(int): Fires when an engine command has been
received. The integer value is the unique id of the engine command
that was requested to be executed.
:signal run_tests_request_received: Fires when a request for unit tests to
be run has been received.
:signal state_requested: Fires when the remote process requests the current
state.
:signal active_document_changed(str): Fires when alerted to a change in active
document by the RPC server. The string value is the path to the new
active document, or an empty string if the active document is unsaved.
"""
logging_received = QtCore.Signal(str, str)
command_received = QtCore.Signal(int)
run_tests_request_received = QtCore.Signal()
state_requested = QtCore.Signal()
active_document_changed = QtCore.Signal(str)
class AdobeBridge(Communicator):
"""
Bridge layer between the Adobe product and Shotgun Toolkit.
"""
# Backwards compatibility added to support tk-photoshop environment vars.
# https://support.shotgunsoftware.com/hc/en-us/articles/219039748-Photoshop#If%20the%20engine%20does%20not%20start
SHOTGUN_ADOBE_RESPONSE_TIMEOUT = os.environ.get(
"SHOTGUN_ADOBE_RESPONSE_TIMEOUT",
os.environ.get(
"SGTK_PHOTOSHOP_TIMEOUT",
300.0,
),
)
SHOTGUN_ADOBE_HEARTBEAT_TIMEOUT = os.environ.get(
"SHOTGUN_ADOBE_HEARTBEAT_TIMEOUT",
os.environ.get(
"SGTK_PHOTOSHOP_HEARTBEAT_TIMEOUT",
0.5,
),
)
def __init__(self, *args, **kwargs):
super(AdobeBridge, self).__init__(*args, **kwargs)
self.logger.debug(
"SHOTGUN_ADOBE_RESPONSE_TIMEOUT "
"is %s" % self.SHOTGUN_ADOBE_RESPONSE_TIMEOUT
)
self.logger.debug(
"SHOTGUN_ADOBE_HEARTBEAT_TIMEOUT "
"is %s" % self.SHOTGUN_ADOBE_HEARTBEAT_TIMEOUT
)
self._emitter = MessageEmitter()
self._io.on("logging", self._forward_logging)
self._io.on("command", self._forward_command)
self._io.on("run_tests", self._forward_run_tests)
self._io.on("state_requested", self._forward_state_request)
self._io.on("active_document_changed", self._forward_active_document_changed)
##########################################################################################
# properties
@property
def active_document_changed(self):
"""
The signal that is emitted when notification of an active document
change arrives via RPC.
"""
return self._emitter.active_document_changed
@property
def logging_received(self):
"""
The signal that is emitted when a logging message has arrived
via RPC.
"""
return self._emitter.logging_received
@property
def command_received(self):
"""
The signal that is emitted when a command message has arrived
via RPC.
"""
return self._emitter.command_received
@property
def run_tests_request_received(self):
"""
The signal that is emitted when a run_tests message has arrived
via RPC.
"""
return self._emitter.run_tests_request_received
@property
def state_requested(self):
"""
The QSignal that is emitted when the state is requested via RPC.
"""
return self._emitter.state_requested
##########################################################################################
# public methods
@timeout(SHOTGUN_ADOBE_HEARTBEAT_TIMEOUT, "Ping timed out.")
def ping(self):
"""
Pings the socket.io server to test whether the connection is still
active.
"""
super(AdobeBridge, self).ping()
def get_active_document_path(self):
"""
Gets the path to the currently-active document. This will do so
without raising a RuntimeError if the active document is a "new"
document that has not been saved. In that case, a None will be
returned instead.
:returns: The active document's file path on disk as a str, or
None if the document has never been saved.
"""
with self.response_logging_silenced():
try:
path = self.app.activeDocument.fullName.fsName
except AttributeError:
path = None
if isinstance(path, unicode):
path = path.encode("utf-8")
return path
def log_message(self, level, msg):
"""
Log a message from python so that it is visible on js side.
:param level: The js log level name.
:param msg: The message to log.
"""
log_data = {
"level": level,
"msg": msg
}
# NOTE: do not log in this method
json_log_data = json.dumps(log_data)
self._io.emit("log_message", json_log_data)
def send_commands(self, commands):
"""
Responsible for forwarding the current engine commands to js.
This method knows about the structure of the JSON that the js side expects.
"""
# encode the python dict as json
json_commands = json.dumps(commands)
self.logger.debug("Sending commands: %s" % json_commands)
self._io.emit("set_commands", json_commands)
def send_context_display(self, context_display):
"""
Responsible for forwarding the current engine context display to js.
This method knows about the structure of the JSON that the js side expects.
"""
# encode the python dict as json
json_context_display = json.dumps(context_display)
self.logger.debug("Sending context display.")
self._io.emit("set_context_display", json_context_display)
def send_context_thumbnail(self, context_thumbnail):
"""
Responsible for forwarding the current engine context thumb path to js.
This method knows about the structure of the JSON that the js side expects.
"""
# encode the python dict as json
json_context_thumbnail = json.dumps(context_thumbnail)
self.logger.debug("Sending context thumb path: %s" % json_context_thumbnail)
self._io.emit("set_context_thumbnail", json_context_thumbnail)
def send_log_file_path(self, log_file):
"""
Responsible for forwarding the current log file path to js.
The path is displayed in errors to help facilitate getting the log to
support teams when problems occur.
"""
json_file_path = json.dumps(log_file)
self.logger.debug("Sending log file path: %s" % json_file_path)
self._io.emit("set_log_file_path", json_file_path)
def send_unknown_context(self):
"""
Sent when a context can not be determined for the current file.
"""
self.logger.debug("Alerting js that there is no context")
self._io.emit("set_unknown_context")
def context_about_to_change(self):
"""
Sent just before the context is about to change.
"""
self.logger.debug("Sending context about to change message.")
self._io.emit("context_about_to_change")
def save_as(self, doc, file_path):
"""
Performs a save-as operation on the given document, saving to the
given file path. The purpose of this method is to abstract away the
additional processing required to save a .psb file, as compared to
a more-typical .psd file save-as.
:param doc: The document to be saved.
:param str file_path: The destination file path.
"""
(root, ext) = os.path.splitext(file_path)
if ext.lower() == ".psb":
self._save_as_psb(file_path)
else:
doc.saveAs(self.File(file_path))
##########################################################################################
# internal methods
def _forward_active_document_changed(self, response):
"""
Forwards the notification that the host application's active document
has changed.
:param response: The data received with the message. This
is disregarded.
"""
self.logger.debug("Emitting active_document_changed signal.")
response = json.loads(response)
self.active_document_changed.emit(response.get("active_document_path"))
def _forward_command(self, response):
"""
Forwards the received command on as a Qt Signal.
:param response: The data received with the message. This
will take the form of a JSON encoded integeter
that is the unique id of the command to be called.
"""
self.logger.debug("Emitting command_received signal.")
self.command_received.emit(int(json.loads(response)))
def _forward_logging(self, response):
"""
Forwards the logging request received as a Qt Signal.
:param response: The data received with the message. This will
take the form of a JSON encoded dictionary with
"level" and "message" keys containing the severity
level of the logging message, and the message itself,
respectively.
"""
response = json.loads(response)
self.logging_received.emit(
response.get("level"),
response.get("message"),
)
def _forward_run_tests(self, response):
"""
Forwards the request for tests to be run as a Qt Signal.
:param response: The data received with the message. This
is disregarded.
"""
self.logger.debug("Emitting run_tests_request_received signal.")
self.run_tests_request_received.emit()
def _forward_state_request(self, response):
"""
Forwards the request for state as a QtSignal.
:param response: The data received with the message. This
is disregarded.
"""
self.logger.debug("Emitting state_requested signal.")
self.state_requested.emit()
def _save_as_psb(self, file_path):
"""
Saves a PSB file.
:param str file_path: The PSB file path to save to.
"""
# script listener generates this sequence of statements.
# var idsave = charIDToTypeID( "save" );
# var desc29 = new ActionDescriptor();
# var idAs = charIDToTypeID( "As " );
# var desc30 = new ActionDescriptor();
# var idmaximizeCompatibility = stringIDToTypeID( "maximizeCompatibility" );
# desc30.putBoolean( idmaximizeCompatibility, true );
# var idPhteight = charIDToTypeID( "Pht8" );
# desc29.putObject( idAs, idPhteight, desc30 );
# var idIn = charIDToTypeID( "In " );
# desc29.putPath( idIn, new File( "/Users/boismej/Downloads/Untitled-1 copy.psd" ) );
# ... // Omitting parameters that don't concern us. We'll use the defaults for these.
# executeAction( idsave, desc29, DialogModes.NO );
#
# Note: There are instances where PSBs are saved using Pht3 instead. Haven't been able to
# isolate why. Pht3 stands for photoshop35Format according to documentation, but PSBs were
# introduced in CS1 (aka 8.0). It might be that this value is ignored by Photoshop when the
# extension is PSB? However, it's not clear why saving an empty canvas sometimes saves with
# pht8 and sometimes pht3.
desc_29 = self.ActionDescriptor()
id_save = self.charIDToTypeID("save")
id_as = self.charIDToTypeID("As ")
desc_30 = self.ActionDescriptor()
id_max_compatibility = self.stringIDToTypeID("maximizeCompatibility")
id_pht_8 = self.charIDToTypeID("Pht8")
desc_30.putBoolean(id_max_compatibility, True)
desc_29.putObject(id_as, id_pht_8, desc_30)
id_in = self.charIDToTypeID("In ")
desc_29.putPath(id_in, self.File(file_path))
self.executeAction(id_save, desc_29, self.DialogModes.NO)
@timeout(SHOTGUN_ADOBE_RESPONSE_TIMEOUT, "Timed out waiting for response.")
def _wait_for_response(self, uid):
"""
Waits for the results of an RPC call. A timeout is attached to this
operation equal to the number of seconds defined in the
SHOTGUN_ADOBE_RESPONSE_TIMEOUT environment variable, or 300 seconds
if that is not defined.
:param int uid: The unique id of the RPC call to wait for.
:returns: The raw returned results data.
"""
return super(AdobeBridge, self)._wait_for_response(uid)
##########################################################################################
# exceptions
class RPCTimeoutError(Exception):
"""
Raised when an RPC event times out.
"""
pass
```
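A hedged wiring sketch: how a bootstrap layer might construct the bridge and react to its Qt signals. The identifier and port values are assumptions:

```python
def connect_bridge(port=8090):
    adobe = AdobeBridge.get_or_create("tk-photoshopcc", port=port)

    def on_logging(level, msg):
        print("[%s] %s" % (level, msg))

    # Signals are plain Qt signals exposed via the MessageEmitter container.
    adobe.logging_received.connect(on_logging)
    adobe.state_requested.connect(adobe.send_unknown_context)
    return adobe
```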
#### File: python/tk_photoshopcc/win_32_api.py
```python
import ctypes
from ctypes import wintypes
############################################################################
# user32.dll
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
SendMessage = ctypes.windll.user32.SendMessageW
SendMessageTimeout = ctypes.windll.user32.SendMessageTimeoutW
GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
SetParent = ctypes.windll.user32.SetParent
RealGetWindowClass = ctypes.windll.user32.RealGetWindowClassW
EnableWindow = ctypes.windll.user32.EnableWindow
IsWindowEnabled = ctypes.windll.user32.IsWindowEnabled
GetWindowLong = ctypes.windll.user32.GetWindowLongW
SetWindowLong = ctypes.windll.user32.SetWindowLongW
############################################################################
# kernel32.dll
CloseHandle = ctypes.windll.kernel32.CloseHandle
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32FirstW
Process32Next = ctypes.windll.kernel32.Process32NextW
############################################################################
# globals
TH32CS_SNAPPROCESS = 0x00000002
WM_GETTEXT = 0x000D
SMTO_ABORTIFHUNG = 0x0002
SMTO_BLOCK = 0x0001
GWL_EXSTYLE = -20
WS_EX_NOPARENTNOTIFY = 0x00000004
WS_EX_NOINHERITLAYOUT = 0x00100000
############################################################################
# structures
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [
("dwSize", ctypes.wintypes.DWORD),
("cntUsage", ctypes.wintypes.DWORD),
("th32ProcessID", ctypes.wintypes.DWORD),
("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
("th32ModuleID", ctypes.wintypes.DWORD),
("cntThreads", ctypes.wintypes.DWORD),
("th32ParentProcessID", ctypes.wintypes.DWORD),
("pcPriClassBase", ctypes.c_long),
("dwFlags", ctypes.wintypes.DWORD),
("szExeFile", ctypes.c_wchar * ctypes.wintypes.MAX_PATH),
]
############################################################################
# functions
def find_parent_process_id(process_id):
"""
Find the parent process id for a given process
:param int process_id: ID of the process to find the parent of.
:returns: The parent process id or None if a parent isn't found.
"""
parent_process_id = None
try:
h_process_snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe = PROCESSENTRY32()
pe.dwSize = ctypes.sizeof(PROCESSENTRY32)
ret = Process32First(h_process_snapshot, ctypes.byref(pe))
while ret:
if pe.th32ProcessID == process_id:
parent_process_id = pe.th32ParentProcessID
break
ret = Process32Next(h_process_snapshot, ctypes.byref(pe))
except Exception:
pass
else:
CloseHandle(h_process_snapshot)
return parent_process_id
def safe_get_window_text(hwnd):
"""
Safely get the window text (title) of a specified window.
:param hwnd: Window handle to get the text of.
:returns: Window title, if found, otherwise an empty string.
"""
title = ""
try:
buffer_sz = 1024
unicode_buffer = ctypes.create_unicode_buffer(buffer_sz)
result = SendMessageTimeout(
hwnd,
WM_GETTEXT,
buffer_sz,
ctypes.byref(unicode_buffer),
SMTO_ABORTIFHUNG | SMTO_BLOCK,
100,
0,
)
if result != 0:
title = unicode_buffer.value
    except Exception:
pass
return title
def find_windows(process_id=None, class_name=None, window_text=None, stop_if_found=True):
"""
Find top-level windows matching certain criteria.
:param int process_id: Only match windows that belong to this process id if specified.
:param str class_name: Only match windows that match this class name if specified.
:param str window_text: Only match windows that match this window text if specified.
:param bool stop_if_found: Stop as soon as a match is found.
:returns: List of window handles found by search
"""
found_hwnds = []
# sub-function used to actually enumerate the windows in EnumWindows
def enum_windows_proc(hwnd, lparam):
# try to match process id:
matches_proc_id = True
        if process_id is not None:
win_process_id = ctypes.c_long()
GetWindowThreadProcessId(hwnd, ctypes.byref(win_process_id))
matches_proc_id = (win_process_id.value == process_id)
if not matches_proc_id:
return True
# try to match class name:
matches_class_name = True
        if class_name is not None:
buffer_len = 1024
unicode_buffer = ctypes.create_unicode_buffer(buffer_len)
RealGetWindowClass(hwnd, unicode_buffer, buffer_len)
matches_class_name = (class_name == unicode_buffer.value)
if not matches_class_name:
return True
# try to match window text:
matches_window_text = True
        if window_text is not None:
hwnd_text = safe_get_window_text(hwnd)
matches_window_text = (window_text in hwnd_text)
if not matches_window_text:
return True
# found a match
found_hwnds.append(hwnd)
return not stop_if_found
# enumerate all top-level windows:
EnumWindows(EnumWindowsProc(enum_windows_proc), None)
return found_hwnds
def qwidget_winid_to_hwnd(winid):
"""
Convert the winid for a QWidget to an HWND.
:param int winid: The QWidget winid to convert.
:returns: Window handle
"""
# Setup arguments and return types.
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
# Convert PyCObject to a void pointer.
hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(winid)
return hwnd
```
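A hypothetical usage sketch of the helpers above (Windows only), assuming the module is importable as `win_32_api` and using the current interpreter's process id purely for illustration:
```python
# Hypothetical usage: list this process's top-level window titles and
# look up its parent process id. Windows only.
import os
import win_32_api

hwnds = win_32_api.find_windows(process_id=os.getpid(), stop_if_found=False)
for hwnd in hwnds:
    print(win_32_api.safe_get_window_text(hwnd))

parent_pid = win_32_api.find_parent_process_id(os.getpid())
print("parent process id: %s" % parent_pid)
```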
#### File: tk-shotgun-folders/v0.1.6/app.py
```python
from tank.platform import Application
import tank
class CreateFolders(Application):
def init_app(self):
deny_permissions = self.get_setting("deny_permissions")
deny_platforms = self.get_setting("deny_platforms")
p = {
"title": "Create Folders",
"deny_permissions": deny_permissions,
"deny_platforms": deny_platforms,
"supports_multiple_selection": True
}
self.engine.register_command("create_folders", self.create_folders, p)
p = {
"title": "Preview Create Folders",
"deny_permissions": deny_permissions,
"deny_platforms": deny_platforms,
"supports_multiple_selection": True
}
self.engine.register_command("preview_folders", self.preview_create_folders, p)
def _add_plural(self, word, items):
"""
        Appends an 's' if items > 1.
"""
if items > 1:
return "%ss" % word
else:
return word
def preview_create_folders(self, entity_type, entity_ids):
if len(entity_ids) == 0:
self.log_info("No entities specified!")
return
paths = []
try:
paths.extend( self.tank.preview_filesystem_structure(entity_type, entity_ids) )
        except tank.TankError as tank_error:
# tank errors are errors that are expected and intended for the user
self.log_error(tank_error)
        except Exception:
            # other errors are not expected and probably bugs - here it's useful to log a callstack.
self.log_exception("Error when previewing folders!")
else:
# success! report back to user
if len(paths) == 0:
self.log_info("*No folders would be generated on disk for this item!*")
else:
self.log_info("*Creating folders would generate %d items on disk:*" % len(paths))
self.log_info("")
for p in paths:
self.log_info(p)
self.log_info("")
self.log_info("Note that some of these folders may exist on disk already.")
def create_folders(self, entity_type, entity_ids):
if len(entity_ids) == 0:
self.log_info("No entities specified!")
return
entities_processed = 0
try:
entities_processed = self.tank.create_filesystem_structure(entity_type, entity_ids)
        except tank.TankError as tank_error:
# tank errors are errors that are expected and intended for the user
self.log_error(tank_error)
        except Exception:
            # other errors are not expected and probably bugs - here it's useful to log a callstack.
self.log_exception("Error when creating folders!")
else:
# report back to user
self.log_info("%d %s processed - "
"Processed %d folders on disk." % (len(entity_ids),
self._add_plural(entity_type, len(entity_ids)),
entities_processed))
```
#### File: tk-shotgun-launchfolder/v0.1.5/app.py
```python
import tank
import sys
import os
class LaunchFolder(tank.platform.Application):
def init_app(self):
entity_types = self.get_setting("entity_types")
deny_permissions = self.get_setting("deny_permissions")
deny_platforms = self.get_setting("deny_platforms")
p = {
"title": "Show in File System",
"deny_permissions": deny_permissions,
"deny_platforms": deny_platforms,
"supports_multiple_selection": True
}
self.engine.register_command("show_in_filesystem", self.show_in_filesystem, p)
def launch(self, path):
self.log_debug("Launching file system viewer for folder %s" % path)
# get the setting
system = sys.platform
# run the app
if system == "linux2":
cmd = 'xdg-open "%s"' % path
elif system == "darwin":
cmd = 'open "%s"' % path
elif system == "win32":
cmd = 'cmd.exe /C start "Folder" "%s"' % path
else:
raise Exception("Platform '%s' is not supported." % system)
self.log_debug("Executing command '%s'" % cmd)
exit_code = os.system(cmd)
if exit_code != 0:
self.log_error("Failed to launch '%s'!" % cmd)
def show_in_filesystem(self, entity_type, entity_ids):
"""
Pop up a filesystem finder window for each folder associated
with the given entity ids.
"""
paths = []
for eid in entity_ids:
# Use the path cache to look up all paths linked to the task's entity
context = self.tank.context_from_entity(entity_type, eid)
paths.extend( context.filesystem_locations )
if len(paths) == 0:
self.log_info("No location exists on disk yet for any of the selected entities. "
"Please use shotgun to create folders and then try again!")
else:
# launch folder windows
for x in paths:
self.launch(x)
```
#### File: tk-shotgun-launchpublish/v0.3.0/app.py
```python
from tank.platform import Application
from tank import TankError
import tank
import sys
import os
import re
import urllib2
class LaunchPublish(Application):
@property
def context_change_allowed(self):
"""
Returns whether this app allows on-the-fly context changes without
needing itself to be restarted.
:rtype: bool
"""
return True
def init_app(self):
deny_permissions = self.get_setting("deny_permissions")
deny_platforms = self.get_setting("deny_platforms")
p = {
"title": "Open in Associated Application",
"deny_permissions": deny_permissions,
"deny_platforms": deny_platforms,
"supports_multiple_selection": False
}
self.engine.register_command("launch_publish", self.launch_publish, p)
def launch(self, path):
self.log_debug("Launching default system viewer for file %s" % path)
# get the setting
system = sys.platform
# run the app
if system == "linux2":
cmd = 'xdg-open "%s"' % path
elif system == "darwin":
cmd = 'open "%s"' % path
elif system == "win32":
cmd = 'cmd.exe /C start "file" "%s"' % path
else:
raise Exception("Platform '%s' is not supported." % system)
self.log_debug("Executing command '%s'" % cmd)
exit_code = os.system(cmd)
if exit_code != 0:
self.log_error("Failed to launch '%s'!" % cmd)
def _launch_viewer(self, path):
"""
Launches an image viewer based on config settings.
We assume that the path to the image is just passed as a param to the viewer.
This seems to be standard for most apps.
"""
# get the setting
system = sys.platform
try:
app_setting = {"linux2": "viewer_path_linux",
"darwin": "viewer_path_mac",
"win32": "viewer_path_windows"}[system]
app_path = self.get_setting(app_setting)
if not app_path:
raise KeyError()
except KeyError:
raise Exception("Platform '%s' is not supported." % system)
# run the app
if system.startswith("linux"):
cmd = '%s "%s" &' % (app_path, path)
elif system == "darwin":
cmd = 'open -n "%s" --args "%s"' % (app_path, path)
elif system == "win32":
cmd = 'start /B "Maya" "%s" "%s"' % (app_path, path)
else:
raise Exception("Platform '%s' is not supported." % system)
self.log_debug("Executing launch command '%s'" % cmd)
exit_code = os.system(cmd)
if exit_code != 0:
self.log_error("Failed to launch Viewer! This is most likely because the path "
"to the viewer executable is not set to a correct value. The "
"current value is '%s' - please double check that this path "
"is valid and update as needed in this app's configuration. "
"If you have any questions, don't hesitate to contact support "
"on <EMAIL>." % app_path )
def launch_publish(self, entity_type, entity_ids):
published_file_entity_type = tank.util.get_published_file_entity_type(self.tank)
if entity_type not in [published_file_entity_type, "Version"]:
raise Exception("Sorry, this app only works with entities of type %s or Version." % published_file_entity_type)
if len(entity_ids) != 1:
raise Exception("Action only accepts a single item.")
if entity_type == "Version":
# entity is a version so try to get the id
# of the published file it is linked to:
if published_file_entity_type == "PublishedFile":
v = self.shotgun.find_one("Version", [["id", "is", entity_ids[0]]], ["published_files"])
if not v.get("published_files"):
self.log_error("Sorry, this can only be used on Versions with an associated Published File.")
return
publish_id = v["published_files"][0]["id"]
            else:  # published_file_entity_type == "TankPublishedFile"
v = self.shotgun.find_one("Version", [["id", "is", entity_ids[0]]], ["tank_published_file"])
if not v.get("tank_published_file"):
self.log_error("Sorry, this can only be used on Versions with an associated Published File.")
return
publish_id = v["tank_published_file"]["id"]
else:
publish_id = entity_ids[0]
# first get the path to the file on the local platform
d = self.shotgun.find_one(published_file_entity_type, [["id", "is", publish_id]], ["path", "task", "entity"])
path_on_disk = d.get("path").get("local_path")
# If this PublishedFile came from a zero config publish, it will
# have a file URL rather than a local path.
if path_on_disk is None:
path_on_disk = d.get("path").get("url")
if path_on_disk is not None:
# We might have something like a %20, which needs to be
# unquoted into a space, as an example.
if "%" in path_on_disk:
path_on_disk = urllib2.unquote(path_on_disk)
# If this came from a file url via a zero-config style publish
# then we'll need to remove that from the head in order to end
# up with the local disk path to the file.
#
# On Windows, we will have a path like file:///E:/path/to/file.jpg
# and we need to ditch all three of the slashes at the head. On
# other operating systems it will just be file:///path/to/file.jpg
# and we will want to keep the leading slash.
if sys.platform.startswith("win"):
pattern = r"^file:///"
else:
pattern = r"^file://"
path_on_disk = re.sub(pattern, "", path_on_disk)
        else:
            self.log_error("Unable to determine the path on disk for entity id=%s." % publish_id)
            return
# first check if we should pass this to the viewer
# hopefully this will cover most image sequence types
# any image sequence types not passed to the viewer
# will fail later when we check if the file exists on disk
for x in self.get_setting("viewer_extensions", {}):
if path_on_disk.endswith(".%s" % x):
self._launch_viewer(path_on_disk)
return
# check that it exists
if not os.path.exists(path_on_disk):
self.log_error("The file associated with this publish, "
"%s, cannot be found on disk!" % path_on_disk)
return
# now get the context - try to be as inclusive as possible here:
# start with the task, if that doesn't work, fall back onto the path
# this is because some paths don't include all the metadata that
        # is contained inside the publish record (e.g. typically not the task)
if d.get("task"):
ctx = self.tank.context_from_entity("Task", d.get("task").get("id"))
else:
ctx = self.tank.context_from_path(path_on_disk)
# call out to the hook
try:
launched = self.execute_hook("hook_launch_publish",
path=path_on_disk,
context=ctx,
associated_entity=d.get("entity"))
        except TankError as e:
self.log_error("Failed to launch an application for this published file: %s" % e)
return
if not launched:
# hook didn't know how to launch this
# just use std associated file launch
self.launch(path_on_disk)
```
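The platform-dependent `file://` stripping in `launch_publish` is easy to sanity-check in isolation; a standalone illustration with made-up paths:
```python
# Standalone illustration of the file-URL stripping logic above; the
# example paths are hypothetical.
import re

def strip_file_url(url, is_windows):
    # On Windows, file:///E:/path/to/file.jpg loses all three slashes;
    # on POSIX, the path keeps its leading slash.
    pattern = r"^file:///" if is_windows else r"^file://"
    return re.sub(pattern, "", url)

print(strip_file_url("file:///E:/shots/sh010/comp_v003.jpg", True))
# -> E:/shots/sh010/comp_v003.jpg
print(strip_file_url("file:///shots/sh010/comp_v003.jpg", False))
# -> /shots/sh010/comp_v003.jpg
```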
#### File: developer/utils/caching.py
```python
import os
import glob
import shutil
import stat
from tank.util import filesystem
from tank.platform import environment
from tank.descriptor import Descriptor, create_descriptor
from tank import LogManager
logger = LogManager.get_logger("utils.caching")
def _cache_descriptor(sg, desc_type, desc_dict, target_path):
"""
Cache the given descriptor into a new bundle cache.
:param sg: Shotgun API instance
:param desc_type: Descriptor.ENGINE | Descriptor.APP | Descriptor.FRAMEWORK
:param desc_dict: descriptor dict or uri
:param target_path: bundle cache root to cache into
"""
desc = create_descriptor(sg, desc_type, desc_dict, fallback_roots=[target_path])
desc.ensure_local()
desc_size_kb = filesystem.compute_folder_size(desc.get_path()) / 1024
logger.info("Caching %s into plugin bundle cache (size %d KiB)" % (desc, desc_size_kb))
if not desc._io_descriptor.is_immutable():
logger.warning("Descriptor %r may not work for other users using the plugin!" % desc)
desc.clone_cache(target_path)
def cache_apps(sg_connection, cfg_descriptor, bundle_cache_root):
"""
Iterates over all environments within the given configuration descriptor
and caches all items into the bundle cache root.
:param sg_connection: Shotgun connection
:param cfg_descriptor: Config descriptor
:param bundle_cache_root: Root where to cache payload
"""
# introspect the config and cache everything
logger.info("Introspecting environments...")
env_path = os.path.join(cfg_descriptor.get_path(), "env")
# find all environment files
env_filenames = []
for filename in os.listdir(env_path):
if filename.endswith(".yml"):
            # found an environment file
logger.info("> found %s" % filename)
env_filenames.append(os.path.join(env_path, filename))
# traverse and cache
for env_path in env_filenames:
logger.info("Processing %s..." % env_path)
env = environment.Environment(env_path)
for eng in env.get_engines():
# resolve descriptor and clone cache into bundle cache
_cache_descriptor(
sg_connection,
Descriptor.ENGINE,
env.get_engine_descriptor_dict(eng),
bundle_cache_root
)
for app in env.get_apps(eng):
# resolve descriptor and clone cache into bundle cache
_cache_descriptor(
sg_connection,
Descriptor.APP,
env.get_app_descriptor_dict(eng, app),
bundle_cache_root
)
for framework in env.get_frameworks():
_cache_descriptor(
sg_connection,
Descriptor.FRAMEWORK,
env.get_framework_descriptor_dict(framework),
bundle_cache_root
)
logger.info("Total size of bundle cache: %d KiB" % (filesystem.compute_folder_size(bundle_cache_root) / 1024))
def _on_rm_error(func, path, exc_info):
# On Windows, Python's shutil can't delete read-only files, so if we were trying to delete one,
# remove the flag.
# Inspired by http://stackoverflow.com/a/4829285/1074536
if func == os.unlink:
os.chmod(path, stat.S_IWRITE)
func(path)
else:
# Raise the exception, something else went wrong.
raise exc_info[1]
def wipe_folder(folder):
"""
    Deletes the given folder recursively.
    This will take care of clearing any read-only permissions that would otherwise prevent deletion.
    :param str folder: Path to the folder to wipe.
"""
shutil.rmtree(folder, onerror=_on_rm_error)
def cleanup_bundle_cache(bundle_cache_root):
"""
Cleans up the bundle cache from any stray files that should not be shipped.
This includes:
    - .git folders inside git descriptors.
    - tests folders inside bundles.
"""
logger.info("")
glob_patterns = [
os.path.join(
bundle_cache_root,
"git*", # Grabs all git descriptors
"*", # Grabs all bundles inside those descriptors
"*", # Grabs all commits inside those bundles
".git" # Grabs all git files inside those commits.
),
os.path.join(
bundle_cache_root,
"*", # Grabs all descriptor types
"*", # Grabs all bundles inside those descriptors
"*", # Grabs all commits inside those bundles
"tests" # Grabs all tests folders.
)
]
for glob_pattern in glob_patterns:
for folder_to_remove in glob.glob(glob_pattern):
logger.info("Removing %s...", folder_to_remove)
wipe_folder(folder_to_remove)
```
#### File: core/hooks/cache_location.py
```python
import sgtk
import os
from sgtk.util import filesystem, LocalFileStorageManager
HookBaseClass = sgtk.get_hook_baseclass()
log = sgtk.LogManager.get_logger(__name__)
class CacheLocation(HookBaseClass):
"""
Hook to control cache folder creation.
For further details, see individual cache methods below.
"""
def get_path_cache_path(self, project_id, plugin_id, pipeline_configuration_id):
"""
Establish a location for the path cache database file.
This hook method was introduced in Toolkit v0.18 and replaces path_cache.
If you already have implemented path_cache, this will be detected and called instead,
however we strongly recommend that you tweak your hook.
Overriding this method in a hook allows a user to change the location on disk where
the path cache file is located. The path cache file holds a temporary cache representation
of the FilesystemLocation entities stored in Shotgun for a project. Typically, this cache
is stored on a local machine, separate for each user.
Note! In the case of the site configuration, project id will be set to None.
In the case of an unmanaged pipeline configuration, pipeline config
id will be set to None.
:param project_id: The shotgun id of the project to store caches for
:param plugin_id: Unique string to identify the scope for a particular plugin
or integration. For more information,
see :meth:`~sgtk.bootstrap.ToolkitManager.plugin_id`. For
non-plugin based toolkit projects, this value is None.
:param pipeline_configuration_id: The shotgun pipeline config id to store caches for
:returns: The path to a path cache file. This file should exist when this method returns.
"""
# backwards compatibility with custom hooks created before 0.18
if hasattr(self, "path_cache") and callable(getattr(self, "path_cache")):
# there is a custom version of the legacy hook path_cache
log.warning(
"Detected old core cache hook implementation. "
"It is strongly recommended that this is upgraded."
)
# call legacy hook to make sure we call the custom
# implementation that is provided by the user.
# this implementation expects project id 0 for
# the site config, so ensure that's the case too
if project_id is None:
project_id = 0
return self.path_cache(project_id, pipeline_configuration_id)
cache_filename = "path_cache.db"
tk = self.parent
cache_root = LocalFileStorageManager.get_configuration_root(
tk.shotgun_url,
project_id,
plugin_id,
pipeline_configuration_id,
LocalFileStorageManager.CACHE
)
target_path = os.path.join(cache_root, cache_filename)
if os.path.exists(target_path):
# new style path cache file exists, return it
return target_path
# The target path does not exist. This could be because it just hasn't
# been created yet, or it could be because of a core upgrade where the
# cache root directory structure has changed (such is the case with
# v0.17.x -> v0.18.x). To account for this scenario, see if the target
# exists in an old location first, and if so, return that path instead.
legacy_cache_root = LocalFileStorageManager.get_configuration_root(
tk.shotgun_url,
project_id,
plugin_id,
pipeline_configuration_id,
LocalFileStorageManager.CACHE,
generation=LocalFileStorageManager.CORE_V17
)
legacy_target_path = os.path.join(legacy_cache_root, cache_filename)
if os.path.exists(legacy_target_path):
# legacy path cache file exists, return it
return legacy_target_path
        # neither new style nor legacy path cache exists. use the new style
filesystem.ensure_folder_exists(cache_root)
filesystem.touch_file(target_path)
return target_path
def get_bundle_data_cache_path(self, project_id, plugin_id, pipeline_configuration_id, bundle):
"""
Establish a cache folder for an app, engine or framework.
This hook method was introduced in Toolkit v0.18 and replaces bundle_cache.
If you already have implemented bundle_cache, this will be detected and called instead,
however we strongly recommend that you tweak your hook.
        Apps, Engines or Frameworks commonly cache data on disk. This can be
        small files, shotgun queries, thumbnails etc. This method implements the
        logic which defines this location on disk. The cache should be organized in
        a way so that all instances of the app can re-use the same data. (Apps
        which need to cache things per-instance can implement this using a sub
        folder inside the bundle cache location).
:param project_id: The shotgun id of the project to store caches for
:param plugin_id: Unique string to identify the scope for a particular plugin
or integration. For more information,
see :meth:`~sgtk.bootstrap.ToolkitManager.plugin_id`. For
non-plugin based toolkit projects, this value is None.
:param pipeline_configuration_id: The shotgun pipeline config id to store caches for
:param bundle: The app, engine or framework object which is requesting the cache folder.
:returns: The path to a folder which should exist on disk.
"""
# backwards compatibility with custom hooks created before 0.18
if hasattr(self, "bundle_cache") and callable(getattr(self, "bundle_cache")):
            # there is a custom version of the legacy hook bundle_cache
log.warning(
"Detected old core cache hook implementation. "
"It is strongly recommended that this is upgraded."
)
# call legacy hook to make sure we call the custom
# implementation that is provided by the user.
# this implementation expects project id 0 for
# the site config, so ensure that's the case too
if project_id is None:
project_id = 0
return self.bundle_cache(project_id, pipeline_configuration_id, bundle)
tk = self.parent
cache_root = LocalFileStorageManager.get_configuration_root(
tk.shotgun_url,
project_id,
plugin_id,
pipeline_configuration_id,
LocalFileStorageManager.CACHE
)
        # in the interest of trying to minimize path lengths (to avoid
        # the MAX_PATH limit on Windows), we apply some shortcuts
# if the bundle is a framework, we shorten it:
# tk-framework-shotgunutils --> fw-shotgunutils
# if the bundle is a multi-app, we shorten it:
# tk-multi-workfiles2 --> tm-workfiles2
bundle_name = bundle.name
bundle_name = bundle_name.replace("tk-framework-", "fw-")
bundle_name = bundle_name.replace("tk-multi-", "tm-")
target_path = os.path.join(cache_root, bundle_name)
if os.path.exists(target_path):
# new style cache bundle folder exists, return it
return target_path
# The target path does not exist. This could be because it just hasn't
# been created yet, or it could be because of a core upgrade where the
# cache root directory structure has changed (such is the case with
# v0.17.x -> v0.18.x). To account for this scenario, see if the target
# exists in an old location first, and if so, return that path instead.
legacy_cache_root = LocalFileStorageManager.get_configuration_root(
tk.shotgun_url,
project_id,
plugin_id,
pipeline_configuration_id,
LocalFileStorageManager.CACHE,
generation=LocalFileStorageManager.CORE_V17
)
legacy_target_path = os.path.join(legacy_cache_root, bundle.name)
if os.path.exists(legacy_target_path):
# legacy cache bundle folder exists, return it
return legacy_target_path
        # neither new style nor legacy cache folder exists. use the new style
filesystem.ensure_folder_exists(target_path)
return target_path
```
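The bundle-name shortening applied in `get_bundle_data_cache_path` can be shown standalone; the sample names below are illustrative:
```python
# The name-shortening rules from get_bundle_data_cache_path, applied
# to a few illustrative bundle names.
for name in ("tk-framework-shotgunutils", "tk-multi-workfiles2", "tk-maya"):
    short = name.replace("tk-framework-", "fw-").replace("tk-multi-", "tm-")
    print("%s -> %s" % (name, short))
# tk-framework-shotgunutils -> fw-shotgunutils
# tk-multi-workfiles2 -> tm-workfiles2
# tk-maya -> tk-maya (unchanged)
```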
#### File: core/hooks/example_template_hook.py
```python
from tank import Hook
import os
class ProceduralTemplateEvaluator(Hook):
def execute(self, setting, bundle_obj, extra_params, **kwargs):
"""
Example pass-through implementation. One option is expected in extra_params,
and this will be returned.
So the following two things will evaluate to the same thing:
> template_snapshot: maya_shot_publish
> template_snapshot: hook:example_template_hook:maya_shot_publish
        :param setting: The name of the setting for which we are evaluating.
In our example above, it would be template_snapshot.
:param bundle_obj: The app, engine or framework object that the setting
is associated with.
:param extra_params: List of options passed from the setting. If the settings
string is "hook:hook_name:foo:bar", extra_params would
be ['foo', 'bar']
        :returns: The name of a template, as a string.
"""
template_name = extra_params[0]
return template_name
```
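How a setting string ends up in `extra_params` can be sketched as follows; the parsing below illustrates the documented `hook:hook_name:opt1:opt2` syntax and is not the core's actual resolver:
```python
# Sketch of the "hook:<hook_name>:<opt>..." syntax handled above; this
# parser is illustrative, not the actual core resolver.
setting_value = "hook:example_template_hook:maya_shot_publish"
parts = setting_value.split(":")
assert parts[0] == "hook"
hook_name = parts[1]      # example_template_hook
extra_params = parts[2:]  # ['maya_shot_publish']
# The pass-through hook above would return extra_params[0],
# i.e. "maya_shot_publish".
print("%s %s" % (hook_name, extra_params))
```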
#### File: core/hooks/get_current_login.py
```python
from tank import Hook
import os, sys
class GetCurrentLogin(Hook):
def execute(self, **kwargs):
"""
Return the login name for the user currently logged in. This is typically used
by Toolkit to resolve against the 'login' field in the Shotgun users table in order
to extract further metadata.
"""
if sys.platform == "win32":
# http://stackoverflow.com/questions/117014/how-to-retrieve-name-of-current-windows-user-ad-or-local-using-python
return os.environ.get("USERNAME", None)
else:
try:
import pwd
pwd_entry = pwd.getpwuid(os.geteuid())
return pwd_entry[0]
except:
return None
```
#### File: tank/authentication/console_authentication.py
```python
from __future__ import print_function
from . import session_cache
from .. import LogManager
from .errors import AuthenticationError, AuthenticationCancelled
from tank_vendor.shotgun_api3 import MissingTwoFactorAuthenticationFault
from ..util.shotgun.connection import sanitize_url
from getpass import getpass
logger = LogManager.get_logger(__name__)
class ConsoleAuthenticationHandlerBase(object):
"""
Base class for authenticating on the console. It will take care of the credential retrieval loop,
requesting new credentials as long as they are invalid or until the user provides the right one
or cancels the authentication. This class should not be instantiated directly, instead it is used
through the authenticate and renew_session methods.
"""
def authenticate(self, hostname, login, http_proxy):
"""
        Prompts the user for their password to retrieve a new session token and renews
        the session token.
:param hostname: Host to renew a token for.
:param login: User to renew a token for.
:param http_proxy: Proxy to use for the request. Can be None.
:returns: The (hostname, login, session token) tuple.
:raises AuthenticationCancelled: If the user aborts the login process, this exception
is raised.
"""
logger.debug("Requesting password on command line.")
while True:
# Get the credentials from the user
try:
hostname, login, password = self._get_user_credentials(hostname, login)
except EOFError:
                # Insert a \n on the current line so the print is displayed on a new line.
print()
raise AuthenticationCancelled()
try:
try:
# Try to generate a session token and return the user info.
return hostname, login, session_cache.generate_session_token(
hostname, login, password, http_proxy
)
except MissingTwoFactorAuthenticationFault:
# session_token was None, we need 2fa.
code = self._get_2fa_code()
# Ask again for a token using 2fa this time. If this throws an AuthenticationError because
# the code is invalid or already used, it will be caught by the except clause beneath.
return hostname, login, session_cache.generate_session_token(
hostname, login, password, http_proxy, auth_token=code
)
except AuthenticationError:
                # If any combination of credentials is invalid (user + invalid pass or
                # user + valid pass + invalid 2FA code) we'll end up here.
print("Login failed.")
print()
def _get_user_credentials(self, hostname, login):
"""
        Prompts the user for their credentials.
        :param hostname: Host to authenticate for.
        :param login: User that needs authentication.
:returns: The (hostname, login, plain text password) tuple.
:raises AuthenticationCancelled: If the user cancels the authentication process,
this exception will be thrown.
"""
raise NotImplementedError
def _get_password(self):
"""
        Prompts the user for their password. The password will not be visible on the console.
:returns: Plain text password.
:raises AuthenticationCancelled: If the user enters an empty password, the exception
will be thrown.
"""
password = getpass("Password (empty to abort): ")
if not password:
raise AuthenticationCancelled()
return password
def _read_clean_input(self, text):
"""
Reads a line a text from the keyboard and strips any trailing or tailing
whitespaces.
:param text: Text to display before prompting the user.
:returns: The user's text input.
"""
return raw_input(text).strip()
def _get_keyboard_input(self, label, default_value=""):
"""
Queries for keyboard input.
:param label: The name of the input we require.
:param default_value: The value to use if the user has entered no input.
:returns: The user input or default_value if nothing was entered.
"""
text = label
if default_value:
text += " [%s]" % default_value
text += ": "
user_input = None
while not user_input:
user_input = self._read_clean_input(text) or default_value
        # _read_clean_input already strips surrounding whitespace.
return user_input
def _get_2fa_code(self):
"""
        Prompts the user for their 2FA code.
:returns: Two factor authentication code.
:raises AuthenticationCancelled: If the user enters an empty code, the exception will be
thrown.
"""
code = self._read_clean_input("Two factor authentication code (empty to abort): ")
if not code:
raise AuthenticationCancelled()
return code
class ConsoleRenewSessionHandler(ConsoleAuthenticationHandlerBase):
"""
Handles session renewal. Prompts for the user's password. This class should
not be instantiated directly and be used through the authenticate and
renew_session methods.
"""
def _get_user_credentials(self, hostname, login):
"""
Reads the user password from the keyboard.
:param hostname: Name of the host we will be logging on.
:param login: Current user
:returns: The (hostname, login, plain text password) tuple.
"""
print("%s, your current session has expired." % login)
print("Please enter your password to renew your session for %s" % hostname)
return hostname, login, self._get_password()
class ConsoleLoginHandler(ConsoleAuthenticationHandlerBase):
"""
Handles username/password authentication. This class should not be
instantiated directly and be used through the authenticate and renew_session
methods.
"""
def __init__(self, fixed_host):
"""
Constructor.
"""
super(ConsoleLoginHandler, self).__init__()
self._fixed_host = fixed_host
def _get_user_credentials(self, hostname, login):
"""
Reads the user credentials from the keyboard.
:param hostname: Name of the host we will be logging on.
:param login: Default value for the login.
        :returns: The (hostname, login, plain text password) tuple.
"""
if self._fixed_host:
print("Please enter your login credentials for %s" % hostname)
        else:
            print("Please enter your login credentials.")
            hostname = self._get_keyboard_input("Host", hostname)
login = self._get_keyboard_input("Login", login)
password = <PASSWORD>()
return sanitize_url(hostname), login, password
```
#### File: tank/authentication/interactive_authentication.py
```python
from __future__ import with_statement
from .errors import AuthenticationCancelled
from .console_authentication import ConsoleLoginHandler, ConsoleRenewSessionHandler
from .ui_authentication import UiAuthenticationHandler
from .. import LogManager
import threading
import sys
import os
logger = LogManager.get_logger(__name__)
###############################################################################################
# internal classes and methods
def _get_current_os_user():
"""
Gets the current operating system username.
:returns: The username string.
"""
if sys.platform == "win32":
# http://stackoverflow.com/questions/117014/how-to-retrieve-name-of-current-windows-user-ad-or-local-using-python
return os.environ.get("USERNAME", None)
else:
try:
import pwd
pwd_entry = pwd.getpwuid(os.geteuid())
return pwd_entry[0]
except:
return None
def _get_qt_state():
"""
Returns the state of Qt: the libraries available and if we have a ui or not.
:returns: If Qt is available, a tuple of (QtCore, QtGui, has_ui_boolean_flag).
Otherwise, (None, None, False)
"""
qt_core = None
qt_gui = None
qapp_instance_active = False
try:
from .ui.qt_abstraction import QtGui, QtCore
qt_core = QtCore
qt_gui = QtGui
qapp_instance_active = (QtGui.QApplication.instance() is not None)
except:
pass
return (qt_core, qt_gui, qapp_instance_active)
class SessionRenewal(object):
"""
Handles multi-threaded session renewal. This class handles the use case when
multiple threads simultaneously try to ask the user for a password.
Use this class by calling the static method renew_session(). Please see this method
for more details.
"""
    # Lock that ensures only one thread at a time can execute the authentication logic.
_renew_session_internal_lock = threading.Lock()
# List of possible states for session renewal.
WAITING, CANCELLED, SUCCESS = range(3)
# When a thread cancels session renewal, this flag is set so other threads know
# to raise an exception as well.
_auth_state = WAITING
# Makes access to the thread count and executing logic based on it thread
# safe.
_renew_session_lock = threading.Lock()
# Number of threads who are trying to renew the session.
_renew_session_thread_count = 0
@staticmethod
def _renew_session_internal(user, credentials_handler):
"""
Prompts the user for the password. This method should never be called directly
and _renew_session should be called instead.
:param user: SessionUserImpl instance of the user that needs its session
renewed.
:param credentials_handler: Object that actually prompts the user for
credentials.
:raises AuthenticationCancelled: Raised if the authentication is cancelled.
"""
logger.debug("About to take the authentication lock.")
with SessionRenewal._renew_session_internal_lock:
logger.debug("Took the authentication lock.")
            # When authentication is cancelled, every thread that enters the authentication
# critical section should throw as well.
if SessionRenewal._auth_state == SessionRenewal.CANCELLED:
raise AuthenticationCancelled()
# If authentication was successful, simply return.
elif SessionRenewal._auth_state == SessionRenewal.SUCCESS:
return
# We're the first thread, so authenticate.
try:
logger.debug("Not authenticated, requesting user input.")
hostname, login, session_token = credentials_handler.authenticate(
user.get_host(),
user.get_login(),
user.get_http_proxy()
)
SessionRenewal._auth_state = SessionRenewal.SUCCESS
logger.debug("Login successful!")
user.set_session_token(session_token)
except AuthenticationCancelled:
SessionRenewal._auth_state = SessionRenewal.CANCELLED
logger.debug("Authentication cancelled")
raise
@staticmethod
def renew_session(user, credentials_handler):
"""
        Prompts the user for the password. This method is thread-safe, meaning if
        multiple threads call this method at the same time, it will keep track of
how many threads are currently running inside it and all threads waiting
for the authentication to complete will return with the same result
as the thread that actually did the authentication, either returning or
raising an exception.
:param user: SessionUser we are re-authenticating.
:param credentials_handler: Object that actually prompts the user for
credentials.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
# One more renewer.
with SessionRenewal._renew_session_lock:
SessionRenewal._renew_session_thread_count += 1
try:
# Renew the session
SessionRenewal._renew_session_internal(user, credentials_handler)
finally:
# We're leaving the method somehow, cleanup!
with SessionRenewal._renew_session_lock:
# Decrement the thread count
SessionRenewal._renew_session_thread_count -= 1
# If we're the last one, clear the cancel flag.
if SessionRenewal._renew_session_thread_count == 0:
SessionRenewal._auth_state = SessionRenewal.WAITING
# At this point, if the method _renew_session_internal simply
# returned, this method returns. If the method raised an exception,
# it will keep being propagated.
###############################################################################################
# public methods
def renew_session(user):
"""
    Prompts the user to enter their password on the console or in a UI to
    retrieve a new session token.
:param user: SessionUser that needs its session token refreshed.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
logger.debug("Credentials were out of date, renewing them.")
QtCore, QtGui, has_ui = _get_qt_state()
# If we have a gui, we need gui based authentication
if has_ui:
authenticator = UiAuthenticationHandler(is_session_renewal=True)
else:
authenticator = ConsoleRenewSessionHandler()
SessionRenewal.renew_session(user, authenticator)
def authenticate(default_host, default_login, http_proxy, fixed_host):
"""
    Prompts the user for their user name and password. If the host is not fixed,
    it is also possible to edit the host. If Qt is available and a QApplication is
    instantiated, a dialog will prompt for user input. If not, the console will
prompt instead.
:param default_host: Default host to present to the user.
:param default_login: Default login to present to the user.
:param http_proxy: Proxy to use to connect to the host.
:param fixed_host: If True, the host won't be editable.
:returns: The (hostname, login, session token) tuple for this authenticated
user.
:raises AuthenticationCancelled: If the user cancels the authentication,
this exception is raised.
"""
# If there is no default login, let's provide the os user's instead.
default_login = default_login or _get_current_os_user()
QtCore, QtGui, has_ui = _get_qt_state()
# If we have a gui, we need gui based authentication
if has_ui:
        authenticator = UiAuthenticationHandler(is_session_renewal=False, fixed_host=fixed_host)
else:
authenticator = ConsoleLoginHandler(fixed_host=fixed_host)
return authenticator.authenticate(default_host, default_login, http_proxy)
```
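The single-prompt guarantee of `SessionRenewal` can be exercised with a few worker threads; in the sketch below, `FakeUser` is a hypothetical stand-in exposing the interface `renew_session` expects, and actually running it would prompt for a password:
```python
# Sketch: several threads hit an expired session at once; renew_session
# serializes them so only one credentials prompt appears. FakeUser is a
# hypothetical stand-in for a real SessionUser implementation.
import threading
from tank.authentication import interactive_authentication

class FakeUser(object):
    def get_host(self):
        return "https://example.shotgunstudio.com"  # hypothetical site
    def get_login(self):
        return "jane.doe"                           # hypothetical login
    def get_http_proxy(self):
        return None
    def set_session_token(self, token):
        print("new session token stored")

user = FakeUser()
threads = [
    threading.Thread(target=interactive_authentication.renew_session, args=(user,))
    for _ in range(4)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```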
#### File: tank/authentication/invoker.py
```python
from .. import LogManager
logger = LogManager.get_logger(__name__)
def create():
"""
Create the object used to invoke function calls on the main thread when
called from a different thread.
You typically use this method like this:
def show_ui():
# show QT dialog
dlg = MyQtDialog()
result = dlg.exec_()
return result
# create invoker object
my_invoker = invoker.create()
# launch dialog - invoker ensures that the UI
# gets launched in the main thread
result = my_invoker(show_ui)
    :returns: Invoker instance. If Qt is not available or there is no UI, a
              simple pass-through method that executes the code in the same
              thread will be produced.
"""
from .ui.qt_abstraction import QtCore, QtGui
# If we are already in the main thread, no need for an invoker, invoke directly in this thread.
if QtCore.QThread.currentThread() == QtGui.QApplication.instance().thread():
return lambda fn, *args, **kwargs: fn(*args, **kwargs)
class MainThreadInvoker(QtCore.QObject):
"""
        Class that allows sending messages to the main thread. This can be useful
when a background thread needs to prompt the user via a dialog. The
method passed into the invoker will be invoked on the main thread and
the result, either a return value or exception, will be brought back
to the invoking thread as if it was the thread that actually executed
the code.
"""
def __init__(self):
"""
Constructor.
"""
QtCore.QObject.__init__(self)
self._res = None
self._exception = None
# Make sure that the invoker is bound to the main thread
self.moveToThread(QtGui.QApplication.instance().thread())
def __call__(self, fn, *args, **kwargs):
"""
            Asks the MainThreadInvoker to call a function with the provided parameters in the main
thread.
:param fn: Function to call in the main thread.
:param args: Array of arguments for the method.
:param kwargs: Dictionary of named arguments for the method.
:returns: The result from the function.
"""
self._fn = lambda: fn(*args, **kwargs)
self._res = None
logger.debug("Sending ui request to main thread.")
QtCore.QMetaObject.invokeMethod(self, "_do_invoke", QtCore.Qt.BlockingQueuedConnection)
# If an exception has been thrown, rethrow it.
if self._exception:
raise self._exception
return self._res
@QtCore.Slot()
def _do_invoke(self):
"""
Execute function and return result
"""
try:
logger.debug("Invoking from main thread.")
self._res = self._fn()
except Exception as e:
self._exception = e
return MainThreadInvoker()
```
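A hedged usage sketch of the invoker from a worker thread; `ask_question` is hypothetical and a `QApplication` must already be running on the main thread:
```python
# Sketch: call a UI function on the main thread from a worker thread.
# ask_question is hypothetical; a QApplication must already exist.
import threading
from tank.authentication import invoker

def ask_question():
    from tank.authentication.ui.qt_abstraction import QtGui
    return QtGui.QMessageBox.question(None, "Renew", "Renew your session?")

def worker():
    main_thread_call = invoker.create()
    # Blocks this thread; the dialog itself runs on the main thread.
    answer = main_thread_call(ask_question)
    print("user answered: %s" % answer)

threading.Thread(target=worker).start()
```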
#### File: tank/authentication/login_dialog.py
```python
from .ui import resources_rc # noqa
from .ui import login_dialog
from . import session_cache
from ..util.shotgun import connection
from .errors import AuthenticationError
from .ui.qt_abstraction import QtGui, QtCore
from tank_vendor.shotgun_api3 import MissingTwoFactorAuthenticationFault
class LoginDialog(QtGui.QDialog):
"""
Dialog for getting user credentials.
"""
# Formatting required to display error messages.
ERROR_MSG_FORMAT = "<font style='color: rgb(252, 98, 70);'>%s</font>"
def __init__(self, is_session_renewal, hostname=None, login=None, fixed_host=False, http_proxy=None, parent=None):
"""
Constructs a dialog.
:param is_session_renewal: Boolean indicating if we are renewing a session or authenticating a user from
scratch.
:param hostname: The string to populate the site field with. If None, the field will be empty.
:param login: The string to populate the login field with. If None, the field will be empty.
:param fixed_host: Indicates if the hostname can be changed. Defaults to False.
:param http_proxy: The proxy server to use when testing authentication. Defaults to None.
:param parent: The Qt parent for the dialog (defaults to None)
"""
QtGui.QDialog.__init__(self, parent)
hostname = hostname or ""
login = login or ""
self._is_session_renewal = is_session_renewal
# setup the gui
self.ui = login_dialog.Ui_LoginDialog()
self.ui.setupUi(self)
# Set the title
self.setWindowTitle("Shotgun Login")
# Assign credentials
self._http_proxy = http_proxy
self.ui.site.setText(hostname)
self.ui.login.setText(login)
if fixed_host:
self._disable_text_widget(
self.ui.site,
"The Shotgun site has been predefined and cannot be modified."
)
# Disable keyboard input in the site and login boxes if we are simply renewing the session.
# If the host is fixed, disable the site textbox.
if is_session_renewal:
self._disable_text_widget(
self.ui.site,
"You are renewing your session: you can't change your host.")
self._disable_text_widget(
self.ui.login,
"You are renewing your session: you can't change your login."
)
# Set the focus appropriately on the topmost line edit that is empty.
if self.ui.site.text():
if self.ui.login.text():
self.ui.password.setFocus(QtCore.Qt.OtherFocusReason)
else:
self.ui.login.setFocus(QtCore.Qt.OtherFocusReason)
if self._is_session_renewal:
self._set_login_message("Your session has expired. Please enter your password.")
else:
self._set_login_message("Please enter your credentials.")
# Select the right first page.
self.ui.stackedWidget.setCurrentWidget(self.ui.login_page)
# hook up signals
self.ui.sign_in.clicked.connect(self._ok_pressed)
self.ui.stackedWidget.currentChanged.connect(self._current_page_changed)
self.ui.verify_2fa.clicked.connect(self._verify_2fa_pressed)
self.ui.use_backup.clicked.connect(self._use_backup_pressed)
self.ui.verify_backup.clicked.connect(self._verify_backup_pressed)
self.ui.use_app.clicked.connect(self._use_app_pressed)
self.ui.forgot_password_link.linkActivated.connect(self._link_activated)
self.ui.site.editingFinished.connect(self._strip_whitespaces)
self.ui.login.editingFinished.connect(self._strip_whitespaces)
self.ui._2fa_code.editingFinished.connect(self._strip_whitespaces)
self.ui.backup_code.editingFinished.connect(self._strip_whitespaces)
def _strip_whitespaces(self):
"""
Cleans up a field after editing.
"""
self.sender().setText(self.sender().text().strip())
def _link_activated(self, site):
"""
        Called when the user clicks the "Forgot your password?" link.
"""
# Don't use the URL that is set in the link, but the URL set in the
# text box.
site = connection.sanitize_url(self.ui.site.text())
# Give visual feedback that we are patching the URL before invoking
# the desktop services.
self.ui.site.setText(site)
# Launch the browser
forgot_password = <PASSWORD>" % site
if not QtGui.QDesktopServices.openUrl(forgot_password):
self._set_error_message(
self.ui.message, "Can't open '%s'." % forgot_password
)
def _current_page_changed(self, index):
"""
Resets text error message on the destination page.
:param index: Index of the page changed.
"""
if self.ui.stackedWidget.indexOf(self.ui._2fa_page) == index:
self.ui.invalid_code.setText("")
elif self.ui.stackedWidget.indexOf(self.ui.backup_page) == index:
self.ui.invalid_backup_code.setText("")
def _disable_text_widget(self, widget, tooltip_text):
"""
Disables a widget and adds tooltip to it.
:param widget: Text editing widget to disable.
        :param tooltip_text: Tooltip text that explains why the widget is disabled.
"""
widget.setReadOnly(True)
widget.setEnabled(False)
widget.setToolTip(tooltip_text)
def _set_login_message(self, message):
"""
Set the message in the dialog.
:param message: Message to display in the dialog.
"""
self.ui.message.setText(message)
def exec_(self):
"""
Displays the window modally.
"""
self.show()
self.raise_()
self.activateWindow()
# the trick of activating + raising does not seem to be enough for
        # modal dialogs, so force them on top as well.
        # On PySide2, OR-ing the current window flags with WindowStaysOnTopHint causes the dialog
        # to freeze, so only set the WindowStaysOnTopHint flag as this appears to not disable the
# other flags.
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
return QtGui.QDialog.exec_(self)
def result(self):
"""
Displays a modal dialog asking for the credentials.
        :returns: A (hostname, username, session token) tuple of strings if the user
                  authenticated, None if the user cancelled.
"""
if self.exec_() == QtGui.QDialog.Accepted:
return (self.ui.site.text().encode("utf-8"),
self.ui.login.text().encode("utf-8"),
self._new_session_token)
else:
return None
def _set_error_message(self, widget, message):
"""
Set the error message in the dialog.
:param widget: Widget to display the message on.
:param message: Message to display in red in the dialog.
"""
widget.setText(self.ERROR_MSG_FORMAT % message)
def _ok_pressed(self):
"""
Validate the values, accepting if login is successful and display an error message if not.
"""
# pull values from the gui
site = self.ui.site.text().strip()
login = self.ui.login.text().strip()
password = self.ui.password.text()
if len(site) == 0:
self._set_error_message(self.ui.message, "Please enter the address of the site to connect to.")
self.ui.site.setFocus(QtCore.Qt.OtherFocusReason)
return
site = connection.sanitize_url(site)
# Cleanup the URL.
self.ui.site.setText(site)
if len(login) == 0:
self._set_error_message(self.ui.message, "Please enter your login name.")
self.ui.login.setFocus(QtCore.Qt.OtherFocusReason)
return
if len(password) == 0:
self._set_error_message(self.ui.message, "Please enter your password.")
self.ui.password.setFocus(QtCore.Qt.OtherFocusReason)
return
try:
self._authenticate(self.ui.message, site, login, password)
except MissingTwoFactorAuthenticationFault:
# We need a two factor authentication code, move to the next page.
self.ui.stackedWidget.setCurrentWidget(self.ui._2fa_page)
except Exception as e:
self._set_error_message(self.ui.message, e)
def _authenticate(self, error_label, site, login, password, auth_code=None):
"""
Authenticates the user using the passed in credentials.
:param error_label: Label to display any error raised from the authentication.
:param site: Site to connect to.
:param login: Login to use for that site.
:param password: <PASSWORD> with the login.
:param auth_code: Optional two factor authentication code.
:raises MissingTwoFactorAuthenticationFault: Raised if auth_code was None but was required
by the server.
"""
success = False
try:
# set the wait cursor
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QtGui.QApplication.processEvents()
# try and authenticate
self._new_session_token = session_cache.generate_session_token(
site, login, password, self._http_proxy, auth_code
)
except AuthenticationError as e:
# authentication did not succeed
self._set_error_message(error_label, e)
else:
success = True
finally:
# restore the cursor
QtGui.QApplication.restoreOverrideCursor()
# dialog is done
QtGui.QApplication.processEvents()
        # Do not accept while the cursor is overridden; it freezes the dialog.
if success:
self.accept()
def _verify_2fa_pressed(self):
"""
Called when the Verify button is pressed on the 2fa page.
"""
self._verify_pressed(self.ui._2fa_code.text(), self.ui.invalid_code)
def _verify_backup_pressed(self):
"""
Called when the Verify button is pressed on the backup codes page.
"""
self._verify_pressed(self.ui.backup_code.text(), self.ui.invalid_backup_code)
def _verify_pressed(self, code, error_label):
"""
        Validates the code, dismissing the dialog if the login is successful and displaying an error
if not.
:param code: Code entered by the user.
:param error_label: Label to update if the code is invalid.
"""
if not code:
self._set_error_message(error_label, "Please enter your code.")
return
site = self.ui.site.text().strip()
login = self.ui.login.text()
password = self.ui.password.text()
try:
self._authenticate(error_label, site, login, password, code)
except Exception as e:
self._set_error_message(self.ui.message, e)
def _use_backup_pressed(self):
"""
Switches to the backup codes page.
"""
self.ui.stackedWidget.setCurrentWidget(self.ui.backup_page)
def _use_app_pressed(self):
"""
Switches to the main two factor authentication page.
"""
self.ui.stackedWidget.setCurrentWidget(self.ui._2fa_page)
```
#### File: tank/authentication/shotgun_wrapper.py
```python
from tank_vendor.shotgun_api3 import Shotgun, AuthenticationFault
from . import interactive_authentication, session_cache
from .. import LogManager
logger = LogManager.get_logger(__name__)
class ShotgunWrapper(Shotgun):
"""
This class wraps the Shotgun instance that communicates with the Shotgun
server. Every time a request is made to the server and we are authenticated
as a session user, the call will be monitored for an AuthenticationFault.
If it happens, the call will be interrupted by a prompt asking for the user's
password to renew the session. Once the session is renewed, the call will be
executed again.
"""
def __init__(self, *args, **kwargs):
"""
Constructor. This has the same parameters as the Shotgun class, but it
has an extra sg_auth_user parameter documented below.
:param sg_auth_user: ShotgunUser derived instance that represents the
authenticated user making the request.
"""
self._user = kwargs["sg_auth_user"]
del kwargs["sg_auth_user"]
super(ShotgunWrapper, self).__init__(*args, **kwargs)
def _call_rpc(self, *args, **kwargs):
"""
Wraps the _call_rpc method from the base class to trap authentication
errors and prompt for the user's password.
"""
try:
# If the user's session token has changed since we last tried to
# call the server, it's because the token expired and there's a
# new one available, so use that one instead in the future.
if self._user.get_session_token() != self.config.session_token:
logger.debug("Global session token has changed. Using that instead.")
self.config.session_token = self._user.get_session_token()
return super(ShotgunWrapper, self)._call_rpc(*args, **kwargs)
except AuthenticationFault:
logger.debug("Authentication failure.")
pass
# Before renewing the session token, let's see if there is another
# one in the session_cache.
session_info = session_cache.get_session_data(self._user.get_host(), self._user.get_login())
        # If the one in the cache is different, maybe another process refreshed the token
# for us, let's try that token instead.
if session_info and session_info["session_token"] != self._user.get_session_token():
logger.debug("Different session token found in the session cache. Will try it.")
self.config.session_token = session_info["session_token"]
# Try again. If it fails with an authentication fault, that's ok
try:
result = super(ShotgunWrapper, self)._call_rpc(*args, **kwargs)
# It didn't fail, so we can update the session token for the user. The value is
# coming from the cache, so we should avoid an unnecessary write to disk.
logger.debug("Cached token was not expired. Saving to memory.")
self._user.set_session_token(session_info["session_token"], cache=False)
return result
except AuthenticationFault:
logger.debug("Authentication failure, cached token was also expired.")
pass
        # We end up here if we were in sync with the cache or if we tried the cached value but it
# didn't work.
# Let's renew the session token!
interactive_authentication.renew_session(self._user)
self.config.session_token = self._user.get_session_token()
# If there is once again an authentication fault, then it means
# something else is going wrong and we will then simply rethrow
return super(ShotgunWrapper, self)._call_rpc(*args, **kwargs)
```
#### File: authentication/ui/aspect_preserving_label.py
```python
from .qt_abstraction import QtGui
from .qt_abstraction import QtCore
class AspectPreservingLabel(QtGui.QLabel):
"""
Label that displays a scaled down version of an image if it is bigger
than the label.
"""
def __init__(self, parent=None):
"""
Constructor
        :param parent: Parent widget.
"""
QtGui.QLabel.__init__(self, parent)
self._pix = None
def setPixmap(self, pixmap):
"""
Sets the pixmap for the label.
:param pixmap: Pixmap to display in the label.
"""
self._pix = pixmap
scaled_pixmap = self._pix.scaled(
self.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
QtGui.QLabel.setPixmap(self, scaled_pixmap)
def heightForWidth(self, width):
"""
Computes the height for a given width while preserving aspect ratio.
:param width: Width we want to get the height for.
:returns: The height.
"""
        if self._pix is not None:
return self._pix.height() * width / self._pix.width()
return QtGui.QLabel.heightForWidth(self, width)
def sizeHint(self):
"""
Computes the aspect-ratio preserving size hint for this label.
"""
width = min(self.width(), self.pixmap().width())
return QtCore.QSize(width, self.heightForWidth(width))
def resizeEvent(self, e):
"""
Rescales the pixmap when the widget size is changed.
:param e: Resize event payload.
"""
if self._pix is None:
return
scaled_pixmap = self._pix.scaled(
self.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
QtGui.QLabel.setPixmap(self, scaled_pixmap)
QtGui.QApplication.instance().processEvents()
```
#### File: tank/authentication/user.py
```python
from . import user_impl
class ShotgunUser(object):
"""
Represents a Shotgun user, either a script or a person and provides an entry point
into the authentication system.
User objects are created via the :class:`ShotgunAuthenticator` object, which will handle
caching user objects on disk, prompting the user for their credentials etc.
Once you have retrieved one of the user objects below, this can be used to access
Shotgun in a seamless way. The :meth:`create_sg_connection()` will return a Shotgun API handle
which is associated with the current user. This API handle is also monitored for
authentication timeouts, so if the user's session times out (typically due to periods
of inactivity), the user may be prompted (via a QT UI or stdin/stdout if only
console is accessible) to refresh their Shotgun session by typing in their password.
If you need to persist the user object, this is possible via the serialization
methods. This is particularly useful if you need to pass a user object from one
process to another, for example when launching a DCC such as Maya or Nuke.
"""
def __init__(self, impl):
"""
:param impl: Internal user implementation class this class proxies.
"""
self._impl = impl
@property
def host(self):
"""
Returns the host for this user.
:returns: The host string.
"""
return self._impl.get_host()
@property
def http_proxy(self):
"""
Returns the HTTP proxy for this user.
:returns: The HTTP proxy string.
"""
return self._impl.get_http_proxy()
@property
def login(self):
"""
The login for this current user. For Shotgun user types that don't have a concept
of a login (like API scripts), None is returned.
:returns: The login string or None.
"""
return self._impl.get_login()
def create_sg_connection(self):
"""
Creates a Shotgun connection using the credentials for this user.
:returns: A Shotgun connection.
"""
return self._impl.create_sg_connection()
def are_credentials_expired(self):
"""
Checks if the credentials for the user are expired.
:returns: True if the credentials are expired, False otherwise.
"""
return self._impl.are_credentials_expired()
def refresh_credentials(self):
"""
Refreshes the credentials of this user so that they don't expire.
If they are expired, you will be prompted for the user's password.
"""
self._impl.refresh_credentials()
def __str__(self):
"""
Returns the name of the user.
:returns: The user's name string.
"""
return str(self.impl)
def __repr__(self):
"""
Returns a string representation of the user.
:returns: A string representation of the user.
"""
return repr(self.impl)
@property
def impl(self):
"""
Returns the user implementation object. Note: Retrieving the implementation
object is unsupported and should not be attempted. It is there to expose
functionality to the internals of the authentication module. We
reserve the right to alter the interface of the implementation object
as needed.
:returns: The ShotgunUserImpl derived object.
"""
return self._impl
def serialize_user(user):
"""
Serializes a user. Meant to be consumed by deserialize_user.
:param user: User object that needs to be serialized.
:returns: The payload representing the user.
"""
return user_impl.serialize_user(user.impl)
def deserialize_user(payload):
"""
Converts a payload produced by serialize_user back into a ShotgunUser
derived instance.
:param payload: Pickled dictionary of values
:returns: A ShotgunUser derived instance.
"""
return ShotgunUser(user_impl.deserialize_user(payload))
```
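A hedged sketch of the serialization round trip described in the class docstring, e.g. to hand a user from one process to a launched DCC; the authenticator import path and the use of an environment variable are assumptions:
```python
# Illustrative: persisting a user across processes with serialize_user /
# deserialize_user. The ShotgunAuthenticator import path is an assumption.
import os
from tank.authentication import ShotgunAuthenticator

user = ShotgunAuthenticator().get_user()
os.environ["SHOTGUN_USER_PAYLOAD"] = serialize_user(user)

# ...later, e.g. inside the launched DCC process...
restored = deserialize_user(os.environ["SHOTGUN_USER_PAYLOAD"])
sg = restored.create_sg_connection()
```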
#### File: tank/bootstrap/async_bootstrap.py
```python
from ..util.qt_importer import QtImporter
importer = QtImporter()
QtCore = importer.QtCore
QtGui = importer.QtGui
if QtCore is None:
# Raise an exception when Qt is not available.
raise ImportError
class AsyncBootstrapWrapper(QtCore.QObject):
"""
Wrapper class that can bootstrap an :class:`~sgtk.Sgtk` instance
asynchronously in a background thread, followed by the synchronous launching
of an :class:`~sgtk.platform.Engine` instance in the main application thread.
"""
def __init__(self, toolkit_manager, engine_name, entity, completed_callback, failed_callback):
"""
Initializes an instance of the asynchronous bootstrap wrapper.
The callback functions used to signify the completion and the failure of the bootstrap
must have the following signatures:
``completed_callback(engine)``
where:
- ``engine`` is the launched :class:`~sgtk.platform.Engine` instance.
``failed_callback(phase, exception)``
where:
- ``phase`` is the bootstrap phase that raised the exception,
``ToolkitManager.TOOLKIT_BOOTSTRAP_PHASE`` or ``ToolkitManager.ENGINE_STARTUP_PHASE``.
Using this phase, the callback can decide if the toolkit core needs
to be re-imported to ensure usage of a swapped in version.
- ``exception`` is the python exception raised while bootstrapping.
:param toolkit_manager: :class:`~sgtk.bootstrap.ToolkitManager` instance bootstrapping the engine.
:param engine_name: Name of the engine to launch.
:param entity: Shotgun entity used to resolve a project context.
:type entity: Dictionary with keys ``type`` and ``id``, or ``None`` for the site.
:param completed_callback: Callback function that handles cleanup after successful completion of the bootstrap.
:param failed_callback: Callback function that handles cleanup after failed completion of the bootstrap.
"""
super(AsyncBootstrapWrapper, self).__init__()
self._toolkit_manager = toolkit_manager
self._engine_name = engine_name
self._entity = entity
self._completed_callback = completed_callback
self._failed_callback = failed_callback
# Create a worker that can bootstrap the toolkit asynchronously in a background thread.
self._worker = _BootstrapToolkitWorker(self._toolkit_manager, engine_name, entity)
# This QThread object will live in the main thread, not in the new thread it will manage.
self._thread = QtCore.QThread()
# Make the worker operate with the new thread affinity and use the QThread object event loop.
self._worker.moveToThread(self._thread)
# Start working once the QThread object has started its event loop in its new thread context.
self._thread.started.connect(self._worker.work)
# Make the worker report on the toolkit bootstrap progress.
self._worker.progressing.connect(self._progress_bootstrap)
# Handle completion of the toolkit bootstrap by the worker.
self._worker.completed.connect(self._complete_bootstrap)
# Handle failure of the toolkit bootstrap by the worker.
self._worker.failed.connect(self._fail_bootstrap)
# Make the QThread object exit its event loop once the work is done.
self._worker.done.connect(self._thread.quit)
def bootstrap(self):
"""
Starts the asynchronous bootstrap logic.
"""
# Start the QThread object event loop in its new thread context.
self._thread.start()
# A decorator is used to shield against the slot threading issue described here:
# http://stackoverflow.com/questions/20752154/pyqt-connecting-a-signal-to-a-slot-to-start-a-background-operation
@QtCore.Slot(float, str)
def _progress_bootstrap(self, progress_value, message):
"""
Callback slot that reports back on the toolkit and engine bootstrap progress.
This method calls the toolkit manager progress reporting callback.
:param progress_value: Current progress value, ranging from 0.0 to 1.0.
:param message: Progress message to report.
"""
self._toolkit_manager.progress_callback(progress_value, message)
# A decorator is used to shield against the slot threading issue described here:
# http://stackoverflow.com/questions/20752154/pyqt-connecting-a-signal-to-a-slot-to-start-a-background-operation
@QtCore.Slot()
def _complete_bootstrap(self):
"""
Callback slot that handles cleanup after successful completion of the toolkit bootstrap.
"""
try:
# Ladies and Gentlemen, start your engines!
engine = self._toolkit_manager._start_engine(self._worker.get_sgtk(), self._engine_name, self._entity)
except Exception as exception:
# Handle cleanup after failed completion of the engine startup.
self._failed_callback(self._toolkit_manager.ENGINE_STARTUP_PHASE, exception)
return
# Handle cleanup after successful completion of the engine startup.
self._completed_callback(engine)
# A decorator is used to shield against the slot threading issue described here:
# http://stackoverflow.com/questions/20752154/pyqt-connecting-a-signal-to-a-slot-to-start-a-background-operation
@QtCore.Slot(Exception)
def _fail_bootstrap(self, exception):
"""
Callback slot that handles cleanup after failed completion of the toolkit bootstrap.
:param exception: Exception raised while bootstrapping the toolkit.
"""
# Handle cleanup after failed completion of the toolkit bootstrap.
self._failed_callback(self._toolkit_manager.TOOLKIT_BOOTSTRAP_PHASE, exception)
class _BootstrapToolkitWorker(QtCore.QObject):
"""
Bootstrap worker that can bootstrap an :class:`~sgtk.Sgtk` instance asynchronously in a background thread.
:signal: ``progressing(float, str)`` - Emitted while the bootstrap toolkit worker is progressing
in its work in the background. The parameters are the current progress value and
the progress message to report.
:signal: ``completed()`` - Emitted when the bootstrap toolkit worker successfully completes
its work in the background. Use ``get_sgtk()`` to retrieve the bootstrapped toolkit instance.
:signal: ``failed(Exception)`` - Emitted when the bootstrap toolkit worker fails to complete
its work in the background. The parameter is the python exception raised while bootstrapping.
:signal: ``done()`` - Emitted when the bootstrap toolkit worker has finished (whether it completed or failed)
its work in the background.
"""
# Qt signal emitted while the bootstrap toolkit worker is progressing in its work in the background.
progressing = QtCore.Signal(float, str)
# Qt signal emitted when the bootstrap toolkit worker successfully completes its work in the background.
completed = QtCore.Signal()
# Qt signal emitted when the bootstrap toolkit worker fails to complete its work in the background.
failed = QtCore.Signal(Exception)
# Qt signal emitted when the bootstrap toolkit worker has finished its work in the background.
done = QtCore.Signal()
def __init__(self, toolkit_manager, engine_name, entity):
"""
Initializes an instance of the bootstrap toolkit worker.
:param toolkit_manager: :class:`~sgtk.bootstrap.ToolkitManager` instance bootstrapping the engine.
:param engine_name: Name of the engine used to resolve a configuration.
:param entity: Shotgun entity used to resolve a project context.
:type entity: Dictionary with keys ``type`` and ``id``, or ``None`` for the site.
"""
super(_BootstrapToolkitWorker, self).__init__()
self._toolkit_manager = toolkit_manager
self._engine_name = engine_name
self._entity = entity
# Bootstrapped toolkit instance for the given engine and entity.
self._sgtk = None
def get_sgtk(self):
"""
Get the toolkit instance bootstrapped by the worker.
:return: Bootstrapped :class:`~sgtk.Sgtk` instance.
"""
return self._sgtk
# A decorator is used to shield against the slot threading issue described here:
# http://stackoverflow.com/questions/20752154/pyqt-connecting-a-signal-to-a-slot-to-start-a-background-operation
@QtCore.Slot()
def work(self):
"""
Bootstraps a toolkit instance for the given engine and entity and
signals the progress, completion or failure of this work.
"""
try:
# Bootstrap a toolkit instance for the given engine and entity,
# using a local thread-safe progress reporting callback.
self._sgtk = self._toolkit_manager._bootstrap_sgtk(
self._engine_name, self._entity, self._report_progress
)
# Signal completion of the toolkit bootstrap.
self.completed.emit()
except Exception as exception:
# Signal failure of the toolkit bootstrap.
self.failed.emit(exception)
# Make the worker operate with the main thread affinity
# where the main event loop can handle its deletion.
# Only the worker can push itself to the main thread.
self.moveToThread(QtCore.QCoreApplication.instance().thread())
# Signal that the work is done.
self.done.emit()
def _report_progress(self, progress_value, message):
"""
Callback function that reports back on the toolkit bootstrap progress.
:param progress_value: Current progress value, ranging from 0.0 to 1.0.
:param message: Progress message to report.
"""
# Signal the toolkit bootstrap progress.
self.progressing.emit(progress_value, message)
def _get_thread_info_msg(caller):
"""
Debugging function that generates a message about the thread the calling process is running in.
:param caller: Name of the calling process to include in the information message.
:return: Generated information message.
"""
if QtGui.QApplication.instance():
if QtCore.QThread.currentThread() == QtGui.QApplication.instance().thread():
msg = "%s is running in main Qt thread."
else:
msg = "%s is running in background Qt thread."
else:
msg = "%s in not running in a Qt thread!"
return msg % caller
```
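A hedged wiring example for AsyncBootstrapWrapper, following the callback signatures documented in its constructor; the manager setup, engine name and entity are placeholders:
```python
# Illustrative wiring of AsyncBootstrapWrapper. The ToolkitManager setup
# and the entity values are placeholders, not a definitive recipe.
import sgtk

def completed(engine):
    # engine is the launched sgtk.platform.Engine instance
    print("Engine %s is ready." % engine.name)

def failed(phase, exception):
    print("Bootstrap failed in phase %s: %s" % (phase, exception))

manager = sgtk.bootstrap.ToolkitManager()
wrapper = AsyncBootstrapWrapper(
    manager, "tk-maya", {"type": "Project", "id": 123}, completed, failed
)
wrapper.bootstrap()  # returns immediately; work continues in a QThread
```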
#### File: tank/bootstrap/import_handler.py
```python
import imp
import uuid
import os
import sys
import warnings
from .. import LogManager
log = LogManager.get_logger(__name__)
class CoreImportHandler(object):
"""
A custom import handler to allow for core version switching.
The core path is used to locate modules attempting to be loaded. The core
path can be set via `set_core_path` to alter the location of existing and
future core imports.
For more information on custom import hooks, see PEP 302:
https://www.python.org/dev/peps/pep-0302/
"""
NAMESPACES_TO_TRACK = ["tank", "sgtk", "tank_vendor"]
@classmethod
def swap_core(cls, core_path):
"""
Swap the current core with the core located at the supplied path.
Actually just unloads the existing core and ensures an import handler
exists that points to the supplied core path. When this method completes,
all core namespaces will be removed from `sys.modules`.
:param core_path: The path to the new core to use upon import.
"""
# make sure handler is up
handler = cls._initialize()
log.debug("%s: Begin swapping core to %s" % (handler, core_path))
# swapping core means our logging singleton will be reset.
# make sure that there are no log handlers registered
# and associated with the singleton as these will be lost
# use local imports to ensure a fresh cut of the code
from ..log import LogManager
prev_log_file = LogManager().uninitialize_base_file_handler()
# logging to file is now disabled and will be renamed after the
# main tank import of the new code.
# make sure that this entire operation runs inside the import thread lock
# in order to not cause any type of cross-thread confusion during the swap
imp.acquire_lock()
try:
handler._swap_core(core_path)
# because we are swapping out the code that we are currently running, Python is
# generating a runtime warning:
#
# RuntimeWarning: Parent module 'tank.bootstrap' not found while handling absolute import
#
# We are fixing this issue by re-importing tank, so it's essentially a chicken and egg
# scenario. So it's ok to mute the warning. Interestingly, by muting the warning, the
# execution of the reload/import becomes more complete and it seems some parts of the
# code that weren't previously reloaded are now covered. So turning off the warnings
# display seems to have side effects on execution.
# Save the existing list of warning filters before we modify it using simplefilter().
# Note: the '[:]' causes a copy of the list to be created. Without it, original_filter
# would alias the one and only 'real' list and then we'd have nothing to restore.
original_filters = warnings.filters[:]
# Ignore all warnings
warnings.simplefilter("ignore")
log.debug("...core swap complete.")
log.debug("running explicit 'import tank' to re-initialize new core...")
try:
# Kick toolkit to re-import
import tank
finally:
# Restore the list of warning filters.
warnings.filters = original_filters
log.debug("...import complete")
finally:
imp.release_lock()
# and re-init our disk logging based on the new code
# access it from the new tank instance to ensure we get the new code
try:
if prev_log_file:
tank.LogManager().initialize_base_file_handler_from_path(prev_log_file)
except AttributeError:
# older versions of the API may not have this defined.
log.debug(
"Switching to a version of the core API that doesn't "
"have a LogManager.initialize_base_file_handler_from_path method defined."
)
@classmethod
def _initialize(cls):
"""
Boots up the import manager if it's not already up.
:returns: CoreImportHandler instance
"""
# see if there's already a core import handler in use
for handler in sys.meta_path:
if isinstance(handler, CoreImportHandler):
return handler
# no import handler found, so create one.
current_folder = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
handler = cls(current_folder)
sys.meta_path.append(handler)
log.debug("Added import handler to sys.meta_path to support core swapping.")
return handler
def __init__(self, core_path):
"""Initialize the custom importer.
:param core_path: A str path to the core location to import from.
"""
self._core_path = core_path
# a dictionary to hold module information after it is found,
# before it is loaded.
self._module_info = {}
def __repr__(self):
"""
A unique representation of the handler.
:return: str representation.
"""
return "<CoreImportHandler for '%s'>" % self._core_path
def _swap_core(self, core_path):
"""
Actual payload for the core swapping.
To swap a core, call CoreImportHandler.swap_core().
:param core_path: core path to swap to.
"""
if not os.path.exists(core_path):
raise ValueError(
"The supplied core path '%s' is not a valid directory." % core_path
)
# acquire a lock to prevent issues with other
# threads importing at the same time.
imp.acquire_lock()
try:
# sort by package depth, deeper modules first
module_names = sorted(
sys.modules.keys(),
key=lambda module_name: module_name.count("."),
reverse=True
)
# unique prefix for stashing this session
stash_prefix = "core_swap_%s" % uuid.uuid4().hex
for module_name in module_names:
# just to be safe, don't re-import this module.
# we always use the first one added to `sys.meta_path` anyway.
if module_name == __name__:
continue
# extract just the package name
pkg_name = module_name.split(".")[0]
if pkg_name in self.NAMESPACES_TO_TRACK:
# the package name is in one of the new core namespaces. we
# delete it from sys.modules so that the custom import can run.
module = sys.modules[module_name]
# note: module entries that are None can safely be left in sys.modules -
# these are optimizations used by the importer. Read more here:
# http://stackoverflow.com/questions/1958417/why-are-there-dummy-modules-in-sys-modules
if module:
# make sure we don't lose any references to it - for example
# via instances that have been inherited from base classes
# to make sure a reference is kept, keep the module object
# but move it out of the way in sys.modules to allow for
# a new version of the module to be imported alongside.
stashed_module_name = "%s_%s" % (stash_prefix, module_name)
# uncomment for copious amounts of debug
# log.debug(
# "Relocating module %s from sys.modules[%s] "
# "to sys.modules[%s]" % (module, module_name, stashed_module_name)
# )
sys.modules[stashed_module_name] = module
# and remove the official entry
# log.debug("Removing sys.modules[%s]" % module_name)
del sys.modules[module_name]
# reset importer to point at new core for future imports
self._module_info = {}
self._core_path = core_path
finally:
# release the lock so that other threads can continue importing from
# the new core location.
imp.release_lock()
def find_module(self, module_fullname, package_path=None):
"""Locates the given module in the current core.
This method is part of the custom import handler interface contract.
:param module_fullname: The fullname of the module to import
:param package_path: None for a top-level module, or
package.__path__ for submodules or subpackages
The package_path is currently ignored by this method, since it always
imports the module from the current core path.
For further info, see the docs on find_module here:
https://docs.python.org/2/library/imp.html#imp.find_module
:returns: this object (also a loader) if module found, None otherwise.
"""
# get the package name (first part of the module fullname)
module_path_parts = module_fullname.split(".")
package_name = module_path_parts[0]
# make sure the package is in the list of namespaces before continuing.
if package_name not in self.NAMESPACES_TO_TRACK:
# the package is not in one of the core namespaces. returning
# None tells python to use the next importer available (likely the
# default import mechanism).
return None
if len(module_path_parts) > 1:
# this is a dotted path. we need to recursively import the parents
# with this logic. once we've found the immediate parent we
# can use its `__path__` attribute to locate this module.
# this matches the suggested logic for finding nested modules in
# the `imp.find_module` docs found here:
# https://docs.python.org/2/library/imp.html
parent_module_parts = module_path_parts[:-1]
# this is the parent module's full package spec.
parent_path = ".".join(parent_module_parts)
if parent_path in sys.modules:
# if the parent has already been imported, then we can just grab
# its path to locate this module
package_path = sys.modules[parent_path].__path__
else:
# parent hasn't been loaded. do a recursive find/load in order
# to get the parent's path
if self.find_module(parent_path):
parent_module = self.load_module(parent_path)
package_path = parent_module.__path__
else:
# could not find parent module. we'll try to build a path
# given what we know about core and the parent package path.
# this turns parent package "foo.bar" into:
# /path/to/current/core/foo/bar
package_path = [
os.path.join(self._core_path, *parent_module_parts)
]
else:
# this appears to be a top-level package. it should be in the
# current core's root path.
package_path = [self._core_path]
# pop the target module name off the dotted path
module_name = module_path_parts.pop()
try:
# find the module and store its info in a lookup based on the
# full module name. The module info is a tuple of the form:
#
# (file_obj, filename, description)
#
# If this find is successful, we'll need the info in order
# to load it later.
module_info = imp.find_module(module_name, package_path)
self._module_info[module_fullname] = module_info
except ImportError:
# no module found, fall back to regular import
return None
# since this object is also the "loader" return itself
return self
def load_module(self, module_fullname):
"""Custom loader.
Called by python if the find_module was successful.
This method is part of the custom import handler interface contract.
For further info, see the docs on `load_module` here:
https://docs.python.org/2/library/imp.html#imp.load_module
:param module_fullname: The fullname of the module to import
:returns: The loaded module object.
"""
file_obj = None
try:
# retrieve the found module info
(file_obj, filename, desc) = self._module_info[module_fullname]
# uncomment for lots of import related debug :)
#log.debug("Custom load module! %s [%s]" % (module_fullname, filename))
# attempt to load the module. if this fails, allow it to raise
# the usual `ImportError`
module = imp.load_module(module_fullname, file_obj, filename, desc)
finally:
# as noted in the imp.load_module docs, must close the file handle.
if file_obj:
file_obj.close()
# no need to carry around the module info now that we've loaded it.
# once the module is in `sys.modules`, the custom importer will
# no longer run.
del self._module_info[module_fullname]
# the module needs to know the loader so that reload() works
module.__loader__ = self
# the module has been loaded from the proper core location!
return module
```
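For reference, a self-contained toy version of the PEP 302 find/load protocol that CoreImportHandler implements, redirecting a single namespace to a fixed directory; a sketch only, omitting the parent-resolution and stashing logic above:
```python
# Toy PEP 302 meta_path hook mirroring the find_module/load_module
# protocol used by CoreImportHandler. Purely illustrative.
import imp
import os
import sys

class SingleRootImporter(object):
    def __init__(self, namespace, root):
        self._namespace = namespace  # top-level package name to claim
        self._root = root            # directory to import it from

    def find_module(self, fullname, path=None):
        if fullname.split(".")[0] != self._namespace:
            return None  # defer to the default import machinery
        return self      # this object doubles as the loader

    def load_module(self, fullname):
        if fullname in sys.modules:
            return sys.modules[fullname]
        parts = fullname.split(".")
        search_path = [os.path.join(self._root, *parts[:-1])]
        file_obj, filename, desc = imp.find_module(parts[-1], search_path)
        try:
            module = imp.load_module(fullname, file_obj, filename, desc)
        finally:
            # per the imp.load_module docs, the file handle must be closed
            if file_obj:
                file_obj.close()
        module.__loader__ = self  # so reload() keeps working
        return module

# sys.meta_path.append(SingleRootImporter("mypkg", "/path/to/code"))
```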
#### File: tank/commands/clone_configuration.py
```python
from ..util import ShotgunPath
from ..errors import TankError
from . import constants
from ..util import filesystem
from tank_vendor import yaml
from .action_base import Action
import sys
import os
import shutil
class CloneConfigAction(Action):
"""
Action that looks at the config and validates all parameters
"""
def __init__(self):
Action.__init__(self,
"clone_configuration",
Action.TK_INSTANCE,
"Clones the current configuration.",
"Configuration")
# no tank command support for this one
self.supports_tank_command = False
# this method can be executed via the API
self.supports_api = True
self.parameters = {}
self.parameters["source_id"] = { "description": "Id of source Pipeline Configuration to use.",
"default": None,
"type": "int" }
self.parameters["user_id"] = { "description": "Shotgun user id to associate the cloned configuration with.",
"default": None,
"type": "int" }
self.parameters["name"] = { "description": "The name of the new pipeline configuration.",
"default": None,
"type": "str" }
# note how the current platform's default value is None in order to make that required
self.parameters["path_mac"] = { "description": "Path to the new configuration on Macosx.",
"default": ( None if sys.platform == "darwin" else "" ),
"type": "str" }
self.parameters["path_win"] = { "description": "Path to the new configuration on Windows.",
"default": ( None if sys.platform == "win32" else "" ),
"type": "str" }
self.parameters["path_linux"] = { "description": "Path to the new configuration on Linux.",
"default": ( None if sys.platform == "linux2" else "" ),
"type": "str" }
self.parameters["return_value"] = { "description": "Returns the id of the created Pipeline Configuration",
"type": "int" }
def run_noninteractive(self, log, parameters):
"""
Tank command API accessor.
Called when someone runs a tank command through the core API.
:param log: std python logger
:param parameters: dictionary with tank command parameters
"""
# validate params and seed default values
computed_params = self._validate_parameters(parameters)
# execute
data = _do_clone(log,
self.tk,
computed_params["source_id"],
computed_params["user_id"],
computed_params["name"],
computed_params["path_linux"],
computed_params["path_mac"],
computed_params["path_win"])
return data["id"]
def run_interactive(self, log, args):
"""
Tank command accessor
:param log: std python logger
:param args: command line args
"""
raise TankError("This Action does not support command line access")
def clone_pipeline_configuration_html(log, tk, source_pc_id, user_id, new_name, target_linux, target_mac, target_win, is_localized):
"""
Clones a pipeline configuration, not necessarily the one associated with the current tk handle.
This script is called from the tank command directly and is what gets executed if someone
tries to run the clone command from inside of Shotgun by right-clicking a Pipeline
Configuration entry and selecting the clone action.
"""
data = _do_clone(log, tk, source_pc_id, user_id, new_name, target_linux, target_mac, target_win)
source_folder = data["source"]
target_folder = data["target"]
log.info("<b>Clone Complete!</b>")
log.info("")
log.info("Your configuration has been copied from <code>%s</code> "
"to <code>%s</code>." % (source_folder, target_folder))
# if this new clone is using a shared core API, tell people how to localize.
if not is_localized:
log.info("")
log.info("")
log.info("Note: You are running a shared version of the Toolkit Core API for this new clone. "
"This means that when you make an upgrade to that shared API, all "
"the different projects that share it will be upgraded. This makes the upgrade "
"process quick and easy. However, sometimes you also want to break out of a shared "
"environment, for example if you want to test a new version of the Shotgun Pipeline Toolkit. ")
log.info("")
log.info("In order to change this pipeline configuration to use its own independent version "
"of the Toolkit API, you can execute the following command: ")
if sys.platform == "win32":
tank_cmd = os.path.join(target_folder, "tank.bat")
else:
tank_cmd = os.path.join(target_folder, "tank")
log.info("")
code_css_block = "display: block; padding: 0.5em 1em; border: 1px solid #bebab0; background: #faf8f0;"
log.info("<code style='%s'>%s localize</code>" % (code_css_block, tank_cmd))
###################################################################################################
# private methods
@filesystem.with_cleared_umask
def _do_clone(log, tk, source_pc_id, user_id, new_name, target_linux, target_mac, target_win):
"""
Clones the current configuration
"""
curr_os = ShotgunPath.get_shotgun_storage_key()
source_pc = tk.shotgun.find_one(constants.PIPELINE_CONFIGURATION_ENTITY,
[["id", "is", source_pc_id]],
["code", "project", "linux_path", "windows_path", "mac_path"])
source_folder = source_pc.get(curr_os)
target_folder = {"linux2":target_linux, "win32":target_win, "darwin":target_mac }[sys.platform]
log.debug("Cloning %s -> %s" % (source_folder, target_folder))
if not os.path.exists(source_folder):
raise TankError("Cannot clone! Source folder '%s' does not exist!" % source_folder)
if os.path.exists(target_folder):
raise TankError("Cannot clone! Target folder '%s' already exists!" % target_folder)
# copy files and folders across
try:
os.mkdir(target_folder, 0o777)
os.mkdir(os.path.join(target_folder, "cache"), 0o777)
filesystem.copy_folder(
os.path.join(source_folder, "config"),
os.path.join(target_folder, "config")
)
filesystem.copy_folder(
os.path.join(source_folder, "install"),
os.path.join(target_folder, "install")
)
shutil.copy(os.path.join(source_folder, "tank"), os.path.join(target_folder, "tank"))
shutil.copy(os.path.join(source_folder, "tank.bat"), os.path.join(target_folder, "tank.bat"))
os.chmod(os.path.join(target_folder, "tank.bat"), 0o777)
os.chmod(os.path.join(target_folder, "tank"), 0o777)
sg_code_location = os.path.join(target_folder, "config", "core", "install_location.yml")
if os.path.exists(sg_code_location):
os.chmod(sg_code_location, 0o666)
os.remove(sg_code_location)
fh = open(sg_code_location, "wt")
fh.write("# Shotgun Pipeline Toolkit configuration file\n")
fh.write("# This file was automatically created by tank clone\n")
fh.write("# This file reflects the paths in the pipeline configuration\n")
fh.write("# entity which is associated with this location (%s).\n" % new_name)
fh.write("\n")
fh.write("Windows: '%s'\n" % target_win)
fh.write("Darwin: '%s'\n" % target_mac)
fh.write("Linux: '%s'\n" % target_linux)
fh.write("\n")
fh.write("# End of file.\n")
fh.close()
except Exception as e:
raise TankError("Could not create file system structure: %s" % e)
# finally register with shotgun
data = {"linux_path": target_linux,
"windows_path":target_win,
"mac_path": target_mac,
"code": new_name,
"project": source_pc["project"],
"users": [ {"type": "HumanUser", "id": user_id} ]
}
log.debug("Create sg: %s" % str(data))
pc_entity = tk.shotgun.create(constants.PIPELINE_CONFIGURATION_ENTITY, data)
log.debug("Created in SG: %s" % str(pc_entity))
# lastly, update the pipeline_configuration.yml file
try:
sg_pc_location = os.path.join(
target_folder,
"config",
"core",
constants.PIPELINECONFIG_FILE
)
# read the file first
fh = open(sg_pc_location, "rt")
try:
data = yaml.load(fh)
finally:
fh.close()
# now delete it
if os.path.exists(sg_pc_location):
os.chmod(sg_pc_location, 0o666)
os.remove(sg_pc_location)
# now update some fields
data["pc_id"] = pc_entity["id"]
data["pc_name"] = new_name
# and write the new file
fh = open(sg_pc_location, "wt")
# using safe_dump instead of dump ensures that we
# don't serialize any non-std yaml content. In particular,
# this causes issues if a unicode object containing a 7-bit
# ascii string is passed as part of the data. in this case,
# dump will write out a special format which is later on
# *loaded in* as a unicode object, even if the content doesn't
# need unicode handling. And this causes issues down the line
# in toolkit code, assuming strings:
#
# >>> yaml.dump({"foo": u"bar"})
# "{foo: !!python/unicode 'bar'}\n"
# >>> yaml.safe_dump({"foo": u"bar"})
# '{foo: bar}\n'
#
yaml.safe_dump(data, fh)
fh.close()
except Exception as e:
raise TankError("Could not update pipeline_configuration.yml file: %s" % e)
return {"source": source_folder, "target": target_folder, "id": pc_entity["id"] }
```
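Since the action declares supports_api, it can presumably be driven through the command API; a hedged sketch where the accessor and all ids/paths are invented:
```python
# Illustrative only: running the clone action programmatically. The
# sgtk.get_command accessor and every id/path below are assumptions.
import sgtk

tk = sgtk.sgtk_from_path("/projects/my_project")
clone_cmd = sgtk.get_command("clone_configuration", tk)
new_pc_id = clone_cmd.execute({
    "source_id": 12,
    "user_id": 42,
    "name": "dev_sandbox",
    "path_linux": "/configs/dev_sandbox",
    "path_mac": "/configs/dev_sandbox",
    "path_win": "C:\\configs\\dev_sandbox",
})
```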
#### File: tank/commands/console_utils.py
```python
from __future__ import print_function
import textwrap
from .. import pipelineconfig_utils
from ..platform import validation
from ..errors import TankError, TankNoDefaultValueError
from ..descriptor import CheckVersionConstraintsError
from ..platform.bundle import resolve_default_value
from ..util import shotgun
##########################################################################################
# user prompts
g_ask_questions = True
def ask_question(question, force_prompt=False):
    """
    Ask a yes-no-always question.
    Returns True if the user pressed yes (or previously pressed always),
    False if no.
    If force_prompt is True, it always asks, regardless of whether the user
    has previously pressed [a]lways.
    """
    global g_ask_questions
    if not g_ask_questions and not force_prompt:
        # auto-press YES
        return True
answer = raw_input("%s [Yna?]" % question)
answer = answer.lower()
if answer != "n" and answer != "a" and answer != "y" and answer != "":
print("Press ENTER or y for YES, n for NO and a for ALWAYS.")
answer = raw_input("%s [Yna?]" % question)
if answer == "a":
g_ask_questions = False
return True
if answer == "y" or answer == "":
return True
return False
def ask_yn_question(question):
    """
    Ask a yes-no question.
    Returns True if the user pressed yes, False if no.
    """
answer = raw_input("%s [yn]" % question )
answer = answer.lower()
if answer != "n" and answer != "y":
print("Press y for YES, n for NO")
answer = raw_input("%s [yn]" % question )
if answer == "y":
return True
return False
##########################################################################################
# displaying of info in the terminal, ascii-graphics style
def format_bundle_info(log, descriptor, required_updates=None):
"""
Formats a release notes summary output for an app, engine or core.
:param log: A logging handle.
:param descriptor: The descriptor to summarize.
:param required_updates: A list of bundle names that require updating.
"""
# yay we can install! - get release notes
(summary, url) = descriptor.changelog
if required_updates:
add_padding = " "
else:
add_padding = ""
if summary is None:
summary = "No details provided."
log.info("/%s" % ("-" * 70))
log.info("| Item: %s%s" % (add_padding, descriptor))
log.info("|")
str_to_wrap = "Description: %s%s" % (add_padding, descriptor.description)
description = textwrap.wrap(
str_to_wrap,
width=68,
initial_indent="| ",
subsequent_indent="| %s" % add_padding,
)
for x in description:
log.info(x)
log.info("|")
str_to_wrap = "Change Log: %s%s" % (add_padding, summary)
change_log = textwrap.wrap(
str_to_wrap,
width=68,
initial_indent="| ",
subsequent_indent="| %s" % add_padding,
)
for x in change_log:
log.info(x)
if required_updates:
log.info("|")
name = required_updates[0]
fw_str = "| Required Updates: %s" % name
log.info(fw_str)
for name in required_updates[1:]:
log.info("| %s" % name)
log.info("\%s" % ("-" * 70))
##########################################################################################
# displaying of info in the terminal, ascii-graphics style
def get_configuration(log, tank_api_instance, new_descriptor, old_descriptor, suppress_prompts, parent_engine_name):
"""
Retrieves all the parameters needed for an app, engine or framework.
May prompt the user for information.
For apps only, the parent_engine_name will contain the system name (e.g. tk-maya, tk-nuke) for
the engine under which the app is parented. This is so that the configuration defaults logic
can resolve parameter values based on engine, for example the {engine_name} token used in
hook settings.
Returns a hierarchical dictionary of param values to use:
{param1:value, param2:value, param3:{child_param1:value, child_param2:value}}
"""
# first get data for all new settings values in the config
param_diff = _generate_settings_diff(parent_engine_name, new_descriptor, old_descriptor)
if len(param_diff) > 0:
log.info("Several new settings are associated with %s." % new_descriptor)
log.info("You will now be prompted to input values for all settings")
log.info("that do not have default values defined.")
log.info("")
# recurse over new parameters:
params = _get_configuration_recursive(log,
tank_api_instance,
new_descriptor,
param_diff,
suppress_prompts,
parent_engine_name)
else:
# nothing new!
params = {}
return params
def _get_configuration_recursive(log, tank_api_instance, new_ver_descriptor, params, suppress_prompts, parent_engine_name, parent_path=None):
"""
Retrieves all the parameters needed for an app, engine or framework.
May prompt the user for information.
Only values for leaf level parameters are retrieved.
"""
parent_path = parent_path or []
param_values = {}
for param_name, param_data in params.iteritems():
if "children" in param_data:
# recurse to children:
param_path = list(parent_path) + ["%s (type: %s)" % (param_name, param_data["type"])]
child_params = _get_configuration_recursive(log,
tank_api_instance,
new_ver_descriptor,
param_data["children"],
suppress_prompts,
parent_engine_name,
param_path)
param_values[param_name] = child_params
else:
# leaf param so need to get value:
param_path = list(parent_path) + [param_name]
# output info about the setting
log.info("")
log.info("/%s" % ("-" * 70))
log.info("| Item: %s" % param_path[0])
for level, name in enumerate(param_path[1:]):
log.info("| %s \ %s" % (" " * level, name))
log.info("| Type: %s" % param_data["type"])
str_to_wrap = "Summary: %s" % param_data["description"]
for x in textwrap.wrap(str_to_wrap, width=68, initial_indent="| ", subsequent_indent="| "):
log.info(x)
log.info("\%s" % ("-" * 70))
if "value" in param_data:
# default value in param data, just log the info for the user
default_value = param_data["value"]
log.info("Using default value '%s'" % (str(default_value),))
else:
# no default value in the param_data, prompt the user
if suppress_prompts:
log.warning("No default value! Please update the environment by hand later!")
param_values[param_name] = None
continue
# get value from user
# loop around until happy
input_valid = False
while not input_valid:
# ask user
answer = raw_input("Please enter value (enter to skip): ")
if answer == "":
# user chose to skip
log.warning("You skipped this value! Please update the environment by hand later!")
param_values[param_name] = None
input_valid = True
else:
# validate value
try:
obj_value = _validate_parameter(tank_api_instance, new_ver_descriptor, param_name, answer)
except Exception as e:
log.error("Validation failed: %s" % e)
else:
input_valid = True
param_values[param_name] = obj_value
return param_values
def ensure_frameworks_installed(log, tank_api_instance, file_location, descriptor, environment, suppress_prompts):
"""
Recursively check that all required frameworks are installed.
Anything not installed will be downloaded from the app store.
"""
missing_fws = validation.get_missing_frameworks(descriptor, environment, file_location)
# this returns dictionaries with name and version keys, the way
# they are defined in the manifest for that descriptor
# [{'version': 'v0.1.x', 'name': 'tk-framework-widget'}]
installed_fw_descriptors = []
# first pass: install all frameworks that are required by this descriptor
for fw_dict in missing_fws:
name = fw_dict["name"]
version_pattern = fw_dict["version"]
# the version pattern can take one of the following forms:
# - exact and arbitrary, but not containing an x: v0.1.2, v0.1.2.34, v0.12.3b
# - minor: v1.x.x
# - increment: v1.2.x
# get the latest version from the app store by
# first getting a stub and then looking for latest.
location_stub = {"type": "app_store", "name": name}
pc = tank_api_instance.pipeline_configuration
fw_descriptor = pc.get_latest_framework_descriptor(location_stub, version_pattern)
installed_fw_descriptors.append(fw_descriptor)
# and now process this framework
log.info("Installing required framework %s %s. Downloading %s..." % (name, version_pattern, fw_descriptor))
if not fw_descriptor.exists_local():
fw_descriptor.download_local()
# now assume a convention where we will name the fw_instance that we create in the environment
# of the form name_version
fw_instance_name = "%s_%s" % (name, version_pattern)
# now make sure all constraints are okay
try:
check_constraints_for_item(fw_descriptor, environment)
except TankError as e:
raise TankError("Cannot install framework: %s" % e)
# okay to install!
# create required shotgun fields
fw_descriptor.ensure_shotgun_fields_exist(tank_api_instance)
# run post install hook
fw_descriptor.run_post_install(tank_api_instance)
# now get data for all new settings values in the config
params = get_configuration(log, tank_api_instance, fw_descriptor, None, suppress_prompts, None)
# next step is to add the new configuration values to the environment
environment.create_framework_settings(file_location, fw_instance_name, params, fw_descriptor.get_dict())
# second pass: For all the missing frameworks that were installed, ensure that these in turn also
# have their dependency requirements satisfied...
for fw_descriptor in installed_fw_descriptors:
ensure_frameworks_installed(log, tank_api_instance, file_location, fw_descriptor, environment, suppress_prompts)
def check_constraints_for_item(descriptor, environment_obj, engine_instance_name=None):
"""
Validates the constraints for a single item. This will check that requirements for
minimum versions for shotgun, core API etc are fulfilled.
Raises a TankError if one or more constraints are blocking. The exception message
will contain details.
"""
# get the parent engine descriptor, if we are checking an app
if engine_instance_name:
# we are checking an app - resolve its parent engine's descriptor
parent_engine_descriptor = environment_obj.get_engine_descriptor(engine_instance_name)
else:
parent_engine_descriptor = None
# check constraints (minimum versions etc)
try:
descriptor.check_version_constraints(
pipelineconfig_utils.get_currently_running_api_version(),
parent_engine_descriptor
)
except CheckVersionConstraintsError as e:
reasons = e.reasons[:]
reasons.insert(0, "%s requires an upgrade to one or more "
"of your installed components." % descriptor)
details = " ".join(reasons)
raise TankError(details)
##########################################################################################
# helpers
def _generate_settings_diff(parent_engine_name, new_descriptor, old_descriptor=None):
"""
Returns a list of settings which are needed if we were to upgrade
an environment based on old_descriptor to the one based on new_descriptor.
Settings in the config which have default values will have their values
populated in the return data structures.
By omitting old_descriptor you will effectively diff against nothing, meaning
that all the settings for the new version of the item (except default ones)
will be part of the listing.
For apps, the parent_engine_name parameter is passed in. This holds the value
of the system name for the parent engine (e.g. tk-maya, tk-nuke) and is used
to resolve engine specific default values.
Returns a hierarchical dictionary containing details for each new parameter and
where it exists in the tree, e.g.:
{
"param1": {"description" : "a required param (no default)", "type": "str", value: None }
"param2": {"description" : "an optional param (has default)", "type": "int", value: 123 }
"param3": {"description" : "param with new children", "type" : "dict", "children" : {
"child_param1" : {"description" : "a child param", "type": "str", value: "foo" }
"child_param2" : {"description" : "another child param", "type": "int", value: 123 }
}
}
"""
# get the new metadata (this will download the app potentially)
schema = new_descriptor.configuration_schema
old_schema = {}
if old_descriptor is not None:
try:
old_schema = old_descriptor.configuration_schema
except TankError:
# download to local failed? Assume that the old version is
# not valid. This is an edge case.
old_schema = {}
# find all new config parameters
new_parameters = _generate_settings_diff_recursive(parent_engine_name, old_schema, schema)
return new_parameters
def _generate_settings_diff_recursive(parent_engine_name, old_schema, new_schema):
"""
Recursively find all parameters in new_schema that don't exist in old_schema.
Returns a hierarchical dictionary containing details for each new parameter and
where it exists in the tree, e.g.:
{
"param1": {"description" : "a required param (no default)", "type": "str", value: None }
"param2": {"description" : "an optional param (has default)", "type": "int", value: 123 }
"param3": {"description" : "param with new children", "type" : "dict", "children" : {
"child_param1" : {"description" : "a child param", "type": "str", value: "foo" }
"child_param2" : {"description" : "another child param", "type": "int", value: 123 }
}
}
Only leaf parameters should be considered 'new'.
"""
new_params = {}
for param_name, new_param_definition_dict in new_schema.iteritems():
param_type = new_param_definition_dict.get("type", "Unknown")
param_desc = new_param_definition_dict.get("description", "No description.")
old_param_definition_dict = old_schema.get(param_name)
if not old_param_definition_dict:
# found a new param:
new_params[param_name] = {"description": param_desc, "type": param_type}
# attempt to resolve a default value from the new parameter def.
try:
default_value = resolve_default_value(new_param_definition_dict,
parent_engine_name, raise_if_missing=True)
except TankNoDefaultValueError:
# No default value exists. We won't add it to the dict.
# It will be prompted for later.
pass
else:
new_params[param_name]["value"] = default_value
else:
if old_param_definition_dict.get("type", "Unknown") != param_type:
# param type has been changed - currently we don't handle this!
continue
if param_type == "dict":
# compare schema items for new and old params:
new_items = new_param_definition_dict.get("items", {})
old_items = old_param_definition_dict.get("items", {})
new_child_params = _generate_settings_diff_recursive(parent_engine_name, old_items, new_items)
if new_child_params:
new_params[param_name] = {"description": param_desc, "type": param_type, "children":new_child_params}
elif param_type == "list":
# check to see if this is a list of dicts:
new_list_param_values = new_param_definition_dict.get("values", {})
old_list_param_values = old_param_definition_dict.get("values", {})
new_list_param_values_type = new_list_param_values.get("type")
if new_list_param_values_type != old_list_param_values.get("type"):
# list param type has changed - currently we don't handle this!
continue
if new_list_param_values_type == "dict":
new_items = new_list_param_values.get("items", {})
old_items = old_list_param_values.get("items", {})
new_child_params = _generate_settings_diff_recursive(parent_engine_name, old_items, new_items)
if new_child_params:
new_params[param_name] = {"description": param_desc, "type": param_type, "children":new_child_params}
elif new_list_param_values_type == "list":
# lists of lists are currently not handled!
continue
return new_params
def _validate_parameter(tank_api_instance, descriptor, parameter, str_value):
"""
Convenience wrapper. Validates a single parameter.
Will raise exceptions if validation fails.
Returns the object-ified value on success.
"""
schema = descriptor.configuration_schema
# get the type for the param we are dealing with
schema_type = schema.get(parameter, {}).get("type", "unknown")
# now convert the string value input to an object (int, string, dict etc)
obj_value = validation.convert_string_to_type(str_value, schema_type)
# finally validate this object against the schema
validation.validate_single_setting(descriptor.display_name, tank_api_instance, schema, parameter, obj_value)
# we are here, must mean we are good to go!
return obj_value
```
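To make the hierarchical diff structure documented above concrete, a toy invocation of the recursive helper with invented schemas; only leaf parameters missing from the old schema appear in the result:
```python
# Toy illustration of _generate_settings_diff_recursive. The schemas are
# invented; parameters without a default end up with no "value" key.
old_schema = {
    "frame_range": {"type": "str", "description": "Frame range."},
    "settings": {"type": "dict", "description": "Extra settings.",
                 "items": {"timeout": {"type": "int", "description": "Timeout."}}},
}
new_schema = {
    "frame_range": {"type": "str", "description": "Frame range."},
    "publish_type": {"type": "str", "description": "Publish type."},
    "settings": {"type": "dict", "description": "Extra settings.",
                 "items": {"timeout": {"type": "int", "description": "Timeout."},
                           "retries": {"type": "int", "description": "Retry count."}}},
}

diff = _generate_settings_diff_recursive("tk-maya", old_schema, new_schema)
# diff ~= {
#     "publish_type": {"description": "Publish type.", "type": "str"},
#     "settings": {"description": "Extra settings.", "type": "dict",
#                  "children": {"retries": {"description": "Retry count.",
#                                           "type": "int"}}},
# }
```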
#### File: tank/commands/push_pc.py
```python
from ..util import filesystem
from . import constants
from ..errors import TankError
from ..pipelineconfig import PipelineConfiguration
from . import console_utils
from .action_base import Action
from ..util import ShotgunPath
import os
import datetime
import shutil
# Core configuration files which are associated with the core API installation and not
# the pipeline configuration.
CORE_API_FILES = [
"interpreter_Linux.cfg",
"interpreter_Windows.cfg",
"interpreter_Darwin.cfg",
"shotgun.yml"
]
# Core configuration files which are associated with a particular
# pipeline config and should not be moved.
CORE_PC_FILES = ["install_location.yml", "pipeline_configuration.yml"]
class PushPCAction(Action):
"""
Action that pushes a config from one pipeline configuration up to its parent
"""
def __init__(self):
Action.__init__(
self,
"push_configuration",
Action.TK_INSTANCE, (
"Pushes any configuration changes made here to another configuration. "
"This is typically used when you have cloned your production configuration "
"into a staging sandbox, updated the apps in this sandbox and want to push "
"those updates back to your production configuration."
),
"Configuration"
)
# This method can be executed via the API
self.supports_api = True
# Parameters we need
self.parameters = {
"target_id": {
"description": "Id of the target Pipeline Configuration to push to.",
"default": None,
"type": "int"
},
"use_symlink": {
"description": "Use a symbolic link to copy the data over.",
"default": False,
"type": "bool"
},
}
# Keep track of whether we are running in interactive mode or not.
self._is_interactive = False
# Just a cache to query SG only once.
self._pipeline_configs = None
def run_noninteractive(self, log, parameters):
"""
Tank command API accessor.
Called when someone runs a tank command through the core API.
:param log: std python logger
:param parameters: dictionary with tank command parameters
"""
self._preflight()
# validate params and run the action
self._run(log, **(self._validate_parameters(parameters)))
def run_interactive(self, log, args):
"""
Tank command accessor.
:param log: Standard python logger.
:param args: Command line args.
"""
self._is_interactive = True
self._preflight()
if len(args) == 1 and args[0] == "--symlink":
use_symlink = True
else:
use_symlink = False
current_pc_name = self.tk.pipeline_configuration.get_name()
current_pc_id = self.tk.pipeline_configuration.get_shotgun_id()
log.info(
"This command will push the configuration in the current pipeline configuration "
"('%s') to another pipeline configuration in the project. By default, the data "
"will be copied to the target config folder. If pass a --symlink parameter, it will "
"create a symlink instead." % current_pc_name
)
log.info("")
log.info("Your existing configuration will be backed up.")
if use_symlink:
log.info("")
log.info("A symlink will be used.")
log.info("")
log.info("The following pipeline configurations are available to push to:")
path_hash = {}
for pc in self._pipeline_configs:
# skip self
if pc["id"] == current_pc_id:
continue
local_path = ShotgunPath.from_shotgun_dict(pc).current_os
path_hash[pc["id"]] = local_path
log.info(" - [%d] %s (%s)" % (pc["id"], pc["code"], local_path))
log.info("")
answer = raw_input(
"Please type in the id of the configuration to push to (ENTER to exit): "
)
if answer == "":
raise TankError("Aborted by user.")
try:
target_pc_id = int(answer)
except ValueError:
raise TankError("Please enter a number!")
self._run(
log,
**(self._validate_parameters({
"target_id": target_pc_id,
"use_symlink": use_symlink,
}))
)
def _preflight(self):
"""
Performs actions needed in both interactive/non interactive modes.
Validate we can run a push in the current context.
:raises: TankError if pushing is invalid.
"""
# get list of all PCs for this project
if self.tk.pipeline_configuration.is_site_configuration():
raise TankError("You can't push the site configuration.")
if self.tk.pipeline_configuration.is_unmanaged():
raise TankError("You can't push an unmanaged configuration.")
project_id = self.tk.pipeline_configuration.get_project_id()
self._pipeline_configs = self.tk.shotgun.find(
constants.PIPELINE_CONFIGURATION_ENTITY,
[["project", "is", {"type": "Project", "id": project_id}]],
["code", "linux_path", "windows_path", "mac_path"]
)
# We should have at least one pipeline config (the current one)
# We need a second one to push to, obviously...
if len(self._pipeline_configs) < 2:
raise TankError(
"Only one pipeline configuration for this project! Need at least two "
"configurations in order to push. Please start by cloning a pipeline "
"configuration inside of Shotgun."
)
def _run(self, log, target_id, use_symlink=False):
"""
Push the current pipeline configuration to the one with the given id.
:param log: A standard logger instance.
:param int target_id: The target pipeline config id.
:param bool use_symlink: Whether a symlink should be used
:raises: TankError on failure.
"""
# If using symlink, check they are available, which is not the case on
# Windows.
if use_symlink and not getattr(os, "symlink", None):
raise TankError(
"Symbolic links are not supported on this platform"
)
if target_id == self.tk.pipeline_configuration.get_shotgun_id():
raise TankError(
"The target pipeline config id must be different from the current one"
)
for config in self._pipeline_configs:
if config["id"] == target_id:
target_pc_path = ShotgunPath.from_shotgun_dict(config).current_os
break
else:
raise TankError("Id %d is not a valid pipeline config id" % target_id)
target_pc = PipelineConfiguration(target_pc_path)
# check that both pcs are using the same core version
target_core_version = target_pc.get_associated_core_version()
source_core_version = self.tk.pipeline_configuration.get_associated_core_version()
if target_core_version != source_core_version:
raise TankError(
"The configuration you are pushing to is using Core API %s and "
"the configuration you are pushing from is using Core API %s. "
"This is not supported - before pushing the changes, make sure "
"that both configurations are using the "
"same Core API!" % (target_core_version, source_core_version)
)
# check that there are no dev descriptors
dev_desc = None
for env_name in self.tk.pipeline_configuration.get_environments():
try:
env = self.tk.pipeline_configuration.get_environment(env_name)
except Exception as e:
raise TankError("Failed to load environment %s,"
" run 'tank validate' for more details, got error: %s" % (env_name, e))
for eng in env.get_engines():
desc = env.get_engine_descriptor(eng)
if desc.is_dev():
dev_desc = desc
break
for app in env.get_apps(eng):
desc = env.get_app_descriptor(eng, app)
if desc.is_dev():
dev_desc = desc
break
if dev_desc:
log.warning(
"Looks like you have one or more dev locations set up in your "
"configuration! We strongly recommend that you do not use dev locations "
"in any production based configs. Dev descriptors are for development "
"purposes only. You can easily switch a dev location using the "
"'tank switch_app' command."
)
# Assume "yes" in non interactive mode
if self._is_interactive and not console_utils.ask_yn_question("Okay to proceed?"):
raise TankError("Aborted.")
date_suffix = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
source_path = os.path.join(self.tk.pipeline_configuration.get_path(), "config")
# Protect ourselves against an edge case which happens mostly in unit tests
# if multiple pushes are attempted within less than a second, which is the
# granularity of our date_suffix used for uniqueness.
target_tmp_path = filesystem.get_unused_path(
os.path.join(target_pc_path, "config.tmp.%s" % date_suffix)
)
symlink_path = filesystem.get_unused_path(
os.path.join(target_pc_path, "config.%s" % date_suffix)
)
target_path = os.path.join(target_pc_path, "config")
target_backup_path = filesystem.get_unused_path(
os.path.join(target_pc_path, "config.bak.%s" % date_suffix)
)
log.debug("Will push the config from %s to %s" % (source_path, target_path))
log.info("Hold on, pushing config...")
##########################################################################################
# I/O phase
old_umask = os.umask(0)
try:
# copy to temp location
try:
# copy everything!
log.debug("Copying %s -> %s" % (source_path, target_tmp_path))
filesystem.copy_folder(source_path, target_tmp_path)
# If the source and target configurations are both localized, then also copy the
# core-related api files to the target config. Otherwise, skip them.
copy_core_related_files = (self.tk.pipeline_configuration.is_localized() and
target_pc.is_localized())
# CORE_PC_FILES are specific to the pipeline configuration so we shouldn't copy them
if copy_core_related_files:
core_files_to_remove = CORE_PC_FILES
else:
core_files_to_remove = CORE_API_FILES + CORE_PC_FILES
if self.tk.pipeline_configuration.is_localized() and not target_pc.is_localized():
log.warning("The source configuration contains a local core but the target "
"configuration uses a shared core. The following core-related api "
"files will not be copied to the target configuration: "
"%s" % CORE_API_FILES)
# unlock and remove all the special core files from the temp dir so they aren't
# copied to the target
for core_file in core_files_to_remove:
path = os.path.join(target_tmp_path, "core", core_file)
if os.path.exists(path):
os.chmod(path, 0o666)
log.debug("Removing system file %s" % path )
os.remove(path)
# copy the pc specific special core files from target config to new config temp dir
# in order to preserve them
for core_file in CORE_PC_FILES:
curr_config_path = os.path.join(target_path, "core", core_file)
new_config_path = os.path.join(target_tmp_path, "core", core_file)
log.debug("Copying PC system file %s -> %s" % (curr_config_path, new_config_path) )
shutil.copy(curr_config_path, new_config_path)
except Exception as e:
raise TankError(
"Could not copy into temporary target folder '%s'. The target config "
"has not been altered. Check permissions and try again! "
"Error reported: %s" % (target_tmp_path, e)
)
# backup original config
created_backup_path = None
try:
if os.path.islink(target_path):
# If we are symlinked, no need to back up: just delete the
# current symlink.
# If remove fails, we don't have to worry about restoring the
# original `target_path`: if it failed, it is still there...
os.remove(target_path)
else:
# Move data to backup folder
# Try using os.rename first, which is a lot more efficient than
# copying all files over, as it just updates inode tables on Unix.
# If it fails, fall back to copying files and deleting them
# only after everything was copied over.
# We basically replicate what shutil.move does, but we use
# shutil.copytree and filesystem.safe_delete_folder to ensure
# we only delete data after everything was copied over in the
# backup folder.
try:
os.rename(target_path, target_backup_path)
created_backup_path = target_backup_path
except OSError as e:
log.debug("Falling back on copying folder...:%s" % e)
# Didn't work fall back to copying files
shutil.copytree(target_path, target_backup_path)
# Delete below could fail, but we do have a backup, so flag
# it now.
created_backup_path = target_backup_path
filesystem.safe_delete_folder(target_path)
except Exception as e:
raise TankError(
"Could not move target folder from '%s' to '%s'. "
"Error reported: %s" % (target_path, target_backup_path, e)
)
# lastly, move new config into place
if use_symlink:
try:
# If the symlink path exists, shutil.move will move the
# tmp folder inside it, instead of renaming the tmp folder with
# the target path, leading to invalid config. So check this
# and report it.
if os.path.exists(symlink_path):
raise RuntimeError("Target %s folder already exists..." % symlink_path)
shutil.move(target_tmp_path, symlink_path)
# It seems that when given a basename as the source for the
# link, symlink creates the link in the target directory, so
# this works without having to change the current directory?
os.symlink(os.path.basename(symlink_path), target_path)
except Exception as e:
raise TankError(
"Could not move new config folder from '%s' to '%s' or create symlink."
"Error reported: %s" % (target_tmp_path, symlink_path, e)
)
else:
try:
# If the target path still exists, shutil.move will move the
# tmp folder inside it, instead of renaming the tmp folder with
# the target path, leading to invalid config. So check this
# and report it.
if os.path.exists(target_path):
raise RuntimeError("Target %s folder already exists..." % target_path)
shutil.move(target_tmp_path, target_path)
except Exception as e:
raise TankError(
"Could not move new config folder from '%s' to '%s'. "
"Error reported: %s" % (target_tmp_path, target_path, e)
)
finally:
os.umask(old_umask)
if created_backup_path:
log.info(
"Your old configuration has been backed up "
"into the following folder: %s" % created_backup_path
)
##########################################################################################
# Post Process Phase
# now download all apps
log.info("Checking if there are any apps that need downloading...")
for env_name in target_pc.get_environments():
env = target_pc.get_environment(env_name)
for eng in env.get_engines():
desc = env.get_engine_descriptor(eng)
if not desc.exists_local():
log.info("Downloading Engine %s..." % eng)
desc.download_local()
for app in env.get_apps(eng):
desc = env.get_app_descriptor(eng, app)
if not desc.exists_local():
log.info("Downloading App %s..." % app)
desc.download_local()
log.info("")
log.info("Push Complete!")
log.info("")
```
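The backup-then-rename sequence above favors `os.rename` because it is an inode-level operation on the same filesystem, with a copy-then-delete fallback across filesystems. A minimal standalone sketch of the same pattern (the helper name is ours, not part of the toolkit API):
```python
import os
import shutil


def move_folder_with_fallback(src, dst):
    """Move src to dst, preferring an inode-level rename.

    os.rename only updates directory entries, so it is near-instant on the
    same filesystem; across filesystems it raises OSError, in which case we
    copy everything first and delete the original only after the copy has
    fully succeeded, so a failure never leaves us without a complete copy.
    """
    try:
        os.rename(src, dst)
    except OSError:
        shutil.copytree(src, dst)
        shutil.rmtree(src)


# Example (illustrative paths): back up a config folder before replacing it.
# move_folder_with_fallback("config", "config.bak.20240101_120000")
```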
#### File: tank/commands/switch.py
```python
from ..errors import TankError
from . import constants
from . import util
from . import console_utils
from .action_base import Action
import os
class SwitchAppAction(Action):
"""
Action that makes it easy to switch from one descriptor to another
"""
def __init__(self):
Action.__init__(self,
"switch_app",
Action.TK_INSTANCE,
"Switches an app from one code location to another.",
"Developer")
def run_interactive(self, log, args):
if len(args) < 4:
log.info("This command allows you to easily switch an app between different "
"locations. A location defines where toolkit picks and synchrononizes "
"App versions. It can be either the Toolkit App Store, a version control "
"system such as Git or a location on disk.")
log.info("")
log.info("Switching an app to use a raw disk location")
log.info("--------------------------------------------------")
log.info("If you want to do app development, it is handy to be able to "
"take an app in your configuration and tell it to load from a "
"specific folder on disk. The workflow is that you typically would "
"start off with a git repository (forked off one of Shotgun's git "
"repositories if you are modifying one of the standard toolkit apps). "
"Then, clone this repo into your local dev area where you intend to "
"make the actual changes. Now use the switch command to tell toolkit "
"to use this version of the code.")
log.info("")
log.info("Note! We advise against using dev locations in your primary configuration "
"when you want to do development work, start by cloning your primary "
"pipeline configuration. You can do this by right clicking on it in Shotgun.")
log.info("")
log.info("> Syntax: switch_app environment engine app path")
log.info("> Example: switch_app Asset tk-maya tk-multi-about /Users/foo/dev/tk-multi-about")
log.info("")
log.info("")
log.info("Switching an app to track a git repository")
log.info("--------------------------------------------------")
log.info("If you are using custom made apps or have modified Shotgun's built in apps "
"by forking them from github ('https://github.com/shotgunsoftware'), and you "
"have finished customization, you usually want to switch the app so that it "
"tracks your git repository instead of the Toolkit App Store. Toolkit will "
"read the list of tags from the repository and identify version-like tags "
"(such as 'v0.1.2' or 'v1.2.3.4' and use these to determine which version "
"is the latest one. If you create a new tag in the repository and then run "
"the Toolkit update checker, it will detect that a more recent version is "
"available and prompt you if you want to upgrade.")
log.info("")
log.info("> Syntax: switch_app environment engine app git_repo")
log.info("The git_repo part is a repository location that can be understood by git. "
"Examples include: ")
log.info(" - /path/to/repo.git")
log.info(" - user@remotehost:/path_to/repo.git")
log.info(" - git://github.com/manneohrstrom/tk-hiero-publish.git")
log.info(" - https://github.com/manneohrstrom/tk-hiero-publish.git")
log.info("")
log.info("")
log.info("Switching an app to track the Toolkit App Store")
log.info("--------------------------------------------------")
log.info("If you have been doing development and want to switch back to the "
"official app store version of an app, you can use the following syntax:")
log.info("")
log.info("> Syntax: switch_app environment engine app app_store")
log.info("> Example: switch_app Asset tk-maya tk-multi-about app_store")
log.info("")
log.info("")
log.info("For a list of environments, engines and apps, run the app_info command.")
log.info("")
log.info("If you add a %s flag, the original, non-structure-preserving "
"yaml parser will be used. This parser was used by default in core v0.17.x "
"and below." % constants.LEGACY_YAML_PARSER_FLAG)
log.info("")
return
(use_legacy_parser, args) = util.should_use_legacy_yaml_parser(args)
preserve_yaml = not use_legacy_parser
# get parameters
env_name = args[0]
engine_instance_name = args[1]
app_instance_name = args[2]
path = None
mode = None
fourth_param = args[3]
if fourth_param == "app_store":
mode = "app_store"
elif fourth_param.endswith(".git"):
mode = "git"
path = fourth_param
else:
mode = "dev"
path = fourth_param
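# Examples of how the fourth parameter is interpreted:
#   app_store -> mode "app_store"
#   git://github.com/foo/tk-my-app.git -> mode "git"
#   /Users/foo/dev/tk-my-app -> mode "dev"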
# find descriptor
try:
env = self.tk.pipeline_configuration.get_environment(env_name, writable=True)
env.set_yaml_preserve_mode(preserve_yaml)
except Exception as e:
raise TankError("Environment '%s' could not be loaded! Error reported: %s" % (env_name, e))
# make sure the engine exists in the environment
if engine_instance_name not in env.get_engines():
raise TankError("Environment %s has no engine named %s!" % (env_name, engine_instance_name))
# and the app
apps_for_engine = env.get_apps(engine_instance_name)
if app_instance_name not in apps_for_engine:
raise TankError("Environment %s, engine %s has no app named '%s'! "
"Available app instances are: %s " % (env_name,
engine_instance_name,
app_instance_name,
", ".join(apps_for_engine) ))
# get the descriptor
descriptor = env.get_app_descriptor(engine_instance_name, app_instance_name)
log.info("")
if mode == "app_store":
new_descriptor = self.tk.pipeline_configuration.get_latest_app_descriptor(
{"type": "app_store", "name": descriptor.system_name}
)
elif mode == "dev":
if not os.path.exists(path):
raise TankError("Cannot find path '%s' on disk!" % path)
# run descriptor factory method
new_descriptor = self.tk.pipeline_configuration.get_app_descriptor(
{"type": "dev", "path": path}
)
elif mode == "git":
# run descriptor factory method
new_descriptor = self.tk.pipeline_configuration.get_latest_app_descriptor(
{"type": "git", "path": path}
)
else:
raise TankError("Unknown mode!")
# prompt user!
log.info("")
log.info("")
log.info("Current version")
log.info("------------------------------------")
for (k,v) in descriptor.get_dict().items():
log.info(" - %s: %s" % (k.capitalize(), v))
log.info("")
log.info("New version")
log.info("------------------------------------")
for (k,v) in new_descriptor.get_dict().items():
log.info(" - %s: %s" % (k.capitalize(), v))
log.info("")
if not console_utils.ask_yn_question("Okay to switch?"):
log.info("Switch aborted!")
return
if not new_descriptor.exists_local():
log.info("Downloading %s..." % new_descriptor)
new_descriptor.download_local()
# create required shotgun fields
new_descriptor.ensure_shotgun_fields_exist(self.tk)
# run post install hook
new_descriptor.run_post_install(self.tk)
# ensure that all required frameworks have been installed
# find the file where our item is being installed
(_, yml_file) = env.find_location_for_app(engine_instance_name, app_instance_name)
console_utils.ensure_frameworks_installed(log, self.tk, yml_file, new_descriptor, env, suppress_prompts=False)
# find the name of the engine
engine_system_name = env.get_engine_descriptor(engine_instance_name).system_name
# now get data for all new settings values in the config
params = console_utils.get_configuration(log, self.tk, new_descriptor, descriptor, False, engine_system_name)
# next step is to add the new configuration values to the environment
env.update_app_settings(engine_instance_name,
app_instance_name,
params,
new_descriptor.get_dict())
log.info("Switch complete!")
```
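The help text above describes how git-tracked apps resolve their latest version from version-like tags. The toolkit's actual tag resolution logic is not shown in this document, so the following is only a hypothetical sketch of that described behavior:
```python
import re

# Tags that look like v<digits>(.<digits>)*, e.g. v0.1.2 or v1.2.3.4.
VERSION_TAG = re.compile(r"^v\d+(\.\d+)*$")


def latest_version_tag(tags):
    """Return the highest version-like tag from a list, or None."""
    candidates = [t for t in tags if VERSION_TAG.match(t)]
    if not candidates:
        return None
    # Compare numeric components so that v0.10.0 sorts above v0.9.1.
    return max(candidates, key=lambda t: [int(p) for p in t[1:].split(".")])


print(latest_version_tag(["v0.1.2", "v0.10.0", "v0.9.1", "experimental"]))
# -> v0.10.0
```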
#### File: tank/commands/update.py
```python
import os
from .action_base import Action
from . import console_utils
from . import util
from ..platform.environment import WritableEnvironment
from ..descriptor import CheckVersionConstraintsError
from . import constants
from ..util.version import is_version_number, is_version_newer
from ..util import shotgun
from .. import pipelineconfig_utils
class AppUpdatesAction(Action):
"""
Action that updates apps and engines.
"""
def __init__(self):
Action.__init__(self,
"updates",
Action.TK_INSTANCE,
"Checks if there are any app or engine updates for the current configuration.",
"Configuration")
# this method can be executed via the API
self.supports_api = True
self.parameters = {}
self.parameters["environment_filter"] = { "description": "Name of environment to check.",
"default": "ALL",
"type": "str" }
self.parameters["engine_filter"] = { "description": "Name of engine to check.",
"default": "ALL",
"type": "str" }
self.parameters["app_filter"] = { "description": "Name of app to check.",
"default": "ALL",
"type": "str" }
self.parameters["external"] = { "description": "Specify an external config to update.",
"default": None,
"type": "str" }
self.parameters["preserve_yaml"] = { "description": ("Enable alternative yaml parser that better preserves "
"yaml structure and comments"),
"default": True,
"type": "bool" }
def run_noninteractive(self, log, parameters):
"""
Tank command API accessor.
Called when someone runs a tank command through the core API.
:param log: std python logger
:param parameters: dictionary with tank command parameters
"""
# validate params and seed default values
computed_params = self._validate_parameters(parameters)
if computed_params["environment_filter"] == "ALL":
computed_params["environment_filter"] = None
if computed_params["engine_filter"] == "ALL":
computed_params["engine_filter"] = None
if computed_params["app_filter"] == "ALL":
computed_params["app_filter"] = None
return check_for_updates(log,
self.tk,
computed_params["environment_filter"],
computed_params["engine_filter"],
computed_params["app_filter"],
computed_params["external"],
computed_params["preserve_yaml"],
suppress_prompts=True )
def run_interactive(self, log, args):
"""
Tank command accessor
:param log: std python logger
:param args: command line args
"""
(use_legacy_parser, args) = util.should_use_legacy_yaml_parser(args)
preserve_yaml = not use_legacy_parser
if len(args) == 0:
# update EVERYTHING!
log.info("This command will go through your current configuration and check if there "
"are any updates available. If there are updates, you will be asked if you "
"want to perform an upgrade. If settings has been added to the new version "
"that you are installing, you may be prompted to specified values for these.")
log.info("")
log.info("Running this command with no parameters will check all environments, engines "
"and app. This may take a long time. You can also run the updater on a subset "
"of your installed apps and engines.")
log.info("")
log.info("")
log.info("")
log.info("General syntax:")
log.info("---------------")
log.info("")
log.info("> tank updates [environment_name] "
"[engine_name] [app_name] [%s] "
"[--external='/path/to/config']" % constants.LEGACY_YAML_PARSER_FLAG)
log.info("")
log.info("- The special keyword ALL can be used to denote all items in a category.")
log.info("")
log.info("- If you want to update an external configuration instead of the current project, "
"pass in a path via the --external flag.")
log.info("")
log.info("If you add a %s flag, the original, non-structure-preserving "
"yaml parser will be used. This parser was used by default in core v0.17.x "
"and below." % constants.LEGACY_YAML_PARSER_FLAG)
log.info("")
log.info("")
log.info("")
log.info("Examples:")
log.info("---------")
log.info("")
log.info("Check everything:")
log.info("> tank updates")
log.info("")
log.info("Check the Shot environment:")
log.info("> tank updates Shot")
log.info("")
log.info("Check all maya apps in all environments:")
log.info("> tank updates ALL tk-maya")
log.info("")
log.info("Check all maya apps in the Shot environment:")
log.info("> tank updates Shot tk-maya")
log.info("")
log.info("Make sure the loader app is up to date everywhere:")
log.info("> tank updates ALL ALL tk-multi-loader")
log.info("")
log.info("Make sure the loader app is up to date in maya:")
log.info("> tank updates ALL tk-maya tk-multi-loader")
log.info("")
log.info("")
if console_utils.ask_yn_question("Continue with full update?"):
check_for_updates(log,
self.tk,
env_name=None,
engine_instance_name=None,
app_instance_name=None,
preserve_yaml=preserve_yaml)
return
env_filter = None
engine_filter = None
app_filter = None
external_path = None
# look for an --external argument
for arg in args[:]:  # iterate over a copy since we remove from the list below
if arg.startswith("--external="):
# remove it from args list
args.remove(arg)
# from '--external=/path/to/my config' get '/path/to/my config'
external_path = arg[len("--external="):]
if external_path == "":
log.error("You need to specify a path to a toolkit configuration!")
return
if len(args) > 0:
env_filter = args[0]
if env_filter == "ALL":
log.info("- Update will check all environments.")
env_filter = None
else:
log.info("- Update will only check the %s environment." % env_filter)
if len(args) > 1:
engine_filter = args[1]
if engine_filter == "ALL":
log.info("- Update will check all engines.")
engine_filter = None
else:
log.info("- Update will only check the %s engine." % engine_filter)
if len(args) > 2:
app_filter = args[2]
if app_filter == "ALL":
log.info("- Update will check all apps.")
app_filter = None
else:
log.info("- Update will only check the %s app." % app_filter)
check_for_updates(log,
self.tk,
env_name=env_filter,
engine_instance_name=engine_filter,
app_instance_name=app_filter,
external=external_path,
preserve_yaml=preserve_yaml)
################################################################################################
# helper methods for update
def check_for_updates(log,
tk,
env_name,
engine_instance_name,
app_instance_name,
external=None,
preserve_yaml=True,
suppress_prompts=False):
"""
Runs the update checker.
:param log: Python logger
:param tk: Toolkit instance
:param env_name: Environment name to update
:param engine_instance_name: Engine instance name to update
:param app_instance_name: App instance name to update
:param suppress_prompts: If True, run without prompting
:param preserve_yaml: If True, a comment preserving yaml parser is used.
:param external: Path to external config to operate on
"""
pc = tk.pipeline_configuration
processed_items = []
if external:
# try to load external file
external = os.path.expanduser(external)
if not os.path.exists(external):
log.error("Cannot find external config %s" % external)
return
env_path = os.path.join(external, "env")
if not os.path.exists(env_path):
log.error("Cannot find environment folder '%s'" % env_path)
return
# find all environment files
log.info("Looking for matching environments in %s:" % env_path)
log.info("")
env_filenames = []
for filename in os.listdir(env_path):
if filename.endswith(".yml"):
if env_name is None or ("%s.yml" % env_name) == filename:
# matching the env filter (or no filter set)
log.info("> found %s" % filename)
env_filenames.append(os.path.join(env_path, filename))
# now process them one after the other
for env_filename in env_filenames:
log.info("")
log.info("")
log.info("======================================================================")
log.info("Environment %s..." % env_name)
log.info("======================================================================")
log.info("")
env_obj = WritableEnvironment(env_filename, pc)
env_obj.set_yaml_preserve_mode(preserve_yaml)
log.info("Environment path: %s" % (env_obj.disk_location))
log.info("")
processed_items += _process_environment(tk,
log,
env_obj,
engine_instance_name,
app_instance_name,
suppress_prompts)
else:
# process non-external config
if env_name is None:
env_names_to_process = pc.get_environments()
else:
env_names_to_process = [env_name]
for env_name in env_names_to_process:
log.info("")
log.info("")
log.info("======================================================================")
log.info("Environment %s..." % env_name)
log.info("======================================================================")
env_obj = pc.get_environment(env_name, writable=True)
env_obj.set_yaml_preserve_mode(preserve_yaml)
log.info("Environment path: %s" % (env_obj.disk_location))
log.info("")
processed_items += _process_environment(tk,
log,
env_obj,
engine_instance_name,
app_instance_name,
suppress_prompts)
# display summary
log.info("")
summary = []
for x in processed_items:
if x["was_updated"]:
summary.append("%s was updated from %s to %s" % (x["new_descriptor"],
x["old_descriptor"].version,
x["new_descriptor"].version))
(_, url) = x["new_descriptor"].changelog
if url:
summary.append("Change Log: %s" % url)
summary.append("")
if len(summary) > 0:
log.info("Items were updated. Details follow below:")
log.info("-" * 70)
for x in summary:
log.info(x)
log.info("-" * 70)
log.info("")
# generate return data for api access
ret_val = []
for x in processed_items:
d = {}
d["engine_instance"] = x["engine_name"]
d["app_instance"] = x["app_name"]
d["environment"] = x["env_name"].name
d["updated"] = x["was_updated"]
if x["was_updated"]:
d["new_version"] = x["new_descriptor"].version
ret_val.append(d)
return ret_val
def _process_environment(tk,
log,
environment_obj,
engine_instance_name=None,
app_instance_name=None,
suppress_prompts=False):
"""
Updates a given environment object
:param log: Python logger
:param tk: Toolkit instance
:param environment_obj: Environment object to update
:param engine_instance_name: Engine instance name to update
:param app_instance_name: App instance name to update
:param suppress_prompts: If True, run without prompting
:returns: list of processed items (see _process_item)
"""
items = []
if engine_instance_name is None:
# process all engines
engines_to_process = environment_obj.get_engines()
else:
# there is a filter! Ensure the filter matches something
# in this environment
if engine_instance_name in environment_obj.get_engines():
# the filter matches something in this environment
engines_to_process = [engine_instance_name]
else:
# the item we are filtering on does not exist in this env
engines_to_process = []
for engine in engines_to_process:
items.extend(_process_item(log, suppress_prompts, tk, environment_obj, engine))
log.info("")
if app_instance_name is None:
# no filter - process all apps
apps_to_process = environment_obj.get_apps(engine)
else:
# there is a filter! Ensure the filter matches
# something in the current engine apps listing
if app_instance_name in environment_obj.get_apps(engine):
# the filter matches something!
apps_to_process = [app_instance_name]
else:
# the app filter does not match anything in this engine
apps_to_process = []
for app in apps_to_process:
items.extend(_process_item(log, suppress_prompts, tk, environment_obj, engine, app))
log.info("")
if len(environment_obj.get_frameworks()) > 0:
log.info("")
log.info("Frameworks:")
log.info("-" * 70)
for framework in environment_obj.get_frameworks():
items.extend(_process_item(log, suppress_prompts, tk, environment_obj, framework_name=framework))
return items
def _update_item(log, suppress_prompts, tk, env, old_descriptor, new_descriptor, engine_name=None, app_name=None, framework_name=None):
"""
Performs an upgrade of an engine/app/framework.
"""
# note! Some of these methods further down are likely to pull the apps local
# in order to do deep introspection. In order to provide better error reporting,
# pull the apps local before we start
if not new_descriptor.exists_local():
log.info("Downloading %s..." % new_descriptor)
new_descriptor.download_local()
# create required shotgun fields
new_descriptor.ensure_shotgun_fields_exist(tk)
# run post install hook
new_descriptor.run_post_install(tk)
# ensure that all required frameworks have been installed
# find the file where our item is being installed
if framework_name:
(_, yml_file) = env.find_location_for_framework(framework_name)
elif app_name:
(_, yml_file) = env.find_location_for_app(engine_name, app_name)
else:
(_, yml_file) = env.find_location_for_engine(engine_name)
console_utils.ensure_frameworks_installed(log, tk, yml_file, new_descriptor, env, suppress_prompts)
# if we are updating an app, we pass the engine system name to the configuration method
# so that it can resolve engine based defaults
parent_engine_system_name = None
if app_name:
parent_engine_system_name = env.get_engine_descriptor(engine_name).system_name
# now get data for all new settings values in the config
params = console_utils.get_configuration(log,
tk,
new_descriptor,
old_descriptor,
suppress_prompts,
parent_engine_system_name)
# awesome. got all the values we need.
log.info("")
log.info("")
# next step is to add the new configuration values to the environment
if framework_name:
env.update_framework_settings(framework_name, params, new_descriptor.get_dict())
elif app_name:
env.update_app_settings(engine_name, app_name, params, new_descriptor.get_dict())
else:
env.update_engine_settings(engine_name, params, new_descriptor.get_dict())
def _process_item(log, suppress_prompts, tk, env, engine_name=None, app_name=None, framework_name=None):
"""
Checks if an app/engine/framework is up to date and potentially upgrades it.
Returns a list of dictionaries, each with keys:
- was_updated (bool)
- old_descriptor
- new_descriptor (may be None if was_updated is False)
- app_name
- engine_name
- env_name
"""
if framework_name:
log.info("Framework %s (Environment %s)" % (framework_name, env.name))
elif app_name:
log.info("App %s (Engine %s, Environment %s)" % (app_name, engine_name, env.name))
else:
log.info("")
log.info("-" * 70)
log.info("Engine %s (Environment %s)" % (engine_name, env.name))
status = _check_item_update_status(env, engine_name, app_name, framework_name)
item_was_updated = False
updated_items = []
if status["can_update"]:
new_descriptor = status["latest"]
required_framework_updates = _get_framework_requirements(
log=log,
environment=env,
descriptor=new_descriptor,
)
# print summary of changes
console_utils.format_bundle_info(
log,
new_descriptor,
required_framework_updates,
)
# ask user
if suppress_prompts or console_utils.ask_question("Update to the above version?"):
curr_descriptor = status["current"]
_update_item(log,
suppress_prompts,
tk,
env,
curr_descriptor,
new_descriptor,
engine_name,
app_name,
framework_name)
# If we have frameworks that need to be updated along with
# this item, then we do so here. We're suppressing prompts
# for this because these framework updates are required for
# the proper functioning of the bundle that was just updated.
# This will be due to a minimum-required version setting for
# the bundle in its info.yml that isn't currently satisfied.
for fw_name in required_framework_updates:
updated_items.extend(
_process_item(log, True, tk, env, framework_name=fw_name)
)
item_was_updated = True
elif status["out_of_date"] == False and not status["current"].exists_local():
# app does not exist in the local app download cache area
if suppress_prompts or console_utils.ask_question("Current version does not exist locally - download it now?"):
log.info("Downloading %s..." % status["current"])
status["current"].download_local()
elif status["out_of_date"] == False:
log.info(" \-- You are running version %s which is the most recent release." % status["latest"].version)
else:
# cannot update for some reason
log.warning(status["update_status"])
# return data
d = {}
d["was_updated"] = item_was_updated
d["old_descriptor"] = status["current"]
d["new_descriptor"] = status["latest"]
d["app_name"] = app_name
d["engine_name"] = engine_name
d["env_name"] = env
updated_items.append(d)
return updated_items
def _check_item_update_status(environment_obj, engine_name=None, app_name=None, framework_name=None):
"""
Checks if an engine or app or framework is up to date.
Will locate the latest version of the item and run a comparison.
Will check for constraints and report about these
(if the new version requires minimum version of shotgun, the core API, etc.)
Returns a dictionary with the following keys:
- current: Current engine descriptor
- latest: Latest engine descriptor
- out_of_date: Is the current version out of date?
- deprecated: Is this item deprecated?
- can_update: Can we update?
- update_status: String with details describing the status.
"""
parent_engine_desc = None
if framework_name:
curr_desc = environment_obj.get_framework_descriptor(framework_name)
# framework_name follows a convention and is of the form 'frameworkname_version',
# where version is of the form v1.2.3, v1.2.x or v1.x.x
version_pattern = framework_name.split("_")[-1]
# use this pattern as a constraint as we check for updates
latest_desc = curr_desc.find_latest_version(version_pattern)
elif app_name:
curr_desc = environment_obj.get_app_descriptor(engine_name, app_name)
# for apps, also get the descriptor for their parent engine
parent_engine_desc = environment_obj.get_engine_descriptor(engine_name)
# and get potential upgrades
latest_desc = curr_desc.find_latest_version()
else:
curr_desc = environment_obj.get_engine_descriptor(engine_name)
# and get potential upgrades
latest_desc = curr_desc.find_latest_version()
# out of date check
out_of_date = (latest_desc.version != curr_desc.version)
# check deprecation
(is_dep, dep_msg) = latest_desc.deprecation_status
if is_dep:
# we treat deprecation as an out of date that cannot be upgraded!
out_of_date = True
can_update = False
status = "This item has been flagged as deprecated with the following status: %s" % dep_msg
elif not out_of_date:
can_update = False
status = "Item is up to date!"
else:
# maybe we can update!
# look at constraints
try:
latest_desc.check_version_constraints(
pipelineconfig_utils.get_currently_running_api_version(),
parent_engine_desc
)
except CheckVersionConstraintsError as e:
reasons = e.reasons
reasons.insert(0, "The latest version (%s) of the item requires an upgrade to one "
"or more of your installed components." % latest_desc.version)
status = " ".join(reasons)
can_update = False
else:
status = "A new version (%s) of the item is available for installation." % latest_desc.version
can_update = True
# prepare return data
data = {}
data["current"] = curr_desc
data["latest"] = latest_desc
data["out_of_date"] = out_of_date
data["can_update"] = can_update
data["update_status"] = status
return data
def _get_framework_requirements(log, environment, descriptor):
"""
Returns a list of framework names that will be require updating. This
is checking the given descriptor's required frameworks for any
minimum-required versions it might be expecting. Any version
requirements not already met by the frameworks configured for the
given environment will be returned by name.
:param log: The logging handle.
:param environment: The environment object.
:param descriptor: The descriptor object to check.
:returns: A list of framework names requiring update.
Example: ["tk-framework-widget_v0.2.x", ...]
"""
required_frameworks = descriptor.required_frameworks
if not required_frameworks:
return []
env_fw_descriptors = dict()
env_fw_instances = environment.get_frameworks()
for fw in env_fw_instances:
env_fw_descriptors[fw] = environment.get_framework_descriptor(fw)
frameworks_to_update = []
for fw in required_frameworks:
# Example: tk-framework-widget_v0.2.x
name = "%s_%s" % (fw.get("name"), fw.get("version"))
min_version = fw.get("minimum_version")
if not min_version:
log.debug("No minimum_version setting found for %s" % name)
continue
# If we don't have the framework configured then there's
# not going to be anything for us to check against. It's
# best to simply continue on.
if name not in env_fw_descriptors:
log.warning(
"Framework %s isn't configured; unable to check "
"its minimum-required version as a result." % name
)
continue
env_fw_version = env_fw_descriptors[name].version
if env_fw_version == "Undefined":
log.debug(
"Installed framework has no version specified. Not checking "
"the bundle's required framework version as a result."
)
continue
if not is_version_number(min_version) or not is_version_number(env_fw_version):
log.warning(
"Unable to check minimum-version requirements for %s "
"due to one or both version numbers being malformed: "
"%s and %s" % (name, min_version, env_fw_version)
)
continue
if is_version_newer(min_version, env_fw_version):
frameworks_to_update.append(name)
return frameworks_to_update
```
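The framework instance names that appear throughout the update logic encode a version constraint after the final underscore, as noted in `_check_item_update_status`. A standalone sketch of that naming convention (the helper is illustrative, using the same `split("_")[-1]` parse as the code above):
```python
def split_framework_instance(instance_name):
    """Split 'tk-framework-widget_v0.2.x' into (name, version_pattern).

    Framework instance names follow the 'frameworkname_version' convention;
    the version part may be exact (v1.2.3) or a pattern such as v1.2.x
    or v1.x.x.
    """
    version_pattern = instance_name.split("_")[-1]
    name = instance_name[:-(len(version_pattern) + 1)]
    return name, version_pattern


print(split_framework_instance("tk-framework-widget_v0.2.x"))
# -> ('tk-framework-widget', 'v0.2.x')
```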
#### File: tank/commands/util.py
```python
from . import constants
def should_use_legacy_yaml_parser(args):
"""
Given a set of command line args, determine if the
legacy yaml parser should be used.
:param args: list of arg strings
:returns: (use_legacy, adjusted_args) - tuple with bool to indicate
if the legacy parser should be used and a list of args where
the legacy flag has been removed.
"""
# look for a legacy parser flag
if constants.LEGACY_YAML_PARSER_FLAG in args:
legacy_parser = True
args.remove(constants.LEGACY_YAML_PARSER_FLAG)
else:
legacy_parser = False
return (legacy_parser, args)
```
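A usage example for the helper above. This assumes the `tank` package is importable and that module paths follow the file headings in this document; the literal value of `LEGACY_YAML_PARSER_FLAG` is not shown here, so the constant is referenced rather than spelled out:
```python
from tank.commands import constants, util

args = ["Shot", "tk-maya", constants.LEGACY_YAML_PARSER_FLAG]
(use_legacy, args) = util.should_use_legacy_yaml_parser(args)
# use_legacy is now True and args has been reduced to ["Shot", "tk-maya"]
```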
#### File: tank/commands/validate_config.py
```python
from __future__ import print_function
import os
from .action_base import Action
from ..errors import TankError
from ..platform import validation, bundle
class ValidateConfigAction(Action):
"""
Action that looks at the config and validates all parameters
"""
def __init__(self):
Action.__init__(self,
"validate",
Action.TK_INSTANCE,
("Validates your current Configuration to check that all "
"environments have been correctly configured."),
"Configuration")
self.parameters = {}
self.parameters["envs"] = {
"description": ("A list of environment names to process. If not "
"specified, process all environments."),
"type": "list",
"default": [],
}
# this method can be executed via the API
self.supports_api = True
self._is_interactive = False
def run_noninteractive(self, log, parameters):
"""
Tank command API accessor.
Called when someone runs a tank command through the core API.
:param log: std python logger
:param parameters: dictionary with tank command parameters
"""
# validate params and seed default values
return self._run(log, self._validate_parameters(parameters))
def run_interactive(self, log, args):
"""
Tank command accessor
:param log: std python logger
:param args: command line args
"""
self._is_interactive = True
# currently, environment names are passed in as arguments for
# validation. Just translate the args to the env list and validate them
return self._run(log, self._validate_parameters({"envs": args}))
def _run(self, log, parameters):
"""
Actual execution payload
"""
log.info("")
log.info("")
log.info("Welcome to the Shotgun Pipeline Toolkit Configuration validator!")
log.info("")
log.info("Found the following environments:")
for x in parameters["envs"]:
log.info(" %s" % x)
log.info("")
log.info("")
# validate environments
for env_name in parameters["envs"]:
log.info("")
log.info("Environment %s" % env_name)
log.info("------------------------------------------")
env = self.tk.pipeline_configuration.get_environment(env_name)
log.info("Environment path: %s" % (env.disk_location))
_process_environment(log, self.tk, env)
log.info("")
log.info("")
log.info("")
# check templates that are orphaned
unused_templates = set(self.tk.templates.keys()) - g_templates
log.info("")
log.info("------------------------------------------------------------------------")
log.info("The following templates are not being used directly in any environments:")
log.info("(they may be used inside complex data structures)")
for ut in unused_templates:
log.info(ut)
log.info("")
log.info("")
log.info("")
# check hooks that are unused
all_hooks = []
# get rid of files not ending with .py and strip extension
for hook in os.listdir(self.tk.pipeline_configuration.get_hooks_location()):
if hook.endswith(".py"):
all_hooks.append( hook[:-3] )
unused_hooks = set(all_hooks) - g_hooks
log.info("")
log.info("--------------------------------------------------------------------")
log.info("The following hooks are not being used directly in any environments:")
log.info("(they may be used inside complex data structures)")
for uh in unused_hooks:
log.info(uh)
log.info("")
log.info("")
log.info("")
def _validate_parameters(self, parameters):
"""
Do validation of the parameters that are specific to this action.
:param parameters: The dict of parameters
:returns: The validated and fully populated dict of parameters.
"""
# do the base class default validation
parameters = super(ValidateConfigAction, self)._validate_parameters(
parameters)
# get a list of valid env names
valid_env_names = self.tk.pipeline_configuration.get_environments()
bad_env_names = []
env_names_to_process = []
if parameters["envs"]:
# some environment names supplied on the command line
for env_param in parameters["envs"]:
# see if this is a comma separated list
for env_name in env_param.split(","):
env_name = env_name.strip()
if env_name in valid_env_names:
env_names_to_process.append(env_name)
else:
bad_env_names.append(env_name)
else:
# nothing specified. process all
env_names_to_process = valid_env_names
# bail if any bad env names
if bad_env_names:
if self._is_interactive:
print("\nUsage: %s\n" % (self._usage(),))
raise TankError(
"Error retrieving environments mathing supplied arguments: %s"
% (", ".join(bad_env_names),)
)
parameters["envs"] = sorted(env_names_to_process)
return parameters
def _usage(self):
"""Return a string displaying the usage of this command."""
return "./tank validate [env_name, env_name, ...] "
g_templates = set()
g_hooks = set()
def _validate_bundle(log, tk, name, settings, descriptor, engine_name=None):
"""Validate the supplied bundle including the descriptor and all settings.
:param log: A logger instance for logging validation output.
:param tk: A toolkit api instance.
:param name: The bundle's name.
:param settings: The bundle's settings dict.
:param descriptor: A descriptor object for the bundle.
:param engine_name: The name of the containing engine or None.
This is used when the bundle is an app and needs to validate engine-
specific settings.
"""
log.info("")
log.info("Validating %s..." % name)
if not descriptor.exists_local():
log.info("Please wait, downloading...")
descriptor.download_local()
# out of date check
latest_desc = descriptor.find_latest_version()
if descriptor.version != latest_desc.version:
log.info(
"WARNING: Latest version is %s. You are running %s." % (latest_desc.version, descriptor.version)
)
manifest = descriptor.configuration_schema
for s in settings.keys():
if s not in manifest.keys():
log.info(" WARNING - Parameter not needed: %s" % s)
for s in manifest.keys():
default = bundle.resolve_default_value(manifest[s], engine_name=engine_name)
if s in settings:
value = settings.get(s)
else:
value = default
try:
validation.validate_single_setting(name, tk, manifest, s, value)
except TankError as e:
log.info(" ERROR - Parameter %s - Invalid value: %s" % (s,e))
else:
# validation is ok
if default is None:
# no default value
# don't report this
pass
elif manifest[s].get("type") == "hook" and value == "default":
# don't display when default values are used.
pass
elif default == value:
pass
else:
log.info(" Parameter %s - OK [using non-default value]" % s)
log.info(" |---> Current: %s" % value)
log.info(" \---> Default: %s" % default)
# remember templates
if manifest[s].get("type") == "template":
g_templates.add(value)
if manifest[s].get("type") == "hook":
g_hooks.add(value)
def _process_environment(log, tk, env):
"""Process an environment by validating each of its bundles.
:param log: A logger instance for logging validation output.
:param tk: A toolkit api instance.
:param env: An environment instance.
"""
for e in env.get_engines():
s = env.get_engine_settings(e)
descriptor = env.get_engine_descriptor(e)
name = "Engine %s / %s" % (env.name, e)
_validate_bundle(log, tk, name, s, descriptor, engine_name=e)
for a in env.get_apps(e):
s = env.get_app_settings(e, a)
descriptor = env.get_app_descriptor(e, a)
name = "%s / %s / %s" % (env.name, e, a)
_validate_bundle(log, tk, name, s, descriptor, engine_name=e)
```
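The `envs` parameter accepts both space-separated arguments and comma-separated lists, which `_validate_parameters` flattens and validates. A standalone sketch of that normalization (the function name is illustrative):
```python
def normalize_env_args(env_params, valid_env_names):
    """Flatten ['Shot,Asset', 'Sequence'] into sorted valid names plus rejects."""
    good = []
    bad = []
    for env_param in env_params:
        # each argument may itself be a comma separated list
        for env_name in env_param.split(","):
            env_name = env_name.strip()
            if env_name in valid_env_names:
                good.append(env_name)
            else:
                bad.append(env_name)
    return sorted(good), bad


print(normalize_env_args(["Shot,Asset", "Sequence"], {"Shot", "Asset", "Sequence"}))
# -> (['Asset', 'Sequence', 'Shot'], [])
```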
#### File: descriptor/io_descriptor/appstore.py
```python
import os
import urllib
import fnmatch
import urllib2
import httplib
from tank_vendor.shotgun_api3.lib import httplib2
import cPickle as pickle
from ...util import shotgun
from ...util import UnresolvableCoreConfigurationError, ShotgunAttachmentDownloadError
from ...util.user_settings import UserSettings
from ..descriptor import Descriptor
from ..errors import TankAppStoreConnectionError
from ..errors import TankAppStoreError
from ..errors import TankDescriptorError
from ..errors import InvalidAppStoreCredentialsError
from ... import LogManager
from .. import constants
from .downloadable import IODescriptorDownloadable
from ...constants import SUPPORT_EMAIL
# use api json to cover py 2.5
from tank_vendor import shotgun_api3
json = shotgun_api3.shotgun.json
log = LogManager.get_logger(__name__)
# file where we cache the app store metadata for an item
METADATA_FILE = ".cached_metadata.pickle"
class IODescriptorAppStore(IODescriptorDownloadable):
"""
Represents a toolkit app store item.
{type: app_store, name: tk-core, version: v12.3.4}
{type: app_store, name: NAME, version: VERSION}
"""
# cache app store connections for performance
_app_store_connections = {}
# internal app store mappings
(APP, FRAMEWORK, ENGINE, CONFIG, CORE) = range(5)
_APP_STORE_OBJECT = {
Descriptor.APP: constants.TANK_APP_ENTITY_TYPE,
Descriptor.FRAMEWORK: constants.TANK_FRAMEWORK_ENTITY_TYPE,
Descriptor.ENGINE: constants.TANK_ENGINE_ENTITY_TYPE,
Descriptor.CONFIG: constants.TANK_CONFIG_ENTITY_TYPE,
Descriptor.INSTALLED_CONFIG: None,
Descriptor.CORE: None,
}
_APP_STORE_VERSION = {
Descriptor.APP: constants.TANK_APP_VERSION_ENTITY_TYPE,
Descriptor.FRAMEWORK: constants.TANK_FRAMEWORK_VERSION_ENTITY_TYPE,
Descriptor.ENGINE: constants.TANK_ENGINE_VERSION_ENTITY_TYPE,
Descriptor.CONFIG: constants.TANK_CONFIG_VERSION_ENTITY_TYPE,
Descriptor.INSTALLED_CONFIG: None,
Descriptor.CORE: constants.TANK_CORE_VERSION_ENTITY_TYPE,
}
_APP_STORE_LINK = {
Descriptor.APP: "sg_tank_app",
Descriptor.FRAMEWORK: "sg_tank_framework",
Descriptor.ENGINE: "sg_tank_engine",
Descriptor.CONFIG: "sg_tank_config",
Descriptor.INSTALLED_CONFIG: None,
Descriptor.CORE: None,
}
_DOWNLOAD_STATS_EVENT_TYPE = {
Descriptor.APP: "TankAppStore_App_Download",
Descriptor.FRAMEWORK: "TankAppStore_Framework_Download",
Descriptor.ENGINE: "TankAppStore_Engine_Download",
Descriptor.CONFIG: "TankAppStore_Config_Download",
Descriptor.INSTALLED_CONFIG: None,
Descriptor.CORE: "TankAppStore_CoreApi_Download",
}
_VERSION_FIELDS_TO_CACHE = [
"id",
"code",
"sg_status_list",
"description",
"tags",
"sg_detailed_release_notes",
"sg_documentation",
constants.TANK_CODE_PAYLOAD_FIELD
]
_BUNDLE_FIELDS_TO_CACHE = [
"id",
"sg_system_name",
"sg_status_list",
"sg_deprecation_message"
]
def __init__(self, descriptor_dict, sg_connection, bundle_type):
"""
Constructor
:param descriptor_dict: descriptor dictionary describing the bundle
:param sg_connection: Shotgun connection to associated site
:param bundle_type: Either Descriptor.APP, CORE, ENGINE or FRAMEWORK or CONFIG
:return: Descriptor instance
"""
super(IODescriptorAppStore, self).__init__(descriptor_dict)
self._validate_descriptor(
descriptor_dict,
required=["type", "name", "version"],
optional=["label"]
)
self._sg_connection = sg_connection
self._type = bundle_type
self._name = descriptor_dict.get("name")
self._version = descriptor_dict.get("version")
self._label = descriptor_dict.get("label")
def __str__(self):
"""
Human readable representation
"""
display_name_lookup = {
Descriptor.APP: "App",
Descriptor.FRAMEWORK: "Framework",
Descriptor.ENGINE: "Engine",
Descriptor.CONFIG: "Config",
Descriptor.CORE: "Core",
}
# Toolkit App Store App tk-multi-loader2 v1.2.3
# Toolkit App Store Framework tk-framework-shotgunutils v1.2.3
# Toolkit App Store Core v1.2.3
if self._type == Descriptor.CORE:
display_name = "Toolkit App Store Core %s" % self._version
else:
display_name = display_name_lookup[self._type]
display_name = "Toolkit App Store %s %s %s" % (display_name, self._name, self._version)
if self._label:
display_name += " [label %s]" % self._label
return display_name
def __load_cached_app_store_metadata(self, path):
"""
Loads the metadata for a path in the app store
:param path: path to bundle location on disk
:return: metadata dictionary or None if not found
"""
cache_file = os.path.join(path, METADATA_FILE)
if os.path.exists(cache_file):
fp = open(cache_file, "rt")
try:
metadata = pickle.load(fp)
finally:
fp.close()
else:
log.debug(
"%r Could not find cached metadata file %s - "
"will proceed with empty app store metadata." % (self, cache_file)
)
metadata = {}
return metadata
@LogManager.log_timing
def __refresh_metadata(self, path, sg_bundle_data=None, sg_version_data=None):
"""
Refreshes the metadata cache on disk. The metadata cache contains
app store information such as deprecation status, label information
and release note data.
For performance, the metadata can be provided by the caller. If
not provided, the method will retrieve it from the app store.
If the descriptor resides in a read-only bundle cache, for example
baked into a DCC distribution, the cache will not be updated.
:param path: The path to the bundle where cache info should be written
:param sg_bundle_data, sg_version_data: Shotgun data to cache
:returns: A dictionary with keys 'sg_bundle_data' and 'sg_version_data',
containing Shotgun metadata.
"""
log.debug("Attempting to refresh app store metadata for %r" % self)
cache_file = os.path.join(path, METADATA_FILE)
log.debug("Will attempt to refresh cache in %s" % cache_file)
if sg_version_data:  # no None-check for the sg_bundle_data param since it is None for tk-core
log.debug("Will cache pre-fetched cache data.")
else:
log.debug("Connecting to Shotgun to retrieve metadata for %r" % self)
# get the appropriate shotgun app store types and fields
bundle_entity_type = self._APP_STORE_OBJECT[self._type]
version_entity_type = self._APP_STORE_VERSION[self._type]
link_field = self._APP_STORE_LINK[self._type]
# connect to the app store
(sg, _) = self.__create_sg_app_store_connection()
if self._type == self.CORE:
# special handling of core since it doesn't have a high-level 'bundle' entity
sg_bundle_data = None
sg_version_data = sg.find_one(
constants.TANK_CORE_VERSION_ENTITY_TYPE,
[["code", "is", self._version]],
self._VERSION_FIELDS_TO_CACHE
)
if sg_version_data is None:
raise TankDescriptorError(
"The App store does not have a version '%s' of Core!" % self._version
)
else:
# engines, apps etc have a 'bundle level entity' in the app store,
# e.g. something representing the app or engine.
# then a version entity representing a particular version
sg_bundle_data = sg.find_one(
bundle_entity_type,
[["sg_system_name", "is", self._name]],
self._BUNDLE_FIELDS_TO_CACHE
)
if sg_bundle_data is None:
raise TankDescriptorError(
"The App store does not contain an item named '%s'!" % self._name
)
# now get the version
sg_version_data = sg.find_one(
version_entity_type,
[
[link_field, "is", sg_bundle_data],
["code", "is", self._version]
],
self._VERSION_FIELDS_TO_CACHE
)
if sg_version_data is None:
raise TankDescriptorError(
"The App store does not have a "
"version '%s' of item '%s'!" % (self._version, self._name)
)
# create metadata
metadata = {
"sg_bundle_data": sg_bundle_data,
"sg_version_data": sg_version_data
}
# try to write to location - but it may be located in a
# readonly bundle cache - if the caching fails, gracefully
# fall back and log
try:
fp = open(cache_file, "wt")
try:
pickle.dump(metadata, fp)
log.debug("Wrote app store metadata cache '%s'" % cache_file)
finally:
fp.close()
except Exception as e:
log.debug("Did not update app store metadata cache '%s': %s" % (cache_file, e))
return metadata
def _get_bundle_cache_path(self, bundle_cache_root):
"""
Given a cache root, compute a cache path suitable
for this descriptor, using the 0.18+ path format.
:param bundle_cache_root: Bundle cache root path
:return: Path to bundle cache location
"""
return os.path.join(
bundle_cache_root,
"app_store",
self.get_system_name(),
self.get_version()
)
def _get_cache_paths(self):
"""
Get a list of resolved paths, starting with the primary and
continuing with alternative locations where it may reside.
Note: This method only computes paths and does not perform any I/O ops.
:return: List of path strings
"""
# get default cache paths from base class
paths = super(IODescriptorAppStore, self)._get_cache_paths()
# for compatibility with older versions of core, prior to v0.18.x,
# add the old-style bundle cache path as a fallback. As of v0.18.x,
# the bundle cache subdirectory names were shortened and otherwise
# modified to help prevent MAX_PATH issues on windows. This call adds
# the old path as a fallback for cases where core has been upgraded
# for an existing project. NOTE: This only works because the bundle
# cache root didn't change (when use_bundle_cache is set to False).
# If the bundle cache root changes across core versions, then this will
# need to be refactored.
legacy_folder = self._get_legacy_bundle_install_folder(
"app_store",
self._bundle_cache_root,
self._type,
self.get_system_name(),
self.get_version()
)
if legacy_folder:
paths.append(legacy_folder)
return paths
###############################################################################################
# data accessors
def get_system_name(self):
"""
Returns a short name, suitable for use in configuration files
and for folders on disk
"""
return self._name
def get_deprecation_status(self):
"""
Returns information about deprecation.
May download the item from the app store in order
to retrieve the metadata.
:returns: A tuple (is_deprecated, message) indicating
whether this item is deprecated.
"""
# make sure we have the app payload + metadata
self.ensure_local()
# grab metadata
metadata = self.__load_cached_app_store_metadata(
self.get_path()
)
sg_bundle_data = metadata.get("sg_bundle_data") or {}
if sg_bundle_data.get("sg_status_list") == "dep":
msg = sg_bundle_data.get("sg_deprecation_message", "No reason given.")
return (True, msg)
else:
return (False, "")
def get_version(self):
"""
Returns the version number string for this item
"""
return self._version
def get_changelog(self):
"""
Returns information about the changelog for this item.
May download the item from the app store in order
to retrieve the metadata.
:returns: A tuple (changelog_summary, changelog_url). Values may be None
to indicate that no changelog exists.
"""
summary = None
url = None
# make sure we have the app payload + metadata
self.ensure_local()
# grab metadata
metadata = self.__load_cached_app_store_metadata(
self.get_path()
)
try:
sg_version_data = metadata.get("sg_version_data") or {}
summary = sg_version_data.get("description")
url = sg_version_data.get("sg_detailed_release_notes").get("url")
except Exception:
pass
return (summary, url)
def _download_local(self, destination_path):
"""
Retrieves this version to local repo.
:param destination_path: The directory to which the app store descriptor
is to be downloaded.
"""
# connect to the app store
(sg, script_user) = self.__create_sg_app_store_connection()
# fetch metadata from sg...
metadata = self.__refresh_metadata(destination_path)
# now get the attachment info
version = metadata.get("sg_version_data")
# attachment field is on the following form in the case a file has been uploaded:
# {'name': 'v1.2.3.zip',
# 'url': 'https://sg-media-usor-01.s3.amazonaws.com/...',
# 'content_type': 'application/zip',
# 'type': 'Attachment',
# 'id': 139,
# 'link_type': 'upload'}
attachment_id = version[constants.TANK_CODE_PAYLOAD_FIELD]["id"]
# download and unzip
try:
shotgun.download_and_unpack_attachment(sg, attachment_id, destination_path)
except ShotgunAttachmentDownloadError as e:
raise TankAppStoreError(
"Failed to download %s. Error: %s" % (self, e)
)
def _post_download(self, download_path):
"""
Code run after the descriptor is successfully downloaded to disk
:param download_path: The path to which the descriptor is downloaded.
"""
# write a stats record to the tank app store
try:
# connect to the app store
(sg, script_user) = self.__create_sg_app_store_connection()
# fetch metadata from sg...
metadata = self.__refresh_metadata(download_path)
# now get the attachment info
version = metadata.get("sg_version_data")
# setup the data entry
data = {}
data["description"] = "%s: %s %s was downloaded" % (
self._sg_connection.base_url,
self._name,
self._version
)
data["event_type"] = self._DOWNLOAD_STATS_EVENT_TYPE[self._type]
data["entity"] = version
data["user"] = script_user
data["project"] = constants.TANK_APP_STORE_DUMMY_PROJECT
data["attribute_name"] = constants.TANK_CODE_PAYLOAD_FIELD
# log the data to shotgun
sg.create("EventLogEntry", data)
except Exception as e:
log.warning("Could not write app store download receipt: %s" % e)
#############################################################################
# searching for other versions
def get_latest_cached_version(self, constraint_pattern=None):
"""
Returns a descriptor object that represents the latest version
that is locally available in the bundle cache search path.
:param constraint_pattern: If this is specified, the query will be constrained
by the given pattern. Version patterns are on the following forms:
- v0.1.2, v0.12.3.2, v0.1.3beta - a specific version
- v0.12.x - get the highest v0.12 version
- v1.x.x - get the highest v1 version
:returns: instance deriving from IODescriptorBase or None if not found
"""
log.debug("Looking for cached versions of %r..." % self)
all_versions = self._get_locally_cached_versions()
log.debug("Found %d versions" % len(all_versions))
if self._label:
# now filter the list of versions to only include things with
# the sought-after label
version_numbers = []
log.debug("culling out versions not labelled '%s'..." % self._label)
for (version_str, path) in all_versions.iteritems():
metadata = self.__load_cached_app_store_metadata(path)
try:
tags = [x["name"] for x in metadata["sg_version_data"]["tags"]]
if self.__match_label(tags):
version_numbers.append(version_str)
except Exception as e:
log.debug(
"Could not determine label metadata for %s. Ignoring. Details: %s" % (path, e)
)
else:
# no label based filtering. all versions are valid.
version_numbers = all_versions.keys()
if len(version_numbers) == 0:
return None
version_to_use = self._find_latest_tag_by_pattern(version_numbers, constraint_pattern)
if version_to_use is None:
return None
# make a descriptor dict
descriptor_dict = {
"type": "app_store",
"name": self._name,
"version": version_to_use
}
if self._label:
descriptor_dict["label"] = self._label
# and return a descriptor instance
desc = IODescriptorAppStore(descriptor_dict, self._sg_connection, self._type)
desc.set_cache_roots(self._bundle_cache_root, self._fallback_roots)
log.debug("Latest cached version resolved to %r" % desc)
return desc
@LogManager.log_timing
def get_latest_version(self, constraint_pattern=None):
"""
Returns a descriptor object that represents the latest version.
This method will connect to the toolkit app store and download
metadata to determine the latest version.
:param constraint_pattern: If this is specified, the query will be constrained
by the given pattern. Version patterns are on the following forms:
- v0.1.2, v0.12.3.2, v0.1.3beta - a specific version
- v0.12.x - get the highest v0.12 version
- v1.x.x - get the highest v1 version
:returns: IODescriptorAppStore object
"""
log.debug(
"Determining latest version for %r given constraint pattern %s" % (self, constraint_pattern)
)
# connect to the app store
(sg, _) = self.__create_sg_app_store_connection()
# get the filter logic for what to exclude
if constants.APP_STORE_QA_MODE_ENV_VAR in os.environ:
sg_filter = [["sg_status_list", "is_not", "bad"]]
else:
sg_filter = [
["sg_status_list", "is_not", "rev"],
["sg_status_list", "is_not", "bad"]
]
if self._type != self.CORE:
# find the main entry
sg_bundle_data = sg.find_one(
self._APP_STORE_OBJECT[self._type],
[["sg_system_name", "is", self._name]],
self._BUNDLE_FIELDS_TO_CACHE
)
if sg_bundle_data is None:
raise TankDescriptorError("App store does not contain an item named '%s'!" % self._name)
# now get all versions
link_field = self._APP_STORE_LINK[self._type]
entity_type = self._APP_STORE_VERSION[self._type]
sg_filter += [[link_field, "is", sg_bundle_data]]
else:
# core doesn't have a parent entity for its versions
sg_bundle_data = None
entity_type = constants.TANK_CORE_VERSION_ENTITY_TYPE
# optimization: if there is no constraint pattern and no label
# set, just download the latest record
if self._label is None and constraint_pattern is None:
# only download one record
limit = 1
else:
limit = 0 # all records
# now get all versions
sg_versions = sg.find(
entity_type,
filters=sg_filter,
fields=self._VERSION_FIELDS_TO_CACHE,
order=[{"field_name": "created_at", "direction": "desc"}],
limit=limit
)
log.debug("Downloaded data for %d versions from Shotgun." % len(sg_versions))
# now filter out all labels that aren't matching
matching_records = []
for sg_version_entry in sg_versions:
tags = [x["name"] for x in sg_version_entry["tags"]]
if self.__match_label(tags):
matching_records.append(sg_version_entry)
log.debug("After applying label filters, %d records remain." % len(matching_records))
if len(matching_records) == 0:
raise TankDescriptorError("Cannot find any versions for %s in the App store!" % self)
# and filter out based on version constraint
if constraint_pattern:
version_numbers = [x.get("code") for x in matching_records]
version_to_use = self._find_latest_tag_by_pattern(version_numbers, constraint_pattern)
if version_to_use is None:
raise TankDescriptorError(
"'%s' does not have a version matching the pattern '%s'. "
"Available versions are: %s" % (
self.get_system_name(),
constraint_pattern,
", ".join(version_numbers)
)
)
# get the sg data for the given version
sg_data_for_version = [d for d in matching_records if d["code"] == version_to_use][0]
else:
# no constraints applied. Pick first (latest) match
sg_data_for_version = matching_records[0]
version_to_use = sg_data_for_version["code"]
# make a descriptor dict
descriptor_dict = {
"type": "app_store",
"name": self._name,
"version": version_to_use
}
if self._label:
descriptor_dict["label"] = self._label
# and return a descriptor instance
desc = IODescriptorAppStore(descriptor_dict, self._sg_connection, self._type)
desc.set_cache_roots(self._bundle_cache_root, self._fallback_roots)
# if this item exists locally, attempt to update the metadata cache
# this ensures that if labels are added in the app store, these
# are correctly cached locally.
cached_path = desc.get_path()
if cached_path:
desc.__refresh_metadata(cached_path, sg_bundle_data, sg_data_for_version)
return desc
def __match_label(self, tag_list):
"""
Given a list of tags, see if it matches the given label
Shotgun tags are glob style: *, 2017.*, 2018.2
:param tag_list: list of tags (strings) from shotgun
:return: True if matching, False if not
"""
if self._label is None:
# no label set - all matching!
return True
if tag_list is None:
# no tags defined, so no match
return False
# glob match each item
for tag in tag_list:
if fnmatch.fnmatch(self._label, tag):
return True
return False
@LogManager.log_timing
def __create_sg_app_store_connection(self):
"""
Creates a shotgun connection that can be used to access the Toolkit app store.
:returns: (sg, dict) where the first item is the shotgun api instance and the second
is an sg entity dictionary (keys type/id) corresponding to the user used
to connect to the app store.
"""
# maintain a cache for performance
# cache is keyed by client shotgun site
# this assumes that there is a strict
# 1:1 relationship between app store accounts
# and shotgun sites.
sg_url = self._sg_connection.base_url
if sg_url not in self._app_store_connections:
# Connect to associated Shotgun site and retrieve the credentials to use to
# connect to the app store site
try:
(script_name, script_key) = self.__get_app_store_key_from_shotgun()
except urllib2.HTTPError as e:
if e.code == 403:
# edge case alert!
# this is likely because our session token in shotgun has expired.
# The authentication system is based around wrapping the shotgun API,
# and requesting authentication if needed. Because the app store
# credentials come from a separate endpoint that doesn't go via the shotgun
# API, we have to explicitly check.
#
# trigger a refresh of our session token by issuing a shotgun API call
self._sg_connection.find_one("HumanUser", [])
# and retry
(script_name, script_key) = self.__get_app_store_key_from_shotgun()
else:
raise
log.debug("Connecting to %s..." % constants.SGTK_APP_STORE)
# Connect to the app store and resolve the script user id we are connecting with.
# Set the timeout explicitly so we ensure the connection won't hang in cases where
# a response is not returned in a reasonable amount of time.
app_store_sg = shotgun_api3.Shotgun(
constants.SGTK_APP_STORE,
script_name=script_name,
api_key=script_key,
http_proxy=self.__get_app_store_proxy_setting(),
connect=False
)
# set the default timeout for app store connections
app_store_sg.config.timeout_secs = constants.SGTK_APP_STORE_CONN_TIMEOUT
# determine the script user running currently
# get the API script user ID from shotgun
try:
script_user = app_store_sg.find_one(
"ApiUser",
filters=[["firstname", "is", script_name]],
fields=["type", "id"]
)
except shotgun_api3.AuthenticationFault:
raise InvalidAppStoreCredentialsError(
"The Toolkit App Store credentials found in Shotgun are invalid.\n"
"Please contact %s to resolve this issue." % SUPPORT_EMAIL
)
# Connection errors can occur for a variety of reasons. For example, there is no
# internet access or there is a proxy server blocking access to the Toolkit app store.
except (httplib2.HttpLib2Error, httplib2.socks.HTTPError, httplib.HTTPException) as e:
raise TankAppStoreConnectionError(e)
# In cases where there is a firewall/proxy blocking access to the app store, sometimes
# the firewall will drop the connection instead of rejecting it. The API request will
# timeout which unfortunately results in a generic SSLError with only the message text
# to give us a clue why the request failed.
# The exception raised in this case is "ssl.SSLError: The read operation timed out"
except httplib2.ssl.SSLError as e:
if "timed" in e.message:
raise TankAppStoreConnectionError(
"Connection to %s timed out: %s" % (app_store_sg.config.server, e)
)
else:
# other type of ssl error
raise TankAppStoreError(e)
except Exception as e:
raise TankAppStoreError(e)
if script_user is None:
raise TankAppStoreError(
"Could not evaluate the current App Store User! Please contact support."
)
self._app_store_connections[sg_url] = (app_store_sg, script_user)
return self._app_store_connections[sg_url]
def __get_app_store_proxy_setting(self):
"""
Retrieve the app store proxy settings. If the key app_store_http_proxy is not found in the
``shotgun.yml`` file, the proxy settings from the client site connection will be used. If the
key is found, then its value will be used. Note that if the ``app_store_http_proxy`` setting
is set to ``null`` or an empty string in the configuration file, it means that the app store
proxy is being forced to ``None`` and therefore won't be inherited from the http proxy setting.
:returns: The http proxy connection string.
"""
try:
config_data = shotgun.get_associated_sg_config_data()
except UnresolvableCoreConfigurationError:
config_data = None
if config_data and constants.APP_STORE_HTTP_PROXY in config_data:
return config_data[constants.APP_STORE_HTTP_PROXY]
settings = UserSettings()
if settings.app_store_proxy is not None:
return settings.app_store_proxy
# Use the http proxy from the connection so we don't have to run
# the connection hook again or look up the system settings as they
# will have been previously looked up to create the connection to Shotgun.
return self._sg_connection.config.raw_http_proxy
@LogManager.log_timing
def __get_app_store_key_from_shotgun(self):
"""
Given a Shotgun url and script credentials, fetch the app store key
for this shotgun instance using a special controller method.
Returns a tuple with (app_store_script_name, app_store_auth_key)
:returns: tuple of strings with contents (script_name, script_key)
"""
sg = self._sg_connection
log.debug("Retrieving app store credentials from %s" % sg.base_url)
# handle proxy setup by pulling the proxy details from the main shotgun connection
if sg.config.proxy_handler:
opener = urllib2.build_opener(sg.config.proxy_handler)
urllib2.install_opener(opener)
# now connect to our site and use a special url to retrieve the app store script key
session_token = sg.get_session_token()
post_data = {"session_token": session_token}
response = urllib2.urlopen("%s/api3/sgtk_install_script" % sg.base_url, urllib.urlencode(post_data))
html = response.read()
data = json.loads(html)
if not data["script_name"] or not data["script_key"]:
raise InvalidAppStoreCredentialsError(
"Toolkit App Store credentials could not be retrieved from Shotgun.\n"
"Please contact %s to resolve this issue." % SUPPORT_EMAIL
)
log.debug("Retrieved app store credentials for account '%s'." % data["script_name"])
return data["script_name"], data["script_key"]
def has_remote_access(self):
"""
Probes if the current descriptor is able to handle
remote requests. If this method returns True, operations
such as :meth:`download_local` and :meth:`get_latest_version`
can be expected to succeed.
:return: True if a remote is accessible, False if not.
"""
# check if we can connect to Shotgun
can_connect = True
try:
log.debug("%r: Probing if a connection to the App Store can be established..." % self)
# connect to the app store
(sg, _) = self.__create_sg_app_store_connection()
log.debug("...connection established: %s" % sg)
except Exception as e:
log.debug("...could not establish connection: %s" % e)
can_connect = False
return can_connect
```
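For reference, the glob-style label matching performed by `__match_label` can be reduced to a few lines. A minimal standalone sketch (the `match_label` helper is illustrative, not part of the API):
```python
import fnmatch

def match_label(label, tags):
    # mirror of __match_label above: each Shotgun tag is a glob pattern
    if label is None:
        return True   # no label set - everything matches
    if not tags:
        return False  # no tags defined, so no match
    return any(fnmatch.fnmatch(label, tag) for tag in tags)

# a label "2017.3" matches a version tagged "2017.*" but not one tagged "2018.*"
assert match_label("2017.3", ["2018.*", "2017.*"])
assert not match_label("2017.3", ["2018.*"])
assert match_label(None, [])
```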
#### File: descriptor/io_descriptor/downloadable.py
```python
import os
import uuid
from .base import IODescriptorBase
from ..errors import TankDescriptorError, TankError
from ...util import filesystem
from ... import LogManager
log = LogManager.get_logger(__name__)
class IODescriptorDownloadable(IODescriptorBase):
"""
Base class for descriptors that can be downloaded locally to a path on disk.
In order to create a Descriptor associated with data that can
be downloaded locally to disk, it is necessary to derive from this class.
By default, the AppStore, Git and Shotgun entity descriptors can be downloaded
to disk and hence are of type :class:`IODescriptorDownloadable`.
Descriptor data can be downloaded by invoking the :meth:`download_local` method on instances
of such derived classes. These classes are also expected to implement
:meth:`_download_local` and, optionally, :meth:`_post_download`.
A general implementation of such a Descriptor class will be of the form:
eg. class MyNewDownloadableDescriptor(IODescriptorDownloadable):
def _download_local(self, destination_path):
# .. code to download data to destination_path
def _post_download(self, download_path):
# .. code that will be executed post download.
"""
def download_local(self):
"""
Downloads the data represented by the descriptor into the primary bundle
cache path.
It does so in a two step process. First, by downloading it to
a temporary bundle cache path (typically in a 'tmp/<uuid>' directory
in the bundle cache path), then, by moving the data to the primary bundle
cache path for that descriptor. This helps to guard against multiple
processes attempting to download the same descriptor simultaneously.
"""
# Return if the descriptor exists locally.
if self.exists_local():
return
# cache into a temporary location
temporary_path = self._get_temporary_cache_path()
# move into primary location
target = self._get_primary_cache_path()
# ensure that the parent directory of the target is present.
# make sure we guard against multiple processes attempting to create it simultaneously.
try:
filesystem.ensure_folder_exists(os.path.dirname(target))
except Exception as e:
if not os.path.exists(os.path.dirname(target)):
log.error("Failed to create parent directory %s: %s" % (os.path.dirname(target), e))
raise TankDescriptorError("Failed to create parent directory %s: %s" % (os.path.dirname(target), e))
try:
# attempt to download the descriptor to the temporary path.
log.debug("Downloading %s to the temporary download path %s." % (self, temporary_path))
self._download_local(temporary_path)
except Exception as e:
# something went wrong during the download, remove the temporary files.
log.error("Failed to download into path %s: %s. Attempting to remove it."
% (temporary_path, e))
filesystem.safe_delete_folder(temporary_path)
raise TankDescriptorError("Failed to download into path %s: %s" % (temporary_path, e))
log.debug("Attempting to move descriptor %s from temporary path %s to target path %s." % (
self, temporary_path, target)
)
try:
# atomically rename the directory temporary_path to the target.
os.rename(temporary_path, target)
log.debug("Successfully moved the downloaded descriptor to target path: %s." % target)
except Exception as e:
# if the target path does not already exist, something else might have gone wrong.
if not os.path.exists(target):
log.error("Failed to move descriptor from the temporary path %s " % temporary_path +
" to the bundle cache %s: %s" % (target, e))
raise TankError("Failed to move descriptor from the temporary path %s " % temporary_path +
" to the bundle cache %s: %s" % (target, e))
else:
self._post_download(target)
finally:
if os.path.exists(temporary_path):
log.debug("Removing temporary path: %s" % temporary_path)
filesystem.safe_delete_folder(temporary_path)
def _get_temporary_cache_path(self):
"""
Returns a temporary download cache path for this descriptor.
"""
return os.path.join(self._bundle_cache_root, "tmp", uuid.uuid4().hex)
def _download_local(self, destination_path):
"""
Downloads the data identified by the descriptor to the destination_path.
:param destination_path: The path on disk to which the descriptor is to
be downloaded.
eg. If the `destination_path` is
/shared/bundle_cache/tmp/2f601ff3d85c43aa97d5811a308d99b3 for a git
tag descriptor, this method is expected to download data directly
into the destination path. Thus the .git folder of the descriptor will have
a path of /shared/bundle_cache/tmp/2f601ff3d85c43aa97d5811a308d99b3/.git
"""
raise NotImplementedError
def _post_download(self, download_path):
"""
Method executed after a descriptor has been downloaded successfully.
:param download_path: The path on disk to which the descriptor has been
downloaded.
"""
pass
```
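The two-step scheme described in the `download_local` docstring (download into `tmp/<uuid>`, then rename into place) can be condensed into a standalone sketch. This is a Python 3 sketch under stated assumptions; `download_fn` is a hypothetical stand-in for `_download_local` and is expected to create and populate the path it is given:
```python
import os
import shutil
import uuid

def download_atomically(bundle_cache_root, target, download_fn):
    # skip the work entirely if another process already cached the payload
    if os.path.exists(target):
        return target
    temporary_path = os.path.join(bundle_cache_root, "tmp", uuid.uuid4().hex)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    try:
        # fetch into the private tmp/<uuid> directory first
        download_fn(temporary_path)
    except Exception:
        shutil.rmtree(temporary_path, ignore_errors=True)
        raise
    try:
        # atomic on POSIX when tmp and target live on the same filesystem
        os.rename(temporary_path, target)
    except OSError:
        if not os.path.exists(target):
            raise  # not a race with another process - genuine failure
    finally:
        shutil.rmtree(temporary_path, ignore_errors=True)
    return target
```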
#### File: descriptor/io_descriptor/git.py
```python
import os
import uuid
import shutil
import tempfile
from .downloadable import IODescriptorDownloadable
from ... import LogManager
from ...util.process import subprocess_check_output, SubprocessCalledProcessError
from ..errors import TankError
from ...util import filesystem
log = LogManager.get_logger(__name__)
class TankGitError(TankError):
"""
Errors related to git communication
"""
pass
class IODescriptorGit(IODescriptorDownloadable):
"""
Base class for git descriptors.
Abstracts operations around repositories, since all git
descriptors have a repository associated (via the 'path'
parameter).
"""
def __init__(self, descriptor_dict):
"""
Constructor
:param descriptor_dict: descriptor dictionary describing the bundle
:return: Descriptor instance
"""
super(IODescriptorGit, self).__init__(descriptor_dict)
self._path = descriptor_dict.get("path")
# strip trailing slashes - this is so that when we build
# the name later (using os.basename) we construct it correctly.
if self._path.endswith("/") or self._path.endswith("\\"):
self._path = self._path[:-1]
# Note: the git command always uses forward slashes
self._sanitized_repo_path = self._path.replace(os.path.sep, "/")
@LogManager.log_timing
def _clone_then_execute_git_commands(self, target_path, commands):
"""
Clones the git repository into the given location and
executes the given list of git commands::
# this will clone the associated git repo into
# /tmp/foo and then execute the given commands
# in order in a shell environment
commands = [
"checkout -q my_feature_branch",
"reset -q --hard -q a6512356a"
]
self._clone_then_execute_git_commands("/tmp/foo", commands)
The initial clone operation happens via an `os.system` call, ensuring
that there is an initialized shell environment, allowing git
to potentially request shell based authentication for repositories
which require credentials.
The subsequent list of commands is intended to be executed on the
recently cloned repository; the cwd will be set so that they
are executed in the directory scope of the newly cloned repository.
:param target_path: path to clone into
:param commands: list of git commands to execute, e.g. ['checkout x']
:returns: stdout and stderr of the last command executed as a string
:raises: TankGitError on git failure
"""
# ensure *parent* folder exists
parent_folder = os.path.dirname(target_path)
filesystem.ensure_folder_exists(parent_folder)
# first probe to check that git exists in our PATH
log.debug("Checking that git exists and can be executed...")
try:
output = subprocess_check_output(["git", "--version"])
except:
raise TankGitError(
"Cannot execute the 'git' command. Please make sure that git is "
"installed on your system and that the git executable has been added to the PATH."
)
log.debug("Git installed: %s" % output)
# Note: git doesn't like paths in single quotes when running on
# windows - it also prefers to use forward slashes
log.debug("Git Cloning %r into %s" % (self, target_path))
cmd = "git clone -q \"%s\" \"%s\"" % (self._path, target_path)
# Note that we use os.system here to allow for git to pop up (in a terminal
# if necessary) authentication prompting. This DOES NOT seem to be possible
# with subprocess.
status = os.system(cmd)
if status != 0:
raise TankGitError(
"Error executing git operation. The git command '%s' "
"returned error code %s." % (cmd, status)
)
log.debug("Git clone into '%s' successful." % target_path)
# clone worked ok! Now execute git commands on this repo
cwd = os.getcwd()
output = None
try:
log.debug("Setting cwd to '%s'" % target_path)
os.chdir(target_path)
for command in commands:
full_command = "git %s" % command
log.debug("Executing '%s'" % full_command)
try:
output = subprocess_check_output(
full_command,
shell=True
)
# note: it seems on windows, the result is sometimes wrapped in single quotes.
output = output.strip().strip("'")
except SubprocessCalledProcessError as e:
raise TankGitError(
"Error executing git operation '%s': %s (Return code %s)" % (full_command, e.output, e.returncode)
)
log.debug("Execution successful. stderr/stdout: '%s'" % output)
finally:
log.debug("Restoring cwd (to '%s')" % cwd)
os.chdir(cwd)
# return the last returned stdout/stderr
return output
def _tmp_clone_then_execute_git_commands(self, commands):
"""
Clone into a temp location and executes the given
list of git commands.
For more details, see :meth:`_clone_then_execute_git_commands`.
:param commands: list of git commands to execute, e.g. ['checkout x']
:returns: stdout and stderr of the last command executed as a string
"""
clone_tmp = os.path.join(tempfile.gettempdir(), "sgtk_clone_%s" % uuid.uuid4().hex)
filesystem.ensure_folder_exists(clone_tmp)
try:
return self._clone_then_execute_git_commands(clone_tmp, commands)
finally:
log.debug("Cleaning up temp location '%s'" % clone_tmp)
shutil.rmtree(clone_tmp, ignore_errors=True)
def get_system_name(self):
"""
Returns a short name, suitable for use in configuration files
and for folders on disk, e.g. 'tk-maya'
"""
bn = os.path.basename(self._path)
(name, ext) = os.path.splitext(bn)
return name
def has_remote_access(self):
"""
Probes if the current descriptor is able to handle
remote requests. If this method returns True, operations
such as :meth:`download_local` and :meth:`get_latest_version`
can be expected to succeed.
:return: True if a remote is accessible, False if not.
"""
# check if we can clone the repo
can_connect = True
try:
log.debug("%r: Probing if a connection to git can be established..." % self)
# clone repo into temp folder
self._tmp_clone_then_execute_git_commands([])
log.debug("...connection established")
except Exception as e:
log.debug("...could not establish connection: %s" % e)
can_connect = False
return can_connect
def _copy(self, target_path):
"""
Copy the contents of the descriptor to an external location
Subclassed git implementation which includes .git folders
in the copy.
:param target_path: target path to copy the descriptor to.
"""
log.debug("Copying %r -> %s" % (self, target_path))
# make sure item exists locally
self.ensure_local()
# copy descriptor into target.
# the skip list contains .git folders by default, so pass in []
# to turn that restriction off. In the case of the git descriptor,
# we want to transfer this folder as well.
filesystem.copy_folder(
self.get_path(),
target_path,
skip_list=[]
)
```
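The clone-then-execute pattern above can be sketched with `subprocess` throughout (the real implementation deliberately uses `os.system` for the clone so git can prompt for credentials in a shell). A simplified sketch:
```python
import os
import shutil
import subprocess
import tempfile
import uuid

def clone_then_execute(repo_path, commands):
    # clone into a unique temp folder, run each git command there, clean up
    clone_tmp = os.path.join(tempfile.gettempdir(), "sgtk_clone_%s" % uuid.uuid4().hex)
    try:
        subprocess.check_call(["git", "clone", "-q", repo_path, clone_tmp])
        output = None
        for command in commands:
            # passing cwd= avoids the os.chdir() dance; the naive split() is
            # fine here because the sketch assumes unquoted arguments
            output = subprocess.check_output(["git"] + command.split(), cwd=clone_tmp)
        return output
    finally:
        shutil.rmtree(clone_tmp, ignore_errors=True)

# e.g. list the tags of a repository:
#   clone_then_execute("/path/to/repo.git", ["tag"])
```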
#### File: python/tank/errors.py
```python
class TankError(Exception):
"""
Top level exception for all toolkit-core level runtime errors
"""
class TankUnreadableFileError(TankError):
"""
Exception that indicates that a required file can't be read from disk.
"""
class TankFileDoesNotExistError(TankUnreadableFileError):
"""
Exception that indicates that a required file does not exist.
"""
class TankNoDefaultValueError(TankError):
"""
Exception that can be raised when a default value is required but none is found.
"""
class TankHookMethodDoesNotExistError(TankError):
"""
Exception that indicates that a called method does not exist in the hook.
"""
class TankInvalidCoreLocationError(TankError):
"""
Exception that indicates the core location file contained an invalid path.
"""
class TankNotPipelineConfigurationError(TankError):
"""
Exception that indicates that a folder doesn't contain a pipeline configuration.
"""
class TankErrorProjectIsSetup(TankError):
"""
Exception that indicates that a project already has a toolkit name but no pipeline configuration.
"""
def __init__(self):
"""
Include error message
"""
super(TankErrorProjectIsSetup, self).__init__(
"You are trying to set up a project which has already been set up. "
"If you want to do this, make sure to set the force parameter."
)
class TankContextDeserializationError(TankError):
"""
Exception that indicates that something went wrong while deserializating a context.
"""
class TankMultipleMatchingTemplatesError(TankError):
"""
Exception that indicates that a path matches multiple templates.
"""
```
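Since everything above ultimately derives from `TankError`, call sites can catch at whatever granularity they need. A minimal sketch (`read_required_file` is a hypothetical helper, not part of the module):
```python
import os

def read_required_file(path):
    # hypothetical helper that raises the exceptions defined above
    if not os.path.exists(path):
        raise TankFileDoesNotExistError("File %s does not exist" % path)
    if not os.access(path, os.R_OK):
        raise TankUnreadableFileError("File %s cannot be read" % path)
    with open(path) as fh:
        return fh.read()

try:
    data = read_required_file("/tmp/pipeline_configuration.yml")
except TankFileDoesNotExistError:
    data = None  # most specific case: the file is simply missing
except TankError as e:
    # catch-all for any other toolkit-core failure, including
    # TankUnreadableFileError via inheritance
    raise RuntimeError("Toolkit error: %s" % e)
```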
#### File: folder/folder_types/expression_tokens.py
```python
import os
from ...errors import TankError
class SymlinkToken(object):
"""
Represents a folder level in a symlink target.
"""
def __init__(self, name):
"""
Constructor.
:param name: name of the symlink token
"""
self._name = name
def __repr__(self):
return "<SymlinkToken token '%s'>" % self._name
def resolve_token(self, folder_obj, sg_data):
"""
Returns a resolved value for this token.
:param folder_obj: :class:`Folder` to evaluate
:param sg_data: Shotgun data dictionary.
"""
if self._name.startswith("$"):
# strip the dollar sign
token = self._name[1:]
# check that the referenced token is matching one of the tokens which
# has a computed name part to represent the dynamically created folder name
# this computed_name field exists for all entity folders for example.
valid_tokens = [x for x in sg_data if (isinstance(sg_data[x], dict) and "computed_name" in sg_data[x])]
if token not in valid_tokens:
raise TankError("Cannot compute symlink target for %s: The reference token '%s' cannot be resolved. "
"Available tokens are %s." % (folder_obj, self._name, valid_tokens))
name_value = sg_data[token].get("computed_name")
return name_value
else:
# not an expression
return self._name
class CurrentStepExpressionToken(object):
"""
Represents the current step within a configuration
"""
def __init__(self, sg_task_step_link_field):
"""
Constructor.
:param sg_task_step_link_field: The shotgun field that links together a task and a step.
"""
self._sg_task_step_link_field = sg_task_step_link_field
def __repr__(self):
return "<CurrentStepId token. Task link field: %s>" % self._sg_task_step_link_field
def resolve_shotgun_data(self, shotgun_data):
"""
Given a shotgun data dictionary, return an appropriate value
for this expression.
Because the entire design is centered around "normal" entities,
the task data is preloaded prior to calling the folder recursion.
If there is a notion of a current task, this data is contained
in a current_task_data dictionary which contains information about
the current task and its connections (for example to a pipeline step).
"""
sg_task_data = shotgun_data.get("current_task_data")
if sg_task_data:
# we have information about the currently processed task
# now see if there is a link field to a step
if self._sg_task_step_link_field in sg_task_data:
# this is a link field linking the task to its associated step
# (a step does not necessarily need to be a pipeline step)
# now get the id for this target entity.
sg_task_shot_link_data = sg_task_data[self._sg_task_step_link_field]
if sg_task_shot_link_data:
# there is a link from task -> step present
return sg_task_shot_link_data["id"]
# if data is missing, return None to indicate this.
return None
class CurrentTaskExpressionToken(object):
"""
Represents the current task
"""
def __init__(self):
"""
Constructor.
"""
pass
def __repr__(self):
return "<CurrentTaskId token>"
def resolve_shotgun_data(self, shotgun_data):
"""
Given a shotgun data dictionary, return an appropriate value
for this expression.
Because the entire design is centered around "normal" entities,
the task data is preloaded prior to calling the folder recursion.
If there is a notion of a current task, this data is contained
in a current_task_data dictionary which contains information about
the current task and its connections (for example to a pipeline step).
"""
sg_task_data = shotgun_data.get("current_task_data")
if sg_task_data:
return sg_task_data.get("id")
else:
return None
class FilterExpressionToken(object):
"""
Represents a $token in a filter expression for entity nodes.
"""
@classmethod
def sg_data_key_for_folder_obj(cls, folder_obj):
"""
Returns the data key to be used with a particular folder object
For list nodes this is EntityType.fieldname
For sg nodes this is EntityType
This data key is used in the data dictionary that is preloaded
and passed around the folder resolve methods.
"""
# avoid cyclic imports
from .entity import Entity
from .listfield import ListField
from .static import Static
if isinstance(folder_obj, Entity):
# append a token to the filter with the entity TYPE of the sg node
sg_data_key = folder_obj.get_entity_type()
elif isinstance(folder_obj, ListField):
# append a token to the filter of the form Asset.sg_asset_type
sg_data_key = "%s.%s" % (folder_obj.get_entity_type(), folder_obj.get_field_name())
elif isinstance(folder_obj, Static):
# Static folders cannot be used with folder $expressions. This error
# is typically caused by a missing .yml file
raise TankError("Static folder objects (%s) cannot be used in dynamic folder "
"expressions using the \"$\" syntax. Perhaps you are missing "
"the %s.yml file in your schema?" % (folder_obj, os.path.basename(folder_obj._full_path)))
else:
raise TankError("The folder object %s cannot be used in folder $expressions" % folder_obj)
return sg_data_key
def __init__(self, expression, parent):
"""
Constructor
"""
self._expression = expression
if self._expression.startswith("$"):
self._expression = self._expression[1:]
# now find which node is being pointed at
referenced_node = self._resolve_ref_r(parent)
if referenced_node is None:
raise TankError("The configuration expression $%s could not be found in %s or in "
"any of its parents." % (self._expression, parent))
self._sg_data_key = self.sg_data_key_for_folder_obj(referenced_node)
# all the nodes we refer to have a concept of an entity type.
# store that too for later use
self._associated_entity_type = referenced_node.get_entity_type()
def __repr__(self):
return "<FilterExpression '%s' >" % self._expression
def _resolve_ref_r(self, folder_obj):
"""
Resolves a $ref_token to an object by going up the tree
until it finds a match. The token is compared against the
folder name of the configuration item.
"""
full_folder_path = folder_obj.get_path()
folder_name = os.path.basename(full_folder_path)
if folder_name == self._expression:
# match!
return folder_obj
parent = folder_obj.get_parent()
if parent is None:
return parent # end recursion!
# try parent
return self._resolve_ref_r(parent)
def get_entity_type(self):
"""
Returns the shotgun entity type for this link
"""
return self._associated_entity_type
def resolve_shotgun_data(self, shotgun_data):
"""
Given a shotgun data dictionary, return an appropriate value
for this expression.
"""
if self._sg_data_key not in shotgun_data:
raise TankError("Cannot resolve data key %s from "
"shotgun data bundle %s" % (self._sg_data_key, shotgun_data))
value = shotgun_data[self._sg_data_key]
return value
def get_sg_data_key(self):
"""
Returns the data key that is associated with this expression.
When passing around pre-fetched shotgun data for node population,
this is done as a dictionary. The sg data key indicates which
part of this dictionary is associated with a particular $reference token.
"""
return self._sg_data_key
```
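As an illustration of the token resolution above: a plain `SymlinkToken` resolves to itself, while a `$token` is looked up in the pre-fetched Shotgun data via its `computed_name`. The dictionary contents below are made-up values shaped the way `resolve_token` expects:
```python
# entity folder entries carry a "computed_name" holding the on-disk folder name
sg_data = {
    "Shot": {"type": "Shot", "id": 123, "computed_name": "bunny_010_0020"},
    "Project": {"type": "Project", "id": 7, "computed_name": "big_buck_bunny"},
}

static = SymlinkToken("latest")  # plain name - returned verbatim
dynamic = SymlinkToken("$Shot")  # $token - resolved via computed_name

assert static.resolve_token(None, sg_data) == "latest"
assert dynamic.resolve_token(None, sg_data) == "bunny_010_0020"
```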
#### File: python/tank/log.py
```python
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
import time
import weakref
import uuid
from functools import wraps
from . import constants
class LogManager(object):
"""
Main interface for logging in Toolkit.
This class contains a series of methods to help standardize log output
and access. Since Toolkit uses the standard python logging interface,
you can manually configure and associate your logging if you like.
.. note:: This is a singleton class, so every time you instantiate it,
the same object is returned.
"""
# keeps track of the single instance of the class
__instance = None
class _SafeRotatingFileHandler(RotatingFileHandler):
"""
Provides all the functionality provided by Python's built-in RotatingFileHandler, but with a
failsafe when an I/O error happens when doing the rollover. In that case, the failure to
rename files will be ignored and the handler will keep writing to the current file. A message
will also be logged at the debug level so the user is aware that something really bad just
happened. Finally, the handler will not try to roll over again and will keep
appending to the current log file.
"""
def __init__(self, filename, mode="a", maxBytes=0, backupCount=0, encoding=None):
"""
:param str filename: Name of the log file.
:param str mode: Mode to open the file, should be "w" or "a". Defaults to "a"
:param int maxBytes: Maximum file size before rollover. By default, rollover never happens.
:param int backupCount: Number of backups to make. Defaults to 0.
:param encoding: Encoding to use when writing to the file. Defaults to None.
The file is opened immediately when the handler is created.
"""
RotatingFileHandler.__init__(self, filename, mode, maxBytes, backupCount, encoding)
self._disable_rollover = False
def doRollover(self):
"""
Rename every backup so the current log can be promoted to backup number one.
The new log file is empty. If this process fails due to any I/O error, rollover is
deactivated for this handler and logs will be appended to the current log file indefinitely.
"""
temp_backup_name = "%s.%s" % (self.baseFilename, uuid.uuid4())
# We need to close the file before renaming it (windows!)
if self.stream:
self.stream.close()
self.stream = None
# Before doing the rollover, check if renaming the first file would fail
# at all. If it does, then it is a good thing that we checked, otherwise
# the last backup would have been blown away before encountering the error.
# Take the scenario where there's only one backup. This means that
# doRollover would first delete the backup (.1) file so it can make
# room for the main file to be renamed to .1. However, if the main file
# can't be renamed, we've effectively lost 50% of the logs we had, which
# is not cool. Since most of the time only the first file will be locked,
# we will try to rename it first. If that fails right away as expected,
# we don't try any rollover, keep appending to the current log file
# and raise the _disable_rollover flag.
try:
os.rename(self.baseFilename, temp_backup_name)
except:
# It failed, so we'll simply append from now on.
log.debug(
"Cannot rotate log file '%s'. Logging will continue to this file, "
"exceeding the specified maximum size", self.baseFilename, exc_info=True
)
self._handle_rename_failure("a", disable_rollover=True)
return
# Everything went well, so now simply move the log file back into place
# so doRollover can do its work.
try:
os.rename(temp_backup_name, self.baseFilename)
except:
# For some reason we couldn't move the backup in its place.
log.debug(
"Unexpected issue while rotating log file '%s'. Logging will continue to this file, "
"exceeding the specified maximum size", self.baseFilename, exc_info=True
)
# The main log file doesn't exist anymore, so create a new file.
# Don't disable the rollover, this has nothing to do with rollover
# failing.
self._handle_rename_failure("w")
return
# Python 2.6 expects the file to be opened during rollover.
if not self.stream and sys.version_info[:2] < (2, 7):
self.mode = "a"
self.stream = self._open()
# Now that we are back in the original state we were in,
# we're pretty confident that the rollover will work. However, due to
# any number of reasons it could still fail. If it does, simply
# disable rollover and append to the current log.
try:
RotatingFileHandler.doRollover(self)
except:
# Something probably failed trying to rollover the backups,
# since the code above proved that in theory the main log file
# should be renamable. In any case, we didn't succeed in renaming,
# so disable rollover and reopen the main log file in append mode.
log.debug(
"Cannot rotate log file '%s'. Logging will continue to this file, "
"exceeding the specified maximum size", self.baseFilename, exc_info=True
)
self._handle_rename_failure("a", disable_rollover=True)
def _handle_rename_failure(self, mode, disable_rollover=False):
"""
Reopen the log file in the specific mode and optionally disable
future rollover operations.
:param str mode: Mode in which to reopen the main log file.
:param bool disable_rollover: If True, rollover won't be possible in the
future. Defaults to False.
"""
# Keep track that the rollover failed.
self._disable_rollover = disable_rollover
# If the file has been closed, reopen it in append mode.
if not self.stream:
self.mode = mode
self.stream = self._open()
def shouldRollover(self, record):
"""
Return whether the log file should roll over.
If a rollover operation failed in the past this method will always return False.
:param logging.Record record: record that is about to be written to the logs.
:returns: True if rollover should happen, False otherwise.
:rtype: bool
"""
return not self._disable_rollover and RotatingFileHandler.shouldRollover(self, record)
def __new__(cls, *args, **kwargs):
#
# note - this init isn't currently threadsafe.
#
# create the instance if it hasn't been created already
if not cls.__instance:
# remember the instance so that no more are created
instance = super(LogManager, cls).__new__(
cls,
*args,
**kwargs
)
# a global and standard rotating log file handler
# for writing generic toolkit logs to disk
instance._std_file_handler = None
instance._std_file_handler_log_file = None
# collection of weak references to handlers
# that were created via the log manager.
instance._handlers = []
# the root logger, created at code init
instance._root_logger = logging.getLogger(constants.ROOT_LOGGER_NAME)
# check the TK_DEBUG flag at startup
# this controls the "global debug" state
# in the log manager
if constants.DEBUG_LOGGING_ENV_VAR in os.environ:
log.debug(
"%s environment variable detected. Enabling debug logging." % constants.DEBUG_LOGGING_ENV_VAR
)
instance._global_debug = True
else:
instance._global_debug = False
cls.__instance = instance
return cls.__instance
@staticmethod
def get_logger(log_name):
"""
Generates standard logger objects for Toolkit.
If you want to add standard toolkit logging to your code,
the easiest way is to include the following at the top of
your python file::
import sgtk
logger = sgtk.LogManager.get_logger(__name__)
This will pick up the module hierarchy of your code and
parent it under the standard Toolkit logger.
.. note:: This method is useful if you are writing scripts, tools or wrappers.
If you are developing a Toolkit app, framework or engine,
you typically want to use :meth:`sgtk.platform.get_logger`
for your logging.
.. note:: To output logging to screen or to a console,
we recommend using the :meth:`initialize_custom_handler`
convenience method.
:param log_name: Name of logger to create. This name will be parented under
the sgtk namespace. If the name begins with ``tank.``, it will
be automatically replaced with ``sgtk.``.
:returns: Standard python logger.
"""
if log_name.startswith("tank."):
# old style import of core
#
# this will be parented under sgtk.core.xxx
#
log_name = "%s.core.%s" % (constants.ROOT_LOGGER_NAME, log_name[5:])
elif log_name.startswith("sgtk."):
# new style import of core
#
# this will be parented under sgtk.core.xxx
#
log_name = "%s.core.%s" % (constants.ROOT_LOGGER_NAME, log_name[5:])
elif log_name.startswith("env."):
# engine logging
#
# this will be parented under sgtk.env.xxx
# for example sgtk.env.asset.tk-maya
#
log_name = "%s.%s" % (constants.ROOT_LOGGER_NAME, log_name)
else:
# some external script or tool
log_name = "%s.ext.%s" % (constants.ROOT_LOGGER_NAME, log_name)
return logging.getLogger(log_name)
@staticmethod
def log_timing(func):
"""
Decorator that times and logs the execution of a method.
Sometimes it is useful to log runtime statistics about
how long a certain method takes to execute. In the
case of Toolkit, it is particularly helpful when debugging
issues to do with I/O or cloud connectivity.
If you have a method that for example connects to Shotgun to
retrieve data, you can decorate it::
@sgtk.LogManager.log_timing
def my_shotgun_publish_method():
'''
Publishes lots of files to Shotgun
'''
# shotgun code here
In the debug logs, timings will be written to the
``sgtk.stopwatch`` logger::
[DEBUG sgtk.stopwatch.module] my_shotgun_publish_method: 0.633s
"""
@wraps(func)
def wrapper(*args, **kwargs):
time_before = time.time()
try:
response = func(*args, **kwargs)
finally:
time_spent = time.time() - time_before
# log to special timing logger
timing_logger = logging.getLogger(
"%s.%s" % (constants.PROFILING_LOG_CHANNEL, func.__module__)
)
timing_logger.debug(
"%s: %fs" % (func.__name__, time_spent)
)
return response
return wrapper
def _set_global_debug(self, state):
"""
Sets the state of the global debug in toolkit.
"""
self._global_debug = state
if self._global_debug:
new_log_level = logging.DEBUG
else:
log.debug("Disabling debug logging.")
new_log_level = logging.INFO
# process handlers
for handler_weak_ref in self._handlers:
handler = handler_weak_ref()
if handler:
handler.setLevel(new_log_level)
# process backdoor logger
if self.base_file_handler:
self.base_file_handler.setLevel(new_log_level)
# log notifications
if self._global_debug:
log.debug(
"Debug logging enabled. To permanently enable it, "
"set the %s environment variable." % constants.DEBUG_LOGGING_ENV_VAR
)
def _get_global_debug(self):
"""
Controls the global debug flag in toolkit. Toggling this
flag will affect all log handlers that have been created
via :meth:`initialize_custom_handler`.
.. note:: Debug logging is off by default.
If you want to permanently enable debug logging,
set the environment variable ``TK_DEBUG``.
"""
return self._global_debug
global_debug = property(_get_global_debug, _set_global_debug)
@property
def log_folder(self):
"""
The folder where log files generated by :meth:`initialize_base_file_handler` are stored.
"""
# avoid cyclic references
from .util import LocalFileStorageManager
return LocalFileStorageManager.get_global_root(LocalFileStorageManager.LOGGING)
@property
def root_logger(self):
"""
Returns the root logger for Toolkit.
.. note:: If you want to add a custom logging handler to the root logger,
we recommend using the :meth:`initialize_custom_handler` method.
.. warning:: The root logger logs down to a debug resolution by default.
Do not change the output level of this logger as this will have
a global effect. If you are connecting a logging handler
and want to limit the stream of messages that are being
emitted, instead adjust the logging level of the handler.
:return: log object
"""
return self._root_logger
@property
def base_file_handler(self):
"""
The base file handler that is used to write log files to disk
in a default location, or None if not defined.
"""
return self._std_file_handler
def initialize_custom_handler(self, handler=None):
"""
Convenience method that initializes a log handler
and attaches it to the toolkit logging root.
.. note:: If you want to display log messages inside a DCC,
implement :meth:`~sgtk.platform.Engine._emit_log_message`.
.. note:: If :meth:`global_debug` is set to True, the handler created
will be set to debug level, otherwise it will be set to info level.
Furthermore, the log handler will automatically adjust its log
level whenever the global debug flag changes its state.
Calling this without parameters will generate a standard
stream based logging handler that logs to stderr::
# start logging to stderr
from sgtk import LogManager
LogManager().initialize_custom_handler()
If you want to log to a file instead, create a log handler
and pass that to the method::
handler = logging.FileHandler("/tmp/toolkit.log")
LogManager().initialize_custom_handler(handler)
The log handler will be configured to output its messages
in a standard fashion.
:param handler: Logging handler to connect with the toolkit logger.
If not passed, a standard stream handler will be created.
:return: The configured log handler.
"""
if handler is None:
handler = logging.StreamHandler()
# example: [DEBUG tank.log] message message
formatter = logging.Formatter(
"[%(levelname)s %(name)s] %(message)s"
)
handler.setFormatter(formatter)
self._root_logger.addHandler(handler)
if self.global_debug:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
# add it to our list of handlers, but as a
# weak reference so that it can be destroyed
# elsewhere (e.g. at engine shutdown)
self._handlers.append(weakref.ref(handler))
return handler
def uninitialize_base_file_handler(self):
"""
Uninitialize base file handler created with :meth:`initialize_base_file_handler`.
:returns: The path to the previous log file that is being switched away from,
None if no base logger was previously active.
"""
if self._std_file_handler is None:
return None
base_log_file = self._std_file_handler_log_file
# there is a log handler, so terminate it
log.debug(
"Tearing down existing log handler '%s' (%s)" % (base_log_file, self._std_file_handler)
)
self._root_logger.removeHandler(self._std_file_handler)
self._std_file_handler = None
self._std_file_handler_log_file = None
# return the previous base log file path.
return base_log_file
def initialize_base_file_handler(self, log_name):
"""
Create a file handler and attach it to the sgtk base logger.
This will write a rotating log file to disk in a standard
location and will capture all log messages passed through
the log hierarchy.
.. note:: Files will be written into the location on disk
defined by :meth:`log_folder`.
When you start an engine via the :meth:`sgtk.platform.start_engine` method,
a file handler will automatically be created if one doesn't already exist.
If you are manually launching toolkit, we recommend that you call
this method to initialize logging to file early on in your setup.
Calling it multiple times will not result in the information being
written to multiple different files - only one file logger can
exist per session.
:param log_name: Name of logger to create. This will form the
filename of the log file. The ``.log`` will be suffixed.
:returns: The path to the previous log file that is being switched away from,
None if no base logger was previously active.
"""
# avoid cyclic references
from .util import filesystem
return self.initialize_base_file_handler_from_path(
os.path.join(
self.log_folder,
"%s.log" % filesystem.create_valid_filename(log_name)
)
)
def initialize_base_file_handler_from_path(self, log_file):
"""
Create a file handler and attach it to the sgtk base logger.
This method is there for legacy Toolkit applications and shouldn't be used. Use
``initialize_base_file_handler`` instead.
:param log_file: Path of the file to write the logs to.
:returns: The path to the previous log file that is being switched away from,
None if no base logger was previously active.
"""
# shut down any previous logger
previous_log_file = self.uninitialize_base_file_handler()
log_folder, log_file_name = os.path.split(log_file)
log_name, _ = os.path.splitext(log_file_name)
log.debug("Switching file based std logger from '%s' to '%s'.", previous_log_file, log_file)
# store new log name
self._std_file_handler_log_file = log_file
# avoid cyclic references
from .util import filesystem
# set up logging root folder
filesystem.ensure_folder_exists(log_folder)
# create a rotating log file with a max size of 5 megs -
# this should make all log files easily attachable to support tickets.
# Python 2.5's implementation is quite different from 2.6 and 2.7, so we
# can't as easily support it for safe rotation.
if sys.version_info[:2] > (2, 5):
handler_factory = self._SafeRotatingFileHandler
else:
handler_factory = RotatingFileHandler
self._std_file_handler = handler_factory(
log_file,
maxBytes=1024 * 1024 * 5, # 5 MiB
backupCount=1 # Need at least one backup in order to rotate
)
# set the level based on global debug flag
if self.global_debug:
self._std_file_handler.setLevel(logging.DEBUG)
else:
self._std_file_handler.setLevel(logging.INFO)
# Set up formatter. Example:
# 2016-04-25 08:56:12,413 [44862 DEBUG tank.log] message message
formatter = logging.Formatter(
"%(asctime)s [%(process)d %(levelname)s %(name)s] %(message)s"
)
self._std_file_handler.setFormatter(formatter)
self._root_logger.addHandler(self._std_file_handler)
# log the fact that we set up the log file :)
log.debug("Writing to standard log file %s" % log_file)
# return previous log name
return previous_log_file
# the logger for logging messages from this file :)
log = LogManager.get_logger(__name__)
# initialize toolkit logging
#
# retrieve top most logger in the sgtk hierarchy
sgtk_root_logger = logging.getLogger(constants.ROOT_LOGGER_NAME)
# 'cap it' so that log messages don't propagate
# further upwards in the hierarchy. This is to avoid
# log message spilling over into other loggers; if you
# want to receive toolkit log messages, you have to
# explicitly attach a log handler to the sgtk top level
# logger (or any of its child loggers).
sgtk_root_logger.propagate = False
# The top level logger object has its message throughput
# level set to DEBUG by default.
# this should not be changed, but any filtering
# should happen via log handlers
sgtk_root_logger.setLevel(logging.DEBUG)
#
# create a 'nop' log handler to be attached.
# this is to avoid warnings being reported that
# logging is missing.
#
class NullHandler(logging.Handler):
def emit(self, record):
pass
# and add it to the logger
sgtk_root_logger.addHandler(NullHandler())
```
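Putting the pieces above together, a standalone script might wire up Toolkit logging like this (a sketch assuming an environment where the `sgtk` package is importable):
```python
import sgtk

# attach a stderr stream handler to the sgtk root logger
sgtk.LogManager().initialize_custom_handler()

# flip the global debug flag; attached handlers adjust their level automatically
sgtk.LogManager().global_debug = True

# loggers created this way are parented under the sgtk hierarchy
logger = sgtk.LogManager.get_logger(__name__)
logger.debug("hello from my script")

@sgtk.LogManager.log_timing
def slow_operation():
    # execution time is written to the sgtk.stopwatch channel at debug level
    pass

slow_operation()
```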
#### File: python/tank/pipelineconfig.py
```python
import os
import glob
import cPickle as pickle
from tank_vendor import yaml
from .errors import TankError, TankUnreadableFileError
from .util.version import is_version_older
from . import constants
from .platform.environment import InstalledEnvironment, WritableEnvironment
from .util import shotgun, yaml_cache
from .util import ShotgunPath
from . import hook
from . import pipelineconfig_utils
from . import template_includes
from . import LogManager
from .descriptor import Descriptor, create_descriptor, descriptor_uri_to_dict
log = LogManager.get_logger(__name__)
class PipelineConfiguration(object):
"""
Represents a pipeline configuration in Tank.
Use the factory methods in pipelineconfig_factory
to construct this object, do not create directly via the constructor.
"""
def __init__(self, pipeline_configuration_path, descriptor=None):
"""
Constructor. Do not call this directly, use the factory methods
in pipelineconfig_factory.
NOTE ABOUT SYMLINKS!
The pipeline_configuration_path is always populated by the paths
that were registered in shotgun, regardless of how the symlink setup
is handled on the OS level.
:param str pipeline_configuration_path: Path to the pipeline configuration on disk.
:param descriptor: Descriptor that was used to create this pipeline configuration.
Defaults to ``None`` for backwards compatibility with bootstrappers that only
pass down one argument. Also this argument was passed down by cores from
v0.18.72 to 0.18.94. The descriptor is now read from the disk inside
pipeline_configuration.yml.
:type descriptor: :class:`sgtk.descriptor.ConfigDescriptor`
"""
self._pc_root = pipeline_configuration_path
# validate that the current code version matches or is compatible with
# the code that is locally stored in this config!!!!
our_associated_api_version = self.get_associated_core_version()
# and get the version of the API currently in memory
current_api_version = pipelineconfig_utils.get_currently_running_api_version()
if our_associated_api_version not in [None, "unknown", "HEAD"] and \
is_version_older(current_api_version, our_associated_api_version):
# currently running API is too old!
current_api_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# tell the user that their core is too old for this config
#
# this can happen if you are running a configuration but you are getting the core
# API from somewhere else. For example, if you have added a core to your pythonpath
# and then try to do sgtk_from_path("/path/to/pipeline/config") and that config
# is using a more recent version of the core.
raise TankError("You are running Toolkit %s located in '%s'. The configuration you are "
"trying to use needs core version %s or higher. To fix this, "
"use the tank command (or Toolkit core API) located at '%s' "
"which is associated with this configuration." % (current_api_version,
current_api_path,
our_associated_api_version,
self.get_install_location()))
self._roots = pipelineconfig_utils.get_roots_metadata(self._pc_root)
# get the project tank disk name (Project.tank_name),
# stored in the pipeline config metadata file.
pipeline_config_metadata = self._get_metadata()
self._project_name = pipeline_config_metadata.get("project_name")
self._project_id = pipeline_config_metadata.get("project_id")
self._pc_id = pipeline_config_metadata.get("pc_id")
self._plugin_id = pipeline_config_metadata.get("plugin_id")
self._pc_name = pipeline_config_metadata.get("pc_name")
self._published_file_entity_type = pipeline_config_metadata.get(
"published_file_entity_type",
"PublishedFile"
)
self._use_shotgun_path_cache = pipeline_config_metadata.get(
"use_shotgun_path_cache",
False
)
# figure out whether to use the bundle cache or the
# local pipeline configuration 'install' cache
if pipeline_config_metadata.get("use_bundle_cache"):
# use bundle cache
self._bundle_cache_root_override = None
else:
# use cache relative to core install
self._bundle_cache_root_override = os.path.join(self.get_install_location(), "install")
if pipeline_config_metadata.get("bundle_cache_fallback_roots"):
self._bundle_cache_fallback_paths = pipeline_config_metadata.get("bundle_cache_fallback_roots")
else:
self._bundle_cache_fallback_paths = []
# There are four ways this initializer can be invoked.
#
# 1) Classic: We're instantiated from sgtk_from_path with a single path.
# 2) Bootstrap: path is set, descriptor is unset and no descriptor inside
# pipeline_configuration.yml
# 3) Bootstrap: path is set, descriptor is set and no descriptor inside
# pipeline_configuration.yml
# 4) Bootstrap, path is set, descriptor is set and descriptor inside
# pipeline_configuration.yml
#
# The correct way to handle all of this is to go from a descriptor string or dictionary and
# instantiate the correct descriptor type.
#
# Note that since the bootstrapper can't tell if the pipeline configuration is going to use
# the file to read the descriptor or not, it is always going to pass down the descriptor in
# the arguments. We can however ignore that argument in favor of the descriptor on disk.
descriptor_dict = pipeline_config_metadata.get("source_descriptor")
# We'll first assume the pipeline configuration is not installed.
is_installed = False
# If there is a descriptor in the file (4), we know we're not installed and we're done!
if descriptor_dict:
# The bootstrapper wrote the descriptor in the pipeline_configuration.yml file, nothing
# more needs to be done.
pass
# If there's nothing in the file, but we're being passed down something by the bootstrapper,
# we should use it! (3)
elif descriptor:
# Up to 0.18.94, we could be passed in a descriptor pointing to what we now consider to
# be an Descriptor.INSTALLED_CONFIG, but the API back then didn't make the distinction
# and called it a Descriptor.CONFIG.
# We will test to see if the path referred to by the descriptor is the same as the
# current os path. If it is the same then the descriptor is an installed descriptor. If
# it isn't then it must be pointing to something inside the bundle cache, which means it
# isn't installed.
if self._pc_root == descriptor.get_path():
is_installed = True
descriptor_dict = descriptor.get_dict()
# Now we only have a path set. (1&2). We can't assume anything, but since all pipeline
# configurations, cached or installed, have the same layout on disk, we'll assume that we're
# in an installed one. Also, since installed configurations are a bit more lenient about
things like info.yml, it's a great fit since there are definitely installed configurations
# in the wild without an info.yml in their config folder.
else:
is_installed = True
descriptor_dict = {
"type": "path",
"path": self._pc_root
}
descriptor = create_descriptor(
shotgun.get_deferred_sg_connection(),
Descriptor.INSTALLED_CONFIG if is_installed else Descriptor.CONFIG,
descriptor_dict,
self._bundle_cache_root_override,
self._bundle_cache_fallback_paths
)
self._descriptor = descriptor
#
# Now handle the case of a baked and immutable configuration.
#
# In this case, Toolkit is always started via the bootstrap manager.
# A baked config means that the configuration isn't entirely determined
# from what is written into the pipeline configuration yaml file but that
# certain values, such as the project id, are specified at runtime.
#
# Such values are determined by the bootstrap process and passed via an
# environment variable which is probed and unpacked below.
#
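# the payload is a pickled dictionary; an illustrative shape, matching the
# keys probed below (values are made up):
#   {"project_id": 123, "project_name": "my_project",
#    "pipeline_config_id": 17, "pipeline_config_name": "Primary",
#    "bundle_cache_paths": ["/studio/bundle_cache"]}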
if constants.ENV_VAR_EXTERNAL_PIPELINE_CONFIG_DATA in os.environ:
# default to an empty dict so the lookups below stay safe if unpickling fails
external_data = {}
try:
external_data = pickle.loads(os.environ[constants.ENV_VAR_EXTERNAL_PIPELINE_CONFIG_DATA])
except Exception as e:
log.warning("Could not load external config data from: %s" % e)
if "project_id" in external_data:
self._project_id = external_data["project_id"]
log.debug("%s: Setting project id to %s from external config data" % (self, self._project_id))
if "project_name" in external_data:
self._project_name = external_data["project_name"]
log.debug("%s: Setting project name to %s from external config data" % (self, self._project_name))
if "pipeline_config_id" in external_data:
self._pc_id = external_data["pipeline_config_id"]
log.debug("%s: Setting pipeline config id to %s from external config data" % (self, self._pc_id))
if "pipeline_config_name" in external_data:
self._pc_name = external_data["pipeline_config_name"]
log.debug("%s: Setting pipeline config name to %s from external config data" % (self, self._pc_name))
if "bundle_cache_paths" in external_data:
self._bundle_cache_fallback_paths = external_data["bundle_cache_paths"]
log.debug(
"%s: Setting bundle cache fallbacks to %s from external config data" % (self, self._bundle_cache_fallback_paths)
)
# Populate the global yaml_cache if we find a pickled cache on disk.
# TODO: For immutable configs, move this into bootstrap
self._populate_yaml_cache()
# run init hook
self.execute_core_hook_internal(constants.PIPELINE_CONFIGURATION_INIT_HOOK_NAME, parent=self)
def __repr__(self):
return "<Sgtk Configuration %s>" % self._pc_root
########################################################################################
# handling pipeline config metadata
def _get_metadata(self):
"""
Loads the pipeline config metadata (the pipeline_configuration.yml) file from disk.
:param pipeline_config_path: path to a pipeline configuration root folder
:returns: deserialized content of the file in the form of a dict.
"""
# now read in the pipeline_configuration.yml file
cfg_yml = os.path.join(
self.get_config_location(),
"core",
constants.PIPELINECONFIG_FILE
)
if not os.path.exists(cfg_yml):
raise TankError("Configuration metadata file '%s' missing! "
"Please contact support." % cfg_yml)
fh = open(cfg_yml, "rt")
try:
data = yaml.load(fh)
if data is None:
raise Exception("File contains no data!")
except Exception as e:
raise TankError("Looks like a config file is corrupt. Please contact "
"support! File: '%s' Error: %s" % (cfg_yml, e))
finally:
fh.close()
return data
def _update_metadata(self, updates):
"""
Updates the pipeline configuration on disk with the passed in values.
:param updates: Dictionary of values to update in the pipeline configuration
"""
# get current settings
curr_settings = self._get_metadata()
# apply updates to existing cache
curr_settings.update(updates)
# write the record to disk
pipe_config_sg_id_path = os.path.join(
self.get_config_location(),
"core",
constants.PIPELINECONFIG_FILE
)
old_umask = os.umask(0)
# guard against os.chmod failing before the file handle is created
fh = None
try:
os.chmod(pipe_config_sg_id_path, 0o666)
# and write the new file
fh = open(pipe_config_sg_id_path, "wt")
# using safe_dump instead of dump ensures that we
# don't serialize any non-std yaml content. In particular,
# this causes issues if a unicode object containing a 7-bit
# ascii string is passed as part of the data. in this case,
# dump will write out a special format which is later on
# *loaded in* as a unicode object, even if the content doesn't
# need unicode handling. And this causes issues down the line
# in toolkit code, assuming strings:
#
# >>> yaml.dump({"foo": u"bar"})
# "{foo: !!python/unicode 'bar'}\n"
# >>> yaml.safe_dump({"foo": u"bar"})
# '{foo: bar}\n'
#
yaml.safe_dump(curr_settings, fh)
except Exception as exp:
raise TankError("Could not write to configuration file '%s'. "
"Error reported: %s" % (pipe_config_sg_id_path, exp))
finally:
if fh:
fh.close()
os.umask(old_umask)
self._project_id = curr_settings.get("project_id")
self._pc_id = curr_settings.get("pc_id")
self._pc_name = curr_settings.get("pc_name")
def _populate_yaml_cache(self):
"""
Loads pickled yaml_cache items if they are found and merges them into
the global YamlCache.
"""
cache_file = os.path.join(self._pc_root, "yaml_cache.pickle")
if not os.path.exists(cache_file):
return
try:
fh = open(cache_file, 'rb')
except Exception as e:
log.warning("Could not read yaml cache %s: %s" % (cache_file, e))
return
try:
cache_items = pickle.load(fh)
yaml_cache.g_yaml_cache.merge_cache_items(cache_items)
except Exception as e:
log.warning("Could not merge yaml cache %s: %s" % (cache_file, e))
# bail out - cache_items is not defined if the load failed
return
finally:
fh.close()
log.debug("Read %s items from yaml cache %s" % (len(cache_items), cache_file))
########################################################################################
# general access and properties
def get_path(self):
"""
Returns the master root for this pipeline configuration
"""
return self._pc_root
def get_bundle_cache_fallback_paths(self):
"""
Returns the list of bundle cache fallback location for this pipeline configuration.
"""
return self._bundle_cache_fallback_paths
def get_all_os_paths(self):
"""
Returns the path to this config for all operating systems,
as defined in the install_locations file.
:returns: ShotgunPath
"""
return pipelineconfig_utils.resolve_all_os_paths_to_config(self._pc_root)
def get_name(self):
"""
Returns the name of this PC.
"""
return self._pc_name
def is_auto_path(self):
"""
Returns true if this config was set up with auto path mode.
This method will connect to shotgun in order to determine the
auto path status.
January 2016:
DEPRECATED - DO NOT USE! At some stage this will be removed.
:returns: boolean indicating auto path state
"""
if self.is_unmanaged():
# unmanaged configs introduced in core 0.18 means that
# pipeline configurations now may not even have a
# pipeline configuration entity in shotgun at all. This means
# that the configuration is tracking a particular version of a
# config directly, without any config settings anywhere.
#
return False
sg = shotgun.get_sg_connection()
data = sg.find_one(constants.PIPELINE_CONFIGURATION_ENTITY,
[["id", "is", self.get_shotgun_id()]],
["linux_path", "windows_path", "mac_path"])
if data is None:
raise TankError("Cannot find a Pipeline configuration in Shotgun "
"that has id %s." % self.get_shotgun_id())
def _is_empty(d):
"""
Returns true if value is "" or None, False otherwise
"""
if d is None or d == "":
return True
else:
return False
if _is_empty(data.get("linux_path")) and \
_is_empty(data.get("windows_path")) and \
_is_empty(data.get("mac_path")):
# all three pipeline config fields are empty.
# This means that we are running an auto path config
return True
else:
return False
def is_unmanaged(self):
"""
Returns true if the configuration is unmanaged, e.g. it does not have a
corresponding pipeline configuration in Shotgun.
:return: boolean indicating if config is unmanaged
"""
return self.get_shotgun_id() is None
def is_localized(self):
"""
Returns true if this pipeline configuration has its own Core
:returns: boolean indicating if config is localized
"""
return pipelineconfig_utils.is_localized(self._pc_root)
def get_shotgun_id(self):
"""
Returns the shotgun id for this PC.
"""
return self._pc_id
def get_plugin_id(self):
"""
Returns the plugin id for this PC.
For more information, see :meth:`~sgtk.bootstrap.ToolkitManager.plugin_id`.
"""
return self._plugin_id
def get_project_id(self):
"""
Returns the shotgun id for the project associated with this PC.
Can return None if the pipeline config represents the site and not a project.
"""
return self._project_id
def is_site_configuration(self):
"""
Returns True if the pipeline configuration is the site configuration.
:returns: True if this is a site configuration, False otherwise.
"""
return self.get_project_id() is None
def get_project_disk_name(self):
"""
Returns the project name for the project associated with this PC.
"""
return self._project_name
def get_published_file_entity_type(self):
"""
Returns the type of entity being used
for the 'published file' entity
"""
return self._published_file_entity_type
def convert_to_site_config(self):
"""
Converts the pipeline configuration into the site configuration.
"""
self._update_metadata({"project_id": None})
########################################################################################
# path cache
def get_shotgun_path_cache_enabled(self):
"""
Returns true if the shotgun path cache should be used.
This should only ever return False for setups created before 0.15.
All projects created with 0.14+ automatically set this to true.
"""
return self._use_shotgun_path_cache
def turn_on_shotgun_path_cache(self):
"""
Updates the pipeline configuration settings to have the shotgun based (v0.15+)
path cache functionality enabled.
Note that you need to force a full path sync once this command has been executed.
"""
if self.get_shotgun_path_cache_enabled():
raise TankError("Shotgun based path cache already turned on!")
self._update_metadata({"use_shotgun_path_cache": True})
self._use_shotgun_path_cache = True
########################################################################################
# storage roots related
def get_local_storage_roots(self):
"""
Returns local OS paths to all shotgun local storages used by toolkit.
Paths are validated and guaranteed not to be None.
:returns: dictionary of storages, for example {"primary": "/studio", "textures": "/textures"}
"""
proj_roots = {}
for storage_name in self._roots:
# get current os path
local_root_path = self._roots[storage_name].current_os
# validate it
if local_root_path is None:
raise TankError(
"Undefined toolkit storage! The local file storage '%s' is not defined for this "
"operating system! Please contact toolkit support." % storage_name)
proj_roots[storage_name] = local_root_path
return proj_roots
def get_all_platform_data_roots(self):
"""
Similar to get_data_roots but instead of returning the data roots for a single
operating system, the data roots for all operating systems are returned.
The return structure is a nested dictionary structure, for example:
{
"primary": {"win32": "z:\studio\my_project",
"linux2": "/studio/my_project",
"darwin": "/studio/my_project"},
"textures": {"win32": "z:\studio\my_project",
"linux2": None,
"darwin": "/studio/my_project"},
}
The operating system keys are returned on sys.platform-style notation.
If a data root has not been defined on a particular platform, None is
returned (see example above).
@todo - refactor to use ShotgunPath
:returns: dictionary of dictionaries. See above.
"""
proj_roots = {}
for storage_name in self._roots:
# join the project name to the storage ShotgunPath
project_path = self._roots[storage_name].join(self._project_name)
# break out the ShotgunPath object in sys.platform style dict
proj_roots[storage_name] = project_path.as_system_dict()
return proj_roots
def get_data_roots(self):
"""
Returns a dictionary of all the data roots available for this PC,
keyed by their storage name. Only returns paths for current platform.
Paths are guaranteed not to be None.
:returns: A dictionary keyed by storage name, for example
{"primary": "/studio/my_project", "textures": "/textures/my_project"}
"""
proj_roots = {}
for storage_name in self._roots:
# join the project name to the storage ShotgunPath
project_path = self._roots[storage_name].join(self._project_name)
# break out the ShotgunPath object in sys.platform style dict
proj_roots[storage_name] = project_path.current_os
return proj_roots
def has_associated_data_roots(self):
"""
Some configurations do not have a notion of a project storage and therefore
do not have any storages defined. This flag indicates whether a configuration
has any associated data storages.
:returns: true if the configuration has a primary data root defined, false if not
"""
return len(self.get_data_roots()) > 0
def get_primary_data_root(self):
"""
Returns the path to the primary data root for the current platform.
For configurations where no roots are defined at all,
an exception will be raised.
:returns: str to local path on disk
"""
if len(self.get_data_roots()) == 0:
raise TankError("Your current pipeline configuration does not have any project data "
"storages defined and therefore does not have a primary project data root!")
return self.get_data_roots().get(constants.PRIMARY_STORAGE_NAME)
########################################################################################
# installation payload (core/apps/engines) disk locations
def get_associated_core_version(self):
"""
Returns the version string for the core api associated with this config.
This method is 'forgiving' and in the case no associated core API can be
found for this pipeline configuration, None will be returned rather than
an exception raised.
:returns: version str e.g. 'v1.2.3', None if no version could be determined.
"""
associated_api_root = self.get_install_location()
return pipelineconfig_utils.get_core_api_version(associated_api_root)
def get_install_location(self):
"""
Returns the core api install location associated with this pipeline configuration.
Tries to resolve it via the explicit link which exists between
the pipeline config and its core. If this fails, it uses
runtime introspection to resolve it.
:returns: path string to the current core API install root location
"""
core_api_root = pipelineconfig_utils.get_core_path_for_config(self._pc_root)
if core_api_root is None:
# lookup failed. fall back onto runtime introspection
core_api_root = pipelineconfig_utils.get_path_to_current_core()
return core_api_root
def get_core_python_location(self):
"""
Returns the python root for this install.
:returns: path string
"""
return os.path.join(self.get_install_location(), "install", "core", "python")
########################################################################################
# descriptors and locations
def execute_post_install_bundle_hook(self, bundle_path):
"""
Executes a post install hook for a bundle.
Some bundles come with an associated script that is meant
to be executed after install. This method probes for such a script
and in case it exists, executes it.
:param bundle_path: Path to bundle (app/engine/framework)
"""
post_install_hook_path = os.path.join(
bundle_path,
"hooks",
constants.BUNDLE_POST_INSTALL_HOOK)
if os.path.exists(post_install_hook_path):
hook.execute_hook(
post_install_hook_path,
parent=None,
pipeline_configuration=self.get_path(),
path=bundle_path
)
def _preprocess_descriptor(self, descriptor_dict):
"""
Preprocess descriptor dictionary to resolve config-specific
constants and directives such as {PIPELINE_CONFIG}.
:param descriptor_dict: Descriptor dict to operate on
:returns: Descriptor dict with any directives resolved.
"""
if descriptor_dict.get("type") == "dev":
# several different path parameters are supported by the dev descriptor.
# scan through all path keys and look for pipeline config token
# platform specific resolve
platform_key = ShotgunPath.get_shotgun_storage_key()
if platform_key in descriptor_dict:
descriptor_dict[platform_key] = descriptor_dict[platform_key].replace(
constants.PIPELINE_CONFIG_DEV_DESCRIPTOR_TOKEN,
self.get_path()
)
# local path resolve
if "path" in descriptor_dict:
descriptor_dict["path"] = descriptor_dict["path"].replace(
constants.PIPELINE_CONFIG_DEV_DESCRIPTOR_TOKEN,
self.get_path()
)
return descriptor_dict
def _get_descriptor(self, descriptor_type, dict_or_uri, latest=False, constraint_pattern=None):
"""
Constructs a descriptor object given a descriptor dictionary.
:param descriptor_type: Descriptor type (APP, ENGINE, etc)
:param dict_or_uri: Descriptor dict or uri
:param latest: Resolve latest version of descriptor. This
typically requires some sort of remote lookup and may error
if the machine is not connected to the Internet.
:param constraint_pattern: If resolve_latest is True, this pattern can be used to constrain
the search for latest to only take part over a subset of versions.
This is a string that can be on the following form:
- v0.1.2, v0.12.3.2, v0.1.3beta - a specific version
- v0.12.x - get the highest v0.12 version
- v1.x.x - get the highest v1 version
:returns: Descriptor object
"""
# note: certain legacy methods, for example how shotgun menu actions are cached
# from the tank command, are not authenticated pathways. This is something we
# ultimately need to move away from, ensuring that the system is fully authenticated
# across the board. However, in the meantime, ensure that *basic* descriptor operations can
# be accessed without having a valid shotgun connection by using a deferred shotgun API wrapper
# rather than a wrapper that is initialized straight away. This ensures that a valid authentication
# state in toolkit is not required until the connection is actually needed. In the case of descriptors,
# a connection is typically only needed at download and when checking for latest. Path resolution
# methods do not require a connection.
sg_connection = shotgun.get_deferred_sg_connection()
if isinstance(dict_or_uri, basestring):
descriptor_dict = descriptor_uri_to_dict(dict_or_uri)
else:
descriptor_dict = dict_or_uri
descriptor_dict = self._preprocess_descriptor(descriptor_dict)
desc = create_descriptor(
sg_connection,
descriptor_type,
descriptor_dict,
self._bundle_cache_root_override,
self._bundle_cache_fallback_paths,
latest,
constraint_pattern
)
return desc
def get_app_descriptor(self, dict_or_uri):
"""
Convenience method that returns a descriptor for the app
that is associated with the given descriptor.
:param dict_or_uri: Descriptor dictionary or uri
:returns: Descriptor object
"""
return self._get_descriptor(Descriptor.APP, dict_or_uri)
def get_engine_descriptor(self, dict_or_uri):
"""
Convenience method that returns a descriptor for the engine
that is associated with the given descriptor.
:param dict_or_uri: Descriptor dictionary or uri
:returns: Descriptor object
"""
return self._get_descriptor(Descriptor.ENGINE, dict_or_uri)
def get_framework_descriptor(self, dict_or_uri):
"""
Convenience method that returns a descriptor for the framework
that is associated with the given descriptor.
:param dict_or_uri: Descriptor dictionary or uri
:returns: Descriptor object
"""
return self._get_descriptor(Descriptor.FRAMEWORK, dict_or_uri)
def get_latest_app_descriptor(self, dict_or_uri):
"""
Convenience method that returns the latest descriptor for the
given app. The descriptor dictionary or uri does not have to contain
any version information. This will be resolved as part of the call.
Please note that this call may be slow as it will typically connect
to an external source (git, toolkit app store etc) in order to determine
which version is the most recent.
:param dict_or_uri: Descriptor dictionary or uri
:returns: Descriptor object
"""
return self._get_descriptor(Descriptor.APP, dict_or_uri, latest=True)
def get_latest_engine_descriptor(self, dict_or_uri):
"""
Convenience method that returns the latest descriptor for the
given engine. The descriptor dictionary or uri does not have to contain
any version information. This will be resolved as part of the call.
Please note that this call may be slow as it will typically connect
to an external source (git, toolkit app store etc) in order to determine
which version is the most recent.
:param dict_or_uri: Descriptor dictionary or uri
:returns: Descriptor object
"""
return self._get_descriptor(Descriptor.ENGINE, dict_or_uri, latest=True)
def get_latest_framework_descriptor(self, dict_or_uri, constraint_pattern=None):
"""
Convenience method that returns the latest descriptor for the
given framework. The descriptor dictionary or uri does not have to contain
any version information. This will be resolved as part of the call.
Please note that this call may be slow as it will typically connect
to an external source (git, toolkit app store etc) in order to determine
which version is the most recent.
:param dict_or_uri: Descriptor dictionary or uri
:param constraint_pattern: This pattern can be used to constrain
the search for latest to only take part over a subset of versions.
This is a string that can be on the following form:
- v0.1.2, v0.12.3.2, v0.1.3beta - a specific version
- v0.12.x - get the highest v0.12 version
- v1.x.x - get the highest v1 version
:returns: Descriptor object
"""
return self._get_descriptor(
Descriptor.FRAMEWORK,
dict_or_uri,
latest=True,
constraint_pattern=constraint_pattern
)
def get_configuration_descriptor(self):
"""
Returns the :class:`~sgtk.descriptor.ConfigDescriptor` associated with
the pipeline configuration.
"""
return self._descriptor
########################################################################################
# configuration disk locations
def get_core_hooks_location(self):
"""
Returns the path to the core hooks location
:returns: path string
"""
return os.path.join(self._pc_root, "config", "core", "hooks")
def get_schema_config_location(self):
"""
Returns the location of the folder schema
:returns: path string
"""
return os.path.join(self._pc_root, "config", "core", "schema")
def get_config_location(self):
"""
Returns the config folder for the project
:returns: path string
"""
return os.path.join(self._pc_root, "config")
def get_hooks_location(self):
"""
Returns the hooks folder for the project
:returns: path string
"""
return os.path.join(self._pc_root, "config", "hooks")
def get_shotgun_menu_cache_location(self):
"""
returns the folder where shotgun menu cache files
(used by the browser plugin and java applet) are stored.
:returns: path string
"""
return os.path.join(self._pc_root, "cache")
########################################################################################
# configuration data access
def get_environments(self):
"""
Returns a list with all the environments in this configuration.
"""
env_root = os.path.join(self._pc_root, "config", "env")
env_names = []
for f in glob.glob(os.path.join(env_root, "*.yml")):
file_name = os.path.basename(f)
(name, _) = os.path.splitext(file_name)
env_names.append(name)
return env_names
def get_environment(self, env_name, context=None, writable=False):
"""
Returns an environment object given an environment name.
You can use the get_environments() method to get a list of
all the environment names.
:param env_name: name of the environment to load
:param context: context to seed the environment with
:param writable: If true, a writable environment object will be
returned, allowing a user to update it.
:returns: An environment object
"""
env_file = self.get_environment_path(env_name)
EnvClass = WritableEnvironment if writable else InstalledEnvironment
env_obj = EnvClass(env_file, self, context)
return env_obj
def get_environment_path(self, env_name):
"""
Returns the path to the environment yaml file for the given
environment name for this pipeline configuration.
:param env_name: The name of the environment.
:returns: String path to the environment yaml file.
"""
return os.path.join(self._pc_root, "config", "env", "%s.yml" % env_name)
def get_templates_config(self):
"""
Returns the templates configuration as an object
"""
templates_file = os.path.join(
self._pc_root,
"config",
"core",
constants.CONTENT_TEMPLATES_FILE,
)
try:
data = yaml_cache.g_yaml_cache.get(templates_file, deepcopy_data=False) or {}
data = template_includes.process_includes(templates_file, data)
except TankUnreadableFileError:
data = dict()
return data
########################################################################################
# helpers and internal
def execute_core_hook_internal(self, hook_name, parent, **kwargs):
"""
Executes an old-style core hook, passing it any keyword arguments supplied.
Typically you don't want to execute this method but instead
the tk.execute_core_hook method. Only use this one if you for
some reason do not have a tk object available.
:param hook_name: Name of hook to execute.
:param parent: Parent object to pass down to the hook
:param **kwargs: Named arguments to pass to the hook
:returns: Return value of the hook.
"""
# first look for the hook in the pipeline configuration
# if it does not exist, fall back onto core API default implementation.
hook_folder = self.get_core_hooks_location()
file_name = "%s.py" % hook_name
hook_path = os.path.join(hook_folder, file_name)
if not os.path.exists(hook_path):
# no custom hook detected in the pipeline configuration
# fall back on the hooks that come with the currently running version
# of the core API.
hooks_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "hooks"))
hook_path = os.path.join(hooks_path, file_name)
try:
return_value = hook.execute_hook(hook_path, parent, **kwargs)
except:
# log the full callstack to make sure that whatever the
# calling code is doing, this error is logged to help
# with troubleshooting and support
log.exception("Exception raised while executing hook '%s'" % hook_path)
raise
return return_value
def execute_core_hook_method_internal(self, hook_name, method_name, parent, **kwargs):
"""
Executes a new style core hook, passing it any keyword arguments supplied.
Typically you don't want to execute this method but instead
the tk.execute_core_hook method. Only use this one if you for
some reason do not have a tk object available.
:param hook_name: Name of hook to execute.
:param method_name: Name of hook method to execute
:param parent: Parent object to pass down to the hook
:param **kwargs: Named arguments to pass to the hook
:returns: Return value of the hook.
"""
# this is a new style hook which supports an inheritance chain
# first add the built-in core hook to the chain
file_name = "%s.py" % hook_name
hooks_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "hooks"))
hook_paths = [os.path.join(hooks_path, file_name)]
# the hook.method display name used when logging the metric
hook_method_display = "%s.%s" % (hook_name, method_name)
# now add a custom hook if that exists.
hook_folder = self.get_core_hooks_location()
hook_path = os.path.join(hook_folder, file_name)
if os.path.exists(hook_path):
hook_paths.append(hook_path)
try:
return_value = hook.execute_hook_method(hook_paths, parent, method_name, **kwargs)
except:
# log the full callstack to make sure that whatever the
# calling code is doing, this error is logged to help
# with troubleshooting and support
log.exception("Exception raised while executing hook '%s'" % hook_paths[-1])
raise
return return_value
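# --- Usage sketch (illustrative, not part of the original module) ---
# Given a constructed pipeline configuration instance `pc` (construction is
# not shown in this excerpt), descriptors and environments are typically
# resolved as below; the descriptor uri is a made-up example:
#
#   app_desc = pc.get_app_descriptor("sgtk:descriptor:app_store?name=tk-multi-publish2&version=v1.0.0")
#   env_names = pc.get_environments()
#   env = pc.get_environment(env_names[0])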
```
#### File: platform/events/event_file_close.py
```python
from .event_engine import EngineEvent
class FileCloseEvent(EngineEvent):
"""
An object representation of a file-close event.
The event holds a :meth:file_path property, indicating which open file or
document the event is referring to. In engine implementations which
integrate with MDI applications, the path is required in order to
distinguish which document is being closed.
In engine implementations where the current file isn't known, well defined
or accessible, a None value should be returned to indicate this.
Note that the file_path may represent a document that has not yet been
saved. In this case, it may not be a full path but instead the name of the
document, for example "untitled" or an empty string "". The event
information should transparently reflect whatever is returned from the
underlying application.
"""
def __init__(self, file_path):
"""
Constructor.
:param str file_path: The path to the file closed.
"""
super(FileCloseEvent, self).__init__()
self._file_path = file_path
@property
def file_path(self):
"""
The string path of the file that was closed.
"""
return self._file_path
def __str__(self):
return ("%s: %s" % ("FileCloseEvent", self.file_path))
```
#### File: tank/util/environment.py
```python
import os, sys
def append_path_to_env_var(env_var_name, path):
"""
Append the path to the given environment variable.
Creates the env var if it doesn't exist already.
will concatenate paths using : on linux and ; on windows
"""
return _add_path_to_env_var(env_var_name, path, prepend=False)
def prepend_path_to_env_var(env_var_name, path):
"""
Prepend the path to the given environment variable.
Creates the env var if it doesn't exist already.
will concatenate paths using : on linux and ; on windows
"""
return _add_path_to_env_var(env_var_name, path, prepend=True)
def _add_path_to_env_var(env_var_name, path, prepend=False):
"""
Append or prepend the path to the given environment variable.
Creates the env var if it doesn't exist already.
will concatenate paths using : on linux and ; on windows
"""
if sys.platform == "win32":
env_var_sep = ";"
else:
env_var_sep = ":"
paths = os.environ.get(env_var_name, "").split(env_var_sep)
# clean up empty entries
paths = [x for x in paths if x != ""]
# Do not add path if it already exists in the list
if path not in paths:
if prepend:
paths.insert(0, path)
else:
paths.append(path)
# and put it back
os.environ[env_var_name] = env_var_sep.join(paths)
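# --- Usage sketch (illustrative, not part of the original module) ---
# Demonstrates the append/prepend helpers; "MY_TOOL_PATH" is a made-up
# variable name used only for this example.
if __name__ == "__main__":
    os.environ["MY_TOOL_PATH"] = "/opt/base"
    append_path_to_env_var("MY_TOOL_PATH", "/opt/extra")
    prepend_path_to_env_var("MY_TOOL_PATH", "/opt/first")
    # On linux/mac this prints: /opt/first:/opt/base:/opt/extra
    print(os.environ["MY_TOOL_PATH"])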
```
#### File: tank/util/errors.py
```python
from ..errors import TankError
class ShotgunAttachmentDownloadError(TankError):
"""
Raised when a Shotgun attachment could not be downloaded
"""
class UnresolvableCoreConfigurationError(TankError):
"""
Raised when Toolkit is not able to resolve the path
"""
def __init__(self, full_path_to_file):
"""
:param str full_path_to_file: Path to the folder where shotgun.yml was expected.
"""
TankError.__init__(
self,
"Cannot resolve the core configuration from the location of the Sgtk Code! "
"This can happen if you try to move or symlink the Sgtk API. The "
"Sgtk API is currently picked up from %s which is an "
"invalid location." % full_path_to_file
)
class EnvironmentVariableFileLookupError(TankError):
"""
Raised when an environment variable specifying a location points to configuration
file that doesn't exist.
"""
def __init__(self, var_name, path):
"""
:param str var_name: Name of the environment variable used.
:param str path: Path to the configuration file that doesn't exist.
"""
TankError.__init__(
self,
"The environment variable '%s' refers to a configuration file on disk at '%s' that doesn't exist." % (
var_name,
path
)
)
class ShotgunPublishError(TankError):
"""
Raised when Toolkit is not able to register a published file in Shotgun.
The original message for the reported error is available in the 'error_message' property.
If a published file entity was created before the error happened, it will be
available in the 'entity' property.
"""
def __init__(self, error_message, entity=None):
"""
:param str error_message: An error message, typically coming from a caught exception.
:param dict entity: The Shotgun entity which was created, if any.
"""
self.error_message = error_message
self.entity = entity
extra_message = "."
if self.entity:
# Mention the created entity in the message by appending something like:
# , although TankPublishedFile dummy_path.txt (id: 2) was created.
extra_message = ", although %s %s (id: %d) was created." % (
self.entity["type"], self.entity["code"], self.entity["id"]
)
TankError.__init__(
self,
"Unable to complete publishing because of the following error: %s%s" % (
self.error_message, extra_message
)
)
class PublishResolveError(TankError):
"""
Base class for all errors relating to resolution of paths from publishes.
"""
pass
class PublishPathNotDefinedError(PublishResolveError):
"""
Exception raised when a publish does not have a path
defined for the current operating system platform. It
may or may not have publish paths defined on other
platforms.
"""
pass
class PublishPathNotSupported(PublishResolveError):
"""
Exception raised when a publish has a path defined but it is using a path
definition that cannot be resolved into a local path. This includes for
example unsupported url schemes.
"""
pass
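# --- Usage sketch (illustrative, not part of the original module) ---
# Shows the message composed by ShotgunPublishError when a published file
# entity was created before the failure; the entity dict below is made up.
if __name__ == "__main__":
    try:
        raise ShotgunPublishError(
            "Upload failed",
            entity={"type": "PublishedFile", "code": "scene.ma", "id": 42},
        )
    except ShotgunPublishError as err:
        # Unable to complete publishing because of the following error:
        # Upload failed, although PublishedFile scene.ma (id: 42) was created.
        print(err)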
```
#### File: tank/util/local_file_storage.py
```python
import os
import sys
import urlparse
from . import filesystem
from .. import LogManager
from ..errors import TankError
log = LogManager.get_logger(__name__)
class LocalFileStorageManager(object):
"""
Class that encapsulates logic for resolving local storage paths.
Toolkit needs to store cache data, logs and other items at runtime.
Some of this data is global, other is per site or per configuration.
This class provides a consistent and centralized interface for resolving
such paths and also handles compatibility across generations of path
standards if and when these change between releases.
.. note:: All paths returned by this class are local to the currently running
user and typically private or with limited access settings for other users.
If the current user's home directory is not an appropriate location to store
your user files, you can use the ``SHOTGUN_HOME`` environment variable to
override the root location of the files. In that case, the location for the
user files on each platform will be:
- Logging: ``$SHOTGUN_HOME/logs``
- Cache: ``$SHOTGUN_HOME``
- Persistent: ``$SHOTGUN_HOME/data``
- Preferences: ``$SHOTGUN_HOME/preferences``
:constant CORE_V17: Indicates compatibility with Core 0.17 or earlier
:constant CORE_V18: Indicates compatibility with Core 0.18 or later
:constant LOGGING: Indicates a path suitable for storing logs, useful for debugging
:constant CACHE: Indicates a path suitable for storing cache data that can be deleted
without any loss of functionality or state.
:constant PERSISTENT: Indicates a path suitable for storing data that needs
to be retained between sessions.
:constant PREFERENCES: Indicates a path that suitable for storing settings files and preferences.
"""
# generation of path structures
(CORE_V17, CORE_V18) = range(2)
# supported types of paths
(LOGGING, CACHE, PERSISTENT, PREFERENCES) = range(4)
@classmethod
def get_global_root(cls, path_type, generation=CORE_V18):
"""
Returns a generic Shotgun storage root.
The following paths will be used:
- On the mac, paths will point into ``~/Library/PATH_TYPE/Shotgun``, where PATH_TYPE
is controlled by the path_type property.
- On Windows, paths will created below a ``%APPDATA%/Shotgun`` root point.
- On Linux, paths will be created below a ``~/.shotgun`` root point.
.. note:: This method does not ensure that the folder exists.
:param path_type: Type of path to return. One of ``LocalFileStorageManager.LOGGING``,
``LocalFileStorageManager.CACHE``, ``LocalFileStorageManager.PERSISTENT``, where
logging is a path where log- and debug related data should be stored,
cache is a location intended for cache data, e.g. data that can be deleted
without affecting the state of execution, and persistent is a location intended
for data that is meant to persist. This includes things like settings and
preferences.
:param generation: Path standard generation to use. Defaults to ``LocalFileStorageManager.CORE_V18``,
which is the current generation of paths.
:return: Path as string
"""
# If SHOTGUN_HOME is set, the intent is to not use any of official locations and instead use
# a sandbox.
#
# If we still allowed the LocalFileStorageManager to return paths outside of SHOTGUN_HOME,
# it would mean that data from outside SHOTGUN_HOME could leak into it and that a user
# couldn't be confident that the sandbox was self-contained.
# If the environment variable is available and set to an actual value.
shotgun_home_override = os.environ.get("SHOTGUN_HOME")
if generation == cls.CORE_V18 or shotgun_home_override:
if shotgun_home_override:
# Make sure environment variables and ~ are evaluated.
shotgun_home_override = os.path.expanduser(
os.path.expandvars(shotgun_home_override)
)
# Make sure the path is an absolute path.
shotgun_home_override = os.path.abspath(shotgun_home_override)
# Root everything inside that custom path.
if path_type == cls.CACHE:
return shotgun_home_override
elif path_type == cls.PERSISTENT:
return os.path.join(shotgun_home_override, "data")
elif path_type == cls.PREFERENCES:
return os.path.join(shotgun_home_override, "preferences")
elif path_type == cls.LOGGING:
return os.path.join(shotgun_home_override, "logs")
else:
raise ValueError("Unsupported path type!")
# current generation of paths
elif sys.platform == "darwin":
if path_type == cls.CACHE:
return os.path.expanduser("~/Library/Caches/Shotgun")
elif path_type == cls.PERSISTENT:
return os.path.expanduser("~/Library/Application Support/Shotgun")
elif path_type == cls.PREFERENCES:
return os.path.expanduser("~/Library/Preferences/Shotgun")
elif path_type == cls.LOGGING:
return os.path.expanduser("~/Library/Logs/Shotgun")
else:
raise ValueError("Unsupported path type!")
elif sys.platform == "win32":
app_data = os.environ.get("APPDATA", "APPDATA_NOT_SET")
if path_type == cls.CACHE:
return os.path.join(app_data, "Shotgun")
elif path_type == cls.PERSISTENT:
return os.path.join(app_data, "Shotgun", "Data")
elif path_type == cls.PREFERENCES:
return os.path.join(app_data, "Shotgun", "Preferences")
elif path_type == cls.LOGGING:
return os.path.join(app_data, "Shotgun", "Logs")
else:
raise ValueError("Unsupported path type!")
elif sys.platform.startswith("linux"):
if path_type == cls.CACHE:
return os.path.expanduser("~/.shotgun")
elif path_type == cls.PERSISTENT:
return os.path.expanduser("~/.shotgun/data")
elif path_type == cls.PREFERENCES:
return os.path.expanduser("~/.shotgun/preferences")
elif path_type == cls.LOGGING:
return os.path.expanduser("~/.shotgun/logs")
else:
raise ValueError("Unsupported path type!")
else:
raise ValueError("Unknown platform: %s" % sys.platform)
if generation == cls.CORE_V17:
# previous generation of paths
if sys.platform == "darwin":
if path_type == cls.CACHE:
return os.path.expanduser("~/Library/Caches/Shotgun")
elif path_type == cls.PERSISTENT:
return os.path.expanduser("~/Library/Application Support/Shotgun")
elif path_type == cls.LOGGING:
return os.path.expanduser("~/Library/Logs/Shotgun")
else:
raise ValueError("Unsupported path type!")
elif sys.platform == "win32":
if path_type == cls.CACHE:
return os.path.join(os.environ.get("APPDATA", "APPDATA_NOT_SET"), "Shotgun")
elif path_type == cls.PERSISTENT:
return os.path.join(os.environ.get("APPDATA", "APPDATA_NOT_SET"), "Shotgun")
elif path_type == cls.LOGGING:
return os.path.join(os.environ.get("APPDATA", "APPDATA_NOT_SET"), "Shotgun")
else:
raise ValueError("Unsupported path type!")
elif sys.platform.startswith("linux"):
if path_type == cls.CACHE:
return os.path.expanduser("~/.shotgun")
elif path_type == cls.PERSISTENT:
return os.path.expanduser("~/.shotgun")
elif path_type == cls.LOGGING:
return os.path.expanduser("~/.shotgun")
else:
raise ValueError("Unsupported path type!")
else:
raise ValueError("Unknown platform: %s" % sys.platform)
@classmethod
def get_site_root(cls, hostname, path_type, generation=CORE_V18):
"""
Returns a cache root where items can be stored on a per site basis.
For more details, see :meth:`LocalFileStorageManager.get_global_root`.
.. note:: This method does not ensure that the folder exists.
:param hostname: Shotgun hostname as string, e.g. 'https://foo.shotgunstudio.com'
:param path_type: Type of path to return. One of ``LocalFileStorageManager.LOGGING``,
``LocalFileStorageManager.CACHE``, ``LocalFileStorageManager.PERSISTENT``, where
logging is a path where log- and debug related data should be stored,
cache is a location intended for cache data, e.g. data that can be deleted
without affecting the state of execution, and persistent is a location intended
for data that is meant to persist. This includes things like settings and
preferences.
:param generation: Path standard generation to use. Defaults to ``LocalFileStorageManager.CORE_V18``,
which is the current generation of paths.
:return: Path as string
"""
if hostname is None:
raise TankError(
"Cannot compute path for local site specific storage - no shotgun hostname specified!"
)
# get site only; https://www.FOO.com:8080 -> www.foo.com
base_url = urlparse.urlparse(hostname).netloc.split(":")[0].lower()
if generation > cls.CORE_V17:
# for 0.18, in order to apply further shortcuts to avoid hitting
# MAX_PATH on windows, strip shotgunstudio.com from all
# hosted sites
#
# mysite.shotgunstudio.com -> mysite
# shotgun.internal.int -> shotgun.internal.int
#
base_url = base_url.replace(".shotgunstudio.com", "")
return os.path.join(
cls.get_global_root(path_type, generation),
base_url
)
@classmethod
def get_configuration_root(
cls,
hostname,
project_id,
plugin_id,
pipeline_config_id,
path_type,
generation=CORE_V18):
"""
Returns the storage root for any data that is project and config specific.
- A well defined project id should always be passed. Passing None as the project
id indicates the *site* configuration, a special toolkit configuration
that represents the non-project state in Shotgun.
- Configurations that have a pipeline configuration in Shotgun should pass in
a pipeline configuration id. When a pipeline configuration is not registered
in Shotgun, this value should be None.
- If the configuration has been bootstrapped or has a known plugin id, this
should be specified via the plugin id parameter.
For more details, see :meth:`LocalFileStorageManager.get_global_root`.
Examples of paths that will be generated:
- Site config: ``ROOT/shotgunsite/p0``
- Project 123, config 33: ``ROOT/shotgunsite/p123c33``
- project 123, no config, plugin id review.rv: ``ROOT/shotgunsite/p123.review.rv``
.. note:: This method does not ensure that the folder exists.
:param hostname: Shotgun hostname as string, e.g. 'https://foo.shotgunstudio.com'
:param project_id: Shotgun project id as integer. For the site config, this should be None.
:param plugin_id: Plugin id string to identify the scope for a particular plugin
or integration. For more information,
see :meth:`~sgtk.bootstrap.ToolkitManager.plugin_id`. For
non-plugin based toolkit projects, this value is None.
:param pipeline_config_id: Shotgun pipeline config id. None for bootstrapped configs.
:param path_type: Type of path to return. One of ``LocalFileStorageManager.LOGGING``,
``LocalFileStorageManager.CACHE``, ``LocalFileStorageManager.PERSISTENT``, where
logging is a path where log- and debug related data should be stored,
cache is a location intended for cache data, e.g. data that can be deleted
without affecting the state of execution, and persistent is a location intended
for data that is meant to persist. This includes things like settings and
preferences.
:param generation: Path standard generation to use. Defaults to ``LocalFileStorageManager.CORE_V18``,
which is the current generation of paths.
:return: Path as string
"""
if generation == cls.CORE_V17:
# in order to be backwards compatible with pre-0.18 cache locations,
# handle the site configuration (e.g. when project id is None)
# as project id zero.
if project_id is None:
project_id = 0
# older paths are of the form root/mysite.shotgunstudio.com/project_123/config_123
return os.path.join(
cls.get_site_root(hostname, path_type, generation),
"project_%s" % project_id,
"config_%s" % pipeline_config_id
)
else:
# new paths are on the form
# project 123, config 33: root/mysite/p123c33
# project 123 with plugin id: root/mysite/p123.review.rv
# site project: root/mysite/p0
pc_suffix = ""
if pipeline_config_id and not plugin_id:
# a config that has a shotgun counterpart
pc_suffix = "c%d" % pipeline_config_id
elif plugin_id and not pipeline_config_id:
# no pc id but instead a plugin id string
pc_suffix = ".%s" % filesystem.create_valid_filename(plugin_id)
elif plugin_id and pipeline_config_id:
pc_suffix = "c%d.%s" % (pipeline_config_id, filesystem.create_valid_filename(plugin_id))
else:
# this is a possible, however not recommended state
pc_suffix = ""
if project_id is None:
# site config
project_config_folder = "site%s" % pc_suffix
else:
project_config_folder = "p%d%s" % (project_id, pc_suffix)
return os.path.join(
cls.get_site_root(hostname, path_type, generation),
project_config_folder
)
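# --- Usage sketch (illustrative, not part of the original module) ---
# Illustrates the path scheme documented above; the hostname and ids are
# made up and the exact prefix depends on the current platform.
if __name__ == "__main__":
    cache_root = LocalFileStorageManager.get_configuration_root(
        "https://mysite.shotgunstudio.com",
        project_id=123,
        plugin_id=None,
        pipeline_config_id=33,
        path_type=LocalFileStorageManager.CACHE,
    )
    # e.g. on linux: /home/<user>/.shotgun/mysite/p123c33
    print(cache_root)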
```
#### File: tank/util/process.py
```python
import subprocess
import pprint
import sys
from ..log import LogManager
logger = LogManager.get_logger(__name__)
class SubprocessCalledProcessError(Exception):
"""
Subprocess exception
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def subprocess_check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte string.
A somewhat-python 2.6 compatible subprocess.check_output call.
Subprocess.check_output was added to Python 2.7. For docs, see
https://docs.python.org/2/library/subprocess.html#subprocess.check_output
Adapted from http://stackoverflow.com/questions/2924310
This version, however, doesn't allow overriding stderr, stdout and stdin. stdin
is always closed right after launch and stderr is always redirected to stdout. This
is done in order to avoid DUPLICATE_SAME_ACCESS errors on Windows. Learn more about
it here: https://bugs.python.org/issue3905.
:returns: The output from the command
:raises: If the return code was non-zero it raises a SubprocessCalledProcessError.
The SubprocessCalledProcessError object will have the return code in the returncode
attribute and any output in the output attribute.
"""
if "stdout" in kwargs or "stderr" in kwargs or "stdin" in kwargs:
raise ValueError("stdout, stderr and stdin arguments not allowed, they will be overridden.")
process = subprocess.Popen(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
*popenargs, **kwargs
)
# Very important to close stdin on Windows. See issue mentioned above.
if sys.platform == "win32":
process.stdin.close()
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
logger.debug("Subprocess invocation failed:")
if popenargs:
logger.debug("Args : %s", pprint.pformat(popenargs))
if kwargs:
logger.debug("Kwargs: %s", pprint.pformat(kwargs))
logger.debug("Return code: %d", retcode)
logger.debug("Process stdout/stderr:")
logger.debug(output)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise SubprocessCalledProcessError(retcode, cmd, output=output)
return output
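# --- Usage sketch (illustrative, not part of the original module) ---
# Runs an arbitrary example command, capturing combined stdout/stderr.
if __name__ == "__main__":
    try:
        out = subprocess_check_output(["python", "--version"])
        print(out)
    except SubprocessCalledProcessError as err:
        print("failed with code %s: %s" % (err.returncode, err.output))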
```
#### File: tank/util/system_settings.py
```python
import urllib
class SystemSettings(object):
"""
Handles loading the system settings.
"""
@property
def http_proxy(self):
"""
Retrieves the operating system http proxy.
First, the method scans the environment for variables named http_proxy, in a case-insensitive way.
If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred.
When the method cannot find such environment variables:
- for Mac OS X, it will look for proxy information from Mac OS X System Configuration,
- for Windows, it will look for proxy information from Windows Systems Registry.
.. note:: There is a restriction when looking for proxy information from
Mac OS X System Configuration or Windows Systems Registry:
in these cases, the Toolkit does not support the use of proxies
which require authentication (username and password).
"""
# Get the dictionary of scheme to proxy server URL mappings; for example:
# {"http": "http://foo:[email protected]:80", "https": "http://172.16.17.32:443"}
# "getproxies" scans the environment for variables named <scheme>_proxy, in case insensitive way.
# When it cannot find it, for Mac OS X it looks for proxy information from Mac OSX System Configuration,
# and for Windows it looks for proxy information from Windows Systems Registry.
# If both lowercase and uppercase environment variables exist (and disagree), lowercase is preferred.
# Note the following restriction: "getproxies" does not support the use of proxies which
# require authentication (user and password) when looking for proxy information from
# Mac OSX System Configuration or Windows Systems Registry.
system_proxies = urllib.getproxies()
# Get the http proxy when it exists in the dictionary.
proxy = system_proxies.get("http")
if proxy:
# Remove any spurious "http://" from the http proxy string.
proxy = proxy.replace("http://", "", 1)
return proxy
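# --- Usage sketch (illustrative, not part of the original module) ---
# Reads the http proxy detected from the environment or the OS, if any.
if __name__ == "__main__":
    settings = SystemSettings()
    # Prints e.g. "proxy.example.com:8080", or None when no proxy is set.
    print(settings.http_proxy)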
```
#### File: tank/util/yaml_cache.py
```python
from __future__ import with_statement
import os
import copy
import threading
from tank_vendor import yaml
from ..errors import (
TankError,
TankUnreadableFileError,
TankFileDoesNotExistError,
)
class CacheItem(object):
"""
Represents a single item in the global yaml cache.
Each item carries with it a set of data, a stat of the .yml file that
it was sourced from (in os.stat form), and the path to the .yml file
itself.
"""
def __init__(self, path, data=None, stat=None):
"""
Initializes the item.
:param path: The path to the .yml file on disk.
:param data: The data sourced from the .yml file.
:param stat: The stat of the file on disk. If not provided, an os.stat
will be run and the result stored.
:raises: tank.errors.TankUnreadableFileError: File stat failure.
"""
self._path = os.path.normpath(path)
self._data = data
if stat is None:
try:
self._stat = os.stat(self.path)
except Exception as exc:
raise TankUnreadableFileError(
"Unable to stat file '%s': %s" % (self.path, exc)
)
else:
self._stat = stat
def _get_data(self):
"""The item's data."""
return self._data
def _set_data(self, config_data):
self._data = config_data
data = property(_get_data, _set_data)
@property
def path(self):
"""The path to the file on disk that the item was sourced from."""
return self._path
@property
def stat(self):
"""The stat of the file on disk that the item was sourced from."""
return self._stat
def given_item_newer(self, other):
"""
Tests whether the given item is newer than this.
:param other: The CacheItem to test against.
:raises: TypeError: Given item is not a CacheItem.
:returns: bool, True if other is newer, False if not.
"""
if not isinstance(other, CacheItem):
raise TypeError("Given item must be of type CacheItem.")
return other.stat.st_mtime > self.stat.st_mtime
def size_differs(self, other):
"""
Tests whether the file size of the given item differs from this item.
:param other: The CacheItem to test against.
:raises: TypeError: Given item is not a CacheItem.
:returns: bool, True if other is a different size on disk, False if not.
"""
if not isinstance(other, CacheItem):
raise TypeError("Given item must be of type CacheItem.")
return other.stat.st_size != self.stat.st_size
def __eq__(self, other):
if not isinstance(other, CacheItem):
raise TypeError("Given item must be of type CacheItem.")
return (other.stat.st_mtime == self.stat.st_mtime and not self.size_differs(other))
def __getitem__(self, key):
# Backwards compatibility just in case something outside
# of this module is expecting the old dict structure.
if key == "modified_at":
return self.stat.st_mtime
elif key == "file_size":
return self.stat.st_size
elif key == "data":
return self._data
else:
return getattr(self._data, key)
def __str__(self):
return str(self.path)
class YamlCache(object):
"""
Main yaml cache class
"""
def __init__(self, cache_dict=None, is_static=False):
"""
Construction
"""
self._cache = cache_dict or dict()
self._lock = threading.Lock()
self._is_static = is_static
def _get_is_static(self):
"""
Whether the cache is considered static or not. If the cache is static,
CacheItems in the cache will not be invalidated based on file mtime
and size when they are requested from the cache.
"""
return self._is_static
def _set_is_static(self, state):
self._is_static = bool(state)
is_static = property(_get_is_static, _set_is_static)
def invalidate(self, path):
"""
Invalidates the cache for a given path. This is usually called when writing
to a yaml file.
"""
with self._lock:
if path in self._cache:
del self._cache[path]
def get(self, path, deepcopy_data=True):
"""
Retrieve the yaml data for the specified path. If it's not already
in the cache or the cached version is out of date, then this will load
the yaml file from disk.
:param path: The path of the yaml file to load.
:param deepcopy_data: Return deepcopy of data. Default is True.
:returns: The raw yaml data loaded from the file.
"""
# Adding a new CacheItem to the cache will cause the file mtime
# and size on disk to be checked against existing cache data,
# then the loading of the yaml data if necessary before returning
# the appropriate item back to us, which will be either the new
# item we have created here with the yaml data stored within, or
# the existing cached data.
item = self._add(CacheItem(path))
# If asked to, return a deep copy of the cached data to ensure that
# the cached data is not updated accidentally!
if deepcopy_data:
return copy.deepcopy(item.data)
else:
return item.data
def get_cached_items(self):
"""
Returns a list of all CacheItems stored in the cache.
"""
return self._cache.values()
def merge_cache_items(self, cache_items):
"""
Merges the given CacheItem objects into the cache if they are newer
or of a different size on disk than what's already in the cache.
:param cache_items: A list of CacheItem objects.
"""
for item in cache_items:
self._add(item)
def _add(self, item):
"""
Adds the given item to the cache in a thread-safe way. If the given item
is older (by file mtime) than the existing cache data for that file then
the already-cached item will be returned. If the item is identical in
file mtime and file size to what's cached, the already-cached item will be
returned. Otherwise the item will be added to the cache and returned to
the caller. If the given item is added to the cache and it has not already
been populated with the yaml data from disk, that data will be read prior
to the item being added to the cache.
:param item: The CacheItem to add to the cache.
:returns: The cached CacheItem.
"""
self._lock.acquire()
try:
path = item.path
cached_item = self._cache.get(path)
# If this is a static cache, we won't do any checks on
# mod time and file size. If it's in the cache we return
# it, otherwise we populate the item data from disk, cache
# it, and then return it.
if self.is_static:
if cached_item:
return cached_item
else:
if not item.data:
self._populate_cache_item_data(item)
self._cache[path] = item
return item
else:
# Since this isn't a static cache, we need to make sure
# that we don't need to invalidate and recache this item
# based on mod time and file size on disk.
if cached_item and cached_item == item:
# It's already in the cache and matches mtime
# and file size, so we can just return what we
# already have. It's technically identical in
# terms of data to what we got, but it's best
# to return the instance we have since that's
# what previous logic in the cache did.
return cached_item
else:
# Load the yaml data from disk. If it's not already populated.
if not item.data:
self._populate_cache_item_data(item)
self._cache[path] = item
return item
finally:
self._lock.release()
def _populate_cache_item_data(self, item):
"""
Loads the CacheItem's YAML data from disk.
"""
path = item.path
try:
with open(path, "r") as fh:
raw_data = yaml.load(fh)
except IOError:
raise TankFileDoesNotExistError("File does not exist: %s" % path)
except Exception as e:
raise TankError("Could not open file '%s'. Error reported: '%s'" % (path, e))
# Populate the item's data before adding it to the cache.
item.data = raw_data
# The global instance of the YamlCache.
g_yaml_cache = YamlCache()
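# --- Usage sketch (illustrative, not part of the original module) ---
# Reads a yaml file through the global cache; repeated calls for an
# unchanged file are served from memory. The path below is made up.
if __name__ == "__main__":
    try:
        data = g_yaml_cache.get("/tmp/example.yml")
        print(data)
    except (TankFileDoesNotExistError, TankUnreadableFileError):
        print("no yaml file at the example path on this machine")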
```
#### File: tank_vendor/ruamel_yaml/make_win_whl.py
```python
import sys
import os
import shutil
def main():
src = sys.argv[1]
print src, '-->'
dir_name = os.path.dirname(src)
base_name = os.path.basename(src)
p, v, rest = base_name.split('-', 2)
#print dir_name
for pyver in ['cp26', 'cp27', 'cp33', 'cp34']:
for platform in ['win32', 'win_amd64']:
dst = os.path.join(dir_name,
'%s-%s-%s-none-%s.whl' % (
p, v, pyver, platform
))
print dst
shutil.copy(src, dst)
if __name__ == '__main__':
    main()
``` |
{
"source": "joancafom/GeneticAlgorithm",
"score": 3
} |
#### File: joancafom/GeneticAlgorithm/CrossoverOperator.py
```python
import random
from abc import ABCMeta, abstractmethod
class CrossoverOperator:
__metaclass__ = ABCMeta
@abstractmethod
def crossover(self, ch1, ch2):
pass
class OnePointCrossover(CrossoverOperator):
def crossover(self, ch1, ch2):
assert len(ch1) > 1
assert len(ch1) == len(ch2)
point = random.randint(1, len(ch1) - 1)
chr1 = ch1[:point] + ch2[point:]
chr2 = ch2[:point] + ch1[point:]
return chr1, chr2
class TwoPointsCrossover(CrossoverOperator):
def crossover(self, ch1, ch2):
assert len(ch1) > 2
assert len(ch1) == len(ch2)
point1 = random.randint(1, len(ch1) - 2)
point2 = random.randint(point1, len(ch1) - 1)
chr1 = ch1[:point1] + ch2[point1:point2] + ch1[point2:]
chr2 = ch2[:point1] + ch1[point1:point2] + ch2[point2:]
return chr1, chr2
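# --- Usage sketch (illustrative, not part of the original module) ---
# Crosses two list-encoded chromosomes; the offspring mix segments of both
# parents around the randomly chosen cut point(s).
if __name__ == "__main__":
    parent1 = [0, 0, 0, 0, 0, 0]
    parent2 = [1, 1, 1, 1, 1, 1]
    print(OnePointCrossover().crossover(parent1, parent2))
    # e.g. ([0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0])
    print(TwoPointsCrossover().crossover(parent1, parent2))
    # e.g. ([0, 1, 1, 1, 0, 0], [1, 0, 0, 0, 1, 1])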
```
#### File: joancafom/GeneticAlgorithm/StoppingCondition.py
```python
from abc import ABCMeta, abstractmethod
import datetime
class StoppingCondition:
__metaclass__ = ABCMeta
@abstractmethod
def is_satisfied(self):
pass
@abstractmethod
def update(self):
pass
class ElapsedTimeStoppingCondition(StoppingCondition):
# Time in seconds must be provided
def __init__(self, time):
self.initTime = datetime.datetime.now()
self.time = datetime.timedelta(seconds=time)
def is_satisfied(self):
now = datetime.datetime.now()
return (now - self.initTime) >= self.time
def update(self):
pass
class NumGenerationsStoppingCondition(StoppingCondition):
def __init__(self, max_generations):
self.maxGenerations = max_generations
self.currentGen = 0
def is_satisfied(self):
return self.currentGen == self.maxGenerations
def update(self):
self.currentGen += 1
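# --- Usage sketch (illustrative, not part of the original module) ---
# A typical generation loop driven by a stopping condition; the commented
# evolve step is a hypothetical stand-in for the actual GA iteration.
if __name__ == "__main__":
    condition = NumGenerationsStoppingCondition(max_generations=100)
    while not condition.is_satisfied():
        # population = evolve(population)  # hypothetical GA step
        condition.update()
    print("stopped after %d generations" % condition.currentGen)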
``` |
{
"source": "joancf/python-gatenlp",
"score": 3
} |
#### File: gatenlp/corpora/dirs.py
```python
import os
from gatenlp.urlfileutils import yield_lines_from
from gatenlp.document import Document
from gatenlp.corpora.base import DocumentSource, DocumentDestination, Corpus
from gatenlp.corpora.base import MultiProcessingAble
from gatenlp.corpora.base import EveryNthBase
def matching_paths(dirpath, exts=None, recursive=True, relative=True):
"""
Yields all relative file paths from dirpath which match the list of extensions
and which do not start with a dot.
Args:
dirpath: the directory to traverse
exts: a list of allowed extensions (inluding the dot)
recursive: if True (default) include all matching paths from all subdirectories as well, otherwise
only paths from the top directory.
relative: if True (default), the paths are relative to the directory path
"""
if recursive:
for root, _, filenames in os.walk(dirpath):
for fname in filenames:
if exts:
for ext in exts:
if fname.endswith(ext) and not fname.startswith("."):
if relative:
yield os.path.relpath(
os.path.join(root, fname), dirpath
)
else:
yield os.path.join(root, fname)
break
else:
if not fname.startswith("."):
if relative:
yield os.path.relpath(os.path.join(root, fname), dirpath)
else:
yield os.path.join(root, fname)
else:
for fname in os.listdir(dirpath):
full = os.path.join(dirpath, fname)
if not os.path.isfile(full) or fname.startswith("."):
pass
elif exts:
for ext in exts:
if fname.endswith(ext):
if relative:
yield os.path.relpath(full, dirpath)
else:
yield full
break
else:
if relative:
yield os.path.relpath(full, dirpath)
else:
yield full
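# --- Usage sketch (illustrative, not part of the original module) ---
# Collects all .txt paths below a directory; the directory name is a
# made-up example.
def _example_matching_paths(dirpath="/data/corpus"):
    """Usage sketch: list relative paths of all .txt files below dirpath."""
    return list(matching_paths(dirpath, exts=[".txt"], recursive=True))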
def maker_file_path_fromidx(digits=1, levels=1):
"""
Creates a method that returns a file path for the given number of leading digits and levels.
Args:
digits: minimum number of digits to use for the path, any number with fewer digits will have leading zeros
added.
levels: how to split the original sequence of digits into a hierarchical path name. For example if digits=10
and levels=3, the generated function will convert the index number 23 into 0000/000/023
Returns:
a function that takes the keyword arguments idx and doc and returns a relative path name (str)
"""
if (
not isinstance(digits, int)
or not isinstance(levels, int)
or digits < 1
or levels < 1
or digits < levels
):
raise Exception(
"digits and levels must be integers larger than 0 and digits must not be smaller than "
f"levels, got {digits}/{levels}"
)
def file_path_fromidx(doc=None, idx=None):
# NOTE: doc is unused here but used with other methods to create the file path!
if idx is None or not isinstance(idx, int) or idx < 0:
raise Exception("Index must be an integer >= 0")
per = int(digits / levels)
asstr = str(idx)
digs = max(0, digits - len(asstr))
tmp = "0" * digs
tmp += str(idx)
path = ""
fromdigit = len(tmp) - per
todigit = len(tmp)
for _lvl in range(levels - 1):
path = tmp[fromdigit:todigit] + path
# print("per=", per, "from=", fromdigit, "to=", todigit, "sec=", tmp[fromdigit:todigit])
path = "/" + path
fromdigit = fromdigit - per
todigit = todigit - per
path = tmp[:todigit] + path
return path
return file_path_fromidx
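# --- Usage sketch (illustrative, not part of the original module) ---
# Demonstrates the idx-to-path mapping of the generated function.
def _example_file_paths():
    """Usage sketch: digits=5/levels=1 gives flat names, 10/3 nests them."""
    flat = maker_file_path_fromidx(digits=5, levels=1)
    nested = maker_file_path_fromidx(digits=10, levels=3)
    assert flat(idx=23) == "00023"
    assert nested(idx=23) == "0000/000/023"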
# TODO: set the special features for the relative path, index number, document id?
class DirFilesSource(DocumentSource, EveryNthBase, MultiProcessingAble):
"""
A document source which iterates over documents represented as files in a directory.
"""
def __init__(
self,
dirpath,
paths=None,
paths_from=None,
exts=None,
fmt=None,
recursive=True,
sort=False,
nparts=1,
partnr=0,
):
"""
Create a DirFilesSource.
Args:
dirpath: the directory that contains the file to load as documents.
paths: if not None, must be an iterable of relative file paths to load from the directory
paths_from: if not None, must be a file or URL to load a list of file paths from
exts: an iterable of allowed file extensions or file extension regexps
fmt: the format to use for loading files. This is only useful if all files have the same format
but the file extensions does not indicate the format.
recursive: recursively include paths from all subdirectories as well
sort: sort paths so they get processed in sort order. The paths always get sorted if nparts is > 1.
nparts: only yield every nparts-th document (default 1: every document)
partnr: start with that index, before yielding every nparts-th document (default 0: start at beginning)
"""
self.dirpath = dirpath
if paths is not None and paths_from is not None:
raise Exception("Parameters paths and paths_from cannot be both specified")
super().__init__(nparts=nparts, partnr=partnr)
if paths is not None:
self.paths = paths
elif paths_from is not None:
self.paths = []
for pth in yield_lines_from(paths_from):
self.paths.append(pth.rstrip("\n\r"))
else:
self.paths = list(matching_paths(dirpath, exts=exts, recursive=recursive))
if sort or nparts > 1:
self.paths.sort()
if nparts > 1:
self.paths = [
p
for idx, p in enumerate(self.paths)
if ((idx - partnr) % nparts) == 0
]
self.fmt = fmt
def __iter__(self):
"""
Yield the next document from the source.
"""
for p in self.paths:
yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)
class DirFilesDestination(DocumentDestination):
"""
A destination where each document is stored in a file in a directory or directory tree in some
known serialization format. The filename or path of the file can be derived from a document feature,
the document name, the running number of file added, or any function that can derive a file path
from the document and the running number.
"""
def __init__(self, dirpath, path_from="idx", ext="bdocjs", fmt=None):
"""
Create a destination to store documents in files inside a directory or directory tree.
Args:
dirpath: the directory to contain the files
path_from: one of options listed below. If a string is used as a path name, then the forward slash
is always used as the directory path separator, on all systems!
* "idx": just use the index/running number of the added document as the base name
* "idx:5": use the index/running number with at least 5 digits in the name.
* "idx:10:2": use the index and organize a total of 10 digits into a hierarchical
pathname of 2 levels, so 10:2 would mean the first 5 digits are for the name of the subdirectory
  and the second 5 digits are for the file base name. 10:3 would give three levels: a first
  subdirectory level with 4 digits, a second with 3 digits, and the remaining 3 digits for the
  file base name.
NOTE: "idx" by itself is equivalent to idx:1:1
* "feature:fname": use the document feature with the feature name fname as a relative path as is
but add the extension
* "name": use the document name as the relative path, but add extension.
* somefunction: a function that should return the pathname (without extension) and should take two
keyword arguments: doc (the document) and idx (the running index of the document).
ext: the file extension to add to all generated file names
fmt: the format to use for serializing the document, if None, will try to determine from the extension.
"""
if not os.path.isdir(dirpath):
raise Exception("Not a directory: ", dirpath)
self.dirpath = dirpath
self.idx = 0
if path_from.startswith("idx"):
rest = path_from[
3:
] # if we have digits or levels, there is a leading colon!
if len(rest) == 0:
digits = 1
levels = 1
else:
parms = rest.split(":")
parms.append(1)
digits, levels = parms[1:3]
digits = int(digits)
levels = int(levels)
self.file_path_maker = maker_file_path_fromidx(digits, levels)
elif path_from.startswith("feature"):
_, fname = path_from.split(":")
            # accept the idx keyword too, since append() calls the maker with both doc and idx
            self.file_path_maker = lambda doc=None, idx=None: doc.features[fname]
        elif path_from == "name":
            self.file_path_maker = lambda doc=None, idx=None: doc.name
elif callable(path_from):
self.file_path_maker = path_from
else:
raise Exception(f"Not allowed for path_from: {path_from}")
if not ext.startswith("."):
ext = "." + ext
self.ext = ext
self.fmt = fmt
def append(self, doc):
"""
Add a document to the destination.
Args:
doc: the document or None, if None, no action is performed.
"""
if doc is None:
return
assert isinstance(doc, Document)
path = self.file_path_maker(doc=doc, idx=self.idx)
path = os.path.normpath(
path
) # convert forward slashes to backslashes on windows
path = os.path.join(self.dirpath, path) + self.ext
# check if we need to create the directories. For this we first need to get the directories part of the path,
# which is everything left of the last slash
if os.path.sep in path:
dirs = path[: path.rindex(os.path.sep)]
if not os.path.exists(os.path.normpath(dirs)):
os.makedirs(dirs)
Document.save(doc, path, fmt=self.fmt)
self.idx += 1
def close(self):
pass
class DirFilesCorpus(Corpus, MultiProcessingAble):
"""
A corpus representing all files in a directory that match the given extension.
"""
def __init__(self, dirpath, ext="bdocjs", fmt=None, recursive=True, sort=False, sort_reverse=False):
"""
Creates the DirCorpus.
Args:
dirpath: the directory path
ext: the file extension that must be matched by all files for the corpus
fmt: the format to use, if None, will be determined from the extension
recursive: if True (default) all matching files from all subdirectories are included
sort: if True, sort by file paths, if a function sort by that function (default: False)
sort_reverse: if sort is not False and this is True, sort in reverse order
"""
if not ext.startswith("."):
ext = "." + ext
self.dirpath = dirpath
self.ext = ext
self.fmt = fmt
self.paths = list(matching_paths(dirpath, exts=[ext], recursive=recursive))
if sort:
if callable(sort):
self.paths.sort(key=sort, reverse=sort_reverse)
else:
self.paths.sort(reverse=sort_reverse)
self.size = len(self.paths)
def __len__(self):
return self.size
def __getitem__(self, idx):
assert isinstance(idx, int)
path = self.paths[idx]
abspath = os.path.join(self.dirpath, path)
doc = Document.load(abspath, fmt=self.fmt)
doc.features[self.idxfeatname()] = idx
# doc.features["__idx"] = idx
# doc.features["__relpath"] = path
# doc.features["__abspath"] = abspath
return doc
def __setitem__(self, idx, doc):
"""
Set the document for a specific index.
Args:
idx: the index of the document
doc: the Document, if None, no action is performed and the existing document is left unchanged
"""
if doc is None:
return
assert isinstance(idx, int)
assert isinstance(doc, Document)
path = self.paths[idx]
doc.save(os.path.join(self.dirpath, path), fmt=self.fmt)
class NumberedDirFilesCorpus(Corpus, MultiProcessingAble):
"""
A corpus that represents files from a (nested) directory, where the filename is derived from
the index number of the document. This corpus can represent missing elements as None, both
on reading (when the corresponding expected document does not exist) and on writing (the
corresponding document gets deleted).
"""
def __init__(
self,
dirpath,
digits=1,
levels=1,
ext="bdocjs",
fmt=None,
size=None,
store_none=True,
):
"""
Creates the NumberedDirFilesCorpus. This corpus is able to return None for non-existing documents
and to remove document files by setting an index to None, depending on the parameters.
Args:
dirpath: the directory path
digits: the number of digits to use for the file path
levels: the number of levels to split the digits into, which are then used as subdirectory names.
ext: the file extension used for all files in the corpus
fmt: the format to use, if None, determined from the extension
size: the size of the corpus. This can be used to create a corpus from an empty directory
to contain only None elements initially. It can also be used to limit access to only the
first size elements if the directory contains more documents.
store_none: if True, will store None in the corpus, i.e. remove the corresponding file from
the directory. If False, will ignore the action and leave whatever is at the index unchanged.
"""
if not ext.startswith("."):
ext = "." + ext
self.dirpath = dirpath
self.ext = ext
self.fmt = fmt
self.size = size
self.store_none = store_none
self.file_path_maker = maker_file_path_fromidx(digits, levels)
def __len__(self):
return self.size
def __getitem__(self, idx):
assert isinstance(idx, int)
        path = self.file_path_maker(idx=idx)
        path = path + self.ext
        abspath = os.path.join(self.dirpath, path)
        if os.path.exists(abspath):
doc = Document.load(abspath, fmt=self.fmt)
doc.features[self.idxfeatname()] = idx
# doc.features["__idx"] = idx
# doc.features["__relpath"] = path
# doc.features["__abspath"] = abspath
else:
doc = None
return doc
def __setitem__(self, idx, doc):
assert isinstance(idx, int)
assert doc is None or isinstance(doc, Document)
        path = self.file_path_maker(idx=idx)
        path = path + self.ext
        abspath = os.path.join(self.dirpath, path)
        if doc is None:
            if self.store_none:
                if os.path.exists(abspath):
                    os.remove(abspath)
        else:
            doc.save(abspath, fmt=self.fmt)
```
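The path maker and the directory source/destination above are designed to work together: the maker turns a running document index into a nested relative path, and `DirFilesDestination` builds such a maker internally when `path_from` is an `"idx:..."` specification. Below is a minimal usage sketch, assuming these names are importable from `gatenlp.corpora`; the `indir`/`outdir` paths are hypothetical.
```python
# Sketch, assuming indir/ exists and contains .bdocjs documents (hypothetical paths).
import os
from gatenlp.corpora import DirFilesSource, DirFilesDestination, maker_file_path_fromidx

make_path = maker_file_path_fromidx(digits=6, levels=2)
print(make_path(idx=23))   # -> "000/023"

os.makedirs("outdir", exist_ok=True)  # the destination requires an existing directory
src = DirFilesSource("indir", exts=[".bdocjs"], recursive=True)
dest = DirFilesDestination("outdir", path_from="idx:6:2", ext="bdocjs")
for doc in src:
    dest.append(doc)
dest.close()
```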
#### File: gatenlp/corpora/files.py
```python
import json
from gatenlp.urlfileutils import yield_lines_from
from gatenlp.document import Document
from gatenlp.corpora.base import DocumentSource, DocumentDestination
from gatenlp.corpora.base import MultiProcessingAble
class BdocjsLinesFileSource(DocumentSource, MultiProcessingAble):
"""
A document source which reads one bdoc json serialization of a document from each line of the given file.
"""
def __init__(self, file):
"""
Create a BdocjsLinesFileSource.
Args:
file: the file path (a string) or an open file handle.
"""
self.file = file
def __iter__(self):
with open(self.file, "rt", encoding="utf-8") as infp:
for line in infp:
yield Document.load_mem(line, fmt="json")
class BdocjsLinesFileDestination(DocumentDestination):
"""
Writes one line of JSON per document to a single output file.
"""
def __init__(self, file):
"""
Args:
file: the file to write to. If it exists, it gets overwritten without warning.
Expected to be a string or an open file handle.
"""
if isinstance(file, str):
self.fh = open(file, "wt", encoding="utf-8")
else:
self.fh = file
self.n = 0
def __enter__(self):
return self
def __exit__(self, extype, value, traceback):
self.fh.close()
def append(self, doc):
"""
Append a document to the destination.
Args:
doc: the document, if None, no action is performed.
"""
if doc is None:
return
assert isinstance(doc, Document)
self.fh.write(doc.save_mem(fmt="json"))
self.fh.write("\n")
self.n += 1
def close(self):
self.fh.close()
class JsonLinesFileSource(DocumentSource, MultiProcessingAble):
"""
A document source which reads one json serialization per line, creates a document from one field
in the json and optionally stores all or a selection of remaining fields as document feature "__data".
"""
def __init__(self, file, text_field="text", data_fields=None, data_feature="__data"):
"""
Create a JsonLinesFileSource.
Args:
file: the file path (a string) or an open file handle.
text_field: the field name where to get the document text from.
data_fields: if a list of names, store these fields in the "__data" feature. if True, store all fields.
data_feature: the name of the data feature, default is "__data"
"""
# feature_fields: NOT YET IMPLEMENTED -- a mapping from original json fields to document features
self.file = file
self.text_field = text_field
self.data_fields = data_fields
self.data_feature = data_feature
def __iter__(self):
with open(self.file, "rt", encoding="utf-8") as infp:
for line in infp:
data = json.loads(line)
# TODO: what if the field does not exist? should we use get(text_field, "") instead?
text = data[self.text_field]
doc = Document(text)
if self.data_fields:
if isinstance(self.data_fields, list):
tmp = {}
for fname in self.data_fields:
# TODO: what if the field does not exist?
tmp[fname] = data[fname]
else:
tmp = data
doc.features[self.data_feature] = tmp
yield doc
class JsonLinesFileDestination(DocumentDestination):
"""
Writes one line of JSON per document to a single output file. This will either write the document json
as nested data or the document text to the field designated for the document and will write other json
fields from the "__data" document feature.
"""
def __init__(self, file, document_field="text", document_bdocjs=False, data_fields=True, data_feature="__data"):
"""
Args:
file: the file to write to. If it exists, it gets overwritten without warning.
Expected to be a string or an open file handle.
document_field: the name of the json field that will contain the document either just the text or
the bdocjs representation if document_bdocjs is True.
document_bdocjs: if True store the bdocjs serialization into the document_field instead of just the text
data_fields: if a list, only store these fields in the json, if False, do not store any additional fields.
Default is True: store all fields as is.
data_feature: the name of the data feature, default is "__data"
"""
if isinstance(file, str):
self.fh = open(file, "wt", encoding="utf-8")
else:
self.fh = file
self.n = 0
self.document_field = document_field
self.document_bdocjs = document_bdocjs
self.data_fields = data_fields
self.data_feature = data_feature
def __enter__(self):
return self
def __exit__(self, _extype, _value, _traceback):
self.fh.close()
def append(self, doc):
"""
Append a document to the destination.
Args:
doc: the document, if None, no action is performed.
"""
if doc is None:
return
assert isinstance(doc, Document)
data = {}
if self.data_fields:
if isinstance(self.data_fields, list):
for fname in self.data_fields:
data[fname] = doc.features[self.data_feature][fname]
else:
data.update(doc.features[self.data_feature])
# assign the document field last so it overwrites anything that comes from the data feature!
if self.document_bdocjs:
data[self.document_field] = doc.save_mem(fmt="json")
else:
data[self.document_field] = doc.text
self.fh.write(json.dumps(data))
self.fh.write("\n")
self.n += 1
def close(self):
self.fh.close()
class TsvFileSource(DocumentSource, MultiProcessingAble):
"""
A TsvFileSource is a DocumentSource which is a single TSV file with a fixed number of tab-separated
values per row. Each document in sequence is created from the text in one of the columns and
document features can be set from arbitrary columns as well.
"""
def __init__(self, source, hdr=True, text_col=None, feature_cols=None, data_cols=None, data_feature="__data"):
"""
Creates the TsvFileSource.
Args:
source: a file path or URL
hdr: if True (default), expects a header line with the column names, if a list, should be the list
of column names, if False/None, no header line is expected.
text_col: the column which contains the text for creating the document. Either the column number,
or the name of the column (only possible if there is a header line) or a function that should
take the list of fields and arbitrary kwargs and return the text. Also passes "cols" and "n"
as keyword arguments.
feature_cols: if not None, must be either a dictionary mapping document feature names to the
column numbers or column names of where to get the feature value from;
or a function that should take the list of fields and arbitrary kwargs and return a dictionary
with the features. Also passes "cols" (dict mapping column names to column indices, or None) and
"n" (current line number) as keyword arguments.
data_cols: if not None, either an iterable of the names of columns to store in the special document
feature "__data" or if "True", stores all columns. At the moment this only works if the tsv file
has a header line. The values are stored as a list in the order of the names given or the original
order of the values in the TSV file.
data_feature: the name of the document feature where to store the data, default is "__data"
"""
assert text_col is not None
self.hdr = hdr
self.text_col = text_col
self.feature_cols = feature_cols
self.data_cols = data_cols
self.source = source
self.n = 0
self.hdr2col = {}
if data_cols and not hdr:
raise Exception("Header must be present if data_cols should be used")
self.data_feature = data_feature
def __iter__(self):
reader = yield_lines_from(self.source)
if self.hdr and self.n == 0:
self.n += 1
self.hdr = next(reader).rstrip("\n\r").split("\t")
if self.hdr:
self.hdr2col = {name: idx for idx, name in enumerate(self.hdr)}
for line in reader:
line = line.rstrip("\n\r")
fields = line.split("\t")
if isinstance(self.text_col, int):
text = fields[self.text_col]
elif callable(self.text_col):
text = self.text_col(fields, cols=self.hdr2col, n=self.n)
else:
text = fields[self.hdr2col[self.text_col]]
doc = Document(text)
if self.feature_cols:
if callable(self.feature_cols):
doc.features.update(
self.feature_cols(fields, cols=self.hdr2col, n=self.n)
)
else:
for fname, colid in self.feature_cols.items():
if isinstance(colid, int):
value = fields[colid]
else:
value = fields[self.hdr2col[colid]]
doc.features[fname] = value
if self.data_cols:
if isinstance(self.data_cols, list):
data = {}
for cname in self.data_cols:
if isinstance(cname, str):
data[cname] = fields[self.hdr2col[cname]]
else:
# assume it is the column index!
data[cname] = fields[cname]
else:
data = fields
doc.features[self.data_feature] = data
self.n += 1
yield doc
```
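`TsvFileSource` turns each TSV row into a document, and `JsonLinesFileDestination` writes one JSON object per document. A small sketch, assuming a hypothetical `data.tsv` with a header line `id<TAB>text<TAB>label`:
```python
# Sketch with made-up file names; data_fields=False writes only the text field,
# since these documents carry no "__data" feature.
from gatenlp.corpora import TsvFileSource, JsonLinesFileDestination

src = TsvFileSource("data.tsv", hdr=True, text_col="text", feature_cols={"label": "label"})
with JsonLinesFileDestination("out.jsonl", document_field="text", data_fields=False) as dest:
    for doc in src:
        dest.append(doc)
```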
#### File: gatenlp/corpora/memory.py
```python
from gatenlp import Document
from gatenlp.corpora.base import DocumentSource, Corpus
class ListCorpus(Corpus):
"""
Make a Python list of documents available as a Corpus instance.
"""
@classmethod
def empty(cls, n):
"""
Create an empty corpus of size n where all elements are None.
Args:
n: size of corpus
Returns:
a ListCorpus instance with n elements which are all None
"""
l1 = [None] * n
return cls(l1)
def __init__(self, thelist, store_none=True):
"""
Provides a corpus interface to a list or list-like data structure.
Note that this provides the proper implementation of append which stores back to the index
provided in the document feature "__idx" instead of actually appending a new element to the list!
Args:
thelist: the list to wrap as a corpus
store_none: if True, a None value is stored into the corpus, otherwise, None will leave the
entry unchanged.
"""
super().__init__()
self._list = thelist
self.store_none = store_none
def __getitem__(self, idx):
doc = self._list[idx]
self.setidxfeature(doc, idx)
return doc
def __setitem__(self, key, value):
if value is None:
if self.store_none:
self._list[key] = value
else:
assert isinstance(value, Document)
self._list[key] = value
def __len__(self):
return len(self._list)
def append(self, doc: Document):
if doc is None:
if self.store_none:
self._list.append(doc)
else:
assert isinstance(doc, Document)
self._list.append(doc)
# TODO: implement data_cols
class PandasDfSource(DocumentSource):
"""
A document source which creates documents from the text in some data frame column for each row, and
sets features from arbitrary columns in the row.
"""
def __init__(self, df, text_col=None, feature_cols=None, data_cols=None, data_feature="__data"):
"""
Creates a PandasDfSource.
Args:
df: the data frame
text_col: the name of the column that contains the text
feature_cols: a dictionary that maps document feature names to column names of where to get the
feature value from (default: None)
data_cols: if a list, store those cols in the data feature, if True, store all cols.
data_feature: the name of the data feature, default is "__data"
"""
assert text_col is not None
self.text_col = text_col
self.feature_cols = feature_cols
self.source = df
self.reader = df.iterrows()
self.n = 0
self.data_cols = data_cols
self.data_feature = data_feature
self.colnames = list(df.columns)
def __iter__(self):
for _, row in self.reader:
text = row[self.text_col]
doc = Document(text)
if self.feature_cols:
for fname, colname in self.feature_cols.items():
value = row[colname]
doc.features[fname] = value
if self.data_cols:
if isinstance(self.data_cols, list):
data = {}
for cname in self.data_cols:
data[cname] = row[cname]
else:
data = {fname: row[fname] for fname in self.colnames}
doc.features[self.data_feature] = data
self.n += 1
yield doc
```
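A short sketch of the in-memory classes above, with made-up data; it shows how `PandasDfSource` maps columns to text and features and how `ListCorpus` supports in-place item assignment:
```python
import pandas as pd
from gatenlp import Document
from gatenlp.corpora import ListCorpus, PandasDfSource

df = pd.DataFrame({"text": ["first document", "second document"], "label": ["a", "b"]})
docs = list(PandasDfSource(df, text_col="text", feature_cols={"label": "label"}))

corpus = ListCorpus(docs)            # list-backed corpus with item assignment
corpus[0] = Document("replacement")  # stores back into the underlying list
print(len(corpus), corpus[1].features["label"])
```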
#### File: gatenlp/gateworker/gateworkerannotator.py
```python
from gatenlp.urlfileutils import is_url
from gatenlp.processing.annotator import Annotator
# NOTE: we delay importing py4j to the class initializer. This allows us to make GateWorker available via gatenlp
# but does not force everyone to actually have py4j installed if they do not use the GateWorker
# from py4j.java_gateway import JavaGateway, GatewayParameters
from gatenlp.utils import init_logger
logger = init_logger("gateworker-annotator")
class GateWorkerAnnotator(Annotator): # pragma: no cover
# TODO: parameter to influence how exceptions are handled
def __init__(
self,
pipeline,
gateworker,
annsets_send=None,
annsets_receive=None,
replace_anns=False,
update_document=False,
):
"""
Create a GateWorker annotator.
This starts the gate worker, loads the pipeline and
can then be used to annotate Python gatenlp Document instances with the Java GATE
pipeline.
Note: to make sure that start/finish callbacks on the Java side are invoked, the annotator
start() method should be invoked once before processing documents and finish() should
get called once after processing documents. (Any Executor implementation should do this
automatically)
If the GateWorkerAnnotator is not used any more, close() should be invoked to terminate
the Java GATE Worker process.
Example:
```python
pipeline = GateWorkerAnnotator("annie.xgapp", GateWorker())
for idx, doc in enumerate(mycorpus):
    mycorpus[idx] = pipeline(doc)
```
Args:
pipeline: the path to a Java GATE pipeline to load into the GATE worker
gateworker: the GateWorker instance to use for running the Java GATE pipeline
annsets_send: a list of either annotation set names, or tuples where the first element
is the name of an annotation set and the second element is either the name of a type
or a list of type names. If not None, only the sets/types specified are sent to Java GATE.
If an empty list is specified, no annotations are sent at all.
annsets_receive: this only works if update_document is True: same format as annsets_send to specify
which annotation sets/types are
sent back to Python after the document has been processed on the Java side.
replace_anns: this is only relevant if update_document is True: if True and an annotation is received
which already exists (same set and annotation id)
then the existing annotation is replaced (if offsets and type are also same, only the features are
replaced). If False, all received annotations are added which may change their annotation id.
update_document: if True, then existing annotations in the gatenlp document are kept and the annotations
received from Java GATE are added. In this case, other changes to the document, e.g. the document
text or document features are not applied to the current python document.
If False, the existing document is completely replaced with what gets
received from Java GATE.
"""
self.pipeline = pipeline
self.annsets_send = annsets_send
self.annsets_receive = annsets_receive
self.replace_anns = replace_anns
self.update_document = update_document
self.gateworker = gateworker
isurl, ext = is_url(pipeline)
if isurl:
self.controller = self.gateworker.worker.loadPipelineFromUri(ext)
else:
self.controller = self.gateworker.worker.loadPipelineFromFile(ext)
self.corpus = self.gateworker.worker.newCorpus()
self.controller.setCorpus(self.corpus)
self.controller.setControllerCallbacksEnabled(False)
def start(self):
"""
Invoke the controller execution started method on the GATE controller.
"""
self.controller.invokeControllerExecutionStarted()
def finish(self):
"""
Invoke the controller execution finished method on the GATE controller.
"""
self.controller.invokeControllerExecutionFinished()
def __call__(self, doc, **_kwargs):
"""
Run the GATE controller on the given document.
This runs the GATE pipeline (controller) on the given document by first sending the document
to the GATE process and converting it to a GATE document there, running the pipeline on it,
and sending the document back and converting back to a new gatenlp Document.
Args:
doc: the document to process
**kwargs: ignored so far
Returns:
the processed gatenlp document
"""
if self.annsets_send is not None:
# create shallow copy, we only need it for reading!
tmpdoc = doc.copy(annsets=self.annsets_send)
else:
tmpdoc = doc
gdoc = self.gateworker.pdoc2gdoc(tmpdoc)
self.gateworker.worker.run4Document(self.controller, gdoc)
if self.update_document:
self.gateworker.gdocanns2pdoc(gdoc, doc, annsets=self.annsets_receive, replace=self.replace_anns)
else:
doc = self.gateworker.gdoc2pdoc(gdoc)
self.gateworker.del_resource(gdoc)
return doc
```
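The start/finish/close protocol described in the docstring looks roughly like the following sketch; it requires a local Java GATE installation plus py4j, and `"annie.xgapp"` is a hypothetical pipeline path:
```python
# Sketch only; GateWorker/GateWorkerAnnotator are assumed importable from gatenlp.gateworker.
from gatenlp import Document
from gatenlp.gateworker import GateWorker, GateWorkerAnnotator

docs = [Document("Some text to annotate."), Document("Another document.")]
gw = GateWorker()
try:
    annotator = GateWorkerAnnotator("annie.xgapp", gw)
    annotator.start()                        # controller-started callbacks, once per run
    docs = [annotator(doc) for doc in docs]
    annotator.finish()                       # controller-finished callbacks, once per run
finally:
    gw.close()                               # terminate the Java GATE worker process
```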
#### File: python-gatenlp/gatenlp/lib_spacy.py
```python
import traceback
from gatenlp import Document, AnnotationSet
from gatenlp.processing.annotator import Annotator
import spacy
import numpy as np
if int(spacy.__version__.split(".")[0]) < 3:
SPACY_IS_PARSED = lambda doc: doc.is_parsed
SPACY_IS_TAGGED = lambda doc: doc.is_tagged
SPACY_IS_SENTENCED = lambda doc: doc.is_sentenced
SPACY_IS_NERED = lambda doc: doc.is_nered
else:
SPACY_IS_PARSED = lambda doc: doc.has_annotation("DEP")
SPACY_IS_TAGGED = lambda doc: doc.has_annotation("TAG")
SPACY_IS_SENTENCED = lambda doc: doc.has_annotation("SENT_START")
SPACY_IS_NERED = lambda doc: doc.has_annotation("ENT_IOB")
class AnnSpacy(Annotator):
""" """
def __init__(
self,
pipeline=None,
outsetname="",
token_type="Token",
space_token_type="SpaceToken",
sentence_type="Sentence",
nounchunk_type="NounChunk",
add_tokens=True,
# add_spacetokens=True, # not sure how to do this yet
add_entities=True,
add_sentences=True,
add_nounchunks=True,
add_deps=True,
ent_prefix=None,
):
"""
Create an annotator for running a spacy pipeline on documents.
Args:
pipeline: if this is specified, a pre-configured spacy pipeline (default: "en_core_web_sm"
pipeline)
outsetname: the annotation set name where to put the annotations
token_type: the annotation type for the token annotations
space_token_type: type of any space token annotations
sentence_type: the annotation type for the sentence annotations
nounchunk_type: annotation type for noun chunks
add_tokens: if token annotations should be added
add_entities: if true, add entity annotations
add_sentences: if sentence annotations should be added
add_nounchunks: if nounchunks should be added
add_deps: if dependencies should be added
ent_prefix: the prefix to add to all entity annotation types
"""
self.outsetname = outsetname
self.token_type = token_type
self.sentence_type = sentence_type
self.add_entities = add_entities
self.ent_prefix = ent_prefix
self.space_token_type = space_token_type
self.nounchunk_type = nounchunk_type
self.add_tokens = add_tokens
self.add_sentences = add_sentences
self.add_nounchunks = add_nounchunks
self.add_deps = add_deps
if pipeline:
self.pipeline = pipeline
else:
self.pipeline = spacy.load("en_core_web_sm")
def __call__(self, doc, **kwargs):
spacy_doc = self.pipeline(doc.text)
spacy2gatenlp(
spacy_doc,
doc,
setname=self.outsetname,
token_type=self.token_type,
space_token_type=self.space_token_type,
sentence_type=self.sentence_type,
nounchunk_type=self.nounchunk_type,
add_tokens=self.add_tokens,
add_ents=self.add_entities,
add_nounchunks=self.add_nounchunks,
add_sents=self.add_sentences,
add_dep=self.add_deps,
ent_prefix=self.ent_prefix,
)
return doc
def apply_spacy(
nlp,
gatenlpdoc,
setname="",
containing_anns=None,
component_cfg=None,
retrieve_spans=None,
include_trf=False,
):
"""Run the spacy nlp pipeline on the gatenlp document and transfer the annotations.
This modifies the gatenlp document in place.
Args:
nlp: spacy pipeline
gatenlpdoc: gatenlp document
setname: annotation set to receive the annotations (Default value = "")
containing_anns: annotation set or iterable of annotations. If not None, only the text covered by each
of the annotations is analyzed. The annotations should not overlap.
component_cfg: the component config to use for Spacy
retrieve_spans: if not None, a list of additional span types to retrieve from the SpaCy document
include_trf: if True, also transfer the spacy transformer data (default: False, since including it can break the document viewer)
Returns:
The modified document.
"""
if containing_anns:
component_config = None
if isinstance(containing_anns, AnnotationSet):
annsiter = containing_anns.fast_iter()
else:
annsiter = containing_anns
# texts = [gatenlpdoc[ann.start : ann.end] for ann in annsiter]
# if component_cfg:
# component_config = {component_cfg: ann.features.to_dict()}
for ann in annsiter:
if component_cfg:
component_config = {component_cfg: ann.features.to_dict()}
covered = gatenlpdoc[ann.start : ann.end]
spacydoc = nlp(covered, component_cfg=component_config)
spacy2gatenlp(
spacydoc,
gatenlpdoc=gatenlpdoc,
setname=setname,
start_offset=ann.start,
retrieve_spans=retrieve_spans,
include_trf=include_trf,
)
elems = dir(spacydoc._)
for elem in elems:
if elem not in ["get", "set", "has", "trf_data"]:
ann.features[elem] = spacydoc._.get(elem)
if include_trf:
ann.features["trf"] = gatenlpdoc.features["trf"]
del gatenlpdoc.features["trf"]
return gatenlpdoc
else:
spacydoc = nlp(gatenlpdoc.text)
return spacy2gatenlp(
spacydoc, gatenlpdoc=gatenlpdoc, setname=setname, include_trf=include_trf
)
def get_tok_transformers(doc):
"""
This function gets spacy transformer data and produces, for each token, the mean of the transformer
vectors of the token's parts. It should still be expanded to handle tokens that are duplicated in
the alignment.
:param doc: a spacy document already processed with a transformer model
:return: dict mapping token indices to a dictionary with the token text and the mean of the
    transformer vectors of the token's parts
"""
toks = [tok for tok in doc]
    trf = doc._.trf_data.tensors
    align = doc._.trf_data.align
    # flatten the tensors (ndarray, not torch tensors) so the alignment indices can be applied directly
    _x, _y, z = trf[0].shape
    trf[0].shape = (1, -1, z)
    res = {}
    for tok, parts in zip(toks, align):
        idxs = [i for row in parts.data for i in row]
        if len(idxs) > 0:
            # average over all wordpiece vectors that belong to this token
            tf = np.mean(trf[0][:, idxs, :], axis=1)
            res[tok.i] = {"token": tok.text, "trf": tf}
    return res
def spacy2gatenlp(
spacydoc,
gatenlpdoc=None,
setname="",
token_type="Token",
space_token_type="SpaceToken",
sentence_type="Sentence",
nounchunk_type="NounChunk",
add_tokens=True,
# add_spacetokens=True, # not sure how to do this yet
add_ents=True,
add_sents=True,
add_nounchunks=True,
add_dep=True,
ent_prefix=None,
start_offset=0,
retrieve_spans=None,
include_trf=False,
):
"""Convert a spacy document to a gatenlp document. If a gatenlp document is already
provided, add the annotations from the spacy document to it. In this case the
original gatenlpdoc is used and gets modified.
Args:
spacydoc: a spacy document
gatenlpdoc: if None, a new gatenlp document is created otherwise this
document is added to. (Default value = None)
setname: the annotation set name to which the annotations get added, empty string
for the default annotation set.
token_type: the annotation type to use for tokens (Default value = "Token")
space_token_type: the annotation type to use for space tokens (Default value = "SpaceToken")
sentence_type: the annotation type to use for sentence annotations (Default value = "Sentence")
nounchunk_type: the annotation type to use for noun chunk annotations (Default value = "NounChunk")
add_tokens: should annotations for tokens get added? If not, dependency parser
info cannot be added either. (Default value = True)
add_ents: should annotations for entities get added
add_sents: should sentence annotations get added (Default value = True)
add_nounchunks: should noun chunk annotations get added (Default value = True)
add_dep: should dependency parser information get added (Default value = True)
ent_prefix: prefix to add to all entity annotation types (Default value = None)
start_offset: if a gatenlp document is specified, the offset where the processed text starts
    can be given. This allows processing just a part of a document with spacy and then adding
    the annotations back to the document at the corresponding position.
retrieve_spans: if not None, a list of additional Spacy span types to retrieve
Returns:
the new or modified Document
"""
# add_spacetokens: (Default value = True)
# not sure how to do this yet
if retrieve_spans is None:
retrieve_spans = []
if gatenlpdoc is None:
retdoc = Document(spacydoc.text)
start_offset = 0
else:
retdoc = gatenlpdoc
toki2annid = {}
annset = retdoc.annset(setname)
if include_trf:
try:
# Extract transformer for tokens i (using all token parts)
trans = get_tok_transformers(spacydoc)
except Exception:
print("exception get tok_transformers")
print(traceback.format_exc())
trans = {}
for tok in spacydoc:
from_off = tok.idx
to_off = tok.idx + len(tok)
# is_space = tok.is_space
fm = {
"_i": tok.i,
"is_alpha": tok.is_alpha,
"is_bracket": tok.is_bracket,
"is_currency": tok.is_currency,
"is_digit": tok.is_digit,
"is_left_punct": tok.is_left_punct,
"is_lower": tok.is_lower,
"is_oov": tok.is_oov,
"is_punct": tok.is_punct,
"is_quote": tok.is_quote,
"is_right_punct": tok.is_right_punct,
"is_sent_start": tok.is_sent_start,
"is_space": tok.is_space,
"is_stop": tok.is_stop,
"is_title": tok.is_title,
"is_upper": tok.is_upper,
"lang": tok.lang_,
"lemma": tok.lemma_,
"like_email": tok.like_email,
"like_num": tok.like_num,
"like_url": tok.like_url,
"orth": tok.orth,
"pos": tok.pos_,
"prefix": tok.prefix_,
"prob": tok.prob,
"rank": tok.rank,
"sentiment": tok.sentiment,
"tag": tok.tag_,
"shape": tok.shape_,
"suffix": tok.suffix_,
}
if SPACY_IS_NERED(spacydoc) and add_ents:
fm["ent_type"] = tok.ent_type_
if SPACY_IS_PARSED(spacydoc) and add_dep:
fm["dep"] = tok.dep_
if include_trf:
try:
if trans[tok.i]["token"] != tok.text:
print(
f"token does not match {tok.i} , {tok.text} != {trans[tok.i]['token']} "
)
fm["trf"] = trans[tok.i]["trf"]
except Exception:
# print(traceback.format_exc())
# print(f"exception on token {tok.i} with ${tok.text}$ ")
pass # fm["trf"] = None
if tok.is_space:
anntype = space_token_type
else:
anntype = token_type
annid = annset.add(
from_off + start_offset, to_off + start_offset, anntype, fm
).id
toki2annid[tok.i] = annid
# print("Added annotation with id: {} for token {}".format(annid, tok.i))
ws = tok.whitespace_
        if len(ws) > 0:  # add a space token annotation for any trailing whitespace
annset.add(
to_off + start_offset,
to_off + len(ws) + start_offset,
space_token_type,
{"is_space": True},
)
# if we have a dependency parse, now also add the parse edges
if SPACY_IS_PARSED(spacydoc) and add_tokens and add_dep:
for tok in spacydoc:
ann = annset.get(toki2annid[tok.i])
ann.features["head"] = toki2annid[tok.head.i]
ann.features["left_edge"] = toki2annid[tok.left_edge.i]
ann.features["right_edge"] = toki2annid[tok.right_edge.i]
if spacydoc.ents and add_ents:
for ent in spacydoc.ents:
if ent_prefix:
entname = ent_prefix + ent.label_
else:
entname = ent.label_
annset.add(
ent.start_char + start_offset,
ent.end_char + start_offset,
entname,
{"lemma": ent.lemma_},
)
if spacydoc.sents and add_sents:
for sent in spacydoc.sents:
annset.add(
sent.start_char + start_offset,
sent.end_char + start_offset,
sentence_type,
{},
)
if spacydoc.noun_chunks and add_nounchunks:
for chunk in spacydoc.noun_chunks:
annset.add(
chunk.start_char + start_offset,
chunk.end_char + start_offset,
nounchunk_type,
{},
)
for spanType in retrieve_spans:
for span in spacydoc.spans[spanType]:
annset.add(
span.start_char + start_offset,
span.end_char + start_offset,
spanType,
{},
)
if include_trf:
try:
retdoc.features["trf"] = spacydoc._.trf_data
        except Exception:
retdoc.features["trf"] = None
return retdoc
```
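A minimal sketch of converting a spacy analysis into gatenlp annotations with `spacy2gatenlp`; it assumes spacy and the `en_core_web_sm` model are installed:
```python
import spacy
from gatenlp import Document
from gatenlp.lib_spacy import spacy2gatenlp

nlp = spacy.load("en_core_web_sm")
doc = Document("Barack Obama visited Berlin.")
doc = spacy2gatenlp(nlp(doc.text), gatenlpdoc=doc, setname="spacy")
anns = doc.annset("spacy")
print(len(anns.with_type("Token")), len(anns.with_type("Sentence")))
```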
#### File: pam/pampac/actions.py
```python
from abc import ABC, abstractmethod
from gatenlp import Annotation
from gatenlp.features import Features
class Getter(ABC):
"""
Common base class of all Getter helper classes.
"""
@abstractmethod
def __call__(self, succ, context=None, location=None):
pass
def _get_match(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the match info for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info
resultidx: index of the result in success
matchidx: if there is more than one matching match info with that name, which one to return
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the match info or None
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
return matches[matchidx]
# pylint: disable=R0912
def _get_span(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the span for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info, if None, uses the entire span of the result
resultidx: index of the result in success
matchidx: if there is more than one match info with that name, which one to return, if no name, ignored
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the span or None if no Span exists
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
if name:
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
ret = matches[matchidx].get("span")
else:
ret = res.span
if ret is None:
if silent_fail:
return None
else:
raise Exception("No span found")
return ret
class Actions:
"""
A container to run several actions for a rule.
"""
def __init__(
self,
*actions,
):
"""
Wrap several actions for use in a rule.
Args:
*actions: any number of actions to run.
"""
self.actions = list(actions)
def __call__(self, succ, context=None, location=None):
"""
Invokes the actions defined for this wrapper in sequence and
returns one of the following: for no wrapped actions, no action is invoked and None is returned;
for exactly one action the return value of that action is returned, for 2 or more actions
a list with the return values of each of those actions is returned.
Args:
succ: the success object
context: the context
location: the location
Returns: None, action return value or list of action return values
"""
if len(self.actions) == 1:
return self.actions[0](succ, context=context, location=location)
elif len(self.actions) == 0:
return None
else:
ret = []
for action in self.actions:
ret.append(action(succ, context=context, location=location))
return ret
def add(self, action, tofront=False):
"""
Add an action to the list of existing actions.
Args:
action: the action to add
tofront: if True, add as first instead of last action
"""
if tofront:
self.actions.insert(0, action)
else:
self.actions.append(action)
class AddAnn:
"""
Action for adding an annotation.
"""
def __init__(
self,
name=None,
ann=None, # create a copy of this ann retrieved with GetAnn
type=None, # or create a new annotation with this type
annset=None, # if not none, create in this set instead of the one used for matching
features=None,
span=None, # use literal span, GetSpan, if none, span from match
resultidx=0,
matchidx=0,
silent_fail=False,
): # pylint: disable=W0622
"""
Create an action for adding a new annotation to the outset.
Args:
name: the name of the match to use for getting the annotation span, if None, use the
whole span of each match
ann: either an Annotation which will be (deep) copied to create the new annotation, or
a GetAnn helper for copying the annotation the helper returns. If this is specified, the
other parameters for creating a new annotation are ignored.
type: the type of a new annotation to create
annset: if not None, create the new annotation in this set instead of the one used for matching
features: the features of a new annotation to create. This can be a GetFeatures helper for copying
the features from another annotation in the results
span: the span of the annotation, this can be a GetSpan helper for copying the span from another
annotation in the results
resultidx: the index of the result to use if more than one result is in the Success. If None,
the AddAnn action is performed for all results
matchidx: the index of the match info to use if more than one item matches the given name. If None,
the AddAnn action is performed for all match info items with that name.
silent_fail: if True and the annotation cannot be created for some reason, silently do nothing,
    otherwise raise an Exception.
"""
# span is either a span, the index of a match info to take the span from, or a callable that will return the
# span at firing time
assert type is not None or ann is not None
self.name = name
self.anntype = type
self.ann = ann
self.features = features
self.span = span
self.resultidx = resultidx
self.matchidx = matchidx
self.silent_fail = silent_fail
self.annset = annset
# pylint: disable=R0912
def _add4span(self, span, succ, context, location):
if span is None:
return
if self.annset is not None:
outset = self.annset
else:
outset = context.outset
if self.ann:
if isinstance(self.ann, Annotation):
outset.add_ann(self.ann.deepcopy())
else:
ann = self.ann(succ)
if ann is None:
if self.silent_fail:
return
else:
raise Exception("No matching annotation found")
outset.add_ann(ann)
else:
if self.span:
if callable(self.span):
span = self.span(succ, context=context, location=location)
else:
span = self.span
if callable(self.anntype):
anntype = self.anntype(succ, context=context, location=location)
else:
anntype = self.anntype
if self.features:
if callable(self.features):
features = self.features(succ, context=context, location=location)
else:
# NOTE: if we got a dictionary where some values are helpers, we need to run the helper
# and replace the value with the result. However, this would change the original dictionary
# just the first time if there are several matches, so we always shallow copy the features
# first!
features = self.features.copy()
for k, v in features.items():
if isinstance(v, Getter):
features[k] = v(succ, context=context, location=location)
else:
features = None
outset.add(span.start, span.end, anntype, features=features)
def _add4result(self, succ, resultidx, context, location):
if self.matchidx is None:
for matchidx in range(len(succ[resultidx].matches)):
span = _get_span(succ, self.name, resultidx, matchidx, self.silent_fail)
# print(f"DEBUG: midx=None, running for {matchidx}, span={span}")
self._add4span(span, succ, context, location)
else:
span = _get_span(succ, self.name, resultidx, self.matchidx, self.silent_fail)
# print(f"DEBUG: running for {self.matchidx}, span={span}")
self._add4span(span, succ, context, location)
def __call__(self, succ, context=None, location=None):
if self.resultidx is None:
for resultidx in range(len(succ)):
# print(f"DEBUG: ridx=None, running for {resultidx}")
self._add4result(succ, resultidx, context, location)
else:
# print(f"DEBUG: running for {self.resultidx}")
self._add4result(succ, self.resultidx, context, location)
class UpdateAnnFeatures:
"""
Action for updating the features of an annotation.
"""
def __init__(
self,
name=None,
updateann=None,
fromann=None,
features=None,
replace=False, # replace existing features rather than updating
resultidx=0,
matchidx=0,
silent_fail=False,
deepcopy=False
):
"""
Create an UpdateAnnFeatures action. The features to use for updating can either come from
an existing annotation, an annotation fetched with a GetAnn annotation getter, or from
a features instance, a feature getter or a dictionary.
Args:
name: the name of the match to use for getting the annotation to modify (if updateann is not
specified). This must be None if updateann is specified.
updateann: if specified, update the features of this annotation. This can be either a literal
annotation or a GetAnn help to access another annotation from the result.
fromann: if specified use the features from this annotation. This can be either a literal annotation
or a GetAnn helper to access another annotation from the result.
features: the features to use for updating, either literal features or dictionary,
or a GetFeatures helper.
replace: if True, replace the existing features with the new ones, otherwise update the existing features.
resultidx: the index of the result to use, if there is more than one (default: 0)
matchidx: the index of a matching info element to use, if more than one matches exist
with the given name (default: 0)
silent_fail: if True, do not raise an exception if the features cannot be updated
deepcopy: if True, existing features are deep-copied, otherwise a shallow copy or new instance
is created.
"""
# check parameters for getting the features:
if fromann is None and features is None:
raise Exception("Either fromann or features must be specified")
if fromann is not None and features is not None:
raise Exception(
"Parameters fromann and features must not be both specified at the same time"
)
# check parameters for setting features:
if name is None and updateann is None:
raise Exception("Either name or updateann must be specified")
if name is not None and updateann is not None:
raise Exception(
"Parameters name and updateann must not be both specified at the same time"
)
self.name = name
self.updateann = updateann
self.fromann = fromann
self.replace = replace
self.features = features
self.resultidx = resultidx
self.matchidx = matchidx
self.silent_fail = silent_fail
self.deepcopy = deepcopy
# pylint: disable=R0912
def __call__(self, succ, context=None, location=None):
# determine the annotation to modify
if self.updateann is not None:
if isinstance(self.updateann, Annotation):
updateann = self.updateann
else:
updateann = self.updateann(succ, context=context, location=location)
else:
match = _get_match(
succ, self.name, self.resultidx, self.matchidx, self.silent_fail
)
if not match:
if self.silent_fail:
return
else:
raise Exception(f"Could not find the name {self.name}")
updateann = match.get("ann")
if updateann is None:
if self.silent_fail:
return
else:
raise Exception(
f"Could not find an annotation for the name {self.name}"
)
updatefeats = updateann.features
# determine the features to use: either from an annotation/annotation getter or from
# features or a features getter
if self.fromann is not None:
if isinstance(self.fromann, Annotation):
fromfeats = self.fromann.features
else:
ann = self.fromann(succ)
if ann is None:
if self.silent_fail:
return
else:
raise Exception("No matching source annotation found")
fromfeats = ann.features
else: # get it from self.features
if callable(self.features):
fromfeats = self.features(succ, context=context, location=location)
else:
fromfeats = self.features
# make sure we have features and optionally make sure we have a deep copy
fromfeats = Features(fromfeats, deepcopy=self.deepcopy)
if self.replace:
updatefeats.clear()
updatefeats.update(fromfeats)
class RemoveAnn:
"""
Action for removing an annotation.
"""
def __init__(self, name=None,
annset=None,
resultidx=0, matchidx=0,
silent_fail=True):
"""
Create a remove annotation action.
Args:
name: the name of a match from which to get the annotation to remove
annset: the annotation set to remove the annotation from. This must be a mutable set and
usually should be an attached set and has to be a set which contains the annotation
to be removed. Note that with complex patterns this may remove annotations which are
still being matched from the copy in the pampac context at a later time!
resultidx: index of the result to use, if several (default: 0)
matchidx: index of the match to use, if several (default: 0)
silent_fail: if True, silently ignore the error of no annotation to get removed
"""
assert name is not None
assert annset is not None
self.name = name
self.annset = annset
self.resultidx = resultidx
self.matchidx = matchidx
self.silent_fail = silent_fail
def __call__(self, succ, context=None, location=None):
match = _get_match(
succ, self.name, self.resultidx, self.matchidx, self.silent_fail
)
if not match:
if self.silent_fail:
return
else:
raise Exception(f"Could not find the name {self.name}")
theann = match.get("ann")
if theann is None:
if self.silent_fail:
return
else:
raise Exception(
f"Could not find an annotation for the name {self.name}"
)
self.annset.remove(theann)
```
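These actions are normally attached to Pampac rules. A sketch of the typical pattern, assuming the documented `gatenlp.pam.pampac` API (`Pampac`, `Rule`, `Ann`) and made-up annotation types:
```python
from gatenlp import Document
from gatenlp.pam.pampac import Pampac, Rule, Ann, AddAnn

doc = Document("Some text")
tokens = doc.annset()
tokens.add(0, 4, "Token", {"pos": "NN"})
tokens.add(5, 9, "Token", {"pos": "VB"})

# for every Token with pos=NN, add a NounToken annotation over the matched span
rule = Rule(Ann("Token", features={"pos": "NN"}), AddAnn(type="NounToken"))
Pampac(rule).run(doc, doc.annset().with_type("Token"), outset=doc.annset("out"))
```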
#### File: processing/gazetteer/featuregazetteer.py
```python
from gatenlp.processing.annotator import Annotator
class FeatureGazetteer(Annotator):
def __init__(self,
stringgaz,
ann_type,
containing_type=None,
feature="",
annset_name="",
outset_name="",
out_type="Lookup",
match_at_start_only=True,
match_at_end_only=True,
processing_mode="add-features",
handle_multiple="first"
):
"""
Create a feature gazetteer. This gazetteer processes all annotations of some type in some
input annotation set and tries to match the value of some feature against the given string
gazetteer. If a match is found, the action defined through the processing_mode parameter is
taken. If the annotation does not have the feature or no match occurs, no action is performed.
The gazetteer uses any instance of StringGazetteer to perform the matches.
Args:
stringgaz:
ann_type:
containing_type:
feature:
annset_name:
outset_name:
out_type:
match_at_start_only:
match_at_end_only:
processing_mode:
handle_multiple:
"""
pass
    def __call__(self, doc, **kwargs):
pass
```
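The intended call pattern, per the docstring, would be roughly as follows; note the class body is still a stub, so this does nothing useful yet, and `gaz`/`doc` are assumed to exist (`gaz` being a StringGazetteer instance):
```python
# Hypothetical sketch of the intended usage of the (unimplemented) FeatureGazetteer.
fgaz = FeatureGazetteer(gaz, ann_type="Token", feature="lemma",
                        annset_name="", outset_name="", processing_mode="add-features")
fgaz(doc)  # would match each Token's "lemma" feature against the gazetteer
```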
#### File: processing/gazetteer/stringregex.py
```python
from gatenlp import Document
from gatenlp.processing.gazetteer.base import StringGazetteerAnnotator
class StringRegexAnnotator(StringGazetteerAnnotator):
"""
NOT YET IMPLEMENTED
"""
def __init__(self, source=None, source_fmt="file",
outset_name="", out_type="Match",
annset_name="", containing_type=None,
features=None,
skip_longest=False,
match="all",
engine='re'
):
"""
Create a StringRegexAnnotator and optionally load regex patterns.
Args:
source: where to load the regex rules from, format depends on source_fmt. If None, nothing is loaded.
source_fmt: the format of the source. Either "list" for a list of tuples, where the first element
is a compiled regular expression and the second element is a tuple. That tuple describes the
annotations to create for a match. The first element of the tuple is the annotation type or None
to use the out_type. The second element is a dictionary mapping each group number of the match
to a dictionary of features to assign. If the feature value is the string "$n" with n a group
number, then the value for that match group is used.
Or the source_fmt can be "file" in which case the rules are loaded from a file with that path.
outset_name: name of the output annotation set
out_type: default type for output annotations, if the type is not specified with the rule
annset_name: the input annotation set if matching is restricted to spans covered by
containing annotations.
containing_type: if this is not None, then matching is restricted to spans covered by annotations
of this type. The annotations should not overlap.
features: a dictionary of arbitrary default features to add to all annotations created
for matches from any of the rules loaded from the current source
skip_longest: if True, after a match, the next match is attempted after the longest current
match. If False, the next match is attempted at the next offset after the start of the
current match.
match: the strategy of which rule to apply. One of: "all": apply all matching rules.
"first": apply the first matching rule, do not attempt any others. "firstlongest":
try all rules, apply the first of all rules with the longest match. "alllongest":
try all rules, apply all rules with the longest match.
engine: identifies which Python regular expression engine to use. Currently either
    "re" or "regex" to use the package with the corresponding name. Only the package
used is attempted to get loaded.
"""
pass
def add(self, rule):
"""
Add a single rule.
Args:
rule: a tuple where the first element is a compiled regular expression and the second
element is a tuple that describes the annotations to create if a match occurs.
The first element of that tuple is the annotation type or None
to use the out_type. The second element is a dictionary mapping each group number of the match
to a dictionary of features to assign. If the feature value is the string "$n" with n a group
number, then the value for that match group is used.
"""
pass
def append(self, source, source_fmt="file", list_features=None):
"""
Load a list of rules.
Args:
source: where/what to load. See the init parameter description.
source_fmt: the format of the source, see the init parameter description.
list_features: if not None a dictionary of features to assign to annotations created
for any of the rules loaded by that method call.
"""
pass
def find_all(self, text, start, end):
# NOTE: this must be implemented for this gazetteer to be a proper StringGazetteer which
# can be used by the FeatureGazetteer
pass
def __call__(self, doc: Document, **kwargs):
pass
```
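The "list" source format described in the docstring can be illustrated with a small sketch; since the annotator is not yet implemented, this only shows the intended rule data structure:
```python
import re

# first element: a compiled regex; second element: (annotation type or None,
# {group number: features}), where "$n" stands for the text of match group n
rule = (
    re.compile(r"(\d{4})-(\d{2})-(\d{2})"),
    ("Date", {0: {"year": "$1", "month": "$2", "day": "$3"}}),
)
ann = StringRegexAnnotator(source=[rule], source_fmt="list", out_type="Match")
```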
#### File: gatenlp/serialization/default.py
```python
import io
import os
import sys
import yaml
from collections import defaultdict
# import ruyaml as yaml
try:
from yaml import CFullLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import FullLoader as Loader, Dumper
# prefer the faster C implementations when they are available
yaml_loader = Loader
yaml_dumper = Dumper
from random import choice
from string import ascii_uppercase
from msgpack import pack, Unpacker
from gatenlp.document import Document
from gatenlp.annotation_set import AnnotationSet
from gatenlp.annotation import Annotation
from gatenlp.changelog import ChangeLog
from gatenlp.features import Features
from gatenlp.utils import get_nested
from gatenlp.urlfileutils import is_url, get_str_from_url, get_bytes_from_url
from gzip import open as gopen, compress, decompress
from bs4 import BeautifulSoup
from gatenlp.gatenlpconfig import gatenlpconfig
import bs4
import warnings
import pickle
try:
from bs4 import GuessedAtParserWarning
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
except ImportError as ex:
pass
# import orjson as usejson
# import json as usejson
# import rapidjson as usejson
# import ujson as usejson
# import hyperjson as usejson
import json
JSON_WRITE = "wt"
JSON_READ = "rt"
# # for replacing json by orjson
# class json:
# @staticmethod
# def load(fp):
# data = fp.read()
# return usejson.loads(data)
# @staticmethod
# def loads(data):
# return usejson.loads(data)
# @staticmethod
# def dump(obj, fp):
# buf = usejson.dumps(obj)
# fp.write(buf)
# @staticmethod
# def dumps(obj):
# return usejson.dumps(obj)
# # for replacing json with one of the other implementations
# class json:
# @staticmethod
# def load(fp):
# return usejson.load(fp)
# @staticmethod
# def loads(data):
# return usejson.loads(data)
# @staticmethod
# def dump(obj, fp):
# buf = usejson.dump(obj, fp)
# @staticmethod
# def dumps(obj):
# return usejson.dumps(obj)
# TODO: for ALL save options, allow to filter the annotations that get saved!
# TODO: then use this show only limited set of annotations in the viewer
# TODO: create Document.display(....) to show document in various ways in the current
# environment, e.g. Jupyter notebook, select anns, configure colour palette, size etc.
# TODO: when loading from a URL, allow for deciding on the format based on the mime type!
# So if we do not have the format, we should get the header for the file, check the mime type and see
# if we have a loder registered for that and then let the loader do the rest of the work. This may
# need loaders to be able to use an already open stream.
TWITTER_DEFAULT_INCLUDE_FIELDS = [
"id_str",
"user.id_str",
"user.screen_name",
"user.name" "created_at",
"is_quote_status",
"quote_count",
"retweet_count",
"favourite_count",
"favourited",
"retweeted",
"lang",
"$is_retweet_status",
"retweeted_status.user.screen_name",
]
class JsonSerializer:
"""
This class performs the saving and load of Documents and ChangeLog instances to and from the
BDOC JSON format files, optionally with gzip compression.
"""
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
offset_type=None,
offset_mapper=None,
gzip=False,
annsets=None,
**kwargs,
):
"""
Args:
clazz: the class of the object that gets saved
inst: the object to get saved
to_ext: where to save to, this should be a file path, only one of to_ext and to_mem should be specified
to_mem: if True, return a String serialization
offset_type: the offset type to use for saving, if None (default) use "p" (Python)
offset_mapper: the offset mapper to use, only needed if the type needs to get converted
gzip: if True, the JSON gets gzip compressed
annsets: which annotation sets and types to include, list of set names or (setanmes, types) tuples
**kwargs:
"""
d = inst.to_dict(offset_type=offset_type, offset_mapper=offset_mapper, annsets=annsets, **kwargs)
if to_mem:
if gzip:
                return compress(json.dumps(d).encode("UTF-8"))
else:
return json.dumps(d)
else:
if gzip:
with gopen(to_ext, JSON_WRITE) as outfp:
json.dump(d, outfp)
else:
with open(to_ext, JSON_WRITE) as outfp:
json.dump(d, outfp)
@staticmethod
def save_gzip(clazz, inst, **kwargs):
"""
Invokes the save method with gzip=True
"""
JsonSerializer.save(clazz, inst, gzip=True, **kwargs)
@staticmethod
def load(
clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
gzip: (Default value = False)
**kwargs:
Returns:
"""
# print("RUNNING load with from_ext=", from_ext, " from_mem=", from_mem)
        if (from_ext is None) == (from_mem is None):
            raise Exception("Exactly one of from_ext and from_mem must be specified")
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
# print("DEBUG: we got a URL")
if gzip:
from_mem = get_bytes_from_url(extstr)
else:
from_mem = get_str_from_url(extstr, encoding="utf-8")
else:
# print("DEBUG: not a URL !!!")
pass
if from_mem is not None:
if gzip:
d = json.loads(decompress(from_mem).decode("UTF-8"))
else:
d = json.loads(from_mem)
doc = clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)
else: # from_ext must have been not None and a path
if gzip:
with gopen(extstr, JSON_READ) as infp:
d = json.load(infp)
else:
with open(extstr, JSON_READ) as infp:
d = json.load(infp)
doc = clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)
return doc
@staticmethod
def load_gzip(clazz, **kwargs):
"""
Args:
clazz:
**kwargs:
Returns:
"""
return JsonSerializer.load(clazz, gzip=True, **kwargs)
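# Minimal usage sketch for JsonSerializer (example only): round-trip a document
# through the in-memory BDOC JSON string representation.
def _demo_json_roundtrip():  # pragma: no cover - example only
    doc = Document("A tiny example document")
    doc.annset().add(2, 6, "Token")
    json_str = JsonSerializer.save(Document, doc, to_mem=True)
    doc2 = JsonSerializer.load(Document, from_mem=json_str)
    assert doc2.text == doc.text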
class PickleSerializer:
"""
This class performs the saving and load of Documents and ChangeLog instances to and from pickle format.
"""
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
offset_type=None,
offset_mapper=None,
gzip=False,
**kwargs,
):
"""
Args:
clazz: the class of the object that gets saved
inst: the object to get saved
to_ext: where to save to, this should be a file path, only one of to_ext and to_mem should be specified
to_mem: if True, return a String serialization
offset_type: the offset type to use for saving, if None (default) use "p" (Python)
offset_mapper: the offset mapper to use, only needed if the type needs to get converted
gzip: must be False, gzip is not supported
**kwargs:
"""
if gzip:
raise Exception("Gzip not supported for pickle")
if to_mem:
return pickle.dumps(inst, protocol=-1)
else:
with open(to_ext, "wb") as outfp:
pickle.dump(inst, outfp, protocol=-1)
@staticmethod
def load(
clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
gzip: (Default value = False) must be False, True not supported
**kwargs:
Returns:
"""
# print("RUNNING load with from_ext=", from_ext, " from_mem=", from_mem)
        if (from_ext is None) == (from_mem is None):
            raise Exception("Exactly one of from_ext and from_mem must be specified")
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
from_mem = get_bytes_from_url(extstr)
else:
# print("DEBUG: not a URL !!!")
pass
if from_mem is not None:
doc = pickle.loads(from_mem)
else: # from_ext must have been not None and a path
with open(extstr, "rb") as infp:
doc = pickle.load(infp)
return doc
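# Minimal usage sketch for PickleSerializer (example only): round-trip a
# document through an in-memory pickle byte string.
def _demo_pickle_roundtrip():  # pragma: no cover - example only
    doc = Document("pickle demo document")
    data = PickleSerializer.save(Document, doc, to_mem=True)
    doc2 = PickleSerializer.load(Document, from_mem=data)
    assert doc2.text == doc.text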
class PlainTextSerializer:
""" """
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
offset_type=None,
offset_mapper=None,
encoding="UTF-8",
gzip=False,
**kwargs,
):
"""
Args:
clazz:
inst:
to_ext: (Default value = None)
to_mem: (Default value = None)
offset_type: (Default value = None)
offset_mapper: (Default value = None)
encoding: (Default value = "UTF-8")
gzip: (Default value = False)
**kwargs:
Returns:
"""
txt = inst.text
if txt is None:
txt = ""
if to_mem:
if gzip:
                return compress(txt.encode(encoding))
else:
return txt
else:
if gzip:
with gopen(to_ext, "wt", encoding=encoding) as outfp:
outfp.write(txt)
else:
with open(to_ext, "wt", encoding=encoding) as outfp:
outfp.write(txt)
@staticmethod
def save_gzip(clazz, inst, **kwargs):
"""
Args:
clazz:
inst:
**kwargs:
Returns:
"""
PlainTextSerializer.save(clazz, inst, gzip=True, **kwargs)
@staticmethod
def load(
clazz,
from_ext=None,
from_mem=None,
offset_mapper=None,
encoding="UTF-8",
gzip=False,
**kwargs,
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
encoding: (Default value = "UTF-8")
gzip: (Default value = False)
**kwargs:
Returns:
"""
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
if gzip:
from_mem = get_bytes_from_url(extstr)
else:
from_mem = get_str_from_url(extstr, encoding=encoding)
if from_mem is not None:
if gzip:
txt = decompress(from_mem).decode(encoding)
else:
txt = from_mem
doc = Document(txt)
else:
if gzip:
with gopen(extstr, "rt", encoding=encoding) as infp:
txt = infp.read()
else:
with open(extstr, "rt", encoding=encoding) as infp:
txt = infp.read()
doc = Document(txt)
return doc
@staticmethod
def load_gzip(clazz, **kwargs):
"""
Args:
clazz:
**kwargs:
Returns:
"""
return PlainTextSerializer.load(clazz, gzip=True, **kwargs)
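# Minimal usage sketch for PlainTextSerializer (example only): gzip round-trip
# in memory; only the text survives, annotations and features are dropped.
def _demo_plaintext_gzip_roundtrip():  # pragma: no cover - example only
    doc = Document("plain text demo")
    data = PlainTextSerializer.save(Document, doc, to_mem=True, gzip=True)
    doc2 = PlainTextSerializer.load(Document, from_mem=data, gzip=True)
    assert doc2.text == doc.text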
class YamlSerializer:
""" """
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
offset_type=None,
offset_mapper=None,
gzip=False,
annsets=None,
**kwargs,
):
"""
Args:
clazz:
inst:
to_ext: (Default value = None)
to_mem: (Default value = None)
offset_type: (Default value = None)
offset_mapper: (Default value = None)
gzip: (Default value = False)
            annsets: which annotation sets and types to include, list of set names or (setnames, types) tuples
**kwargs:
"""
d = inst.to_dict(offset_type=offset_type, offset_mapper=offset_mapper, annsets=annsets, **kwargs)
if to_mem:
if gzip:
                return compress(yaml.dump(d, Dumper=yaml_dumper).encode("UTF-8"))
else:
return yaml.dump(d, Dumper=yaml_dumper)
else:
if gzip:
with gopen(to_ext, "wt") as outfp:
yaml.dump(d, outfp, Dumper=yaml_dumper)
else:
with open(to_ext, "wt") as outfp:
yaml.dump(d, outfp, Dumper=yaml_dumper)
@staticmethod
def save_gzip(clazz, inst, **kwargs):
"""
Args:
clazz:
inst:
**kwargs:
Returns:
"""
YamlSerializer.save(clazz, inst, gzip=True, **kwargs)
@staticmethod
def load(
clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
gzip: (Default value = False)
**kwargs:
Returns:
"""
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
if gzip:
from_mem = get_bytes_from_url(extstr)
else:
from_mem = get_str_from_url(extstr, encoding="utf-8")
if from_mem is not None:
if gzip:
d = yaml.load(decompress(from_mem).decode("UTF-8"), Loader=yaml_loader)
else:
d = yaml.load(from_mem, Loader=yaml_loader)
doc = clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)
else:
if gzip:
with gopen(extstr, "rt") as infp:
d = yaml.load(infp, Loader=yaml_loader)
else:
with open(extstr, "rt") as infp:
d = yaml.load(infp, Loader=yaml_loader)
doc = clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)
return doc
@staticmethod
def load_gzip(clazz, **kwargs):
"""
Args:
clazz:
**kwargs:
Returns:
"""
return YamlSerializer.load(clazz, gzip=True, **kwargs)
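# Minimal usage sketch for YamlSerializer (example only): round-trip a document
# through the in-memory BDOC YAML string representation.
def _demo_yaml_roundtrip():  # pragma: no cover - example only
    doc = Document("yaml demo document")
    yaml_str = YamlSerializer.save(Document, doc, to_mem=True)
    doc2 = YamlSerializer.load(Document, from_mem=yaml_str)
    assert doc2.text == doc.text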
MSGPACK_VERSION_HDR = "sm2"
class MsgPackSerializer:
""" """
@staticmethod
def document2stream(doc: Document, stream):
"""
Args:
doc: Document:
stream:
doc: Document:
Returns:
"""
pack(MSGPACK_VERSION_HDR, stream)
pack(doc.offset_type, stream)
pack(doc.text, stream)
pack(doc.name, stream)
pack(doc._features.to_dict(), stream)
pack(len(doc._annotation_sets), stream)
for name, annset in doc._annotation_sets.items():
pack(name, stream)
pack(annset._next_annid, stream)
pack(len(annset), stream)
for ann in annset.fast_iter():
pack(ann.type, stream)
pack(ann.start, stream)
pack(ann.end, stream)
pack(ann.id, stream)
pack(ann.features.to_dict(), stream)
@staticmethod
def stream2document(stream):
"""
Args:
stream:
Returns:
"""
u = Unpacker(stream)
version = u.unpack()
if version != MSGPACK_VERSION_HDR:
raise Exception("MsgPack data starts with wrong version")
doc = Document()
doc.offset_type = u.unpack()
doc._text = u.unpack()
doc.name = u.unpack()
doc._features = Features(u.unpack())
nsets = u.unpack()
setsdict = dict()
doc.annotation_sets = setsdict
for iset in range(nsets):
sname = u.unpack()
if sname is None:
sname = ""
annset = AnnotationSet(name=sname, owner_doc=doc)
annset._next_annid = u.unpack()
nanns = u.unpack()
for iann in range(nanns):
atype = u.unpack()
astart = u.unpack()
aend = u.unpack()
aid = u.unpack()
afeatures = u.unpack()
ann = Annotation(astart, aend, atype, annid=aid, features=afeatures)
annset._annotations[aid] = ann
setsdict[sname] = annset
doc._annotation_sets = setsdict
return doc
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
offset_type=None,
offset_mapper=None,
**kwargs,
):
"""
Args:
clazz:
inst:
to_ext: (Default value = None)
to_mem: (Default value = None)
offset_type: (Default value = None)
offset_mapper: (Default value = None)
**kwargs:
Returns:
"""
if isinstance(inst, Document):
writer = MsgPackSerializer.document2stream
elif isinstance(inst, ChangeLog):
raise Exception("Not implemented yet")
else:
raise Exception("Object not supported")
if to_mem:
f = io.BytesIO()
else:
f = open(to_ext, "wb")
writer(inst, f)
if to_mem:
return f.getvalue()
else:
f.close()
@staticmethod
def load(clazz, from_ext=None, from_mem=None, offset_mapper=None, **kwargs):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
**kwargs:
Returns:
"""
if clazz == Document:
reader = MsgPackSerializer.stream2document
elif clazz == ChangeLog:
raise Exception("Not implemented yet")
else:
raise Exception("Object not supported")
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
from_mem = get_bytes_from_url(extstr)
if from_mem:
f = io.BytesIO(from_mem)
else:
f = open(extstr, "rb")
doc = reader(f)
return doc
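# Minimal usage sketch for MsgPackSerializer (example only): round-trip a
# document through the in-memory binary representation; document2stream writes
# the version header, offset type, text, name, features, then the annotation sets.
def _demo_msgpack_roundtrip():  # pragma: no cover - example only
    doc = Document("msgpack demo document")
    doc.annset().add(0, 7, "Token")
    data = MsgPackSerializer.save(Document, doc, to_mem=True)
    doc2 = MsgPackSerializer.load(Document, from_mem=data)
    assert doc2.text == doc.text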
JS_JQUERY_URL = "https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"
JS_GATENLP_URL = "https://unpkg.com/[email protected]/gatenlp-ann-viewer.js"
JS_JQUERY = f"<script src=\"{JS_JQUERY_URL}\"></script>"
JS_GATENLP = f"<script src=\"{JS_GATENLP_URL}\"></script>"
HTML_TEMPLATE_FILE_NAME = "gatenlp-ann-viewer.html"
JS_GATENLP_FILE_NAME = "gatenlp-ann-viewer-merged.js"
html_ann_viewer_serializer_js_loaded = False
class HtmlAnnViewerSerializer:
""" """
@staticmethod
def javascript():
"""
        Return the JavaScript needed for the HTML annotation viewer.

        Returns: JavaScript string.
"""
jsloc = os.path.join(
os.path.dirname(__file__), "_htmlviewer", JS_GATENLP_FILE_NAME
)
if not os.path.exists(jsloc):
raise Exception(
"Could not find JavsScript file, {} does not exist".format(jsloc)
)
with open(jsloc, "rt", encoding="utf-8") as infp:
js = infp.read()
js = """<script type="text/javascript">""" + js + "</script>"
return js
@staticmethod
def init_javscript():
import IPython
IPython.display.display_html(HtmlAnnViewerSerializer.javascript(), raw=True)
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
notebook=False,
offline=False,
add_js=True,
htmlid=None,
stretch_height=False,
annsets=None,
doc_style=None,
**kwargs,
):
"""Convert a document to HTML for visualizing it.
Args:
clazz: the class of the object to save
inst: the instance/object to save
to_ext: the destination where to save to unless to_mem is given
to_mem: if true, ignores to_ext and returns the representation
notebook: if True only create a div which can be injected into a notebook or other HTML, otherwise
generate a full HTML document
            offline: if true, include all the JavaScript needed in the generated HTML, otherwise load the
                library from the internet.
            add_js: if true (default), add the necessary JavaScript either directly or by loading a library from
                the internet. If false, assume that the JavaScript is already there (only makes sense with
                notebook=True).
            htmlid: the id to use for HTML ids so it is possible to have several independent viewers in the
                same HTML page and to style the output from a separate notebook cell
            stretch_height: if False, rows 1 and 2 of the viewer will not have the height set, but only
                min and max height (default min is 10em for row1 and 7em for row2, max is the double of those).
                If True, no max height is set and instead the height is set to a percentage (default is
                67vh for row 1 and 30vh for row 2). The values used can be changed via gatenlpconfig.
annsets: if None, include all annotation sets and types, otherwise this should be a list of either
set names, or tuples, where the first entry is a set name and the second entry is either a type
name or list of type names to include.
            doc_style: if not None, any additional styling for the document text box; if None, use whatever
                is defined in gatenlpconfig, or apply no extra styling.
kwargs: swallow any other kwargs.
Returns: if to_mem is True, returns the representation, otherwise None.
"""
if not isinstance(inst, Document):
raise Exception("Not a document!")
# TODO: why are we doing a deepcopy here?
doccopy = inst.deepcopy(annsets=annsets)
doccopy.to_offset_type("j")
        json_str = doccopy.save_mem(fmt="json", **kwargs)  # avoid shadowing the json module
htmlloc = os.path.join(
os.path.dirname(__file__), "_htmlviewer", HTML_TEMPLATE_FILE_NAME
)
if not os.path.exists(htmlloc):
raise Exception(
"Could not find HTML template, {} does not exist".format(htmlloc)
)
with open(htmlloc, "rt", encoding="utf-8") as infp:
html = infp.read()
txtcolor = gatenlpconfig.doc_html_repr_txtcolor
if notebook:
str_start = "<!--STARTDIV-->"
str_end = "<!--ENDDIV-->"
idx1 = html.find(str_start) + len(str_start)
idx2 = html.find(str_end)
if htmlid:
rndpref = str(htmlid)
else:
rndpref = "".join(choice(ascii_uppercase) for i in range(10))
html = html[idx1:idx2]
html = f"""<div><style>#{rndpref}-wrapper {{ color: {txtcolor} !important; }}</style>
<div id="{rndpref}-wrapper">
{html}
</div></div>"""
# replace the prefix with a random one
html = html.replace("GATENLPID", rndpref)
if offline:
# global html_ann_viewer_serializer_js_loaded
# if not html_ann_viewer_serializer_js_loaded:
if add_js:
jsloc = os.path.join(
os.path.dirname(__file__), "_htmlviewer", JS_GATENLP_FILE_NAME
)
if not os.path.exists(jsloc):
raise Exception(
"Could not find JavsScript file, {} does not exist".format(
jsloc
)
)
with open(jsloc, "rt", encoding="utf-8") as infp:
js = infp.read()
js = """<script type="text/javascript">""" + js + "</script>"
# html_ann_viewer_serializer_js_loaded = True
else:
js = ""
else:
js = JS_JQUERY + JS_GATENLP
if stretch_height:
height1 = gatenlpconfig.doc_html_repr_height1_stretch
height2 = gatenlpconfig.doc_html_repr_height2_stretch
else:
height1 = gatenlpconfig.doc_html_repr_height1_nostretch
height2 = gatenlpconfig.doc_html_repr_height2_nostretch
html = html.replace("$$JAVASCRIPT$$", js, 1).replace("$$JSONDATA$$", json, 1)
html = html.replace("$$HEIGHT1$$", height1, 1).replace(
"$$HEIGHT2$$", height2, 1
)
if doc_style is None:
doc_style = gatenlpconfig.doc_html_repr_doc_style
if doc_style is None:
doc_style = ""
html = html.replace("$$DOCTEXTSTYLE$$", doc_style, 1)
if to_mem:
return html
else:
with open(to_ext, "wt", encoding="utf-8") as outfp:
outfp.write(html)
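# Minimal usage sketch (example only, assumes the bundled HTML template and
# merged JS file are installed with the package): render a document to a
# standalone, self-contained HTML page for viewing its annotations.
def _demo_html_viewer():  # pragma: no cover - example only
    doc = Document("A small document to visualize")
    doc.annset().add(2, 7, "Token")
    return HtmlAnnViewerSerializer.save(Document, doc, to_mem=True, offline=True)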
class HtmlLoader:
""" """
@staticmethod
def load_rendered(
clazz,
from_ext=None,
from_mem=None,
parser=None,
markup_set_name="Original markups",
process_soup=None,
offset_mapper=None,
**kwargs,
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
parser: (Default value = None)
markup_set_name: (Default value = "Original markups")
process_soup: (Default value = None)
offset_mapper: (Default value = None)
**kwargs:
Returns:
"""
raise Exception("Rendered html parser not yet implemented")
@staticmethod
def load(
clazz,
from_ext=None,
from_mem=None,
parser="html.parser",
markup_set_name="Original markups",
encoding=None,
**kwargs,
):
"""Load a HTML file.
Args:
clazz: param from_ext:
from_ext: file our URL source
from_mem: string source
parser: one of "html.parser", "lxml", "lxml-xml", "html5lib" (default is "html.parser")
markup_set_name: the annotation set name for the set to contain the HTML
annotations (Default value = "Original markups")
encoding: the encoding to use for reading the file
"""
# NOTE: for now we have a simple heuristic for adding newlines to the text:
# before and after a block element, a newline is added unless there is already one
# NOTE: for now we use multi_valued_attributes=None which prevents attributes of the
# form "class='val1 val2'" to get converted into features with a list of values.
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
from_mem = get_str_from_url(extstr, encoding=encoding)
if from_mem:
bs = BeautifulSoup(from_mem, features=parser, multi_valued_attributes=None)
else:
with open(extstr, encoding=encoding) as infp:
bs = BeautifulSoup(infp, features=parser, multi_valued_attributes=None)
# we recursively iterate the tree depth first, going through the children
# and adding to a list that either contains the text or a dict with the information
# about annotations we want to add
nlels = {
"pre",
"br",
"p",
"div",
"tr",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"li",
"address",
"article",
"aside",
"blockquote",
"del",
"figure",
"figcaption",
"footer",
"header",
"hr",
"ins",
"main",
"nav",
"section",
"summary",
"input",
"legend",
"option",
"textarea",
"bdi",
"bdo",
"center",
"code",
"dfn",
"menu",
"dir",
"caption",
}
ignoreels = {"script", "style"}
docinfo = {"anninfos": [], "curoffset": 0, "curid": 0, "text": ""}
def walktree(el):
"""
Args:
el:
Returns:
"""
# print("DEBUG: type=", type(el))
if isinstance(el, bs4.element.Doctype):
# print("DEBUG: got doctype", type(el))
pass
elif isinstance(el, bs4.element.Comment):
# print("DEBUG: got Comment", type(el))
pass
elif isinstance(el, bs4.element.Script):
# print("DEBUG: got Script", type(el))
pass
elif isinstance(el, bs4.element.Tag):
# print("DEBUG: got tag: ", type(el), " name=",el.name)
# some tags we ignore completely:
if el.name in ignoreels:
return
# for some tags we insert a new line before, but only if we do not already have one
if not docinfo["text"].endswith("\n") and el.name in nlels:
docinfo["text"] += "\n"
# print("DEBUG: adding newline before at ", docinfo["curoffset"])
docinfo["curoffset"] += 1
ann = {
"type": el.name,
"features": el.attrs,
"id": docinfo["curid"],
"event": "start",
"start": docinfo["curoffset"],
}
thisid = docinfo["curid"]
docinfo["anninfos"].append(ann)
docinfo["curid"] += 1
for child in el.children:
walktree(child)
# for some tags we insert a new line after
if not docinfo["text"].endswith("\n") and el.name in nlels:
docinfo["text"] += "\n"
# print("DEBUG: adding newline after at ", docinfo["curoffset"])
docinfo["curoffset"] += 1
docinfo["anninfos"].append(
{"event": "end", "id": thisid, "end": docinfo["curoffset"]}
)
elif isinstance(el, bs4.element.NavigableString):
# print("DEBUG: got text: ", el)
text = str(el)
if text == "\n" and docinfo["text"].endswith("\n"):
return
docinfo["text"] += text
docinfo["curoffset"] += len(el)
else:
print("WARNING: odd element type", type(el))
walktree(bs)
# need to add the end corresponding to bs
# print("DEBUG: got docinfo:\n",docinfo)
id2anninfo = {} # from id to anninfo
nstart = 0
for anninfo in docinfo["anninfos"]:
if anninfo["event"] == "start":
nstart += 1
id2anninfo[anninfo["id"]] = anninfo
nend = 0
for anninfo in docinfo["anninfos"]:
if anninfo["event"] == "end":
nend += 1
end = anninfo["end"]
annid = anninfo["id"]
anninfo = id2anninfo[annid]
anninfo["end"] = end
# print("DEBUG: got nstart/nend", nstart, nend)
assert nstart == nend
# print("DEBUG: got id2anninfo:\n", id2anninfo)
doc = Document(docinfo["text"])
annset = doc.annset(markup_set_name)
for i in range(nstart):
anninfo = id2anninfo[i]
annset.add(
anninfo["start"],
anninfo["end"],
anntype=anninfo["type"],
features=anninfo["features"],
)
return doc
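# Minimal usage sketch for HtmlLoader (example only): parse HTML from a string;
# the markup becomes annotations in the "Original markups" set and newlines are
# inserted around block elements like h1 and p.
def _demo_html_load():  # pragma: no cover - example only
    html = "<html><body><h1>Title</h1><p>Some paragraph text.</p></body></html>"
    doc = HtmlLoader.load(Document, from_mem=html)
    markups = doc.annset("Original markups")
    assert any(ann.type == "p" for ann in markups)
    return doc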
class TweetV1Serializer:
@staticmethod
def doc2twitterv1dict(doc, annsets=None, prefix_sep=None):
d = doc.to_dict(annsets=annsets)
ret = {"full_text": doc.text}
ents = defaultdict(list)
for setname, annset in d.get("annotation_sets", {}).items():
for ann in annset.get("annotations", []):
anntype = ann["type"]
if prefix_sep is not None and setname != "":
anntype = setname + prefix_sep + anntype
annlist = ents[anntype]
twitterann = {
"indices": [ann["start"], ann["end"]]
}
twitterann.update(ann["features"])
annlist.append(twitterann)
ret["entities"] = ents
return ret
@staticmethod
def save(
clazz,
inst,
to_ext=None,
to_mem=None,
annsets=None,
prefix_sep=None,
**kwargs,
):
"""
Args:
clazz: the class of the object that gets saved
inst: the object to get saved
to_ext: where to save to, this should be a file path, only one of to_ext and to_mem should be specified
to_mem: if True, return a String serialization
            annsets: which annotation sets and types to include, list of set names or (setnames, types) tuples
            prefix_sep: if not None, prefix each annotation type with the name of the annotation set the
                annotation comes from, using the given string as the separator (can be the empty string for
                no separator). For annotations from the default set the type stays unchanged.
**kwargs:
"""
d = TweetV1Serializer.doc2twitterv1dict(inst, annsets=annsets, prefix_sep=prefix_sep)
if to_mem:
return json.dumps(d)
else:
with open(to_ext, JSON_WRITE) as outfp:
json.dump(d, outfp)
@staticmethod
def load(
clazz,
from_ext=None,
from_mem=None,
include_fields=None,
include_entities=True,
include_quote=False,
outsetname="Original markups",
tweet_ann="Tweet",
):
"""
Load a tweet from Twitter JSON format.
IMPORTANT: this is still very experimental, will change in the future!
Args:
clazz: internal use
from_ext: the file/url to load from
from_mem: string to load from
include_fields: a list of fields to include where nested field names are dot-separated, e.g.
"user.location". All these fields are included using the nested field name in either the
features of the tweet annotation with the Type specified, or the features of the document
if `tweet_ann` is None.
include_entities: create annotations for the tweet entities in the set with outsetname
include_quote: if True, add the quoted tweet after an empty line and treat it as a separate
tweet just like the original tweet.
            outsetname: the annotation set where to put entity annotations and the tweet annotation(s)
tweet_ann: the annotation type to use to span the tweet and contain all the features.
Returns:
document representing the tweet
"""
if from_ext is not None:
isurl, extstr = is_url(from_ext)
if isurl:
jsonstr = get_str_from_url(extstr, encoding="utf-8")
tweet = json.loads(jsonstr)
else:
with open(extstr, "rt", encoding="utf-8") as infp:
tweet = json.load(infp)
elif from_mem is not None:
tweet = json.loads(from_mem)
else:
raise Exception("Cannot load from None")
if tweet is None:
raise Exception("Could not decode Tweet JSON")
if tweet.get("truncated"):
text = get_nested(tweet, "extended_tweet.full_text")
else:
text = get_nested(tweet, "text")
if text is None:
raise Exception("No text field found")
quoted_status = None
if include_quote:
quoted_status = tweet.get("quoted_status")
if quoted_status is not None:
qtext = quoted_status.get("text", "")
text += "\n" + qtext
doc = Document(text)
anns = doc.annset(outsetname)
if tweet_ann:
ann = anns.add(0, len(text), tweet_ann)
features = ann.features
else:
features = doc.features
if include_fields is None:
include_fields = TWITTER_DEFAULT_INCLUDE_FIELDS
for field in include_fields:
if field.startswith("$"):
if field == "$is_retweet_status":
rs = get_nested(tweet, "retweeted_status", silent=True)
if rs is not None:
features[field] = True
continue
val = get_nested(tweet, field, silent=True)
if val is not None:
features[field] = val
if include_entities:
if tweet.get("truncated"):
entities = get_nested(tweet, "extended_tweet.entities", default={})
else:
entities = get_nested(tweet, "entities", default={})
for etype, elist in entities.items():
for ent in elist:
start, end = ent["indices"]
anns.add(start, end, etype)
# TODO: if we have a quoted_status, add features and entities from there:
# Essentially the same processing as for the original tweet, but at document offset
# len(tweet)+1 (2?)
return doc
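# Minimal usage sketch for TweetV1Serializer.load (example only, with a
# hand-built, hypothetical tweet dict): the text is extracted, selected fields
# become features, and the entities become annotations.
def _demo_tweet_load():  # pragma: no cover - example only
    tweet = {
        "id_str": "12345",
        "text": "Hello @world",
        "entities": {"user_mentions": [{"indices": [6, 12], "screen_name": "world"}]},
    }
    doc = TweetV1Serializer.load(Document, from_mem=json.dumps(tweet))
    anns = doc.annset("Original markups")
    assert any(ann.type == "user_mentions" for ann in anns)
    return doc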
class GateXmlLoader:
""" """
@staticmethod
def value4objectwrapper(text):
"""This may one day convert things like lists, maps, shared objects to Python, but for
now we always throw an exeption.
Args:
text: return:
Returns:
"""
raise Exception(
"Cannot load GATE XML which contains gate.corpora.ObjectWrapper data"
)
@staticmethod
def load(clazz, from_ext=None, ignore_unknown_types=False):
"""
Args:
clazz:
from_ext: (Default value = None)
ignore_unknown_types: (Default value = False)
Returns:
"""
# TODO: the code below is just an outline and needs work!
# TODO: make use of the test document created in repo project-python-gatenlp
import xml.etree.ElementTree as ET
isurl, extstr = is_url(from_ext)
if isurl:
xmlstring = get_str_from_url(extstr, encoding="utf-8")
root = ET.fromstring(xmlstring)
else:
tree = ET.parse(extstr)
root = tree.getroot()
# or: root = ET.fromstring(xmlstring)
# check we do have a GATE document
assert root.tag == "GateDocument"
assert root.attrib == {"version": "3"}
def parsefeatures(feats):
"""
Args:
feats:
Returns:
"""
features = {}
for feat in list(feats):
name = None
value = None
for el in list(feat):
if el.tag == "Name":
if el.get("className") == "java.lang.String":
name = el.text
else:
raise Exception(
"Odd Feature Name type: " + el.get("className")
)
elif el.tag == "Value":
cls_name = el.get("className")
if cls_name == "java.lang.String":
value = el.text
elif cls_name == "java.lang.Integer":
value = int(el.text)
elif cls_name == "java.lang.Long":
value = int(el.text)
elif cls_name == "java.math.BigDecimal":
value = float(el.text)
elif cls_name == "java.lang.Boolean":
value = bool(el.text)
# elif cls_name == "gate.corpora.ObjectWrapper":
# value = GateXmlLoader.value4objectwrapper(el.text)
else:
if ignore_unknown_types:
print(
f"Warning: ignoring feature with serialization type: {cls_name}",
file=sys.stderr,
)
else:
raise Exception(
"Unsupported serialization type: "
+ el.get("className")
)
if name is not None and value is not None:
features[name] = value
return features
# get the document features
docfeatures = {}
feats = root.findall("./GateDocumentFeatures/Feature")
docfeatures = parsefeatures(feats)
textwithnodes = root.findall("./TextWithNodes")
text = ""
node2offset = {}
curoff = 0
for item in textwithnodes:
if item.text:
print("Got item text: ", item.text)
text += item.text
# TODO HTML unescape item text
curoff += len(item.text)
for node in item:
nodeid = node.get("id")
node2offset[nodeid] = curoff
if node.tail:
# TODO: unescape item.text?
print("Gote node tail: ", node.tail)
text += node.tail
curoff += len(node.tail)
annsets = root.findall("./AnnotationSet")
annotation_sets = {} # map name - set
for annset in annsets:
if annset.get("Name"):
setname = annset.get("Name")
else:
setname = ""
annots = annset.findall("./Annotation")
annotations = []
maxannid = 0
for ann in annots:
annid = int(ann.attrib["Id"])
maxannid = max(maxannid, annid)
anntype = ann.attrib["Type"]
startnode = ann.attrib["StartNode"]
endnode = ann.attrib["EndNode"]
startoff = node2offset[startnode]
endoff = node2offset[endnode]
feats = ann.findall("./Feature")
features = parsefeatures(feats)
if len(features) == 0:
features = None
annotation = {
"id": annid,
"type": anntype,
"start": startoff,
"end": endoff,
"features": features,
}
annotations.append(annotation)
annset = {
"name": setname,
"annotations": annotations,
"next_annid": maxannid + 1,
}
annotation_sets[setname] = annset
docmap = {
"text": text,
"features": docfeatures,
"offset_type": "p",
"annotation_sets": annotation_sets,
}
doc = Document.from_dict(docmap)
return doc
def determine_loader(
clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
):
"""
Args:
clazz:
from_ext: (Default value = None)
from_mem: (Default value = None)
offset_mapper: (Default value = None)
gzip: (Default value = False)
**kwargs:
Returns:
"""
first = None
if from_mem:
first = from_mem[0]
else:
with open(from_ext, "rt") as infp:
first = infp.read(1)
if first == "{":
return JsonSerializer.load(
clazz,
from_ext=from_ext,
from_mem=from_mem,
offset_mapper=offset_mapper,
gzip=gzip,
**kwargs,
)
else:
return MsgPackSerializer.load(
clazz,
from_ext=from_ext,
from_mem=from_mem,
offset_mapper=offset_mapper,
gzip=gzip,
**kwargs,
)
DOCUMENT_SAVERS = {
"text/plain": PlainTextSerializer.save,
"text/plain+gzip": PlainTextSerializer.save_gzip,
"text": PlainTextSerializer.save,
"json": JsonSerializer.save,
"jsongz": JsonSerializer.save_gzip,
"bdocjs": JsonSerializer.save,
"pickle": PickleSerializer.save,
"bdocjsgz": JsonSerializer.save_gzip,
"text/bdocjs": JsonSerializer.save,
"text/bdocjs+gzip": JsonSerializer.save_gzip,
"yaml": YamlSerializer.save,
"bdocym": YamlSerializer.save,
"yamlgz": YamlSerializer.save_gzip,
"text/bdocym": YamlSerializer.save,
"text/bdocym+gzip+": YamlSerializer.save_gzip,
"msgpack": MsgPackSerializer.save,
"bdocmp": MsgPackSerializer.save,
"tweet-v1": TweetV1Serializer.save,
"text/bdocmp": MsgPackSerializer.save,
"application/msgpack": MsgPackSerializer.save,
"html-ann-viewer": HtmlAnnViewerSerializer.save,
}
DOCUMENT_LOADERS = {
"json": JsonSerializer.load,
"jsongz": JsonSerializer.load_gzip,
"bdocjs": JsonSerializer.load,
"bdocjsgz": JsonSerializer.load_gzip,
"text/bdocjs": JsonSerializer.load,
"text/bdocjs+gzip": JsonSerializer.load_gzip,
"yaml": YamlSerializer.load,
"yamlgz": YamlSerializer.load_gzip,
"bdocym": YamlSerializer.load,
"bdocymzg: ": YamlSerializer.load_gzip,
"text/bdocym": YamlSerializer.load,
"text/bdocym+gzip": YamlSerializer.load_gzip,
"msgpack": MsgPackSerializer.load,
"bdocmp": MsgPackSerializer.load,
"application/msgpack": MsgPackSerializer.load,
"text/bdocmp": MsgPackSerializer.load,
"jsonormsgpack": determine_loader,
"text/plain": PlainTextSerializer.load,
"text/plain+gzip": PlainTextSerializer.load_gzip,
"text": PlainTextSerializer.load,
"text/html": HtmlLoader.load,
"html": HtmlLoader.load,
"html-rendered": HtmlLoader.load_rendered,
"gatexml": GateXmlLoader.load,
"tweet-v1": TweetV1Serializer.load,
"pickle": PickleSerializer.load,
}
CHANGELOG_SAVERS = {
"json": JsonSerializer.save,
"text/bdocjs+gzip": JsonSerializer.save_gzip,
"text/bdocjs": JsonSerializer.save,
}
CHANGELOG_LOADERS = {
"json": JsonSerializer.load,
"text/bdocjs+gzip": JsonSerializer.load_gzip,
"text/bdocjs": JsonSerializer.load,
}
# map extensions to document types
EXTENSIONS = {
"bdocjs": "json",
"bdocym": "yaml",
"bdocym.gz": "text/bdocym+gzip",
"bdoc.gz": "text/bdocjs+gzip", # lets assume it is compressed json
"bdoc": "jsonormsgpack",
"bdocjs.gz": "text/bdocjs+gzip",
"bdocjson": "json",
"bdocmp": "msgpack",
"txt": "text/plain",
"txt.gz": "text/plain+gzip",
"html": "text/html",
"htm": "text/html",
"pickle": "pickle",
}
def get_handler(filespec, fmt, handlers, saveload, what):
"""
Args:
filespec:
fmt:
handlers:
saveload:
what:
Returns:
"""
msg = f"Could not determine how to {saveload} {what} for format {fmt} in module gatenlp.serialization.default"
if fmt:
handler = handlers.get(fmt)
if not handler:
raise Exception(msg)
return handler
else:
if not filespec: # in case of save_mem
raise Exception(msg)
if isinstance(filespec, os.PathLike):
wf = os.fspath(filespec)
elif isinstance(filespec, str):
wf = filespec
else:
raise Exception(msg)
name, ext = os.path.splitext(wf)
if ext == ".gz":
ext2 = os.path.splitext(name)[1]
if ext2:
ext2 = ext2[1:]
ext = ext2 + ext
elif ext:
ext = ext[1:]
fmt = EXTENSIONS.get(ext)
msg = f"Could not determine how to {saveload} {what} for format {fmt} and with " \
"extension {ext} in module gatenlp.serialization.default"
if not fmt:
raise Exception(msg)
handler = handlers.get(fmt)
if not handler:
raise Exception(msg)
return handler
def get_document_saver(filespec, fmt):
"""
Args:
filespec:
fmt:
Returns:
"""
return get_handler(filespec, fmt, DOCUMENT_SAVERS, "save", "document")
def get_document_loader(filespec, fmt):
"""
Args:
filespec:
fmt:
Returns:
"""
return get_handler(filespec, fmt, DOCUMENT_LOADERS, "load", "document")
def get_changelog_saver(filespec, fmt):
"""
Args:
filespec:
fmt:
Returns:
"""
return get_handler(filespec, fmt, CHANGELOG_SAVERS, "save", "changelog")
def get_changelog_loader(filespec, fmt):
"""
Args:
filespec:
fmt:
Returns:
"""
return get_handler(filespec, fmt, CHANGELOG_LOADERS, "load", "changelog")
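# Minimal usage sketch (example only, file name hypothetical): how a handler is
# resolved, either from an explicit format name or from the file extension.
def _demo_format_resolution():  # pragma: no cover - example only
    saver = get_document_saver("mydoc.bdocjs", None)  # via extension -> JSON
    loader = get_document_loader(None, "msgpack")     # via explicit format name
    assert saver is JsonSerializer.save
    assert loader is MsgPackSerializer.load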
```
#### File: python-gatenlp/tests/test_annotation.py
```python
from gatenlp import Document, Annotation, Span
def make_doc():
"""
Make a doc for testing
"""
doc = Document("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
set1 = doc.annset("set1")
# starting positions:
# 0: Ann2
# 3: Ann1, Ann10
# 12: Ann5
# 18: Ann3, Ann7, Ann9, Ann11
# 24: Ann6, Ann8
# 36: Ann12
# 39: Ann4
set1.add(3, 42, "Ann1")
set1.add(0, 6, "Ann2")
set1.add(18, 24, "Ann3")
set1.add(39, 45, "Ann4")
set1.add(12, 18, "Ann5")
set1.add(24, 30, "Ann6")
set1.add(18, 18, "Ann7")
set1.add(24, 24, "Ann8")
set1.add(18, 24, "Ann9")
set1.add(3, 9, "Ann10")
set1.add(18, 18, "Ann11")
set1.add(36, 42, "Ann12")
return doc
class TestAnnotationRels:
def test_annotation_rels01(self):
"""
Unit test method (make linter happy)
"""
doc = make_doc()
set1 = doc.annset("set1")
ann1 = set1.with_type("Ann1").for_idx(0)
ann2 = set1.with_type("Ann2").for_idx(0)
ann3 = set1.with_type("Ann3").for_idx(0)
ann4 = set1.with_type("Ann4").for_idx(0)
ann5 = set1.with_type("Ann5").for_idx(0)
ann6 = set1.with_type("Ann6").for_idx(0)
ann7 = set1.with_type("Ann7").for_idx(0)
ann8 = set1.with_type("Ann8").for_idx(0)
ann9 = set1.with_type("Ann9").for_idx(0)
ann10 = set1.with_type("Ann10").for_idx(0)
ann11 = set1.with_type("Ann11").for_idx(0)
ann12 = set1.with_type("Ann12").for_idx(0)
assert ann1.iscovering(ann1)
assert ann1.iscovering(ann1.start)
assert ann1.iscovering(ann1.end-1)
assert ann1.isoverlapping(ann2)
assert ann1.isrightoverlapping(ann1)
assert ann1.iscovering(ann3)
assert not ann1.iscovering(ann2)
assert ann1.isendingwith(ann12)
assert not ann1.isafter(ann2)
assert ann1.isstartingat(ann10)
assert ann1.iscovering(ann5)
assert ann1.iscovering(ann7)
assert ann1.iscovering(ann11)
assert ann1.iscovering(ann10)
assert ann1.iscovering(ann12)
assert ann1.isoverlapping(ann3)
assert ann1.isoverlapping(ann7)
assert ann1.isoverlapping(ann10)
assert ann1.isoverlapping(ann12)
assert ann1.isoverlapping(ann4)
assert not ann2.isbefore(ann1)
assert ann2.isbefore(ann3)
assert ann2.isleftoverlapping(ann1)
assert ann2.isoverlapping(ann1)
assert ann2.isoverlapping(ann10)
assert not ann2.isoverlapping(ann5)
assert not ann2.isbefore(ann10)
assert ann2.gap(ann3) == 12
assert ann3.iscoextensive(ann9)
assert ann3.iswithin(ann1)
assert ann3.isafter(ann2)
assert ann3.isafter(ann5)
assert ann3.isafter(ann5, immediately=True)
assert ann3.isstartingat(ann9)
assert ann3.isstartingat(ann7)
assert ann3.isstartingat(ann11)
assert ann3.isendingwith(ann9)
assert ann3.isendingwith(ann8)
assert ann3.gap(ann2) == 12
assert ann4.isafter(ann3)
assert not ann4.isafter(ann1)
assert ann4.isrightoverlapping(ann1)
assert ann4.isrightoverlapping(ann12)
assert ann5.isbefore(ann3)
assert ann5.isbefore(ann3, immediately=True)
assert ann5.iswithin(ann1)
assert ann5.isafter(ann2)
assert not ann5.isafter(ann1)
assert ann6.isafter(ann3)
assert ann6.isafter(ann3, immediately=True)
assert ann7.iscovering(ann7.start)
assert ann7.isafter(ann5)
assert ann7.isbefore(ann3)
assert ann7.isbefore(ann11)
assert ann7.isafter(ann11)
assert ann7.isstartingat(ann11)
assert ann7.isendingwith(ann11)
assert ann7.isoverlapping(ann7)
assert ann7.isoverlapping(ann11)
assert ann7.iswithin(ann9)
assert ann7.iswithin(ann11)
assert ann7.isleftoverlapping(ann11)
assert ann7.isrightoverlapping(ann11)
assert ann7.iscovering(ann11)
assert ann11.iscovering(ann7)
assert ann11.isleftoverlapping(ann7)
assert ann11.isrightoverlapping(ann7)
assert ann11.isstartingat(ann7)
assert ann11.isafter(ann5)
assert ann11.isbefore(ann3)
assert ann11.isbefore(ann9)
# TODO: gaps
def test_annotation_exps(self):
"""
Unit test method (make linter happy)
"""
import pytest
with pytest.raises(Exception) as ex:
Annotation(3, 2, "X")
assert str(ex.value).startswith("Cannot create annotation")
with pytest.raises(Exception):
Annotation(1, 2, "X", annid="x")
with pytest.raises(Exception):
Annotation(1, 2, "X", features=12)
with pytest.raises(Exception):
Annotation(1, 2, "X", annid="x")
assert Annotation(2, 3, "X").span == Span(2, 3)
assert not (Annotation(1, 2, "X") == "X")
ann1 = Annotation(1, 2, "X")
ann2 = Annotation(1, 2, "X")
assert ann1 == ann1
assert ann1 != ann2
assert ann1.same(ann2)
assert ann1.equal(ann2)
ann3 = Annotation(1, 2, "X", annid=22)
assert ann3.equal(ann1)
assert not ann3.same(ann1)
with pytest.raises(Exception):
Annotation(1, 2, "X") < 33
assert Annotation(1, 2, "X") < Annotation(2, 3, "X")
assert not Annotation(1, 2, "X") < Annotation(0, 3, "X")
assert Annotation(1, 2, "X", annid=0) < Annotation(1, 2, "X", annid=1)
assert Annotation(1, 2, "X").length == 1
def test_annotation_misc01(self):
"""
Unit test method (make linter happy)
"""
ann1 = Annotation(1, 2, "x", dict(a=1), annid=3)
dict1 = ann1.to_dict()
ann2 = Annotation.from_dict(dict1)
assert ann1.equal(ann2)
assert ann2.features == {"a": 1}
assert str(ann2) == "Annotation(1,2,x,features=Features({'a': 1}),id=3)"
```
#### File: python-gatenlp/tests/test_corpora.py
```python
import sys
import os
import pytest
from gatenlp.document import Document
from gatenlp.corpora import ListCorpus, ShuffledCorpus, EveryNthCorpus
TEXTS = [
"00 This is the first document.",
"01 And this is the second document.",
"02 Here is another one, document three.",
"03 Yet another, this one is number four.",
"04 Here is document five.",
"05 This is the sixth document.",
"06 Finally, document number seven.",
]
class TestCorpora1:
def test_listcorpus(self):
"""
Unit test method (make linter happy)
"""
docs = [Document(t) for t in TEXTS]
lc1 = ListCorpus(docs)
assert len(lc1) == len(docs)
for idx, doc in enumerate(lc1):
# assert idx == doc.features["__idx"]
assert idx == doc.features[lc1.idxfeatname()]
assert doc.text == TEXTS[idx]
for doc in lc1:
doc.features["test1"] = "updated"
lc1.store(doc)
assert lc1[0].features["test1"] == "updated"
# wrap the list corpus into a shuffled corpus
sc1 = ShuffledCorpus(lc1, seed=42)
orig = [
"00", "01", "02", "03", "04", "05", "06"
]
shuffled = [
"01", "03", "04", "02", "06", "00", "05"
]
for idx, doc in enumerate(sc1):
assert doc.text[:2] == shuffled[idx]
for doc in sc1:
sc1.store(doc)
for idx, doc in enumerate(sc1):
assert doc.text[:2] == shuffled[idx]
for idx, doc in enumerate(lc1):
assert doc.text[:2] == orig[idx]
lc2 = ListCorpus.empty(10)
assert len(lc2) == 10
for doc in lc2:
            assert doc is None
```
#### File: python-gatenlp/tests/test_gazetteer_string.py
```python
from gatenlp.document import Document
import re
from gatenlp.processing.gazetteer import StringGazetteer
DOC1_TEXT = "A simple document which has a number of words in it which we will use to test matching"
DOC2_TEXT = "A simple document which has a number of words in it which we will use to test matching, simple document"
def makedoc(text=DOC1_TEXT):
"""
Create and return document for testing.
"""
doc1 = Document(text)
set1 = doc1.annset()
whitespaces = [
m for m in re.finditer(r"[\s,.!?]+|^[\s,.!?]*|[\s,.!?]*$", text)
]
nrtokens = len(whitespaces) - 1
for k in range(nrtokens):
fromoff = whitespaces[k].end()
tooff = whitespaces[k + 1].start()
set1.add(fromoff, tooff, "Token")
return doc1
# no reference to list features
GAZLIST1 = [
("simple document", {"match": 1}),
("has", {"match": 2}),
("has a", {"match": 3}),
("has a number", {"match": 4}),
("has a number", {"match": 5}),
("completely different", {"match": 6})
]
# reference to list features of list 2
GAZLIST2 = [
("simple document", {"match": 11}),
("has", {"match": 22}),
("has a", {"match": 33}),
("has a number", {"match": 44}),
("has a number", {"match": 55}),
]
# three lists, so we have three elements in the list with features for each list
LISTFEATURES1 = {"list": 0, "feat1": "somevalue1"}
class TestStringGazetteer1:
def test_create(self):
"""
Unit test method (make linter happy)
"""
gaz = StringGazetteer(source=GAZLIST1, source_fmt="gazlist")
# print("\n!!!!!!!!!!! gaz=", gaz._root.format_node(recursive=False), "\n")
assert gaz._root is not None
assert "simple document" in gaz
assert "has" in gaz
node1 = gaz._get_node("simple")
# print("\n!!!!!!!!!!! node1=", node1.format_node(), "\n")
assert not node1.is_match()
assert node1.value is None
assert node1.listidxs is None
def test_match1(self):
"""
Unit test method (make linter happy)
"""
gaz = StringGazetteer(source=GAZLIST1, source_fmt="gazlist")
doc = makedoc()
# toks = list(doc.annset())
ret = gaz.match(doc.text, start=2)
assert len(ret) == 2 # list of matches, length of longest match
matches, longest = ret
assert isinstance(matches, list)
assert isinstance(longest, int)
assert longest == 15
assert len(matches) == 1
match = matches[0]
assert match.start == 2
assert match.end == 17
assert match.match == "simple document"
assert isinstance(match.data, list)
assert isinstance(match.listidxs, list)
assert len(match.data) == 1
assert len(match.listidxs) == 1
assert match.data[0] == {"match": 1}
assert match.listidxs[0] == 0
ret = gaz.find(doc.text, start=17)
matches, longest, where = ret
assert len(matches) == 3
assert longest == 12
assert where == 24
assert matches[0].match == "has"
assert matches[1].match == "has a"
assert matches[2].match == "has a number"
# same but only find longest
ret = gaz.find(doc.text, start=17, longest_only=True)
matches, longest, where = ret
assert len(matches) == 1
assert longest == 12
assert where == 24
assert matches[0].match == "has a number"
matches = list(gaz.find_all(doc.text))
assert len(matches) == 4
assert matches[0].match == "simple document"
assert matches[1].match == "has"
assert matches[2].match == "has a"
assert matches[3].match == "has a number"
def test_match2(self):
"""
Unit test method (make linter happy)
"""
gaz = StringGazetteer(source=GAZLIST1, source_fmt="gazlist")
doc = makedoc()
doc = gaz(doc)
lookups = list(doc.annset().with_type("Lookup"))
assert len(lookups) == 5
assert lookups[0].start == 2
assert lookups[0].end == 17
assert doc[lookups[0]] == "simple document"
assert lookups[1].start == 24
assert lookups[1].end == 27
assert doc[lookups[1]] == "has"
assert lookups[1].features.get("match") == 2
assert lookups[2].start == 24
assert lookups[2].end == 29
assert doc[lookups[2]] == "has a"
assert lookups[2].features.get("match") == 3
assert lookups[3].start == 24
assert lookups[3].end == 36
assert doc[lookups[3]] == "has a number"
assert lookups[3].features.get("match") == 4
assert lookups[4].start == 24
assert lookups[4].end == 36
assert doc[lookups[4]] == "has a number"
assert lookups[4].features.get("match") == 5
```
#### File: python-gatenlp/tests/test_normalizer.py
```python
from gatenlp.document import Document
from gatenlp.processing.normalizer import TextNormalizer
TEXT_NFC = "Äffin abcABC"
TEXT_NFKC = "Äffin abcABC"
TEXT_NFD = "Äffin abcABC"
TEXT_NFKD = "Äffin abcABC"
class TestTextNormalizer1:
def test_create(self):
"""
Unit test method (make linter happy)
"""
doc1 = Document(TEXT_NFC, features=dict(a=1))
tn_nfkc = TextNormalizer(form="NFKC")
doc2 = tn_nfkc(doc1)
assert doc2.text == TEXT_NFKC
assert doc2.features == dict(a=1)
tn_nfd = TextNormalizer(form="NFD")
doc3 = tn_nfd(doc1)
assert doc3.text == TEXT_NFD
tn_nfkd = TextNormalizer(form="NFKD")
doc4 = tn_nfkd(doc1)
assert doc4.text == TEXT_NFKD
tn_nfc = TextNormalizer(form="NFC")
doc5 = tn_nfc(doc3)
assert doc5.text == TEXT_NFC
```
#### File: python-gatenlp/tests/test_offsetmapper.py
```python
import sys
class TestOffsetMapper01:
def test_offsetmapper01m01(self):
"""
Unit test method (make linter happy)
"""
from gatenlp.document import OffsetMapper, Document
c_poo = "\U0001F4A9"
c_bridge = "\U0001f309"
doc1 = Document("01" + c_poo + "3" + c_bridge + c_bridge + c_bridge + "7")
assert len(doc1) == 8
assert doc1[2] == c_poo
om1 = OffsetMapper(doc1)
assert len(om1.java2python) == 13
p2j = [0, 1, 2, 4, 5, 7, 9, 11, 12]
# print("p2j={}".format(p2j), file=sys.stderr)
# print("om1.p2j={}".format(om1.python2java), file=sys.stderr)
assert om1.python2java == p2j
j2p = [0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8]
assert om1.java2python == j2p
for i in om1.java2python:
joff = om1.convert_to_java(i)
poff = om1.convert_to_python(joff)
assert poff == i
def test_offsetmapper01m02(self):
"""
Unit test method (make linter happy)
"""
# test identical offsets
from gatenlp.document import OffsetMapper, Document
doc1 = Document("Just some sample document")
om1 = OffsetMapper(doc1)
for idx in range(len(doc1)):
assert idx == om1.convert_to_java(idx)
assert idx == om1.convert_to_python(idx)
```
#### File: python-gatenlp/tests/test_pam.py
```python
from gatenlp import Document, Annotation, Span
from gatenlp.pam.pampac import Context, Location, Result
from gatenlp.pam.pampac import (
Ann,
AnnAt,
Call,
Find,
Text,
N,
Seq,
Or,
Success,
Failure,
Rule,
Pampac,
Function
)
# Disable:
# too-many-statements (R0915)
# import-outside-toplevel (C0415)
# pylint: disable=R0915, C0415
class TestPampac01:
def test01(self):
"""
Unit test method (make linter happy)
"""
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann")
doc.annset().add(0, 1, "Ann")
doc.annset().add(1, 2, "Ann")
doc.annset().add(1, 2, "Token")
doc.annset().add(2, 3, "Ann")
annlist = list(doc.annset())
ctx = Context(doc, annlist)
parser = Ann(name="a1")
ret = parser.parse(Location(), ctx)
assert isinstance(ret, Success)
assert len(ret) == 1
loc = ret[0].location
assert loc.text_location == 2
assert loc.ann_location == 1
assert len(ret[0].matches) == 1
# do this with the match method
ret = parser(doc, annlist)
assert isinstance(ret, Success)
assert len(ret) == 1
loc = ret[0].location
assert loc.text_location == 2
assert loc.ann_location == 1
assert len(ret[0].matches) == 1
# this does NOT first advance the annotation index so the annotation start index
# is at least 2. So it matches the annotation at index 1 which ends at 1 which is
# BEFORE the text index we have now.
assert loc == Location(2, 1)
ret = Ann(name="tmp1", useoffset=False).parse(loc, ctx)
assert len(ret) == 1
loc = ret[0].location
assert loc == Location(1, 2)
assert len(ret[0].matches) == 1
# by default we do advance, so we match the last annotation and end up at text
# position 4 looking for annotation index 5
loc = Location(2, 1)
ret = Ann(name="tmp1", useoffset=True).parse(loc, ctx)
assert len(ret) == 1
loc = ret[0].location
assert loc == Location(3, 5)
assert len(ret[0].matches) == 1
# Try to fail
parser = Ann("Token")
ret = parser(doc, annlist)
assert isinstance(ret, Failure)
# Same without a name: should generate the same locations, but no matches
parser = Ann()
ret = parser.parse(Location(), ctx)
assert len(ret) == 1
loc = ret[0].location
assert loc.text_location == 2
assert loc.ann_location == 1
assert len(ret[0].matches) == 0
ret = Ann().parse(loc, ctx)
assert len(ret) == 1
loc = ret[0].location
assert loc.text_location == 3
assert loc.ann_location == 5
assert len(ret[0].matches) == 0
parser = AnnAt(name="a2")
ret = parser.parse(Location(), ctx)
assert len(ret) == 1
assert len(ret[0].matches) == 1
parser = AnnAt(matchtype="all", name="a3")
ret = parser.parse(Location(), ctx)
assert len(ret) == 2
assert len(ret[0].matches) == 1
assert len(ret[1].matches) == 1
# Try Rule
parser = Ann(name="a1")
tmp = dict(i=0)
def rhs1(_succ, **_kwargs):
tmp["i"] = 1
rule = Call(parser, rhs1)
ret = rule.parse(Location(), ctx)
assert len(ret) == 1
loc = ret[0].location
assert loc.text_location == 2
assert loc.ann_location == 1
assert len(ret[0].matches) == 1
assert tmp["i"] == 1
# use the call method instead
def rhs2(_succ, **_kwargs):
tmp["i"] = 2
parser = Ann(name="a1").call(rhs2)
ret = parser.parse(Location(), ctx)
print(ret)
assert tmp["i"] == 2
parser = Find(AnnAt(type="Token", name="at"), by_anns=False)
ret = parser.parse(Location(), ctx)
print(ret)
parser = Find(AnnAt(type="Token", name="at"), by_anns=True)
ret = parser.parse(Location(), ctx)
print(ret)
parser = Find(Text("document", name="t1"), by_anns=False)
ret = parser.parse(Location(), ctx)
print(ret)
parser = Seq(Ann("Ann", name="a1"), Ann("Ann", name="a2"), matchtype="longest")
ret = parser.parse(Location(), ctx)
print(ret)
parser = N(AnnAt("Ann", name="a1"), 1, 5, matchtype="first")
ret = parser.parse(Location(), ctx)
print(ret)
parser = Or(Ann("X", name="x1"), Ann("Ann", name="a1"))
ret = parser.parse(Location(), ctx)
print(ret)
parser = Ann("X", name="x1") | Ann("Y", name="y1") | Ann("Ann", name="a1")
ret = parser.parse(Location(), ctx)
print(ret)
parser = Ann("Ann", name="a1") >> Ann("Ann", name="a2")
ret = parser.parse(Location(), ctx)
print(ret)
parser = Ann("Ann", name="a1") * 2
ret = parser.parse(Location(), ctx)
print(ret)
parser = Ann("Ann", name="a1") * (1, 3)
ret = parser.parse(Location(), ctx)
print(ret)
def test02(self):
"""
Unit test method (make linter happy)
"""
# Test multiple result matches
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann") # 0
doc.annset().add(0, 2, "Ann") # 1
doc.annset().add(0, 2, "Token") # 2
doc.annset().add(2, 4, "Ann") # 3
doc.annset().add(2, 4, "Ann") # 4
annlist = list(doc.annset())
# match all annotations at the document start
ret = AnnAt(matchtype="all").match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 3
# match sequence Token/Ann, take first at each point
# this should match annotation ids 2 and 3
ret = Seq(AnnAt("Token", name="1"), AnnAt("Ann", name="2")).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 2
assert ret[0].matches[0]["ann"].id == 2
assert ret[0].matches[1]["ann"].id == 3
# match sequence Ann/Ann, take first at each point
ret = Seq(AnnAt("Ann", name="1"), AnnAt("Ann", name="2")).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 2
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
# match sequence Ann/Ann, take first at each point, set useoffset=False so we do not skip to the
# end offset of the previous before matching the next
# In that case the next ann we match is the second one at offset 0
ret = Seq(
AnnAt("Ann", name="1"), AnnAt("Ann", name="2", useoffset=False)
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 2
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 1
# Make sure we get the correct set of annotations at position 0 and 2
ret = AnnAt("Ann", name="a", matchtype="all").match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 2
assert ret[0].matches[0]["ann"].id == 0
assert ret[1].matches[0]["ann"].id == 1
# ret.pprint()
ret = AnnAt("Ann", name="a", matchtype="all").match(
doc, annlist, location=Location(2, 2)
)
assert ret.issuccess()
assert len(ret) == 2
assert ret[0].matches[0]["ann"].id == 3
assert ret[1].matches[0]["ann"].id == 4
# ret.pprint()
# Match sequence of two anns in order, take all results
ret = Seq(
AnnAt("Ann", name="1", matchtype="all"),
AnnAt("Ann", name="2", matchtype="all"),
select="all",
matchtype="all",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 4
assert len(ret[0].matches) == 2
assert len(ret[1].matches) == 2
assert len(ret[2].matches) == 2
assert len(ret[3].matches) == 2
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[1].matches[0]["ann"].id == 0
assert ret[1].matches[1]["ann"].id == 4
assert ret[2].matches[0]["ann"].id == 1
assert ret[2].matches[1]["ann"].id == 3
assert ret[3].matches[0]["ann"].id == 1
assert ret[3].matches[1]["ann"].id == 4
def test03(self):
"""
Unit test method (make linter happy)
"""
# Test single result matches with N, with and without the until clause
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann") # 0
doc.annset().add(0, 2, "Ann") # 1
doc.annset().add(0, 2, "Token") # 2
doc.annset().add(2, 4, "Ann") # 3
doc.annset().add(2, 4, "Ann") # 4
doc.annset().add(4, 6, "Ann") # 5
doc.annset().add(4, 6, "Ann") # 6
doc.annset().add(4, 6, "Person") # 7
doc.annset().add(6, 8, "Ann") # 8
doc.annset().add(6, 8, "Ann") # 9
doc.annset().add(8, 10, "XXXX") # 10
annlist = list(doc.annset())
# single Ann, single result from N
# this should return annotation ids 0, 3, 5
ret = N(
AnnAt("Ann", name="a1", matchtype="first"),
min=2,
max=3,
select="first",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 3
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 5
# Same as before, but with a name, so we should get one additional matches for the whole sequence
# with a span
ret = N(
AnnAt("Ann", name="a1", matchtype="first"),
min=2,
max=3,
select="first",
matchtype="first",
name="n1"
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 4
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 5
assert ret[0].matches[3]["span"] == Span(0, 6)
# single Ann, single result from N
# this should return annotation ids 0, 3, 5, 8
ret = N(
AnnAt("Ann", name="a1", matchtype="first"),
min=2,
max=99,
select="first",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 4
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 5
assert ret[0].matches[3]["ann"].id == 8
# single Ann, single result from N, with early stopping at Person
# this should return annotation ids 0, 3, 7
ret = N(
AnnAt("Ann", name="a1", matchtype="first"),
until=AnnAt("Person", name="p"),
min=2,
max=99,
select="first",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 3
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 7
# Try a match with min=0 and max=99 that does not succeed
# single Ann, single result from N
# this should return an empty list for matches
ret = N(
AnnAt("NotThere", name="a1", matchtype="first"),
min=0,
max=99,
select="first",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 0
# Try a match with min=0 and max=99 that does not succeed
# single Ann, single result from N
# this should return an empty list for matches
ret = N(
AnnAt("Ann", name="a1", matchtype="first"),
min=0,
max=99,
select="first",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 4
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 5
assert ret[0].matches[3]["ann"].id == 8
def test04(self):
"""
Unit test method (make linter happy)
"""
# Test multiple result matches with N, with and without the until clause
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann") # 0
doc.annset().add(0, 2, "Ann") # 1
doc.annset().add(0, 2, "Token") # 2
doc.annset().add(2, 4, "Ann") # 3
doc.annset().add(2, 4, "Ann") # 4
doc.annset().add(4, 6, "Ann") # 5
doc.annset().add(4, 6, "Ann") # 6
doc.annset().add(4, 6, "Person") # 7
doc.annset().add(6, 8, "Ann") # 8
doc.annset().add(6, 8, "Ann") # 9
doc.annset().add(8, 10, "XXXX") # 10
annlist = list(doc.annset())
# multiple Anns, single result from N: first
# This should find 0,3,5
ret = N(
AnnAt("Ann", name="a1", matchtype="all"),
min=2,
max=3,
select="all",
matchtype="first",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 3
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 5
# multiple Anns, all results from N
# should return 0,1
ret = N(
AnnAt("Ann", name="a1", matchtype="all"),
min=1,
max=1,
select="all",
matchtype="all",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 2
assert len(ret[0].matches) == 1
assert len(ret[1].matches) == 1
assert ret[0].matches[0]["ann"].id == 0
assert ret[1].matches[0]["ann"].id == 1
# multiple Anns, all results from N
ret = N(
AnnAt("Ann", name="a1", matchtype="all"),
min=1,
max=2,
select="all",
matchtype="all",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 4
assert len(ret[0].matches) == 2
assert len(ret[1].matches) == 2
assert len(ret[2].matches) == 2
assert len(ret[3].matches) == 2
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[1].matches[0]["ann"].id == 0
assert ret[1].matches[1]["ann"].id == 4
assert ret[2].matches[0]["ann"].id == 1
assert ret[2].matches[1]["ann"].id == 3
assert ret[3].matches[0]["ann"].id == 1
assert ret[3].matches[1]["ann"].id == 4
# multiple Anns, all results from N
        # three results, one per annotation at the first offset: 0, 1, 2
ret = N(
AnnAt(name="a1", matchtype="all"),
min=1,
max=1,
select="all",
matchtype="all",
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 3
assert len(ret[0].matches) == 1
assert len(ret[1].matches) == 1
assert len(ret[2].matches) == 1
assert ret[0].matches[0]["ann"].id == 0
assert ret[1].matches[0]["ann"].id == 1
assert ret[2].matches[0]["ann"].id == 2
# This should just find the Token as the first and only match!
ret = N(AnnAt("Ann", name="a1", matchtype="all"),
until=AnnAt("Token", name="t", matchtype="first"),
min=0,
max=3,
select="all",
matchtype="all"
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 1
assert len(ret[0].matches) == 1
assert ret[0].matches[0]["ann"].id == 2
        # This should terminate with Person and find all paths that can lead up to Person:
        # 0,3,7  0,4,7  1,3,7  1,4,7
ret = N(AnnAt("Ann", name="a1", matchtype="all"),
until=AnnAt("Person", name="t", matchtype="first"),
min=1,
max=3,
select="all",
matchtype="all"
).match(doc, annlist)
assert ret.issuccess()
assert len(ret) == 4
assert len(ret[0].matches) == 3
assert len(ret[1].matches) == 3
assert len(ret[2].matches) == 3
assert len(ret[3].matches) == 3
assert ret[0].matches[0]["ann"].id == 0
assert ret[0].matches[1]["ann"].id == 3
assert ret[0].matches[2]["ann"].id == 7
assert ret[1].matches[0]["ann"].id == 0
assert ret[1].matches[1]["ann"].id == 4
assert ret[1].matches[2]["ann"].id == 7
assert ret[2].matches[0]["ann"].id == 1
assert ret[2].matches[1]["ann"].id == 3
assert ret[2].matches[2]["ann"].id == 7
assert ret[3].matches[0]["ann"].id == 1
assert ret[3].matches[1]["ann"].id == 4
assert ret[3].matches[2]["ann"].id == 7
def test05(self):
"""
Unit test method (make linter happy)
"""
# Rules and Pampac
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann1") # 0
doc.annset().add(2, 4, "Ann2") # 1
doc.annset().add(3, 5, "Ann2") # 2
doc.annset().add(4, 5, "Ann2") # 3
doc.annset().add(8, 10, "Ann2") # 4
annset = doc.annset()
orig_len = len(annset)
annlist = list(doc.annset())
# first make sure the pattern works as we want
ctx = Context(doc=doc, anns=annlist)
pat1 = AnnAt("Ann2", name="a1") >> AnnAt("Ann2", name="a2")
loc = ctx.inc_location(Location(0, 0), by_offset=1)
pat1.parse(location=loc, context=ctx)
def r1_action(succ, _context=None, **_kwargs):
span = succ[0].span
ann = succ.context.outset.add(span.start, span.end, "NEW")
return ann
r1 = Rule(
AnnAt("Ann2") >> AnnAt("Ann2"),
r1_action
)
pampac = Pampac(r1)
pampac.set_skip = "longest"
pampac.set_select = "first"
outset = doc.annset()
ret = pampac.run(doc, annlist, outset=outset, debug=True)
assert len(ret) == 1
assert len(ret[0]) == 2
idx, retlist = ret[0]
assert idx == 1
assert len(retlist) == 1
a = retlist[0]
assert isinstance(a, Annotation)
assert a.start == 2
assert a.end == 5
assert a.type == "NEW"
assert len(outset) == orig_len + 1
class TestPampacRemoveAnn:
def test01(self):
"""
Unit test method (make linter happy)
"""
from gatenlp.pam.pampac.actions import RemoveAnn
def make_doc():
doc = Document("Some test document")
doc.annset().add(0, 2, "Ann") # 0
doc.annset().add(0, 2, "Ann") # 1
doc.annset().add(0, 2, "Token") # 2
doc.annset().add(2, 4, "Ann") # 3
doc.annset().add(2, 4, "Ann") # 4
doc.annset().add(4, 6, "Ann") # 5
doc.annset().add(4, 6, "Ann") # 6
doc.annset().add(4, 6, "Person") # 7
doc.annset().add(6, 8, "Ann") # 8
doc.annset().add(6, 8, "Ann") # 9
doc.annset().add(8, 10, "XXXX") # 10
return doc
doc = make_doc()
        # match "Ann" annotations and remove them
assert len(doc.annset()) == 11
Pampac(
Rule(
AnnAt("Ann", name="match", matchtype="all"),
RemoveAnn("match", doc.annset())
),
).run(doc, doc.annset().with_type("Ann", "XXX", "Person", "Token"))
assert len(doc.annset()) == 7
class TestPampacMisc:
def test01(self):
"""
Unit test method (make linter happy)
"""
loc1 = Location(0, 0)
loc2 = Location(0, 1)
loc3 = Location(1, 0)
loc4 = Location(0, 0)
assert loc1 != loc2
assert loc1 != loc3
assert loc1 == loc4
assert loc1 != "asa"
assert str(loc1) == "Location(0,0)"
res1 = Result(location=Location(10, 10), span=Span(4, 10))
assert list(res1.anns4matches()) == []
assert res1.matches4name("xxx") == []
res2 = Result(matches={"span": Span(3, 4), "name": "xx"}, location=Location(12, 12), span=Span(4, 12))
assert list(res2.anns4matches()) == []
assert res2.matches4name("xx") == [{"span": Span(3, 4), "name": "xx"}]
res3 = Result(matches=[{"span": Span(3, 4), "name": "xx"}, {"span": Span(3, 4), "name": "yy"}, ],
location=Location(10, 10), span=Span(4, 10))
assert list(res3.anns4matches()) == []
assert res3.matches4name("xx") == [{"span": Span(3, 4), "name": "xx"}]
assert str(res1) == "Result(loc=Location(10,10),span=Span(4,10),nmatches=0)"
assert res1.__repr__() == "Result(loc=Location(10,10),span=Span(4,10),matches=[])"
fail1 = Failure()
assert not fail1.issuccess()
assert fail1.describe() == """None at ?/?: Parser Error"""
fail2 = Failure(message="Some problem", parser="Parser1", location=loc1)
assert fail2.describe() == """Parser1 at 0/0: Some problem"""
fail3 = Failure(message="Another problem", parser="Parser2", location=loc1, causes=[fail2, fail1])
assert fail3.describe() == """Parser2 at 0/0: Another problem
Caused by:
Parser1 at 0/0: Some problem
None at ?/?: Parser Error"""
#
doc1 = Document("somedoc")
set1 = doc1.annset()
ctx1 = Context(doc1, set1)
assert ctx1.annset.size == 0
assert ctx1.get_ann(loc1) is None
succ1 = Success(res1, ctx1)
assert succ1.issuccess()
assert succ1.select_result([res1, res2]) == res1
assert succ1.select_result([res1, res2], matchtype="all") == [res1, res2]
assert succ1.select_result([res1, res2], matchtype="longest") == res2
assert succ1.select_result([res1, res2], matchtype="shortest") == res1
assert succ1.result(matchtype="first") == res1
for r in succ1:
assert isinstance(r, Result)
assert succ1[0] == res1
def test02(self):
"""
Unit test method (make linter happy)
"""
def fun1(location, context):
return location, context
parser1 = Function(fun1)
assert parser1.parse(1, 2) == (1, 2)
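# A minimal usage sketch distilled from the tests above: one Rule that matches
# two adjacent "Ann2" annotations and writes a "NEW" annotation over the span.
# It mirrors the calls made in test05; the document text is illustrative only.
def _pampac_usage_sketch():
    doc = Document("Some test document")
    doc.annset().add(2, 4, "Ann2")
    doc.annset().add(4, 5, "Ann2")
    def action(succ, _context=None, **_kwargs):
        span = succ[0].span
        return succ.context.outset.add(span.start, span.end, "NEW")
    rule = Rule(AnnAt("Ann2") >> AnnAt("Ann2"), action)
    Pampac(rule).run(doc, list(doc.annset()), outset=doc.annset())
    return doc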
```
#### File: python-gatenlp/tests/test_sortedintvls.py
```python
import logging
from gatenlp.impl import SortedIntvls
logging.basicConfig()
logger = logging.getLogger("gatenlp")
logger.setLevel(logging.INFO)
# Simple simulation of the interaction: instead of calling interact() manually call
# the methods from the created wrapper.
class TestSortedIntvls01:
def test_sortedintvls01(self):
"""
Unit test method (make linter happy)
"""
si1 = SortedIntvls()
intvls = [
(0, 3, 0, "int1"),
(4, 5, 1, "int2"),
(9, 10, 2, "int9"),
(5, 9, 3, "int6"),
(4, 10, 4, "int4"),
(8, 10, 5, "int8"),
(5, 6, 6, "int5"),
(0, 20, 7, "int0"),
(8, 9, 9, "int7"),
(4, 5, 9, "int3"),
(4, 5, 9, "int33"),
]
si1.update(intvls)
logger.info("!! si1 is {}".format(si1))
ret1 = list(si1.firsts())
logger.info("firsts={}".format(ret1))
assert len(ret1) == 2
assert (0, 3, 0, "int1") in ret1
assert (0, 20, 7, "int0") in ret1
ret2 = list(si1.lasts())
logger.info("lasts={}".format(ret2))
assert len(ret2) == 1
assert (9, 10, 2, "int9") in ret2
ret3 = list(si1.starting_at(4))
logger.info("starting at 4={}".format(ret3))
assert len(ret3) == 4
assert (4, 5, 1, "int2") in ret3
assert (4, 10, 4, "int4") in ret3
assert (4, 5, 9, "int3") in ret3
assert (4, 5, 9, "int33") in ret3
ret4 = list(si1.within(3, 8))
logger.info("contained in 3,8={}".format(ret4))
assert len(ret4) == 4
assert (4, 5, 1, "int2") in ret4
assert (4, 5, 9, "int3") in ret4
assert (4, 5, 9, "int33") in ret4
assert (5, 6, 6, "int5") in ret4
ret5 = list(si1.within(0, 20))
logger.info("contained in 0,20={}".format(ret5))
assert len(ret5) == len(intvls)
ret6 = list(si1.covering(3, 4))
logger.info("containing 3,4={}".format(ret6))
assert len(ret6) == 1
assert (0, 20, 7, "int0") in ret6
ret7 = list(si1.covering(8, 9))
assert len(ret7) == 5
assert (0, 20, 7, "int0") in ret7
assert (4, 10, 4, "int4") in ret7
assert (5, 9, 3, "int6") in ret7
assert (8, 10, 5, "int8") in ret7
assert (8, 9, 9, "int7") in ret7
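# Quick-reference sketch of the query methods exercised above, assuming the
# same (start, end, id, label) tuple layout; only calls shown in the test are used.
def _sortedintvls_usage_sketch():
    si = SortedIntvls()
    si.update([(0, 3, 0, "a"), (2, 5, 1, "b"), (4, 6, 2, "c")])
    first = list(si.firsts())           # intervals with the smallest start offset
    last = list(si.lasts())             # intervals with the largest end offset
    at2 = list(si.starting_at(2))       # intervals starting exactly at offset 2
    inside = list(si.within(0, 5))      # intervals fully contained in [0, 5]
    covering = list(si.covering(4, 5))  # intervals fully containing [4, 5]
    return first, last, at2, inside, covering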
```
#### File: python-gatenlp/tests/test_spacy.py
```python
import os
from gatenlp import logger, Document
class TestSpacy01:
def test_spacy01a(self):
"""
Unit test method (make linter happy)
"""
try:
import spacy
from gatenlp.lib_spacy import spacy2gatenlp, AnnSpacy
nlp = spacy.load("en_core_web_sm")
except ImportError:
logger.warning("Module spacy or model en_core_web_sm not installed, skipping spacy test")
return
txt = "<NAME> was born in Hawaii. He was elected president in 2008."
sdoc = nlp(txt)
gdoc = spacy2gatenlp(sdoc)
anns = gdoc.annset()
sents = anns.with_type("Sentence")
assert len(sents) == 2
tokens = anns.with_type("Token")
assert len(tokens) == 14
annspacy = AnnSpacy(pipeline=nlp)
doc = Document(txt)
doc = annspacy(doc)
anns = doc.annset()
sents = anns.with_type("Sentence")
assert len(sents) == 2
tokens = anns.with_type("Token")
assert len(tokens) == 14
def test_spacy02a(self):
"""
Unit test method (make linter happy)
"""
try:
import spacy
from gatenlp.lib_spacy import spacy2gatenlp, apply_spacy
nlp = spacy.load("en_core_web_sm")
except ImportError:
logger.warning("Module spacy or model en_core_web_sm not installed, skipping spacy test")
return
txt = "<NAME> was born in Hawaii. He was elected president in 2008. "
doc = Document(txt)
annset = doc.annset()
annset.add(0, 32, "Sentence")
annset.add(33, 67, "Sentence")
anns = doc.annset()
containing_set1 = anns.with_type("Sentence")
assert len(containing_set1) == 2
tokens = anns.with_type("Token")
assert len(tokens) == 0
gdoc = apply_spacy(nlp, doc, setname="spacy1", containing_anns=containing_set1)
annsOut = gdoc.annset("spacy1")
sents = annsOut.with_type("Sentence")
assert len(sents) == 2
tokens = annsOut.with_type("Token")
assert len(tokens) == 14
containing_list1 = list(containing_set1)
gdoc = apply_spacy(nlp, doc, setname="spacy2", containing_anns=containing_list1)
annsOut = gdoc.annset("spacy2")
sents = annsOut.with_type("Sentence")
assert len(sents) == 2
tokens = annsOut.with_type("Token")
assert len(tokens) == 14
containing_list2 = containing_list1[:1]
assert len(containing_list2) == 1
gdoc = apply_spacy(nlp, doc, setname="spacy3", containing_anns=containing_list2)
annsOut = gdoc.annset("spacy3")
sents = annsOut.with_type("Sentence")
assert len(sents) == 1
tokens = annsOut.with_type("Token")
assert len(tokens) == 7
def test_spacy03(self):
"""
Unit test method to test data passing between spacy and gate
"""
try:
import spacy
from spacy.language import Language
from spacy.tokens import Doc
from spacy.matcher import Matcher
from gatenlp.lib_spacy import spacy2gatenlp, apply_spacy
import pkg_resources
nlp = spacy.load("en_core_web_sm")
except ImportError:
logger.warning("Module spacy or model en_core_web_sm not installed, skipping spacy test")
return
spv = pkg_resources.parse_version(spacy.__version__)
if spv < pkg_resources.parse_version("3.0"):
logger.warning(f"Skipping Spacy test test_spacy03, have version {spacy.__version__} need at least 3.0")
return
## create a language factory
@Language.factory('number_detector')
def create_number_detector_component(nlp: Language, name: str):
return NumberDetector(nlp)
class NumberDetector(object):
name = "number_detector"
matcher: Matcher
nlp: Language
def __init__(self, nlp: Language):
# Extensions include an identifier of the component that creates it, to avoid collisions
Doc.set_extension("Number_freq",default=False)
self.nlp = nlp
self.matcher = Matcher(nlp.vocab)
pattern = [{'IS_DIGIT': True}]
self.matcher.add("numbers", [pattern], greedy="LONGEST")
def __call__(self, doc: Doc,number:str,**kwargs) -> Doc:
# This method is invoked when the component is called on a Doc
doc._.Number_freq=number
matches = self.matcher(doc, with_alignments=True)
refSpans=[]
for match_id, start, end, matched_list in matches:
refSpans.append(doc[start:end])
# logging.debug('
# f" matched {self.nlp.vocab[match_id].text} - { ' '.join( [t.text for t in doc[start:end]])}"
# f" -- matching {matched_list} -- {ref_set}"
# )
doc.spans["Numbers"]=refSpans
return doc
txt = "When 2 plus 2 makes 5 then your system is doing something wrong!. " \
"But in life 2 and 2 not always makes 4. "
nlp.add_pipe('number_detector')
doc = Document(txt)
annset = doc.annset()
annset.add(0, 66, "Sentence")
annset.add(67, 106, "Sentence")
anns = doc.annset()
containing_set1 = anns.with_type("Sentence")
assert len(containing_set1) == 2
tokens = anns.with_type("Token")
assert len(tokens) == 0
i = 1
for ann in containing_set1:
ann.features["number"] = i
i += 1
gdoc = apply_spacy(nlp, doc, setname="spacy1",
containing_anns=containing_set1,
component_cfg="number_detector",
retrieve_spans=["Numbers"])
anns_out = gdoc.annset("spacy1")
sents = anns_out.with_type("Sentence")
assert len(sents) == 2
nums = anns_out.with_type("Numbers")
assert len(nums) == 6
for ann in containing_set1:
assert ann.features["number"] == ann.features["Number_freq"]
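# Condensed sketch of the two entry points used above, guarded the same way as
# the tests because both the spacy package and the en_core_web_sm model may be
# absent; the document text and set name are illustrative.
def _spacy_usage_sketch():
    try:
        import spacy
        from gatenlp.lib_spacy import spacy2gatenlp, apply_spacy
        nlp = spacy.load("en_core_web_sm")
    except ImportError:
        return None
    gdoc = spacy2gatenlp(nlp("A short sentence."))  # whole-document conversion
    doc = Document("A short sentence.")
    doc.annset().add(0, 17, "Sentence")
    return apply_spacy(nlp, doc, setname="spacy",
                       containing_anns=doc.annset().with_type("Sentence")), gdoc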
```
#### File: python-gatenlp/tests/test_stanza.py
```python
import os
import time
from gatenlp import logger, Document
class TestStanza01:
def test_stanza01a(self):
"""
Unit test method (make linter happy)
"""
try:
import stanza
from gatenlp.lib_stanza import stanza2gatenlp, AnnStanza
from stanza.resources.common import DEFAULT_MODEL_DIR
except ImportError:
logger.warning("Module stanza not installed, skipping stanza test")
return
modelfile = os.path.join(DEFAULT_MODEL_DIR, "en", "default.zip")
if not os.path.exists(modelfile):
stanza.download("en")
nlp = stanza.Pipeline(use_gpu=False)
if stanza is None:
logger.warning("Stanza could not be imported, Stanza tests skipped!")
return
txt = "<NAME> was born in Hawaii. He was elected president in 2008."
sdoc = nlp(txt)
gdoc = stanza2gatenlp(sdoc)
anns = gdoc.annset()
sents = anns.with_type("Sentence")
assert len(sents) == 2
# words = anns.with_type("Word")
# assert len(words) == 14
tokens = anns.with_type("Token")
assert len(tokens) == 14
doc = Document(txt)
annstanza = AnnStanza(pipeline=nlp, batchsize=50)
doc = annstanza(doc)
anns = doc.annset()
sents = anns.with_type("Sentence")
assert len(sents) == 2
tokens = anns.with_type("Token")
assert len(tokens) == 14
# test Stanza batching and check speed improvement
nlp = stanza.Pipeline(use_gpu=False, processors="tokenize")
annstanza = AnnStanza(pipeline=nlp)
docs_p = []
docs_c = []
for i in range(103):
docs_p.append(Document(txt))
docs_c.append(Document(txt))
time_pipe = time.perf_counter()
docs_processed_pipe = list(annstanza.pipe(docs_p))
time_pipe = time.perf_counter() - time_pipe
docs_processed_call = []
time_call = time.perf_counter()
for doc in docs_c:
docs_processed_call.append(annstanza(doc))
time_call = time.perf_counter() - time_call
# print(f"!!!!!!! PIPE={time_pipe}, CALL={time_call}, speedup is {time_call/time_pipe}")
# assert time_call > time_pipe
# check equality of both lists of processed documents by first converting to dicts
assert len(docs_p) == len(docs_processed_pipe)
assert len(docs_processed_call) == len(docs_processed_pipe)
d_pipe = docs_processed_pipe[0]
d_call = docs_processed_call[0]
assert d_pipe.text == d_call.text
assert d_pipe.annset_names() == d_call.annset_names()
for n in d_pipe.annset_names():
as_p = d_pipe.annset(n)
as_c = d_call.annset(n)
assert as_p.size == as_c.size
for ap, ac in zip(as_p, as_c):
assert ap.equal(ac)
d_pipe_d = d_pipe.to_dict()
d_call_d = d_call.to_dict()
assert d_pipe_d == d_call_d
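# Condensed sketch of the AnnStanza interfaces used above, guarded like the
# tests since stanza and its English model may be missing.
def _stanza_usage_sketch():
    try:
        import stanza
        from gatenlp.lib_stanza import AnnStanza
    except ImportError:
        return None
    ann = AnnStanza(pipeline=stanza.Pipeline(use_gpu=False, processors="tokenize"))
    single = ann(Document("One document."))                         # per-document call
    batched = list(ann.pipe([Document("One."), Document("Two.")]))  # batched call
    return single, batched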
``` |
{
"source": "JoanChirinos/CodeXAPI",
"score": 2
} |
#### File: JoanChirinos/CodeXAPI/__init__.py
```python
import sys
from flask import Flask, jsonify, render_template, request
from util import run
app = Flask(__name__)
app.secret_key = 'beans'
@app.route('/')
def home():
return 'version : {}'.format(sys.version)
@app.route('/test')
def test():
return render_template('test.html')
@app.route('/run', methods=["POST"])
def run_file():
print(request.form)
code = str(request.form['code'])
filename = str(request.form['filename'])
stdout, stderr = run.go(code, filename)
return jsonify(stdout=stdout,
stderr=stderr)
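# Hedged client-side sketch for the /run endpoint above: POST the two form
# fields it reads, "code" and "filename". The host and port are assumptions
# (Flask's defaults), not values taken from this repo.
def _example_client():
    import requests
    resp = requests.post('http://127.0.0.1:5000/run',
                         data={'code': "print('hi')", 'filename': 'hi.py'})
    return resp.json()  # {'stdout': ..., 'stderr': ...}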
if __name__ == '__main__':
app.debug = True
app.run(host='192.168.3.11')
``` |
{
"source": "JoanChirinos/PeerColab",
"score": 2
} |
#### File: PeerColab/beta_0.0.1/__init__.py
```python
import sys
import os
# import datetime
from flask import (Flask, render_template, redirect, url_for, session, request,
flash, current_app)
from markupsafe import Markup
from util import db, helpers
import config
app = Flask(__name__)
# Production Config
# app.config.from_object(config.ProdConfig)
# Development Config
app.config.from_object(config.DevConfig)
# Database Manager with correct databse path and table defns path
with app.app_context():
cwd = os.getcwd()
dbm = db.DBManager(current_app.config['DATABASE_URI'],
f'{cwd}/static/table_definitions.sql')
@app.route('/<path:path>')
def catch_all(path):
'''
Catch-all route in case some typo happens or something
'''
flash(f'Invalid endpoint: /{path}', 'warning')
return redirect(url_for('home'))
@app.route('/')
def home():
'''
Render the homepage.
If user is logged in, redirect to their files.
'''
if 'email' in session:
return redirect(url_for('projects'))
return render_template('index.html')
@app.route('/login')
def login_page():
'''
Render the login page.
If user is logged in, redirect to their files.
'''
if 'email' in session:
return redirect(url_for('projects'))
return render_template('login.html')
@app.route('/register')
def register_page():
'''
Render the registration page.
If user is logged in, redirect to their files.
'''
if 'email' in session:
return redirect(url_for('projects'))
return render_template('register.html')
@app.route('/projects')
def projects():
'''
Render main projects page.
'''
if 'email' not in session:
flash('You must be logged in to view that page!', 'danger')
return redirect(url_for('home'))
else:
email = session['email']
ids = dbm.get_projects(email)
projects = list((dbm.get_project_name(id)[1],
id,
dbm.is_admin(email, id)) for id in ids)
projects = tuple(sorted(projects))
return render_template('projects.html', projects=projects)
@app.route('/project/<project_id>')
def project(project_id):
'''
Render page for project with given id
'''
if 'email' not in session:
flash('You must be logged in to view that page!', 'danger')
return redirect(url_for('home'))
else:
email = session['email']
if not dbm.is_member(email, project_id):
flash('You don\'t have permission to do that!', 'warning')
            return redirect(url_for('projects'))
project_name = dbm.get_project_name(project_id)
#files =
return render_template('project.html', **locals())
@app.route('/authenticate', methods=['POST'])
def authenticate():
'''
Attempt to log user in.
On failure, flashes error and redirects home.
On success, stores email in session and redirects to project page.
'''
email = request.form['email'].strip()
password = request.form['password'].strip()
if helpers.verify_auth_args(email, password)\
and dbm.authenticate_user(email, password):
session['email'] = email
return redirect(url_for('projects'))
else:
flash('Incorrect username or password!', 'danger')
return redirect(url_for('login_page'))
@app.route('/registerUser', methods=['POST'])
def register():
'''
Attempt to register user.
On failure, flashes error and redirects home.
On success, stores email in session and redirects to project page.
'''
first = request.form['first'].strip()
last = request.form['last'].strip()
email = request.form['email'].strip()
password = request.form['password'].strip()
if not helpers.verify_auth_args(first, last, email, password):
flash('One or more fields is improperly formatted!', 'danger')
return redirect(url_for('register_page'))
elif not dbm.register_user(email, password, first, last, 0):
s = ('Email already in use! <a href="/login" class="alert-link">'
+ 'Log in?</a>')
flash(Markup(s), 'danger')
return redirect(url_for('register_page'))
else:
session['email'] = email
        flash('Account creation successful!', 'success')
return redirect(url_for('projects'))
@app.route('/logout')
def logout():
'''
Attempt to log user out.
Regardless, will redirect to home page.
'''
if 'email' in session:
session.pop('email')
return redirect(url_for('home'))
@app.route('/create/project', methods=['POST'])
def create_project():
'''
Attempt to create project.
Redirects to projects page.
'''
if 'email' not in session:
flash('You need to be logged in to do that!', 'warning')
return redirect(url_for('home'))
email = session['email']
teacher = request.form['teacherEmail']
name = request.form['projectName']
forclass = 'forClass' in request.form
if forclass:
if not dbm.is_teacher(teacher):
flash('Teacher\'s email is invalid!', 'danger')
return redirect(url_for('projects'))
project_id = dbm.create_project(teacher, name)
dbm.add_member(email, project_id)
else:
dbm.create_project(email, name)
flash('Successfully created new project!', 'success')
return redirect(url_for('projects'))
@app.route('/create/file/<project_id>', methods=['POST'])
def create_file(project_id: str):
'''
Attempt to create file.
Redirects to project page.
'''
if 'email' not in session:
flash('You need to be logged in to do that!', 'warning')
return redirect(url_for('home'))
email = session['email']
name = request.form['fileName']
dbm.create_file(email, project_id, name)
flash('Successfully created new file!', 'success')
return redirect(url_for('projects'))
@app.route('/delete/<type>/<id>')
def delete(type: str, id: str):
'''
Attempt to delete project with given project_id.
Redirects to project page.
'''
if 'email' not in session:
flash('You need to be logged in to do that!', 'warning')
return redirect(url_for('home'))
email = session['email']
result, error_msg = None, None
if type == 'project':
result, error_msg = dbm.delete_project(email, id)
if type == 'file':
result, error_msg = dbm.delete_file(email, id)
if result is None:
flash('Invalid request!', 'warning')
elif not result:
flash(error_msg, 'warning')
else:
flash('Project deleted successfully!', 'success')
if type == 'project':
return redirect(url_for('projects'))
elif type == 'file':
return redirect(url_for('project', project_id=id))
if __name__ == '__main__':
if len(sys.argv) == 1:
app.run()
else:
if sys.argv[1] == 'create_db':
dbm.create_db()
elif sys.argv[1] == 'test_suite':
dbm.create_db()
dbm.register_user('<EMAIL>', 'password', '<PASSWORD>',
'<PASSWORD>', 0)
dbm.register_user('<EMAIL>', 'password', 'User', 'Userface',
0)
dbm.register_user('<EMAIL>', 'password', 'Teach', 'Er',
1)
dbm.create_project('<EMAIL>', 'pname')
dbm.create_project('<EMAIL>', 'swag name')
# Simulate creating a project that has a teacher
tpid = dbm.create_project('<EMAIL>', 'Teacher project')
dbm.add_member('<EMAIL>', tpid)
``` |
{
"source": "joandesonandrade/kiCaptcha",
"score": 3
} |
#### File: kiCaptcha/util/DownloadCaptcha.py
```python
import requests
def startDownload(url):
    r = requests.get(url)  # fetch the image from the URL
    with open('captcha/captcha.png','wb') as f:  # open a file for writing in binary mode
        f.write(r.content)  # write the image bytes
    return True  # return True on success
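# Usage sketch: the URL is a placeholder and the captcha/ directory must exist.
if __name__ == '__main__':
    startDownload('https://example.com/captcha.png')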
```
#### File: kiCaptcha/util/recognize.py
```python
import tensorflow as tf  # TensorFlow, used to build the neural network.
import numpy as np  # NumPy, for creating multidimensional arrays.
from tensorflow.examples.tutorials.mnist import input_data  # TensorFlow's bundled MNIST digit-recognition example.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # load the digit dataset for training.
n_train = mnist.train.num_examples  # number of training examples.
n_validation = mnist.validation.num_examples  # number of validation examples.
n_test = mnist.test.num_examples  # number of test examples.
n_input = 784  # number of inputs (28x28 pixels).
n_hidden1 = 512  # hidden layer 1.
n_hidden2 = 256  # hidden layer 2.
n_hidden3 = 128  # hidden layer 3.
n_output = 10  # outputs (digits 0-9).
learning_rate = 1e-4  # learning rate.
n_iterations = 5000  # number of training iterations.
batch_size = 128  # batch size.
dropout = 0.5  # dropout keep probability of 50%.
X = tf.placeholder("float", [None, n_input])  # placeholder for the input X.
Y = tf.placeholder("float", [None, n_output])  # placeholder for the output Y.
# Network topology:
# X (input) -> (hidden layer 1 -> hidden layer 2 -> hidden layer 3) (processing) -> Y (output).
keep_prob = tf.placeholder(tf.float32)  # keep probability used by the dropout layer.
weights = {
    'w1': tf.Variable(tf.truncated_normal([n_input, n_hidden1], stddev=0.1)),
    'w2': tf.Variable(tf.truncated_normal([n_hidden1, n_hidden2], stddev=0.1)),
    'w3': tf.Variable(tf.truncated_normal([n_hidden2, n_hidden3], stddev=0.1)),
    'out': tf.Variable(tf.truncated_normal([n_hidden3, n_output], stddev=0.1)),
}  # weight tensors defining the network structure.
biases = {
    'b1': tf.Variable(tf.constant(0.1, shape=[n_hidden1])),
    'b2': tf.Variable(tf.constant(0.1, shape=[n_hidden2])),
    'b3': tf.Variable(tf.constant(0.1, shape=[n_hidden3])),
    'out': tf.Variable(tf.constant(0.1, shape=[n_output]))
}  # bias variables for each layer.
layer_1 = tf.add(tf.matmul(X, weights['w1']), biases['b1'])  # layer 1.
layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])  # layer 2.
layer_3 = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])  # layer 3.
layer_drop = tf.nn.dropout(layer_3, keep_prob)  # dropout layer.
output_layer = tf.matmul(layer_drop, weights['out']) + biases['out']  # output layer; dropout is disabled at eval time by feeding keep_prob=1.0 (the original fed layer_3 here, leaving layer_drop unused).
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=output_layer))  # cross-entropy loss between the labels and output_layer.
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)  # Adam optimization step.
correct_pred = tf.equal(tf.argmax(output_layer, 1), tf.argmax(Y, 1))  # marks predictions that match the labels.
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))  # accuracy.
def initRecognize(img):
    init = tf.global_variables_initializer()  # initialize the graph variables.
    sess = tf.Session()  # create a session.
    sess.run(init)  # run the initializer.
    for i in range(n_iterations):  # training loop.
        batch_x, batch_y = mnist.train.next_batch(batch_size)  # fetch the next training batch (x and y).
        sess.run(train_step, feed_dict={X: batch_x, Y: batch_y, keep_prob: dropout})  # run one training step.
        if i % 100 == 0:  # report progress every 100 iterations.
            minibatch_loss, minibatch_accuracy = sess.run([cross_entropy, accuracy], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0})  # evaluate loss and accuracy on the current batch.
            print("Iteration", str(i), "\t| Loss =", str(minibatch_loss), "\t| Accuracy =", str(minibatch_accuracy))
    prediction = sess.run(tf.argmax(output_layer, 1), feed_dict={X: [img], keep_prob: 1.0})  # run recognition on the image and return the predicted digit.
    prevision = np.squeeze(prediction)
    print("Predicted digit:", prevision)
    return prevision
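# Hedged helper showing one way to shape an input for initRecognize: the network
# expects a flat vector of 784 floats (a 28x28 grayscale digit, as in MNIST).
# Pillow is an extra dependency and this preprocessing is an assumption; a real
# captcha would first need the digit isolated and binarized.
def load_digit(path):
    from PIL import Image
    img = Image.open(path).convert('L').resize((28, 28))
    return (np.asarray(img, dtype=np.float32) / 255.0).reshape(784)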
``` |
{
"source": "joandesonandrade/lancap",
"score": 3
} |
#### File: lancap/util/audio.py
```python
import subprocess as sp
import tempfile as tmp
import os
import wave
import math
import audioop
class coversaoFLAC(object):
def __init__(self,audio, incluir_antes=0.25, incluir_depois=0.25):
self.audio = audio
self.incluir_antes = incluir_antes
self.incluir_depois = incluir_depois
def __call__(self,regiao):
inicio, fim = regiao
inicio = max(0, (inicio - self.incluir_antes))
fim += self.incluir_depois
temporario = tmp.NamedTemporaryFile(suffix='.flac', delete=False)
if not os.path.isfile(self.audio):
            raise Exception('Audio file not found.')
comando = ["ffmpeg",
"-ss", str(inicio),
"-t", str(fim - inicio),
"-y", "-i", self.audio,
"-loglevel", "error",
temporario.name
]
usarShell = True if os.name == "nt" else False
sp.check_output(comando, shell=usarShell)
dadosDoArquivoDeFlac = temporario.read()
temporario.close()
os.unlink(temporario.name)
#os.unlink(self.audio)
return dadosDoArquivoDeFlac
class asyncAudio(object):
def percentil(arr, porcentagem):
arr = sorted(arr)
index = (len(arr) - 1) * porcentagem
floor = math.floor(index)
ceil = math.ceil(index)
if floor == ceil:
return arr[int(index)]
valor_pequeno = arr[int(floor)] * (ceil - index)
valor_grande = arr[int(ceil)] * (index - floor)
return valor_pequeno + valor_grande
def buscandoRegiaosDeFala(self,audio,larguraDoFrame=4096,minTamanhoDaRegiao=0.5,maxTamanhoDaRegiao=6):
leitor = wave.open(audio)
larguraDaAmostra = leitor.getsampwidth()
taxa = leitor.getframerate()
numeroDeCanal = leitor.getnchannels()
pedassoDaDuracao = float(larguraDoFrame) / taxa
numeroDePedassos = int(math.ceil(leitor.getnframes() * 1.0 / larguraDoFrame))
energias = []
for i in range(numeroDePedassos):
pedasso = leitor.readframes(larguraDoFrame)
energias.append(audioop.rms(pedasso,larguraDaAmostra * numeroDeCanal))
limite = asyncAudio.percentil(energias,0.2)
tempoGasto = 0
regioes = []
regiaoDeInicio = None
for energia in energias:
estaEmSilencio = limite >= energia
maxExcedido = regiaoDeInicio and tempoGasto - regiaoDeInicio >= maxTamanhoDaRegiao
if (maxExcedido or estaEmSilencio) and regiaoDeInicio:
if tempoGasto - regiaoDeInicio >= minTamanhoDaRegiao:
regioes.append((regiaoDeInicio,tempoGasto))
regiaoDeInicio = None
elif (not regiaoDeInicio) and (not estaEmSilencio):
regiaoDeInicio = tempoGasto
tempoGasto += pedassoDaDuracao
return regioes
def extrairAudioDeUmVideo(self,video,canals=1,taxa=16000):
        print('[!] Trying to extract audio from the video...')
temporario = tmp.NamedTemporaryFile(suffix='.wav',delete=False)
if not os.path.isfile(video):
            raise Exception('Video file not found.')
comando = ['ffmpeg',
'-y',
'-i', video,
'-ac', str(canals),
'-ar', str(taxa),
'-loglevel', 'error',
temporario.name
]
usarShell = True if os.name == "nt" else False
sp.check_output(comando,stdin=open(os.devnull),shell=usarShell)
        print('[+] Audio saved to -->', temporario.name)
return temporario.name,taxa
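# End-to-end sketch of the pipeline formed by the two classes above: extract the
# audio track from a video, detect speech regions, then convert each region to
# FLAC bytes (e.g. to feed a speech-recognition API). The video path is a placeholder.
def example_pipeline(video='input.mp4'):
    helper = asyncAudio()
    audio, rate = helper.extrairAudioDeUmVideo(video)
    regions = helper.buscandoRegiaosDeFala(audio)
    converter = coversaoFLAC(audio)
    return [converter(region) for region in regions]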
```
#### File: lancap/util/configuracoes.py
```python
import json
class config:
def __init__(self):
with open('config.json','rt') as f:
r = f.read()
f.close()
self.configuracao = r
def obterLiguagemDeDestino(self):
resposta = json.loads(self.configuracao)
return resposta['destinoLinguagem']
def obterFonteDeLinguagem(self):
resposta = json.loads(self.configuracao)
return resposta['fonteLinguagem']
def obterFormatoDoArquivoDeLegenda(self):
resposta = json.loads(self.configuracao)
return resposta['formatoLegenda']
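# Usage sketch: a config.json file is expected next to the script with at least
# the three keys read above; the values shown are plausible examples, e.g.
# {"destinoLinguagem": "pt", "fonteLinguagem": "en", "formatoLegenda": "srt"}
def example_usage():
    cfg = config()
    return (cfg.obterFonteDeLinguagem(),
            cfg.obterLiguagemDeDestino(),
            cfg.obterFormatoDoArquivoDeLegenda())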
``` |
{
"source": "joandesonandrade/nebulosa",
"score": 3
} |
#### File: nebulosa/src/api.py
```python
from flask import Flask, request
from flask_cors import CORS
import json
import requests as rq
app = Flask(__name__)
CORS(app)
LOGS_PATH = 'logs/'
URL = 'http://ip-api.com/json/{}'
host = '127.0.0.1'
port = 8888
def maps(data):
info = []
src = [x for x in data if x != "0"]
locations = []
ips = []
for ip in src:
if ip not in ips:
req = rq.get(URL.format(ip))
try:
jsonData = req.json()
if jsonData['status'] == "success":
region = jsonData['city'] + ' - ' + jsonData['regionName'] + ', ' + jsonData['country'] + ', ' + jsonData['countryCode']
coords = [jsonData['lat'], jsonData['lon']]
if region not in locations:
info.append([region, coords, jsonData['isp']])
locations.append(region)
ips.append(ip)
except:
continue
return json.dumps({'success': 1, 'result': info})
@app.route('/api', methods=['GET'])
def Api():
req = request.args
with open(LOGS_PATH + 'logs.csv', 'rt') as rt:
read = rt.read().split('\n')
rt.close()
ndata = [x.split(',') for x in read]
try:
qnt = int(req['qnt'])
except IndexError:
qnt = 50
try:
qry = req['query']
except IndexError:
qry = "plot"
data = []
for i, x in enumerate(ndata):
if i > (len(ndata) - qnt): # offset
if x[0] is not '':
data.append(x)
type = [int(x[0]) for x in data]
src = [x[1] for x in data]
if qry == "plot":
return json.dumps({'success': 1, 'data': [sum(type)]})
elif qry == "maps":
return maps(src)
else:
        return json.dumps({'success': 0})
def start():
app.run(host=host, port=port, debug=False)
```
#### File: nebulosa/src/intercept.py
```python
import datetime
from threading import Thread
import pcapy
from impacket.ImpactDecoder import LinuxSLLDecoder, EthDecoder
import netifaces as ni
class DecoderThread(Thread):
def __init__(self, pcapObj=None, FilePath=None, Interface=None):
datalink = pcapObj.datalink()
self.FilePath = FilePath
self.Interface = Interface
if pcapy.DLT_EN10MB == datalink:
self.decoder = EthDecoder()
elif pcapy.DLT_LINUX_SLL == datalink:
self.decoder = LinuxSLLDecoder()
else:
            raise Exception("Unsupported datalink type: %s" % datalink)
self.pcap = pcapObj
Thread.__init__(self)
def run(self):
self.pcap.loop(0, self.packetHandler)
def display_hex(self, pkt):
return pkt.get_data_as_string()
def packetHandler(self, hdr, data):
p = self.decoder.decode(data)
ip = p.child()
tcp = ip.child()
payload = self.display_hex(p)
try:
srcPort = tcp.get_th_sport()
except AttributeError:
srcPort = 0
try:
dstPort = tcp.get_th_dport()
except AttributeError:
dstPort = 0
if dstPort > 0:
sumBytePayload = sum(payload) / dstPort
lenPayload = len(payload) / dstPort
else:
sumBytePayload = sum(payload)
lenPayload = len(payload)
try:
protocol = tcp.protocol
except AttributeError:
return
io = 1#input
if protocol is None:#ARP
return
#protocol 17 = UDP
if protocol == 1:#ICMP
if tcp.get_icmp_num_addrs() > 0:
io = 0#output
if protocol == 6:#TCP
myAddr = ni.ifaddresses(self.Interface)[ni.AF_INET][0]['addr']
if myAddr != ip.get_ip_dst():
io = 0#output
#Protocol|I/O|SrcPort|DstPort|SumPayload|LenPayload
LOGS = str(protocol) + '|' + str(io) + '|' + str(srcPort) + '|' + str(dstPort) + '|' + str(sumBytePayload) + '|' +\
str(lenPayload) + '\n'
with open(self.FilePath, 'a') as wf:
wf.write(LOGS)
wf.close()
class intercept:
def __init__(self, type=None, interface=None):
try:
if int(type) == 1:
type = 'normal'
elif int(type) == 2:
type = 'attack'
else:
type = 'normal'
print('Type was set to normal default.')
except ValueError:
raise('Invalid type [ Select number 1 to normal or 2 to attack ]')
self.interface = interface
self.type = type
self.fileName = None
self.timeLog = None
self.path = "logs/" + self.type + "/"
self.net = None
self.mask = None
self.datalink = None
def saveToLog(self):
now = datetime.datetime.now()
self.timeLog = str(now.day) + "-" + str(now.month) + "-" + str(now.year) + "_" + str(now.hour) + \
":" + str(now.minute) + ".log"
self.fileName = self.type + "-" + self.timeLog
return self.path + self.fileName
def start(self):
absolutePathFile = self.saveToLog()
cap = pcapy.open_live(self.interface, 65536, 0, 100)
#cap.setfilter(r'ip proto \tcp')
self.net = cap.getnet()
self.mask = cap.getmask()
self.datalink = cap.datalink()
DecoderThread(cap, absolutePathFile, self.interface).start()
``` |
{
"source": "joandesonandrade/spyZap",
"score": 3
} |
#### File: joandesonandrade/spyZap/spyZap.py
```python
from selenium import webdriver
from time import sleep
import selenium.common.exceptions
import sys
from datetime import datetime
try:
amigo = sys.argv[1]
except IndexError:
    raise Exception('specify the target chat name')
url = 'https://web.whatsapp.com'
Firefox = webdriver.Firefox()
Firefox.get(url)
def registrarData(info):
online = 1 if info.lower() == 'online' else 0
data_atual = datetime.now()
data = str(data_atual.day)+'/'+str(data_atual.month)+'/'+str(data_atual.year)
hora = data_atual.hour
minuto = data_atual.minute
segundos = data_atual.second
dados = '"'+str(sys.argv[1])+'",'+str(online)+',"'+data+'",'\
+str(hora)+','+str(minuto)+','+str(segundos)
with open('data/spyZap.csv','a') as f:
f.write('\n'+dados)
f.close()
return True
def gerarLogs(info):
data_e_hora_atuais = datetime.now()
registrarData(info)
info = '['+str(data_e_hora_atuais)+'] ' + info
print(info)
with open('logs','a') as f:
f.write(info)
f.close()
#Firefox.save_screenshot('screenshot/'+info+'.png')
return True
while True:
if not 'landing-title' in Firefox.page_source:
break
print('[+] spyZap authenticated')
element = None
while element == None:
try:
element = Firefox.find_element_by_class_name('_1WliW')
except selenium.common.exceptions.NoSuchElementException:
continue
while True:
try:
element.click()
break
except selenium.common.exceptions.ElementNotInteractableException:
continue
while True:
if '_2S1VP' in Firefox.page_source:
        break
sleep(2)
'''user = None
while user == None:
if not '_2S1VP' in Firefox.page_source:
Firefox.find_element_by_class_name('_1WliW').click()
try:
user = Firefox.find_element_by_class_name('_2S1VP').text
except selenium.common.exceptions.StaleElementReferenceException:
continue
print('[+] Usuário: '+user)'''
while True:
if '_2S1VP' in Firefox.page_source:
break
Firefox.find_element_by_class_name('_1aTxu').click()
while True:
if '_2wP_Y' in Firefox.page_source:
break
listaConversas = None
while listaConversas == None:
listaConversas = Firefox.find_elements_by_class_name('_2wP_Y')
for conversa in listaConversas:
try:
chat = conversa.find_element_by_class_name('_1wjpf').text
except selenium.common.exceptions.StaleElementReferenceException:
continue
if amigo in chat:
print('click: '+chat)
conversa.find_element_by_tag_name('div').click()
break
print('[+] Chat selected... ['+chat+']')
sender = None
while sender == None:
sender = Firefox.find_element_by_class_name('_1Plpp')
sender.click()
listaMensagens = None
while listaMensagens == None:
listaMensagens = Firefox.find_elements_by_class_name('vW7d1')
divTexto = None
while divTexto == None:
divTexto = Firefox.find_element_by_class_name('_3zb-j')
def obterMensagens():
textos = []
for mensagem in Firefox.find_elements_by_class_name('vW7d1'):
try:
envio = mensagem.find_element_by_class_name('Tkt2p').find_element_by_class_name('copyable-text').get_attribute('data-pre-plain-text')
texto = mensagem.find_element_by_class_name('Tkt2p').find_element_by_class_name('selectable-text').text
#texto = tratamento.removerEmoji(texto)
#texto = tratamento.removerLink(texto)
if envio != None:
envio = str(envio).split('2019]')
envio = envio[1].replace(':','')
envio = envio[1:(len(envio) - 1)]
if texto != '':
textos.append([envio,texto])
except selenium.common.exceptions.NoSuchElementException:
continue
except selenium.common.exceptions.NoSuchAttributeException:
print('NoSuchAttributeException')
continue
except selenium.common.exceptions.StaleElementReferenceException:
print('StaleElementReferenceException')
continue
return textos
while True:
sleep(10)
try:
online = Firefox.find_element_by_xpath('//*[@title="online"]')
if online is not None:
gerarLogs('Online')
except selenium.common.exceptions.NoSuchElementException:
gerarLogs('Offline')
``` |
{
"source": "JoanDGG/Maat-Teey",
"score": 4
} |
#### File: Maat-Teey/Python version/Asistentes.py
```python
from Enemigos import Enemigo
import random
class Asistente(Enemigo):
def __init__(self, salud: int, fuerza:int, resistencia:int, carisma:int,
inteligencia:int, sabiduria:int, nombre:str, condicion:dict,
dropeo:str, categoria:str, rango:int, cantidad:int, zona:str,
dueno, apodo:str):
super().__init__(salud, fuerza, resistencia, carisma, inteligencia,
sabiduria, nombre, condicion, dropeo, categoria,
rango, cantidad, zona)
self.dueno = dueno
self.apodo = apodo
def bautizo(self):
archivo = open("Nombres.txt", "r")
texto = archivo.read()
nombres = texto.split(",")
archivo.close()
nombre = random.choice(nombres).split()[0].replace('"', '')
self.apodo = nombre
def is_ded(self):
self.dueno.asistentes.remove(self)
        mensaje = input("Write a farewell message "
                        + "(Enter to continue)\n")
if(mensaje == ""):
mensaje = "F"
        print(f"\n\tFarewell... {self.nombre}... {mensaje}\n")
self.dueno.in_memoriam.update({self.apodo: mensaje})
if(self.salud <= 0):
self.dropear()
self.salud = 0
return True
def __str__(self):
salud_index = super().__str__().find("Salud")
dropeos_index = super().__str__().find("Dropeos")
texto = (f"\n{self.apodo:.^50} \n "
+ super().__str__()[salud_index:dropeos_index - 4]
+ f" | Especie: {self.nombre} \n Dueno: {self.dueno.nombre} \n "
+ super().__str__()[dropeos_index:])
return texto
# =============================================================================
#compa = Asistente(15, 14, 13, 10, 11, 15, "Aguila", "Saludable", "%Esencia velocidad II/Esencia sabiduria II", "Animal", 3, 1, "Cabana", )
#print(compa)
# =============================================================================
```
#### File: Maat-Teey/Python version/Objetos.py
```python
lecturas = ["Nota de consejo", "Fragmento de libro de secretos",
"Archivo de doctor", "Manual de supervivencia"]
class Objeto:
def __init__(self, nombre:str, boosteo:int, estadistica:str, peso:float,
usos:int, cantidad:int, precio:int):
self.nombre = nombre
self.boosteo = boosteo
if(self.nombre not in lecturas):
self.boosteo = int(boosteo)
self.estadistica = estadistica
self.peso = peso
self.usos = usos
self.cantidad = cantidad
self.precio = precio
def __str__(self):
texto = (f"\n\t{self.nombre} \n "
+f"Boosteo: {self.boosteo} \t| Estadistica: {self.estadistica}"
+f"\n Peso: {self.peso} \t| Usos: {self.usos} "
+f"\t| Precio: {self.precio}")
return texto
# =============================================================================
#obj = Objeto("Wea", 0, "tu caca", 3.0, 8, 1, -3)
#obj = Objeto("Nota de consejo", "uhhhhhhh", "tu caca", 3.0, 8, 1, -3)
#print(obj)
# =============================================================================
```
#### File: Maat-Teey/Python version/Pruebas.py
```python
def hola(lista, texto, dic):
print("Queso")
lista=[1]
texto = "hola"
dic = {"hola":1}
suma = (1+
2) + 4
if(1
> 0):
print("egvhjkl")
hola(lista,
texto,
dic)
print(suma)
``` |
{
"source": "JoanDM/macos-mouse-and-keyboard-automator",
"score": 3
} |
#### File: macos-mouse-and-keyboard-automator/scripts/form_filler.py
```python
from pynput.keyboard import Key, Listener
from automator_class import Automator
def run_form_filler():
automator = Automator()
def on_press(key):
# print('{0} pressed'.format(
# key))
check_key(key)
def on_release(key):
# print('{0} release'.format(
# key))
if key == Key.esc:
# Stop listener
return False
def check_key(key):
if str(key) == "'a'":
automator.erase_character()
automator.type("Type something for me! :)")
# Collect events until released
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
if __name__ == "__main__":
run_form_filler()
```
#### File: macos-mouse-and-keyboard-automator/scripts/screenshot_taker.py
```python
from automator_class import Automator
from config import _stored_mouse_positions_dir_pathlib
FILE_PATH = _stored_mouse_positions_dir_pathlib / "mouse_positions_example.txt"
IDLE_TIME = 5
def run_infinite_mouse_click():
automator = Automator()
print(f"\nStarted automated process!!\n" f"To stop the process use ctrl+C...\n")
# screenshot_top_left_corner = (129, 187)
# screenshot_bottom_right_corner = (2049, 1266)
automator.idle_time(IDLE_TIME)
try:
for i in range(90):
automator.hold_cmd_key()
automator.press_right_arrow()
automator.idle_time(2)
automator.take_screenshot_with_coordinates(
129, 187, 2049 - 129, 1266 - 187, f"{str(i).zfill(8)}"
)
automator.idle_time(1)
except KeyboardInterrupt:
        print("\nProcess interrupted successfully")
if __name__ == "__main__":
run_infinite_mouse_click()
``` |
{
"source": "joan-domingo/futupolis",
"score": 3
} |
#### File: joan-domingo/futupolis/antisocial.py
```python
import time
import brickpi3
import random
BP = brickpi3.BrickPi3()
POWER = 50
MIN_DIST = 50 #cm
# call this function to turn off the motors and exit safely.
def SafeExit():
# Unconfigure the sensors, disable the motors
# and restore the LED to the control of the BrickPi3 firmware.
BP.reset_all()
#Power motor A and D, forward or backward
def powerMotors():
#print('power motors', power)
BP.set_motor_power(BP.PORT_A + BP.PORT_D, POWER)
# delay for 0.02 seconds (20ms) to reduce the Raspberry Pi CPU load.
time.sleep(0.02)
#Steering motor C, turn left or right
def steeringMotor(position):
try:
target = BP.get_motor_encoder(BP.PORT_C) # read motor C's position
except IOError as error:
print(error)
#print("turn to ", position)
BP.set_motor_position(BP.PORT_C, position)
# delay for 0.02 seconds (20ms) to reduce the Raspberry Pi CPU load.
time.sleep(0.02)
def setUpBrickPi():
# setup motors
try:
BP.offset_motor_encoder(BP.PORT_A, BP.get_motor_encoder(BP.PORT_A))
BP.offset_motor_encoder(BP.PORT_D, BP.get_motor_encoder(BP.PORT_D))
except IOError as error:
print(error)
# make sure voltage is high enough
if BP.get_voltage_battery() < 7:
print("Battery voltage below 7v, too low to run motors reliably. Exiting.")
SafeExit()
# setup ultrasonic sensor
BP.set_sensor_type(BP.PORT_1, BP.SENSOR_TYPE.EV3_ULTRASONIC_CM)
def getDistance():
return BP.get_sensor(BP.PORT_1) # distance is in CM from 0 to 255
def initUltrasonicSensor():
value = 0
while value == 0:
try:
value = getDistance()
except:
pass
time.sleep(1) # it takes a few seconds until the ultrasonic sensor starts emitting
def getPositionIndex(values):
    maxValue = max(values)
    return values.index(maxValue)
def turnAround():
#print('turn around')
tempDistance = 0
turningTime = 0
#random turn to left or right
randomValue = random.randint(0,1)
if randomValue == 0:
BP.set_motor_power(BP.PORT_A, POWER)
BP.set_motor_power(BP.PORT_D, -POWER)
else:
BP.set_motor_power(BP.PORT_A, -POWER)
BP.set_motor_power(BP.PORT_D, POWER)
while tempDistance < 60:
if turningTime > 2:
break
try:
tempDistance = getDistance()
turningTime += 0.1
time.sleep(0.1)
#print(tempDistance, turningTime)
except:
pass
def stop():
#print('stop')
BP.set_motor_power(BP.PORT_A + BP.PORT_D, 0)
time.sleep(0.02)
def moveForward():
#print('move forward')
powerMotors()
#print('check distance while going forward')
for x in range(30):
tempDist = getDistance()
if tempDist < MIN_DIST:
stop()
break
time.sleep(0.1)
stop()
time.sleep(0.02)
def runAway():
turnAround()
moveForward()
def main():
try:
setUpBrickPi()
initUltrasonicSensor()
while True:
distance = getDistance()
#print(distance)
if distance < MIN_DIST:
runAway()
time.sleep(0.02)
except KeyboardInterrupt:
SafeExit()
main()
``` |
{
"source": "joan-domingo/Raspberry-Pi-Controlled-Lego-Car",
"score": 3
} |
#### File: joan-domingo/Raspberry-Pi-Controlled-Lego-Car/keyboard-controlled.py
```python
import time
import brickpi3
import pygame
BP = brickpi3.BrickPi3()
# call this function to turn off the motors and exit safely.
def SafeExit():
# Unconfigure the sensors, disable the motors
# and restore the LED to the control of the BrickPi3 firmware.
BP.reset_all()
#Power motor A and B, forward or backward
def powerMotors(power):
try:
if power > 100:
power = 100
elif power < -100:
power = -100
except IOError as error:
print(error)
power = 0
#print('power', power)
BP.set_motor_power(BP.PORT_A + BP.PORT_B, power)
# delay for 0.02 seconds (20ms) to reduce the Raspberry Pi CPU load.
time.sleep(0.02)
#Steering motor C, turn left or right
def steeringMotor(position):
try:
target = BP.get_motor_encoder(BP.PORT_C) # read motor C's position
except IOError as error:
print(error)
#print("turn to ", position)
BP.set_motor_position(BP.PORT_C, position)
# delay for 0.02 seconds (20ms) to reduce the Raspberry Pi CPU load.
time.sleep(0.02)
def isMovingLeft(value, events):
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
return 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_a:
return 0
return value
def isMovingRight(value, events):
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_d:
return 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_d:
return 0
return value
def isMovingBackward(value, events):
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
return 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_s:
return 0
return value
def isMovingForward(value, events):
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
return 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
return 0
return value
def isImmediateStop(events):
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
return 0
return 0
def initBrickPi():
# make sure voltage is high enough
if BP.get_voltage_battery() < 7:
print("Battery voltage below 7v, too low to run motors reliably. Exiting.")
SafeExit()
try:
BP.offset_motor_encoder(BP.PORT_A, BP.get_motor_encoder(BP.PORT_A))
BP.offset_motor_encoder(BP.PORT_B, BP.get_motor_encoder(BP.PORT_B))
BP.offset_motor_encoder(BP.PORT_C, BP.get_motor_encoder(BP.PORT_C))
except IOError as error:
print(error)
BP.set_motor_power(BP.PORT_C, BP.MOTOR_FLOAT)
BP.set_motor_position(BP.PORT_C, 0)
def initPygame():
pygame.init()
screen = pygame.display.set_mode((200, 100))
clock = pygame.time.Clock()
def main():
try:
initBrickPi()
initPygame()
forward = 0
backward = 0
right = 0
left = 0
speed = 0
turn = 0
while True:
events = pygame.event.get()
forward = isMovingForward(forward, events)
backward = isMovingBackward(backward, events)
right = isMovingRight(right, events)
left = isMovingLeft(left, events)
immediateStop = isImmediateStop(events)
if forward:
if speed > -100:
speed = speed - 1
else:
speed = -100
elif backward:
if speed < 100:
speed = speed + 1
else:
speed = 100
else:
if speed > 0:
speed = speed - 1
elif speed < 0:
speed = speed + 1
else:
speed = 0
if immediateStop:
powerMotors(0)
else:
powerMotors(speed)
if right:
if turn < 100:
turn = turn +10
else:
turn = 100
elif left:
if turn > -100:
turn = turn -10
else:
turn = -100
else:
if turn > 0:
turn = turn -10
elif turn < 0:
turn = turn +10
else:
turn = 0
steeringMotor(turn)
except KeyboardInterrupt:
SafeExit()
main()
``` |
{
"source": "JoanEliot/ayewa",
"score": 2
} |
#### File: ayewa/templatetags/ayewa_tags.py
```python
from django import template
from wagtail.core.models import Page, Site
register = template.Library()
# https://docs.djangoproject.com/en/1.9/howto/custom-template-tags/
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the Foundation menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag('tags/index_page_menu.html', takes_context=True)
def index_page_menu(context, parent, calling_page=None):
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.nav_description = 'Foobar'
# We don't directly check if calling_page is None since the template
# engine can pass an empty string to calling_page
# if the variable passed as calling_page does not exist.
return {
'calling_page': calling_page,
'menuitems': [],
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
``` |
{
"source": "JoanFM/mmfashion",
"score": 3
} |
#### File: models/roi_pool/roi_pooling.py
```python
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from ..registry import ROIPOOLING
@ROIPOOLING.register_module
class RoIPooling(nn.Module):
def __init__(self,
pool_plane,
inter_channels,
outchannels,
crop_size=7,
img_size=(224, 224),
num_lms=8,
roi_size=2):
super(RoIPooling, self).__init__()
self.maxpool = nn.MaxPool2d(pool_plane)
self.linear = nn.Sequential(
nn.Linear(num_lms * inter_channels, outchannels), nn.ReLU(True),
nn.Dropout())
self.inter_channels = inter_channels
self.outchannels = outchannels
self.num_lms = num_lms
self.crop_size = crop_size
assert img_size[0] == img_size[
1], 'img width should equal to img height'
self.img_size = img_size[0]
self.roi_size = roi_size
self.a = self.roi_size / float(self.crop_size)
self.b = self.roi_size / float(self.crop_size)
def forward(self, features, landmarks):
"""batch-wise RoI pooling.
Args:
features(tensor): the feature maps to be pooled.
landmarks(tensor): crop the region of interest based on the
landmarks(bs, self.num_lms).
"""
batch_size = features.size(0)
# transfer landmark coordinates from original image to feature map
landmarks = landmarks / self.img_size * self.crop_size
landmarks = landmarks.view(batch_size, self.num_lms, 2)
ab = [np.array([[self.a, 0], [0, self.b]]) for _ in range(batch_size)]
ab = np.stack(ab, axis=0)
ab = torch.from_numpy(ab).float().cuda()
size = torch.Size(
(batch_size, features.size(1), self.roi_size, self.roi_size))
pooled = []
for l in range(self.num_lms):
tx = -1 + 2 * landmarks[:, l, 0] / float(self.crop_size)
ty = -1 + 2 * landmarks[:, l, 1] / float(self.crop_size)
t_xy = torch.stack((tx, ty)).view(batch_size, 2, 1)
theta = torch.cat((ab, t_xy), 2)
flowfield = nn.functional.affine_grid(theta, size)
one_pooled = nn.functional.grid_sample(
features,
flowfield.to(torch.float32),
mode='bilinear',
padding_mode='border')
one_pooled = self.maxpool(one_pooled).view(batch_size,
self.inter_channels)
pooled.append(one_pooled)
pooled = torch.stack(pooled, dim=1).view(batch_size, -1)
pooled = self.linear(pooled)
return pooled
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
``` |
{
"source": "JoanFM/mmlspark",
"score": 3
} |
#### File: io/http/HTTPFunctions.py
```python
from pyspark import SparkContext
from pyspark.sql.types import StructType
from pyspark.sql.functions import col, udf
import json
def requests_to_spark(p):
return {
"requestLine": {
"method": p.method,
"uri": p.url},
"headers": [{"name": name, "value": value} for name, value in p.headers.items() if name != "Content-Length"],
"entity": None if p.body is None else (
{"content": p.body,
"isChunked": False,
"isRepeatable": True,
"isStreaming": False}
)
}
# SparkContext._active_spark_context._jvm.com.microsoft.ml.spark.io.http.HTTPRequestData.schema().json()
# TODO figure out why we cannot just grab from SparkContext on databricks
HTTPRequestDataType = StructType().fromJson(json.loads(
'{"type":"struct","fields":[{"name":"requestLine","type":{"type":"struct","fields":[{"name":"method",'
'"type":"string","nullable":true,"metadata":{}},{"name":"uri","type":"string","nullable":true,"metadata":{}},'
'{"name":"protocolVersion","type":{"type":"struct","fields":[{"name":"protocol","type":"string",'
'"nullable":true,"metadata":{}},{"name":"major","type":"integer","nullable":false,"metadata":{}},'
'{"name":"minor","type":"integer","nullable":false,"metadata":{}}]},"nullable":true,"metadata":{}}]},'
'"nullable":true,"metadata":{}},{"name":"headers","type":{"type":"array","elementType":{"type":"struct",'
'"fields":[{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"value","type":"string",'
'"nullable":true,"metadata":{}}]},"containsNull":true},"nullable":true,"metadata":{}},{"name":"entity",'
'"type":{"type":"struct","fields":[{"name":"content","type":"binary","nullable":true,"metadata":{}},'
'{"name":"contentEncoding","type":{"type":"struct","fields":[{"name":"name","type":"string","nullable":true,'
'"metadata":{}},{"name":"value","type":"string","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}},'
'{"name":"contentLength","type":"long","nullable":true,"metadata":{}},{"name":"contentType",'
'"type":{"type":"struct","fields":[{"name":"name","type":"string","nullable":true,"metadata":{}},'
'{"name":"value","type":"string","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}},'
'{"name":"isChunked","type":"boolean","nullable":false,"metadata":{}},'
'{"name":"isRepeatable","type":"boolean","nullable":false,"metadata":{}},'
'{"name":"isStreaming","type":"boolean","nullable":false,"metadata":{}}]},"nullable":true,"metadata":{}}]}'
))
def http_udf(func):
def composition(*args):
return requests_to_spark(func(*args).prepare())
return udf(composition, HTTPRequestDataType)
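# Hedged usage sketch: wrap a function that builds a requests.Request into a UDF
# producing HTTPRequestData rows; the column name "url" is illustrative.
def example_get_udf():
    import requests
    return http_udf(lambda url: requests.Request("GET", url))
# e.g. df.withColumn("request", example_get_udf()(col("url")))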
```
#### File: mmlspark/recommendation/SARModel.py
```python
import sys
if sys.version >= '3':
basestring = str
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from mmlspark.core.schema.Utils import *
from mmlspark.recommendation._SARModel import _SARModel as sarModel
@inherit_doc
class SARModel(sarModel):
def recommendForAllUsers(self, numItems):
return self._call_java("recommendForAllUsers", numItems)
```
#### File: mmlspark/vw/test_vw.py
```python
import os
import unittest
import tempfile
import pyspark
from mmlspark.vw.VowpalWabbitClassifier import VowpalWabbitClassifier
from mmlspark.vw.VowpalWabbitRegressor import VowpalWabbitRegressor
from mmlspark.vw.VowpalWabbitFeaturizer import VowpalWabbitFeaturizer
from pyspark.sql.types import *
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.master("local[*]") \
.appName("_VW") \
.config("spark.jars.packages", "com.microsoft.ml.spark:mmlspark_2.11:" + os.environ["MML_VERSION"]) \
.config("spark.executor.heartbeatInterval", "60s") \
.getOrCreate()
sc = spark.sparkContext
class VowpalWabbitSpec(unittest.TestCase):
def save_model(self, estimator):
# create sample data
schema = StructType([StructField("label", DoubleType()),
StructField("text", StringType())])
data = pyspark.sql.SparkSession.builder.getOrCreate().createDataFrame([
(-1.0, "mountains are nice"),
( 1.0, "do you have the TPS reports ready?")], schema)
# featurize data
featurizer = VowpalWabbitFeaturizer(stringSplitInputCols=['text'])
featurized_data = featurizer.transform(data)
# train model
model = estimator.fit(featurized_data)
# write model to file and validate it's there
with tempfile.TemporaryDirectory() as tmpdirname:
modelFile = '{}/model'.format(tmpdirname)
model.saveNativeModel(modelFile)
self.assertTrue(os.stat(modelFile).st_size > 0)
def test_save_model_classification(self):
self.save_model(VowpalWabbitClassifier())
def test_save_model_regression(self):
self.save_model(VowpalWabbitRegressor())
if __name__ == "__main__":
result = unittest.main()
``` |
{
"source": "joanfont/laas",
"score": 3
} |
#### File: laas/lib/base.py
```python
import requests
from bs4 import BeautifulSoup
class ParserMixin:
BASE_URL = None
@classmethod
def get_raw_content(cls, url):
response = requests.get(url, headers={
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
})
return response.content
@classmethod
def get_soup(cls, raw_content):
return BeautifulSoup(raw_content, 'lxml')
```
#### File: lib/extractors/azlyrics.py
```python
from lib.extractors.base import BaseExtractor
class Extractor(BaseExtractor):
@classmethod
def parse(cls, soup):
song_div = soup.find('div', {'class': None, 'id': None})
return song_div.text
```
#### File: lib/seekers/azlyrics.py
```python
from urllib.parse import urlencode
from lib.entities import Song
from lib.seekers.base import BaseSeeker
class Seeker(BaseSeeker):
@classmethod
def parse(cls, soup):
results_table = soup.find('table', {'class': 'table table-condensed'})
result_items = results_table.find_all('td')
songs = map(cls.parse_item, result_items)
return list(songs)
@classmethod
def parse_item(cls, td):
td_link = td.find('a')
title, artist = cls.get_title_and_artist_from_td(td)
url = td_link.get('href')
return Song(title, artist, url)
@classmethod
def get_title_and_artist_from_td(cls, td):
bold_items = td.find_all('b')
if len(bold_items) >= 2:
song, artist, *_ = bold_items
return song.text, artist.text
else:
return None, None
@classmethod
def get_url(cls, query):
query_string = urlencode({
'q': query
})
return 'http://search.azlyrics.com/search.php?{query_string}'.format(query_string=query_string)
```
#### File: lib/seekers/base.py
```python
from lib.base import ParserMixin
from lib.entities import Song
from lib.helper import Helper
class BaseSeeker(ParserMixin):
@classmethod
def seek(cls, query):
url = cls.get_url(query)
raw_content = cls.get_raw_content(url)
soup = cls.get_soup(raw_content)
songs = cls.parse(soup)
if not Helper.array_of(songs, Song):
raise ValueError('parse() response must be an array of Song objects')
return songs
@classmethod
def parse(cls, soup):
raise NotImplementedError
@classmethod
def get_url(cls, query):
raise NotImplementedError
``` |
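`BaseSeeker.seek` is a template method: `get_url` builds the search URL, `get_raw_content` fetches it, `get_soup` parses the HTML, and the subclass's `parse` extracts `Song` objects. A hedged end-to-end sketch (needs network access and the requests/bs4 dependencies; the `Song` attribute names are assumed from its constructor order):
```python
from lib.seekers.azlyrics import Seeker

songs = Seeker.seek("smells like teen spirit")  # query string is illustrative
for song in songs[:5]:
    # Song(title, artist, url) -- attribute names assumed from the constructor
    print(song.title, "-", song.artist, "->", song.url)
```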
{
"source": "joangerard/ai-genre-recognition",
"score": 3
} |
#### File: gclass/core/neural_network.py
```python
import numpy as np
import scipy.optimize as opt
from .text import Text
"""
Responsible for handling neural network training, prediction, accuracy, etc.
"""
class NeuralNetwork:
def __init__(self):
self.dimi_1 = 25000
self.dimo_1 = 100
self.dimi_2 = 100
self.dimo_2 = 10
self.epsilon_theta = 0.12
self.theta1 = []
self.theta2 = []
self.lamb = 1.2
self.text = Text()
def predict_custom(self, data):
thetha_opt = self.text.read('media/trained/theta_opt_l12.txt')
theta1, theta2 = self.extract_thetas(thetha_opt, self.dimi_1, self.dimo_1, self.dimo_2)
ones = np.ones(1)
a1 = np.hstack((ones, data))
z2 = np.matmul(a1, theta1.T)
a2 = self.sigmoid_function(z2)
a2 = np.hstack((ones, a2))
z3 = np.matmul(a2, theta2.T)
a3 = self.sigmoid_function(z3)
return np.argmax(a3), a3
"""
Test the accuracy of test inputs.
"""
def accuracy(self, test_input, test_output):
thetha_opt = self.text.read('theta_opt_l12.txt')
theta1, theta2 = self.extract_thetas(thetha_opt, self.dimi_1, self.dimo_1, self.dimo_2)
accurates = 0
m = len(test_input)
for i in range(0, m):
ones = np.ones(1)
a1 = np.hstack((ones, test_input[i]))
z2 = np.matmul(a1, theta1.T)
a2 = self.sigmoid_function(z2) # 800, 100
a2 = np.hstack((ones, a2))
z3 = np.matmul(a2, theta2.T)
a3 = self.sigmoid_function(z3)
if test_output[i][np.argmax(a3)] == 1:
accurates += 1
return accurates/m
"""
For convenience we save the optimized weights to a file as a binary object, so we
only need to train the NN once and can then reload the weights wherever we need them.
"""
def fit(self, training_inputs, training_outputs):
self.theta1 = self.random_init_theta(self.dimi_1 + 1, self.dimo_1, self.epsilon_theta) # 100 x 25001
self.theta2 = self.random_init_theta(self.dimi_2 + 1, self.dimo_2, self.epsilon_theta) # 10 x 101
theta = np.concatenate((self.theta1, self.theta2), axis=None)
theta_opt = opt.fmin_cg(f=self.costFunction, x0=theta, fprime=self.gradientFunction,
args=(training_inputs, training_outputs, self.lamb, self.dimo_1, self.dimi_1, self.dimo_2), maxiter=50)
self.text.write('theta_opt_l12.txt', theta_opt)
# self.gradientCheck(theta, backprop_params, self.dimi_1, self.dimo_1, self.dimo_2, self.lamb, training_inputs, training_outputs)
# print("Cost Function", cf)
# def predict(self, theta_opt):
"""
This method was helpful for checking that the gradient is implemented correctly.
"""
def gradientCheck(self, theta, backprop_params, input_layer_size, hidden_layer_size, num_labels, lamb, training_inputs, training_outputs):
epsilon = 0.0001
n_elems = len(theta)
for i in range(10):
x = int(np.random.rand()*n_elems)
epsilon_vec = np.zeros((n_elems, 1))
epsilon_vec[x] = epsilon
cost_high = self.costFunction(theta + epsilon_vec.flatten(), training_inputs, training_outputs, lamb, hidden_layer_size, input_layer_size, num_labels)
cost_low = self.costFunction(theta - epsilon_vec.flatten(), training_inputs, training_outputs, lamb, hidden_layer_size, input_layer_size, num_labels)
aprox_grad = (cost_high - cost_low)/float(2 * epsilon)
print("Element: {0}. Numerical Gradient = {1:.9f}. BackProp Gradient = {2:.9f}.".format(x, aprox_grad, backprop_params[x]))
def extract_thetas(self, theta, input_layer_size, hidden_layer_size, num_labels):
theta1 = np.reshape(theta[:(hidden_layer_size * (input_layer_size + 1))], (hidden_layer_size, input_layer_size + 1))
theta2 = np.reshape(theta[(hidden_layer_size * (input_layer_size + 1)):], (num_labels, hidden_layer_size + 1))
return theta1, theta2
def gradientFunction(self, theta, training_inputs, training_outputs, lamb, hidden_layer_size, input_layer_size, num_labels):
theta1, theta2 = self.extract_thetas(theta, input_layer_size, hidden_layer_size, num_labels)
delta1 = np.zeros(theta1.shape)
delta2 = np.zeros(theta2.shape)
m = len(training_outputs)
for i in range(training_inputs.shape[0]):
ones = np.ones(1)
a1 = np.hstack((ones, training_inputs[i]))
z2 = np.matmul(a1, theta1.T)
a2 = self.sigmoid_function(z2) # 800, 100
a2 = np.hstack((ones, a2))
z3 = np.matmul(a2, theta2.T)
a3 = self.sigmoid_function(z3)
d3 = a3 - training_outputs[i]
z2 = np.hstack((ones, z2))
d2 = np.multiply(np.matmul(theta2.T, d3), self.sigmoid_derivate_function(z2).T)
delta1 = delta1 + d2[1:, np.newaxis] @ a1[np.newaxis, :]
delta2 = delta2 + d3[:, np.newaxis] @ a2[np.newaxis, :]
delta1[:, 1:] = 1/m * delta1[:, 1:] + lamb * theta1[:, 1:]/m # j != 0
delta1[:, 0] = 1/m * delta1[:, 0] # j == 0 (bias column, no regularization)
delta2[:, 1:] = 1/m * delta2[:, 1:] + lamb * theta2[:, 1:]/m
delta2[:, 0] = 1/m * delta2[:, 0]
print('Gradient function finishing... ')
return np.hstack((delta1.ravel(), delta2.ravel()))
def costFunction(self, theta, training_inputs, training_outputs, lamb, hidden_layer_size, input_layer_size, num_labels):
theta1 = np.reshape(theta[:(hidden_layer_size * (input_layer_size + 1))], (hidden_layer_size, input_layer_size + 1))
theta2 = np.reshape(theta[(hidden_layer_size * (input_layer_size + 1)):], (num_labels, hidden_layer_size + 1))
m = len(training_outputs)
ones = np.ones((m, 1))
a1 = np.hstack((ones, training_inputs))
a2 = self.sigmoid_function(np.matmul(a1, theta1.T)) # 800, 100
a2 = np.hstack((ones, a2))
h = self.sigmoid_function((np.matmul(a2, theta2.T)))
temp1 = np.multiply(training_outputs, np.log(h))
temp2 = np.multiply(1-training_outputs, np.log(1-h))
temp3 = np.sum(temp1+temp2)
sum1 = np.sum(np.sum(np.power(theta1[:,1:], 2), axis = 1))
sum2 = np.sum(np.sum(np.power(theta2[:,1:], 2), axis = 1))
val = np.sum(-(1/m)*temp3) + (sum1 + sum2) * lamb/(2 * m)
print('Cost function: ', val)
return val
def forward_propagation(self, a_1, theta1, theta2):
# forward propagation
a_1 = np.insert(a_1, 0, 1)
z_1 = np.matmul(theta1, a_1) # 100 x 1
a_2 = self.sigmoid_function(z_1)
a_2 = np.insert(a_2, 0, 1) # 101 x 1
z_2 = np.matmul(theta2, a_2) # 10 x 1
h = self.sigmoid_function(z_2)
def sigmoid_function(self, z):
return 1/(1 + np.exp(-z))
def sigmoid_derivate_function(self, z):
return np.multiply(self.sigmoid_function(z), 1 - self.sigmoid_function(z))
def random_init_theta(self, dimi, dimo, epsilon):
return np.random.rand(dimo, dimi)*2*epsilon - epsilon
```
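To make the value computed by `costFunction` concrete, here is a hedged numeric sketch of the same regularized cross-entropy on a toy 2-sample problem (all shapes and values are illustrative):
```python
import numpy as np

y = np.array([[1, 0], [0, 1]])          # one-hot targets, m = 2
h = np.array([[0.9, 0.1], [0.2, 0.8]])  # sigmoid outputs after forward pass
theta = np.array([[0.5, -0.3]])         # weights excluding the bias column
lamb, m = 1.2, len(y)

data_term = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / m
reg_term = lamb / (2 * m) * np.sum(theta ** 2)
print(data_term + reg_term)  # total cost J(theta), as in costFunction above
```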
#### File: gclass/core/text.py
```python
import pickle
"""
Responsible for saving/retrieving objects to/from a file as a binary (pickled) object.
"""
class Text:
def read(self, file_name):
file = open(file_name, 'rb')
data = pickle.load(file)
file.close()
return data
def write(self, file_name, data):
file = open(file_name, 'wb')
pickle.dump(data, file)
file.close()
``` |
{
"source": "joangerard/webots-thesis",
"score": 2
} |
#### File: controllers/pos-prediction/params.py
```python
import numpy as np
class Params:
def __init__(self,
init_x=1,
init_y=.75,
experiment_duration_steps=2000,
particles_number=200,
sigma_xy=0.001,
sigma_theta=2,
calculate_pred_error=False,
calculate_odo_error=False,
pred_error_file='data_pred_error.pckl',
pred_odo_file='data_odo_error.pckl',
go_straiht_move=False,
capturing_data=False,
global_localization=False):
self.MAX_SPEED = 6
self.TIME_STEP = 8
self.WHEEL_RADIUS = 0.05
self.SAMPLING_PERIOD = 10
self.MAX_X = 3
self.MAX_Y = 3
self.ENCODER_UNIT = 159.23
self.INIT_X = init_x
self.INIT_Y = init_y
self.INIT_ANGLE = np.pi
self.PRED_STEPS = 1
self.CAPTURING_DATA = capturing_data
self.MOVING_ROBOT_STEPS = 100
self.EXPERIMENT_DURATION_STEPS = experiment_duration_steps
self.CALCULATE_PRED_ERROR = calculate_pred_error
self.CALCULATE_ODO_ERROR = calculate_odo_error
self.PRED_ERROR_FILE = pred_error_file
self.ODO_ERROR_FILE = pred_odo_file
self.GO_STRAIGHT_MOVE = go_straiht_move
self.PARTICLES_NUMBER = particles_number
self.SIGMA_XY = sigma_xy
self.SIGMA_THETA = sigma_theta
self.GLOBAL_LOCALIZATION = global_localization
```
#### File: pos-prediction/particles_filter/robot_configuration.py
```python
class RobotConfiguration:
def __init__(self, x, y, theta):
self.x = x
self.y = y
self.theta = theta
```
#### File: pos-prediction/robot_movement/coordinate.py
```python
class Coordinate:
def __init__(self, x, y, theta):
self.x = x
self.y = y
self.theta = theta
def print_info(self):
print(self.x, self.y, self.theta)
```
#### File: pos-prediction/robot_movement/movement_controller.py
```python
import numpy as np
import random
class MovementController:
GOING_STRAIGHT = 0
TURNING_RIGHT = 1
TURNING_LEFT = 2
TURN_CAPACITY_MAX = 30
MAX_SPEED = 6
THRESHOLD_PROXIMITY = 0.15
THRESHOLD_PROB_TURN = 0.1
SENSOR_ERROR = 0.1
def __init__(self):
self.status = self.GOING_STRAIGHT
self.turn_capacity = 500
self.avoiding_obstacle = False
def calculate_velocity(self, distanceSensors):
# Process sensor data here
sensorValues = [distanceSensor.getValue() for distanceSensor in distanceSensors]
rightObstacle = sensorValues[0] < 0.15 or sensorValues[1] < 0.15
leftObstacle = sensorValues[6] < 0.15 or sensorValues[7] < 0.15
left_speed = .5 * self.MAX_SPEED
right_speed = .5 * self.MAX_SPEED
# avoid collision
if leftObstacle:
left_speed += .7 * self.MAX_SPEED
right_speed -= .7 * self.MAX_SPEED
elif rightObstacle:
left_speed -= .7 * self.MAX_SPEED
right_speed += .7 * self.MAX_SPEED
return left_speed, right_speed
def move_straight(self):
left_speed = .5 * self.MAX_SPEED
right_speed = .5 * self.MAX_SPEED
return left_speed, right_speed
def move_left(self):
left_speed = -.5 * self.MAX_SPEED
right_speed = .5 * self.MAX_SPEED
return left_speed, right_speed
def move_right(self):
left_speed = .5 * self.MAX_SPEED
right_speed = -.5 * self.MAX_SPEED
return left_speed, right_speed
def move_backwards(self):
left_speed = -.5 * self.MAX_SPEED
right_speed = -.5 * self.MAX_SPEED
return left_speed, right_speed
def calculate_velocity_random_move(self, distance_sensors):
left_speed = .5 * self.MAX_SPEED
right_speed = .5 * self.MAX_SPEED
# Process sensor data here
sensorValues = [distanceSensor.getValue() for distanceSensor in distance_sensors]
rightObstacle = sensorValues[0] < self.THRESHOLD_PROXIMITY or sensorValues[1] < self.THRESHOLD_PROXIMITY
leftObstacle = sensorValues[6] < self.THRESHOLD_PROXIMITY or sensorValues[7] < self.THRESHOLD_PROXIMITY
# The robot detected an obstacle so avoid it
if (rightObstacle or leftObstacle) and not self.avoiding_obstacle:
self.avoiding_obstacle = True
# The robot did not detect any obstacle so go ahead
elif self.avoiding_obstacle and not (rightObstacle or leftObstacle):
self.avoiding_obstacle = False
# avoid collision
if self.avoiding_obstacle:
# print("[INFO] - avoiding obstacle")
if leftObstacle:
left_speed += .7 * self.MAX_SPEED
right_speed -= .7 * self.MAX_SPEED
elif rightObstacle:
left_speed -= .7 * self.MAX_SPEED
right_speed += .7 * self.MAX_SPEED
# make a movement: go straight, turn right or turn left
else:
if self.status == self.GOING_STRAIGHT:
# print("[INFO] - going straight")
# with x% chance turn right or turn left
p = np.random.uniform(0, 1)
if 1 - self.THRESHOLD_PROB_TURN < p <= 1 - (self.THRESHOLD_PROB_TURN / 2):
# print("[INFO] - TURN RIGHT order")
self.status = self.TURNING_RIGHT
self.turn_capacity = np.random.randint(self.TURN_CAPACITY_MAX)
elif 1 - (self.THRESHOLD_PROB_TURN / 2) < p < 1:
# print("[INFO] - TURN LEFT order")
self.status = self.TURNING_LEFT
self.turn_capacity = np.random.randint(self.TURN_CAPACITY_MAX)
if self.status == self.TURNING_LEFT and self.turn_capacity > 0:
# print("[INFO]: turning left. Capacity: ", self.turn_capacity)
left_speed -= .7 * self.MAX_SPEED
right_speed += .7 * self.MAX_SPEED
self.turn_capacity -= 1
if self.status == self.TURNING_RIGHT and self.turn_capacity > 0:
# print("[INFO]: turning right. Capacity: ", self.turn_capacity)
left_speed += .7 * self.MAX_SPEED
right_speed -= .7 * self.MAX_SPEED
self.turn_capacity -= 1
# if turn right or left is finished then go straight again
if self.turn_capacity <= 0:
self.status = self.GOING_STRAIGHT
return left_speed, right_speed
```
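A hedged smoke test for `calculate_velocity` above using stub sensors (`FakeSensor` is an illustrative stand-in for Webots `DistanceSensor` objects; assumes the `MovementController` class above is in scope):
```python
class FakeSensor:
    def __init__(self, value):
        self._value = value
    def getValue(self):
        return self._value

controller = MovementController()
# 8 proximity readings; indices 0-1 face right, 6-7 face left
readings = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 0.1]  # obstacle on the left
sensors = [FakeSensor(v) for v in readings]
left, right = controller.calculate_velocity(sensors)
print(left, right)  # left wheel speeds up, right slows -> robot turns right
```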
#### File: pos-prediction/robot_movement/odometry.py
```python
from robot_movement.wheels_state import WheelsState
from robot_movement.coordinate import Coordinate
import numpy as np
class Odometry:
def __init__(self, pos_left, pos_right, x=0, y=0, angle=0):
self.increments_per_tour = 1000
# self.axis_wheel_ratio = 1.660
self.axis_wheel_ratio = 1.885
# self.axis_wheel_ratio = 1.935
self.wheel_diameter_left = 0.05
self.wheel_diameter_right = 0.05
self.scaling_factor = 1.01
# self.scaling_factor = .8662
self.wheels_state = WheelsState()
# angle adjustment to coordinates x, y
self.coordinate = Coordinate(x+0.01190, y-0.0001642675221653, angle - 0.08325221)
self.wheels_state.pos_left_prev = int(pos_left)
self.wheels_state.pos_right_prev = int(pos_right)
# print(pos_left, pos_right)
self.wheel_distance = self.axis_wheel_ratio * self.scaling_factor * (self.wheel_diameter_left
+ self.wheel_diameter_right) / 2
self.wheel_conversion_left = self.wheel_diameter_left * self.scaling_factor * np.pi / self.increments_per_tour
self.wheel_conversion_right = self.wheel_diameter_right * self.scaling_factor * np.pi / self.increments_per_tour
def track_step(self, pos_left, pos_right):
# self.wheel_distance = self.axis_wheel_ratio * self.scaling_factor * (self.wheel_diameter_left
# + self.wheel_diameter_right) / 2
# self.wheel_conversion_left = self.wheel_diameter_left * self.scaling_factor * np.pi / self.increments_per_tour
# self.wheel_conversion_right = self.wheel_diameter_right * self.scaling_factor * np.pi / self.increments_per_tour
delta_pos_left = int(pos_left - self.wheels_state.pos_left_prev)
delta_pos_right = int(pos_right - self.wheels_state.pos_right_prev)
delta_left = delta_pos_left * self.wheel_conversion_left
delta_right = delta_pos_right * self.wheel_conversion_right
delta_theta = (delta_right - delta_left) / self.wheel_distance
theta2 = self.coordinate.theta + delta_theta * 0.5
delta_x = (delta_left + delta_right) * 0.5 * np.cos(theta2)
delta_y = (delta_left + delta_right) * 0.5 * np.sin(theta2)
self.coordinate.x += delta_x
self.coordinate.y += delta_y
self.coordinate.theta += delta_theta
if self.coordinate.theta > np.pi:
self.coordinate.theta -= 2 * np.pi
if self.coordinate.theta < - np.pi:
self.coordinate.theta += 2 * np.pi
self.wheels_state.pos_left_prev = pos_left
self.wheels_state.pos_right_prev = pos_right
return self.coordinate, np.array([delta_x, delta_y, delta_theta])
``` |
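The core of `track_step` above is the classic differential-drive midpoint update: encoder deltas are converted to wheel arc lengths, the heading change is their difference over the axle length, and the position advances along the midpoint heading. A hedged stand-alone sketch with illustrative constants (not the calibrated values from the class):
```python
import numpy as np

ticks_per_rev = 1000
wheel_diam = 0.05                            # metres
wheel_dist = 0.09                            # axle length, metres (illustrative)
conv = wheel_diam * np.pi / ticks_per_rev    # metres per encoder tick

def dead_reckon_step(x, y, theta, d_ticks_left, d_ticks_right):
    d_left = d_ticks_left * conv
    d_right = d_ticks_right * conv
    d_theta = (d_right - d_left) / wheel_dist
    theta_mid = theta + 0.5 * d_theta        # midpoint heading
    x += 0.5 * (d_left + d_right) * np.cos(theta_mid)
    y += 0.5 * (d_left + d_right) * np.sin(theta_mid)
    return x, y, theta + d_theta

print(dead_reckon_step(0.0, 0.0, 0.0, 100, 120))  # gentle left arc
```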
{
"source": "joangines/transformers",
"score": 2
} |
#### File: examples/seq2seq/pack_dataset.py
```python
import argparse
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
finished_src, finished_tgt = [], []
new_src, new_tgt = "", ""
sorted_examples = list(sorted(zip(src_examples, tgt_examples), key=lambda x: len(x[0])))
def is_too_big(strang):
return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples):
cand_src = new_src + " " + src
cand_tgt = new_tgt + " " + tgt
if is_too_big(cand_src) or is_too_big(cand_tgt): # can't fit, finalize example
finished_src.append(new_src)
finished_tgt.append(new_tgt)
new_src, new_tgt = src, tgt
else: # can fit, keep adding
new_src, new_tgt = cand_src, cand_tgt
# flush the final accumulated example so the last pack is not dropped
if new_src:
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
save_path = Path(save_path)
save_path.mkdir(exist_ok=True)
for split in ["val", "test", "train"]:
src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
src_docs = list(Path(src_path).open().readlines())
tgt_docs = list(Path(tgt_path).open().readlines())
src, tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
print(f"packed {split} split from {len(src_docs)} examples -> {len(src)}.")
Path(save_path / f"{split}.source").open("w").write("\n".join(src))
Path(save_path / f"{split}.target").open("w").write("\n".join(tgt))
def packer_cli():
parser = argparse.ArgumentParser()
parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
parser.add_argument("--max_seq_len", type=int, default=128)
parser.add_argument("--data_dir", type=str)
parser.add_argument("--save_path", type=str)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
``` |
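A hedged demonstration of `pack_examples` above using a toy whitespace "tokenizer". `FakeTok` mimics only the `tok(text, return_tensors="pt").input_ids.shape` interface used by `is_too_big`; it is not a real transformers tokenizer, and `pack_examples` from the block above is assumed to be in scope:
```python
import torch

class FakeIds:
    def __init__(self, n):
        self.input_ids = torch.zeros((1, n))

class FakeTok:
    def __call__(self, text, return_tensors=None):
        return FakeIds(len(text.split()))  # one "token" per whitespace word

src = ["a b", "c", "d e f", "g"]
tgt = ["1 2", "3", "4 5 6", "7"]
packed_src, packed_tgt = pack_examples(FakeTok(), src, tgt, max_tokens=4)
print(packed_src)  # short examples greedily merged up to the 4-token budget
```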
{
"source": "Joan-GQ/LeetCode",
"score": 3
} |
#### File: contains-duplicate/Python3/Solution.py
```python
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums_set = set(nums)
output = len(nums_set) != len(nums)
return output
``` |
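A quick illustrative check of the set-length trick (assuming the `Solution` class above is in scope):
```python
sol = Solution()
print(sol.containsDuplicate([1, 2, 3, 1]))  # True: 1 appears twice
print(sol.containsDuplicate([1, 2, 3, 4]))  # False: all elements distinct
```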
{
"source": "Joanguitar/ACO",
"score": 3
} |
#### File: ACO/py_aco/method.py
```python
import numpy as np
from . import codebook
##### DESCRIPTION #####
# This part of the module handles the
# high level ACO method logic
class ACO_low(object):
"""
ACO_low.
The method itself implemented
"""
# Initialize the method to a given number of antennas and a maximum codebook length
def __init__(self, n_antennas, maximum_bps=64): # n_antennas is the number of antennas, while maximum_bps sets the maximum codebook length
super(ACO_low, self).__init__()
# Parameters
self.n_antennas = n_antennas
self.maximum_bps = maximum_bps
self.initial_codebook = [ # The method is provided with a default codebook, overwrite this variable with your own codebook if you have one
np.array([codebook.get_phased_coef(a) for a in bp])
for bp in np.fft.fft(np.eye(n_antennas))
]
# Flow control
self.stage = 0 # The method starts at stage 0
self.bp = None # Here will be stored the winner bp for communication
self.antenna_index = [] # Here will be stored the indices of the antennas that are being estimated with the selected codebook
# Byproduct
self.channel = np.zeros(n_antennas, dtype='complex') # Here will be stored the channel estimation (only the indices estimated, the rest will be zero)
# Create the codebook for ACO's next estimation
def get_codebook(self):
if self.stage == 0: # During the first stage we return the initial_codebook as we still don't have a good beam-pattern
return self.initial_codebook[:self.maximum_bps] # The codebook is sent trimmed to not exceed the maximum codebook length
return codebook.get_codebook(self.bp, self.antenna_index) # Else, we compute the codebook required to estimate the selected channel antenna coefficients
# This function is called to set the antenna_index variable as a function of self.bp
# Keep in mind the formula "codebook length = 1 + 3*n_active_antennas + 4*n_search_antennas"
def set_antenna_index(self):
active_antennas = np.argwhere(self.bp != 0)[:, 0] # Get the active antennas
if 1+3*len(active_antennas) >= self.maximum_bps: # If the set of active antennas is bigger than the codebook budget allows us to measure, trim it to a feasible set of indices and return it without search antennas, since active ones are more important
self.antenna_index = active_antennas[:int(np.floor((self.maximum_bps-1)/3))]
return
inactive_antennas = np.argwhere(self.bp[:] == 0)[:, 0] # Get the inactive antennas
n_search_antennas = int(np.floor( # Compute how many inactive antennas we can make fit in the estimation
(self.maximum_bps-(1+3*len(active_antennas)))/4
))
if n_search_antennas == 0: # If we can't fit any more antennas in the estimation, just set the antenna_index to the active antennas
self.antenna_index = active_antennas
return
if n_search_antennas > len(inactive_antennas): # If we can estimate all antennas, then set antenna_index to the index of all antennas
self.antenna_index = np.arange(self.n_antennas)
return
search_antennas = np.random.choice(inactive_antennas, n_search_antennas, replace=False)  # Otherwise, choose n_search_antennas distinct antennas from the set of inactive antennas
self.antenna_index = np.concatenate((active_antennas, search_antennas)) # Set the antenna_index to be the union of active_antennas and search_antennas
# Compute the beam-pattern for communication given the RSS measurement from the given codebook
def get_winner_bp(self, rss): # rss is a vector containing the measurements of the RSS
if self.stage == 0:
self.stage = 1 # Mode into the next stage for the next iteration
bp_max_index = np.argmax(rss) # Find the strongest beam-pattern from the initial_codebook
self.bp = self.initial_codebook[bp_max_index] # Set the beam-pattern for communication as the strongest one
self.set_antenna_index() # Update the antenna index for generating the codebook
return self.bp
else:
subchannel_est = codebook.get_subchannel(self.bp, self.antenna_index, rss) # Make a subchannel estimation with the rss variable
self.channel = np.zeros(self.n_antennas, dtype='complex') # Initialize the channel estimation with zeros
for ii, coef in zip(self.antenna_index, subchannel_est):
self.channel[ii] = coef # Fill the know coefficients for the channel
self.bp = codebook.get_winner_bp(self.channel) # Get the winner bp for communication from the channel estimation
self.set_antenna_index() # Update the antenna index for generating the codebook
return self.bp
``` |
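A hedged worked example of the budget formula quoted in `set_antenna_index` above, codebook length = 1 + 3*n_active_antennas + 4*n_search_antennas, with illustrative numbers:
```python
maximum_bps = 64
n_active = 5                                            # illustrative
budget_after_active = maximum_bps - (1 + 3 * n_active)  # 64 - 16 = 48
n_search = budget_after_active // 4                     # 12 search antennas
print(n_search, 1 + 3 * n_active + 4 * n_search)        # 12, 64 beam-patterns
```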
{
"source": "Joanguitar/HiveMind-voice-sat",
"score": 3
} |
#### File: HiveMind-voice-sat/useful_scripts/tune_wake_word_threshold.py
```python
import os
import time
import tempfile
import pickle
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
from speech_recognition import Microphone
# Load/Save configuration
bool_load = False
bool_save = False
audio_dir = os.path.join('.', 'wakedump')
# Define hotword (the threshold for detection needs to be low)
"""
hotword = {
"name": "<NAME>",
"phonemes": "HH EY . M AY K R AO F T",
"lang": "en-us",
"threshold": 1e-90,
}
"""
hotword = {
"name": "<NAME>",
"phonemes": "e s k u ch a . p a k o",
"lang": "es-es",
"threshold": 1e-90,
}
# Model dir
modeldir = os.path.join('.', 'mycroft_voice_satellite', 'speech', 'recognizer', 'model', hotword['lang'], 'hmm')
# Sampling rate
sample_rate=16000
# Create file for sphinx
(fd, file_name) = tempfile.mkstemp()
words = hotword['name'].split()
phoneme_groups = hotword['phonemes'].split('.')
with os.fdopen(fd, 'w') as f:
for word, phoneme in zip(words, phoneme_groups):
f.write(word + ' ' + phoneme + '\n')
# sphinx hotword engine
def hotword_engine(threshold):
config = Decoder.default_config()
config.set_string('-hmm', modeldir)
config.set_string('-dict', file_name)
config.set_string('-keyphrase', hotword['name'])
config.set_float('-kws_threshold', float(threshold))
config.set_float('-samprate', sample_rate)
config.set_int('-nfft', 2048)
if os.name == 'nt': config.set_string('-logfn', 'NUL')
else: config.set_string('-logfn', '/dev/null')
config.set_string('-featparams', os.path.join(modeldir, "feat.params"))
return Decoder(config)
if bool_load:
audio_samples = pickle.load(open(audio_dir, "rb"))
N_realizations = len(audio_samples)
else:
# Create sphinx model for detecting hotwords
hwe = hotword_engine(hotword['threshold'])
# Define Microphone
mic = Microphone(device_index=None, sample_rate=sample_rate, chunk_size=1024)
# Record hotword
N_realizations = 5
audio_samples = []
for realization in range(N_realizations):
sample = b''
if realization > 0: time.sleep(0.5)
with mic as source:
hyp = False
sample += source.stream.read(source.CHUNK)
print('say "{}" {}/{} ... '.format(hotword['name'], realization+1, N_realizations))
while not hyp:
# Capture
chunk = source.stream.read(source.CHUNK)
sample += chunk
# Find hotword
hwe.start_utt()
hwe.process_raw(sample, False, False)
hwe.end_utt()
hyp = hwe.hyp()
audio_samples.append(sample)
print('detected')
if bool_save:
pickle.dump(audio_samples, open(audio_dir, "wb"))
# Found hotword function
def found_hotwork(hwe, sample):
hwe.start_utt()
hwe.process_raw(sample, False, False)
hwe.end_utt()
return hwe.hyp() is not None
# Find maximum threshold for which all samples are detected
# Refine B
detected = [False for sample in audio_samples]
for nthreshold_B in range(50):
threshold = pow(10, -nthreshold_B)
hwe = hotword_engine(threshold)
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
print(detected)
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
print(detected)
nthr_B = nthreshold_B
if all(detected):
break
# Refine dB
detected = [False for sample in audio_samples]
for nthreshold_dB in range(10*nthr_B-10, 10*nthr_B):
threshold = pow(10, -nthreshold_dB/10)
hwe = hotword_engine(threshold)
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
nthr_dB = nthreshold_dB
if all(detected):
break
# Refine cB
detected = [False for sample in audio_samples]
for nthreshold_cB in range(10*nthr_dB-10, 10*nthr_dB):
threshold = pow(10, -nthreshold_cB/100)
hwe = hotword_engine(threshold)
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
detected = [found_hotwork(hwe, sample) for sample in audio_samples]
nthr_cB = nthreshold_cB
if all(detected):
break
# Print result
threshold = pow(10, -nthr_cB/100)
print('threshold: {:5e}'.format(threshold))
``` |
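The three loops above perform a coarse-to-fine search over threshold exponents: whole bels (1 B), then decibels (0.1 B), then centibels (0.01 B). A hedged refactor sketch with the detection predicate injected (the lambda is a toy stand-in; the real predicate would wrap the pocketsphinx checks above):
```python
def refine_threshold(detects):
    """Largest threshold 10**(-e/100) at which detects() succeeds,
    scanned at 1 B, then 0.1 B, then 0.01 B resolution."""
    exp_b = next(n for n in range(50) if detects(10 ** -n))
    exp_db = next(n for n in range(10 * exp_b - 10, 10 * exp_b + 1)
                  if detects(10 ** (-n / 10)))
    exp_cb = next(n for n in range(10 * exp_db - 10, 10 * exp_db + 1)
                  if detects(10 ** (-n / 100)))
    return 10 ** (-exp_cb / 100)

# Toy predicate: pretend detection succeeds once threshold <= 3.2e-13
print(refine_threshold(lambda thr: thr <= 3.2e-13))  # ~3.16e-13
```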
{
"source": "Joanguitar/text2speech",
"score": 2
} |
#### File: text2speech/modules/google_tts.py
```python
from gtts import gTTS
from gtts.lang import tts_langs
from text2speech.modules import TTS, TTSValidator
from ovos_utils.log import LOG
import logging
logging.getLogger('gtts.tts').setLevel(logging.CRITICAL)
logging.getLogger("gtts.lang").setLevel(logging.CRITICAL)
class GoogleTTS(TTS):
works_offline = False
audio_ext = "mp3"
_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',
'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',
'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',
'da': 'Danish', 'nl': 'Dutch', 'en': 'English',
'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',
'fi': 'Finnish', 'fr': 'French', 'de': 'German',
'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',
'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',
'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',
'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',
'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',
'ml': 'Malayalam', 'mr': 'Marathi',
'my': 'Myanmar (Burmese)', 'ne': 'Nepali',
'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',
'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',
'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',
'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai',
'tr': 'Turkish',
'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',
'cy': 'Welsh', 'zh-cn': 'Chinese (Mandarin/China)',
'zh-tw': 'Chinese (Mandarin/Taiwan)',
'en-us': 'English (US)', 'en-ca': 'English (Canada)',
'en-uk': 'English (UK)', 'en-gb': 'English (UK)',
'en-au': 'English (Australia)',
'en-gh': 'English (Ghana)',
'en-in': 'English (India)', 'en-ie': 'English (Ireland)',
'en-nz': 'English (New Zealand)',
'en-ng': 'English (Nigeria)',
'en-ph': 'English (Philippines)',
'en-za': 'English (South Africa)',
'en-tz': 'English (Tanzania)',
'fr-ca': 'French (Canada)',
'fr-fr': 'French (France)',
'pt-br': 'Portuguese (Brazil)',
'pt-pt': 'Portuguese (Portugal)',
'es-es': 'Spanish (Spain)',
'es-us': 'Spanish (United States)'
}
def __init__(self, config=None):
config = config or {"module": "google",
"lang": "en-us"}
super(GoogleTTS, self).__init__(config, GoogleTTSValidator(self))
self._voices = None
voices = self.describe_voices()
if self.lang not in voices and self.lang.split("-")[0] in voices:
self.lang = self.lang.split("-")[0]
self.voice = self.describe_voices()[self.lang][0]
def get_tts(self, sentence, wav_file, lang=None):
lang = lang or self.lang
voices = self.describe_voices()
if lang not in voices and lang.split("-")[0] in voices:
lang = lang.split("-")[0]
tts = gTTS(sentence, lang=lang)
tts.save(wav_file)
return (wav_file, None) # No phonemes
def describe_voices(self):
if self._voices is None:
self._voices = {}
try:
langs = tts_langs()
except:
langs = self._default_langs
for lang_code in langs:
self._voices[lang_code] = [langs[lang_code]]
return self._voices
class GoogleTTSValidator(TTSValidator):
def __init__(self, tts):
super(GoogleTTSValidator, self).__init__(tts)
def validate_lang(self):
assert self.tts.lang in self.tts.describe_voices()
def validate_connection(self):
try:
gTTS(text='Hi')
except:
LOG.warning(
'GoogleTTS server could not be verified. Please check your '
'internet connection.')
def get_tts_class(self):
return GoogleTTS
```
#### File: text2speech/plugins/__init__.py
```python
import pkg_resources
from ovos_utils.log import LOG
def find_plugins(plug_type):
"""Finds all plugins matching specific entrypoint type.
Arguments:
plug_type (str): plugin entrypoint string to retrieve
Returns:
dict mapping plugin names to plugin entrypoints
"""
return {
entry_point.name: entry_point.load()
for entry_point
in pkg_resources.iter_entry_points(plug_type)
}
def load_plugin(plug_type, plug_name):
"""Load a specific plugin from a specific plugin type.
Arguments:
plug_type: (str) plugin type name. Ex. "mycroft.plugin.tts".
plug_name: (str) specific plugin name
Returns:
Loaded plugin Object or None if no matching object was found.
"""
plugins = find_plugins(plug_type)
if plug_name in plugins:
ret = plugins[plug_name]
else:
LOG.warning('Could not find the plugin {}.{}'.format(plug_type,
plug_name))
ret = None
return ret
``` |
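`find_plugins` discovers plugins through setuptools entry points. A hedged sketch of how a third-party package would register one (the package, module, and class names are illustrative; the `mycroft.plugin.tts` group name comes from the `load_plugin` docstring above). In the plugin package's `setup.py`:
```python
from setuptools import setup

setup(
    name="my-tts-plugin",             # illustrative package name
    version="0.1.0",
    py_modules=["my_tts"],
    entry_points={
        "mycroft.plugin.tts": [
            # find_plugins("mycroft.plugin.tts") would then map
            # "my_tts" -> the MyTTS class exported by my_tts.py
            "my_tts = my_tts:MyTTS",
        ]
    },
)
```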
{
"source": "joanibal/baseclasses",
"score": 3
} |
#### File: baseclasses/baseclasses/pyAero_problem.py
```python
import numpy
import warnings
from .ICAOAtmosphere import ICAOAtmosphere
from .FluidProperties import FluidProperties
from .utils import CaseInsensitiveDict, Error
class AeroProblem(FluidProperties):
"""
The main purpose of this class is to represent all relevant
information for a single aerodynamic analysis. This will
include the thermodynamic parameters defining the flow,
condition, the reference quantities for normalization.
There are several different ways of specifying thermodynamic
conditions. The following describes several of the possible
ways and the appropriate situations.
'mach' + 'altitude'
This is the preferred method. The 1976 standard atmosphere is
used to generate all therodynamic properties in a consistent
manner. This is suitable for all aerodynamic analysis codes,
including aerostructral analysis.
'mach' + 'reynolds' + 'reynoldsLength' + 'T':
Used to precisely match reynolds numbers. Complete
thermodynamic state is computed.
'V' + 'reynolds' + 'reynoldsLength' + 'T':
Used to precisely match reynolds numbers for low speed cases.
Complete thermodynamic state is computed.
'mach' + 'T' + 'P':
Any arbitrary temperature and pressure.
'mach' + 'T' + 'rho':
Any arbitrary temperature and density.
'mach' + 'rho' + 'P':
Any arbitrary density and pressure.
'V' + 'rho' + 'T'
Generally for low speed specifications
'V' + 'rho' + 'P'
Generally for low speed specifications
'V' + 'T' + 'P'
Generally for low speed specifications
The combinations listed above are the **only** valid combinations
of arguments that are permitted. Furthermore, since the internal
processing is based (permanently) on these parameters, it is
important that the parameters given on initialization are
sufficient for the required analysis. For example, if only the
Mach number is given, an error will be raised if the user tries to
set the 'P' (pressure) variable.
All parameters are optional except for the `name` argument, which
is required. All of the parameters listed below can be accessed and
set directly after class creation by calling::
<aeroProblem>.<variable> = <value>
An attempt is made internally to maintain consistency of the
supplied arguments. For example, if the altitude variable is set
directly, the other thermodynamic properties (rho, P, T, mu, a)
are updated accordingly.
Parameters
----------
name : str
Name of this aerodynamic problem.
funcs : iteratble object containing strings
The names of the functions the user wants evaluated with this
aeroProblem.
mach : float. Default is 0.0
Set the mach number for the simulation
machRef : float. Default is None
Sets the reference mach number for the simulation.
machGrid : float. Default is None
Set the mach number for the grid.
alpha : float. Default is 0.0
Set the angle of attack
beta : float. Default is 0.0
Set side-slip angle
altitude : float. Default is 0.0
Set all thermodynamic parameters from the 1976 standard atmosphere.
the altitude must be given in meters.
phat : float. Default is 0.0
Set the rolling rate coefficient
qhat : float. Default is 0.0
Set the pitch rate coefficient
rhat : float. Default is 0.0
Set the yawing rate coefficient
degreePol : integer. Default is 0
Degree of polynomial for prescribed motion. ADflow only
coefPol : array_like. Default is [0.0]
Coefficients of polynomial motion. ADflow only
degreeFourier : integer. Default is 0
Degree of Fourier coefficient for prescribed motion. ADflow only
omegaFourier : float. Default is 0.0
Fundamental circular frequency for oscillatory motion (ADflow only)
cosCoefFourier : array_like. Default is [0.0]
Coefficients for cos terms
sinCoefFourier : array_like. Default is [0.0]
Coefficients for the sin terms
P : float.
Set the ambient pressure
T : float.
Set the ambient temperature
gamma : float. Default is 1.4
Set the ratio of the specific heats in ideal gas law
reynolds : float. Default is None
Set the reynolds number
reynoldsLength : float. Default is 1.0
Set the length reference for the reynolds number calculations
areaRef : float. Default is 1.0
Set the reference area used for normalization of Lift, Drag etc.
chordRef : float. Default is 1.0
Set the reference length used for moment normalization.
spanRef : float. Default is 1.0
Set reference length for span. Only used for normalization of
p-derivatives
xRef : float. Default is 0.0
Set the x-coordinate location of the center about which moments
are taken
yRef : float. Default is 0.0
Set the y-coordinate location of the center about which moments
are taken
zRef : float. Default is 0.0
Set the z-coordinate location of the center about which moments
are taken
momentAxis : iterable object containing floats.
Default is [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
Set the reference axis for non-x/y/z based moment calculations
R : float
The gas constant. By default we use air: R=287.05
englishUnits : bool
Flag to use all English units: pounds, feet, Rankine etc.
solverOptions : dict
A set of solver-specific options that temporarily override the solver's
internal options for this aero problem only. It must contain the name of
the solver followed by a dictionary of options for that solver. For example,
solverOptions={'adflow':{'vis4':0.018}}. Currently the only solver
supported is 'adflow', and it must use the specific key 'adflow'.
Examples
--------
>>> # DPW4 Test condition (metric)
>>> ap = AeroProblem('tunnel_condition', mach=0.85, reynolds=5e6, \
reynoldsLength=275.8*.0254, T=310.93, areaRef=594720*.0254**2, \
chordRef=275.8*.0254, xRef=1325.9*0.0254, zRef=177.95*.0254)
>>> # DPW4 Flight condition (metric)
>>> ap = AeroProblem('flight_condition', mach=0.85, altitude=37000*.3048, \
areaRef=594720*.0254**2, chordRef=275.8*.0254, \
xRef=1325.9*0.0254, zRef=177.95*.0254)
>>> # OneraM6 Test condition (euler)
>>> ap = AeroProblem('m6_tunnel', mach=0.8395, areaRef=0.772893541, chordRef=0.64607, \
xRef=0.0, zRef=0.0, alpha=3.06)
>>> # OneraM6 Test condition (RANS)
>>> ap = AeroProblem('m6_tunnel', mach=0.8395, reynolds=11.72e6, reynoldsLength=0.64607, \
areaRef=0.772893541, chordRef=0.64607, xRef=0.0, zRef=0.0, alpha=3.06, T=255.56)
"""
def __init__(self, name, **kwargs):
# Set basic fluid properties
super().__init__(**kwargs)
# Always have to have the name
self.name = name
# These are the parameters that can be simply set directly in
# the class.
paras = set(
(
"alpha",
"beta",
"areaRef",
"chordRef",
"spanRef",
"xRef",
"yRef",
"zRef",
"xRot",
"yRot",
"zRot",
"phat",
"qhat",
"rhat",
"momentAxis",
"degreePol",
"coefPol",
"degreeFourier",
"omegaFourier",
"cosCoefFourier",
"sinCoefFourier",
"machRef",
"machGrid",
)
)
# By default everything is None
for para in paras:
setattr(self, para, None)
# create an internal instance of the atmosphere to use
if "altitude" in kwargs:
self.atm = ICAOAtmosphere(englishUnits=self.englishUnits)
# Set or create an empty dictionary for additional solver
# options
self.solverOptions = CaseInsensitiveDict({})
if "solverOptions" in kwargs:
for key in kwargs["solverOptions"]:
self.solverOptions[key] = kwargs["solverOptions"][key]
# Any matching key from kwargs that is in 'paras'
for key in kwargs:
if key in paras:
setattr(self, key, kwargs[key])
# Check for function list:
self.evalFuncs = set()
if "evalFuncs" in kwargs:
self.evalFuncs = set(kwargs["evalFuncs"])
if "funcs" in kwargs:
warnings.warn("funcs should **not** be an argument. Use 'evalFuncs' instead.")
self.evalFuncs = set(kwargs["funcs"])
# we cast the set to a sorted list, so that each proc can loop over in the same order
self.evalFuncs = sorted(list(self.evalFuncs))
# these are the possible input values
possibleInputStates = set(["mach", "V", "P", "T", "rho", "altitude", "reynolds", "reynoldsLength"])
# turn the kwargs into a set
keys = set(kwargs.keys())
# save the initials states
self.inputs = {}
for key in keys:
if key in possibleInputStates:
self.inputs[key] = kwargs[key]
# full list of states in the class
self.fullState = set(
["mach", "V", "P", "T", "rho", "mu", "nu", "a", "q", "altitude", "re", "reynolds", "reynoldsLength"]
)
# now call the routine to setup the states
self._setStates(self.inputs)
# Specify the set of possible design variables:
self.allVarFuncs = [
"alpha",
"beta",
"areaRef",
"chordRef",
"spanRef",
"xRef",
"yRef",
"zRef",
"xRot",
"yRot",
"zRot",
"momentAxis",
"phat",
"qhat",
"rhat",
"mach",
"altitude",
"P",
"T",
"reynolds",
"reynoldsLength",
]
self.possibleDVs = set()
for var in self.allVarFuncs:
if getattr(self, var) is not None:
self.possibleDVs.add(var)
BCVarFuncs = ["Pressure", "PressureStagnation", "Temperature", "TemperatureStagnation", "Thrust"]
self.possibleBCDVs = set(BCVarFuncs)
# Now determine the possible functions. Any possible design
# variable CAN also be a function (pass through)
self.possibleFunctions = set(self.possibleDVs)
# And anything in fullState can be a function:
for var in self.fullState:
if getattr(self, var) is not None:
self.possibleFunctions.add(var)
# When a solver calls its evalFunctions() it must write the
# unique name it gives to funcNames.
self.funcNames = {}
# Storage of DVs
self.DVs = {}
# Storage of BC varible values
# vars are keyed by (bcVarName, Family)
self.bcVarData = {}
def _setStates(self, inputDict):
"""
Take in a dictionary and set up the full set of states.
"""
# Now we can do the name matching for the data for the
# thermodynamic condition. We actually can work backwards from
# the list given in the doc string.
for key in self.fullState:
self.__dict__[key] = None
keys = set(inputDict.keys())
inKeys = set(self.inputs.keys())
# first check that the keys in inputDict are valid
for key in keys:
if key in self.inputs.keys():
pass
else:
validKeys = ""
for vkey in self.inputs:
validKeys += vkey + ", "
raise Error(
"Invalid input parameter: %s . Only values initially specifed"
" as inputs may be modifed. valid inputs include: %s" % (key, validKeys)
)
# now we know our inputs are valid; update self.inputs and the states
for key in inputDict:
self.inputs[key] = inputDict[key]
if set(("mach", "T", "P")) <= inKeys:
self.__dict__["mach"] = self.inputs["mach"]
self.__dict__["T"] = self.inputs["T"]
self.__dict__["P"] = self.inputs["P"]
self.__dict__["rho"] = self.P / (self.R * self.T)
# now calculate remaining states
self._updateFromM()
elif set(("mach", "T", "rho")) <= inKeys:
self.__dict__["mach"] = self.inputs["mach"]
self.__dict__["T"] = self.inputs["T"]
self.__dict__["rho"] = self.inputs["rho"]
self.__dict__["P"] = self.rho * self.R * self.T
# now calculate remaining states
self._updateFromM()
elif set(("mach", "P", "rho")) <= inKeys:
self.__dict__["mach"] = self.inputs["mach"]
self.__dict__["rho"] = self.inputs["rho"]
self.__dict__["P"] = self.inputs["P"]
self.__dict__["T"] = self.P / (self.rho * self.R)
# now calculate remaining states
self._updateFromM()
elif set(("mach", "reynolds", "reynoldsLength", "T")) <= inKeys:
self.__dict__["mach"] = self.inputs["mach"]
self.__dict__["T"] = self.inputs["T"]
self.__dict__["re"] = self.inputs["reynolds"] / self.inputs["reynoldsLength"]
self.__dict__["reynolds"] = self.inputs["reynolds"]
self.__dict__["reynoldsLength"] = self.inputs["reynoldsLength"]
# now calculate remaining states
self._updateFromRe()
elif set(("V", "reynolds", "reynoldsLength", "T")) <= inKeys:
self.__dict__["V"] = self.inputs["V"]
self.__dict__["T"] = self.inputs["T"]
self.__dict__["re"] = self.inputs["reynolds"] / self.inputs["reynoldsLength"]
self.__dict__["reynolds"] = self.inputs["reynolds"]
self.__dict__["reynoldsLength"] = self.inputs["reynoldsLength"]
# now calculate remaining states
self._updateFromRe()
elif set(("mach", "altitude")) <= inKeys:
self.__dict__["mach"] = self.inputs["mach"]
self.__dict__["altitude"] = self.inputs["altitude"]
P, T = self.atm(self.inputs["altitude"])
self.__dict__["T"] = T
self.__dict__["P"] = P
self.__dict__["rho"] = self.P / (self.R * self.T)
self._updateFromM()
elif set(("V", "rho", "T")) <= inKeys:
self.__dict__["V"] = self.inputs["V"]
self.__dict__["rho"] = self.inputs["rho"]
self.__dict__["T"] = self.inputs["T"]
# calculate pressure
self.__dict__["P"] = self.rho * self.R * self.T
self._updateFromV()
elif set(("V", "rho", "P")) <= inKeys:
self.__dict__["V"] = self.inputs["V"]
self.__dict__["rho"] = self.inputs["rho"]
self.__dict__["P"] = self.inputs["P"]
# start by calculating the T
self.__dict__["T"] = self.P / (self.rho * self.R)
self._updateFromV()
elif set(("V", "T", "P")) <= inKeys:
self.__dict__["V"] = self.inputs["V"]
self.__dict__["T"] = self.inputs["T"]
self.__dict__["P"] = self.inputs["P"]
# start by calculating the T
self.__dict__["rho"] = self.P / (self.R * self.T)
self._updateFromV()
else:
raise Error(
"There was not sufficient information to form "
"an aerodynamic state. See AeroProblem documentation "
"in for pyAero_problem.py for information on how "
"to correctly specify the aerodynamic state"
)
def setBCVar(self, varName, value, familyName):
"""
set the value of a BC variable on a specific variable
"""
self.bcVarData[varName, familyName] = value
print("update bc", value)
def addDV(
self,
key,
value=None,
lower=None,
upper=None,
scale=1.0,
name=None,
offset=0.0,
dvOffset=0.0,
addToPyOpt=True,
family=None,
units=None,
):
"""
Add one of the class attributes as an 'aerodynamic' design
variable. Typical variables are alpha, mach, altitude,
chordRef etc. An error will be raised if the requested DV is
not allowed to be added.
Parameters
----------
key : str
Name of variable to add. See above for possible ones
value : float. Default is None
Initial value for variable. If not given, current value
of the attribute will be used.
lower : float. Default is None
Optimization lower bound. Default is unbonded.
upper : float. Default is None
Optimization upper bound. Default is unbounded.
scale : float. Default is 1.0
Set scaling parameter for the optimization to use.
name : str. Default is None
Overwrite the name of this variable. This is typically
only used when the user wishes to have multiple
aeroProblems to explicitly use the same design variable.
offset : float. Default is 0.0
Specify a specific (constant!) offset of the value used,
as compared to the actual design variable. This is most
often used when a single aerodynamic variable is used to
change multiple aeroProblems. For example. if you have
three aeroProblems for a multiPoint analysis, and you want
mach numbers of 0.84, 0.85 and 0.86, but want want only to
change the center one, and have the other two slave, we
would do this::
>>> ap1.addDV('mach',...,name='centerMach', offset=-0.01)
>>> ap2.addDV('mach',...,name='centerMach', offset= 0.00)
>>> ap3.addDV('mach',...,name='centerMach', offset=+0.01)
The result is a single design variable driving three
different mach numbers.
dvOffset : float. Default is 0.0
This is the offset passed to pyOptSparse. It can be used
to re-center the value about zero.
addToPyOpt : bool. Default True.
Flag specifying if this variable should be added. Normally this
is True. However, if there are multiple aeroProblems sharing
the same variable, only one needs to add the variables to pyOpt
and the others can set this to False.
units : str or None. Default None
physical units of the variable
Examples
--------
>>> # Add alpha variable with typical bounds
>>> ap.addDV('alpha', value=2.5, lower=0.0, upper=10.0, scale=0.1)
"""
if (key not in self.allVarFuncs) and (key not in self.possibleBCDVs):
raise ValueError("%s is not a valid design variable" % key)
# First check if we are allowed to add the DV:
elif (key not in self.possibleDVs) and (key in self.allVarFuncs):
raise Error(
"The DV '%s' could not be added. Potential DVs MUST "
"be specified when the aeroProblem class is created. "
"For example, if you want alpha as a design variable "
"(...,alpha=value, ...) must be given. The list of "
"possible DVs are: %s." % (key, repr(self.possibleDVs))
)
if key in self.possibleBCDVs:
if family is None:
raise Error("The family must be given for BC design variables")
if name is None:
dvName = "%s_%s_%s" % (key, family, self.name)
else:
dvName = name
if value is None:
if (key, family) not in self.bcVarData:
raise Error("The value must be given or set using the setBCVar routine")
value = self.bcVarData[key, family]
else:
if name is None:
dvName = key + "_%s" % self.name
else:
dvName = name
if value is None:
value = getattr(self, key)
family = None
self.DVs[dvName] = aeroDV(key, value, lower, upper, scale, offset, dvOffset, addToPyOpt, family, units)
def updateInternalDVs(self):
"""
A specialized function that allows for the updating of the
internally stored DVs. This would be used, for example, if a
CLsolve is done before the optimization and that value needs
to be used."""
for dvName in self.DVs:
if self.DVs[dvName].family is None:
self.DVs[dvName].value = getattr(self, self.DVs[dvName].key)
def setDesignVars(self, x):
"""
Set the variables in the x-dict for this object.
Parameters
----------
x : dict
Dictionary of variables which may or may not contain the
design variable names this object needs
"""
for dvName in self.DVs:
if dvName in x:
key = self.DVs[dvName].key
family = self.DVs[dvName].family
value = x[dvName] + self.DVs[dvName].offset
if family is None:
setattr(self, key, value)
else:
self.bcVarData[key, family] = value
try: # To set in the DV as well if the DV exists:
self.DVs[dvName].value = x[dvName]
except: # noqa
pass # DV doesn't exist
def addVariablesPyOpt(self, optProb):
"""
Add the current set of variables to the optProb object.
Parameters
----------
optProb : pyOpt_optimization class
Optimization problem definition to which variables are added
"""
for dvName in self.DVs:
dv = self.DVs[dvName]
if dv.addToPyOpt:
if type(dv.value) == numpy.ndarray:
optProb.addVarGroup(
dvName,
dv.value.size,
"c",
value=dv.value,
lower=dv.lower,
upper=dv.upper,
scale=dv.scale,
offset=dv.dvOffset,
units=dv.units,
)
else:
optProb.addVar(
dvName,
"c",
value=dv.value,
lower=dv.lower,
upper=dv.upper,
scale=dv.scale,
offset=dv.dvOffset,
units=dv.units,
)
def __getitem__(self, key):
return self.funcNames[key]
def __str__(self):
output_str = ""
for key, val in self.__dict__.items():
output_str += "{0:20} : {1:<16}\n".format(key, val)
return output_str
def evalFunctions(self, funcs, evalFuncs, ignoreMissing=False):
"""
Evaluate the desired aerodynamic functions. It may seem
strange that the aeroProblem has 'functions' associated with
it, but in certain instances, this is the case.
For an aerodynamic optimization, consider the case when 'mach'
is a design variable, and the objective is ML/D. We need the
mach variable explicitly in our objCon function. In this
case, the 'function' is simply the design variable itself, and
the derivative of the function with respect the design
variable is 1.0.
A more complex example is when 'altitude' is used for an
aerostructural optimization. If the Breguet range
equation is used for either the objective or constraints, we
need to know the flight velocity, 'V', which is a non-trivial
function of the altitude (and Mach number).
Also, even if 'altitude' and 'mach' are not parameters, this
function can be used to evaluate the 'V' value for example. In
this case, 'V' is simply constant and no sensitivities would be
calculated, which is fine.
Note that the list of available functions depends on how the
user has initialized the flight condition.
Parameters
----------
funcs : dict
Dictionary into which the functions are saved
evalFuncs : iterable object containing strings
The functions that the user wants evaluated
"""
if set(evalFuncs) <= self.possibleFunctions:
# All the functions are ok:
for f in evalFuncs:
# Save the key into funcNames
key = self.name + "_%s" % f
self.funcNames[f] = key
funcs[key] = getattr(self, f)
else:
if not ignoreMissing:
raise Error(
"One of the functions in 'evalFunctionsSens' was "
"not valid. The valid list of functions is: %s." % (repr(self.possibleFunctions))
)
def evalFunctionsSens(self, funcsSens, evalFuncs, ignoreMissing=True):
"""
Evaluate the sensitivity of the desired aerodynamic functions.
Parameters
----------
funcsSens : dict
Dictionary into which the function sensitivities are saved
evalFuncs : iterable object containing strings
The functions that the user wants evaluated
"""
# Make sure all the functions have been evaluated.
tmp = {}
self.evalFunctions(tmp, evalFuncs)
# Check that all functions are ok:
if set(evalFuncs) <= self.possibleFunctions:
for f in evalFuncs:
funcsSens[self.funcNames[f]] = self._getDVSens(f)
else:
if not ignoreMissing:
raise Error(
"One of the functions in 'evalFunctionsSens' was "
"not valid. The valid list of functions is: %s." % (repr(self.possibleFunctions))
)
def _set_aeroDV_val(self, key, value):
# Find the DV matching this value. This is inefficient, but
# there are not generally *that* many aero DVs
for dvName in self.DVs:
if self.DVs[dvName].key.lower() == key.lower():
self.DVs[dvName].value = value
@property
def mach(self):
return self.__dict__["mach"]
@mach.setter
def mach(self, value):
self._setStates({"mach": value})
self._set_aeroDV_val("mach", value)
@property
def T(self):
return self.__dict__["T"]
@T.setter
def T(self, value):
self._setStates({"T": value})
self._set_aeroDV_val("T", value)
@property
def P(self):
return self.__dict__["P"]
@P.setter
def P(self, value):
self._setStates({"P": value})
self._set_aeroDV_val("P", value)
@property
def rho(self):
return self.__dict__["rho"]
@rho.setter
def rho(self, value):
self._setStates({"rho": value})
self._set_aeroDV_val("rho", value)
@property
def re(self):
return self.__dict__["re"]
@re.setter
def re(self, value):
self._setStates({"re": value})
self._set_aeroDV_val("re", value)
@property
def reynolds(self):
return self.__dict__["reynolds"]
@reynolds.setter
def reynolds(self, value):
self._setStates({"reynolds": value})
self._set_aeroDV_val("reynolds", value)
@property
def reynoldsLength(self):
return self.__dict__["reynoldsLength"]
@reynoldsLength.setter
def reynoldsLength(self, value):
self._setStates({"reynoldsLength": value})
self._set_aeroDV_val("reynoldsLength", value)
@property
def altitude(self):
return self.__dict__["altitude"]
@altitude.setter
def altitude(self, value):
self._setStates({"altitude": value})
self._set_aeroDV_val("altitude", value)
# def _update(self):
# """
# Try to finish the complete state:
# """
# if self.T is not None:
# self.a = numpy.sqrt(self.gamma*self.R*self.T)
# if self.englishUnits:
# mu = (self.muSuthDim * (
# (self.TSuthDim + self.SSuthDim) / (self.T/1.8 + self.SSuthDim)) *
# (((self.T/1.8)/self.TSuthDim)**1.5))
# self.mu = mu / 47.9
# else:
# self.mu = (self.muSuthDim * (
# (self.TSuthDim + self.SSuthDim) / (self.T + self.SSuthDim)) *
# ((self.T/self.TSuthDim)**1.5))
# if self.mach is not None and self.a is not None:
# self.V = self.mach * self.a
# if self.a is not None and self.V is not None:
# self.__dict__['mach'] = self.V/self.a
# if self.P is not None and self.T is not None:
# self.__dict__['rho'] = self.P/(self.R*self.T)
# if self.rho is not None and self.T is not None:
# self.__dict__['P'] = self.rho*self.R*self.T
# if self.rho is not None and self.P is not None:
# self.__dict__['T'] = self.P /(self.rho*self.R)
# if self.mu is not None and self.rho is not None:
# self.nu = self.mu / self.rho
# if self.rho is not None and self.V is not None:
# self.q = 0.5*self.rho*self.V**2
# if self.rho is not None and self.V is not None and self.mu is not None:
# self.__dict__['re'] = self.rho*self.V/self.mu
# if self.re is not None and self.mu is not None and self.V is not None:
# self.__dict__['rho'] = self.re*self.mu/self.V
def _updateFromRe(self):
"""
update the full set of states from M,T,P
"""
# calculate the speed of sound
self.a = numpy.sqrt(self.gamma * self.R * self.T)
# Update the dynamic viscosity based on T using Sutherland's Law
self.updateViscosity(self.T)
# calculate Velocity
if self.V is None:
self.V = self.mach * self.a
else:
self.__dict__["mach"] = self.V / self.a
# calculate density
self.__dict__["rho"] = self.re * self.mu / self.V
# calculate pressure
self.__dict__["P"] = self.rho * self.R * self.T
# calculate kinematic viscosity
self.nu = self.mu / self.rho
# calculate dynamic pressure
self.q = 0.5 * self.rho * self.V ** 2
def _updateFromM(self):
"""
update the full set of states from M, T, and rho
"""
# calculate the speed of sound
self.a = numpy.sqrt(self.gamma * self.R * self.T)
# Update the dynamic viscosity based on T using Sutherland's Law
self.updateViscosity(self.T)
# calculate Velocity
self.V = self.mach * self.a
# calculate Reynolds number per unit length
self.__dict__["re"] = self.rho * self.V / self.mu
# calculate kinematic viscosity
self.nu = self.mu / self.rho
# calculate dynamic pressure
self.q = 0.5 * self.rho * self.V ** 2
def _updateFromV(self):
"""
update the full set of states from V, T, and rho
"""
# calculate the speed of sound
self.a = numpy.sqrt(self.gamma * self.R * self.T)
# Update the dynamic viscosity based on T using Sutherland's Law
self.updateViscosity(self.T)
# calculate kinematic viscosity
self.nu = self.mu / self.rho
# calculate dynamic pressure
self.q = 0.5 * self.rho * self.V ** 2
# calculate Mach Number
self.__dict__["mach"] = self.V / self.a
# calculate Reynolds number per unit length
self.__dict__["re"] = self.rho * self.V / self.mu
def _getDVSens(self, func):
"""
Function that computes the derivative of the functions in
evalFuncs, wrt the design variable key 'key'
"""
rDict = {}
h = 1e-40j
hr = 1e-40
for dvName in self.DVs:
key = self.DVs[dvName].key
family = self.DVs[dvName].family
if family is None:
setattr(self, key, getattr(self, key) + h)
rDict[dvName] = numpy.imag(self.__dict__[func]) / hr
setattr(self, key, numpy.real(getattr(self, key)))
return rDict
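# The loop above uses the complex-step method: perturbing a real input
# by a tiny imaginary step h gives df/dx = Im(f(x + i*h)) / h, which is
# exact to machine precision and free of subtractive cancellation
# (hence the extreme step size of 1e-40). A minimal sketch of the idea
# (hypothetical function; numpy as imported in this module):
#
# >>> h = 1e-40
# >>> f = lambda x: x ** 2
# >>> numpy.imag(f(3.0 + h * 1j)) / h
# 6.0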
class aeroDV(object):
"""
A container storing information regarding an 'aerodynamic' variable.
"""
def __init__(self, key, value, lower, upper, scale, offset, dvOffset, addToPyOpt, family, units):
self.key = key
self.value = value
self.lower = lower
self.upper = upper
self.scale = scale
self.offset = offset
self.dvOffset = dvOffset
self.addToPyOpt = addToPyOpt
self.family = family
self.units = units
```
#### File: baseclasses/baseclasses/utils.py
```python
class CaseInsensitiveDict(dict):
"""
Python dictionary where the keys are case-insensitive.
Note that this assumes the keys are strings, and indeed will fail if you try to
create an instance where keys are not strings.
All common Python dictionary operations are supported, and additional operations
can be added easily.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# convert keys to lower case
for k in list(self.keys()):
v = super().pop(k)
self.__setitem__(k, v)
def __setitem__(self, key, value):
super().__setitem__(key.lower(), value)
def __getitem__(self, key):
return super().__getitem__(key.lower())
def __contains__(self, key):
return super().__contains__(key.lower())
def __delitem__(self, key):
super().__delitem__(key.lower())
def pop(self, key, *args, **kwargs):
return super().pop(key.lower(), *args, **kwargs)
def get(self, key, *args, **kwargs):
return super().get(key.lower(), *args, **kwargs)
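# A minimal usage sketch (behaviour follows from the methods above):
#
# >>> d = CaseInsensitiveDict({"Alpha": 1.0})
# >>> d["ALPHA"]
# 1.0
# >>> "alpha" in d
# True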
class CaseInsensitiveSet(set):
"""
Python set where the elements are case-insensitive.
Note that this assumes the elements are strings, and indeed will fail if you try to
create an instance where elements are not strings.
All common Python set operations are supported, and additional operations
can be added easily.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# convert entries to lower case (iterate over a copy, since the
# set is mutated during the conversion)
for k in list(self):
super().remove(k)
self.add(k)
def add(self, item):
super().add(item.lower())
def __contains__(self, item):
return super().__contains__(item.lower())
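# A minimal usage sketch:
#
# >>> s = CaseInsensitiveSet({"Mach", "CL"})
# >>> "mach" in s
# True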
class Error(Exception):
"""
Format the error message in a box to make it clear this
was an explicitly raised exception.
"""
def __init__(self, message):
self.message = message
msg = "\n+" + "-" * 78 + "+" + "\n" + "| Error: "
i = 8
for word in message.split():
if len(word) + i + 1 > 78: # Finish line and start new one
msg += " " * (78 - i) + "|\n| " + word + " "
i = 1 + len(word) + 1
else:
msg += word + " "
i += len(word) + 1
msg += " " * (78 - i) + "|\n" + "+" + "-" * 78 + "+" + "\n"
super().__init__(msg)
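# The word-wrapping above renders the message in an 80-column ASCII box,
# roughly like this (sketch, box width abbreviated):
#
# +--------------------------------+
# | Error: Something went wrong    |
# +--------------------------------+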
``` |
{
"source": "joanibal/multipoint",
"score": 2
} |
#### File: joanibal/multipoint/testMP.py
```python
import sys, os, time
# =============================================================================
# External Python modules
# =============================================================================
import numpy
# =============================================================================
# Extension modules
# =============================================================================
from mdo_import_helper import MPI, mpiPrint
from . import multiPoint
# First create multipoint object on the communicator that will contain
# all multipoint processes. This is often MPI.COMM_WORLD.
MP = multiPoint.multiPoint(MPI.COMM_WORLD)
# Next add a "ProcessorSet". A ProcessorSet is a set of communicators
# that will all compute the same information. A typical 'ProcessorSet'
# may be each analysis point in a weighted sum drag minimization. Each
# aerodynamic problem will compute the same information (like lift,
# drag and moment). We will tell MP how many members we want of this
# ProcessorSet type, as well as the size of each member.
# Create processor set for group1. This requires N_GROUP_1*N_PROCS_1
# processors
N_GROUP_1 = 2
N_PROCS_1 = 1
MP.addProcessorSet('group1',N_GROUP_1, N_PROCS_1)
# Next we tell MP what information or functionals we would like it to
# generate. The first argument is the processorSet we just added
# (group1), the second is the keyword name defining the quantity, the
# third is the rank of the data, and the fourth is whether the data
# are unique on each processor: unique=True means each member
# generates a different value, while unique=False means every member
# generates the same values. Currently rank=1, unique=True is NOT
# supported.
MP.addFunctionals('group1', 'group1_drag', rank=0, unique=True)
MP.addFunctionals('group1', 'group1_lift', rank=0, unique=True)
MP.addFunctionals('group1', 'group1_thickness', rank=1, unique=False)
# We will now add a second processor set that will generate more
# functionals. Note that the name given to the addFunctionals command
# MUST NOT be the same as one already added.
N_GROUP_2 = 1
N_PROCS_2 = 1
MP.addProcessorSet('group2',N_GROUP_2, N_PROCS_2)
MP.addFunctionals('group2','group2_drag',rank=0,unique=True)
# Continue adding ProcessorSets and the associated functionals for
# however many different types of analysis are required.
# -------------------------------------------------------------------
# Now that we've given MP all the required information of how we want
# our complicated multipoint problem setup, we can use the handy
# command createCommunicators() to automatically create all the
# required communicators:
comm, setComm, setFlags, groupFlags, pt_id = MP.createCommunicators()
# comm: is the communicator for a given member in a ProcessorSet. This
# is the lowest comm that comes out. The analysis to create the
# functionals should be created on this comm.
# setComm: is the communicator over the set that this processor
# belongs to. This is typically not frequently used.
# setFlags: Flags to determine which ProcessorSet you belong to.
# setFlags['group1'] will be True if the processor belongs to group1
# and False if the processor belongs to any other group. A typical way
# to use setFlags is:
# if setFlags['group1']:
# # Do stuff for group 1'
# if setFlags['group2']:
# # Do stuff for group 2'
# groupFlags and pt_id: These are used to determine which index a
# communicator is within a ProcessorSet. pt_id is the index of this
# processor inside the ProcessorSet. groupFlags[pt_id] is True on the
# 'pt_id' member.
# -------------------------------------------------------------------
# We now must define functions that will compute the functionals for
# each processor set.
def group1_obj(x):
# We must compute a single value for g1_drag, g1_lift and a vector
# of values for g1_thickness.
g1_drag = x['v1'] ** 2 * (pt_id + 1)
g1_lift = x['v1'] * 2 * 3.14159 * (pt_id + 1)
g1_thick = numpy.ones(5)
comm_values = {'group1_lift': g1_lift,
'group1_drag': g1_drag,
'group1_thickness': g1_thick,
'fail':False}
# There is a special value in comm_values called 'fail' that can
# be used to indicate that the analysis did not produce a usable
# value. This is then allReduced with a logical OR over all
# processorSets.
return comm_values
def group2_obj(x):
g2_drag = x['v2'] ** 3
comm_values = {'group2_drag': g2_drag}
return comm_values
# -------------------------------------------------------------------
# We now must define functions that will compute the SENSITIVITY of
# functionals with respect a set of design variables for each
# processor set.
def group1_sens(x,obj,con):
# We must evaluate the sensitivity of the required functionals with
# respect to our design variables. Note that MP doesn't care how
# many design variables you have; they just have to be consistent.
# Scalar values like g1_lift are now returned as gradient vectors,
# and vectors like g1_thick are returned as Jacobian matrices.
g1_drag_deriv = [2*x['v1']*(pt_id + 1),0]
g1_lift_deriv = [2*3.14159*(pt_id+1), 0]
g1_thick_deriv = numpy.zeros([5,2])
comm_values = {'group1_lift': g1_lift_deriv,
'group1_drag': g1_drag_deriv,
'group1_thickness': g1_thick_deriv}
return comm_values
def group2_sens(x,obj,con):
g2_drag_deriv = [0, 3*x['v2']**2]
comm_values = {'group2_drag': g2_drag_deriv}
return comm_values
# -------------------------------------------------------------------
# Next we must define how these functionals are going to be combined
# into our objective and constraint functions. objective and
# constraints are user defined functions that are called from MP with
# the argument 'funcs'. 'funcs' is a dictionary of all functions from
# all ProcessorSets that is now available on all processors. What
# we're now computing is how the objective and constraints are related
# to these functionals. Typically these functions are very simple and
# are entirely written in Python with just a few lines of code.
def objective(funcs, printOK):
# We have N_GROUP_1 drag values from group1 which we will average,
# and then we will add the single value from g2_drag
tmp = numpy.average(funcs['group1_drag'])
total_drag = tmp + funcs['group2_drag']
# Now simply return our objective
return total_drag
def constraints(funcs, printOK):
# Assemble all the constraint functions from the computed funcs:
f_con = []
f_con.extend(funcs['group1_lift'])
f_con.extend(funcs['group1_thickness'])
return f_con
# -------------------------------------------------------------------
# Finally we need to tell MP the functions we just defined for the
# functional evaluation and gradient as well as the objective and
# constraint functions.
# Set the objective/Sens functions:
MP.setObjFunc("group1", group1_obj)
MP.setSensFunc("group1", group1_sens)
MP.setObjFunc("group2", group2_obj)
MP.setSensFunc("group2", group2_sens)
MP.setObjectiveFunction(objective)
MP.setConstraintsFunction(constraints)
# -------------------------------------------------------------------
# Now when we setup an Optimization problem with pyOpt we use:
# opt_prob = Optimization('Ex Opt',MP.fun_obj,use_groups=True)
# ....
# Run Optimization
# snopt(opt_prob, MP.sens)
x = {}
x['v1'] = 5
x['v2'] = 2
obj_value, con_values, fail = MP.fun_obj(x)
if MPI.COMM_WORLD.rank == 0:
print('obj_value:',obj_value)
print('con_values:',con_values)
print('Fail Flag:',fail)
g_obj, g_con, fail = MP.sens(x, obj_value, con_values)
if MPI.COMM_WORLD.rank == 0:
print('g_obj',g_obj)
print('g_con',g_con)
print('Fail Flag',fail)
``` |
{
"source": "joanibal/pygeo",
"score": 2
} |
#### File: tests/reg_tests/commonUtils.py
```python
from __future__ import print_function
import os
import numpy
from mpi4py import MPI  # needed by printHeader below
from pygeo import DVGeometry, DVGeometryAxi
from pyspline import Curve
def printHeader(testName):
if MPI.COMM_WORLD.rank == 0:
print('+' + '-'*78 + '+')
print('| Test Name: ' + '%-66s'%testName + '|')
print('+' + '-'*78 + '+')
##################
# DVGeometry Tests
##################
def setupDVGeo(base_path):
#create the Parent FFD
FFDFile = os.path.join(base_path,'../inputFiles/outerBoxFFD.xyz')
DVGeo = DVGeometry(FFDFile)
# create a reference axis for the parent
axisPoints = [[ -1.0, 0. , 0.],[ 1.5, 0., 0.]]
c1 = Curve(X=axisPoints,k=2)
DVGeo.addRefAxis('mainAxis',curve=c1, axis='y')
# create the child FFD
FFDFile = os.path.join(base_path,'../inputFiles/simpleInnerFFD.xyz')
DVGeoChild = DVGeometry(FFDFile,child=True)
# create a reference axis for the child
axisPoints = [[ -0.5, 0. , 0.],[ 0.5, 0., 0.]]
c1 = Curve(X=axisPoints,k=2)
DVGeoChild.addRefAxis('nestedAxis',curve=c1, axis='y')
return DVGeo,DVGeoChild
def setupDVGeoD8(base_path, isComplex):
#create the Parent FFD
FFDFile = os.path.join(base_path,'../inputFiles/bodyFFD.xyz')
DVGeo = DVGeometry(FFDFile,complex=isComplex)
# create a reference axis for the parent
axisPoints = [[0.,0.,0.],[26.,0.,0.],[30.5,0.,0.9],
[ 32.5, 0., 1.01],[ 34.0, 0., 0.95]]
c1 = Curve(X=axisPoints,k=2)
DVGeo.addRefAxis('mainAxis',curve=c1, axis='y')
# create the child FFD
FFDFile = os.path.join(base_path,'../inputFiles/nozzleFFD.xyz')
DVGeoChild = DVGeometry(FFDFile,child=True,complex=isComplex)
# create a reference axis for the child
axisPoints = [[32.4, 1. , 1.],[ 34, 1., 0.9]]
c1 = Curve(X=axisPoints,k=2)
DVGeoChild.addRefAxis('nestedAxis',curve=c1, axis='y')
return DVGeo, DVGeoChild
def setupDVGeoAxi(base_path):
FFDFile = os.path.join(base_path,"../inputFiles/axiTestFFD.xyz")
DVGeo = DVGeometryAxi(FFDFile, center=(0., 0., 0.), collapse_into=("x", "z"))
axisPoints = [[ 0, 0. , 0.],[ 0, 0., 1.]]
c1 = Curve(X=axisPoints,k=2)
DVGeo.addRefAxis('stretch',curve=c1, axis='z')
return DVGeo
# define a nested global design variable
def childAxisPoints(val,geo):
C = geo.extractCoef('nestedAxis')
# Set the coefficients
C[0,0] = val[0]
geo.restoreCoef(C, 'nestedAxis')
return
#define a nested global design variable
def mainAxisPoints(val,geo):
C = geo.extractCoef('mainAxis')
# Set the coefficients
C[0,0] = val[0]
geo.restoreCoef(C, 'mainAxis')
return
#define a nested global design variable
def childAxisPointsD8(val,geo):
C = geo.extractCoef('nestedAxis')
# Set the coefficients
for i in range(len(val)):
C[i,0] = val[i]
geo.restoreCoef(C, 'nestedAxis')
return
#define a nested global design variable
def mainAxisPointsD8(val,geo):
C = geo.extractCoef('mainAxis')
# Set the coefficients
for i in range(len(val)):
C[i,0] = val[i]
geo.restoreCoef(C, 'mainAxis')
return
def mainAxisPointAxi(val, DVgeo):
C = DVgeo.extractCoef('stretch')
C[0,2] = val[0]
DVgeo.restoreCoef(C, 'stretch')
return
def totalSensitivityFD(DVGeo,nPt,ptName,step=1e-1):
xDV = DVGeo.getValues()
refPoints = DVGeo.update(ptName)
#now get FD Sensitivity
dIdxFD = {}
#step = 1e-1#8
for key in xDV:
baseVar = xDV[key].copy()
nDV = len(baseVar)
dIdxFD[key] = numpy.zeros([nPt,nDV])
for i in range(nDV):
#print('perturbing',key)
xDV[key][i] = baseVar[i]+step
#print('setting design vars')
DVGeo.setDesignVars(xDV)
#print('calling top level update')
newPoints = DVGeo.update(ptName)
deriv = (newPoints-refPoints)/step
dIdxFD[key][:,i] = deriv.flatten()
#print('Deriv',key, i,deriv)
xDV[key][i] = baseVar[i]
return dIdxFD
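# Note: the derivative above is a first-order forward difference,
# (f(x + h) - f(x)) / h; with the default step of 1e-1 it is only a
# coarse reference for checking the analytic sensitivities.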
def totalSensitivityCS(DVGeo,nPt,ptName):
xDV = DVGeo.getValues()
#now get CS Sensitivity
dIdxCS = {}
step = 1e-40j
for key in xDV:
baseVar = xDV[key].copy()
dIdxCS[key] = numpy.zeros([nPt,len(baseVar)])
for i in range(len(baseVar)):
xDV[key][i] = baseVar[i]+step
DVGeo.setDesignVars(xDV)
newPoints = DVGeo.update(ptName)
deriv = numpy.imag(newPoints)/numpy.imag(step)
dIdxCS[key][:,i] = deriv.flatten()
#print 'Deriv',key, i,deriv
xDV[key][i] = baseVar[i]
# Before we exit make sure we have reset the DVs
DVGeo.setDesignVars(xDV)
return dIdxCS
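# Note: this is the complex-step method: df/dx = Im(f(x + i*h)) / h with
# h = 1e-40, exact to machine precision (no subtractive cancellation),
# unlike the finite-difference check in totalSensitivityFD above.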
def testSensitivities(DVGeo,refDeriv,handler):
#create test points
points = numpy.zeros([2,3])
points[0,:] = [0.25,0,0]
points[1,:] = [-0.25,0,0]
# add points to the geometry object
ptName = 'testPoints'
DVGeo.addPointSet(points,ptName)
# generate dIdPt
nPt = 6
dIdPt = numpy.zeros([nPt,2,3])
dIdPt[0,0,0] = 1.0
dIdPt[1,0,1] = 1.0
dIdPt[2,0,2] = 1.0
dIdPt[3,1,0] = 1.0
dIdPt[4,1,1] = 1.0
dIdPt[5,1,2] = 1.0
#get analytic sensitivity
if refDeriv:
dIdx = totalSensitivityFD(DVGeo,nPt,ptName)
else:
dIdx = DVGeo.totalSensitivity(dIdPt,ptName)
handler.root_add_dict(dIdx,1e-7,1e-7)
def testSensitivitiesD8(DVGeo,refDeriv,handler):
#create test points
nPoints = 50
points = numpy.zeros([nPoints,3])
for i in range(nPoints):
nose = 0.01
tail = 34.0
delta = (tail-nose)/nPoints
points[i,:] = [nose+i*delta,1.0,0.5]
#print('points',points)
# add points to the geometry object
ptName = 'testPoints'
DVGeo.addPointSet(points,ptName,faceFreeze={})
# generate dIdPt
nPt = nPoints*3
dIdPt = numpy.zeros([nPt,nPoints,3])
counter = 0
for i in range(nPoints):
for j in range(3):
dIdPt[counter,i,j] = 1.0
counter+=1
#get analytic sensitivity
if refDeriv:
# dIdx = totalSensitivityFD(DVGeo,nPt,ptName)
dIdx = totalSensitivityCS(DVGeo,nPt,ptName)
else:
dIdx = DVGeo.totalSensitivity(dIdPt,ptName)
handler.root_add_dict(dIdx,1e-7,1e-7)
``` |
{
"source": "joanibal/pyspline",
"score": 2
} |
#### File: tests/reg_tests/test_surfaces.py
```python
from __future__ import print_function
# =============================================================================
# Standard Python modules
# =============================================================================
import os
# =============================================================================
# External Python modules
# =============================================================================
import numpy
import unittest
# =============================================================================
# Extension modules
# =============================================================================
from pyspline import pySpline
from baseclasses import BaseRegTest
def eval_test(surface, handler):
'''Eval fixed points from the surface'''
# Evaluations are only good to about 1e-10 since there is fitting
# involved
#----------- Evaluation and derivative functions ---------------
pts = [[0,0],[1,0],[0,1],[1,1],[.25,.25],[.75,.25]]
for pt in pts:
# print('Testing pt (%f %f)'%(pt[0],pt[1]))
# print('Value:')
handler.root_add_val(surface(pt[0],pt[1]),1e-10,1e-10)
# print('Deriv:')
handler.root_add_val(surface.getDerivative(pt[0],pt[1]),1e-10,1e-10)
# print('Second Derivative')
handler.root_add_val(surface.getSecondDerivative(pt[0],pt[1]),1e-8,1e-8)
# print('Orig values at each corner')
if surface.origData:
handler.root_add_val(surface.getOrigValueCorner(0))
handler.root_add_val(surface.getOrigValueCorner(1))
handler.root_add_val(surface.getOrigValueCorner(2))
handler.root_add_val(surface.getOrigValueCorner(3))
# print('Orig values on edges')
if surface.origData:
handler.root_add_val(surface.getOrigValuesEdge(0))
handler.root_add_val(surface.getOrigValuesEdge(1))
handler.root_add_val(surface.getOrigValuesEdge(2))
handler.root_add_val(surface.getOrigValuesEdge(3))
# print('getValueEdge:')
handler.root_add_val(surface.getValueEdge(0, 0.25))
handler.root_add_val(surface.getValueEdge(0, 0.75))
handler.root_add_val(surface.getValueEdge(1, 0.25))
handler.root_add_val(surface.getValueEdge(1, 0.75))
handler.root_add_val(surface.getValueEdge(2, 0.25))
handler.root_add_val(surface.getValueEdge(2, 0.75))
handler.root_add_val(surface.getValueEdge(3, 0.25))
handler.root_add_val(surface.getValueEdge(3, 0.75))
def run_surface_test(surface, handler):
''' This function is used to test the functions that are part of
the surface class. They operate on the 'surface' that is passed. '''
# Test the evaluations
eval_test(surface, handler)
# Test the windowing (same surface)
surf2 = surface.windowSurface([0,0],[1,1])
# print('Evaluations on surf2 should be same as original:')
eval_test(surf2, handler)
surf2 = surface.windowSurface([0.25,.25],[.75,.5])
# print('These points should be the same:')
handler.root_add_val(surface(0.25,.25))
handler.root_add_val(surf2(0,0))
# print('These points should be the same:')
handler.root_add_val(surface(0.75,.5))
handler.root_add_val(surf2(1,1))
# print('Test get bounds')
handler.root_add_val(surface.getBounds())
def run_project_test(surface, handler):
# Run a bunch of point projections: Only try to match to 1e-8
eps = 1e-8
# print('------------- These points should be fully inside of domain')
pts= [[0,0,0],[2,3,-1],[3,2.5,-.1]]
for pt in pts:
# print('Projecting point (%f %f %f)'%(pt[0],pt[1],pt[2]))
u,v,D = surface.projectPoint(pt,eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print('D:')
handler.root_add_val(D, eps*10, eps*10)
# print(' ----------- This should be (0,0) corner')
u,v,D = surface.projectPoint([-1,-1,0],eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print(' ---------- This should be (0,1) corner')
u,v,D = surface.projectPoint([-1,5,0],eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print(' ---------- This should be (1,0) corner')
u,v,D = surface.projectPoint([6,-1,0],eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print(' ---------- This should be (1,1) corner')
u,v,D = surface.projectPoint([6,6,0],eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print(' ---------- This should be edge zero (*,0)')
u,v,D = surface.projectPoint([2.54,-1,0],eps=1e-12)
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v,eps, eps)
# Curve projection
for kc in [2,3,4]:
x = [0,1,2,0]
y = [4,3,2,1]
z = [-3,1,3,5]
curve = pySpline.Curve(k=kc,x=x,y=y,z=z)
u,v,s,D = surface.projectCurve(curve)
# print(' ---------- surface-curve projection with kc=%d'%(kc))
# print('u:')
handler.root_add_val(u, eps, eps)
# print('v:')
handler.root_add_val(v, eps, eps)
# print('s:')
handler.root_add_val(s, eps, eps)
# print('D:')
handler.root_add_val(D, eps*10, eps*10)
def io_test(surface, handler):
'''Test the writing functions'''
surface.writeTecplot('tmp.dat', surf=True, coef=True, orig=True,
directions=True)
f = open('tmp.dat','w')
# These three calls, are private functions normally only called
# from pyGeo. We are not checking their output, rather just making
# sure they run.
surface.writeIGES_directory(f, 0, 0)
surface.writeIGES_directory(f, 0, 0)
surface.writeTin(f)
f.close()
os.remove('tmp.dat')
return
class Test(unittest.TestCase):
def setUp(self):
self.ref_file = 'ref/test_surfaces.ref'
def train(self):
with BaseRegTest(self.ref_file, train=True) as handler:
self.regression_test(handler)
def test(self):
with BaseRegTest(self.ref_file, train=False) as handler:
self.regression_test(handler)
def regression_test(self, handler, solve=False):
# Create a generic surface
nu = 10
nv = 10
u = numpy.linspace(0,4,nu)
v = numpy.linspace(0,4,nv)
[V,U] = numpy.meshgrid(v,u)
Z = numpy.cos(U)*numpy.sin(V)
for ku in [2, 3, 4]:
for kv in [2, 3, 4]:
for nCtlu in [5, 10]:
for nCtlv in [5, 10]:
# print('+'+'-'*78+'+')
# print(' '*20 + 'Testing Surface with ku=%d, kv=%d, nCtlu=%d,\
# nCtlv=%d'%(ku,kv,nCtlu,nCtlv))
# print('+'+'-'*78+'+')
surface = pySpline.Surface(x=U, y=V, z=Z, ku=ku, kv=kv,
nCtlu=nCtlu, nCtlv=nCtlv)
surface.recompute()
run_surface_test(surface, handler)
run_project_test(surface, handler)
io_test(surface, handler)
``` |
{
"source": "joanibal/pyXDSM",
"score": 2
} |
#### File: pyxdsm/tests/test_xdsm.py
```python
import unittest
import os
from pyxdsm.XDSM import XDSM
class TestXDSM(unittest.TestCase):
def setUp(self):
import os
import tempfile
self.startdir = os.getcwd()
self.tempdir = tempfile.mkdtemp(prefix='testdir-')
os.chdir(self.tempdir)
def tearDown(self):
import os
import shutil
os.chdir(self.startdir)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
def test_options(self):
filename = 'xdsm_test_options'
spec_dir = filename + '_specs'
# Change `use_sfmath` to False to use computer modern
x = XDSM(use_sfmath=False)
x.add_system('opt', 'Optimization', r'\text{Optimizer}')
x.add_system('solver', 'MDA', r'\text{Newton}')
x.add_system('D1', 'Function', 'D_1', text_width=2.0)
x.add_system('D2', 'Function', 'D_2', stack=False)
x.add_system('F', 'Function', 'F', faded=True)
x.add_system('G', 'Function', 'G', spec_name="G_spec")
x.connect('opt', 'D1', 'x, z')
x.connect('opt', 'D2', 'z')
x.connect('opt', 'F', 'x, z')
x.connect('solver', 'D1', 'y_2')
x.connect('solver', 'D2', 'y_1')
x.connect('D1', 'solver', r'\mathcal{R}(y_1)')
x.connect('solver', 'F', 'y_1, y_2')
x.connect('D2', 'solver', r'\mathcal{R}(y_2)')
x.connect('solver', 'G', 'y_1, y_2')
x.connect('F', 'opt', 'f')
x.connect('G', 'opt', 'g')
x.add_output('opt', 'x^*, z^*', side='right')
x.add_output('D1', 'y_1^*', side='left', stack=True)
x.add_output('D2', 'y_2^*', side='left')
x.add_output('F', 'f^*', side='left')
x.add_output('G', 'g^*')
x.write(filename)
x.write_sys_specs(spec_dir)
# Test if the files were created
self.assertTrue(os.path.isfile(filename + '.tikz'))
self.assertTrue(os.path.isfile(filename + '.tex'))
self.assertTrue(os.path.isdir(spec_dir))
self.assertTrue(os.path.isfile(os.path.join(spec_dir, 'F.json')))
self.assertTrue(os.path.isfile(os.path.join(spec_dir, 'G_spec.json')))
def test_tikz_content(self):
# Check that the TikZ file was created.
# Compare the content of the sample below with the newly created TikZ file.
tikz_txt = r"""
%%% Preamble Requirements %%%
% \usepackage{geometry}
% \usepackage{amsfonts}
% \usepackage{amsmath}
% \usepackage{amssymb}
% \usepackage{tikz}
% Optional packages such as sfmath set through python interface
% \usepackage{sfmath}
% \usetikzlibrary{arrows,chains,positioning,scopes,shapes.geometric,shapes.misc,shadows}
%%% End Preamble Requirements %%%
\input{"D:/Documents/GitHub/mypyXDSM/pyXDSM/pyxdsm/diagram_styles"}
\begin{tikzpicture}
\matrix[MatrixSetup]{
%Row 0
\node [DataIO] (left_output_opt) {$x^*, z^*$};&
\node [Optimization] (opt) {$\text{Optimizer}$};&
&
\node [DataInter] (opt-D1) {$x, z$};&
\node [DataInter] (opt-D2) {$z$};&
\node [DataInter] (opt-F) {$x, z$};&
\\
%Row 1
&
&
\node [MDA] (solver) {$\text{Newton}$};&
\node [DataInter] (solver-D1) {$y_2$};&
\node [DataInter] (solver-D2) {$y_1$};&
\node [DataInter] (solver-F) {$y_1, y_2$};&
\node [DataInter] (solver-G) {$y_1, y_2$};\\
%Row 2
\node [DataIO] (left_output_D1) {$y_1^*$};&
&
\node [DataInter] (D1-solver) {$\mathcal{R}(y_1)$};&
\node [Function] (D1) {$D_1$};&
&
&
\\
%Row 3
\node [DataIO] (left_output_D2) {$y_2^*$};&
&
\node [DataInter] (D2-solver) {$\mathcal{R}(y_2)$};&
&
\node [Function] (D2) {$D_2$};&
&
\\
%Row 4
\node [DataIO] (left_output_F) {$f^*$};&
\node [DataInter] (F-opt) {$f$};&
&
&
&
\node [Function] (F) {$F$};&
\\
%Row 5
\node [DataIO] (left_output_G) {$g^*$};&
\node [DataInter] (G-opt) {$g$};&
&
&
&
&
\node [Function] (G) {$G$};\\
%Row 6
&
&
&
&
&
&
\\
};
% XDSM process chains
\begin{pgfonlayer}{data}
\path
% Horizontal edges
(opt) edge [DataLine] (opt-D1)
(opt) edge [DataLine] (opt-D2)
(opt) edge [DataLine] (opt-F)
(solver) edge [DataLine] (solver-D1)
(solver) edge [DataLine] (solver-D2)
(D1) edge [DataLine] (D1-solver)
(solver) edge [DataLine] (solver-F)
(D2) edge [DataLine] (D2-solver)
(solver) edge [DataLine] (solver-G)
(F) edge [DataLine] (F-opt)
(G) edge [DataLine] (G-opt)
(opt) edge [DataLine] (left_output_opt)
(D1) edge [DataLine] (left_output_D1)
(D2) edge [DataLine] (left_output_D2)
(F) edge [DataLine] (left_output_F)
(G) edge [DataLine] (left_output_G)
% Vertical edges
(opt-D1) edge [DataLine] (D1)
(opt-D2) edge [DataLine] (D2)
(opt-F) edge [DataLine] (F)
(solver-D1) edge [DataLine] (D1)
(solver-D2) edge [DataLine] (D2)
(D1-solver) edge [DataLine] (solver)
(solver-F) edge [DataLine] (F)
(D2-solver) edge [DataLine] (solver)
(solver-G) edge [DataLine] (G)
(F-opt) edge [DataLine] (opt)
(G-opt) edge [DataLine] (opt);
\end{pgfonlayer}
\end{tikzpicture}"""
def filter_lines(lns):
# Empty lines are excluded.
# Leading and trailing whitespaces are removed
# Comments are removed.
return [ln.strip() for ln in lns if ln.strip() and not ln.strip().startswith('%')]
filename = 'xdsm_test_tikz'
x = XDSM(use_sfmath=True)
x.add_system('opt', 'Optimization', r'\text{Optimizer}')
x.add_system('solver', 'MDA', r'\text{Newton}')
x.add_system('D1', 'Function', 'D_1')
x.add_system('D2', 'Function', 'D_2')
x.add_system('F', 'Function', 'F')
x.add_system('G', 'Function', 'G')
x.connect('opt', 'D1', 'x, z')
x.connect('opt', 'D2', 'z')
x.connect('opt', 'F', 'x, z')
x.connect('solver', 'D1', 'y_2')
x.connect('solver', 'D2', 'y_1')
x.connect('D1', 'solver', r'\mathcal{R}(y_1)')
x.connect('solver', 'F', 'y_1, y_2')
x.connect('D2', 'solver', r'\mathcal{R}(y_2)')
x.connect('solver', 'G', 'y_1, y_2')
x.connect('F', 'opt', 'f')
x.connect('G', 'opt', 'g')
x.add_output('opt', 'x^*, z^*', side='left')
x.add_output('D1', 'y_1^*', side='left')
x.add_output('D2', 'y_2^*', side='left')
x.add_output('F', 'f^*', side='left')
x.add_output('G', 'g^*', side='left')
x.write(filename)
# Check if file was created
tikz_file = filename + '.tikz'
self.assertTrue(os.path.isfile(tikz_file))
tikz_lines = tikz_txt.split('\n')
tikz_lines = filter_lines(tikz_lines)
with open(tikz_file, "r") as f:
lines = filter_lines(f.readlines())
sample_no_match = [] # Sample text
new_no_match = [] # New text
for line1, line2 in zip(lines, tikz_lines):
if line1 != line2: # else everything is okay
# This can be because of the different ordering of lines or because of an error.
sample_no_match.append(line1)
new_no_match.append(line2)
# Sort both sets of suspicious lines
sample_no_match.sort()
new_no_match.sort()
for line1, line2 in zip(sample_no_match, new_no_match):
# Now the lines should match, if only the ordering was different
self.assertEqual(line1, line2)
# To be sure, check the lengths too; otherwise a missing last line could go unnoticed because zip stops at the shorter sequence
self.assertEqual(len(lines), len(tikz_lines))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joanisc/lingua-franca",
"score": 2
} |
#### File: lingua_franca/lang/format_sv.py
```python
from .format_common import convert_to_mixed_fraction
from math import floor
months = ['januari', 'februari', 'mars', 'april', 'maj', 'juni',
'juli', 'augusti', 'september', 'oktober', 'november',
'december']
NUM_STRING_SV = {
0: 'noll',
1: 'en',
2: 'två',
3: 'tre',
4: 'fyra',
5: 'fem',
6: 'sex',
7: 'sju',
8: 'åtta',
9: 'nio',
10: 'tio',
11: 'elva',
12: 'tolv',
13: 'tretton',
14: 'fjorton',
15: 'femton',
16: 'sexton',
17: 'sjutton',
18: 'arton',
19: 'nitton',
20: 'tjugo',
30: 'trettio',
40: 'fyrtio',
50: 'femtio',
60: 'sextio',
70: 'sjuttio',
80: 'åttio',
90: 'nittio',
100: 'hundra'
}
NUM_POWERS_OF_TEN = [
'hundra',
'tusen',
'miljon',
'miljard',
'biljon',
'biljard',
'triljon',
'triljard'
]
FRACTION_STRING_SV = {
2: 'halv',
3: 'tredjedel',
4: 'fjärdedel',
5: 'femtedel',
6: 'sjättedel',
7: 'sjundedel',
8: 'åttondel',
9: 'niondel',
10: 'tiondel',
11: 'elftedel',
12: 'tolftedel',
13: 'trettondel',
14: 'fjortondel',
15: 'femtondel',
16: 'sextondel',
17: 'sjuttondel',
18: 'artondel',
19: 'nittondel',
20: 'tjugondel'
}
EXTRA_SPACE = " "
def nice_number_sv(number, speech, denominators=range(1, 21)):
""" Swedish helper for nice_number
This function formats a float into a human-readable string. For example,
4.5 becomes "4 och en halv" for speech and "4 1/2" for text
Args:
number (int or float): the float to format
speech (bool): format for speech (True) or display (False)
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_to_mixed_fraction(number, denominators)
if not result:
# Give up, just represent as a 3 decimal number
return str(round(number, 3))
whole, num, den = result
if not speech:
if num == 0:
# TODO: Number grouping? E.g. "1,000,000"
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
if num == 0:
return str(whole)
den_str = FRACTION_STRING_SV[den]
if whole == 0:
if num == 1:
return_string = 'en {}'.format(den_str)
else:
return_string = '{} {}'.format(num, den_str)
elif num == 1:
return_string = '{} och en {}'.format(whole, den_str)
else:
return_string = '{} och {} {}'.format(whole, num, den_str)
if num > 1:
return_string += 'ar'
return return_string
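# A minimal usage sketch (values follow the docstring above):
#
# >>> nice_number_sv(4.5, speech=True)
# '4 och en halv'
# >>> nice_number_sv(4.5, speech=False)
# '4 1/2'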
def pronounce_number_sv(num, places=2):
"""
Convert a number to its spoken equivalent
For example, '5.2' would return 'fem komma två'
Args:
num(float or int): the number to pronounce (set limit below)
places(int): maximum decimal places to speak
Returns:
(str): The pronounced number
"""
def pronounce_triplet_sv(num):
result = ""
num = floor(num)
if num > 99:
hundreds = floor(num / 100)
if hundreds > 0:
if hundreds == 1:
result += 'ett' + 'hundra'
else:
result += NUM_STRING_SV[hundreds] + 'hundra'
num -= hundreds * 100
if num == 0:
result += '' # do nothing
elif num == 1:
result += 'ett'
elif num <= 20:
result += NUM_STRING_SV[num]
elif num > 20:
ones = num % 10
tens = num - ones
if tens > 0:
result += NUM_STRING_SV[tens]
if ones > 0:
result += NUM_STRING_SV[ones]
return result
def pronounce_fractional_sv(num, places):
# fixed number of places even with trailing zeros
result = ""
place = 10
while places > 0:
# doesn't work with 1.0001 and places = 2: int(
# num*place) % 10 > 0 and places > 0:
result += " " + NUM_STRING_SV[int(num * place) % 10]
place *= 10
places -= 1
return result
def pronounce_whole_number_sv(num, scale_level=0):
if num == 0:
return ''
num = floor(num)
result = ''
last_triplet = num % 1000
if last_triplet == 1:
if scale_level == 0:
if result != '':
result += '' + 'ett'
else:
result += 'en'
elif scale_level == 1:
result += 'ettusen' + EXTRA_SPACE
else:
result += 'en ' + NUM_POWERS_OF_TEN[scale_level] + EXTRA_SPACE
elif last_triplet > 1:
result += pronounce_triplet_sv(last_triplet)
if scale_level == 1:
result += 'tusen' + EXTRA_SPACE
if scale_level >= 2:
result += NUM_POWERS_OF_TEN[scale_level]
if scale_level >= 2:
result += 'er' + EXTRA_SPACE # MiljonER
num = floor(num / 1000)
scale_level += 1
return pronounce_whole_number_sv(num, scale_level) + result
result = ""
if abs(num) >= 1000000000000000000000000: # cannot do more than this
return str(num)
elif num == 0:
return str(NUM_STRING_SV[0])
elif num < 0:
return "minus " + pronounce_number_sv(abs(num), places)
else:
if num == int(num):
return pronounce_whole_number_sv(num)
else:
whole_number_part = floor(num)
fractional_part = num - whole_number_part
result += pronounce_whole_number_sv(whole_number_part)
if places > 0:
result += " komma"
result += pronounce_fractional_sv(fractional_part, places)
return result
def pronounce_ordinal_sv(num):
# ordinals for 1, 3, 7 and 8 are irregular
# this produces the base form, it will have to be adapted for genus,
# casus, numerus
ordinals = ["noll", "första", "andra", "tredje", "fjärde", "femte",
"sjätte", "sjunde", "åttonde", "nionde", "tionde"]
tens = int(floor(num / 10.0)) * 10
ones = num % 10
if num < 0 or num != int(num):
return num
if num == 0:
return ordinals[num]
result = ""
if num > 10:
result += pronounce_number_sv(tens).rstrip()
if ones > 0:
result += ordinals[ones]
else:
result += 'de'
return result
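# A minimal usage sketch:
#
# >>> pronounce_ordinal_sv(3)
# 'tredje'
# >>> pronounce_ordinal_sv(21)
# 'tjugoförsta'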
def nice_time_sv(dt, speech=True, use_24hour=False, use_ampm=False):
"""
Format a time to a comfortable human format
For example, generate 'five thirty' for speech or '5:30' for
text display.
Args:
dt (datetime): date to format (assumes already in local timezone)
speech (bool): format for speech (default/True) or display (False)
use_24hour (bool): output in 24-hour/military or 12-hour format
use_ampm (bool): include the am/pm for 12-hour format
Returns:
(str): The formatted time string
"""
if use_24hour:
# e.g. "03:01" or "14:22"
string = dt.strftime("%H:%M")
else:
if use_ampm:
# e.g. "3:01 AM" or "2:22 PM"
string = dt.strftime("%I:%M %p")
else:
# e.g. "3:01" or "2:22"
string = dt.strftime("%I:%M")
if not speech:
return string
# Generate a speakable version of the time
speak = ""
if use_24hour:
if dt.hour == 1:
speak += "ett" # 01:00 is "ett" not "en"
else:
speak += pronounce_number_sv(dt.hour)
if not dt.minute == 0:
if dt.minute < 10:
speak += ' noll'
if dt.minute == 1:
speak += ' ett'
else:
speak += " " + pronounce_number_sv(dt.minute)
return speak # ampm is ignored when use_24hour is true
else:
hour = dt.hour
if not dt.minute == 0:
if dt.minute < 30:
if dt.minute != 15:
speak += pronounce_number_sv(dt.minute)
else:
speak += 'kvart'
if dt.minute == 1:
speak += ' minut över '
elif dt.minute != 10 and dt.minute != 5 and dt.minute != 15:
speak += ' minuter över '
else:
speak += ' över '
elif dt.minute > 30:
if dt.minute != 45:
speak += pronounce_number_sv((60 - dt.minute))
else:
speak += 'kvart'
if dt.minute == 1:
speak += ' minut i '
elif dt.minute != 50 and dt.minute != 55 and dt.minute != 45:
speak += ' minuter i '
else:
speak += ' i '
hour = (hour + 1) % 12
elif dt.minute == 30:
speak += 'halv '
hour = (hour + 1) % 12
if hour == 0 and dt.minute == 0:
return "midnatt"
if hour == 12 and dt.minute == 0:
return "middag"
# TODO: "half past 3", "a quarter of 4" and other idiomatic times
if hour == 0:
speak += pronounce_number_sv(12)
elif hour <= 13:
if hour == 1 or hour == 13: # 01:00 and 13:00 is "ett"
speak += 'ett'
else:
speak += pronounce_number_sv(hour)
else:
speak += pronounce_number_sv(hour - 12)
if use_ampm:
if dt.hour > 11:
if dt.hour < 18:
# 12:01 - 17:59: in the afternoon
speak += " på eftermiddagen"
elif dt.hour < 22:
# 18:00 - 21:59: in the evening
speak += " på kvällen"
else:
# 22:00 - 23:59: at night
speak += " på natten"
elif dt.hour < 3:
# 00:01 - 02:59: at night
speak += " på natten"
else:
# 03:00 - 11:59: in the morning
speak += " på morgonen"
return speak
def nice_response_sv(text):
# check for months and call nice_ordinal_sv for declension of ordinals;
# replace "^" with "upphöjt till" (to the power of)
words = text.split()
for idx, word in enumerate(words):
if word.lower() in months:
text = nice_ordinal_sv(text)
if word == '^':
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
if wordNext.isnumeric():
words[idx] = "upphöjt till"
text = " ".join(words)
return text
def nice_ordinal_sv(text):
# check for months and decline ordinals that precede them,
# depending on the article/preposition
normalized_text = text
words = text.split()
for idx, word in enumerate(words):
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordPrev = words[idx - 1] if idx > 0 else ""
if word[-1:] == ".":
if word[:-1].isdecimal():
if wordNext.lower() in months:
word = pronounce_ordinal_sv(int(word[:-1]))
if wordPrev.lower() in ["om", "den", "från", "till",
"(från", "(om", "till"]:
word += "n"
elif wordPrev.lower() not in ["den"]:
word += "r"
words[idx] = word
normalized_text = " ".join(words)
return normalized_text
```
#### File: lingua-franca/test/test_format_fr.py
```python
import unittest
import datetime
from lingua_franca.format import nice_number
from lingua_franca.format import nice_time
from lingua_franca.format import pronounce_number
NUMBERS_FIXTURE_FR = {
1.435634: '1,436',
2: '2',
5.0: '5',
1234567890: '1234567890',
12345.67890: '12345,679',
0.027: '0,027',
0.5: 'un demi',
1.333: '1 et 1 tiers',
2.666: '2 et 2 tiers',
0.25: 'un quart',
1.25: '1 et 1 quart',
0.75: '3 quarts',
1.75: '1 et 3 quarts',
3.4: '3 et 2 cinquièmes',
16.8333: '16 et 5 sixièmes',
12.5714: '12 et 4 septièmes',
9.625: '9 et 5 huitièmes',
6.777: '6 et 7 neuvièmes',
3.1: '3 et 1 dixième',
2.272: '2 et 3 onzièmes',
5.583: '5 et 7 douzièmes',
8.384: '8 et 5 treizièmes',
0.071: 'un quatorzième',
6.466: '6 et 7 quinzièmes',
8.312: '8 et 5 seizièmes',
2.176: '2 et 3 dix-septièmes',
200.722: '200 et 13 dix-huitièmes',
7.421: '7 et 8 dix-neuvièmes',
0.05: 'un vingtième'
}
class TestNiceNumberFormat_fr(unittest.TestCase):
def test_convert_float_to_nice_number_fr(self):
for number, number_str in NUMBERS_FIXTURE_FR.items():
self.assertEqual(nice_number(number, lang="fr-fr"), number_str,
'should format {} as {} and not {}'.format(
number, number_str, nice_number(
number, lang="fr-fr")))
def test_specify_denominator_fr(self):
self.assertEqual(nice_number(5.5, lang="fr-fr",
denominators=[1, 2, 3]),
'5 et demi',
'should format 5.5 as 5 et demi not {}'.format(
nice_number(5.5, lang="fr-fr",
denominators=[1, 2, 3])))
self.assertEqual(nice_number(2.333, lang="fr-fr",
denominators=[1, 2]),
'2,333',
'should format 2.333 as 2,333 not {}'.format(
nice_number(2.333, lang="fr-fr",
denominators=[1, 2])))
def test_no_speech_fr(self):
self.assertEqual(nice_number(6.777, lang="fr-fr", speech=False),
'6 7/9',
'should format 6.777 as 6 7/9 not {}'.format(
nice_number(6.777, lang="fr-fr", speech=False)))
self.assertEqual(nice_number(6.0, lang="fr-fr", speech=False),
'6',
'should format 6.0 as 6 not {}'.format(
nice_number(6.0, lang="fr-fr", speech=False)))
self.assertEqual(nice_number(1234567890, lang="fr-fr", speech=False),
'1 234 567 890',
'should format 1234567890 as '
'1 234 567 890 not {}'.format(
nice_number(1234567890, lang="fr-fr",
speech=False)))
self.assertEqual(nice_number(12345.6789, lang="fr-fr", speech=False),
'12 345,679',
'should format 12345.6789 as '
'12 345,679 not {}'.format(
nice_number(12345.6789, lang="fr-fr",
speech=False)))
# def pronounce_number(number, lang="en-us", places=2):
class TestPronounceNumber_fr(unittest.TestCase):
def test_convert_int_fr(self):
self.assertEqual(pronounce_number(0, lang="fr-fr"), "zéro")
self.assertEqual(pronounce_number(1, lang="fr-fr"), "un")
self.assertEqual(pronounce_number(10, lang="fr-fr"), "dix")
self.assertEqual(pronounce_number(15, lang="fr-fr"), "quinze")
self.assertEqual(pronounce_number(20, lang="fr-fr"), "vingt")
self.assertEqual(pronounce_number(27, lang="fr-fr"), "vingt-sept")
self.assertEqual(pronounce_number(30, lang="fr-fr"), "trente")
self.assertEqual(pronounce_number(33, lang="fr-fr"), "trente-trois")
self.assertEqual(pronounce_number(71, lang="fr-fr"),
"soixante-et-onze")
self.assertEqual(pronounce_number(80, lang="fr-fr"), "quatre-vingts")
self.assertEqual(pronounce_number(74, lang="fr-fr"),
"soixante-quatorze")
self.assertEqual(pronounce_number(79, lang="fr-fr"),
"soixante-dix-neuf")
self.assertEqual(pronounce_number(91, lang="fr-fr"),
"quatre-vingt-onze")
self.assertEqual(pronounce_number(97, lang="fr-fr"),
"quatre-vingt-dix-sept")
self.assertEqual(pronounce_number(300, lang="fr-fr"), "300")
def test_convert_negative_int_fr(self):
self.assertEqual(pronounce_number(-1, lang="fr-fr"), "moins un")
self.assertEqual(pronounce_number(-10, lang="fr-fr"), "moins dix")
self.assertEqual(pronounce_number(-15, lang="fr-fr"), "moins quinze")
self.assertEqual(pronounce_number(-20, lang="fr-fr"), "moins vingt")
self.assertEqual(pronounce_number(-27, lang="fr-fr"),
"moins vingt-sept")
self.assertEqual(pronounce_number(-30, lang="fr-fr"), "moins trente")
self.assertEqual(pronounce_number(-33, lang="fr-fr"),
"moins trente-trois")
def test_convert_decimals_fr(self):
self.assertEqual(pronounce_number(0.05, lang="fr-fr"),
"zéro virgule zéro cinq")
self.assertEqual(pronounce_number(-0.05, lang="fr-fr"),
"moins zéro virgule zéro cinq")
self.assertEqual(pronounce_number(1.234, lang="fr-fr"),
"un virgule deux trois")
self.assertEqual(pronounce_number(21.234, lang="fr-fr"),
"vingt-et-un virgule deux trois")
self.assertEqual(pronounce_number(21.234, lang="fr-fr", places=1),
"vingt-et-un virgule deux")
self.assertEqual(pronounce_number(21.234, lang="fr-fr", places=0),
"vingt-et-un")
self.assertEqual(pronounce_number(21.234, lang="fr-fr", places=3),
"vingt-et-un virgule deux trois quatre")
self.assertEqual(pronounce_number(21.234, lang="fr-fr", places=4),
"vingt-et-un virgule deux trois quatre")
self.assertEqual(pronounce_number(21.234, lang="fr-fr", places=5),
"vingt-et-un virgule deux trois quatre")
self.assertEqual(pronounce_number(-1.234, lang="fr-fr"),
"moins un virgule deux trois")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr"),
"moins vingt-et-un virgule deux trois")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr", places=1),
"moins vingt-et-un virgule deux")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr", places=0),
"moins vingt-et-un")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr", places=3),
"moins vingt-et-un virgule deux trois quatre")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr", places=4),
"moins vingt-et-un virgule deux trois quatre")
self.assertEqual(pronounce_number(-21.234, lang="fr-fr", places=5),
"moins vingt-et-un virgule deux trois quatre")
# def nice_time(dt, lang="en-us", speech=True, use_24hour=False,
# use_ampm=False):
class TestNiceDateFormat_fr(unittest.TestCase):
def test_convert_times_fr(self):
dt = datetime.datetime(2017, 1, 31,
13, 22, 3)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"une heure vingt-deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"une heure vingt-deux de l'après-midi")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"1:22")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"1:22 PM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"13:22")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"13:22")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"treize heures vingt-deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"treize heures vingt-deux")
dt = datetime.datetime(2017, 1, 31,
13, 0, 3)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"une heure")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"une heure de l'après-midi")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"1:00")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"1:00 PM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"13:00")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"13:00")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"treize heures")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"treize heures")
dt = datetime.datetime(2017, 1, 31,
13, 2, 3)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"une heure deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"une heure deux de l'après-midi")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"1:02")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"1:02 PM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"13:02")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"13:02")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"treize heures deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"treize heures deux")
dt = datetime.datetime(2017, 1, 31,
0, 2, 3)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"minuit deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"minuit deux")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"12:02")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"12:02 AM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"00:02")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"00:02")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"minuit deux")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"minuit deux")
dt = datetime.datetime(2017, 1, 31,
12, 15, 9)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"midi et quart")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"midi et quart")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"12:15")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"12:15 PM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"12:15")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"12:15")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"midi quinze")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"midi quinze")
dt = datetime.datetime(2017, 1, 31,
19, 40, 49)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"huit heures moins vingt")
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"huit heures moins vingt du soir")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False),
"7:40")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_ampm=True),
"7:40 PM")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True),
"19:40")
self.assertEqual(nice_time(dt, lang="fr-fr", speech=False,
use_24hour=True, use_ampm=True),
"19:40")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=True),
"dix-neuf heures quarante")
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True,
use_ampm=False),
"dix-neuf heures quarante")
dt = datetime.datetime(2017, 1, 31,
1, 15, 00)
self.assertEqual(nice_time(dt, lang="fr-fr", use_24hour=True),
"une heure quinze")
dt = datetime.datetime(2017, 1, 31,
1, 35, 00)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"deux heures moins vingt-cinq")
dt = datetime.datetime(2017, 1, 31,
1, 45, 00)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"deux heures moins le quart")
dt = datetime.datetime(2017, 1, 31,
4, 50, 00)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"cinq heures moins dix")
dt = datetime.datetime(2017, 1, 31,
5, 55, 00)
self.assertEqual(nice_time(dt, lang="fr-fr"),
"six heures moins cinq")
dt = datetime.datetime(2017, 1, 31,
5, 30, 00)
self.assertEqual(nice_time(dt, lang="fr-fr", use_ampm=True),
"cinq heures et demi du matin")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joanise/g2p",
"score": 2
} |
#### File: g2p/g2p/__init__.py
```python
import sys
import io
from networkx import shortest_path
from networkx.exception import NetworkXNoPath
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from flask_talisman import Talisman
from g2p.mappings import Mapping
from g2p.mappings.langs import LANGS, LANGS_NETWORK
from g2p.transducer import CompositeTransducer, Transducer
from g2p.transducer.indices import Indices
from g2p.mappings.utils import expand_abbreviations, flatten_abbreviations
from g2p._version import VERSION
from g2p.log import LOGGER
if sys.stdout.encoding != 'utf8':
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf8")
if sys.stderr.encoding != 'utf8':
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf8")
APP = Flask(__name__)
SOCKETIO = SocketIO(APP)
DEFAULT_N = 10
def return_echart_data(indices: Indices):
input_string = indices.input()
input_x = 300
input_y = 300
output_string = indices.output()
output_x = 500
output_y = 300
inputs = [{'name': f"{x} (in-{i})", "x": input_x, "y": input_y + (i*50)} for i,x in enumerate(input_string)]
outputs = [{'name': f"{x} (out-{i})", "x": output_x, "y": output_y + (i*50)} for i,x in enumerate(output_string)]
data = inputs + outputs
links = [{"source": x[0][0], "target": x[1][0] + len(input_string)} for x in indices()]
return data, links
def return_empty_mappings(n=DEFAULT_N):
''' Return 'n' empty mappings
'''
y = 0
mappings = []
while y < n:
mappings.append({
"in": '',
"out": '',
"context_before": '',
"context_after": ''
})
y += 1
return mappings
def hot_to_mappings(hot_data):
''' Parse data from HandsOnTable to Mapping format
'''
return [{"context_before": str(x[2] or ''), "in": str(x[0] or ''), "context_after": str(x[3] or ''),
"out": str(x[1] or '')} for x in hot_data if x[0] or x[1]]
@APP.route('/')
def home():
""" Return homepage of g2p Studio
"""
return render_template('index.html', langs=LANGS)
@SOCKETIO.on('index conversion event', namespace='/convert')
def index_convert(message):
""" Convert input text and return output with indices for echart
"""
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(
message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
output_string, indices = transducer(message['data']['input_string'], index=True)
data, links = return_echart_data(indices)
emit('index conversion response', {'output_string': output_string, 'index_data': data, 'index_links': links})
@SOCKETIO.on('conversion event', namespace='/convert')
def convert(message):
""" Convert input text and return output
"""
mappings = Mapping(hot_to_mappings(message['data']['mappings']), abbreviations=flatten_abbreviations(
message['data']['abbreviations']), **message['data']['kwargs'])
transducer = Transducer(mappings)
output_string = transducer(message['data']['input_string'])
emit('conversion response', {'output_string': output_string})
@SOCKETIO.on('table event', namespace='/table')
def change_table(message):
""" Change the lookup table
"""
if message['in_lang'] == 'custom' or message['out_lang'] == 'custom':
mappings = Mapping(return_empty_mappings())
else:
mappings = Mapping(
in_lang=message['in_lang'], out_lang=message['out_lang'])
emit('table response', {'mappings': mappings.plain_mapping(),
'abbs': expand_abbreviations(mappings.abbreviations),
'kwargs': mappings.kwargs})
@SOCKETIO.on('connect', namespace='/connect')
def test_connect():
""" Let client know disconnected
"""
emit('connection response', {'data': 'Connected'})
@SOCKETIO.on('disconnect', namespace='/connect')
def test_disconnect():
""" Let client know disconnected
"""
emit('connection response', {'data': 'Disconnected'})
def make_g2p(in_lang: str, out_lang: str):
# Check in_lang is a node in network
if not in_lang in LANGS_NETWORK.nodes:
LOGGER.error(f"No lang called {in_lang}. Please try again.")
raise(FileNotFoundError)
# Check out_lang is a node in network
if not out_lang in LANGS_NETWORK.nodes:
LOGGER.error(f"No lang called {out_lang}. Please try again.")
raise(FileNotFoundError)
# Try to find the shortest path between the nodes
try:
path = shortest_path(LANGS_NETWORK, in_lang, out_lang)
except NetworkXNoPath:
LOGGER.error(f"Sorry, we couldn't find a way to convert {in_lang} to {out_lang}. Please update your langs by running `g2p update` and try again.")
raise(NetworkXNoPath)
# Find all mappings needed
mappings_needed = []
for i, lang in enumerate(path):
try:
mapping = Mapping(in_lang=path[i], out_lang=path[i+1])
LOGGER.info(f"Adding mapping between {path[i]} and {path[i+1]} to composite transducer.")
mappings_needed.append(mapping)
except IndexError:
continue
# Either return Transducer or Composite Transducer
if len(mappings_needed) == 1:
return Transducer(mappings_needed[0])
else:
return CompositeTransducer([Transducer(x) for x in mappings_needed])
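# A minimal usage sketch (the atj example mirrors
# g2p/tests/test_network.py):
#
# >>> transducer = make_g2p('atj', 'atj-ipa')
# >>> transducer('nikikw')
# 'niɡiɡw'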
```
#### File: g2p/mappings/create_fallback_mapping.py
```python
from unidecode import unidecode
from g2p import make_g2p
from g2p.log import LOGGER
from g2p.mappings import Mapping
from g2p.mappings.create_ipa_mapping import align_inventories
from g2p.mappings.utils import generate_config, is_ipa, write_generated_mapping_to_file, unicode_escape
def align_to_dummy_fallback(mapping: Mapping, io: str = 'in', write_to_file: bool = False):
dummy_inventory = ["ɑ", "i", "u", "t", "s", "n"]
display_name = mapping.kwargs.get('language_name', 'No Language display name in Config')
config = generate_config(mapping.kwargs[f'{io}_lang'], 'dummy', display_name, display_name)
default_char = 't'
if is_ipa(mapping.kwargs[f'{io}_lang']):
mapping = align_inventories(mapping.inventory(io), dummy_inventory)
else:
und_g2p = make_g2p('und', 'und-ipa')
mapping = [{"in": unicode_escape(x), "out": und_g2p(unidecode(x).lower())} for x in mapping.inventory(io)]
dummy_list = align_inventories([x['out'] for x in mapping], dummy_inventory)
dummy_dict = {}
for x in dummy_list:
if x['in']:
dummy_dict[x['in']] = x['out']
for x in mapping:
try:
x['out'] = dummy_dict[x['out']]
except KeyError:
LOGGER.warn(f"We couldn't guess at what {x['in']} means, so it's being replaced with '{default_char}' instead.")
x['out'] = default_char
if write_to_file:
write_generated_mapping_to_file(config, mapping)
return config, mapping
if __name__ == "__main__":
test = Mapping(in_lang='git', out_lang='git-ipa')
dummy_config, dummy_mapping = align_to_dummy_fallback(test, write_to_file=True)
```
#### File: g2p/tests/test_langs.py
```python
from unittest import main, TestCase
import os
from g2p import make_g2p
from g2p.mappings import Mapping
from g2p.transducer import Transducer
class LangTest(TestCase):
''' Basic Test for individual lookup tables
'''
def setUp(self):
git = [
{"in_lang": "git",
"out_lang": "git-ipa",
"samples": [
('gwila', '\u0261ʷilæ'),
("hlik\u0332'sxw", "ɬiq\u0294sx\u02b7")
],
},
{'in_lang': 'git',
'out_lang': 'eng-arpabet',
"samples": [
("K̲'ay", 'K HH AE Y'),
("guts'uusgi'y", 'G UW T S HH UW S G IY HH Y')
]},
{'in_lang': 'str-sen',
'out_lang': 'eng-arpabet',
"samples": [
('X̱I¸ÁM¸', 'SH W IY HH EY M HH')
]},
{'in_lang': 'ctp',
'out_lang': 'eng-arpabet',
"samples": [
('Qneᴬ', 'HH N EY')
]}
]
self.langs_to_test = git
def test_io(self):
# go through each language declared in the test case set up
for lang in self.langs_to_test:
in_lang = lang['in_lang']
out_lang = lang['out_lang']
transducer = make_g2p(in_lang, out_lang)
# go through each table in the current lang
for sample in lang['samples']:
# assert that the transduced first item in the tuple is equal to the second item in the tuple
self.assertEqual(transducer(sample[0]), sample[1])
if __name__ == "__main__":
main()
```
#### File: g2p/tests/test_network.py
```python
from unittest import main, TestCase
import os
from networkx.exception import NetworkXNoPath
from g2p.mappings import Mapping
from g2p.transducer import CompositeTransducer, Transducer
from g2p import make_g2p
class NetworkTest(TestCase):
''' Basic Test for available networks
'''
def setUp(self):
pass
def test_not_found(self):
with self.assertRaises(FileNotFoundError):
make_g2p('foo', 'eng-ipa')
with self.assertRaises(FileNotFoundError):
make_g2p('git', 'bar')
def test_no_path(self):
with self.assertRaises(NetworkXNoPath):
make_g2p('hei', 'git')
def test_valid_composite(self):
transducer = make_g2p('atj', 'eng-ipa')
self.assertTrue(isinstance(transducer, CompositeTransducer))
self.assertEqual('niɡiɡw', transducer('nikikw'))
def test_valid_transducer(self):
transducer = make_g2p('atj', 'atj-ipa')
self.assertTrue(isinstance(transducer, Transducer))
self.assertEqual('niɡiɡw', transducer('nikikw'))
if __name__ == "__main__":
main()
``` |
{
"source": "joanise/ReadAlong-Studio",
"score": 3
} |
#### File: ReadAlong-Studio/readalongs/tempfile.py
```python
import os
from tempfile import NamedTemporaryFile, _TemporaryFileWrapper, template # type: ignore
class _PortableNamedTemporaryFileWrapperSubclass(_TemporaryFileWrapper):
def __init__(self):
pass
class _PortableNamedTemporaryFileWrapper:
def __init__(self, named_temporary_file):
self.named_temporary_file = named_temporary_file
self.name = named_temporary_file.name
def __enter__(self):
self.named_temporary_file.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
result = self.named_temporary_file.__exit__(exc_type, exc_value, traceback)
self.cleanup()
return result
def __del__(self):
self.cleanup()
def __getattr__(self, name):
return getattr(self.named_temporary_file, name)
def close(self):
return self.named_temporary_file.close()
# def __iter__(self):
# for line in self.named_temporary_file:
# yield line
def cleanup(self):
self.close()
try:
os.unlink(self.named_temporary_file.name)
except FileNotFoundError:
pass # cleaning up more than once is not an error
def PortableNamedTemporaryFile(
mode="w+b", suffix="", prefix=template, dir=None, delete=True
):
"""
Wrap tempfile.NamedTemporaryFile() with a portable behaviour that works on Windows, Linux and Mac
See https://docs.python.org/3/library/tempfile.html for full documentation.
The difference is that if you specify delete=True, the temporary file will be deleted when the returned
object is destroyed rather than when the file is closed. On windows, it is not possible to reopen the
file while the original handle is still open, so this function makes temporary files work across OS's.
"""
if not delete:
return NamedTemporaryFile(
mode=mode, suffix=suffix, prefix=prefix, delete=delete
)
else:
named_temporary_file = NamedTemporaryFile(
mode=mode, suffix=suffix, prefix=prefix, delete=False
)
return _PortableNamedTemporaryFileWrapper(
named_temporary_file=named_temporary_file
)
```
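A short usage sketch of the wrapper above (file contents invented); the point is that the file can be reopened by name while the original handle is still open, which `NamedTemporaryFile(delete=True)` forbids on Windows:
```python
# Sketch: write through the wrapper, reopen by name, then clean up explicitly.
tmp = PortableNamedTemporaryFile(mode="w", suffix=".txt")
tmp.write("hello")
tmp.flush()
with open(tmp.name) as f:   # reopening by name also works on Windows
    assert f.read() == "hello"
tmp.cleanup()               # or simply drop the last reference
```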
#### File: readalongs/text/make_fsg.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import pystache
from slugify import slugify
from readalongs.log import LOGGER
from readalongs.text.util import load_xml, save_txt
FSG_TEMPLATE = """FSG_BEGIN {{name}}
NUM_STATES {{num_states}}
START_STATE 0
FINAL_STATE {{final_state}}
{{#states}}
TRANSITION {{current}} {{next}} 1.0 {{id}}
{{/states}}
FSG_END
"""
def make_fsg(xml, filename, unit="m"):
name = slugify(os.path.splitext(os.path.basename(filename))[0])
data = {
"name": name, # If name includes special characters, pocketsphinx throws a RuntimeError: new_Decoder returned -1
"states": [],
"num_states": 0,
}
for e in xml.xpath(".//" + unit):
if "id" not in e.attrib: # don't put in elements with no id
continue
if not e.text or not e.text.strip():
LOGGER.warning("No text in node %s", e.attrib["id"])
continue
text = e.text.strip()
# if not e.text.strip(): # don't put in elements with no text
# continue
data["states"].append(
{
"id": e.attrib["id"] if text else "",
"current": data["num_states"],
"next": data["num_states"] + 1,
}
)
data["num_states"] += 1
data["final_state"] = data["num_states"]
data["num_states"] += 1
return pystache.render(FSG_TEMPLATE, data)
def go(input_filename, output_filename, unit):
xml = load_xml(input_filename)
fsg = make_fsg(xml, input_filename, unit)
save_txt(output_filename, fsg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Make an FSG grammar from an XML file with IDs"
)
parser.add_argument("input", type=str, help="Input XML")
parser.add_argument("output_fsg", type=str, help="Output .fsg file")
parser.add_argument(
"--unit",
type=str,
default="m",
help="XML tag of the unit of analysis " '(e.g. "w" for word, "m" for morpheme)',
)
args = parser.parse_args()
go(args.input, args.output_fsg, args.unit)
```
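To make the template concrete, here is a sketch of `make_fsg` applied to a tiny hand-built XML document (tag and id values invented):
```python
# Sketch: two morpheme elements produce a 3-state linear FSG.
from lxml import etree

xml = etree.fromstring('<doc><m id="w1">nikikw</m><m id="w2">atj</m></doc>')
print(make_fsg(xml, "sample.xml"))
# FSG_BEGIN sample
# NUM_STATES 3
# START_STATE 0
# FINAL_STATE 2
# TRANSITION 0 1 1.0 w1
# TRANSITION 1 2 1.0 w2
# FSG_END
```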
#### File: ReadAlong-Studio/test/test_prepare_cli.py
```python
import io
import os
import sys
import tempfile
from shutil import copyfile
from unittest import TestCase, main
from readalongs.app import app
from readalongs.cli import prepare
from readalongs.log import LOGGER
class TestPrepareCli(TestCase):
LOGGER.setLevel("DEBUG")
data_dir = os.path.join(os.path.dirname(__file__), "data")
def setUp(self):
app.logger.setLevel("DEBUG")
self.runner = app.test_cli_runner()
self.tempdirobj = tempfile.TemporaryDirectory(
prefix="test_prepare_cli_tmpdir", dir="."
)
self.tempdir = self.tempdirobj.name
# Alternative tempdir code keeps it after running, for manual inspection:
# self.tempdir = tempfile.mkdtemp(prefix="test_prepare_cli_tmpdir", dir=".")
# print('tmpdir={}'.format(self.tempdir))
self.empty_file = os.path.join(self.tempdir, "empty.txt")
with io.open(self.empty_file, "wb") as f:
pass
def tearDown(self):
self.tempdirobj.cleanup()
def test_invoke_prepare(self):
results = self.runner.invoke(
prepare,
["-l", "atj", "-d", self.empty_file, os.path.join(self.tempdir, "delme")],
)
self.assertEqual(results.exit_code, 0)
self.assertRegex(results.stdout, "Running readalongs prepare")
# print('Prepare.stdout: {}'.format(results.stdout))
def test_no_lang(self):
results = self.runner.invoke(
prepare, [self.empty_file, self.empty_file + ".xml"]
)
self.assertNotEqual(results.exit_code, 0)
self.assertRegex(results.stdout, "Missing.*language")
def test_inputfile_not_exist(self):
results = self.runner.invoke(prepare, "-l atj /file/does/not/exist delme")
self.assertNotEqual(results.exit_code, 0)
self.assertRegex(results.stdout, "No such file or directory")
def test_outputfile_exists(self):
results = self.runner.invoke(
prepare,
["-l", "atj", self.empty_file, os.path.join(self.tempdir, "exists")],
)
results = self.runner.invoke(
prepare,
["-l", "atj", self.empty_file, os.path.join(self.tempdir, "exists")],
)
self.assertNotEqual(results.exit_code, 0)
self.assertRegex(results.stdout, "exists.*overwrite")
def test_output_exists(self):
xmlfile = os.path.join(self.tempdir, "fra.xml")
results = self.runner.invoke(
prepare, ["-l", "fra", os.path.join(self.data_dir, "fra.txt"), xmlfile]
)
self.assertEqual(results.exit_code, 0)
self.assertTrue(os.path.exists(xmlfile), "output xmlfile did not get created")
def test_input_is_stdin(self):
results = self.runner.invoke(prepare, "-l fra -", input="Ceci est un test.")
# LOGGER.warning("Output: {}".format(results.output))
# LOGGER.warning("Exception: {}".format(results.exception))
self.assertEqual(results.exit_code, 0)
self.assertIn("<s>Ceci est un test", results.stdout)
self.assertIn('<text xml:lang="fra">', results.stdout)
def test_generate_output_name(self):
input_file = os.path.join(self.tempdir, "someinput.txt")
copyfile(os.path.join(self.data_dir, "fra.txt"), input_file)
results = self.runner.invoke(prepare, ["-l", "fra", input_file])
LOGGER.warning("Output: {}".format(results.output))
LOGGER.warning("Exception: {}".format(results.exception))
self.assertEqual(results.exit_code, 0)
self.assertRegex(results.stdout, "Wrote.*someinput[.]xml")
self.assertTrue(os.path.exists(os.path.join(self.tempdir, "someinput.xml")))
if __name__ == "__main__":
main()
```
#### File: ReadAlong-Studio/test/test_tokenize_cli.py
```python
import io
import os
import sys
import tempfile
from shutil import copyfile
from unittest import TestCase, main
from readalongs.app import app
from readalongs.cli import prepare, tokenize
from readalongs.log import LOGGER
class TestTokenizeCli(TestCase):
LOGGER.setLevel("DEBUG")
data_dir = os.path.join(os.path.dirname(__file__), "data")
def setUp(self):
app.logger.setLevel("DEBUG")
self.runner = app.test_cli_runner()
self.tempdirobj = tempfile.TemporaryDirectory(
prefix="test_tokenize_cli_tmpdir", dir="."
)
self.tempdir = self.tempdirobj.name
# Alternative tempdir code keeps it after running, for manual inspection:
# self.tempdir = tempfile.mkdtemp(prefix="test_tokenize_cli_tmpdir", dir=".")
# print('tmpdir={}'.format(self.tempdir))
self.xmlfile = os.path.join(self.tempdir, "fra.xml")
_ = self.runner.invoke(
prepare, ["-l", "fra", os.path.join(self.data_dir, "fra.txt"), self.xmlfile]
)
def tearDown(self):
self.tempdirobj.cleanup()
def test_invoke_tok(self):
results = self.runner.invoke(
tokenize, [self.xmlfile, os.path.join(self.tempdir, "delme")]
)
self.assertEqual(results.exit_code, 0)
self.assertTrue(os.path.exists(os.path.join(self.tempdir, "delme.xml")))
def test_generate_output_name(self):
results = self.runner.invoke(tokenize, [self.xmlfile])
self.assertEqual(results.exit_code, 0)
self.assertTrue(os.path.exists(os.path.join(self.tempdir, "fra.tokenized.xml")))
def test_with_stdin(self):
with io.open(self.xmlfile) as f:
inputtext = f.read()
results = self.runner.invoke(tokenize, "-", input=inputtext)
self.assertEqual(results.exit_code, 0)
self.assertIn(
"<s><w>Ceci</w> <w>est</w> <w>une</w> <w>phrase</w>", results.output
)
def test_file_already_exists(self):
results = self.runner.invoke(tokenize, [self.xmlfile, self.xmlfile])
self.assertNotEqual(results.exit_code, 0)
self.assertIn("use -f to overwrite", results.output)
def test_bad_input(self):
results = self.runner.invoke(tokenize, "- -", input="this is not XML!")
self.assertNotEqual(results.exit_code, 0)
self.assertIn("Error parsing", results.output)
# LOGGER.warning("Output: {}".format(results.output))
# LOGGER.warning("Exception: {}".format(results.exception))
if __name__ == "__main__":
main()
``` |
{
"source": "joankaradimov/random-map-generator",
"score": 2
} |
#### File: random-map-generator/model/game.py
```python
import enum
import mpq
import os
from tileset import *
from scenario import *
from string_table import *
class PlayerType(enum.Enum):
@property
def is_active(self):
return self == self.HUMAN or self == self.COMPUTER
class Game:
def __new__(cls, game_directory):
import armageddon.game
import starcraft.game
import warcraft.game
import warcraft2.game
games_types = [armageddon.game.Game, warcraft.game.Game, warcraft2.game.Game, starcraft.game.Game]
for game in games_types:
data_file_paths = (os.path.join(game_directory, x) for x in game.data_files())
if all(os.path.exists(x) for x in data_file_paths):
instance = super().__new__(game)
return instance
raise Exception('Could not detect game')
def __init__(self, game_directory):
self.directory = game_directory
self._tiles_cache = {}
for data_file in self.data_files():
self.load_data_file(data_file)
def close(self):
self.data.close()
def process_game_scenarios(self):
scenarios = []
scenarios += self.process_game_archive()
scenarios += self.process_directory(os.path.join(self.directory, 'Maps'))
return scenarios
def process_game_archive(self):
scenarios = []
for filename in self.scenario_filenames():
file = None
try:
if filename in self.data:
file = self.data.open(filename)
scenarios += self.process_chk(os.path.basename(filename), file)
finally:
if file is not None:
file.close()
return scenarios
def process_directory(self, directory):
scenarios = []
for dir_name, subdir_list, file_list in os.walk(directory):
for filename in file_list:
file_path = os.path.join(dir_name, filename)
scenarios += self.process_file(filename, file_path)
return scenarios
def process_chk(self, filename, chk_file):
try:
return [self.scenario_buider(filename, chk_file).to_scenario()]
except Exception as e:
return []
def process_tileset_file(self, tileset, entry_type):
file = None
try:
file = self.data.open(self.tileset_basename(tileset) + '.' + entry_type.EXTENSION)
entries = []
while file.tell() != file.size():
data = file.read(entry_type.SIZE)
entry = entry_type(data)
entries.append(entry)
return entries
finally:
if file is not None:
file.close()
class MpqBasedGame(Game):
def __init__(self, game_directory):
self.data = mpq.MPQFile()
super().__init__(game_directory)
def load_data_file(self, data_file):
self.data.add_archive(os.path.join(self.directory, data_file))
```
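A hypothetical driver for the `Game` factory above; the directory path is illustrative, and `__new__` picks the concrete subclass by probing each game's data files:
```python
# Sketch: detect the installed game and extract every scenario it ships with.
game = Game(r"C:/Games/StarCraft")   # path is an assumption
try:
    scenarios = game.process_game_scenarios()
    for s in scenarios:
        print(s.name, s.width, s.height, s.alliances)
finally:
    game.close()
```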
#### File: random-map-generator/model/scenario.py
```python
import enum
import numpy as np
import os
import struct
import graphics
class ScenarioError(Exception):
pass
class ScenarioVersion(enum.Enum):
WARCRAFT2 = 17
WARCRAFT2_EXP = 19
STARCRAFT_BETA = 47
STARCRAFT_VANILLA = 59
STARCRAFT_1_04 = 63
STARCRAFT_REMASTERED = 64
BROOD_WAR_BETA = 75
BROOD_WAR = 205
BROOD_WAR_REMASTERED = 206
class ScenarioBuilder:
MAX_PLAYER_COUNT = 8
MAX_FORCE_COUNT = 4
def __init__(self, game, filename, chk_file):
self.game = game
self.filename = filename
while True:
try:
chunk_code = chk_file.read(4)
if len(chunk_code) < 4:
break
except Exception as e:
raise ScenarioError('Error reading chunk in file "%s"' % filename) from e
try:
chunk_name = chunk_code.decode('ascii').strip()
except UnicodeDecodeError as e:
chunk_name = ''
try:
chunk_size = int.from_bytes(chk_file.read(4), byteorder='little', signed=True)
chunk_handler_name = 'handle_' + chunk_name
if chunk_size > 0 and hasattr(self, chunk_handler_name):
chunk_handler = getattr(self, chunk_handler_name)
chunk_data = chk_file.read(chunk_size)
chunk_handler(chunk_data)
else:
chk_file.seek(chunk_size, os.SEEK_CUR)
except Exception as e:
raise ScenarioError('Error reading chunk "%s"' % chunk_name) from e
def handle_VER(self, data):
"""Handles the version"""
self.version = ScenarioVersion(int.from_bytes(data, byteorder='little'))
def handle_OWNR(self, data):
"""Handles player types (e.g. human/computer/rescuable)"""
self.player_types = list(map(self.game.player_type, data))
def handle_ERA(self, data):
"""Handles the tileset"""
tileset_index = int.from_bytes(data, byteorder='little')
self.tileset = self.game.tileset(tileset_index % len(self.game.tileset))
def handle_DIM(self, data):
"""Handles the dimentions of the map"""
self.height, self.width = struct.unpack('<HH', data)
def handle_MTXM(self, data):
"""Handles the map tiles"""
if hasattr(self, 'mtmx_data'):
self.mtmx_data += data
else:
self.mtmx_data = data
def process_MTMX(self):
tiles = struct.unpack_from('<%dH' % (self.width * self.height), self.mtmx_data)
tiles = [self.get_tile(tile) for tile in tiles]
self.tiles = np.array(tiles, dtype=object).reshape(self.width, self.height)
del self.width
del self.height
del self.mtmx_data
def xhandle_UNIT(self, data):
"""Handles the units on the map"""
pass # TODO: extract start location and resources data
def get_tile(self, tile_index):
tiles = self.game.tiles(self.tileset)
return tiles[tile_index if tile_index < len(tiles) else 0]
class Scenario:
__slots__ = [
'version', 'name', 'description', 'strings', 'filename', 'tileset', 'alliances',
'player_types', 'human_players', 'computer_players', 'tiles', 'game',
]
def __init__(self, game, name, description, version, strings, tileset, filename, alliances, player_types, tiles):
self.game = game
self.name = name
self.description = description
self.version = version
self.filename = filename
self.strings = strings
self.tileset = tileset
self.alliances = alliances
self.player_types = player_types
self.human_players = self.player_types.count(game.player_type.HUMAN)
self.computer_players = self.player_types.count(game.player_type.COMPUTER)
self.tiles = tiles
self.__assert_attribute('name')
self.__assert_attribute('description')
self.__assert_attribute('player_types')
self.__assert_attribute('alliances')
self.__assert_attribute('tileset')
self.__assert_attribute('tiles')
def __assert_attribute(self, attribute):
if not hasattr(self, attribute):
raise ScenarioError('Required attribute "%s" missing in file "%s"' % (attribute, self.filename))
@property
def width(self):
return self.tiles.shape[0]
@property
def height(self):
return self.tiles.shape[1]
@property
def graphics(self):
return graphics.tile(self.tiles)
__all__ = ['ScenarioError', 'ScenarioVersion', 'Scenario']
```
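The builder's read loop assumes the CHK container layout: a 4-byte ASCII chunk code, a 4-byte signed little-endian size, then the payload. A tiny synthetic stream (bytes invented) makes that concrete:
```python
# Sketch: hand-rolled two-chunk stream in the layout the builder expects.
import io
import struct

stream = io.BytesIO(
    b'VER ' + struct.pack('<l', 2) + struct.pack('<H', 59) +   # STARCRAFT_VANILLA
    b'DIM ' + struct.pack('<l', 4) + struct.pack('<HH', 64, 64)
)
code = stream.read(4)                                          # b'VER '
size = int.from_bytes(stream.read(4), byteorder='little', signed=True)
payload = stream.read(size)                                    # b';\x00' -> 59
```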
#### File: model/starcraft/scenario.py
```python
import struct
import scenario
class ScenarioBuilder(scenario.ScenarioBuilder):
def handle_FORC(self, data):
"""Handles force (alliance) information"""
data = data.ljust(20, b'\0')
self.player_forces = struct.unpack_from('B' * self.MAX_PLAYER_COUNT, data)
self.force_flags = struct.unpack_from('B' * self.MAX_FORCE_COUNT, data, offset=16)
def process_FORC(self):
is_active_player = [x.is_active for x in self.player_types[: self.MAX_PLAYER_COUNT]]
is_allied_force = [bool(x & 2) for x in self.force_flags]
is_active_force = [False] * self.MAX_FORCE_COUNT
for player in range(8):
if is_active_player[player]:
is_active_force[self.player_forces[player]] = True
non_allied_players = 0
for player, force in enumerate(self.player_forces):
if is_active_player[player] and not is_allied_force[force]:
non_allied_players += 1
allied_forces = 0
for force in range(self.MAX_FORCE_COUNT):
if is_active_force[force] and is_allied_force[force]:
allied_forces += 1
if allied_forces == 1 and non_allied_players == 0:
self.alliances = is_active_player.count(True)
else:
self.alliances = allied_forces + non_allied_players
del self.player_forces
del self.force_flags
def xhandle_THG2(self, data):
"""Handles the thingies on the map"""
pass # TODO: extract trees and other decorations
def handle_STR(self, data):
if len(data) < 2:
return
string_count = int.from_bytes(data[:2], byteorder='little')
offsets = struct.unpack_from('<%dH' % string_count, data, offset=2)
self.strings = []
for i in range(string_count):
string_start = offsets[i]
string_end = data.find(b'\0', string_start)
self.strings.append(data[string_start: string_end].decode('ISO-8859-1'))
def handle_SPRP(self, data):
self.name_index, self.description_index = struct.unpack('<HH', data)
def process_SPRP(self):
if hasattr(self, 'name_index') and 0 < self.name_index < len(self.strings):
self.name = self.strings[self.name_index - 1]
else:
self.name = self.filename
if hasattr(self, 'description_index') and 0 < self.description_index < len(self.strings):
self.description = self.strings[self.description_index - 1]
else:
self.description = 'Destroy all enemy buildings.'
del self.name_index
del self.description_index
def to_scenario(self):
self.process_FORC()
self.process_MTMX()
self.process_SPRP()
return Scenario(**self.__dict__)
class Scenario(scenario.Scenario):
"""Implements a StarCraft scenario.
The format specs are taken from here:
http://www.staredit.net/wiki/index.php?title=Scenario.chk
"""
def to_chunk_data(self):
result = b''
result += b'TYPE'
result += struct.pack('<L', 4)
result += b'RAWB'
result += b'VER '
result += struct.pack('<L', 2)
# assumption: Brood War (the StarCraft expansion) was intended; the original
# referenced a nonexistent, unimported ScenarioVersion.STARCRAFT_EXP
result += struct.pack('<H', scenario.ScenarioVersion.BROOD_WAR.value)
# TODO: VCOD
result += b'OWNR'
result += struct.pack('<L', 12)
for i in range(8):
# uses the game's own enum; assumes the concrete PlayerType defines HUMAN and INACTIVE
player_type = self.game.player_type.HUMAN if i < self.human_players else self.game.player_type.INACTIVE
result += struct.pack('B', player_type.value)
for i in range(4):
result += struct.pack('B', self.game.player_type.INACTIVE.value)
result += b'ERA '
result += struct.pack('<L', 2)
result += struct.pack('<H', self.tileset.value)
result += b'DIM '
result += struct.pack('<L', 4)
result += struct.pack('<HH', self.width, self.height)
result += b'SIDE'
result += struct.pack('<L', 12)
# 4 for "neutral", 5 for "user selectable", 7 for "inactive"
for i in range(8):
side = 5 if i < self.human_players else 7
result += struct.pack('B', side)
result += struct.pack('BBBB', 7, 7, 7, 4)
result += b'MTMX'
result += struct.pack('<L', self.width * self.height * 2)
for y in range(self.height):
for x in range(self.width):
result += struct.pack('<H', self.tiles[y, x].index)
result += b'UNIT' # TODO: unit data
result += struct.pack('<L', 0)
result += b'THG2' # TODO: thingies data
result += struct.pack('<L', 0)
result += b'STR ' # TODO: strings
result += struct.pack('<L', 2)
result += struct.pack('<H', 0)
result += b'SPRP' # TODO: name and description
result += struct.pack('<L', 4)
result += struct.pack('<HH', 0, 0)
result += b'FORC' # TODO: make this human readable
result += struct.pack('<L', 20)
result += b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\1\1\1'
result += b'COLR' # TODO: make this human readable
result += struct.pack('<L', 8)
result += b'\0\1\2\3\4\5\6\7'
return result
```
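The alliance counting in `process_FORC` above is easier to follow with a standalone worked example (inputs invented; the special case where one allied force covers every active player is omitted here):
```python
# Re-statement of the FORC counting rules on invented inputs.
player_forces = [0, 0, 1, 2, 2, 2, 3, 3]    # force index per player slot
force_flags = [2, 0, 0, 0]                  # bit 0b10 marks an "allied" force
is_active_player = [True, True, True] + [False] * 5

is_allied_force = [bool(f & 2) for f in force_flags]
is_active_force = [False] * 4
for p, active in enumerate(is_active_player):
    if active:
        is_active_force[player_forces[p]] = True
non_allied = sum(1 for p, f in enumerate(player_forces)
                 if is_active_player[p] and not is_allied_force[f])
allied = sum(1 for f in range(4) if is_active_force[f] and is_allied_force[f])
print(allied + non_allied)                  # -> 2 effective sides
```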
#### File: model/starcraft/tileset.py
```python
import enum
import numpy
import struct
class CV5Entry:
SIZE = 52
EXTENSION = 'cv5'
def __init__(self, data):
self.data = struct.unpack_from('HBBHHHHHHHH', data)
self.megatiles = struct.unpack_from('H' * 16, data, offset=20)
class WPEEntry:
SIZE = 4
EXTENSION = 'wpe'
def __init__(self, data):
self.data = numpy.array(struct.unpack_from('BBB', data), dtype=numpy.uint8)
class Tileset(enum.Enum):
"""Implements an enum with all tilesets in the game
It exposes an abstraction for reading cv5/vf4/vx4/vr4/wpe files.
The format specs are taken from here:
http://www.staredit.net/wiki/index.php?title=Terrain_Format
"""
BADLANDS = 0
SPACE_PLATFORM = 1
INSTALLATION = 2
ASHWORLD = 3
JUNGLE = 4
DESERT = 5
ARCTIC = 6
TWILIGHT = 7
```
#### File: model/warcraft2/tileset.py
```python
import enum
import numpy
import struct
import tileset
class CV4Entry:
SIZE = 42
EXTENSION = 'cv4'
def __init__(self, data):
self.data = struct.unpack_from('10B', data, offset=32)
self.megatiles = struct.unpack_from('H' * 16, data)
class PPLEntry:
SIZE = 3
EXTENSION = 'ppl'
def __init__(self, data):
self.data = numpy.array(struct.unpack_from('BBB', data), dtype=numpy.uint8)
class Tileset(enum.Enum):
"""Implements an enum with all tilesets in the game
It exposes an abstraction for reading cv4/vx4/vr4/ppl files.
The format specs are taken from here:
http://cade.datamax.bg/war2x/wc2tile.html
"""
FOREST = 0
WINTER = 1
WASTELAND = 2
SWAMP = 3
``` |
{
"source": "joankirui/Blog_site",
"score": 2
} |
#### File: Blog_site/app/requests.py
```python
import requests,json
# from .models import User,Comment,Blog
def get_quotes():
response = requests.get('http://quotes.stormconsultancy.co.uk/random.json')
quotes = response.json()
return quotes
```
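A quick usage sketch for the helper above; the `quote` and `author` field names follow the stormconsultancy JSON shape and should be treated as assumptions:
```python
quote = get_quotes()
print(quote.get("quote"), "-", quote.get("author"))   # field names assumed
```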
#### File: Blog_site/tests/test_comments.py
```python
import unittest
from app.models import Comment,Blog,User
from app import db
class TestBlog(unittest.TestCase):
def setUp(self):
self.new_blog = Blog(content = "blogger")
self.new_comment = Comment(comment = "comment",blog = self.new_blog)
def tearDown(self):
Comment.query.delete()
Blog.query.delete()
db.session.commit()
def test_instance(self):
self.assertTrue(isinstance(self.new_comment, Comment))
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment, "comment")
self.assertEquals(self.new_comment.blog, self.new_blog)
if __name__=="__main__":
unittest.main()
``` |
{
"source": "joankirui/Gallery",
"score": 2
} |
#### File: Gallery/gallery/models.py
```python
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Location(models.Model):
location = models.CharField(max_length = 30)
def __str__(self):
return self.location
def save_location(self):
self.save()
def delete_location(self):
self.delete()
@classmethod
def get_location(cls):
place = cls.objects.all()
return place
def update_location(self):
self.update_location
class Category(models.Model):
category = models.CharField(max_length=30)
def __str__(self):
return self.category
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def update_category(self):
self.update_category
class Image(models.Model):
image = CloudinaryField('image',null=True)
image_name = models.CharField(max_length = 30)
image_description = models.TextField()
image_location = models.ForeignKey(Location, on_delete=models.CASCADE)
category = models.ForeignKey(Category,on_delete=models.CASCADE)
def __str__(self):
return self.image_name
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def get_image_by_id(cls,id):
image = cls.objects.filter(id=id).all()
return image
def update_image(self):
self.update_image()
@classmethod
def search_image(cls,search_term):
images = cls.objects.filter(category__category__icontains=search_term)
return images
@classmethod
def filter_by_location(cls,location):
img_location = cls.objects.filter(image_location=location).all()
return img_location
class Meta:
ordering = ['image_name']
``` |
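A brief sketch of the query helpers above (search term and id invented), e.g. from a Django shell:
```python
# Case-insensitive category search, then a lookup by primary key.
hits = Image.search_image("food")   # matches category__category__icontains
one = Image.get_image_by_id(1)      # queryset filtered by id
for img in hits:
    print(img.image_name, img.image_location)
```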
{
"source": "joankirui/News-api",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template
from . import main
from ..models import Articles,Sources
from ..request import get_sources,get_articles,search_articles
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home-Welcome to the best News Bulletin'
general_news = get_sources('general')
business_news = get_sources('business')
sports_news = get_sources('sports')
return render_template('index.html', title = title,general=general_news,business=business_news,sports=sports_news)
@main.route('/NewsArticles')
def NewsArticles():
"""
View that would return news articles
"""
tesla_news = get_articles('tesla')
apple_news = get_articles('apple')
return render_template('articles.html',tesla = tesla_news,apple = apple_news)
@main.route('/search/<article_name>')
def articleSearch(article_name):
'''
Function that returns the searched article
'''
search_article_name = article_name.split(" ")
search_name_format = "+".join(search_article_name)
searched_articles = search_articles(search_name_format)
return render_template('search.html',articles = searched_articles)
``` |
{
"source": "joankorir/Docs-Appointment",
"score": 2
} |
#### File: Docs-Appointment/Proj/views.py
```python
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.shortcuts import render,redirect,reverse
from django.views.generic import View
from .models import Album
from django.views import generic
class IndexView(generic.ListView):
template_name='Proj/index.html'
context_object_name='all_albums'
def get_queryset(self):
return Album.objects.all()
class DetailView(generic.DetailView):
model=Album
template_name='Proj/detail.html'
class AlbumCreate(CreateView):
model=Album
fields=['artist','album_title','genre','album_logo']
class AlbumUpdate(UpdateView):
model=Album
fields=['artist','album_title','genre','album_logo']
class AlbumDelete(DeleteView):
model=Album
def get_success_url(self):
return reverse('Proj:index')
``` |
{
"source": "joankorir/pitchMinute",
"score": 3
} |
#### File: pitchMinute/tests/test_pitchcategory.py
```python
from unittest import TestCase
from app.models import PitchCategory
from app import db
class PitchCategoryTest(TestCase):
def setUp(self):
self.new_category = PitchCategory(id=345, name="Interview", description="Finerr")
def tearDown(self):
PitchCategory.query.delete()
db.session.commit()
def test_check_instance_variables(self):
self.assertEquals(self.new_category.id, 345)
self.assertEquals(self.new_category.name, "Interview")
self.assertEquals(self.new_category.description, "Finerr")
def test_save_category(self):
self.new_category.save_category()
self.assertTrue(len(PitchCategory.query.all()) > 0)
def test_get_categories_by_id(self):
self.new_category.save_category()
got_categories = PitchCategory.query.filter_by(id=345).all()
self.assertTrue(len(got_categories) == 1)
``` |
{
"source": "JoanLee0826/amazon",
"score": 3
} |
#### File: amazon/amazon/goods_review_thread.py
```python
import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
class Review:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"
}
proxies = {
"http": "http://172.16.17.32:9999",
}
def __init__(self, domain):
self.view_list = []
self.page_list = []
self.url_queue = Queue()
if domain.strip().lower() == 'jp':
self.row_url = "https://www.amazon.co.jp"
elif domain.strip().lower() == 'com':
self.row_url = "https://www.amazon.com"
self.s = requests.Session()
self.s.get(url=self.row_url, headers=self.headers, proxies=self.proxies)
def get_review(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
res_html = etree.HTML(res.text)
# product name shown in the review block
view_goods = res_html.xpath('//span[@class="a-list-item"]/a/text()')[0]
# containers holding the individual reviews
view_con = res_html.xpath('//div[@class="a-section review aok-relative"]')
for each_view in view_con:
# reviewer name
view_name = each_view.xpath('.//span[@class="a-profile-name"]/text()')[0]
view_star_raw = each_view.xpath('.//div[@class="a-row"]/a[@class="a-link-normal"]/@title')[0]
# star rating
view_star = view_star_raw.split(' ')[0]
# review title
view_title = each_view.xpath('.//a[@data-hook="review-title"]/span/text()')[0]
# review date
view_date = each_view.xpath('.//span[@data-hook="review-date"]/text()')[0]
view_format = each_view.xpath('.//a[@data-hook="format-strip"]/text()')
view_colour = None
view_size = None
try:
for each in view_format:
if re.search("color|colour|色", each, re.I):
view_colour = each.split(':')[1].strip()
if re.search("size|style|サイズ", each, re.I):
view_size = each.split(":")[1].strip()
except:
pass
# review body text
view_body = each_view.xpath('string(.//span[@data-hook="review-body"]/span)')
# "helpful" vote count
try:
view_useful_raw = each_view.xpath('.//span[@data-hook="helpful-vote-statement"]/text()')[0]
view_useful = view_useful_raw.split(' ')[0]
if view_useful == 'one':
view_useful = 1
try:
view_useful = int(view_useful)
except:
pass
except:
view_useful = 0
# assembled record for this review
each_view_list = [view_goods, view_name, view_star, view_title, view_date, view_colour, view_size,
view_body, view_useful]
self.view_list.append(each_view_list)
# print(self.view_list[-1])
def run(self, data):
goods_data = pd.read_excel(data, encoding='utf-8')
base_url = self.row_url + "/product-reviews/"
# goods_data.drop_duplicates(subset=['r','评价数量'],inplace=True)
for each_asin, each_count in zip(goods_data['ASIN'][5:50], goods_data['goods_review_count'][5:50]):
if each_asin and int(each_count) > 0:
if int(each_count) % 10 == 0:
end_page = int(each_count) // 10 + 1
else:
end_page = int(each_count) // 10 + 2
for page in range(1, end_page):
if page == 1:
url = base_url + each_asin
else:
url = base_url + each_asin + '?pageNumber=' + str(page)
self.url_queue.put(url)
print("review_page_%d" % page, url)
time.sleep(1.5)
while True:
try:
review_threads = [threading.Thread(target=self.get_review, args=(self.url_queue.get(),))
for m in range(30) if not self.url_queue.empty()]
for each in review_threads:
each.start()
print("队列剩余数量", self.url_queue.qsize())
for each in review_threads:
each.join()
except:
print("请求链接出错,重试中...")
pass
time.sleep(random.uniform(0.5,2.1))
if self.url_queue.empty():
break
view_goods_pd = pd.DataFrame(self.view_list,
columns=['review_goods', 'review_name', 'review_star', 'review_title',
'review_date', 'review_colour', 'review_size', 'review_body',
'review_useful'])
view_goods_pd.drop_duplicates(subset=['review_name', 'review_date','review_body'], inplace=True)
aft = datetime.datetime.now().strftime('%m%d%H%M')
file_name = r'../data/goods_review/' + "reviews_" + aft + ".xlsx"
view_goods_pd.to_excel(file_name, encoding='utf-8', engine='xlsxwriter')
print("共获取评论数量:", len(self.view_list))
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
```
#### File: amazon/amazon/prime_day_jp.py
```python
import numpy as np
import pandas as pd
import requests, lxml
from lxml import etree
import re, time, random, datetime, time
class AmazonGoods:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"
}
proxies = {
"http": "http://172.16.58.3:9999",
}
url_base = "https://www.amazon.co.jp"
s = requests.Session()
s.get(url=url_base, headers=headers, proxies=proxies, verify=False)
def __init__(self):
self.goods_list = []
def get_goods(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies, verify=False)
time.sleep(10)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
print(res.text)
res_html = etree.HTML(res.text)
for each in res_html.xpath("//div[@class='a-row dealContainer dealTile']")[:3]:
try:
pic_url = each.xpath(".//a[@id='dealImage']/@href")
print(pic_url)
except:
pic_url = None
try:
price = each.xpath(".//span[@class='a-size-medium inlineBlock unitLineHeight dealPriceText']/text()")
except:
price = None
try:
percent = each.xpath(".//span[@class='a-size-mini a-color-secondary inlineBlock unitLineHeight']/text()")
except:
percent = None
try:
total = each.xpath(".//span[@class='a-size-mini a-color-secondary inlineBlock unitLineHeight']/text()")
except:
total = None
try:
end_time = each.xpath(".//span[@class='a-size-mini a-color-secondary inlineBlock unitLineHeight']/text()")
except:
end_time = None
now_time = time.time()
self.goods_list.append([pic_url, price, percent, total, end_time, now_time])
print(self.goods_list[-1])
if __name__ == '__main__':
jp = AmazonGoods()
url = 'https://www.amazon.co.jp/l/4429743051/ref=gbps_ftr_m-8_8a89_wht_14304371?gb_f_ALLDEALS=dealStates:AVAILABLE%252CWAITLIST%252CWAITLISTFULL%252CEXPIRED%252CSOLDOUT,sortOrder:BY_SCORE,MARKETING_ID:PD19%252CPDSD%252CPDAY%252CPDPMP%252CAMZDEVICES,enforcedCategories:344845011%252C14304371&pf_rd_p=4ac480f4-5fbf-4022-9ab8-9a887afc8a89&pf_rd_s=merchandised-search-8&pf_rd_t=101&pf_rd_i=4429743051&pf_rd_m=AN1VRQENFRJN5&pf_rd_r=1F7NDN9TKJ69KQE06TG6&ie=UTF8'
jp.get_goods(url)
```
#### File: amazon/keepa/keepa_api.py
```python
import datetime
import numpy as np
import pandas as pd
import time
from dateutil.parser import parse
import keepa
ACCESSKEY = ''
k_api = keepa.Keepa(ACCESSKEY)
def get_info(items):
check_basic = ['asin', 'title', 'imagesCSV', 'categories','categoryTree', 'brand', 'color', 'size', 'packageLength', 'itemWidth',
'itemHeight', 'itemWeight', 'packageLength', 'packageWidth', 'packageHeight', 'packageWeight', 'frequentlyBoughtTogether']
check_date = ['NEW_time', 'NEW','SALES_time', 'SALES']
info_list = []
for each in k_api.query(items, domain='US'):
info_each = {}
for item in check_basic:
info_each[item] = each.get(item, None)
for date_item in check_date:
info_each[date_item] = each.get('data', {}).get(date_item, np.array([]))
info_list.append(info_each)
print("已经获取的数据:", len(info_list))
aft = datetime.datetime.now().strftime("%m%d%H%M") + '.xlsx'
data = pd.DataFrame(info_list)
print("数据转换中...")
amazon_pic_domain = 'https://images-na.ssl-images-amazon.com/images/I/'
amazon_pic_size = '_AC_UL320_SR360,360_.jpg'  # 360px canvas, product occupies 320px, remainder is padding
# main image URL
data['pic_url_main'] = data['imagesCSV'].apply(lambda x : amazon_pic_domain + x.split(',')[0] + amazon_pic_size if x else None)
# number of listing images
data['pic_num'] = data['imagesCSV'].apply(lambda x: len(x.split(',')) if x else None)
# wrap as an HTML <img> tag that Excel can render
data['table_pic'] = '<table> <img src=' + '\"' + data['pic_url_main'] + '\"' + 'height="140" >'
print(data['table_pic'])
# first recorded sale date
data['data_on_sale'] = data['SALES_time'].apply(lambda x: x[0])
print(data['data_on_sale'])
# current price
data['price_now'] = data['NEW'].apply(lambda x: x[-1] if x.any() else None)
print(data['price_now'])
data['max_rank'] = data['SALES'].apply(lambda x: x.max() if x.any() else None)
print(data['max_rank'])
# data['min_rank'] = data['SALES'].apply(lambda x: x.min() if )
# data['max_time'] = data[data['SALES'] == data['max_rank']]['SALES_time']
# data['pre_rank'] = np.mean(data['SALES'])
data.to_excel('asin_info_' + aft, encoding='utf-8', engine='xlsxwriter')
print('Done, saved to Excel')
return data
def get_keepa_time(date):
"""
把普通的时间格式转化为keep时间格式
:param date:
:return:
"""
return int(time.mktime(parse(date).timetuple())/60-21564000)
cate_info = {
"Apps & Games": 2350149011,
"Baby Products": 165796011,
"Digital Music": 163856011,
"Toys & Games": 165793011,
"Patio, Lawn & Garden": 2972638011,
"Books": 283155,
"Arts, Crafts & Sewing": 2617941011,
"Software": 229534,
"Sports & Outdoors": 3375251,
"Handmade Products": 11260432011,
"Video Games": 468642,
"Clothing, Shoes & Jewelry": 7141123011,
"Office Products": 1064954,
"Grocery & Gourmet Food": 16310101,
"Tools & Home Improvement": 228013,
"Movies & TV": 2625373011,
"Musical Instruments": 11091801,
"Appliances": 2619525011,
"Collectibles & Fine Art": 4991425011,
"Pet Supplies": 2619533011,
"Industrial & Scientific": 16310091,
"Cell Phones & Accessories": 2335752011,
"Everything Else": 10272111,
"Home & Kitchen": 1055398,
"Beauty & Personal Care": 3760911,
"CDs & Vinyl": 5174,
"Electronics": 172282,
"Automotive": 15684181,
"Health & Household": 3760901,
"Vehicles": 10677469011,
}
def get_selection(
rank_in=5000,
rank_out=8000,
review_count=60,
date_in='2019-1-1',
date_out='2019-9-15',
category=1055398): # category查上述字典
query_json = {
"current_SALES_gte": rank_in,  # best rank bound
"current_SALES_lte": rank_out,  # worst rank bound
"current_COUNT_REVIEWS_gte": 0,  # minimum review count
"current_COUNT_REVIEWS_lte": review_count,  # maximum review count
"current_NEW_FBA_gte": 2000,  # FBA price, at least $20
"current_NEW_FBA_lte": 5000,
"avg30_NEW_FBA_gte": 2000,  # 30-day average FBA price, at least $20
"avg30_NEW_FBA_lte": 5000,
"trackingSince_gte": get_keepa_time(date_in),  # tracked since no earlier than
"trackingSince_lte": get_keepa_time(date_out),  # tracked since no later than
"rootCategory": category,  # root category node
"packageLength_gte": 1,
"packageLength_lte": 450,
"packageWidth_gte": 1,
"packageWidth_lte": 450,
"packageHeight_gte": 1,
"packageHeight_lte": 450,  # max package side length 450 mm
"packageWeight_gte": 1,
"packageWeight_lte": 1800,  # package weight at most 1800 g
"sort": [["current_SALES", "asc"]],
"lastOffersUpdate_gte": 4631500,  # int(time.time()/60-21564000)
"lastRatingUpdate_gte": 4615660,
"productType": [0, 1, 5],
"perPage": 2000,  # results per page
"page": 0  # page index
}
return query_json
def get_cate(row_cat=0):
cate_info = k_api.category_lookup(row_cat)
pd.DataFrame(cate_info).T.to_excel('categories_'+str(row_cat)+'.xlsx', encoding='utf-8')
if __name__ == '__main__':
# Option 1: load ASINs from a file
# file_path = r'asin_lsit_10251719.xlsx'
# items = pd.read_excel(file_path)['asin'].tolist()
# aft = datetime.datetime.now().strftime("%m%d%H%M") + '.xlsx'
# pd.DataFrame(items, columns=['ASIN']).to_excel(aft, encoding='utf-8', engine='xlsxwriter')
# Option 2: fetch ASINs through a Keepa product-finder query
category = cate_info['Home & Kitchen']  # original keyed a nonexistent 'Home' entry
items = k_api.product_finder(get_selection(category=category))
get_info(items=items)
```
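The epoch arithmetic in `get_keepa_time` is worth a worked number: Keepa time is minutes since 2011-01-01 UTC (unix minutes minus 21564000). Since the function uses `time.mktime`, the exact result shifts with the machine's timezone:
```python
# Worked check of the Keepa-minutes conversion.
# 2019-01-01 00:00 UTC -> 1546300800 s -> 25771680 min -> 25771680 - 21564000
print(get_keepa_time('2019-1-1'))   # ~4207680 on a UTC machine
```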
#### File: amazon/others/prime_com.py
```python
import numpy as np
import pandas as pd
import requests, lxml
from lxml import etree
import re, time, random, datetime, time
import json
class AmazonGoods:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"
}
proxies = {
"http": "http://172.16.58.3:9999",
}
url_base = "https://www.amazon.com"
s = requests.Session()
s.get(url=url_base, headers=headers, proxies=proxies, verify=False)
def __init__(self):
self.goods_list = []
def get_id(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies, verify=False)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
patt = re.compile(r'"sortedDealIDs" : [[](.*?)[]]', re.S)
# return re.search(string=text, pattern=patt).group(1)
row_list = re.search(string=res.text, pattern=patt).group(1).split(",")
id_list = [{"dealID": each.strip().replace("\r\n", "").replace("\n", "")[1:-1]} for each in row_list]
return id_list
def get_req_json(self, id_list, start, end):
try:
sessionID = self.s.cookies.get('session-id')
print(sessionID)
except:
sessionID = '147-0093206-6105777'
req_json = {
"requestMetadata": {"marketplaceID": "ATVPDKIKX0DER", "clientID": "goldbox_mobile_pc",
"sessionID": sessionID},
"responseSize": "STATUS_ONLY", "itemResponseSize": "DEFAULT_WITH_PREEMPTIVE_LEAKING",
}
req_json['dealTargets'] = id_list[start:end]
return req_json
def get_data(self, req_json, i):
# time_str = str(time.time()).replace(".", '')[:13]
time_str = '1563177600123'
req_url = "https://www.amazon.com/xa/dealcontent/v2/GetDealStatus" + "?nocache=" + time_str
headers = {
"Server": "Server",
"Content-Type": "application/json",
# "Content-Length": "704",
"Strict-Transport-Security": "max-age=47474747; includeSubDomains; preload",
# "x-amzn-RequestId": "f0419251-a767-11e9-a5f6-7b6a12dcce7e",
# "X-Amz-Date": "Tue, 16 Jul 2019 01:20:46 GMT",
"Vary": "Accept-Encoding,X-Amzn-CDN-Cache,X-Amzn-AX-Treatment,User-Agent",
# "x-amz-rid": "99Z46S27MN34QKRDX9X1",
"X-Frame-Options": "SAMEORIGIN",
"Date": "Tue, 16 Jul 2019 01:20:46 GMT",
"Connection": "keep-alive",
}
res = self.s.post(req_url, headers=headers, data=json.dumps(req_json))
file_str = time.strftime('%m_%d_%H_%M', time.localtime()) + "_" + str(i)
file_path = r'E:\\产品开发\\prime day 数据/'
with open(file_path + "com_" + file_str + ".json", 'w') as f:
f.write((json.dumps(res.json())))
def run(self, url):
id_list = self.get_id(url)
for i in range(len(id_list)//100 + 1):
req_json = self.get_req_json(id_list, i*100, i*100+100)
self.get_data(req_json, i)
if __name__ == '__main__':
prime = AmazonGoods()
# url = "https://www.amazon.com/l/13887280011"
# url = "https://www.amazon.com/b/ref=gbps_ftr_m-6_3b69_sort_BSEL?node=14611812011&gb_f_GB-SUPPLE=enforcedCategories:284507%252C1055398,dealTypes:DEAL_OF_THE_DAY%252CLIGHTNING_DEAL%252CBEST_DEAL,sortOrder:BY_BEST_SELLING,dealStates:AVAILABLE%252CWAITLIST%252CWAITLISTFULL%252CEXPIRED%252CSOLDOUT,MARKETING_ID:PDAY&gb_ttl_GB-SUPPLE=Deals%2520on%2520Home%2520and%2520Kitchen&pf_rd_p=db08f08d-45f1-490b-aa6c-1f4d543b3b69&pf_rd_s=merchandised-search-6&pf_rd_t=101&pf_rd_i=14611812011&pf_rd_m=ATVPDKIKX0DER&pf_rd_r=XDTVBNJEXKJ7Q0C8Q0KN&ie=UTF8"
# Home & Kitchen category, sorted by best selling
url = "https://www.amazon.com/b/ref=gbps_ftr_m-6_3b69_sort_BSEL?node=14611812011&gb_f_GB-SUPPLE=enforcedCategories:284507%252C1055398,dealStates:AVAILABLE%252CWAITLIST%252CWAITLISTFULL%252CEXPIRED%252CSOLDOUT,dealTypes:DEAL_OF_THE_DAY%252CLIGHTNING_DEAL%252CBEST_DEAL,sortOrder:BY_BEST_SELLING,MARKETING_ID:PDAY&gb_ttl_GB-SUPPLE=Deals%2520on%2520Home%2520and%2520Kitchen&pf_rd_p=db08f08d-45f1-490b-aa6c-1f4d543b3b69&pf_rd_s=merchandised-search-6&pf_rd_t=101&pf_rd_i=14611812011&pf_rd_m=ATVPDKIKX0DER&pf_rd_r=WEBYWNX4ZXZYRYCYGMKY&ie=UTF8"
prime.run(url)
``` |
{
"source": "JoanLindo/BaianoBot-backup-sexo",
"score": 2
} |
#### File: userbot/modules/clone.py
```python
import html
from telethon.tl import functions
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import MessageEntityMentionName
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
@register(outgoing=True, pattern=r"^\.clone ?(.*)")
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
replied_user, error_i_a = await get_full_user(event)
if replied_user is None:
await event.edit(str(error_i_a))
return False
user_id = replied_user.user.id
profile_pic = await event.client.download_profile_photo(
user_id, TEMP_DOWNLOAD_DIRECTORY
)
# some people have weird HTML in their names
first_name = html.escape(replied_user.user.first_name)
# https://stackoverflow.com/a/5072031/4723940
# some Deleted Accounts do not have first_name
if first_name is not None:
# some weird people (like me) have more than 4096 characters in their
# names
first_name = first_name.replace("\u2060", "")
last_name = replied_user.user.last_name
# last_name is not Manadatory in @Telegram
if last_name is not None:
last_name = html.escape(last_name)
last_name = last_name.replace("\u2060", "")
if last_name is None:
last_name = " "
# inspired by https://telegram.dog/afsaI181
user_bio = replied_user.about
if user_bio is not None:
user_bio = html.escape(replied_user.about)
await bot(functions.account.UpdateProfileRequest(first_name=first_name))
await bot(functions.account.UpdateProfileRequest(last_name=last_name))
await bot(functions.account.UpdateProfileRequest(about=user_bio))
pfile = await bot.upload_file(profile_pic)  # pylint:disable=E0602
await bot(functions.photos.UploadProfilePhotoRequest(pfile)) # pylint:disable=E0602
# message_id_to_reply = event.message.reply_to_msg_id
# if not message_id_to_reply:
# message_id_to_reply = event.message.id
# await bot.send_message(
# event.chat_id,
# "Hey ? Whats Up !",
# reply_to=message_id_to_reply,
# )
await event.delete()
await bot.send_message(event.chat_id, "**SEJAMOS UM SÓ**", reply_to=reply_message)
async def get_full_user(event):
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.forward:
replied_user = await event.client(
GetFullUserRequest(
previous_message.forward.from_id
or previous_message.forward.channel_id
)
)
return replied_user, None
else:
replied_user = await event.client(
GetFullUserRequest(previous_message.from_id)
)
return replied_user, None
else:
input_str = None
try:
input_str = event.pattern_match.group(1)
except IndexError as e:
return None, e
if event.message.entities is not None:
mention_entity = event.message.entities
probable_user_mention_entity = mention_entity[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
else:
try:
user_object = await event.client.get_entity(input_str)
user_id = user_object.id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
elif event.is_private:
try:
user_id = event.chat_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
else:
try:
user_object = await event.client.get_entity(int(input_str))
user_id = user_object.id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user, None
except Exception as e:
return None, e
CMD_HELP.update(
{
"cloneuser": "\
.clone <nome de usuário> ou responda a uma mensagem\
\nUso: Copia a foto de perfil do alvo,nome...etc e define como seu."
}
)
```
#### File: userbot/modules/lyrics.py
```python
import os
import lyricsgenius
from pylast import User
from userbot import CMD_HELP, GENIUS, LASTFM_USERNAME, lastfm
from userbot.events import register
if GENIUS is not None:
genius = lyricsgenius.Genius(GENIUS)
@register(outgoing=True, pattern="^.lyrics (?:(now)|(.*) - (.*))")
async def lyrics(lyric):
await lyric.edit("`Obtendo informações...`")
if GENIUS is None:
await lyric.edit(
"`Forneça o token de acesso genius nas ConfigVars do Heroku...`"
)
return False
if lyric.pattern_match.group(1) == "now":
playing = User(LASTFM_USERNAME, lastfm).get_now_playing()
if playing is None:
await lyric.edit("`Sem informações do scrobble atual do lastfm...`")
return False
artist = playing.get_artist()
song = playing.get_title()
else:
artist = lyric.pattern_match.group(2)
song = lyric.pattern_match.group(3)
await lyric.edit(f"`Procurando letras por {artist} - {song}...`")
songs = genius.search_song(song, artist)
if songs is None:
await lyric.edit(f"`Música` **{artist} - {song}** `não encontrada...`")
return False
if len(songs.lyrics) > 4096:
await lyric.edit("`A letra é muito grande, visualize o arquivo para vê-la.`")
with open("lyrics.txt", "w+") as f:
f.write(f"Search query: \n{artist} - {song}\n\n{songs.lyrics}")
await lyric.client.send_file(
lyric.chat_id,
"lyrics.txt",
reply_to=lyric.id,
)
os.remove("lyrics.txt")
return True
else:
await lyric.edit(
f"**Consulta de pesquisa**:\n`{artist}` - `{song}`"
f"\n\n```{songs.lyrics}```"
)
return True
CMD_HELP.update(
{
"lyrics": ".lyrics **<nome do artista> - <nome da música>**"
"\nUso: Obtenha as letras do artista e da música correspondentes."
"\n\n.lyrics now"
"\nUso: Obtenha as letras do artista e música atuais do scrobble do lastfm."
}
)
```
#### File: userbot/modules/system_stats.py
```python
import platform
import shutil
import sys
import time
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from datetime import datetime
from os import remove
from platform import python_version, uname
from shutil import which
import psutil
from git import Repo
from telethon import __version__, version
from userbot import ALIVE_LOGO, ALIVE_NAME, CMD_HELP, USERBOT_VERSION, StartTime, bot
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
repo = Repo()
modules = CMD_HELP
# ============================================
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "dias"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@register(outgoing=True, pattern=r"^\.spc")
async def psu(event):
uname = platform.uname()
softw = "**Informação de Sistema**\n"
softw += f"`Sistema : {uname.system}`\n"
softw += f"`Lançamento : {uname.release}`\n"
softw += f"`Versão : {uname.version}`\n"
softw += f"`Máquina : {uname.machine}`\n"
# Boot Time
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
softw += f"`Tempo de Boot: {bt.day}/{bt.month}/{bt.year} {bt.hour}:{bt.minute}:{bt.second}`\n"
# CPU Cores
cpuu = "**CPU Info**\n"
cpuu += "`Núcleos físicos : " + str(psutil.cpu_count(logical=False)) + "`\n"
cpuu += "`Núcleos totais : " + str(psutil.cpu_count(logical=True)) + "`\n"
# CPU frequencies
cpufreq = psutil.cpu_freq()
cpuu += f"`Frequência máxima : {cpufreq.max:.2f}Mhz`\n"
cpuu += f"`Frequência mínima : {cpufreq.min:.2f}Mhz`\n"
cpuu += f"`Frequência atual: {cpufreq.current:.2f}Mhz`\n\n"
# CPU usage
cpuu += "**Uso de CPU por núcleo**\n"
for i, percentage in enumerate(psutil.cpu_percent(percpu=True)):
cpuu += f"`Núcleo {i} : {percentage}%`\n"
cpuu += "\n**Uso de CPU total**\n"
cpuu += f"`Todos núcleos: {psutil.cpu_percent()}%`\n"
# RAM Usage
svmem = psutil.virtual_memory()
memm = "**Uso de memória**\n"
memm += f"`Total : {get_size(svmem.total)}`\n"
memm += f"`Disponível : {get_size(svmem.available)}`\n"
memm += f"`Usado : {get_size(svmem.used)} ({svmem.percent}%)`\n"
# Disk Usage
dtotal, dused, dfree = shutil.disk_usage(".")
disk = "**Uso de disco**\n"
disk += f"`Total : {get_size(dtotal)}`\n"
disk += f"`Livre : {get_size(dused)}`\n"
disk += f"`Usado : {get_size(dfree)}`\n"
# Bandwidth Usage
bw = "**Uso de banda**\n"
bw += f"`Upload : {get_size(psutil.net_io_counters().bytes_sent)}`\n"
bw += f"`Download: {get_size(psutil.net_io_counters().bytes_recv)}`\n"
help_string = f"{str(softw)}\n"
help_string += f"{str(cpuu)}\n"
help_string += f"{str(memm)}\n"
help_string += f"{str(disk)}\n"
help_string += f"{str(bw)}\n"
help_string += "**Informação de Engine**\n"
help_string += f"`Python {sys.version}`\n"
help_string += f"`Telethon {__version__}`"
await event.edit(help_string)
def get_size(bytes, suffix="B"):
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
@register(outgoing=True, pattern=r"^\.sysd$")
async def sysdetails(sysd):
""" For .sysd command, get system info using neofetch. """
if not sysd.text[0].isalpha() and sysd.text[0] not in ("/", "#", "@", "!"):
try:
fetch = await asyncrunapp(
"neofetch",
"--stdout",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await fetch.communicate()
result = str(stdout.decode().strip()) + str(stderr.decode().strip())
await sysd.edit("`" + result + "`")
except FileNotFoundError:
await sysd.edit("`Instale o neofetch primeiro !!`")
@register(outgoing=True, pattern="^.botver$")
async def bot_ver(event):
""" For .botver command, get the bot version. """
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
if which("git") is not None:
ver = await asyncrunapp(
"git",
"describe",
"--all",
"--long",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await ver.communicate()
verout = str(stdout.decode().strip()) + str(stderr.decode().strip())
rev = await asyncrunapp(
"git",
"rev-list",
"--all",
"--count",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await rev.communicate()
revout = str(stdout.decode().strip()) + str(stderr.decode().strip())
await event.edit(
"`Versão do Userbot: " f"{verout}" "` \n" "`Revisão: " f"{revout}" "`"
)
else:
await event.edit(
"Pena que você não tem git, você está executando - 'v2.5' de qualquer jeito!"
)
@register(outgoing=True, pattern="^.pip(?: |$)(.*)")
async def pipcheck(pip):
""" For .pip command, do a pip search. """
if not pip.text[0].isalpha() and pip.text[0] not in ("/", "#", "@", "!"):
pipmodule = pip.pattern_match.group(1)
if pipmodule:
await pip.edit("`Procurando . . .`")
pipc = await asyncrunapp(
"pip3",
"search",
pipmodule,
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await pipc.communicate()
pipout = str(stdout.decode().strip()) + str(stderr.decode().strip())
if pipout:
if len(pipout) > 4096:
await pip.edit("`Resultado muito grande, enviando como arquivo`")
file = open("output.txt", "w+")
file.write(pipout)
file.close()
await pip.client.send_file(
pip.chat_id,
"output.txt",
reply_to=pip.id,
)
remove("output.txt")
return
await pip.edit(
"**Consulta: **\n`"
f"pip3 search {pipmodule}"
"`\n**Resultado: **\n`"
f"{pipout}"
"`"
)
else:
await pip.edit(
"**Consulta: **\n`"
f"pip3 search {pipmodule}"
"`\n**Resultado: **\n`Nenhum resultado encontrado/falso`"
)
else:
await pip.edit("`Use .help pip para ver um exemplo`")
@register(outgoing=True, pattern=r"^.(alive|on)$")
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
uptime = await get_readable_time((time.time() - StartTime))
output = (
"`ta tudo pegando!...`\n"
"`⊷⊷⊷⊷⊷⊷⊷⊷⊷⊷⊶⊷⊶⊶⊶⊶⊶⊶⊶`\n"
f"• 🚩 `Telethon : v{version.__version__} `\n"
f"• 🐍 `Python : v{python_version()} `\n"
f"• 🥱 `JoanLindo :` {DEFAULTUSER} \n"
"`-----------------------------`\n"
f"• 🛌 ` : {repo.active_branch.name} `\n"
f"• 🗃 `Módulos : {len(modules)} `\n"
f"• 😴 `BaianoBot : v{USERBOT_VERSION} `\n"
f"• 🕒 `Bot Uptime : {uptime} `\n"
"`⊷⊷⊷⊷⊷⊷⊷⊷⊷⊷⊶⊷⊶⊶⊶⊶⊶⊶⊶`"
)
if ALIVE_LOGO:
try:
logo = ALIVE_LOGO
await bot.send_file(alive.chat_id, logo, caption=output)
await alive.delete()
except BaseException:
await alive.edit(
output + "\n\n *`O logotipo fornecido é inválido."
"\nCertifique-se de que o link seja direcionado para a imagem do logotipo`"
)
else:
await alive.edit(output)
@register(outgoing=True, pattern="^.aliveu")
async def amireallyaliveuser(username):
""" For .aliveu command, change the username in the .alive command. """
message = username.text
output = ".aliveu [novo usuário sem colchetes] nem pode estar vazio"
if not (message == ".aliveu" or message[7:8] != " "):
newuser = message[8:]
global DEFAULTUSER
DEFAULTUSER = newuser
output = "Usuário alterado com sucesso para " + newuser + "!"
await username.edit("`" f"{output}" "`")
@register(outgoing=True, pattern="^.resetalive$")
async def amireallyalivereset(ureset):
""" For .resetalive command, reset the username in the .alive command. """
global DEFAULTUSER
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
await ureset.edit("`" "Usuário redefinido com sucesso para .alive/on!" "`")
CMD_HELP.update(
{
"sysd": ".sysd\
\nUso: Mostra informações do sistema usando neofetch.\
\n\n.spc\
\nUso: Mostrar especificação do sistema."
}
)
CMD_HELP.update(
{
"botver": ".botver\
\nUso: Mostra a versão do userbot."
}
)
CMD_HELP.update(
{
"pip": ".pip <módulo(s)>\
\nUso: Faz uma pesquisa de módulos pip."
}
)
CMD_HELP.update(
{
"alive": ".alive | .on\
\nUso: Digite .alive/.on para ver se seu bot está funcionando ou não.\
\n\n.aliveu <texto>\
\nUso: Muda o 'usuário' do .alive/.on para o texto que você deseja.\
\n\n.resetalive\
\nUso: Redefine o usuário para o padrão."
}
)
``` |
{
"source": "JoanLindo/sexosemcompromisso",
"score": 3
} |
#### File: userbot/modules/covid.py
```python
from covid import Covid
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.covid (.*)")
async def corona(event):
await event.edit("`Processando...`")
country = event.pattern_match.group(1)
covid = Covid(source="worldometers")
try:
country_data = covid.get_status_by_country_name(country)
output_text = (
f"`Confirmado : {format_integer(country_data['confirmed'])}`\n"
+ f"`Ativo : {format_integer(country_data['active'])}`\n"
+ f"`Mortes : {format_integer(country_data['deaths'])}`\n"
+ f"`Recuperados : {format_integer(country_data['recovered'])}`\n\n"
+ f"`Novos Casos : {format_integer(country_data['new_cases'])}`\n"
+ f"`Novas Mortes : {format_integer(country_data['new_deaths'])}`\n"
+ f"`Crítico : {format_integer(country_data['critical'])}`\n"
+ f"`Total de testes : {format_integer(country_data['total_tests'])}`\n\n"
+ f"Dados fornecidos por [Worldometer](https://www.worldometers.info/coronavirus/country/{country})"
)
await event.edit(f"Informações do covid-19 {country}:\n\n{output_text}")
except ValueError:
await event.edit(
f"Nenhuma informação encontrada para: {country}!\nVerifique a ortografia e tente novamente."
)
def format_integer(number, thousand_separator="."):
def reverse(string):
string = "".join(reversed(string))
return string
s = reverse(str(number))
count = 0
result = ""
for char in s:
count = count + 1
if count % 3 == 0:
if len(s) == count:
result = char + result
else:
result = thousand_separator + char + result
else:
result = char + result
return result
CMD_HELP.update(
{
"covid": ".covid <país>"
"\nUso: Obtenha informações sobre os dados do covid-19 em seu país.\n"
}
)
```
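A quick, repo-external sanity check of the reversal-based thousand separator above; the import path is an assumption about how the module is packaged:
```python
# Hypothetical check; assumes the module above is importable at this path.
from userbot.modules.covid import format_integer

assert format_integer(123) == "123"
assert format_integer(1234567) == "1.234.567"
# Agrees with Python's built-in grouping once the separator is swapped:
assert format_integer(1234567) == format(1234567, ",").replace(",", ".")
```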
#### File: userbot/modules/fake_load.py
```python
from asyncio import sleep
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.fl(?: |$)(.*)")
async def f_load(event):
event.pattern_match.group(1)
await event.edit("`Carregamento iniciado...`")
await sleep(1)
await event.edit("0%")
    # Sixths of a block (▎ ▍ ▌ ▊ ▉) animate the bar between full blocks,
    # one 0.03 s tick per percent.
    partials = ["▎", "▍", "▌", "▊", "▉"]
    for number in range(1, 100):
        full_blocks = "█" * (number // 6)
        partial = "" if number % 6 == 0 else partials[number % 6 - 1]
        await event.edit(str(number) + "% " + full_blocks + partial)
        await sleep(0.03)
    await event.edit("100% ████████████████▌\n`Carregamento completado!`")
CMD_HELP.update(
{
"fake_load": ".fl\
\nInicia um carregamento falso."
}
)
``` |
{
"source": "joanlopez/tuenti-contest-5",
"score": 4
} |
#### File: tuenti-contest-5/tests/test_challange2.py
```python
from unittest import TestCase
from src.challange2.prime import Primes
class TestChallange2(TestCase):
def test_that_prime_factors_of_315_are_3_3_5_and_7(self):
        self.assertEqual(Primes.get_primes_of(315), [3, 3, 5, 7])
def test_that_6_is_an_almost_prime(self):
self.assertTrue(Primes.is_almost_prime(6))
def test_that_25_is_an_almost_prime(self):
self.assertTrue(Primes.is_almost_prime(25))
def test_that_17_is_not_an_almost_prime(self):
self.assertFalse(Primes.is_almost_prime(17))
def test_that_81_is_not_an_almost_prime(self):
self.assertFalse(Primes.is_almost_prime(81))
def test_that_there_are_4_almost_primes_between_1_and_10(self):
        self.assertEqual(4, len(Primes.almost_primes_between(1, 10)))
def test_that_there_are_3_almost_primes_between_10_and_20(self):
        self.assertEqual(3, len(Primes.almost_primes_between(10, 20)))
``` |
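The tested module src/challange2/prime.py is not part of this dump. A hedged sketch of a Primes class that satisfies every assertion above, reading "almost prime" as a semiprime (a product of exactly two primes, counted with multiplicity):
```python
# Hypothetical src/challange2/prime.py; inferred from the tests, not the repo.
class Primes:
    @staticmethod
    def get_primes_of(n):
        """Prime factors with repetition, ascending: 315 -> [3, 3, 5, 7]."""
        factors, d = [], 2
        while d * d <= n:
            while n % d == 0:
                factors.append(d)
                n //= d
            d += 1
        if n > 1:
            factors.append(n)
        return factors

    @staticmethod
    def is_almost_prime(n):
        # Exactly two prime factors: 6 = 2*3 and 25 = 5*5 pass; 17 and 81 fail.
        return len(Primes.get_primes_of(n)) == 2

    @staticmethod
    def almost_primes_between(low, high):
        # Inclusive bounds reproduce the counts asserted above (4 in [1, 10]).
        return [n for n in range(low, high + 1) if Primes.is_almost_prime(n)]
```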
{
"source": "joanlopez/tuenti-contest-6",
"score": 4
} |
#### File: src/challenge1/main.py
```python
from tablerow import *
def print_output_line(case, result):
print("Case #" + str(case) + ": " + str(result))
def count_needed_tables(num_of_diners):
table_row = TableRow()
while table_row.max_diners < num_of_diners:
table_row.add_table()
return table_row.num_tables
def main():
num_of_cases = int(raw_input())
for i in range(num_of_cases):
num_of_diners = int(raw_input())
needed_tables = count_needed_tables(num_of_diners)
print_output_line(i+1, needed_tables)
if __name__ == '__main__':
main()
``` |
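tablerow.py is not included either, and the module above targets Python 2 (raw_input). A plausible sketch of TableRow, assuming one table seats four diners and each extra table pushed onto the row adds two seats (2n + 2); that seating rule is an assumption about the puzzle, not code taken from the repo:
```python
# Hypothetical tablerow.py; the 2n + 2 seating rule is an assumption.
class TableRow(object):
    def __init__(self):
        self.num_tables = 0
        self.max_diners = 0

    def add_table(self):
        # Pushing another table onto the row: n tables seat 2n + 2 diners.
        self.num_tables += 1
        self.max_diners = 2 * self.num_tables + 2
```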
{
"source": "joanlyq/groundtruth-object-detection",
"score": 3
} |
#### File: joanlyq/groundtruth-object-detection/parse_annots.py
```python
from io import StringIO
import json
import s3fs
import boto3
import pandas as pd
def parse_gt_output(manifest_path, job_name):
"""
Captures the json GroundTruth bounding box annotations into a pandas dataframe
Input:
manifest_path: S3 path to the annotation file
job_name: name of the GroundTruth job
Returns:
df_bbox: pandas dataframe with bounding box coordinates
for each item in every image
"""
filesys = s3fs.S3FileSystem()
with filesys.open(manifest_path) as fin:
annot_list = []
for line in fin.readlines():
record = json.loads(line)
            if job_name in record:  # skip manifest lines not produced by this labeling job
image_file_path = record["source-ref"]
image_file_name = image_file_path.split("/")[-1]
class_maps = record[f"{job_name}-metadata"]["class-map"]
imsize_list = record[job_name]["image_size"]
assert len(imsize_list) == 1
image_width = imsize_list[0]["width"]
image_height = imsize_list[0]["height"]
for annot in record[job_name]["annotations"]:
left = annot["left"]
top = annot["top"]
height = annot["height"]
width = annot["width"]
class_name = class_maps[f'{annot["class_id"]}']
annot_list.append(
[
image_file_name,
class_name,
left,
top,
height,
width,
image_width,
image_height,
]
)
df_bbox = pd.DataFrame(
annot_list,
columns=[
"img_file",
"category",
"box_left",
"box_top",
"box_height",
"box_width",
"img_width",
"img_height",
],
)
return df_bbox
def save_df_to_s3(df_local, s3_bucket, destination):
"""
Saves a pandas dataframe to S3
Input:
df_local: Dataframe to save
s3_bucket: Bucket name
destination: Prefix
"""
csv_buffer = StringIO()
s3_resource = boto3.resource("s3")
df_local.to_csv(csv_buffer, index=False)
s3_resource.Object(s3_bucket, destination).put(Body=csv_buffer.getvalue())
def main():
"""
Performs the following tasks:
1. Reads input from 'input.json'
2. Parses the GroundTruth annotations and creates a dataframe
3. Saves the dataframe to S3
"""
with open("input.json") as fjson:
input_dict = json.load(fjson)
s3_bucket = input_dict["s3_bucket"]
job_id = input_dict["job_id"]
gt_job_name = input_dict["ground_truth_job_name"]
mani_path = f"s3://{s3_bucket}/{job_id}/ground_truth_annots/{gt_job_name}/manifests/output/output.manifest"
df_annot = parse_gt_output(mani_path, gt_job_name)
dest = f"{job_id}/ground_truth_annots/{gt_job_name}/annot.csv"
save_df_to_s3(df_annot, s3_bucket, dest)
if __name__ == "__main__":
main()
``` |
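main() above reads three keys from input.json; a hypothetical example file (the bucket, job id, and job name are placeholders, not values from the repo):
```python
# Writes a hypothetical input.json in the shape main() expects.
import json

example_input = {
    "s3_bucket": "my-groundtruth-bucket",       # placeholder
    "job_id": "job-0001",                       # placeholder
    "ground_truth_job_name": "my-labeling-job"  # placeholder
}
with open("input.json", "w") as fjson:
    json.dump(example_input, fjson, indent=2)
```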
{
"source": "joanmarticarreras/BKTyper",
"score": 2
} |
#### File: joanmarticarreras/BKTyper/BKTyper.py
```python
from Bio import SeqIO
from Bio import AlignIO
from Bio.Emboss.Applications import NeedleCommandline
from Bio import Phylo
from Bio.Phylo import BaseTree
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from io import StringIO
import Bio
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
from Bio.Graphics import GenomeDiagram
from Bio.SeqFeature import FeatureLocation, SeqFeature
from reportlab.lib import colors
from Bio.Align.Applications import MafftCommandline
import os
import sys
import re
import regex
import numpy as np
import pandas as pn
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import pylab
import subprocess
from Bio._py3k import basestring
from Bio.Application import _Option, _Switch, AbstractCommandline
from Bio.Phylo.Applications import PhymlCommandline
from Bio.SeqRecord import SeqRecord
from PyPDF2 import PdfFileMerger
import shutil
# Set globals
pn.set_option('mode.chained_assignment', None)
###################
#######USAGE#######
###################
# $ python3 BKTyper.py <input-sequences> <mode: VP1, NCCR, complete>
###################
# Functions
def NCCR_classification(blast_out):
# NCCR Block classification with BLAST
array_block = blast_out.split("\n")
array_block = (pn.DataFrame(array_block[:-1]))
dataframe = array_block[0].str.split("\t",expand=True)
dataframe.columns = ['qseqid','sseqid','pident','length','mismatch','gapopen','qstart','qend','sstart','send','evalue','score']
dataframe.qstart = dataframe.qstart.astype(int)
dataframe.qend = dataframe.qend.astype(int)
dataframe_sorted = dataframe.sort_values('qstart')
dataframe_sorted_nr = dataframe_sorted.drop_duplicates(subset=["qstart","qend"], keep='last')
dataframe_sorted_nr.qstart = dataframe_sorted_nr.qstart.astype(str)
dataframe_sorted_nr.qend = dataframe_sorted_nr.qend.astype(str)
dataframe_sorted_nr['NCCR_positions'] = dataframe_sorted_nr.qstart + '-' + dataframe_sorted_nr.sseqid + '-' + dataframe_sorted_nr.qend
NCCR = ''.join(dataframe_sorted_nr['sseqid'].tolist())
NCCR_complex = '|'.join(dataframe_sorted_nr['NCCR_positions'].tolist())
return (NCCR,NCCR_complex)
def motif_finder(sequence,motif_file,NCCR):
motifs = []
motif_file = motif_file.read().splitlines()
NCCR_length = int(NCCR[-3:])
for i in motif_file:
(id,nucleotides,length) = i.split("\t")
motif = (re.finditer(nucleotides,sequence[1:NCCR_length]))
for m in motif:
start = m.start()+1
end = m.end()+1
motif = (id,start,end)
motifs.append(motif)
blocks = NCCR.split("|")
for i in blocks:
(Start,Block,End) = tuple(i.split("-"))
block = (Block,int(Start),int(End))
motifs.append(block)
df = pn.DataFrame(motifs,columns = ["Motif","Start","End"])
df_ordered = df.sort_values(by=["Start"])
return(df_ordered)
def write_visuals(seq_name,df,seq_length,results_pdf):
gd_diagram = GenomeDiagram.Diagram(seq_name, track_size=1)
new_row = pn.DataFrame({"Motif":seq_name,"Start":1,"End":1},index=[0])
df = pn.concat([new_row, df]).reset_index(drop = True)
NCCR = GenomeDiagram.FeatureSet()
for index, row in df.iterrows():
(motif_name,start_motif,end_motif) = (row["Motif"],row["Start"],row["End"])
cols = [motif_name,start_motif,end_motif]
        if index == 0:
block = SeqFeature(FeatureLocation(int(cols[1]), int(cols[2]), strand=-1), type="blocks", id=motif_name)
NCCR.add_feature(block,color=colors.HexColor("#8DD35F"), name=motif_name, label=True, label_size=8, label_position="middle", label_angle=180)
else:
if motif_name.islower():
motif = SeqFeature(FeatureLocation(int(cols[1]), int(cols[2]), strand=+1), type="motifs",id=motif_name)
NCCR.add_feature(motif, color=colors.HexColor("#8DD35F"), name=motif_name, label=True, label_size=10, label_position="left", label_angle=90)
else:
block = SeqFeature(FeatureLocation(int(cols[1]), int(cols[2]), strand=-1), type="blocks", id=motif_name)
if motif_name == "O":
NCCR.add_feature(block,color=colors.HexColor("#ffc69e"), name=motif_name, label=True, label_size=10, label_position="center", label_angle=180)
elif motif_name == "P":
NCCR.add_feature(block,color=colors.HexColor("#fff6d4"), name=motif_name, label=True, label_size=10, label_position="middle", label_angle=180)
elif motif_name == "Q":
NCCR.add_feature(block,color=colors.HexColor("#f6f9eb"), name=motif_name, label=True, label_size=10, label_position="middle", label_angle=180)
elif motif_name == "R":
NCCR.add_feature(block,color=colors.HexColor("#ebf9f6"), name=motif_name, label=True, label_size=10, label_position="middle", label_angle=180)
elif motif_name == "S":
NCCR.add_feature(block,color=colors.HexColor("#f9ebf6"), name=motif_name, label=True, label_size=10, label_position="middle", label_angle=180)
else:
NCCR.add_feature(block,color=colors.HexColor("#C8C4B7"), name=motif_name, label=True, label_size=20, label_position="right", label_angle=180)
NCCR_track = GenomeDiagram.Track(name="Annotated Features", height=0.3)
NCCR_track.add_set(NCCR)
gd_diagram.add_track(NCCR_track,3)
seq_length = int(cols[2])
rows = max(2, int(round(seq_length / 100)))
gd_diagram.draw(format='linear', tracklines=0, pagesize='A4', orientation = 'landscape', fragments=4, start=1, end=int(seq_length))
pdf_filepath = os.path.join('results','{}.pdf'.format(seq_name))
gd_diagram.write(pdf_filepath, 'PDF', dpi=300)
results_pdf.append(pdf_filepath)
def VP1_classification(aln):
# Decision tree for Polyoma BK subtyping based on VP1 polymorphisms (Morel et al Journal of Clinical Microbiology 2017, https://doi.org/10.11128/JCM.01180-16)
subgroup = "NA"
coordinates = {426:'aaaacctat',513:'aaagtac',456:'ctttgctg',465:'aggtggagaa',444:'taatttccacttctttg',495:'gctaatgaattacag',433:'attcaaggcagtaattt',414:'gcatggtggaggaaa'}
for position in coordinates:
for i in re.finditer(coordinates[position],str(aln[1,:].seq)):
coordinates[position] = i.start()
    if aln[:,coordinates[426]][0].upper() == "G":
        subgroup = "Ic"
    elif aln[:,coordinates[513]][0].upper() == "C":
        subgroup = "Ib-1"
    elif aln[:,coordinates[426]][0].upper() == "A":
        subgroup = "Ia"
    elif aln[:,coordinates[456]][0].upper() == "T" and aln[:,coordinates[465]][0].upper() == "T":
        subgroup = "Ib-2"
    elif aln[:,coordinates[426]][0].upper() == "T":
        subgroup = "III"
    elif aln[:,coordinates[444]][0].upper() == "T":
        subgroup = "II"
    elif aln[:,coordinates[495]][0].upper() == "G":
        subgroup = "IVa-1"
    elif aln[:,coordinates[495]][0].upper() == "C":
        subgroup = "IVa-2"
    elif aln[:,coordinates[433]][0].upper() == "G":
        subgroup = "IVc-1"
    elif aln[:,coordinates[414]][0].upper() == "A":
        subgroup = "IVc-2"
    else:
        subgroup = "IVb-1,2"
subgroup_detail = aln[:,coordinates[426]][0].upper()+aln[:,coordinates[513]][0].upper()+aln[:,coordinates[456]][0].upper()+aln[:,coordinates[465]][0].upper()+aln[:,coordinates[444]][0].upper()+aln[:,coordinates[495]][0].upper()+aln[:,coordinates[433]][0].upper()+aln[:,coordinates[414]][0].upper()
return(subgroup,subgroup_detail)
def VP1_tree(seq_name,seq,db):
sequences = ""
sequences = sequences.join([db,"\n",">",seq_name,"\n",seq,"\n"])
sequences_file = open("query_mafft_vp1.fasta", "w")
sequences_file.write(sequences)
sequences_file.close()
mafft_cline = MafftCommandline(input='query_mafft_vp1.fasta')
(mafft_stdout,mafft_stderr) = mafft_cline()
with open("aligned.fasta", "w") as handle:
handle.write(mafft_stdout)
mafft_align = AlignIO.read(StringIO(mafft_stdout), "fasta")
VP1_alignment = trim_alignment(mafft_align)
BKTGR_alignment = VP1_alignment[:,425:512]
AlignIO.write(BKTGR_alignment, "BKTGR_alignment.aln", "clustal")
os.system("~/bin/iqtree-1.6.12-Linux/bin/iqtree -s BKTGR_alignment.aln -bb 10000 -redo -m TEST -pre model_testing")
tree = Phylo.read("iqtree_GTR_fast.nwk", "newick")
for node in tree.get_nonterminals():
node.name = None
tree.ladderize()
tree.root_at_midpoint()
matplotlib.rc('font', size=5)
fig = pylab.figure(figsize=(10, 30))
axes = fig.add_subplot(1, 1, 1)
morel_vp1=[]
with open("source/VP1_BKTyper_MLtree_list.txt") as vp1_typing:
next(vp1_typing)
for i in vp1_typing:
(q,vp1) = tuple(i.split("\t"))
line = ("num",q,"s","nccr","dnccr",vp1,"dvp1")
morel_vp1.append(line)
morel_vp1.append(("num",seq_name,"s","nccr","dnccr","Query","dvp1"))
df = pn.DataFrame(morel_vp1,columns=["Num","Query","Strand","NCCR","Detail_NCCR","VP1_Subtype","VP1_Subtype_Detailed"])
df = df.replace('\n','', regex=True)
label_color = {}
color_legend = {"Query":"#FF0000","Ia":"#FF8080","Ib-1":"#FF9955","Ib-2":"#AC9D93","Ic":"#FFE680","II":"#37C871","III":"#8DD35F","IVa-1":"#93AC9D","IVa-2":"#80E5FF","IVb-1":"#8787DE","IVb-2":"#8787DE","IVc-1":"#E580FF","IVc-2":"#DE87AA"}
for index, row in df.iterrows():
(name,type) = row["Query"],row["VP1_Subtype"]
color = color_legend[type]
label_color[name] = color
pyplot.rcParams.update({'font.size': 18})
Phylo.draw(tree, do_show=False,axes=axes,label_colors=label_color,show_confidence=False)
pylab.title(seq_name,loc="center")
pylab.axis("off")
pylab.savefig("iqtree_GTR_fast.svg",format='svg', papertype = "a5", transparent = True, dpi=300)
def trim_alignment(alignment):
start_VP1 = 0
end_VP1 = 1
for col in range(alignment.get_alignment_length()):
if not "-" in alignment[:, col]:
start_VP1 = col
break
else:
pass
for col in reversed(range(alignment.get_alignment_length())):
if not "-" in alignment[:, col]:
end_VP1 = col
break
else:
pass
trim_alignment = alignment[:, start_VP1:end_VP1]
AlignIO.write(trim_alignment, "test.aln", "clustal")
return(trim_alignment)
# Create outputs
end_table = pn.DataFrame(columns=['Query','Strand','NCCR','Detail_NCCR','VP1_Subtype','VP1_Subtype_Detailed'])
convert_fasta = []
results_pdf = []
os.mkdir("results")
# Open (multi)fasta
for sequence in SeqIO.parse(sys.argv[1],"fasta"):
sequence.seq = sequence.seq.upper()
SeqIO.write(sequence,"target_sequence","fasta")
# Detect and correct strandess of the sequence with BLAST
strand = NcbiblastnCommandline(query="target_sequence",subject="source/dunlop.fasta", outfmt=5, max_hsps=1)()[0]
blast_result_record = NCBIXML.read(StringIO(strand))
sign = ""
for description in blast_result_record.descriptions:
if (description.e < 0.000001):
pass
else:
print(sequence.id+" Input sequence is not similar enough to polyoma BK")
continue
for alignment in blast_result_record.alignments:
for hsp in alignment.hsps:
if ( (hsp.sbjct_end - hsp.sbjct_start) > 0 ):
sign = "+"
else:
sequence = sequence.reverse_complement()
sign = "-"
SeqIO.write(sequence,"target_sequence","fasta")
# Alignments
if sys.argv[2] == 'complete':
if re.search('TTTTGC(.AAAA|A.AA|AA.A|AAA.)',str(sequence.seq)) is not None:
if re.search('TTTTGC(.AAAA|A.AA|AA.A|AAA.)',str(sequence.seq)).start() > 0:
                seq_pieces = re.split('(TTTTGC(?:.AAAA|A.AA|AA.A|AAA.))',str(sequence.seq))  # same ori motif as the search above
seq_pieces_fix = seq_pieces[1]
sequence.seq = Bio.Seq.Seq(seq_pieces_fix[1:] + seq_pieces[2] + seq_pieces[0] + "T")
SeqIO.write(sequence,"target_sequence","fasta") # Output the input sequence restructured as Dunlop reference
else:
print(sequence.id+" lacks the origin of replication")
continue
# NCCR BLAST
block = NcbiblastnCommandline(query="target_sequence", subject="source/NCCR_BKTyper.fasta", outfmt=6, word_size=12, perc_identity=75, evalue=0.05)()[0] ###
# VP1 Needleman and Wunch
a = NeedleCommandline(asequence="target_sequence", \
bsequence="source/VP1_Dunlop.fasta", \
gapopen=10, \
gapextend=0.5, \
outfile="needle_fname")
a() # execute the alignment
# Export the alignment back to Python
VP1_alignment = AlignIO.read("needle_fname", "emboss")
# Call functions based on mode
NCCR=NCCR_complex=subgroup=subgroup_detail = 'NA' # definition of table objects
motif_list=(open("source/motif_list.txt","r"))
vp1_db_file=(open("source/VP1_BKTyper_MLtree_list.fasta","r"))
vp1_db = vp1_db_file.read()
if sys.argv[2] == 'VP1':
(subgroup,subgroup_detail) = VP1_classification(VP1_alignment)
if sys.argv[3] == 'Tree':
VP1_tree(str(sequence.id),str(sequence.seq),vp1_db)
else:
pass
elif sys.argv[2] == 'NCCR':
(NCCR,NCCR_complex) = NCCR_classification(block)
regions = motif_finder(str(sequence.seq),motif_list,NCCR_complex)
length=len(sequence.seq)
write_visuals(sequence.id,regions,length,results_pdf)
elif sys.argv[2] == 'complete':
block = NcbiblastnCommandline(query="target_sequence", subject="source/NCCR_BKTyper.fasta", outfmt=6, word_size=12, perc_identity=75, evalue=0.05)()[0] ### # if complete genome is provided, NCCR numbering should be based on the re-ordered sequence
(subgroup,subgroup_detail) = VP1_classification(VP1_alignment)
if sys.argv[3] == 'Tree':
VP1_tree(str(sequence.id),str(sequence.seq),vp1_db)
else:
pass
(NCCR,NCCR_complex) = NCCR_classification(block)
regions = motif_finder(str(sequence.seq),motif_list,NCCR_complex)
length=len(sequence.seq)
write_visuals(sequence.id,regions,length,results_pdf)
else:
sys.exit("Select analysis mode: NCCR, VP1 or complete" + "\n")
# Append results to the results table and (multi)fasta file
end_table = end_table.append({'Query':sequence.id,'Strand':sign,'NCCR':NCCR,'Detail_NCCR':NCCR_complex,'VP1_Subtype':subgroup,'VP1_Subtype_Detailed':subgroup_detail},ignore_index=True)
convert_fasta.append(sequence)
end_table.to_csv(r'results.csv',sep="\t")
SeqIO.write(convert_fasta,"results.fasta","fasta")
merger = PdfFileMerger()
for pdf in results_pdf:
merger.append(pdf)
merger.write("results.pdf")
merger.close()
# Delete intermediate files
os.remove("target_sequence")
os.remove("needle_fname")
shutil.rmtree("results/")
``` |
{
"source": "joanmasco/aerostructures",
"score": 3
} |
#### File: aerostructures/aerodynamics/aerodynamics_problem_params.py
```python
from __future__ import print_function
import numpy as np
from aerostructures.number_formatting.is_number import isfloat
class AeroProblemParams:
#Jig shape geometry
jig_shape = 'aero_template.wgs'
def __init__(self):
#Dictionary containing the structural parameters
self.aero_params = self.get_aero_params()
#Aerodynamic mesh points coordinates
self.apoints_coord = self.aero_params['apoints_coord']
#Unique aerodynamic mesh points coordinates
self.apoints_coord_unique = self.aero_params['apoints_coord_unique']
#Function that returns the aerodynamic points coordinates
def get_aero_params(self):
apoints_coord = []
#Write the aerodynamic grid points coordinates into a list (excluding the root section)
with open(self.jig_shape) as f:
lines = f.readlines()
lines = [i.split() for i in lines]
for line in lines:
if all(isfloat(item) for item in line):
if len(line) == 3:
apoints_coord.append([float(line[0]), float(line[1]), float(line[2])])
if len(line) == 6:
apoints_coord.append([float(line[0]), float(line[1]), float(line[2])])
apoints_coord.append([float(line[3]), float(line[4]), float(line[5])])
apoints_coord = np.asarray(apoints_coord)
apoints_coord_unique, ind = np.unique(apoints_coord, axis=0, return_index=True)
apoints_coord_unique = apoints_coord_unique[np.argsort(ind)]
aero_params = {}
aero_params['apoints_coord'] = apoints_coord
aero_params['apoints_coord_unique'] = apoints_coord_unique
return aero_params
```
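The np.unique(..., axis=0, return_index=True) plus argsort step above deduplicates mesh points while keeping first-occurrence order, since np.unique alone returns rows sorted. A standalone illustration:
```python
# Order-preserving row dedup, as used in get_aero_params above.
import numpy as np

pts = np.array([[1., 0., 0.], [0., 0., 0.], [1., 0., 0.]])
uniq, ind = np.unique(pts, axis=0, return_index=True)  # sorted rows, first indices
uniq = uniq[np.argsort(ind)]                           # back to occurrence order
assert (uniq == np.array([[1., 0., 0.], [0., 0., 0.]])).all()
```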
#### File: aerostructures/aerodynamics/aerodynamics_ref_coord.py
```python
from __future__ import print_function
import numpy as np
from aerostructures.number_formatting.is_number import isfloat
class AeroRefCoord:
inflight_ref_shape_c = 'aero_inflight_ref_c.wgs'
inflight_ref_shape_2 = 'aero_inflight_ref_2.wgs'
def __init__(self):
#Dictionary containing the aerodynamic reference coordinates
self.aero_refs = self.get_aero_refs()
        #Coordinates of the in-flight shape of the aerodynamic surface (reference aircraft)
        self.xa_ref_c = self.aero_refs['xa_ref_c']
        #Coordinates of the second in-flight reference shape of the aerodynamic surface
        self.xa_ref_2 = self.aero_refs['xa_ref_2']
#Function that returns the aerodynamic points coordinates
def get_aero_refs(self):
xa_ref_c = []
xa_ref_2 = []
#Write the aerodynamic grid points coordinates into a list
with open(self.inflight_ref_shape_c) as f:
lines = f.readlines()
lines = [i.split() for i in lines]
for line in lines:
if all(isfloat(item) for item in line):
if len(line) == 3:
xa_ref_c.append([float(line[0]), float(line[1]), float(line[2])])
if len(line) == 6:
xa_ref_c.append([float(line[0]), float(line[1]), float(line[2])])
xa_ref_c.append([float(line[3]), float(line[4]), float(line[5])])
#Write the aerodynamic grid points coordinates into a list (excluding the root section)
with open(self.inflight_ref_shape_2) as f:
lines = f.readlines()
lines = [i.split() for i in lines]
for line in lines:
if all(isfloat(item) for item in line):
if len(line) == 3:
xa_ref_2.append([float(line[0]), float(line[1]), float(line[2])])
if len(line) == 6:
xa_ref_2.append([float(line[0]), float(line[1]), float(line[2])])
xa_ref_2.append([float(line[3]), float(line[4]), float(line[5])])
xa_ref_c = np.asarray(xa_ref_c)
xa_ref_2 = np.asarray(xa_ref_2)
aero_refs = {}
aero_refs['xa_ref_c'] = xa_ref_c
aero_refs['xa_ref_2'] = xa_ref_2
return aero_refs
``` |
{
"source": "JoanMouba/software-development",
"score": 4
} |
#### File: software-development/clean-code/clean_code_functions.py
```python
import csv
from pathlib import Path
from typing import Dict
from pprint import pprint
def load_data(csv_file_path: str) -> Dict:
""" Returns data from a csv to a dictionary
Args:
csv_file_path (str): path to the file containing the data
Returns:
dict
"""
path = Path(csv_file_path)
with path.open(mode="r") as fd:
file_read = csv.reader(fd)
next(file_read) # skip the header
data = {
row[0].strip(): row[1].strip()
for row in file_read
} # dict comprehension
return data
if __name__ == '__main__':
extracted_data = load_data("./data.csv")
pprint(extracted_data)
``` |
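A hypothetical round trip showing the CSV layout load_data expects (a header row that is skipped, then two comma-separated columns); assumes the function above is in scope:
```python
# Hypothetical demo; run alongside the module above so load_data is available.
from pathlib import Path

Path("data.csv").write_text("name, value\nalpha, 1\nbeta, 2\n")
assert load_data("./data.csv") == {"alpha": "1", "beta": "2"}
```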
{
"source": "Joanna0123/mmsegmentation",
"score": 2
} |
#### File: models/backbones/pvt_sr_block.py
```python
import math
import torch
import torch.nn as nn
from torch import einsum
from einops import rearrange
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
from detectron2.layers import DeformUnfold
class Mlp_sr(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention_sr(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, H, W):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SpatialReduction_Conv33(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, dim=256, depth=0):
super().__init__()
self.depth = depth
if depth == 0:
self.conv = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
self.norm = nn.BatchNorm2d(dim)
else:
self.convs = nn.ModuleList([
nn.Conv2d(dim, dim, kernel_size=3, stride=2, padding=1, groups=1, bias=False, dilation=1)
for i in range(depth)])
self.norms = nn.ModuleList([
nn.BatchNorm2d(dim)
for i in range(depth)])
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
if self.depth == 0:
x = self.relu(self.norm(self.conv(x)))
else:
for i in range(self.depth):
x = self.relu(self.norms[i](self.convs[i](x)))
return x
class Block_sr(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.sr_ratio = sr_ratio
self.sr = SpatialReduction_Conv33(dim, int(math.log(sr_ratio, 2)))
self.norm1 = norm_layer(dim)
self.attn = Attention_sr(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp_sr(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, H, W):
B, N, C = x.shape
x = self.sr(x.reshape(B, H, W, C).permute(0, 3, 1, 2)).reshape(B, C, -1).permute(0, 2, 1)
H_sr, W_sr = H // self.sr_ratio, W // self.sr_ratio
x = x + self.drop_path(self.attn(self.norm1(x), H_sr, W_sr))
x = x + self.drop_path(self.mlp(self.norm2(x)))
x = F.interpolate(
x.reshape(B, H_sr, W_sr, C).permute(0, 3, 1, 2),
size=(H, W), mode="bilinear", align_corners=True).reshape(B, C, H * W).permute(0, 2, 1)
return x
``` |
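A hypothetical shape-level smoke test for Block_sr, assuming the module's imports (timm, detectron2) resolve: the stride-2 reduction halves H and W before attention, and the bilinear upsample restores the original token count:
```python
# Hypothetical smoke test; checks shapes only.
import torch

block = Block_sr(dim=64, num_heads=4, sr_ratio=2)
x = torch.randn(2, 32 * 32, 64)   # (B, N=H*W, C)
out = block(x, H=32, W=32)        # attention runs on a 16x16 grid internally
assert out.shape == (2, 32 * 32, 64)
```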
{
"source": "Joanna0123/QueryInst",
"score": 2
} |
#### File: models/dense_heads/embedding_rpn_head.py
```python
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
Unlike traditional RPNHead, this module does not need FPN input, but just
decode `init_proposal_bboxes` and expand the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
- imgs_whwh (Tensor): Tensor with shape
(batch_size, 4), the dimension means
[img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
# The shape of proposals change from (num_proposals, 4)
# to (batch_size ,num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def aug_test_rpn(self, imgs, img_metas):
aug_proposal_boxes = []
aug_proposal_features = []
aug_imgs_whwh = []
for img, img_meta in zip(imgs, img_metas):
proposal_boxes, proposal_features, imgs_whwh = self.simple_test_rpn(img, img_meta)
aug_proposal_boxes.append(proposal_boxes)
aug_proposal_features.append(proposal_features)
aug_imgs_whwh.append(imgs_whwh)
return aug_proposal_boxes, aug_proposal_features, aug_imgs_whwh
``` |
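A hypothetical smoke test of the proposal decoding through forward_dummy, assuming mmdet and mmcv are installed; it only verifies the documented output shapes:
```python
# Hypothetical shape check for EmbeddingRPNHead.
import torch

head = EmbeddingRPNHead(num_proposals=100, proposal_feature_channel=256)
head.init_weights()
feats = [torch.zeros(2, 256, 32, 32)]            # one FPN level, batch of 2
img_metas = [{'img_shape': (512, 512, 3)}] * 2
proposals, proposal_feats, imgs_whwh = head.forward_dummy(feats, img_metas)
assert proposals.shape == (2, 100, 4)
assert proposal_feats.shape == (2, 100, 256)
assert imgs_whwh.shape == (2, 1, 4)
```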
{
"source": "JoannaBroniarek/self-driving-AttGAN",
"score": 3
} |
#### File: self-driving-AttGAN/code/model.py
```python
import tensorflow as tf
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.002, name="conv2d", padding = 'SAME'):
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer= tf.contrib.layers.xavier_initializer())
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.02))
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, name="deconv2d", stddev=0.002, with_w=False):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer= tf.contrib.layers.xavier_initializer(),)
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.02))
return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
def classifier_and_discriminator(input_var, name='C_D', reuse=None, NUM_CLASSES=2):
with tf.variable_scope(name,reuse=reuse):
leakyrelu_alpha = 0.25
num_blocks = 5
filters = 64
kernel_size = 4
strides = 2
# Five intermediate blocks : conv + layer norm + instance norm + leaky relu
for i in range(num_blocks):
conv = conv2d(input_var, filters, kernel_size, kernel_size, strides, strides, name = str(i+1))
layer_norm = tf.contrib.layers.layer_norm(conv)
leaky_relu_out = tf.nn.leaky_relu(layer_norm, alpha = leakyrelu_alpha)
input_var = leaky_relu_out
filters += filters
### CLASSIFIER PART
        # Output block : fc(1024) + LN + leaky relu + fc(NUM_CLASSES)
flatten_c = tf.contrib.layers.flatten(input_var)
fc_c = tf.contrib.layers.fully_connected(flatten_c, num_outputs = 1024, activation_fn=None)
dropout_c = tf.nn.dropout(fc_c, rate=0.2)
layer_norm_c = tf.contrib.layers.layer_norm(dropout_c)
leaky_relu_out_c = tf.nn.leaky_relu(layer_norm_c, alpha = leakyrelu_alpha)
# Classifier output
flatten_out_c = tf.contrib.layers.flatten(leaky_relu_out_c)
out_classifier = tf.contrib.layers.fully_connected(flatten_out_c,num_outputs=NUM_CLASSES, activation_fn=None)
out_classifier = tf.nn.sigmoid(out_classifier)
### DISCRIMINATOR PART
        # Output block : fc(1024) + LN + leaky relu + fc(1)
flatten_d = tf.contrib.layers.flatten(input_var)
fc_d = tf.contrib.layers.fully_connected(flatten_d, num_outputs = 1024, activation_fn=None)
dropout_d = tf.nn.dropout(fc_d, rate=0.35)
layer_norm_d = tf.contrib.layers.layer_norm(dropout_d)
leaky_relu_out_d = tf.nn.leaky_relu(layer_norm_d, alpha = leakyrelu_alpha)
        # Discriminator output
flatten_out_d = tf.contrib.layers.flatten(leaky_relu_out_d)
out_discriminator = tf.contrib.layers.fully_connected(flatten_out_d,num_outputs=1, activation_fn=None)
return out_discriminator, out_classifier
def encoder(inputs, name = 'G_encoder', reuse=tf.compat.v1.AUTO_REUSE, is_training = True):
"""
encoder function
:param: inputs
:param: name
:return list of layers:
"""
with tf.variable_scope(name, reuse=reuse):
leakyrelu_alpha = 0.2
num_blocks = 5
filters = 64
kernel_size = 4
strides = 2
layers = []
layers.append(inputs)
for i in range(num_blocks):
conv = conv2d(inputs, filters, kernel_size, kernel_size, strides, strides, name = str(i+1))
#batch_norm = tf.contrib.layers.layer_norm(conv, trainable=is_training)
batch_norm = tf.contrib.layers.batch_norm(conv, scale=True, trainable=is_training)
leaky_relu = tf.nn.leaky_relu(batch_norm, alpha = leakyrelu_alpha)
inputs = leaky_relu
filters += filters
layers.append(inputs)
return layers
def decoder(inputs, label, name = 'G_decoder', reuse=None, is_training = True):
"""
decoder function
:param: inputs (list of layers from encoder)
:param: name
:return tanh(conv5):
"""
leakyrelu_alpha = 0.2
filters = 1024
kernel_size = 4
strides = 2
input_ = inputs[-1]
def _attribute_concat(label, z):
label = tf.expand_dims(label, 1)
label = tf.expand_dims(label, 1)
#label = label[:,tf.newaxis, tf.newaxis,:] #or use expand_dims twice
label = tf.tile(label, [1, *z.get_shape().as_list()[1:3], 1])
label = tf.cast(label, dtype=tf.float32)
label = tf.concat([z, label], axis=3)
return label
input_ = _attribute_concat(label, input_)
with tf.variable_scope(name, reuse=reuse):
        for ind in list(reversed(range(len(inputs)))):
            output_shape = inputs[ind-1].get_shape().as_list()
            if ind==1:
                deconv = deconv2d(input_, output_shape, kernel_size, kernel_size, strides, strides, name = "deconv_{}".format(ind))
                #deconv = tf.nn.conv2d_transpose(input_, output_shape=output_shape, strides=[1, 2, 2, 1])
                return tf.nn.tanh(deconv)
            deconv = deconv2d(input_, output_shape, kernel_size, kernel_size, strides, strides, name = str(ind-1))
            concatenated = tf.concat([deconv, inputs[ind-1]], axis=3)
            #batch_norm = tf.contrib.layers.layer_norm(concatenated)
            batch_norm = tf.contrib.layers.batch_norm(concatenated, scale=True)
            input_ = tf.nn.relu(batch_norm, name = "ReLU_{}".format(ind))
def gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
with tf.name_scope('interpolate'):
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
with tf.name_scope('gradient_penalty'):
x = _interpolate(real, fake)
pred = f(x, reuse=tf.compat.v1.AUTO_REUSE)
if isinstance(pred, tuple):
pred = pred[0]
grad = tf.gradients(pred, x)[0]
norm = 1e-10 + tf.norm(tf.contrib.slim.flatten(grad), axis=1)
gp = tf.reduce_mean((norm - 1.)**2)
return gp
def buildMinibatchDiscriminator(features, numFeatures, kernels, kernelDim=5, reuse=False):
    """
    taken from https://github.com/matteson/tensorflow-minibatch-discriminator
    """
    import tensorflow.contrib.slim as slim  # required by l2_regularizer/flatten below
    with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
# TODO: no undefined dimensions until 1.0 release
batchTensor = tf.get_variable('disc_minibatch',
shape=[numFeatures, kernels, kernelDim],
initializer=tf.truncated_normal_initializer(stddev=0.1),
regularizer=slim.l2_regularizer(0.05))
flatFeatures = slim.flatten(features)
multFeatures = tf.einsum('ij,jkl->ikl',flatFeatures, batchTensor)
multFeaturesExpanded1 = tf.expand_dims(multFeatures,[1])
fn = lambda x: x - multFeatures
multFeaturesDiff = tf.exp(
-tf.reduce_sum(
tf.abs(
tf.map_fn(fn, multFeaturesExpanded1)
),
axis=[3])
)
output = tf.reduce_sum(multFeaturesDiff, axis=[1]) - 1 # differs from paper, but convergence seems better with -1 in my experiments
return output
```
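A hedged sketch of how gradient_penalty typically plugs into a Wasserstein critic loss, using the functions defined above; the 10.0 weight is the common WGAN-GP default, not a value taken from this repository:
```python
import tensorflow as tf

def critic_loss_with_gp(real_images, fake_images, real_logit, fake_logit, gp_weight=10.0):
    # Wasserstein critic objective plus the interpolation-based penalty above.
    adv = tf.reduce_mean(fake_logit) - tf.reduce_mean(real_logit)
    gp = gradient_penalty(classifier_and_discriminator, real_images, fake_images)
    return adv + gp_weight * gp
```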
#### File: self-driving-AttGAN/code/test.py
```python
import os
import tqdm
import re
import matplotlib.pyplot as plt
import numpy as np
import time
import json
import warnings
from datetime import timedelta
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import *
from model import *
import argparse
tf.get_logger().setLevel('ERROR')
"""parsing and configuration"""
def parse_args():
desc = "Tensorflow implementation of Attribute GANs for data augmentation"
parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--images_number', type=int, default=128, help='images number to produce')
    parser.add_argument('--epoch', type=int, default=1, help='number of epochs (read below as args.epoch)')
    parser.add_argument('--rand_seed', type=int, default=42, help='random seed (read below as args.rand_seed)')
    parser.add_argument('--model_name', type=str, default=None, help='path to model if starting from checkpoint')
    parser.add_argument('--graph_path', type=str, default=None, help='path to graph to restore and write to')
    parser.add_argument('--num_classes', type=int, default=2, help='Number of classes')
    return parser.parse_args()
def main(args):
BATCH_SIZE = args.images_number
NUM_CLASSES = args.num_classes
EPOCHS = args.epoch
restore_graph_path = args.graph_path
images_number = args.images_number
IMG_WIDTH, IMG_HEIGHT = 128, 128
BETA_1, BETA_2 = 0.4, 0.99
thres_int = 0.5
""" saving paths """
output_dir = "output"
if args.model_name is None:
model_name = time.strftime('%Y-%m-%d_%H:%M:%S_%z') + "_" + str(BATCH_SIZE)
print("[*] created model folder")
model_dir = '{}/{}'.format(output_dir, model_name)
else:
model_name = args.model_name
print("[*] proceeding to load model: {}".format(model_name))
model_dir = model_name
image_dir = '{}/images'.format(model_dir)
checkpoints_dir = '{}/checkpoints'.format(model_dir)
for path in [output_dir, model_dir, image_dir, checkpoints_dir]:
if not os.path.exists(path):
os.mkdir(path)
""" tf session definitions """
tf.reset_default_graph()
tf.random.set_random_seed(args.rand_seed)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#config.log_device_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
""" load TFRecordDataset """
validation_data = Barkley_Deep_Drive('../resources/test.tfrecords')
val_iterator = validation_data.get_batch(EPOCHS, BATCH_SIZE, shuffle = True)
val_image_iterator, val_label_iterator = val_iterator.get_next()
""" Placeholders """
xa = tf.placeholder(tf.float32,shape=[BATCH_SIZE,IMG_WIDTH,IMG_HEIGHT,3],name="xa") #orignal image
z = encoder(xa, reuse=tf.AUTO_REUSE) #encoder output
a = tf.placeholder(tf.float32, shape=[BATCH_SIZE, NUM_CLASSES],name="a") #original attributes
b = tf.placeholder(tf.float32, shape=[BATCH_SIZE, NUM_CLASSES],name="b") #desired attributes
xb_hat = decoder(z, b, reuse=tf.AUTO_REUSE) #decoder output
with tf.control_dependencies([xb_hat]):
xa_hat = decoder(z, a, reuse=tf.AUTO_REUSE) #decoder output
xa_logit_D, xa_logit_C = classifier_and_discriminator(xa, reuse=tf.AUTO_REUSE, NUM_CLASSES = NUM_CLASSES)
xb_logit_D, xb_logit_C = classifier_and_discriminator(xb_hat, reuse=tf.AUTO_REUSE, NUM_CLASSES = NUM_CLASSES)
""" checkpoints load """
saver = tf.train.Saver()
try:
epoch_to_restore = load_checkpoint(checkpoints_dir, sess)
print("[*] restoring from epoch".format(epoch_to_restore))
except:
epoch_to_restore = 0
print("[*] failed to load checkpoints")
sess.run(tf.global_variables_initializer())
""" mapping defintions """
d_loss_epoch, g_loss_epoch = [], []
if NUM_CLASSES == 2:
a_label_mapping = {'daytime': [1, 0], 'night': [0, 1]}
b_label_mapping = {'daytime': [0, 1], 'night': [1, 0]}
else:
label_mapping = {'daytime': [1], 'night': [0]}
flip = {'1':[0], '0': [1] }
""" image generation """
sess.run(val_iterator.initializer)
# Generating reconstructed image xa_hat and flipped attribute image xb_hat
image_batch, label_batch = sess.run([val_image_iterator, val_label_iterator])
# Transform label batch in our simple one hot encoded version
if NUM_CLASSES == 2:
a_label_batch = [a_label_mapping[label.decode("utf-8")] for label in label_batch]
b_label_batch = [b_label_mapping[label.decode("utf-8")] for label in label_batch]
else:
a_label_batch = [label_mapping[label.decode("utf-8")] for label in label_batch]
b_label_batch = [flip[str(label[0])] for label in a_label_batch]
    # Optionally rescale the labels with truncated-normal jitter around thres_int
    truncated_uniform_scale_flag = False  # not exposed via the CLI in this snippet; assumed off by default
    if truncated_uniform_scale_flag:
#TO DO: fix a_label_batch dtype from list to array
b_label_batch = tf.random_shuffle(a_label_batch)
a_label_batch = (tf.to_float(a_label_batch) * 2 - 1) * thres_int
b_label_batch = (tf.to_float(b_label_batch) * 2 - 1) * (tf.truncated_normal(tf.shape(b_label_batch)) + 2) / 4.0 * (2 * thres_int)
a_label_batch = sess.run(a_label_batch)
b_label_batch = sess.run(b_label_batch)
else:
a_label_batch = np.asarray(a_label_batch, dtype=np.float32)
b_label_batch = np.asarray(b_label_batch, dtype=np.float32)
step_xb_hat = sess.run(xb_hat, feed_dict={a:a_label_batch, b:b_label_batch, xa:image_batch})
step_xa_hat = sess.run(xa_hat, feed_dict={a:a_label_batch, b:b_label_batch, xa:image_batch})
"""image saving loop"""
for i in tqdm.tqdm(range(images_number), total = images_number):
output_path_xa = os.path.join(image_dir, "xa_image_" + str(i+1) + ".png")
output_path_xa_hat = os.path.join(image_dir, "xa_hat_image_" + str(i+1) + ".png")
output_path_xb_hat = os.path.join(image_dir, "xb_hat_image_" + str(i+1) + ".png")
        plt.imsave(output_path_xa, (image_batch[i]*255).astype(np.uint8))
        plt.imsave(output_path_xa_hat, (step_xa_hat[i]*255).astype(np.uint8))
        plt.imsave(output_path_xb_hat, (step_xb_hat[i]*255).astype(np.uint8))
print("[*] image saved!")
if __name__ == "__main__":
#tf.app.run(main=main)
args = parse_args()
if args is None:
exit()
main(args)
``` |
{
"source": "joanna-chen/schoolwork",
"score": 4
} |
#### File: schoolwork/Widgets/parts.py
```python
class Parts():
def __init__(self, name, price, quantity):
self.partName = name
self.partPrice = price
self.partQuantity = quantity
# getter methods
def getName(self):
return self.partName
def getPrice(self):
return self.partPrice
def getQuantity(self):
return int(self.partQuantity)
# setter methods
def setName(self, name):
self.partName = name
def setPrice(self, price):
self.partPrice = price
def setQuantity(self, quantity):
self.partQuantity = quantity
    def equals(self, other):
        return self.partName == other.partName
class PartInventory():
def __init__(self):
self.partsInventory = []
# mutator methods
    def addParts(self, fileName):
        with open(fileName, "r") as file:
            for line in file:
                l = line.split()
                self.partsInventory.append(Parts(l[0], l[1], l[2]))
def removePart(self, part):
self.partsInventory.remove(part)
# getter method
def getInventory(self):
return self.partsInventory
def main():
wFile = open("widgets.txt", "r")
# add the parts to the inventory
inventory = PartInventory()
inventory.addParts("parts.txt")
# output starting inventory
print('Starting Inventory: ')
for part in inventory.getInventory():
print(part.getName() + ' | Quantity: ' + str(part.getQuantity()))
print()
# declare and initialize variables
buildable = True
cost = 0
missingParts = ''
widget = {}
# output widget building information
print(wFile.readline())
for line in wFile:
# for each widget
if line == '\n':
if buildable == True:
print('The widget can be built for $%.2f' % (cost))
print('Parts Used:')
for part in inventory.getInventory():
if part.getName() in widget:
print(part.getName() + ': ' + str(int(part.getQuantity()) - widget[part.getName()]))
part.setQuantity(widget[part.getName()]) # reduce quantity used
if part.getQuantity() == 0:
inventory.removePart(part)
else:
print('It cannot be built')
print('There is an insufficient supply of the following parts:' + missingParts[1:])
# reset variable values
missingParts = ''
buildable = True
widget = {}
cost = 0
print()
print(wFile.readline())
continue
# iterate through all the parts required for a widget
else:
thePart = line.split()
widget[thePart[0]] = int(thePart[1])
found = False
while found == False:
# check if the part required is in inventory
for part in inventory.getInventory():
if thePart[0] == part.getName() and int(thePart[1]) <= part.getQuantity():
found = True
# compute cost
cost = cost + float(part.getPrice()) * int(thePart[1])
widget[thePart[0]] = part.getQuantity() - int(thePart[1])
if found == False:
buildable = False
missingParts = missingParts + ', ' + thePart[0]
break
# output ending inventory
print('Ending Inventory: ')
for part in inventory.getInventory():
print(part.getName() + ' | Quantity: ' + str(part.getQuantity()))
wFile.close()
main()
``` |
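For reference, a minimal pair of input files in the layout main() appears to expect: parts.txt holds "name price quantity" lines, while widgets.txt starts with a widget name, lists "part quantity" lines, and separates widgets with a blank line. The contents are illustrative only:
```python
# Hypothetical inputs for the inventory walk-through above.
with open("parts.txt", "w") as f:
    f.write("bolt 0.10 100\nplate 2.50 10\n")
with open("widgets.txt", "w") as f:
    f.write("WidgetA\nbolt 4\nplate 1\n\nWidgetB\nbolt 200\n\n")
```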
{
"source": "joannachuang/2019_summer_joanna",
"score": 3
} |
#### File: 0409/0409/0709.py
```python
import re
import io
import time
import numpy
import pandas
import sqlite3
import datetime
import requests
import lxml.html as LH
import xml.etree.ElementTree as xmltree
import Lily.ctao.hostmetadata as chmd
import Lily.ctao.database as cdb
def cwb_melt0():
import lzma
#db = cdb.database('data_crawler_cwb_earthquake_list.sqlite')
#df = db.to_dataframe('data_crawler_cwb_earthquake_list')
#df = df.reindex ( columns=['id', 'html', 'lzma_html'], fill_value='' )
#for ind, row in df.iterrows():
# df.at[ind,'lzma_html'] = lzma.compress(df.at[ind,'html'])
# print (df.at[ind,'id'] )
#df = df.drop(columns=['html'])
#df.to_sql('data_crawler_cwb_sensible_earthquake_download', db.connect, if_exists='replace', index=False)
return
def cwb_melt1():
import lzma
db = cdb.database('data_crawler.sqlite')
sql = '''
select Id, routeId, nameZh, nameEn, seqNo, pgp, longitude, showLon, showLat, vector from group by id
'''
df = pandas.read_sql(sql, db.connect , index_col=['rowid'])
df = df.reindex ( columns=[ 'a','b','c', 'd', 'e', 'f', 'g', 'h','i' ], fill_value='' )
for ind, row in df.iterrows():
# print ('melt', row[0])
json_tables = pandas.read_html( lzma.decompress( sqlite3.Binary(row['lzma_html']) ), encoding='utf-8')
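            # read_html returns a list of DataFrames; tables[2] is assumed to hold the key/value attribute table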
arg2 = json_tables[2]
df.at[ind,'routeId'] = arg2.iat[0,1]
df.at[ind,'nameZh'] = arg2.iat[1,1]
df.at[ind,'nameEn'] = arg2.iat[2,1]
df.at[ind,'seqNo'] = arg2.iat[3,1]
df.at[ind,'pgp'] = arg2.iat[4,1]
df.at[ind,'longitude'] = arg2.iat[5,1]
for ind2, row2 in json_tables[3].iterrows():
if isinstance(row2, pandas.core.series.Series):
for elem in row2:
if isinstance(elem,str):
df.at[ind, 'i'] = df.at[ind, 'i'] + ',' + elem
else:
if isinstance(elem,str):
df.at[ind, 'i'] = df.at[ind, 'i'] + ',' + row2
#df = df.drop(columns=['lzma_html'])
df.to_sql('data_rdset_pylily', db.connect, if_exists='append', index=False)
db.connect.execute('''delete from {0} where rowid not in
(select max (rowid) from {0} group by id)'''.format('data_rdset_pylily') )
db.connect.commit()
return
def cwb_melt2():
db = cdb.database('data_crawler.sqlite')
df = db.to_dataframe('data_rdset_pylily_cwb_sensible_earthquake')
station = []
for ind, row in df.iterrows():
for st in df.at[ind, 'Stations'].split(';'):
if u'''地區最大震度''' not in st and st != '':
rdset = [df.at[ind, 'id'],
df.at[ind, 'time'],
float(df.at[ind, 'px'][4:-2]),
float(df.at[ind, 'py'][4:-2]),
float(df.at[ind, 'depth'][:-3]),
float(df.at[ind, 'ML']),
df.at[ind, 'Location'],
''.join(st.split('\u3000')[:-1]) , float(st.split('\u3000')[-1:][0]) ]
station.append(rdset)
df2 = pandas.DataFrame(station, columns=['a','b','c','d', 'e','f','g', 'h', 'i'])
df2.to_sql('data_rdset_pylily_cwb_sensible_earthquake_LocalSeismicIntensity', db.connect, if_exists='replace', index=False)
return
if __name__ == '__console__' or __name__ == '__main__':
import os
thost = chmd.hostmetadata()
os.chdir (thost.warehouse)
cwb_melt1()
cwb_melt2()
```
#### File: 0409/0409/0710_road_latlng_cp.py
```python
import Lily.ctao.database as cdb
import Lily.crawler.url_string as url
import requests
import pandas as pd
import numpy
import io
import json
import sqlite3
import os
import xlrd
from xlutils.copy import copy
from xlwt import Style
df = pd.read_excel('''d:/pylily/0409/0409/data_opendata_tb_107lane_name.xls''', header = 0)
#for i in range(0, 236):
# target = '''https://maps.googleapis.com/maps/api/geocode/json?address={0}&key=<KEY>'''
# station_code = df.loc[:,'station_code']
# station_name = df.loc[:,'station_name']
# target = target.format(station_name.loc[i])
# filename = station_code.loc[i]
# file = '''C:/Users/User/Desktop/0710_road_latlng/{0}.json'''.format(filename)
# arg1 = url.url_string(target)
# arg1.to_file(file)
# print (target, file)
path = '''C:/Users/User/Desktop/0710_road_latlng'''
files = os.listdir(path)
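# each cached file is assumed to hold one Google Geocoding API JSON response per station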
px = []
py = []
for file in files:
with open(path +'/'+ file , 'r', encoding='utf8') as reader:
jf = json.loads(reader.read())
        # N016 and one other intersection could not be geocoded
if not len(jf['results']) == 0:
px.append( jf['results'][0]['geometry']['location']['lat'])
py.append( jf['results'][0]['geometry']['location']['lng'])
else :
px.append('X')
py.append('Y')
#print(jf['results'][0]['geometry']['location']['lat']) if not len(jf['results']) == 0 else print('ZERO_RESULTS')
#print(jf['results'][0]['geometry']['location']['lng']) if not len(jf['results']) == 0 else print('ZERO_RESULTS')
#def get_px(file):
# if not len(jf['results']) == 0:
# file['px'] = jf['results'][0]['geometry']['location']['lat']
# else :
# file['px'] = 'ZERO_RESULTS'
#def get_py(file):
# if not len(jf['results']) == 0:
# file['py'] = jf['results'][0]['geometry']['location']['lng']
# else :
# file['py'] = 'ZERO_RESULTS'
#df = df.apply(get_px, axis = 0)
#df = df.apply(get_py, axis = 0)
#print(px)
#print(py)
df['px'] = px
df['py'] = py
#df['station_name_from'] = df['station_name'].apply(lambda x : x.split('~')[0])
#df['station_name_to'] = df['station_name'].apply(lambda x : x.split('~')[1])
new = df["station_name"].str.split("~", expand = True)
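# str.split with expand=True returns a DataFrame with one column per '~'-separated segment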
df["station_1"]= new[0]
df["station_2"]= new[1]
df["station_3"]= new[2] if not new[2].empty else ''
cols = df.columns.tolist()
cols = cols[:2] + cols[-3:] + cols[3:9] + cols[10:12] + cols[9:10]
df = df[cols]
df.to_excel("0711_road_latlng_cp.xls", encoding='big5', header=True, index=False)
```
#### File: Lily/0507/cook_reposrt.py
```python
import csv
import docx
import pandas
import datetime
from docx import Document
def check_report_datetime(sz_datetime):
return
def check_time():
import Lily.ctao.database as cdb
import Lily.ctao.nsgstring as nstr
import Lily.ctao.hostmetadata as chmd
import re
host = chmd.hostmetadata()
db = cdb.database(host.database)
#^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|[0-9]|1[0-9]|2[0-3]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$
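    # pattern0 captures MM/DD dates, pattern1 HH:MM times; pattern2 is defined but unused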
    pattern0 = r'''(0?[1-9]|1[0-2])/(0[1-9]|[12][0-9]|3[01])'''
    pattern1 = r'''([0-2][0-9]):([0-5][0-9])'''
    pattern2 = r'''^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])|(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$'''
df = db.to_dataframe('hln_0206_3')
df = df.iloc[1:]
for ind, row in df.iterrows():
        twoday = [day for day in re.findall(pattern0, row[1])]
        twotim = [tim for tim in re.findall(pattern1, row[2])]
if len(twoday) == 0:
twoday = [('01','01'), ('01','01')]
if len(twoday) == 1:
twoday = [twoday[0],twoday[0]]
if len(twotim) == 0:
twotim = [('00','00'), ('00','00')]
if len(twotim) == 1:
twotim = [twotim[0], twotim[0]]
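        # twoday/twotim now each hold exactly two (start, end) tuples, padded with defaults where missing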
date1 = '2018-{0}-{1} {2}:{3}'.format( twoday[0][0], twoday[0][1], twotim[0][0], twotim[0][1] )
date2 = '2018-{0}-{1} {2}:{3}'.format( twoday[1][0], twoday[1][1], twotim[1][0], twotim[1][1] )
        df.loc[ind, 'beg'] = datetime.datetime.strptime(date1, '%Y-%m-%d %H:%M')
        df.loc[ind, 'end'] = datetime.datetime.strptime(date2, '%Y-%m-%d %H:%M')
    return df
def check_docx(docx_file_name):
from Lily.ctao.database import database
from Lily.ctao.nsgstring import alnum
from Lily.ctao.hostmetadata import hostmetadata
from Lily.blacksmith.file_feature import get_feature
host = hostmetadata()
db = database(host.database)
doc = Document(docx_file_name)
feature = get_feature(docx_file_name)
excelfile = feature['path'] + '/' + feature['name'] + '.xlsx'
tablename = (feature['name'] + '_{0}')
writer = pandas.ExcelWriter( excelfile , engine = 'xlsxwriter')
counter = 1
sheetlist = []
for tab in doc.tables:
data1=[]
for row in tab.rows:
data1.append( [cell.text for cell in row.cells] )
df = pandas.DataFrame(data1)
counter = counter + 1
table_name = tablename.format( str(counter).zfill(3) )
sheetlist.append(table_name)
df.to_sql(table_name, db.connect, if_exists='replace')
df.to_excel(writer, sheet_name=table_name)
writer.save()
writer.close()
return sheetlist
if __name__ == '__main__':
sheetlist = check_docx('''D:/kiki7_factory/hln_0206_report.docx''')
print(sheetlist)
```
#### File: Lily/blacksmith/file_feature.py
```python
def get_feature(filename):
import os, datetime
f_dict = {}
f_dict['name'], f_dict['extension'] = os.path.splitext(filename)
f_dict['path'], f_dict['name'] = os.path.split( f_dict['name'] )
statinfo = os.stat(filename)
f_dict['size'] = statinfo.st_size
f_dict['time_a'] = datetime.datetime.utcfromtimestamp(int( statinfo.st_atime))
f_dict['time_m'] = datetime.datetime.utcfromtimestamp(int( statinfo.st_mtime))
f_dict['time_c'] = datetime.datetime.utcfromtimestamp(int( statinfo.st_ctime))
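    # note: st_ctime is metadata-change time on Unix but creation time on Windows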
return f_dict
def get_md5sum(filename):
import hashlib
fileobj = open(filename, 'rb')
content = fileobj.read()
md5sum = hashlib.md5( content ).hexdigest()
fileobj.close()
print (filename)
return md5sum
def get_feature_with_md5sum(filename):
f_dict = get_feature(filename)
f_dict['md5_sum'] = get_md5sum(filename)
return f_dict
def get_all_filename(target_directory):
import os, pandas
rdset_rows = []
for path, dirs, files in os.walk(target_directory):
for fname in files:
src_filename = os.path.join(path, fname)
rdset_rows.append( {'file_name' : src_filename })
df = pandas.DataFrame.from_dict(rdset_rows, orient='columns')
return df
def get_all_filefeature_with_md5sum(target_directory):
import pandas
from Lily.blacksmith.mppool import mppool
pool = mppool()
file_list = get_all_filename(target_directory)['file_name'].tolist()
md5 = pool.map(get_feature_with_md5sum, file_list)
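    # Pool.map preserves input order, so md5 entries line up with file_list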
df = pandas.DataFrame.from_dict(md5, orient='columns')
return df
def to_database( target_dir ):
import Lily.ctao.database as cdb
import Lily.ctao.nsgstring as nstr
import Lily.ctao.hostmetadata as chmd
host = chmd.hostmetadata()
p1 = nstr.alnum(host.platform)
h1 = nstr.alnum(host.hostname)
d1 = nstr.alnum(target_dir)
db = cdb.database(host.database)
dflist = get_all_filefeature_with_md5sum(target_dir)
table_name = '''data_rdset_filemd5_{0}_{1}_hhot_{2}'''.format(p1, h1, d1)
dflist.to_sql(table_name, db.connect, if_exists='replace', index=False)
## TODO: run this on a directory
## processes a specified directory into the specified database
## can be run standalone
def check_module():
import sys
import Lily.ctao.hostmetadata as chmd
from Lily.blacksmith.mppool import mppool
pool = mppool()
this_host= chmd.hostmetadata()
if this_host.platform[:7] =='Windows':
from Lily.ctao.userargument import tkui
ui = tkui('select_target_directory',[['target','sel', 'directory']])
pool.run(to_database, ui.values['target'], 'get_all_file_feature')
    elif len(sys.argv) == 2:
pool.run(to_database, sys.argv[1], 'get_all_file_feature')
else:
target = input("Enter a directory name:(path)")
pool.run(to_database, target, 'get_all_file_feature')
if __name__ == '__main__':
    check_module()
```
#### File: Lily/blacksmith/mppool.py
```python
class mppool:
def __init__(self):
import Lily.ctao.database as cdb
import Lily.ctao.hostmetadata as chmd
self.this_host = chmd.hostmetadata()
self.log_database = cdb.database(self.this_host.database)
def map(self, your_function, your_datalist, message = 'mpool'):
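        # run your_function over your_datalist with a process pool and log timing to the database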
import pandas
import datetime
from multiprocessing import Pool
        mpPool = Pool(self.this_host.cpu_code)
        log_record = {'time_beg': datetime.datetime.now(),
                      'type_fun': type(your_function).__name__,
                      'type_data': type(your_datalist).__name__ + '_size(' + str(len(your_datalist)) + ')_message(' + message + ')',
                      'host_name': self.this_host.hostname,
                      'host_platform': self.this_host.platform,
                      'host_code_number': self.this_host.cpu_code}
        content = mpPool.map(your_function, your_datalist)
        log_record['time_end'] = datetime.datetime.now()
        log_record['time_cost'] = (log_record['time_end'] - log_record['time_beg']).seconds
        df = pandas.DataFrame.from_dict([log_record], orient='columns')
df.to_sql('data_lily_mppool_log', self.log_database.connect, if_exists='append', index =False)
mpPool.close()
return content
def run(self, your_function, your_data, message = 'run'):
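        # run your_function once in-process while recording the same timing log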
import pandas
import datetime
        log_record = {'time_beg': datetime.datetime.now(),
                      'type_fun': type(your_function).__name__,
                      'type_data': type(your_data).__name__ + '_message(' + message + ')',
                      'host_name': self.this_host.hostname,
                      'host_platform': self.this_host.platform,
                      'host_code_number': self.this_host.cpu_code}
        content = your_function(your_data)
        log_record['time_end'] = datetime.datetime.now()
        log_record['time_cost'] = (log_record['time_end'] - log_record['time_beg']).seconds
        df = pandas.DataFrame.from_dict([log_record], orient='columns')
df.to_sql('data_lily_mppool_log', self.log_database.connect, if_exists='append', index =False)
return content
```
#### File: Lily/crawler/etc_archive.py
```python
import re
import os
import datetime
import pandas, numpy
#CREATE TABLE data_crawler_ETC_M03A_pull(
# date NUM,
# pull TEXT,
# cloud_filename TEXT,
# local_filename TEXT,
# size INT,
# mtime NUM,
# ctime NUM,
# melt REAL
#)
class etc_archive:
def __init__(self, sub_group ='M03A'):
#
import Lily.ctao.hostmetadata as chmd
import Lily.ctao.database as cdb
self.sub_group = sub_group
self.hostmetadata = chmd.hostmetadata()
self.database = cdb.database(self.hostmetadata.database)
self.sub_warehouse = '{0}/crawler_ETC_{1}'.format(self.hostmetadata.warehouse, self.sub_group)
        self.excel_filename = '{0}/data_crawler_ETC_{1}_list.xlsx'.format(self.hostmetadata.warehouse, self.sub_group)
self.sqlite_tablename = 'data_crawler_ETC_{0}_list'.format(self.sub_group)
self.sqlite_tablepull = 'data_crawler_ETC_{0}_pull'.format(self.sub_group)
#check/create if not exists directory
if not os.path.exists(self.sub_warehouse) :
os.mkdir(self.sub_warehouse)
        #date regular expression YYYYMMDD
        date_YYYYMMDD_pattern = r'''([12]\d{3}(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01]))'''
        self.url = 'http://tisvcloud.freeway.gov.tw/history/TDCS/{0}/'.format(self.sub_group)
        self.cloud_archive_pattern = r'href="({0}_{1}\.tar\.gz)"'.format(self.sub_group, date_YYYYMMDD_pattern)
        self.local_archive_pattern = r'({0}_{1}\.tar\.gz)'.format(self.sub_group, date_YYYYMMDD_pattern)
self.check_archive_list()
def download_archive(self, keydate):
import requests
cloud_filename = self.archive_list.loc[keydate]['cloud_filename']
local_filename = self.archive_list.loc[keydate]['local_filename']
pull = self.archive_list.loc[keydate]['pull']
year_path = self.sub_warehouse +'/'+ keydate.strftime('%Y')
file_path = year_path + '/' + cloud_filename
#check/create if not exists directory
if not os.path.exists(year_path) :
os.mkdir(year_path)
if cloud_filename not in [None, numpy.nan] and ( local_filename in [None, numpy.nan] or pull == 'enforce') :
curl_path = self.url + '/' + cloud_filename
#download file from cloud
rrd = requests.get( curl_path, stream = True)
with open( file_path, 'wb') as output:
for chunk in rrd.iter_content(chunk_size=1024):
if chunk:
output.write(chunk)
#mark as downloaded
            self.archive_list.loc[keydate, 'pull'] = 'downloaded'
            self.archive_list.loc[keydate, 'local_filename'] = file_path
            self.archive_list.loc[keydate, 'size'] = os.path.getsize(file_path)
            self.archive_list.loc[keydate, 'ctime'] = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
            self.archive_list.loc[keydate, 'mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
print (keydate, cloud_filename)
def download_archive_list(self):
for keydate in self.archive_list.index:
cloud_filename = self.archive_list.loc[keydate]['cloud_filename']
if cloud_filename not in [None, numpy.nan]:
self.download_archive(keydate)
def save_archive_list(self):
self.archive_list.to_sql( self.sqlite_tablename , self.database.connect, if_exists='replace', index=True)
writer = pandas.ExcelWriter(self.excel_filename, engine='xlsxwriter', datetime_format='yyyy-mm-dd hh:mm', date_format='yyyy-mm-dd')
self.archive_list.to_excel(writer , self.sub_group )
workbook = writer.book
sheet_all = writer.sheets[ self.sub_group ]
sheet_all.set_column('A:C', 20)
sheet_all.set_column('D:D', 60)
sheet_all.set_column('E:G', 15)
writer.save()
return
def check_archive_list(self):
import pathlib
import requests
#step 1
days = pandas.date_range(datetime.datetime.strptime('2014-01-01', '%Y-%m-%d'), datetime.date.today() + datetime.timedelta(days=90), freq='D')
self.archive_list = pandas.DataFrame( columns =['date', 'pull', 'cloud_filename', 'local_filename', 'size', 'mtime', 'ctime' , 'melt'])
self.archive_list['date'] = days
self.archive_list = self.archive_list.set_index ('date')
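        # one row per calendar day from 2014 through 90 days ahead; filenames are filled in below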
#step 2 check list from webpage
html = requests.get(self.url, verify=False).text
could_list = re.findall(self.cloud_archive_pattern, html)
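        # each match is a (filename, YYYYMMDD, MM, DD) tuple from the nested capture groups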
for match_rdset in could_list:
            self.archive_list.loc[datetime.datetime.strptime(match_rdset[1], '%Y%m%d'), 'cloud_filename'] = match_rdset[0]
#step 3 check list from work-path (disk)
for path, dirs, files in os.walk( self.sub_warehouse ):
for archive in files :
filename = path + '/' + archive
if re.match( self.local_archive_pattern, archive) and os.path.isfile(filename):
keydate = datetime.datetime.strptime(archive[5:13], '%Y%m%d')
                    self.archive_list.loc[keydate, 'local_filename'] = filename
                    self.archive_list.loc[keydate, 'ctime'] = datetime.datetime.fromtimestamp(os.path.getctime(filename))
                    self.archive_list.loc[keydate, 'mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
                    self.archive_list.loc[keydate, 'size'] = os.path.getsize(filename)
                    self.archive_list.loc[keydate, 'pull'] = 'do_nothing'
#step3 read user pull list
self.pull_list = pandas.DataFrame()
if self.sqlite_tablepull in self.database.tables().to_dict('index'):
self.pull_list = pandas.read_sql_query('''select * from {0};'''.format(self.sqlite_tablepull), self.database.connect, index_col=['date'], parse_dates=['date'])
for keydate in self.pull_list.index:
                self.archive_list.loc[keydate, 'pull'] = self.pull_list.loc[keydate]['pull']
                self.archive_list.loc[keydate, 'melt'] = self.pull_list.loc[keydate]['melt']
return self.archive_list
if __name__ == '__console__' or __name__ == '__main__':
import sys
try:
for sub_group in ['M03A','M04A','M05A','M06A', 'M07A', 'M08A']:
print(sub_group)
ca_etc = etc_archive(sub_group)
ca_etc.save_archive_list()
ca_etc.download_archive_list()
print('the end')
    except IOError:
        print("I/O error while downloading an archive.")
    except ValueError:
        print("Could not convert data to an integer.")
    except Exception:
        print('Unexpected error:', sys.exc_info()[0])
``` |
{
"source": "joannadiong/Kishimoto_et_al_2021_JAP",
"score": 2
} |
#### File: joannadiong/Kishimoto_et_al_2021_JAP/process_R1.py
```python
import os, shutil
import numpy as np
import scipy.signal
from scipy import interpolate
import matplotlib.pyplot as plt
import trials_key
def find_trial_MVC_by_time(sub_info, sub_data):
# get activation levels and trials key-value pairs
sub_key = trials_key.gen(sub_info.sub)
# make dict of trials sorted by activation level
levels = ['01', '05', '10', '15', '25', '50', '75']
bysort_level = {}
for level in levels:
trial = sub_key[level]
bysort_level.update({level: trial})
# make dict of trials sorted by test order over time
bysort_time = {k: v for k, v in sorted(bysort_level.items(), key=lambda item: item[1])}
levels_by_time = list(bysort_time.keys())
# get MVC torques sorted by tested order over time
mvcs_by_time = []
levels_by_time_ = [] # get activation level only if MVC exists
for level in levels_by_time:
mvc_torque = _find_trial_MVC_normalize_torque_signals(sub_info, sub_data, level)
if mvc_torque:
mvcs_by_time.append(mvc_torque[0])
levels_by_time_.append(level)
return mvcs_by_time, levels_by_time_
def find_mmax_amp(sub_info, sub_data):
# index the last maximal stimulation
idx = int(sub_data['max_curr'].sig['trig'].times[-1] * sub_info.freq)
# calculate the peak to peak within a 50 ms window, 5 ms after the stimulus
ptp_start, ptp_stop = 0.005, 0.055 # in sec
idx1 = int(idx + ptp_start * sub_info.freq)
    idx2 = int(idx + ptp_stop * sub_info.freq)
emg = sub_data['max_curr'].sig['emgSO'].raw
time = sub_data['max_curr'].sig['emgSO'].times
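    # peak-to-peak of the raw EMG inside the window gives the M-wave amplitude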
mmax_amp = np.ptp(emg[idx1: idx2])
buffer = int(0.040 * sub_info.freq) # in ms
# plot window and check that it is correct
fig = plt.figure(figsize=(11, 7))
plt.subplot(1, 3, 1)
plt.grid()
plt.plot((time[idx1: idx2] - time[idx1]) * 1000, emg[idx1: idx2], 'r')
plt.xlabel('Time (ms)')
plt.ylabel('EMG SO (mV)')
plt.subplot(1, 3, (2, 3))
# set stim instance to zero, plot time in ms
plt.plot(0, emg[idx] + max(emg), 'r|', markersize=6) # idx / sub_info.freq
plt.plot((time[idx - buffer: idx2 + buffer] - time[idx]) * 1000,
emg[idx - buffer: idx2 + buffer], 'k')
plt.plot((time[idx1: idx2] - time[idx1] + ptp_start) * 1000, emg[idx1: idx2], 'r')
plt.xlabel('Time (ms)')
plt.ylabel('EMG SO (mV)')
plt.tight_layout()
plt.savefig('mmax.png', dpi=300)
shutil.move('mmax.png', os.path.join('.', 'data', 'proc', sub_info.sub, 'mmax.png'))
plt.close()
return mmax_amp, idx1, idx2
def find_mmax_rms(sub_info, sub_data, idx1, idx2):
# identify samples over which soleus M wave occurs; only soleus M waves were checked
emg = sub_data['max_curr'].sig['emgSO'].raw
time = sub_data['max_curr'].sig['emgSO'].times
mmax = emg[idx1: idx2]
# interpolate over the M wave
xaxis = list(range(0, len(mmax)))
f = interpolate.interp1d(xaxis, mmax)
xaxis_new = np.arange(0, len(mmax) - 1, 0.1)
mmax_new = f(xaxis_new)
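    # resample the M wave at 10x (0.1-sample steps) to locate zero crossings more precisely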
# identify the sample indexes where the first phase of the M wave crosses 0 volts
# similarly to <NAME> (1997) Fatigue in human thenar muscles paralysed by spinal cord injury
min_val = abs(min(mmax_new))
max_val = max(mmax_new)
height = np.mean([min_val, max_val]) * .7
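    # peak threshold at 70% of the mean of |min| and max rejects smaller noise peaks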
indexes, _ = scipy.signal.find_peaks(abs(mmax_new), height=height, distance=5)
plt.plot(mmax_new,'.-')
plt.plot(indexes, mmax_new[indexes], 'ro', label='min, max')
peak_index = indexes[0]
if mmax_new[peak_index] < 0:
mmax_new *= -1
for i in range(peak_index, 0, -1):
if mmax_new[i] > 0 and mmax_new[i - 1] < 0:
idx_start_p1_mmax = i-1
break
else:
idx_start_p1_mmax = 0
    for i in range(peak_index, len(mmax_new) - 1):
        if mmax_new[i] > 0 and mmax_new[i + 1] < 0:
            idx_stop_p1_mmax = i + 1
            break
    else:
        idx_stop_p1_mmax = len(mmax_new) - 1
# print(idx_start_p1_mmax, idx_stop_p1_mmax)
plt.plot([idx_start_p1_mmax, idx_stop_p1_mmax],
[mmax_new[idx_start_p1_mmax], mmax_new[idx_stop_p1_mmax]],
'bo', label='cross 0V')
plt.legend()
plt.xlabel('Samples')
plt.ylabel('EMG SO (mV)')
plt.tight_layout()
plt.savefig('mmax-p1.png', dpi=300)
shutil.move('mmax-p1.png', os.path.join('.', 'data', 'proc', sub_info.sub, 'mmax-p1.png'))
plt.close()
# calculate the root-mean-square of the first phase of the M wave
# from sklearn.metrics import mean_squared_error
# i = np.zeros(len(mmax_new[idx_start_p1_mmax: idx_stop_p1_mmax]))
# np.sqrt(mean_squared_error(i, mmax_new[idx_start_p1_mmax: idx_stop_p1_mmax])) # gets same answer
mmax_p1_rms = np.sqrt(np.sum(mmax_new[idx_start_p1_mmax: idx_stop_p1_mmax] ** 2) / len(mmax_new[idx_start_p1_mmax: idx_stop_p1_mmax]))
return mmax_p1_rms
def find_mvc_emg_rms(sub_info, max_vals_and_indexes, signals_above_threshold):
# use index of MVC torque to find MVC EMG
mvc_torque_idx = max_vals_and_indexes.mvc_torque[1]
mvc_torque = max_vals_and_indexes.mvc_torque[0]
torque = signals_above_threshold.torques
emgSO = signals_above_threshold.emgSO # rectified EMG, not enveloped
plt.subplot(3,1,1)
plt.plot(torque, 'k')
plt.plot(mvc_torque_idx, mvc_torque, 'ro')
plt.ylabel('Torque (Nm)')
plt.subplot(3,1,2)
plt.plot(emgSO, 'k')
plt.plot(mvc_torque_idx, emgSO[mvc_torque_idx], 'ro')
plt.ylabel('EMG SO (mV)')
# get root mean square soleus EMG over 50 ms window over the MVC index
half_win = int(sub_info.freq * 0.05 / 2)
mvc_indexes = list(range(mvc_torque_idx - half_win, mvc_torque_idx + half_win))
mvc_emg = emgSO[mvc_torque_idx - half_win: mvc_torque_idx + half_win]
mvc_emg_rms = np.sqrt(np.sum(mvc_emg ** 2) / len(mvc_emg))
# from sklearn.metrics import mean_squared_error
# i = np.zeros(len(mvc_emg))
# np.sqrt(mean_squared_error(i, mvc_emg)) # gets same answer
plt.subplot(3,1,3)
plt.plot(list(range(mvc_torque_idx - int(sub_info.freq / 6), mvc_torque_idx + int(sub_info.freq / 6))),
emgSO[mvc_torque_idx - int(sub_info.freq / 6): mvc_torque_idx + int(sub_info.freq / 6)], 'k')
for i, j in zip(mvc_indexes, mvc_emg):
plt.plot(i, j, 'go', markersize=2)
plt.xlabel('Samples')
plt.ylabel('EMG SO (mV)')
plt.tight_layout()
plt.savefig('mvc_torq_emg.png', dpi=300)
shutil.move('mvc_torq_emg.png', os.path.join('.', 'data', 'proc', sub_info.sub, 'mvc_torq_emg.png'))
plt.close()
return mvc_emg_rms
def find_trial_emg_rms(sub_info, sub_data):
nsamples_before_trig = int(sub_info.freq * 0.05) # get EMG over 50 ms window
emgs_rect = dict()
i = 1
j = len(list(sub_data.keys())[4:])
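    # j = number of trial keys -> one subplot row per trial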
def _determine_sit_rest_indexes(sub_info, sub_data, key):
nsamples_before_trig = int(sub_info.freq * 0.5)
idx1 = int(sub_data[key].sig['trig'].times[0] * sub_info.freq)
idx2 = int(sub_data[key].sig['trig'].times[1] * sub_info.freq)
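        # the trigger preceded by higher mean torque (over 0.5 s) marks the sitting contraction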
if np.mean(sub_data[key].sig['torque'].proc[idx1 - nsamples_before_trig: idx1]) > \
np.mean(sub_data[key].sig['torque'].proc[idx2 - nsamples_before_trig: idx2]):
index_sit = idx1
index_rest = idx2
else:
index_sit = idx2
index_rest = idx1
return index_rest, index_sit
for key in list(sub_data.keys())[4:]:
index_rest, index_sit = _determine_sit_rest_indexes(sub_info, sub_data, key)
# shift indexed EMG region away from filter artefact close to stimulus artefact
filter_artefact_length = int(sub_info.freq * 0.05)
index_start, index_stop = index_sit - (filter_artefact_length + nsamples_before_trig), index_sit - filter_artefact_length
emgSO = sub_data[key].sig['emgSO'].rect[index_start: index_stop]
emgMG = sub_data[key].sig['emgMG'].rect[index_start: index_stop]
emgLG = sub_data[key].sig['emgLG'].rect[index_start: index_stop]
emgs_rect_ = {key: {'emgSO': emgSO, 'emgMG': emgMG, 'emgLG': emgLG}}
emgs_rect.update(emgs_rect_)
plt.subplot(j, 1, i)
plt.plot(emgSO, 'k', label='SO')
plt.plot(emgMG, 'r', label='MG')
plt.plot(emgLG, 'b', label='LG')
plt.ylim(0, 0.5)
plt.yticks(ticks=[], labels=[])
if i == 2:
plt.legend()
if i == 6:
            plt.ylabel('EMG (ylim 0-0.5 mV)')
i += 1
plt.xlabel('Samples')
plt.tight_layout()
plt.savefig('emg_rect.png', dpi=300)
shutil.move('emg_rect.png', os.path.join('.', 'data', 'proc', sub_info.sub, 'emg_rect.png'))
plt.close()
return emgs_rect
def normalise_emg(sub_data, mvc_emg_rms, mmax_p1_rms, emgs_rect):
emg_norm_mvc, emg_norm_mmax = dict(), dict()
for key in list(sub_data.keys())[4:]:
emg = emgs_rect[key]['emgSO']
emg_rms = np.sqrt(np.sum(emg ** 2) / len(emg))
# from sklearn.metrics import mean_squared_error
# i = np.zeros(len(emg))
# np.sqrt(mean_squared_error(i, emg)) # gets same answer
emg_mvc = emg_rms / mvc_emg_rms * 100
emg_norm_mvc_ = {key: {'norm_mvc': emg_mvc}}
emg_norm_mvc.update(emg_norm_mvc_)
emg_mmax = emg_rms / mmax_p1_rms * 100
emg_norm_mmax_ = {key: {'norm_mmax': emg_mmax}}
emg_norm_mmax.update(emg_norm_mmax_)
return emg_norm_mvc, emg_norm_mmax
def _find_trial_MVC_normalize_torque_signals(sub_info, sub_data, level):
# get calibrated max torque during the MVC for activation levels 1-75% trials
torque = sub_data[level].sig['torque'].proc
index_above_threshold = list(torque > 30) # set torque threshold at 30 Nm
count = 0
indexes = []
# Extract indexes of torque data during the MVC preconditioning
    for i in range(len(index_above_threshold)):
        if index_above_threshold[i]:
            indexes.append(i)
            if i == 0 or not index_above_threshold[i - 1]:
                count += 1
        if count == 5:
            break
    if not indexes:  # if MVC was not recorded
        return None
    mvc_torque = max(torque[indexes])
    mvc_torque_index = np.argmax(torque[indexes])
    mvc_torque = (mvc_torque, mvc_torque_index)  # only torques above threshold are indexed
fig = plt.figure(figsize=(11, 7))
plt.subplot(1, 3, 1)
plt.grid()
plt.plot(torque[indexes], 'k')
plt.plot(mvc_torque[1], mvc_torque[0] + 2, 'ro')
plt.ylabel('Torque (Nm)')
plt.subplot(1, 3, (2, 3))
plt.plot(torque, 'k')
plt.ylabel('Torque (Nm)')
plt.tight_layout()
plt.savefig('mvc_' + level + '.png', dpi=300)
shutil.move('mvc_' + level + '.png', os.path.join('.', 'data', 'proc', sub_info.sub, 'mvc_' + level + '.png'))
plt.close()
return mvc_torque
``` |
{
"source": "joannadiong/zEpid",
"score": 2
} |
#### File: zEpid/tests/test_superlearner.py
```python
import pytest
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.testing as pdt
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.linear_model import LogisticRegression, LinearRegression
from zepid.superlearner import EmpiricalMeanSL, GLMSL, StepwiseSL, SuperLearner
@pytest.fixture
def data():
data = pd.DataFrame()
data['C'] = [5, 10, 12, 13, -10, 0, 37]
data['B'] = [0, 0, 0, 1, 1, 1, 1]
data['M'] = [0, 0, 1, np.nan, 0, 1, 1]
return data
@pytest.fixture
def data_test():
# True Models: y ~ a + w + w*x + N(0, 1)
# True Models: Pr(b=1) ~ logit(a + w - w*x)
data = pd.DataFrame()
data['X'] = [1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0]
data['W'] = [-3, 2, -2, -1, 2, -2, 2, -2, -1, -1, 1, 2, -1, 0, -2, -1, -1, -3, -1, 1]
data['A'] = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0]
data['Y'] = [-6.6, 4.2, -2.0, -0.6, 6.6, -2.2, 1.2, -4.9, -2.2, 0.8, 1.3, 3.4, 0.3, 1.4, -1.8, -2.4, -1.6,
-4.1, -2.5, 2.5]
data['B'] = [0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1]
return data
class TestEmpiricalMeanSL:
def test_error_missing_data(self, data):
empm = EmpiricalMeanSL()
with pytest.raises(ValueError, match="missing values in X or y"):
empm.fit(np.asarray(data['M']), np.asarray(data['C']))
with pytest.raises(ValueError, match="missing values in X or y"):
empm.fit(np.asarray(data['C']), np.asarray(data['M']))
def test_error_shapes(self, data):
empm = EmpiricalMeanSL()
with pytest.raises(ValueError, match="same number of observations"):
empm.fit(np.asarray(data['C']), np.array([0, 1, 1]))
def test_mean_correct(self, data):
empm = EmpiricalMeanSL()
# Continuous
empm.fit(X=np.asarray(data['B']), y=np.asarray(data['C']))
npt.assert_allclose(empm.empirical_mean, np.mean(data['C']))
# Binary
empm.fit(X=np.asarray(data['C']), y=np.asarray(data['B']))
npt.assert_allclose(empm.empirical_mean, np.mean(data['B']))
def test_predict(self, data):
empm = EmpiricalMeanSL()
# Continuous
empm.fit(X=np.asarray(data['B']), y=np.asarray(data['C']))
X_pred = np.array([1, 1, 1])
pred_y = empm.predict(X=X_pred)
assert pred_y.shape[0] == X_pred.shape[0] # Same shape in output
npt.assert_allclose(empm.empirical_mean,
[np.mean(data['C'])] * X_pred.shape[0])
# Binary
empm.fit(X=np.asarray(data['C']), y=np.asarray(data['B']))
X_pred = np.array([1, 1, 1, 0])
pred_y = empm.predict(X=X_pred)
assert pred_y.shape[0] == X_pred.shape[0] # Same shape in output
npt.assert_allclose(empm.empirical_mean,
[np.mean(data['B'])] * X_pred.shape[0])
class TestGLMSL:
def test_error_missing_data(self, data):
f = sm.families.family.Binomial()
glm = GLMSL(f)
with pytest.raises(ValueError, match="missing values in X or y"):
glm.fit(np.asarray(data['M']), np.asarray(data['C']))
with pytest.raises(ValueError, match="missing values in X or y"):
glm.fit(np.asarray(data['C']), np.asarray(data['M']))
def test_error_shapes(self, data):
f = sm.families.family.Binomial()
glm = GLMSL(f)
with pytest.raises(ValueError, match="same number of observations"):
glm.fit(np.asarray(data['C']), np.array([0, 1, 1]))
def test_match_statsmodels_continuous(self, data_test):
f = sm.families.family.Gaussian()
glm = GLMSL(f)
glm.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['Y']))
# Checking chosen covariates
sm_glm = smf.glm("Y ~ A + W + X", data_test, family=f).fit()
npt.assert_allclose(glm.model.params,
sm_glm.params)
# Checking predictions from model
step_preds = glm.predict(np.asarray(data_test.loc[0:5, ['A', 'W', 'X']]))
npt.assert_allclose(step_preds,
sm_glm.predict(data_test.loc[0:5, ]))
def test_match_statsmodels_binary(self, data_test):
f = sm.families.family.Binomial()
glm = GLMSL(f)
glm.fit(np.asarray(data_test[['A', 'W']]), np.asarray(data_test['B']))
# Checking chosen covariates
sm_glm = smf.glm("B ~ A + W", data_test, family=f).fit()
npt.assert_allclose(glm.model.params,
sm_glm.params)
# Checking predictions from model
step_preds = glm.predict(np.asarray(data_test.loc[0:5, ['A', 'W']]))
npt.assert_allclose(step_preds,
sm_glm.predict(data_test.loc[0:5, ]))
class TestStepWiseSL:
def test_error_setup(self):
f = sm.families.family.Binomial()
# Testing selection method error
with pytest.raises(ValueError, match="`method` must be one"):
StepwiseSL(f, selection="wrong")
# Testing interaction_order < 0
with pytest.raises(ValueError, match="interaction_order"):
StepwiseSL(f, order_interaction=-1)
# Testing interaction_order != int
with pytest.raises(ValueError, match="interaction_order"):
StepwiseSL(f, order_interaction=0.4)
def test_error_missing_data(self, data):
f = sm.families.family.Binomial()
step = StepwiseSL(f)
with pytest.raises(ValueError, match="missing values in X or y"):
step.fit(np.asarray(data['M']), np.asarray(data['C']))
with pytest.raises(ValueError, match="missing values in X or y"):
step.fit(np.asarray(data['C']), np.asarray(data['M']))
def test_error_shapes(self, data):
f = sm.families.family.Binomial()
step = StepwiseSL(f)
with pytest.raises(ValueError, match="same number of observations"):
step.fit(np.asarray(data['C']), np.array([0, 1, 1]))
def test_error_backward_saturated(self, data_test):
f = sm.families.family.Binomial()
step = StepwiseSL(f, selection="backward", order_interaction=1, verbose=True)
with pytest.raises(ValueError, match="Saturated model"):
step.fit(np.asarray(data_test[['A', 'W']]), np.asarray(data_test['B']))
def test_forward_continuous(self, data_test):
f = sm.families.family.Gaussian()
step = StepwiseSL(f, selection="forward", order_interaction=1)
step.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['Y']))
# Checking chosen covariates
        best_x_indices = np.asarray((1, 5, 4, 3))  # order in which forward selection (by AIC) added covariates
npt.assert_array_equal(np.asarray(step.cols_optim),
best_x_indices)
# Checking predictions from model
best_x_preds = np.array([-6.79917101, 5.38279072, -1.86983794, -1.22659046, 5.38279072, -1.86983794])
step_preds = step.predict(np.asarray(data_test.loc[0:5, ['A', 'W', 'X']]))
npt.assert_allclose(step_preds,
best_x_preds)
def test_backward_continuous(self, data_test):
f = sm.families.family.Gaussian()
step = StepwiseSL(f, selection="backward", order_interaction=1)
step.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['Y']))
# Checking chosen covariates
        best_x_indices = np.asarray((1, 3, 4, 5))  # covariates retained by backward selection (by AIC)
npt.assert_array_equal(np.asarray(step.cols_optim),
best_x_indices)
# Checking predictions from model
best_x_preds = np.array([-6.79917101, 5.38279072, -1.86983794, -1.22659046, 5.38279072, -1.86983794])
step_preds = step.predict(np.asarray(data_test.loc[0:5, ['A', 'W', 'X']]))
npt.assert_allclose(step_preds,
best_x_preds)
def test_forward_binary(self, data_test):
f = sm.families.family.Binomial()
step = StepwiseSL(f, selection="forward", order_interaction=1)
step.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['B']))
# Checking chosen covariates
        best_x_indices = np.asarray((1, 3))  # order in which forward selection (by AIC) added covariates
npt.assert_array_equal(np.asarray(step.cols_optim),
best_x_indices)
# Checking predictions from model
best_x_preds = np.array([0.00646765, 0.96985036, 0.7380893, 0.45616085, 0.96985036, 0.7380893])
step_preds = step.predict(np.asarray(data_test.loc[0:5, ['A', 'W', 'X']]))
npt.assert_allclose(step_preds,
best_x_preds, rtol=1e-5)
def test_backward_binary(self, data_test):
f = sm.families.family.Binomial()
step = StepwiseSL(f, selection="backward", order_interaction=1)
step.fit(np.asarray(data_test[['A', 'X']]), np.asarray(data_test['B']))
# Checking chosen covariates
        best_x_indices = np.asarray([])  # backward selection (by AIC) dropped every covariate
npt.assert_array_equal(np.asarray(step.cols_optim),
best_x_indices)
# Checking predictions from model
best_x_preds = np.array([0.7, 0.7, 0.7, 0.7, 0.7, 0.7])
step_preds = step.predict(np.asarray(data_test.loc[0:5, ['A', 'X']]))
npt.assert_allclose(step_preds,
best_x_preds, rtol=1e-5)
class TestSuperLearner:
@pytest.fixture
def load_estimators_continuous(self):
emp = EmpiricalMeanSL()
linr = LinearRegression()
step = StepwiseSL(family=sm.families.family.Gaussian(), selection="forward", order_interaction=1)
return [emp, linr, step]
@pytest.fixture
def load_estimators_binary(self):
emp = EmpiricalMeanSL()
logr = LogisticRegression()
step = StepwiseSL(family=sm.families.family.Binomial(), selection="forward", order_interaction=1)
return [emp, logr, step]
def test_error_estimator_length(self, load_estimators_continuous):
with pytest.raises(ValueError, match="estimators and estimator_labels"):
SuperLearner(estimators=load_estimators_continuous, estimator_labels=["wrong", "number"])
def test_error_solver(self, load_estimators_continuous):
with pytest.raises(ValueError, match="The solver INVALID_SOLVER is not currently"):
SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"],
solver="INVALID_SOLVER")
def test_error_lossf(self, load_estimators_continuous):
with pytest.raises(ValueError, match="The loss function INVALID_LOSSF is not currently"):
SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"],
loss_function="INVALID_LOSSF")
def test_error_shapes(self, data, load_estimators_continuous):
sl = SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"])
with pytest.raises(ValueError, match="same number of observations"):
sl.fit(np.asarray(data['C']), np.array([0, 1, 1]))
with pytest.raises(ValueError, match="same number of observations"):
sl.fit(np.array([0, 1, 1]), np.asarray(data['C']))
def test_error_nan(self, data, load_estimators_continuous):
sl = SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"], folds=2)
with pytest.raises(ValueError, match="missing values in X or y"):
sl.fit(np.asarray(data['C']), np.asarray(data['M']))
with pytest.raises(ValueError, match="missing values in X or y"):
sl.fit(np.asarray(data['M']), np.asarray(data['C']))
fsl = sl.fit(np.asarray(data['B']).reshape(-1, 1), np.asarray(data['C']))
with pytest.raises(ValueError, match="missing values in X"):
fsl.predict(np.asarray(data['M']))
def test_error_before_fit(self, data, load_estimators_continuous):
sl = SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"])
with pytest.raises(ValueError, match="must be called before"):
sl.predict(np.asarray(data['C']))
with pytest.raises(ValueError, match="must be called before"):
sl.summary()
def test_warn_lossf(self, data_test, load_estimators_binary):
sl = SuperLearner(estimators=load_estimators_binary, estimator_labels=["Mean", "LineR", "Step"], folds=3)
with pytest.warns(UserWarning, match="looks like your `y` is binary"):
sl.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['B']))
def test_continuous_superlearner(self, data_test, load_estimators_continuous):
sl = SuperLearner(estimators=load_estimators_continuous, estimator_labels=["Mean", "LineR", "Step"], folds=5)
fsl = sl.fit(np.asarray(data_test[['A', 'W', 'X']]), np.asarray(data_test['Y']))
# Coefficients and CV-Error
expected = pd.DataFrame.from_records([{"estimator": "Mean", "cv_error": 10.2505625, "coefs": 0.097767},
{"estimator": "LineR", "cv_error": 1.90231789, "coefs": 0.357968},
{"estimator": "Step", "cv_error": 1.66769069, "coefs": 0.544265}])
pdt.assert_frame_equal(fsl.est_performance,
expected)
# Predicted values
expected = np.array([-5.65558813, 4.45487519, -1.91811241, -1.46252119, 4.45487519, -1.91811241])
npt.assert_allclose(fsl.predict(np.asarray(data_test.loc[0:5, ["A", "W", "X"]])),
expected)
def test_binary_superlearner(self, data_test, load_estimators_binary):
sl = SuperLearner(estimators=load_estimators_binary, estimator_labels=["Mean", "LogR", "Step"],
loss_function='nloglik', folds=5)
fsl = sl.fit(np.asarray(data_test[['A', 'X']]), np.asarray(data_test['B']))
# Coefficients and CV-Error
expected = pd.DataFrame.from_records([{"estimator": "Mean", "cv_error": -0.049431, "coefs": 0.966449},
{"estimator": "LogR", "cv_error": -0.030154, "coefs": 0.033551},
{"estimator": "Step", "cv_error": 1.797190, "coefs": 0.}])
pdt.assert_frame_equal(fsl.est_performance,
expected)
# Predicted values
expected = np.array([0.69634645, 0.70191334, 0.70322108, 0.69766808, 0.70191334, 0.70322108])
npt.assert_allclose(fsl.predict(np.asarray(data_test.loc[0:5, ["A", "X"]])),
expected)
```
#### File: causal/doublyrobust/utils.py
```python
import numpy as np
# Utilities only meant for the doubly-robust branch
def tmle_unit_bounds(y, mini, maxi, bound):
# bounding for continuous outcomes
v = (y - mini) / (maxi - mini)
v = np.where(np.less(v, bound), bound, v)
v = np.where(np.greater(v, 1-bound), 1-bound, v)
return v
def tmle_unit_unbound(ystar, mini, maxi):
# unbounding of bounded continuous outcomes
return ystar*(maxi - mini) + mini
``` |
{
"source": "joanna-janos/torchdata",
"score": 3
} |
#### File: torchdata/tests/samplers_test.py
```python
import torch
import torchdata
def create_example(over: bool):
labels = torch.tensor([0, 0, 0, 1, 0, 0, 1])
sampler_class = getattr(
torchdata.samplers, "Random" + ("Over" if over else "Under") + "Sampler"
)
sampler = sampler_class(labels)
return torch.tensor([labels[index] for index in sampler])
def test_random_oversampler():
oversampled = create_example(True)
assert (oversampled == 0).sum() == (oversampled == 1).sum()
def test_random_oversampler_length():
oversampled = create_example(True)
assert len(oversampled) == 2 * 5
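    # oversampling balances classes up to the majority count (5 each -> 10 total)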
def test_random_undersampler():
undersampled = create_example(False)
assert (undersampled == 0).sum() == (undersampled == 1).sum()
def test_random_undersampler_length():
undersampled = create_example(False)
assert len(undersampled) == 2 * 2
``` |
{
"source": "joannak-vmware/wavefront-pyformance",
"score": 2
} |
#### File: joannak-vmware/wavefront-pyformance/example_runtime_metrics.py
```python
import argparse
import time
from wavefront_pyformance import tagged_registry
from wavefront_pyformance import wavefront_reporter
def report_metrics(host, server='', token=''):
"""Runtime Metric Reporting Function Example."""
reg = tagged_registry.TaggedRegistry()
wf_proxy_reporter = wavefront_reporter.WavefrontProxyReporter(
host=host, port=2878, distribution_port=2878, registry=reg,
source='runtime-metric-test',
tags={'global_tag1': 'val1', 'global_tag2': 'val2'},
prefix='python.proxy.',
enable_runtime_metrics=True).report_minute_distribution()
wf_direct_reporter = wavefront_reporter.WavefrontDirectReporter(
server=server, token=token, registry=reg,
source='runtime-metric-test',
tags={'global_tag1': 'val1', 'global_tag2': 'val2'},
prefix='python.direct.',
enable_runtime_metrics=True).report_minute_distribution()
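    # flush buffered metrics immediately, then shut each reporter down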
wf_proxy_reporter.report_now()
wf_proxy_reporter.stop()
wf_direct_reporter.report_now()
wf_direct_reporter.stop()
if __name__ == '__main__':
# python example_runtime_metrics.py proxy_host server_url server_token
arg = argparse.ArgumentParser()
arg.add_argument('host', help='Wavefront proxy host name.')
arg.add_argument('server', help='Wavefront server for direct ingestion.')
arg.add_argument('token', help='Wavefront API token.')
ARGS = arg.parse_args()
while True:
report_metrics(ARGS.host, ARGS.server, ARGS.token)
time.sleep(5)
``` |
{
"source": "JoannaMadNat/ATCProject",
"score": 2
} |
#### File: ATCProject/ATC2_0/gate_views.py
```python
from django.views.generic import CreateView, DeleteView, UpdateView, ListView
from django.forms import ModelForm, ModelChoiceField
from django.urls import reverse_lazy
from .models import Gate, Airport
FIELDS = ["identifier", "size", "airport"]
class GateForm(ModelForm):
class Meta:
model = Gate
fields = FIELDS
airport = ModelChoiceField(required=True, queryset=Airport.objects.all())
class GateList(ListView):
queryset = Gate.objects.order_by("identifier")
template_name = "gate/index.html"
class GateCreate(CreateView):
model = Gate
form_class = GateForm
template_name = "create_edit.html"
success_url = reverse_lazy("gate_index")
def form_invalid(self, form):
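        # re-render the form with errors but report HTTP 400 instead of the default 200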
response = super().form_invalid(form)
response.status_code = 400
return response
class GateUpdate(UpdateView):
model = Gate
form_class = GateForm
template_name = 'create_edit.html'
success_url = reverse_lazy("gate_index")
def form_invalid(self, form):
response = super().form_invalid(form)
response.status_code = 400
return response
class GateDelete(DeleteView):
model = Gate
template_name = 'confirm_delete.html'
success_url = reverse_lazy("gate_index")
```
#### File: ATC2_0/original_views/runway_views.py
```python
from django.shortcuts import render, redirect
from ATC.views.validation import validate_identifier, get_size
from ..models import Runway, Plane, Airport
def runways_index(request):
return render(request, 'runways/index.html', {
'runways': Runway.objects.all().order_by("identifier"),
'planes': Plane.objects.all().order_by("identifier"),
})
def runways_create(request):
if request.method == "POST":
identifier = request.POST.get("identifier")
if Runway.objects.filter(identifier=identifier).count() != 0:
return render(request, 'landing/bad-input.html', status=420)
runway = Runway()
runway.identifier = identifier
runway.size = get_size(request.POST.get("size")) # no need to validate because select statement
runway.airport = Airport.objects.get(pk=request.POST.get("airport"))
runway.save()
return redirect(runways_index)
return render(request, 'runways/create.html', {'airports': Airport.objects.all().order_by("name")})
def runways_update(request, runway_id):
if Runway.objects.filter(pk=runway_id).count() != 1:
return render(request, 'landing/not-found.html', status=404)
if request.method == "POST":
identifier = request.POST.get("identifier")
if not validate_identifier(identifier, Runway, runway_id):
return render(request, 'landing/bad-input.html', status=420)
runway = Runway.objects.get(pk=runway_id)
runway.identifier = identifier
runway.size = get_size(request.POST.get("size"))
runway.airport = Airport.objects.get(pk=request.POST.get("airport"))
runway.save()
return redirect(runways_index)
return render(request, 'runways/update.html', {
'runway': Runway.objects.get(pk=runway_id), 'airports': Airport.objects.all().order_by("name")
})
def runways_delete(request, runway_id):
if Runway.objects.filter(pk=runway_id).count() != 1:
return render(request, 'landing/not-found.html', status=404)
Runway.objects.get(pk=runway_id).delete()
return redirect(runways_index)
```
#### File: ATC2_0/original_views/validation.py
```python
from ATC.models import SIZES
def validate_name(value, obj, id):  # the string to validate, the object type, the element id
    res = obj.objects.filter(name=value)
    if res.count() > 0 and res[0].id != id:
        return False
    return True
# check if username exists and is not same object
def validate_username(value, obj, id):
    res = obj.objects.filter(username=value)
    if res.count() > 0 and res[0].id != id:
        return False
    return True
def validate_identifier(value, obj, id):
    res = obj.objects.filter(identifier=value)
    if res.count() > 0 and res[0].id != id:
        return False
    return True
def set_coords(airport, request): # specific for airports
res = request.POST.get("x")
if res:
airport.x = res
res = request.POST.get("y")
if res:
airport.y = res
airport.save()
def get_size(value):
    if value == 'SMALL':
        return SIZES[0][0]
    elif value == 'MEDIUM':
        return SIZES[1][0]
    elif value == 'LARGE':
        return SIZES[2][0]
``` |
{
"source": "joannamickamedina/-OOP-58001",
"score": 4
} |
#### File: joannamickamedina/-OOP-58001/Program 2 Final.py
```python
from tkinter import *
class MyWindow:
def __init__(self, win):
self.lbl1 = Label(win, text='Enter your Fullname:', fg="red")
self.lbl1.place(x=20, y=50)
self.t1 = Entry(bd=3)
self.t1.place(x=200, y=50)
self.b1 = Button(win, text='Click to display your Fullname', fg="red", command=self.click)
self.b1.place(x=20, y=100)
self.t3 = Entry(bd=3)
self.t3.place(x=200, y=100)
def click(self):
result= str(self.t1.get())
self.t3.insert(END, str(result))
window=Tk()
mywin=MyWindow(window)
window.title('Midterm in OOP')
window.geometry("400x300+10+10")
window.mainloop()
``` |
{
"source": "JoannaNitek/ScoutingBox",
"score": 2
} |
#### File: ScoutingBox/users/views.py
```python
from django.shortcuts import render, redirect
from django.views import View
# Create your views here.
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import render, redirect
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
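            # keeps the user logged in by rotating the session hash to the new password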
messages.success(request, 'Your password was successfully updated!')
return redirect('/accounts/login/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'change_password.html', {
'form': form
})
# class SingUpView(View):
#
# def get(self, request):
# form = SignUpForm()
# return render(request, 'signup.html', {'form': form})
#
# def post(self, request):
# form = SignUpForm(request.POST)
# if form.is_valid():
# new_user = form.save()
# return redirect('/accounts/login/')
# else:
# return render(request, 'signup.html', {'form': form})
# from django.contrib.auth import login, authenticate
# from django.shortcuts import render, redirect
# from .forms import SignUpForm
#
# def signup(request):
# if request.method == 'POST':
# form = SignUpForm(request.POST)
# if form.is_valid():
# form.save()
# username = form.cleaned_data.get('email')
# raw_password = form.cleaned_data.get('<PASSWORD>')
# user = authenticate(email=username, password=<PASSWORD>)
# login(request, user)
# return redirect('accounts/login/')
# else:
# form = SignUpForm()
# return render(request, 'signup.html', {'form': form})
``` |
{
"source": "Joanna-O-Ben/ADM-HW1",
"score": 4
} |
#### File: Problem1/Introduction/Write a function.py
```python
a function.py<gh_stars>0
def is_leap(year):
# Write your logic here
for s in range(1900, 10 ** 5 + 1, 100):
if year == s and s % 400 != 0:
return False
for i in range(1900, 10 ** 5 + 1, 4):
if year == i:
return True
return False
year = int(input())
print(is_leap(year))
```
#### File: Problem1/Strings/Mutations.py
```python
def mutate_string(string, position, character):
a = list(string)
a[position] = character
b = "".join(a)
return b
``` |
{
"source": "JoannaSakowska/simple_delvemc",
"score": 2
} |
#### File: simple_delvemc/simple_adl/parallel_plot_hotspot.py
```python
__author__ = "<NAME>"
import yaml
import os
import subprocess
import simple_adl.search
import pandas as pd
from multiprocessing import Pool
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~ DELVE-MC DWARF SEARCH 2021 ~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Enter number of threads to use
# Use $ lspcu to check how many threads per core
# e.g. Joanna's has 2 per core, so max 14 threads = 7 cores
n_threads = 14
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sub-routines ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
def submit_plot_job(ra, dec, mod):
# Output
outfile = 'candidate_{:0.2f}_{:0.2f}.png'.format(ra, dec)
# Commands
command = 'python {}/plot_hotspot.py --ra {:0.2f} --dec {:0.2f} --mod {:0.2f} --outfile {}'.format(os.path.dirname(simple_adl.search.__file__), ra, dec, mod, outfile)
print(command)
print('Preparing plotting jobs...')
subprocess.run(command.split(' '), shell=False)
return
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Main search ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
if __name__ == '__main__':
# Reading config
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config',type=str,required=False,default='config.yaml',
help='config file [.yaml]')
args = parser.parse_args()
with open(args.config, 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
# Reading in candidates
# JDS: Temporarily setting result directory
# JDS: Need to formalise this to use the config
results_dir = '/home/js01093/dwarf/simple_adl/simple_adl/results_dir'
plots_dir = '/home/js01093/dwarf/simple_adl/simple_adl/plot_dir/'
# Reading in candidates from folder
#candidates = pd.read_csv('{}/{}.csv'.format(results_dir, 'out'), delimiter=',', header=None)
# To read in candidates after make_list.py
#candidates = pd.read_csv('{}.csv'.format('candidate_list'), delimiter=',', header=None)
# To read in candidates AFTER 5 sigma cut
candidates = pd.read_csv('{}.csv'.format('candidate_list_5sig'), delimiter=',', header=None)
ra_candidate_all, dec_candidate_all, mod_candidate_all = [], [], []
ra_candidate_all.extend(candidates.iloc[:,1])
dec_candidate_all.extend(candidates.iloc[:,2])
mod_candidate_all.extend(candidates.iloc[:,3])
ra_candidate, dec_candidate, mod_candidate = [], [], []
# JDS: Method to skip overwriting plots in plot_dir
for ra, dec, mod in zip(ra_candidate_all, dec_candidate_all, mod_candidate_all):
if os.path.exists(os.path.join(plots_dir, 'candidate_{:0.2f}_{:0.2f}.png'.format(ra, dec))):
print('EXISTS candidate_{:0.2f}_{:0.2f}.png'.format(ra, dec))
else:
ra_candidate.append(ra)
dec_candidate.append(dec)
mod_candidate.append(mod)
# Clean memory
ra_candidate_all, dec_candidate_all, mod_candidate_all = [], [], []
print('Ready to plot candidates!')
# Zipping arguments to feed into command
plot_arguments = [*zip(ra_candidate,dec_candidate,mod_candidate)]
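    # starmap unpacks each (ra, dec, mod) tuple into submit_plot_job's three arguments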
with Pool(n_threads) as p:
p.starmap(submit_plot_job, plot_arguments)
``` |
{
"source": "joanna-salek/master_thesis",
"score": 2
} |
#### File: classic_free_aligment/complexity_z/complexity_z_example.py
```python
from classic_free_aligment.complexity_z.complexity_z import fasta_tree_z_complexity
################################################
# EXAMPLE 1
################################################
def example1():
fasta_tree_z_complexity("../../in/influenza_viruses.fasta",
"Influenza viruses phylogenetic tree - z complexity method")
################################################
# EXAMPLE 2
################################################
def example2():
fasta_tree_z_complexity("../../in/SPARC_refseq_transcript.fasta",
"SPARC phylogenetic tree - z complexity method")
```
#### File: master_thesis/DFT/DFT.py
```python
from app.chaos_game import CGR
from scipy.fftpack import fft
import cmath
import math
from app.read_data import fasta_parser
class DFT_CGR(CGR):
def __init__(self, seq, kind="RY"):
super().__init__(seq, kind)
self.F = fft(self.complex)
self.PS = []
self.AS = []
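        # power spectrum |F_k|^2 and phase angle of each Fourier coefficient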
for x in self.F:
self.PS.append((abs(x)) ** 2)
self.AS.append(cmath.phase(x))
def get_DFT(self):
return self.PS
def T_m(seq, m):
n = len(seq)
if n == m:
return seq
seq2 = [seq[0]]
for k in range(1, m):
q = k * n / m
r = math.floor(q)
if r == 0:
r = 1
if q.is_integer():
seq2.append(seq[int(q)])
else:
if r < n - 1:
seq2.append(seq[r] + (q - r) * (seq[r + 1] - seq[r]))
else:
seq2.append(seq[r])
return seq2
def DFT_data(kind, seq, draw_CGR):
cgr = DFT_CGR(seq, kind)
if draw_CGR:
cgr.plot_CGR()
DFT = cgr.get_DFT()
return DFT
def DFT_from_fasta(input_file, draw_CGR=False, CGR_types=("RY", "MK", "WS")):
items = {}
data = fasta_parser(input_file)
    if isinstance(CGR_types, (list, tuple)):
        for ele in CGR_types:
            d = {}
            for r in range(len(data[0])):
                d[data[0][r]] = DFT_data(ele, data[1][r], draw_CGR)
            items[ele] = d
    else:
        d = {}
        for r in range(len(data[0])):
            d[data[0][r]] = DFT_data(CGR_types, data[1][r], draw_CGR)
        items[CGR_types] = d
return items
```
#### File: master_thesis/hurst_exponent/hurst_example.py
```python
from app.helpers import draw_tree, euclidean_matrix, distance_tree
from hurst_exponent import h
from app.read_data import fasta_parser
################################################
# EXAMPLE 1
################################################
# read seq directly by pasting it
def example1():
seq = "AATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCT" \
"CTGAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGA" \
"GAGAGAAATCTTCTCTGAGAGAATCTTCTCTGAGAGAGAGAGAAATCTTCTC" \
"TGAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAG" \
"AGAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAATCTTCTCT" \
"GAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAGA" \
"GAGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATC" \
"TTCTCTGAGAGAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAGAG" \
"AGAAATCTTCTCTGAGAGAGAGAGAAATCTTCTCTGAGAGAGAGAGAAATCT" \
"TCTCTGAGAGAGAGAGAAATCTTCTCTGAGAG"
z = h.Hurst_CGR(seq)
    print(z.get_hurst())
z.plot_CGR()
################################################
# EXAMPLE 2
################################################
# read file from fasta with only one seq in it
# you can enable drawing chaos game representation
# and choose CGR type
def example2():
hurst = h.hurst_from_fasta("../in/one_file_test.fasta", True, CGR_types="RY")
for key in hurst.keys():
print(key, hurst[key])
################################################
# EXAMPLE 3
################################################
# read file from fasta with multiple seq in it
# fasta parser will divide it on sequences and species
def example3():
hurst = h.hurst_from_fasta("../in/SPARC_refseq_transcript.fasta")
for key in hurst.keys():
print(key, hurst[key])
################################################
# EXAMPLE 4
################################################
# save as tables from pandas to hmtl file
def example4():
hurst = h.hurst_from_fasta("../in/SPARC_refseq_transcript.fasta")
h.save_hurst_table(hurst)
################################################
# EXAMPLE 5
################################################
# return dictionary
def example5():
hurst = h.hurst_from_fasta("../in/SPARC_refseq_transcript.fasta")
for key in hurst.keys():
i = hurst[key]
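        # sort species by Hurst exponent value, ascending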
new_dict = {k: v for k, v in sorted(i.items(), key=lambda item: item[1])}
return new_dict
################################################
# EXAMPLE 6
################################################
# draw phylogenetic tree based on hurst_exponent values for species
def example6():
hurst = h.hurst_from_fasta("../in/SPARC_refseq_transcript.fasta", CGR_types=["RY"])
arr = []
for key in hurst["RY"].keys():
arr.append([hurst["RY"][key]])
draw_tree(arr, list(hurst["RY"].keys()), "SPARC gene phylogenetic tree - Hurst")
################################################
# EXAMPLE 7
################################################
# influenza virus neuraminidase
def example7():
from statistics import mean, stdev
hurst = h.hurst_from_fasta("../in/influenza_viruses", CGR_types=["WS"])
type_cgr = "WS"
arr = []
count_H1N1 = []
count_H7N3 = []
count_H5N1 = []
count_H2N2 = []
count_H7N9 = []
for key in hurst[type_cgr].keys():
arr.append([hurst[type_cgr][key]])
type_virus = key[-5:-1]
if type_virus == "H1N1":
count_H1N1.append(hurst[type_cgr][key])
elif type_virus == "H7N3":
count_H7N3.append(hurst[type_cgr][key])
elif type_virus == "H5N1":
count_H5N1.append(hurst[type_cgr][key])
elif type_virus == "H2N2":
count_H2N2.append(hurst[type_cgr][key])
elif type_virus == "H7N9":
count_H7N9.append(hurst[type_cgr][key])
file = open(f"Influenza_hurst_statistics_{type_cgr}.txt", "a")
file.write("H1N1 mean: " + str(mean(count_H1N1)) + " stdev: " +str(stdev(count_H1N1)) + "\n")
file.write("H7N3 mean: " + str(mean(count_H7N3)) + " stdev: " +str(stdev(count_H7N3)) + "\n")
file.write("H1N1 mean: " + str(mean(count_H5N1)) + " stdev: " +str(stdev(count_H5N1)) + "\n")
file.write("H1N1 mean: " + str(mean(count_H2N2)) + " stdev: " +str(stdev(count_H2N2)) + "\n")
file.write("H1N1 mean: " + str(mean(count_H7N9)) + " stdev: " +str(stdev(count_H7N9)) + "\n")
file.close()
draw_tree(arr, list(hurst[type_cgr].keys()), "Influenza phylogenetic tree - hurst_exponent method")
################################################
# EXAMPLE 8
################################################
# Euclidean-distance difference matrix of Hurst exponents
def example8():
data = fasta_parser("../in/influenza_viruses")
hurst_list = []
    for seq in data[1]:  # avoid shadowing the module alias h
        hurst = h.Hurst_CGR(seq).get_hurst()
        hurst_list.append(hurst)
    matrix = euclidean_matrix(hurst_list)
    title = "influenza viruses phylogenetic tree - Hurst method (EUCLIDEAN DISTANCE)"
organisms = data[0]
distance_tree(matrix, organisms, title)
# example1()
# example2()
# example3()
# example4()
# print (example5())
# example6()
# example7()
``` |
{
"source": "joannasmoura/intmed",
"score": 3
} |
#### File: intmed/clinica/forms.py
```python
from django import forms
from django.core.exceptions import ValidationError
from django.utils import timezone
from .models import Agenda
class AgendaAdminForm(forms.ModelForm):
id = forms.CharField(widget = forms.HiddenInput(),required=False)
def clean(self):
check = False
try:
agenda = Agenda.objects.get(medico=self.cleaned_data['medico'],dia=self.cleaned_data['dia'])
id = self.cleaned_data['id']
check = str(agenda.id) != str(id)
        except (Agenda.DoesNotExist, KeyError):
            id = None
            agenda = None
        if self.cleaned_data['dia'] < timezone.localdate():
raise ValidationError("Não pode criar agenda para um dia passado!")
if id != None and check:
raise ValidationError("Já existe uma agenda para esse médico no dia selecionado!")
```
#### File: intmed/clinica/views.py
```python
import datetime
from django.utils import timezone
from django.conf import settings
from django.shortcuts import get_object_or_404,render
from django.db.models import Q, Count
from rest_framework import status,exceptions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from django.http import HttpResponse,Http404
from .models import Consulta, Medico, Especialidade, Agenda,User,HorarioAgenda
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated,AllowAny
from .serializers import EspecialidadeSerializer, MedicoSerializer, AgendaSerializer, ConsultaSerializer,UserSerializer
from .filters import MedicoFilter, EspecialidadeFilter, AgendaFilter
from django.contrib.auth import get_user_model
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
# NOTE: "now" must be computed per request, not once at import time; a
# module-level timestamp goes stale in any long-running server process.
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
# Add custom claims
token['firstName'] = user.first_name
token['username'] = user.username
token['id'] = user.id
# ...
return token
class MyTokenObtainPairView(TokenObtainPairView):
serializer_class = MyTokenObtainPairSerializer
class UserCreate(generics.CreateAPIView):
User = get_user_model()
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (AllowAny, )
class MedicoList(generics.ListCreateAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = MedicoSerializer
queryset = Medico.objects.all()
filter_class = MedicoFilter
class EspecialidadeList(generics.ListCreateAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = EspecialidadeSerializer
queryset = Especialidade.objects.all()
filter_class = EspecialidadeFilter
class AgendaList(generics.ListCreateAPIView):
    permission_classes = (IsAuthenticated,)
    serializer_class = AgendaSerializer
    filter_class = AgendaFilter
    def get_queryset(self):
        # Evaluate "today" per request; a class-level queryset attribute would
        # freeze datetime.date.today() at import time.
        return Agenda.objects.all().order_by('dia').annotate(
            no_of_horarios=Count('horarios')
        ).filter(
            no_of_horarios__gte=1
        ).exclude(
            dia__lt=datetime.date.today(),
        )
class ConsultaList(generics.ListCreateAPIView):
permission_classes = (IsAuthenticated,)
queryset = Consulta.objects.all()
serializer_class = ConsultaSerializer
def get_queryset(self):
        timeNow = timezone.localtime(timezone.now())  # per request, not at import
        user = User.objects.get(username=self.request.user)
consultas = Consulta.objects.filter(owner__id=user.id).exclude(
horario_agenda__in= HorarioAgenda.objects.exclude(
agenda__dia__gte=timeNow.date(),
)
).exclude(
horario_agenda__agenda__dia=timeNow.date(),
horario_agenda__horario__hora__lte=timeNow.time()
).order_by(
'horario_agenda__agenda__dia',
'horario_agenda__horario__hora'
)
return consultas
def post(self, request, format=None):
if hasConsultaDiaHorario(request):
return Response(status=status.HTTP_400_BAD_REQUEST,data={"detail":"Você ja possui uma consulta marcada para esse dia e horário."})
if hasDiaHorarioPassed(request):
return Response(status=status.HTTP_400_BAD_REQUEST,data={"detail":"O dia e o horário que você está tentando marcar a consulta ja passaram."})
if hasDiaHorarioBeenFilled(request):
return Response(status=status.HTTP_400_BAD_REQUEST,data={"detail":"A data e o horario que você está tentando marcar não estão disponíveis para esse médico!"})
serializer = ConsultaSerializer(data=request.data,context={'request':request})
if serializer.is_valid():
serializer.save(owner=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
try:
consulta = self.get_object()
except Consulta.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if consulta.owner != request.user:
return Response(status=status.HTTP_403_FORBIDDEN,data={"detail":"Apenas o usuário que marcou a consulta pode desmarcá-la."})
if consulta.horario_agenda.agenda.dia < datetime.date.today():
return Response(status=status.HTTP_406_NOT_ACCEPTABLE,data={"detail":"Não é possível desmarcar pois a data da consulta ja passou."})
        # Grab the slot before deleting the booking, then free it
        ha = consulta.horario_agenda
        consulta.delete()
        ha.disponivel = True
        ha.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def index(request):
    # NOTE: permission_classes has no effect in a plain Django view; use
    # @login_required here if authentication is required.
consultas = Consulta.objects.all()
context = {'consultas':consultas}
return render(request, 'clinica/index.html', context)
def hasConsultaDiaHorario(request):
user = User.objects.get(username=request.user)
agenda = Agenda.objects.get(pk=request.data['agenda_id'])
horario = request.data['horario']
try:
consultaExistente = Consulta.objects.get(owner=user,horario_agenda__agenda__dia=agenda.dia,horario_agenda__horario__hora=horario)
    except Consulta.DoesNotExist:
return False
return consultaExistente
def hasDiaHorarioPassed(request):
try:
agenda = Agenda.objects.get(pk=request.data['agenda_id'])
    except (Agenda.DoesNotExist, KeyError):
return False
    horario = request.data['horario']
    if agenda.dia < timezone.localdate():
return True
# elif horario < str(timeNow.time()):
# return True
else:
return False
def hasDiaHorarioBeenFilled(request):
agenda = Agenda.objects.get(pk=request.data['agenda_id'])
horario = request.data['horario']
try:
consultaExistente = Consulta.objects.get(horario_agenda__agenda__dia=agenda.dia,horario_agenda__horario__hora=horario)
    except Consulta.DoesNotExist:
return False
return consultaExistente
``` |
{
"source": "joannatao97/django-cyoa",
"score": 3
} |
#### File: cyoa/adventures/models.py
```python
from django.db import models
from sortedm2m.fields import SortedManyToManyField
# Create your models here.
# based on the Django tutorials
# Question class: Question text, page title, choices shown, input field if needed
class Question(models.Model):
question_text = models.TextField(max_length=64000)
title = models.CharField(max_length=200)
choices = SortedManyToManyField('Choice', blank=True)
input_field = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.title
# Choice class: leads to a question, has text/label
class Choice(models.Model):
to_question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=1000)
def __str__(self):
return self.choice_text
# Adventure class: leads to first question, has a title and description
class Adventure(models.Model):
first_question = models.ForeignKey(Question, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.CharField(max_length=200, blank=True)
def __str__(self):
return self.title
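# Hedged usage sketch (e.g. from a Django shell); names and text are illustrative:
# end = Question.objects.create(question_text="The End", title="End")
# start = Question.objects.create(question_text="You wake up...", title="Start")
# start.choices.add(Choice.objects.create(to_question=end, choice_text="Sleep on"))
# Adventure.objects.create(first_question=start, title="A Short Nap")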
``` |
{
"source": "joanna-vas/Programming-for-Data-Science-with-Python",
"score": 4
} |
#### File: joanna-vas/Programming-for-Data-Science-with-Python/Explore US Bikeshare Data_Ioanna_Vasilopoulou.py
```python
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
city = input("\nWhich city are you interested in? Chicago, New York City or Washington?\n").lower()
if city not in ('chicago', 'new york city', 'washington'):
print("Oops! Something wasn't quite right. Please, try again!")
continue
else:
break
# TO DO: get user input for month (all, january, february, ... , june)
while True:
month = input("\nWhich month are you interested in? January, February, March, April, May, June or all?\n").lower()
if month not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):
print("Oops! Something wasn't quite right. Please, try again!")
continue
else:
break
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
while True:
day = input("\nAre you interested in a specific day? If so please type in the day you want or simply select them all.\n").lower()
if day not in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all'):
print("Oops! Something wasn't quite right. Please, try again!")
continue
else:
break
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
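# Hedged usage sketch (assumes chicago.csv is present in the working directory):
# df = load_data('chicago', 'march', 'friday')   # March Fridays only
# df = load_data('chicago', 'all', 'all')        # no filtering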
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# TO DO: display the most common month
mc_month = df['month'].mode()[0]
print('Most Common Month:', mc_month)
# TO DO: display the most common day of week
mc_day = df['day_of_week'].mode()[0]
print('Most Common day:', mc_day)
# TO DO: display the most common start hour
df['hour'] = df['Start Time'].dt.hour
mc_hour = df['hour'].mode()[0]
print('Most Common Hour:', mc_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
Start_Station = df['Start Station'].value_counts().idxmax()
print('Most commonly used start station:', Start_Station)
# TO DO: display most commonly used end station
End_Station = df['End Station'].value_counts().idxmax()
print('\nMost commonly used end station:', End_Station)
# TO DO: display most frequent combination of start station and end station trip
    Station_Combination = df.groupby(['Start Station', 'End Station']).size().idxmax()
    print('\nMost frequent combination of start station and end station trip:', Station_Combination[0], " & ", Station_Combination[1])
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# TO DO: display total travel time
Total_Travel_Time = sum(df['Trip Duration'])
print('Total travel time:', Total_Travel_Time/86400, " Days")
# TO DO: display mean travel time
Mean_Travel_Time = df['Trip Duration'].mean()
print('Mean travel time:', Mean_Travel_Time/60, " Minutes")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# TO DO: Display counts of user types
user_types = df['User Type'].value_counts()
print('User Types:\n', user_types)
# TO DO: Display counts of gender
try:
gender_types = df['Gender'].value_counts()
print('\nGender Types:\n', gender_types)
except KeyError:
print("\nGender Types:\nNo data available for this month.")
# TO DO: Display earliest, most recent, and most common year of birth
try:
Earliest_Year = df['Birth Year'].min()
print('\nEarliest Year:', Earliest_Year)
except KeyError:
print("\nEarliest Year:\nNo data available for this month.")
try:
Most_Recent_Year = df['Birth Year'].max()
print('\nMost Recent Year:', Most_Recent_Year)
except KeyError:
print("\nMost Recent Year:\nNo data available for this month.")
try:
Most_Common_Year = df['Birth Year'].value_counts().idxmax()
print('\nMost Common Year:', Most_Common_Year)
except KeyError:
print("\nMost Common Year:\nNo data available for this month.")
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data(df):
""" Ask if the user would like to see some lines of data from the specific dataset.
Display first 5 lines. Continue asking whether they want to see more until user says no.
"""
display_rows = 5
starting_row = 0
ending_row = display_rows - 1
while True:
raw_data = input("\nWould you like to see the raw data for the results\n Yes or No?\n").lower()
if raw_data == "yes":
print("\nThese are rows from {} to {}".format(starting_row + 1, ending_row + 1))
print('\n', df.iloc[starting_row : ending_row + 1])
starting_row += display_rows
ending_row += display_rows
print("\nWould you like to see {} more rows?\n".format(display_rows))
continue
else:
break
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
display_raw_data(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
``` |
{
"source": "JoannaWangBK/Attention-Based-Aspect-Extraction",
"score": 3
} |
#### File: Attention-Based-Aspect-Extraction/code/w2vEmbReader.py
```python
import logging
import os
import re
import numpy as np
import gensim
from sklearn.cluster import KMeans
import pymorphy2
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
class W2VEmbReader:
def __init__(self, data_path, emb_name):
self.data_path = data_path
if os.path.sep not in emb_name:
emb_path = os.path.join(data_path, emb_name)
else:
emb_path = emb_name
logger.info('Loading embeddings from: ' + emb_path)
self.embeddings = {}
emb_matrix = []
model = gensim.models.KeyedVectors.load(emb_path)
self.emb_dim = model.vector_size
for word in model.wv.vocab:
self.embeddings[word] = list(model[word])
emb_matrix.append(list(model[word]))
# if emb_dim != None:
# assert self.emb_dim == len(self.embeddings['nice'])
self.vector_size = len(self.embeddings)
self.emb_matrix = np.asarray(emb_matrix)
self.aspect_size = None
logger.info(' #vectors: %i, #dimensions: %i' % (self.vector_size, self.emb_dim))
def get_emb_given_word(self, word):
try:
return self.embeddings[word]
except KeyError:
return None
def get_emb_matrix_given_vocab(self, vocab, emb_matrix):
counter = 0.
for word, index in vocab.items():
try:
emb_matrix[index] = self.embeddings[word]
counter += 1
except KeyError:
pass
logger.info(
'%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), 100 * counter / len(vocab)))
# L2 normalization
norm_emb_matrix = emb_matrix / np.linalg.norm(emb_matrix, axis=-1, keepdims=True)
return norm_emb_matrix
def get_aspect_matrix(self, n_clusters=0):
seed_words_path = os.path.join(self.data_path, "seed_words.txt")
self.aspect_size = n_clusters
km = KMeans(n_clusters=n_clusters)
km.fit(self.emb_matrix)
km_aspects = km.cluster_centers_
if os.path.exists(seed_words_path):
aspects = []
morph = pymorphy2.MorphAnalyzer()
with open(seed_words_path) as f:
for line in f:
one_aspect = []
for word in re.split('\W+', line.lower()):
w = morph.parse(word)[0].normal_form
if w in self.embeddings:
one_aspect.append(self.embeddings[w])
if one_aspect:
one_aspect = np.mean(one_aspect, axis=0)
else:
print("Not initialized:\t" + line)
one_aspect = np.random.random((self.emb_dim,))
aspects.append(one_aspect)
self.aspect_size += len(aspects)
aspects = np.concatenate((km_aspects, np.stack(aspects)), axis=0)
else:
aspects = km_aspects
# L2 normalization
norm_aspect_matrix = aspects / np.linalg.norm(aspects, axis=-1, keepdims=True)
return norm_aspect_matrix
def get_emb_dim(self):
return self.emb_dim
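# Hedged usage sketch; the data path, embedding name and aspect count are
# illustrative assumptions:
# emb_reader = W2VEmbReader('../preprocessed_data', 'w2v_embedding')
# aspect_init = emb_reader.get_aspect_matrix(n_clusters=14)  # aspect-layer init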
``` |
{
"source": "joannawetesko/django-channels-celery",
"score": 2
} |
#### File: app/example/models.py
```python
from django.db import models
class CeleryTask(models.Model):
name = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
finished = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=255, null=True, blank=True)
celery_task_id = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return self.name
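# Hedged usage sketch from inside a bound Celery task (bind=True); field values
# are illustrative and django.utils.timezone is assumed to be imported:
# task = CeleryTask.objects.create(name='example', status='STARTED',
#                                  celery_task_id=self.request.id)
# ... do the work ...
# task.status, task.finished = 'SUCCESS', timezone.now()
# task.save()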
``` |
{
"source": "JoanneBogart/skyCatalogs",
"score": 2
} |
#### File: desc/skycatalogs/create_catalog.py
```python
import os
import re
import argparse
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from astropy.coordinates import SkyCoord
import h5py
import sqlite3
import GCRCatalogs
from desc.skycatalogs.utils.common_utils import print_date, print_callinfo
# from dm stack
from dustmaps.sfd import SFDQuery
"""
Code to create a sky catalog for a particular object type
"""
pixels = [9556, 9683, 9684, 9812, 9813, 9940]
'''
Dict of MW av column names and multipliers needed to create from ebv, MW_rv
Multipliers come from
https://iopscience.iop.org/article/10.1088/0004-637X/737/2/103#apj398709t6
appendix, table 6
'''
MW_extinction_bands = {'MW_av_lsst_u' : 4.145, 'MW_av_lsst_g' : 3.237,
'MW_av_lsst_r' : 2.273, 'MW_av_lsst_i' : 1.684,
'MW_av_lsst_z' : 1.323, 'MW_av_lsst_y' : 1.088}
# Unused for now. This schema is not the same as the one taken from the data,
# probably because of the indexing in the schema derived from a pandas df.
def make_galaxy_schema():
fields = [pa.field('galaxy_id', pa.int64()),
pa.field('ra', pa.float64() , True),
## metadata={"units" : "radians"}),
pa.field('dec', pa.float64() , True),
## metadata={"units" : "radians"}),
pa.field('redshift', pa.float64(), True),
pa.field('shear_1', pa.float64(), True),
pa.field('shear_2', pa.float64(), True),
pa.field('convergence', pa.float64(), True),
pa.field('size_bulge_true', pa.float32(), True),
pa.field('size_minor_bulge_true', pa.float32(), True),
pa.field('sersic_bulge', pa.float32(), True),
pa.field('size_disk_true', pa.float32(), True),
pa.field('size_minor_disk_true', pa.float32(), True),
pa.field('sersic_disk', pa.float32(), True),
pa.field('position_angle', pa.float64(), True),
pa.field('sed_val_bulge', pa.list_(pa.float64()), True),
pa.field('sed_val_disk', pa.list_(pa.float64()), True),
pa.field('internalAv_bulge', pa.float64(), True),
pa.field('internalRv_bulge', pa.float64(), True),
pa.field('internalAv_disk', pa.float64(), True),
pa.field('internalRv_disk', pa.float64(), True),
pa.field('bulge_magnorm', pa.float64(), True),
pa.field('disk_magnorm', pa.float64(), True),
pa.field('MW_rv', pa.float32(), True),
pa.field('MW_av_lsst_u', pa.float32(), True),
pa.field('MW_av_lsst_g', pa.float32(), True),
pa.field('MW_av_lsst_r', pa.float32(), True),
pa.field('MW_av_lsst_i', pa.float32(), True),
pa.field('MW_av_lsst_z', pa.float32(), True),
pa.field('MW_av_lsst_y', pa.float32(), True)]
return pa.schema(fields)
def make_star_schema():
'''
Minimal schema for non-variable stars. For variables will need to add fields
to express variability. Will also likely have to make changes to accomodate SNe.
If AGN also go in this file will need to include gamma1, gamma2, kappa.
Could add field for galactic extinction model, but currently it's always 'CCM'
so will put it in config.
'''
fields = [pa.field('object_type', pa.string(), False),
pa.field('id', pa.int64(), False),
pa.field('ra', pa.float64(), False),
pa.field('dec', pa.float64(), False),
pa.field('host_galaxy_id', pa.int64(), True),
pa.field('magnorm', pa.float64(), True),
pa.field('sed_filepath', pa.string(), True),
pa.field('MW_rv', pa.float32(), True),
pa.field('MW_av_lsst_u', pa.float32(), True),
pa.field('MW_av_lsst_g', pa.float32(), True),
pa.field('MW_av_lsst_r', pa.float32(), True),
pa.field('MW_av_lsst_i', pa.float32(), True),
pa.field('MW_av_lsst_z', pa.float32(), True),
pa.field('MW_av_lsst_y', pa.float32(), True)]
return pa.schema(fields)
def create_galaxy_catalog(parts, area_partition, output_dir=None,
galaxy_truth=None, sedLookup_dir=None,
output_type='parquet'):
"""
Parameters
----------
parts Segments for which catalog is to be generated. If partition
type is HEALpix, parts would be a collection of HEALpix pixels
area_partition Dict characterizing partition; e.g. HEALpix, nside=<something>
output_dir Where to put created sky catalog. Defaults to
current directory.
galaxy_truth GCRCatalogs name for galaxy truth (e.g. cosmoDC2)
sedLookup_dir Where to find files with some per-galaxy information relevant
to finding and using appropriate SED file
output_type A format. For now only parquet is supported
Might want to add a way to specify template for output file name
and template for input sedLookup file name.
Returns
-------
Dict describing catalog produced
"""
# Following directory contains per-healpix pixel files, each of which
# has some per-galaxy information, including internal Av, Rv for
# disk and bulge components, fluxes, mags for lsst bands, appropriate
# index into sed_names array (which is typically around a 1000 entries,
# whereas #galaxies is of order 10 million) and so forth.
_sedLookup_dir = '/global/cfs/cdirs/lsst/groups/SSim/DC2/cosmoDC2_v1.1.4/sedLookup'
_cosmo_cat = 'cosmodc2_v1.1.4_image_addon_knots'
if area_partition['type'] != 'healpix':
raise NotImplementedError('Unknown partition type ')
if output_type != 'parquet':
        raise NotImplementedError('Unknown output type')
gal_truth = galaxy_truth
if gal_truth is None:
gal_truth = _cosmo_cat
print('gal_truth is ', gal_truth)
# If multiprocessing probably should defer this to create_pixel
gal_cat = GCRCatalogs.load_catalog(gal_truth)
lookup = sedLookup_dir
if lookup is None:
lookup = _sedLookup_dir
arrow_schema = make_galaxy_schema()
for p in parts:
print("Starting on pixel ", p)
print_date()
create_galaxy_pixel(p, area_partition, output_dir, gal_cat, lookup,
arrow_schema, output_type)
print("completed pixel ", p)
print_date()
def create_galaxy_pixel(pixel, area_partition, output_dir, gal_cat, lookup_dir,
arrow_schema, output_type='parquet'):
# Filename templates: input (sedLookup) and our output. Hardcode for now.
sedLookup_template = 'sed_fit_{}.h5'
output_template = 'galaxy_{}.parquet'
tophat_bulge_re = r'sed_(?P<start>\d+)_(?P<width>\d+)_bulge'
tophat_disk_re = r'sed_(?P<start>\d+)_(?P<width>\d+)_disk'
# Number of rows to include in a row group
stride = 1000000
# to_fetch = all columns of interest in gal_cat
non_sed = ['galaxy_id', 'ra', 'dec', 'redshift', 'shear_1',
'shear_2',
'convergence', 'position_angle_true',
'size_bulge_true', 'size_minor_bulge_true', 'sersic_bulge',
'size_disk_true', 'size_minor_disk_true', 'sersic_disk']
# Find all the tophat sed numbers
q = gal_cat.list_all_quantities()
sed_bulge_names = [i for i in q if (i.startswith('sed') and
i.endswith('bulge'))]
sed_disk_names = [i for i in q if (i.startswith('sed') and
i.endswith('disk'))]
#Sort sed columns by start value, descending
def _sed_bulge_key(s):
return int(re.match(tophat_bulge_re, s)['start'])
def _sed_disk_key(s):
return int(re.match(tophat_disk_re, s)['start'])
sed_bulge_names.sort(key=_sed_bulge_key, reverse=True)
sed_disk_names.sort(key=_sed_disk_key, reverse=True)
#Fetch the data
to_fetch = non_sed + sed_bulge_names + sed_disk_names
# Save the 'start' and 'width' values; they'll be needed for our output
# config. Though we only need to do this once, not once per pixel
tophat_parms = []
for s in sed_bulge_names:
m = re.match(tophat_bulge_re, s)
if m:
tophat_parms.append((m['start'], m['width']))
df = gal_cat.get_quantities(to_fetch,
native_filters=f'healpix_pixel=={pixel}')
# Re-form sed columns into two arrays
bulge_seds = (np.array([df[sbn] for sbn in sed_bulge_names]).T).tolist()
disk_seds = (np.array([df[sdn] for sdn in sed_disk_names]).T).tolist()
# Look up internal A_v, R_v
    with h5py.File(os.path.join(lookup_dir,
                   sedLookup_template.format(pixel)), 'r') as lookup:
lookup_gid = np.array(lookup['galaxy_id'])
bulge_av = np.array(lookup['bulge_av'])
bulge_rv = np.array(lookup['bulge_rv'])
disk_av = np.array(lookup['disk_av'])
disk_rv = np.array(lookup['disk_rv'])
# The following will be of interest when using file sed from our
# lookup file
# Note shape of bulge_magnorm, disk_magnorm is (6, #objects)
# Pick a middle column to use
magnorm_col = 3
bulge_magnorm = np.array(lookup['bulge_magnorm'][magnorm_col])
disk_magnorm = np.array(lookup['disk_magnorm'][magnorm_col])
print('bulge_magnorm shape: ', bulge_magnorm.shape)
print('disk_magnorm shape: ', disk_magnorm.shape)
# Check that galaxies match and are ordered the same way
cosmo_gid = np.array(df['galaxy_id'])
if cosmo_gid.shape != lookup_gid.shape:
print('#lookup galaxies != #cosmodc2 galaxies')
exit(1)
if not (cosmo_gid == lookup_gid).all():
print('lookup galaxies differ from cosmodc2 galaxies in content or ordering')
exit(1)
# Assume R(V) = 3.1. Calculate A(V) from R(V), E(B-V). See "Plotting
# Dust Maps" example in
# https://dustmaps.readthedocs.io/en/latest/examples.html
MW_rv_constant = 3.1
MW_rv = np.full_like(df['sersic_bulge'], MW_rv_constant)
MW_av_columns = make_MW_extinction(df['ra'], df['dec'],
MW_rv_constant,MW_extinction_bands)
#MW_av = 2.742 * ebv_raw
print("Made extinction")
# Write row groups of size stride (or less) until input is exhausted
    total_row = lookup_gid.shape[0]  # slice upper bound; include the final row
u_bnd = min(stride, total_row)
l_bnd = 0
rg_written = 0
writer = None
while u_bnd > l_bnd:
out_dict = { k : df[k][l_bnd : u_bnd] for k in non_sed if k != 'position_angle_true'}
out_dict['position_angle'] = np.radians(df['position_angle_true'][l_bnd : u_bnd])
out_dict['sed_val_bulge'] = bulge_seds[l_bnd : u_bnd]
out_dict['sed_val_disk'] = disk_seds[l_bnd : u_bnd]
out_dict['internalAv_bulge'] = bulge_av[l_bnd : u_bnd]
out_dict['internalRv_bulge'] = bulge_rv[l_bnd : u_bnd]
out_dict['internalAv_disk'] = disk_av[l_bnd : u_bnd]
out_dict['internalRv_disk'] = disk_rv[l_bnd : u_bnd]
out_dict['bulge_magnorm'] = bulge_magnorm[l_bnd : u_bnd]
out_dict['disk_magnorm'] = disk_magnorm[l_bnd : u_bnd]
out_dict['MW_rv'] = MW_rv[l_bnd : u_bnd]
for k,v in MW_av_columns.items():
out_dict[k] = v[l_bnd : u_bnd]
out_df = pd.DataFrame.from_dict(out_dict)
out_table = pa.Table.from_pandas(out_df, schema=arrow_schema)
if not writer:
#arrow_schema = out_table.schema
writer = pq.ParquetWriter(os.path.join(output_dir, output_template.format(pixel)), arrow_schema)
writer.write_table(out_table)
rg_written += 1
l_bnd = u_bnd
u_bnd = min(l_bnd + stride, total_row)
    if writer:
        writer.close()
print("# row groups written: ", rg_written)
def create_pointsource_pixel(pixel, area_partition, output_dir, arrow_schema,
star_cat=None, sn_cat=None,
output_type='parquet'):
if not star_cat and not sn_cat:
print("no point source inputs specified")
return
output_template = 'pointsource_{}.parquet'
if star_cat:
# Get data for this pixel
cols = ','.join(['simobjid as id', 'ra', 'decl as dec', 'magNorm as magnorm',
'sedFilename as sed_filepath'])
q = f'select {cols} from stars where hpid={pixel} '
with sqlite3.connect(star_cat) as conn:
star_df = pd.read_sql_query(q, conn)
nobj = len(star_df['id'])
print(f"Found {nobj} stars")
star_df['object_type'] = np.full((nobj,), 'star')
star_df['host_galaxy_id'] = np.zeros((nobj,), np.int64())
MW_rv_constant = 3.1
star_df['MW_rv'] = np.full((nobj,), MW_rv_constant, np.float32())
MW_av_columns = make_MW_extinction(np.array(star_df['ra']),
np.array(star_df['dec']),
MW_rv_constant, MW_extinction_bands)
for k,v in MW_av_columns.items():
# No need for multiple row groups. Data size is small.
#star_df[k] = v[l_bnd : u_bnd]
star_df[k] = v
out_table = pa.Table.from_pandas(star_df, schema=arrow_schema)
print("created arrow table from dataframe")
writer = pq.ParquetWriter(os.path.join(output_dir, output_template.format(pixel)), arrow_schema)
writer.write_table(out_table)
writer.close()
if sn_cat:
raise NotImplementedError('SNe not yet supported. Have a nice day.')
return
def make_MW_extinction(ra, dec, MW_rv_constant, band_dict):
'''
Given arrays of ra & dec, fixed Rv and per-band column names
and multipliers, create a MW Av column for each band
Parameters:
ra, dec - arrays specifying positions where Av is to be computed
MW_rv - single constant value for Rv
band_dict - keys are column names; values are multipliers
Return:
dict with keys = column names. Value for each column is array of
Av values for a particular band at the ra,dec positions
'''
sfd = SFDQuery()
ebv_raw = np.array(sfd.query_equ(ra, dec))
av_dict = {}
for k,v in band_dict.items():
av_dict[k] = MW_rv_constant * v * ebv_raw
return av_dict
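# Hedged usage sketch; the coordinates are illustrative and dustmaps needs the
# SFD data files installed locally:
# av = make_MW_extinction(np.array([56.0]), np.array([-36.1]), 3.1,
#                         MW_extinction_bands)
# av['MW_av_lsst_r']   # one A_V value per input position, LSST r band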
# May want a base truth class for this
# class GalaxyTruth():
# """
# Responsible for reading from a source like CosmoDC2 catalog
# and making available to GalaxySkyCatalog object
# """
def create_pointsource_catalog(parts, area_partition, output_dir=None,
star_truth=None, sne_truth=None,
output_type='parquet'):
"""
Parameters
----------
parts Segments for which catalog is to be generated. If partition
type is HEALpix, parts would be a collection of HEALpix pixels
area_partition Dict characterizing partition; e.g.
HEALpix, nside=<something>
output_dir Where to put created sky catalog. Defaults to
current directory.
star_truth Where to find star parameters. If None, omit stars
sne_truth Where to find SNe parameters. If None, omit SNe
output_type A format. For now only parquet is supported
Might want to add a way to specify template for output file name
Returns
-------
Dict describing catalog produced
"""
# For now fixed location for star, SNe parameter files.
_star_db = '/global/cfs/cdirs/lsst/groups/SSim/DC2/dc2_stellar_healpixel.db'
##_sn_db = '/global/projecta/projectdirs/lsst/groups/SSim/DC2/cosmoDC2_v1.1.4/sne_cosmoDC2_v1.1.4_MS_DDF.db'
_sn_db = '/global/cfs/cdirs/lsst/groups/SSim/DC2/cosmoDC2_v1.1.4/sne_cosmoDC2_v1.1.4_MS_DDF_healpix.db'
if area_partition['type'] != 'healpix':
raise NotImplementedError('Unknown partition type ')
if output_type != 'parquet':
        raise NotImplementedError('Unknown output type')
arrow_schema = make_star_schema()
# Need a way to indicate which object types to include; deal with that
# later. For now, default is stars only. Use default star parameter file.
for p in parts:
print("Point sources. Starting on pixel ", p)
print_date()
create_pointsource_pixel(p, area_partition, output_dir, arrow_schema,
star_cat=_star_db)
print("completed pixel ", p)
print_date()
# Try it out
# Note: root dir for SED files is $SIMS_SED_LIBRARY_DIR, defined by
# LSST Science Pipelines setup
# This applies for galaxy, star and AGN SEDs.
if __name__ == "__main__":
'''
Create sky catalogs for one or more healpixels. Invoke with --help
for details
'''
# For now partitioning is fixed
area_partition = {'type' : 'healpix', 'ordering' : 'ring', 'nside' : 32}
parser = argparse.ArgumentParser(description='''
Create Sky Catalogs. By default create a galaxy catalog for a
single healpix pixel''')
parser.add_argument('--pointsource', action='store_true',
help='if used, create point source catalog(s)')
parser.add_argument('--no-galaxies', action='store_true',
help='if used galaxy catalogs will NOT be created')
parser.add_argument('--pixels', type=int, nargs='*', default=[9556],
help='healpix pixels for which catalogs will be created')
out_dir = os.path.join(os.getenv('SCRATCH'), 'desc', 'skycatalogs', 'test')
parser.add_argument('--output-dir', help='directory for output files',
default=out_dir)
args = parser.parse_args()
print_callinfo('create_catalog', args)
##parts = pixels[0:1]
output_dir = args.output_dir
parts = args.pixels
print('Starting with healpix pixel ', parts[0])
if not args.no_galaxies:
print("Creating galaxy catalogs")
create_galaxy_catalog(parts, area_partition, output_dir=output_dir)
if args.pointsource:
print("Creating point source catalogs")
create_pointsource_catalog(parts, area_partition, output_dir=output_dir)
print('All done')
```
#### File: desc/skycatalogs/skyCatalogs.py
```python
import os
import re
import yaml
from collections import namedtuple
import healpy
import numpy as np
import numpy.ma as ma
import pyarrow.parquet as pq
from astropy import units
from desc.skycatalogs.objects import *
from desc.skycatalogs.readers import *
from desc.skycatalogs.readers import ParquetReader
__all__ = ['SkyCatalog', 'open_catalog', 'Box', 'Disk']
Box = namedtuple('Box', ['ra_min', 'ra_max', 'dec_min', 'dec_max'])
# radius is measured in arcseconds
Disk = namedtuple('Disk', ['ra', 'dec', 'radius_as'])
_aDisk = Disk(1.0, 1.0, 1.0)
_aBox = Box(-1.0, 1.0, -2.0, 2.0)
# This function should maybe be moved to utils
def _get_intersecting_hps(hp_ordering, nside, region):
'''
Given healpixel structure defined by hp_ordering and nside, find
    all healpixels which intersect region, defined by min/max ra and dec
Return as some kind of iterable
Note it's possible extra hps which don't actually intersect the region
will be returned
'''
# First convert region description to an array (4,3) with (x,y,z) coords
# for each vertex
if type(region) == type(_aBox):
vec = healpy.pixelfunc.ang2vec([region.ra_min, region.ra_max,
region.ra_max, region.ra_min],
[region.dec_min, region.dec_min,
region.dec_max, region.dec_max],
lonlat=True)
return healpy.query_polygon(nside, vec, inclusive=True, nest=False)
if type(region) == type(_aDisk):
        # Convert inputs to the types query_disc expects (a single (x, y, z) vector)
        center = healpy.pixelfunc.ang2vec(region.ra, region.dec,
                                          lonlat=True)
radius_rad = (region.radius_as * units.arcsec).to_value('radian')
return healpy.query_disc(nside, center, radius_rad, inclusive=True,
nest=False)
class SkyCatalog(object):
'''
A base class with derived classes for galaxies, static (w.r.t. coordinates)
point sources, SSOs
'''
def __init__(self, config, mp=False):
'''
Parameters
----------
config: dict. Typically the result of loading a yaml file
mp: boolean Default False. Set True to enable multiprocessing.
'''
self._config = config
self._mp = mp
# There may be more to do at this point but not too much.
# In particular, don't read in anything from data files
# One might check that the config is complete and well-formed
# - for example require certain keys, such as catalog_name,
        # data_file_type, area_partition, root_directory, object_types -
# to exist, and check that that the data directory (value of
# root_directory) exists.
# create an empty dict for
# per-HEALpix pixel information and so forth.
self._validate_config()
# Outer dict: hpid for key. Value is another dict
# with keys 'files', 'object_types', each with value another dict
# for 'files', map filepath to handle (initially None)
# for 'object_types', map object type to filepath
self._hp_info = dict()
self._find_all_hps()
def _validate_config(self):
pass
def _find_all_hps(self):
# for each healpix with files matching pattern in the directory,
# update _hp_info
# self._hp_info[hpid]['files'] =
# {'relpath' : the_filename, 'handle' : None} and,
# for each object_type represented in the file,
# self._hp_info[hpid]['object_types'][ot] = the_filename
# When file is open, set handle to the Parquet file object
# (or perhaps something else if underlying format is not Parquet)
cat_dir = self._config['root_directory']
# If major organization is by healpix, healpix # will be in
# subdirectory name. Otherwise there may be no subdirectories
# or data may be organized by component type.
# Here only handle case where data files are directly in root dir
files = os.listdir(cat_dir)
o_types = self._config['object_types']
hp_set = set()
for f in files:
for ot in o_types:
if 'file_template' in o_types[ot]:
m = re.match(o_types[ot]['file_template'], f)
if m:
hp = int(m['healpix'])
hp_set.add(hp)
if hp not in self._hp_info:
self._hp_info[hp] = {'files' : {f : None},
'object_types' : {ot : f}}
else:
this_hp = self._hp_info[hp]
if f not in this_hp['files'] :
this_hp['files'][f] = None
this_hp['object_types'][ot] = f
return hp_set
def get_hps_by_region(self, region):
'''
Region can be a box (named 4-tuple (min-ra, max-ra, min-dec, max-dec))
or a circle (named 3-tuple (ra, dec, radius))
Catalog area partition must be by healpix
'''
# If area_partition doesn't use healpix, raise exception
return _get_intersecting_hps(
self._config['area_partition']['ordering'],
self._config['area_partition']['nside'],
region)
#.intersection(self._hps.keys())
def get_object_type_names(self):
return set(self._config['object_types'].keys())
# Add more functions to return parts of config of possible interest
# to user
def get_objects_by_region(self, datetime, region, obj_type_set=None):
'''
Parameters
----------
datetime Python datetime object.
region region is a named tuple. May be box or circle
obj_type_set Return only these objects. Defaults to all available
Returns
-------
ObjectList containing sky objects visible in the region
[at the specified time]
'''
# Take intersection of obj_type_list and available object types
# Determine healpix intersecting the region
print("Region ", region)
print("obj_type_set ", obj_type_set)
if self._config['area_partition']['type'] == 'healpix':
hps = self.get_hps_by_region(region)
# otherwise raise a not-supported exception
object_list = ObjectList()
if obj_type_set is None:
obj_types = self.get_object_type_names()
else:
obj_types = self.get_object_type_names().intersection(obj_type_set)
for hp in hps:
# Maybe have a multiprocessing switch? Run-time option when
# catalog is opened?
c = self.get_objects_by_hp(datetime, hp, region, obj_types)
if (len(c)) > 0:
###obj_colls = obj_colls + c
object_list.append_object_list(c)
return object_list
def get_object_iterator_by_region(self, datetime, region=None,
obj_type_set=None, max_chunk=None):
'''
Parameters
----------
datetime Python datetime object.
region min & max for ra and dec
obj_type_set Return only these objects. Defaults to all available
max_chunk If specified, iterator will return no more than this
        number of objects per iteration
Returns
-------
An iterator
'''
pass
def get_objects_by_hp(self, datetime, hp, region=None, obj_type_set=None):
# Find the right Sky Catalog file or files (depends on obj_type_set)
# Get file handle(s) and store if we don't already have it (them)
# Returns: ObjectList containing sky objects in the region and the hp
object_list = ObjectList()
G_COLUMNS = ['galaxy_id', 'ra', 'dec']
print('Working on healpix pixel ', hp)
obj_types = obj_type_set
if obj_types is None:
obj_types = self._config['object_types'].keys()
else:
parents = set()
for ot in obj_types:
if 'parent' in self._config['object_types'][ot]:
parents.add(self._config['object_types'][ot]['parent'])
obj_types = obj_types.union(parents)
# Associate object types with readers. May be > one type per reader
rdr_ot = dict()
root_dir = self._config['root_directory']
for ot in obj_types:
if 'file_template' in self._config['object_types'][ot]:
f = self._hp_info[hp]['object_types'][ot]
elif 'parent' in self._config['object_types'][ot]:
                f = self._hp_info[hp]['object_types'][self._config['object_types'][ot]['parent']]
if f not in self._hp_info[hp]:
the_reader = parquet_reader.ParquetReader(os.path.join(root_dir,f), mask=None)
self._hp_info[hp][f] = the_reader
the_reader = self._hp_info[hp][f]
if the_reader in rdr_ot:
rdr_ot[the_reader].add(ot)
else:
rdr_ot[the_reader] = set([ot])
# Now get minimal columns for objects using the readers
for rdr in rdr_ot:
if 'galaxy' in rdr_ot[rdr]:
arrow_t = rdr.read_columns(G_COLUMNS, None) # or read_row_group
# Make a boolean array, value set to 1 for objects
# outside the region
if region is not None:
if type(region) == type(_aBox):
mask = np.logical_or((arrow_t['ra'] < region.ra_min),
(arrow_t['ra'] > region.ra_max))
mask = np.logical_or(mask, (arrow_t['dec'] < region.dec_min))
mask = np.logical_or(mask, (arrow_t['dec'] > region.dec_max))
if type(region) == type(_aDisk):
# Change positions to 3d vectors to measure distance
p_vec = healpy.pixelfunc.ang2vec(arrow_t['ra'],
arrow_t['dec'],
lonlat=True)
                        c_vec = healpy.pixelfunc.ang2vec([region.ra],
[region.dec],
lonlat=True)[0]
# change disk radius to radians
radius_rad = (region.radius_as * units.arcsec).to_value('radian')
inners = [np.dot(pos, c_vec) for pos in p_vec]
mask = np.arccos(inners) > radius_rad
else:
mask = None
if mask is not None:
masked_ra = ma.array(arrow_t['ra'], mask=mask)
print("Masked array size: ", masked_ra.size)
print("Masked array compressed size: ", masked_ra.compressed().size)
ra_compress = masked_ra.compressed()
if ra_compress.size > 0:
dec_compress = ma.array(arrow_t['dec'], mask=mask).compressed()
id_compress = ma.array(arrow_t['galaxy_id'], mask=mask).compressed()
else:
continue
else:
ra_compress = arrow_t['ra']
dec_compress = arrow_t['dec']
id_compress = arrow_t['galaxy_id']
new_collection = ObjectCollection(ra_compress,
dec_compress,
id_compress,
'galaxy',
hp,
region=region,
mask=mask,
reader=rdr)
object_list.append_collection(new_collection)
return object_list
# For generator version, do this a row group at a time
# but if region cut leaves too small a list, read more rowgroups
# to achieve a reasonable size list (or exhaust the file)
def get_object_iterator_by_hp(self, datetime, hp, obj_type_set=None,
max_chunk=None):
'''
Parameters
----------
datetime Python datetime object.
hp A healpix id
obj_type_set Return only these objects. Defaults to all available
max_chunk If specified, iterator will return no more than this
        number of objects per iteration
Returns
-------
An iterator
'''
pass
def open_catalog(config_file, mp=False):
'''
Parameters
----------
yaml file containing config
Returns
-------
SkyCatalog
'''
with open(config_file) as f:
return SkyCatalog(yaml.safe_load(f), mp)
if __name__ == '__main__':
cfg_file = '/global/homes/j/jrbogart/Joanne_git/skyCatalogs/cfg/galaxy.yaml'
# For tract 3828
# 55.73604 < ra < 57.563452
# -37.19001 < dec < -35.702481
cat = open_catalog(cfg_file)
hps = cat._find_all_hps()
print('Found {} healpix pixels '.format(len(hps)))
for h in hps: print(h)
ra_min_tract = 55.736
ra_max_tract = 57.564
dec_min_tract = -37.190
dec_max_tract = -35.702
##ra_min_small = 56.0
##ra_max_small = 56.2
ra_min_small = 55.9
ra_max_small = 56.1
dec_min_small = -36.2
dec_max_small = -36.0
rgn = Box(ra_min_small, ra_max_small, dec_min_small, dec_max_small)
intersect_hps = _get_intersecting_hps('ring', 32, rgn)
print("For region ", rgn)
print("intersecting pixels are ", intersect_hps)
print('Invoke get_objects_by_region with box region')
object_list = cat.get_objects_by_region(0, rgn,
obj_type_set=set(['galaxy']) )
# Try out get_objects_by_hp with no region
#colls = cat.get_objects_by_hp(0, 9812, None, set(['galaxy']) )
print('Number of collections returned: ', object_list.collection_count)
colls = object_list.get_collections()
for c in colls:
print("For hpid ", c.get_partition_id(), "found ", len(c), " objects")
print("First object: ")
print(c[0], '\nid=', c[0].id, ' ra=', c[0].ra, ' dec=', c[0].dec,
' belongs_index=', c[0]._belongs_index)
print("Slice [1:3]")
slice13 = c[1:3]
for o in slice13:
print('id=',o.id, ' ra=',o.ra, ' dec=',o.dec, ' belongs_index=',
o._belongs_index)
print("Object 1000")
print(c[1000], '\nid=', c[1000].id, ' ra=', c[1000].ra, ' dec=',
c[1000].dec,
' belongs_index=', c[1000]._belongs_index)
slice_late = c[163994:163997]
print('\nobjects indexed 163994 through 163996')
for o in slice_late:
print('id=',o.id, ' ra=',o.ra, ' dec=',o.dec, ' belongs_index=',
o._belongs_index)
print('Total object count: ', len(object_list))
obj = object_list[0]
print("Type of element in object_list:", type(obj))
redshift0 = object_list[0].redshift
print('First redshift: ', redshift0)
sed_bulges = colls[0].get_attribute('sed_val_bulge')
print("first bulge sed:")
for v in sed_bulges[0]:
print(v)
#convergence = coll.get_attribute('convergence')
#print("first convergence: ", convergence[0])
``` |
{
"source": "JoanneBogart/truth_reorg",
"score": 3
} |
#### File: desc/truth_reorg/truth_reorg_utils.py
```python
import sqlite3
import numpy as np
def connect_read(path):
'''
    Open a read-only connection to an SQLite database (URI mode=ro syntax).
'''
conn = sqlite3.connect(f'file:{path}?mode=ro', uri=True)
return conn
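# Hedged usage sketch; the database path is an illustrative assumption:
# conn = connect_read('/path/to/truth.db')   # raises if the file is missing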
def assemble_create_table(table_name, columns):
'''
Return string which will create table with supplied names
and column specifications (a tuple (col_name, col_type) )
'''
stmt = 'CREATE TABLE ' + table_name + '('
col_specs = [f'{c[0]} {c[1]}' for c in columns]
stmt += ','.join(col_specs) + ')'
return stmt
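# For example (table and column names are illustrative):
# assemble_create_table('truth_summary', [('id', 'BIGINT'), ('ra', 'DOUBLE')])
# -> "CREATE TABLE truth_summary(id BIGINT,ra DOUBLE)"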
_SN_OBJECT_TYPE = 22
_MAX_STAR_ID = 41021613038
def make_sn_int_id(host):
'''
Parameters
----------
host int id of host galaxy
When host is a real galaxy, new id will be
host * 1024 + (object-type-id), which is probably 22
Otherwise assign int id to be host_id + CONSTANT
where CONSTANT is large enough that all int ids are larger
than MAX_STAR_ID. Least host id is 0.
'''
OFFSET = _MAX_STAR_ID + 1
if host < 100000:
new_id = host + OFFSET
else:
new_id = host * 1024 + _SN_OBJECT_TYPE
return new_id
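# Worked examples, following directly from the constants above:
# make_sn_int_id(50)      -> 50 + 41021613039  = 41021613089  (fake host)
# make_sn_int_id(1250000) -> 1250000*1024 + 22 = 1280000022   (real host galaxy)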
``` |
{
"source": "JoanneCh/NLP",
"score": 3
} |
#### File: NLP/HMM/hmmdecode3.py
```python
import json
import sys
# read parameters from files
with open('emission.txt', mode='r') as f:
em = json.load(f)
with open('transition.txt', mode='r') as f:
tr = json.load(f)
with open('word_tag.txt', mode='r') as f:
w_t = json.load(f)
tags = set(tr['start'])
words = set(w_t)
def prob_emission(word_low, words, em, state):
if word_low in words:
p_em = em[state].get(word_low, 0)
else:
# p_em = 1
p_em = 1.0 * em[state]['<ONCE>'] / (unknow * em[state]['<TOTAL>'])
return p_em
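# For example, a known word returns its stored per-tag emission probability
# (prob_emission('the', words, em, 'DT') -> em['DT']['the'], or 0 if that word
# was never seen with that tag), while an unseen word falls back to the
# '<ONCE>'/'<TOTAL>' smoothed estimate computed above.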
# viterbi algorithm
res = ""
# count unknown words
unknow = 0
for line in open(sys.argv[-1], encoding='utf-8', mode='r'):
    if True:  # leftover debug guard (originally "1 < 20"); always true
line_s = line.split()
for idx, word in enumerate(line_s):
if word not in words:
unknow += 1
for line in open(sys.argv[-1], encoding='utf-8', mode='r'):
    if True:  # leftover debug guard (originally "1 < 20"); always true
v = []
line_s = line.split()
for idx, word in enumerate(line_s):
word_low = word.lower()
v.append({})
if word_low not in words:
search_states = tr['start']
else:
search_states = w_t[word_low]
if idx == 0:
for state in search_states:
p_em = prob_emission(word_low, words, em, state)
v[idx][state] = {}
v[idx][state]['prob'] = tr['start'][state] * p_em
v[idx][state]['pre'] = None
else:
for tag in search_states:
for idx_t, pre in enumerate(v[idx - 1]):
if idx_t == 0:
max_p = v[idx - 1][pre]['prob'] * tr[pre][tag]
max_pre = pre
else:
if max_p < v[idx - 1][pre]['prob'] * tr[pre][tag]:
max_p = v[idx - 1][pre]['prob'] * tr[pre][tag]
max_pre = pre
p_em = prob_emission(word_low, words, em, tag)
v[idx][tag] = {}
v[idx][tag]['prob'] = max_p * p_em
v[idx][tag]['pre'] = max_pre
states = []
for i in range(idx, -1, -1):
if i == idx:
for idx_s, tag in enumerate(v[i]):
if idx_s == 0:
max_s_p = v[i][tag]['prob']
max_pre_state = v[i][tag]['pre']
max_cur_state = tag
else:
if max_s_p < v[i][tag]['prob']:
max_s_p = v[i][tag]['prob']
max_pre_state = v[i][tag]['pre']
max_cur_state = tag
states.append(max_cur_state)
else:
states.append(max_pre_state)
max_pre_state = v[i][max_pre_state]['pre']
for i in range(len(line_s)):
if i == 0:
w = line_s[i] + '/' + states[len(line_s) - i - 1]
else:
w += line_s[i] + '/' + states[len(line_s) - i - 1]
if i < len(line_s) - 1:
w += ' '
else:
w += "\n"
res += w
f = open("hmmoutput.txt", 'w')
f.write(res)
f.close()
# (22453, 25148, 0.8928344202322253) for en
# (11087, 12663, 0.8755429203190397) for zh
``` |
{
"source": "joannekoye/parking-system",
"score": 3
} |
#### File: app/auth/views.py
```python
from . import auth
from flask import render_template, redirect, url_for, flash, request
from ..models import User
from .forms import RegistrationForm, LoginForm
from .. import db
from flask_login import login_user, current_user, logout_user, login_required
from ..email import mail_message
@auth.route("/login", methods=['GET','POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('parking.home'))
form = LoginForm()
if form.validate_on_submit():
user=User.query.filter_by(email=form.email.data).first()
if user and user.verify_password(form.password.data):
            login_user(user, remember=form.remember.data)
            next_page = request.args.get('next')
            flash('You have been logged in!', 'success')
            return redirect(next_page) if next_page else redirect(url_for('parking.home'))
else:
flash('Login Unsuccessful. Please Check username and password', 'danger')
return render_template('auth/login.html', title='login', form=form)
@auth.route("/register",methods=['GET','POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('parking.home'))
form = RegistrationForm()
if form.validate_on_submit():
new_user = User(username=form.username.data, email= form.email.data, password=form.password.data)
db.session.add(new_user)
db.session.commit()
mail_message("Welcome to Parking System","email/welcome_user",new_user.email,user=new_user)
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title='Register', form=form)
@auth.route("/logout")
def logout():
logout_user()
return redirect(url_for('auth.login'))
@auth.route("/account")
@login_required
def account():
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('auth/account.html', title='Account', image_file=image_file)
@auth.route('/make_me_admin')
@login_required
def make_me_admin():
user = User.query.get(current_user.id)
user.role = 'admin'
db.session.add(user)
db.session.commit()
return redirect(url_for('admin.index'))
```
#### File: parking-system/app/__init__.py
```python
from flask import Flask
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_marshmallow import Marshmallow
from flask_login import LoginManager
from flask_mail import Mail
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.session_protection='strong'
db = SQLAlchemy()
bootstrap = Bootstrap()
ma = Marshmallow()
mail = Mail()
# from app.models import User
# create an application factory
def create_app(config_name):
"""
    creates an instance of the application for the given config name,
    i.e. development or production; the app will then pick up its
    settings from the corresponding configuration class in config
"""
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# set the configurations
app.config.from_object(config_options[config_name])
    # initialize the database
db.init_app(app)
bootstrap.init_app(app)
ma.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
# register your blueprints here
from app.auth import auth as auth_blueprint
from app.main import main as main_blueprint
from .parking import parking as parking_blueprint
from .admin import admin as admin_blueprint
app.register_blueprint(main_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(parking_blueprint,url_prefix='/parking')
app.register_blueprint(admin_blueprint,url_prefix='/admin')
return app
```
#### File: app/main/views.py
```python
from flask import render_template,redirect,url_for
from . import main
from flask_login import login_required,current_user
# your views go here i.e for home,about
@main.route("/")
def index():
if current_user.is_authenticated:
return redirect(url_for('parking.home'))
return render_template('index.html')
@main.route("/about")
def about():
pass
```
#### File: joannekoye/parking-system/manage.py
```python
from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from app.models import Institution,User
app = create_app('production')
manager = Manager(app)
manager.add_command('server', Server)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
return dict(app=app, db=db,Institution=Institution,User=User)
if __name__ == '__main__':
manager.run()
    # Note: a trailing db.create_all() would never run here, since manager.run()
    # exits the process; create/upgrade tables with "python manage.py db upgrade".
```
#### File: parking-system/tests/test_user.py
```python
import unittest
from app.models import User
class UserModelTest(unittest.TestCase):
def setUp(self):
self.new_user = User(password = '<PASSWORD>')
def test_password_setter(self):
self.assertTrue(self.new_user.password is not None)
``` |
{
"source": "joannekoye/password-locker",
"score": 4
} |
#### File: joannekoye/password-locker/credentials-test.py
```python
import unittest
from credentials import Credentials
class TestCredentials(unittest.TestCase):
'''
Test class that defines test cases for the credentials class behaviors.
Args:
    unittest.TestCases: TestCase class that helps in creating test cases
'''
def setUp(self):
'''
Set up method to run before each test case.
'''
self.new_user_list = Credentials('Twitter', 'Joan', 'Nekoye','NekoyeJoan','12345')
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
Credentials.new_user_list = []
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_user_list.platform,"Twitter")
self.assertEqual(self.new_user_list.first_name,"Joan")
self.assertEqual(self.new_user_list.last_name,"Nekoye")
self.assertEqual(self.new_user_list.username,"NekoyeJoan")
        self.assertEqual(self.new_user_list.password,"12345")
def test_display_all_credentials(self):
        '''
        test case to check that display_credentials returns the list of saved users
        '''
self.assertEqual(Credentials.display_credentials(), Credentials.new_user_list)
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "joannelynch92/christmastree",
"score": 3
} |
#### File: joannelynch92/christmastree/twinkle.py
```python
import random
import time
from rpi_ws281x import PixelStrip, Color
import argparse
# LED strip configuration:
LED_COUNT = 90 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
def twinkler():
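    # per-pixel brightness generator: ramp up over 10 frames, back down over
    # 10 frames, then hold the base brightness for a random 0-300 frame pause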
base_brightness = int(LED_BRIGHTNESS/4)
delta_brightness = LED_BRIGHTNESS - base_brightness
while True:
for i in range(10):
c = base_brightness + int(delta_brightness/10.0*i)
yield Color(c, c, c)
for i in range(9, -1, -1):
c = base_brightness + int(delta_brightness/10.0*i)
yield Color(c, c, c)
for i in range(int(random.random()*300)):
yield Color(base_brightness, base_brightness, base_brightness)
def twinkle(strip, wait_ms=50):
twinklers = [twinkler() for i in range(strip.numPixels())]
while True:
for i in range(strip.numPixels()):
strip.setPixelColor(i, next(twinklers[i]))
strip.show()
time.sleep(wait_ms / 1000.0)
if __name__ == '__main__':
# Process arguments
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
# Create NeoPixel object with appropriate configuration.
strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    # Initialize the library (must be called once before other functions).
strip.begin()
print('Press Ctrl-C to quit.')
if not args.clear:
print('Use "-c" argument to clear LEDs on exit')
try:
while True:
print('Twinkle')
twinkle(strip)
    except KeyboardInterrupt:
        if args.clear:
            # colorWipe was never defined; inline a strandtest-style wipe
            for i in range(strip.numPixels()):
                strip.setPixelColor(i, Color(0, 0, 0))
            strip.show()
``` |
{
"source": "joanneong/Waveman",
"score": 3
} |
#### File: Waveman/utils/ops.py
```python
import os
import sys
import csv
import time
import librosa
import shutil
import glob
import pandas as pd
import numpy as np
import logging
from logging.handlers import RotatingFileHandler
def ChkFile(file):
'''
check file exists or not
'''
if not os.path.isfile(file):
print('Error! Cannot find audio file {}. \
\nPlease check the path or filename!'.format(file))
sys.exit(1)
def ChkDir(Dir):
'''
Create folder if not exists
'''
if not os.path.exists(Dir):
os.mkdir(Dir)
def GetEpisodes(array, length, episodes=None, repeat=0, Range=0):
    '''
    Extract sub-arrays of the given length. If episodes is None, every
    window is extracted (first round); otherwise windows are re-sampled
    around the listed episode indices (second round).
    '''
matrix = []
labels = []
sub_length = 0
img_number = 0
while sub_length + length < len(array):
sub_array = []
for i in array[sub_length:(sub_length+length)]:
sub_array.append(i)
        if episodes is None:  # first round selection
matrix.append(sub_array)
labels.append(img_number)
elif img_number in episodes: # second round check
start = int(sub_length - Range*length + 1000)
end = int(sub_length + (Range+1)*length + 1000)
if start < 0:
start = 0
if end > len(array):
end = len(array)
stride = int((end - start)/(repeat+1))
tmp_start = start
            for rep in range(repeat):
                sub_array = []
                tmp_start = start + stride*rep
                # use a distinct loop variable so the window index is not shadowed
                for value in array[tmp_start:(tmp_start+length)]:
                    sub_array.append(value)
matrix.append(sub_array)
labels.append(img_number)
sub_length += length
img_number += 1
return matrix, labels
def decode_and_readcsv(path, List):
    '''
    write a CSV list file storing each image path, sorted numerically by filename
    '''
pic_path = []
for cur_path, folders, pics in os.walk(path):
for i in range(len(pics)):
pics[i] = pics[i].split('.')
pics[i][0] = int(pics[i][0])
pics.sort()
for i in range(len(pics)):
pics[i][0] = str(pics[i][0])
pics[i] = pics[i][0] + '.' + pics[i][1]
for pic in pics:
pic = os.path.join(cur_path, pic)
pic_path.append(pic)
csvFile = open(List, 'w', newline='')
writer = csv.writer(csvFile)
m = len(pic_path)
for i in range(m):
writer.writerow(np.expand_dims(pic_path[i],0))
csvFile.close()
def CheckResult(PredLabel, Prob, number, TrueLabel, ProbThreshold):
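    '''
    majority vote over the predictions: count those whose confidence clears
    ProbThreshold and match TrueLabel; return True when at least
    (number/2 - 1) of them agree
    '''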
correct_number = 0
for i in range(number):
# print(PredLabel[i])
if np.max(np.squeeze(Prob)[i]) >= ProbThreshold:
if PredLabel[i] == TrueLabel:
correct_number += 1
if correct_number >= (number/2-1):
return True
else:
return False
def TMPList(Dir, List):
'''
temp image list file for prediction
'''
if os.path.exists(List):
os.remove(List)
decode_and_readcsv(Dir, List)
def statistic(List):
    '''
    count how often each species label (> 1) occurs in the list
    '''
species_frequency = {}
labels = []
for record in List:
content = record.split('\t')
if int(content[1]) > 1:
labels.append(int(content[1]))
for i in labels:
species_frequency[i] = species_frequency.get(i, 0) + 1
return len(species_frequency), species_frequency
def Logs(log_time, output, type=logging.INFO):
for handler in list(log_time.handlers):
log_time.removeHandler(handler)
if type == logging.INFO:
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s: %(message)s',
datefmt='%H:%M:%S')
else:
formatter = logging.Formatter('%(message)s')
console = logging.StreamHandler(sys.stdout) # print log in the screen
console.setFormatter(formatter)
log_time.setLevel(type)
loghandle = RotatingFileHandler(output, mode='a') # store log in the file
loghandle.setLevel(type)
loghandle.setFormatter(formatter)
log_time.addHandler(loghandle)
log_time.addHandler(console)
return log_time
def CheckAudio(audio_list):
'''
    Check each audio file and its corresponding species and folder!
'''
audio_list = pd.read_excel(audio_list)
IDs = []
AudioFiles = []
for index, row in audio_list.iterrows():
# check audio file!
if not os.path.isfile(row['Audiofile']):
print('Error! Cannot find audio file: ', row['Audiofile'])
sys.exit(1)
else:
## check if librosa could read audio file
try:
librosa.load(row['Audiofile'], sr=None)
except:
print('Error2! Cannot read audio file: ', row['Audiofile'])
sys.exit(1)
# check the ID is repeated!
if row['ID'] in IDs:
print('The ID {} already exists in the list! \
\nPlease change to a new one!'.format(row['ID']))
sys.exit(1)
else:
IDs.append(row['ID'])
AudioFiles.append((row['Audiofile'], row['ID']))
return AudioFiles
def Species(species_list):
'''
extract species and its label
'''
species_dict = {}
species_list = pd.read_excel(species_list)
for index, row in species_list.iterrows():
species_dict.update({row['Species']:row['Label']})
return species_dict
def GenerateList(Dir, rate, ValidRate):
'''
Generate list for all the distributed images
'''
image_files = []
image_label_files = []
image = Dir + '/*.jpg'
image_files.extend(glob.glob(image))
if float(rate) <= 1:
select_number = int(float(rate)*len(image_files))
else:
select_number = rate
if select_number == 0:
print('Warning: No picture is selected in the folder ', Dir)
else:
np.random.shuffle(image_files)
image_files = image_files[:select_number]
    if ValidRate > 0:
        TrainLength = int((1 - ValidRate)*len(image_files))
        train_list = image_files[:TrainLength]
        valid_list = image_files[TrainLength:]
    else:
        # no validation split requested: everything goes to training
        # (the original returned unbound names in this case)
        train_list, valid_list = image_files, []
    return train_list, valid_list
def TransferImages(List, targetDir, Valid=False):
'''
copy images to another folder according to class label
'''
if Valid == True:
targetDir = targetDir.replace('/train', '/valid')
for file in List:
target_file = os.path.join(targetDir, os.path.basename(file))
shutil.copyfile(file, target_file)
``` |
{
"source": "JoannePeel/The-Surf-is-Up-",
"score": 3
} |
#### File: JoannePeel/The-Surf-is-Up-/climate_app.py
```python
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
###########################################
# Setup Database
###########################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
#Create references to Measurement and Station tables
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
####################################
# Setup Flask app
####################################
app = Flask(__name__)
################################
#Setup Flask Routes
################################
@app.route("/")
def home():
"""List all available api routes."""
return (
f"<H3>Welcome to the Hawaii Climate Analysis API!<br/><br />"
f"Available Routes:<br/>"
f"This API returns dates and precipitation: "
f"/api/v1.0/precipitation<br/>"
f"This API returns a list of stations: "
f"/api/v1.0/stations<br/>"
f"This API returns dates and temperature observations from a year from the last data point: "
f"/api/v1.0/tobs<br/>"
f"This API allows you to consult average, min and max temperatures entering a start date. <br/>"
f"Please enter start date as YYYY-mm-dd: "
f"/api/v1.0/<start><br/>"
f"This API allows you to consult average, min and max temperatures between two dates: <br/>"
f"/api/v1.0/<start>/<end>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Returns dates and precipitation"""
prcp = session.query(Measurement.date, Measurement.prcp).all()
# Create a list of dicts with `date` and `prcp` as the keys and values
all_prcp = []
    for result in prcp:
        row = {}
        # index the row being iterated, not the whole result list
        row["date"] = result[0]
        row["prcp"] = result[1]
        all_prcp.append(row)
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
# Query all stations from the station table
    station_results = session.query(Station.station, Station.name).all()
station_list = list(np.ravel(station_results))
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
    # query dates and temperature observations for the year preceding the
    # last data point in the dataset (2017-08-23)
    last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
tobs = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date > last_year).\
order_by(Measurement.date).all()
# Create a list of dicts with `date` and `tobs` as the keys and values
temperature_all = []
    for result in tobs:
        row = {}
        # index the row being iterated, not the whole result list
        row["date"] = result[0]
        row["tobs"] = result[1]
        temperature_all.append(row)
return jsonify(temperature_all)
#This one works sometimes :(
@app.route("/api/v1.0/<start>")
def start_temp(start):
startdate=start
#startdate=dt.datetime.strptime(start, '%Y-%m-%d')
results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).filter(Measurement.date >= startdate).all()
print(results)
for row in session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).filter(Measurement.date >= startdate).all():
print(row)
#Create JSON
data_list = []
for result in results:
row = {}
row['startdate'] = startdate
row['avg'] = float(result[0])
row['max'] = float(result[1])
row['min'] = float(result[2])
data_list.append(row)
return jsonify(data_list)
@app.route("/api/v1.0/<start>/<end>")
def between_temp(start, end):
start_date=start
end_date= end
results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
print(results)
#Create JSON
data_list = []
for result in results:
row = {}
row['startdate'] = start_date
row['end_date'] = end_date
row['avg'] = float(result[0])
row['max'] = float(result[1])
row['min'] = float(result[2])
data_list.append(row)
return jsonify(data_list)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joannetruong/habitat-api",
"score": 2
} |
#### File: habitat-api/evaluation/evaluate_reality.py
```python
from collections import OrderedDict, defaultdict
import argparse
import random
import numpy as np
import torch
from gym import spaces
from gym.spaces.dict_space import Dict as SpaceDict
from PIL import Image
#from map_and_plan_agent.slam import DepthMapperAndPlanner
import habitat
from habitat.sims import make_sim
from habitat_baselines.config.default import get_config
from habitat_baselines.rl.ddppo.policy.resnet_policy import (
PointNavResNetPolicy,
)
from habitat_baselines.rl.ppo.ppo_trainer import PPOTrainer
DEVICE = torch.device("cpu")
SIMULATOR_REALITY_ACTIONS = {0: "stop", 1: "forward", 2: "left", 3: "right"}
LOG_FILENAME = "exp.navigation.log"
MAX_DEPTH = 10.0
class NavEnv:
def __init__(
self, forward_step, angle_step, is_blind=False, sensors=["RGB_SENSOR"]
):
config = habitat.get_config()
log_mesg(
"env: forward_step: {}, angle_step: {}".format(
forward_step, angle_step
)
)
config.defrost()
config.PYROBOT.SENSORS = sensors
config.PYROBOT.RGB_SENSOR.WIDTH = 256
config.PYROBOT.RGB_SENSOR.HEIGHT = 256
config.PYROBOT.DEPTH_SENSOR.WIDTH = 256
config.PYROBOT.DEPTH_SENSOR.HEIGHT = 256
config.freeze()
self._reality = make_sim(id_sim="PyRobot-v0", config=config.PYROBOT)
self._angle = (angle_step / 180) * np.pi
self._pointgoal_key = "pointgoal_with_gps_compass"
self.is_blind = is_blind
if not is_blind:
sensors_dict = {
**self._reality.sensor_suite.observation_spaces.spaces
}
else:
sensors_dict = {}
sensors_dict[self._pointgoal_key] = spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
)
self.observation_space = SpaceDict(sensors_dict)
self.action_space = spaces.Discrete(4)
self._actions = {
"forward": [forward_step, 0, 0],
"left": [0, 0, self._angle],
"right": [0, 0, -self._angle],
"stop": [0, 0, 0],
}
def _pointgoal(self, agent_state, goal):
agent_x, agent_y, agent_rotation = agent_state
agent_coordinates = np.array([agent_x, agent_y])
rho = np.linalg.norm(agent_coordinates - goal)
theta = (
np.arctan2(
goal[1] - agent_coordinates[1], goal[0] - agent_coordinates[0]
)
- agent_rotation
)
theta = theta % (2 * np.pi)
if theta >= np.pi:
theta = -((2 * np.pi) - theta)
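        # e.g. an agent at the origin facing +x with the goal 1 m behind it
        # gets (rho, theta) = (1.0, -pi); theta is wrapped into [-pi, pi)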
return rho, theta
@property
def pointgoal_key(self):
return self._pointgoal_key
def reset(self, goal_location):
self._goal_location = np.array(goal_location)
observations = self._reality.reset()
base_state = self._get_base_state()
        assert np.all(base_state == 0), (
"Please restart the roslaunch command. "
"Current base_state is {}".format(base_state)
)
observations[self._pointgoal_key] = self._pointgoal(
base_state, self._goal_location
)
return observations
def _get_base_state(self):
base_state = self._reality.base.get_state("odom")
base_state = np.array(base_state, dtype=np.float32)
log_mesg("base_state: {:.3f} {:.3f} {:.3f}".format(*base_state))
return base_state
@property
def reality(self):
return self._reality
def step(self, action):
if action not in self._actions:
raise ValueError("Invalid action type: {}".format(action))
if action == "stop":
raise NotImplementedError("stop action not implemented")
observations = self._reality.step(
"go_to_relative",
{
"xyt_position": self._actions[action],
"use_map": False,
"close_loop": True,
"smooth": False,
},
)
base_state = self._get_base_state()
observations[self._pointgoal_key] = self._pointgoal(
base_state, self._goal_location
)
return observations
def log_mesg(mesg):
print(mesg)
with open(LOG_FILENAME, "a") as f:
f.write(mesg + "\n")
def load_model(
path,
observation_space,
action_space,
hidden_size,
normalize_visual_inputs,
backbone,
num_recurrent_layers,
device,
):
model = PointNavResNetPolicy(
observation_space=observation_space,
action_space=action_space,
hidden_size=hidden_size,
normalize_visual_inputs=normalize_visual_inputs,
backbone=backbone,
num_recurrent_layers=num_recurrent_layers
)
model.to(device)
new_model_params = sum(
[torch.numel(p) for _, p in model.named_parameters()]
)
saved_model = torch.load(path, map_location=device)
saved_model_params = sum(
[torch.numel(v) for k, v in saved_model["state_dict"].items()]
)
print(
"new_model_params: {}, saved_model_params: {}".format(
new_model_params, saved_model_params
)
)
saved_model_state_dict = OrderedDict()
for k, v in saved_model["state_dict"].items():
new_k = k.replace("actor_critic.", "")
new_k2 = new_k.replace("net.visual_encoder.final_fc.0.weight", "net.visual_fc.1.weight")
new_k3 = new_k2.replace("net.visual_encoder.final_fc.0.bias", "net.visual_fc.1.bias")
saved_model_state_dict[new_k3] = v
model.load_state_dict(saved_model_state_dict)
return model
def to_tensor(v):
if torch.is_tensor(v):
return v
elif isinstance(v, np.ndarray):
return torch.from_numpy(v)
else:
return torch.tensor(v, dtype=torch.float)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-path", type=str, required=True)
parser.add_argument("--sensors", type=str, required=True)
parser.add_argument("--hidden-size", type=int, required=True)
parser.add_argument(
"--normalize-visual-inputs", type=int, required=True, choices=[0, 1]
)
parser.add_argument(
"--backbone",
type=str,
required=True,
choices=["resnet50", "se_resneXt50"],
)
parser.add_argument("--num-recurrent-layers", type=int, required=True)
parser.add_argument("--goal", type=str, required=False, default="0.2,0.0")
parser.add_argument("--goal-x", type=float, required=True)
parser.add_argument("--goal-y", type=float, required=True)
parser.add_argument("--depth-model", type=str, required=False, default="")
parser.add_argument("--depth-only", action="store_true")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--map-plan-baseline", action="store_true")
args = parser.parse_args()
vtorch = "1.2.0"
assert torch.__version__ == vtorch, "Please use torch {}".format(vtorch)
if args.map_plan_baseline is True:
assert "RGB_SENSOR" in args.sensors and "DEPTH_SENSOR" in args.sensors
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
log_mesg("Starting new episode")
env = NavEnv(
forward_step=0.25,
angle_step=30,
is_blind=(args.sensors == ""),
sensors=args.sensors.split(","),
)
goal_list = [args.goal_x, args.goal_y]
goal_location = np.array(goal_list, dtype=np.float32)
log_mesg("Goal location: {}".format(goal_location))
device = torch.device("cpu")
if args.depth_model != "":
d_model = torch.load(args.depth_model, map_location=device)["model"]
d_model = d_model.eval()
print("depth_model:")
print(d_model)
sensors_dict = {**env._reality.sensor_suite.observation_spaces.spaces}
if args.depth_only:
del sensors_dict["rgb"]
print("Deleting Sensor from model: rgb")
sensors_dict[env.pointgoal_key] = spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
)
num_processes = 1
if args.map_plan_baseline is False:
model = load_model(
path=args.model_path,
observation_space=SpaceDict(sensors_dict),
action_space=env.action_space,
hidden_size=args.hidden_size,
normalize_visual_inputs=bool(args.normalize_visual_inputs),
backbone=args.backbone,
num_recurrent_layers=args.num_recurrent_layers,
device=device,
)
model = model.eval()
test_recurrent_hidden_states = torch.zeros(
model.net.num_recurrent_layers,
num_processes,
args.hidden_size,
device=DEVICE,
)
prev_actions = torch.zeros(num_processes, 1, device=DEVICE)
# else:
# model = DepthMapperAndPlanner(
# map_size_cm=1200,
# out_dir=None,
# mark_locs=True,
# reset_if_drift=True,
# count=-1,
# close_small_openings=True,
# recover_on_collision=True,
# fix_thrashing=True,
# goal_f=1.1,
# point_cnt=2,
# )
# model.reset()
old_new_action_mapping = {0: 1, 1: 2, 2: 3, 3: 0}
not_done_masks = torch.zeros(num_processes, 1, device=DEVICE)
observations = env.reset(goal_location)
timestep = -1
while True:
timestep += 1
observations = [observations]
goal = observations[0][env.pointgoal_key]
log_mesg(
"Your goal is to get to: {:.3f}, {:.3f} "
"rad ({:.2f} degrees)".format(
goal[0], goal[1], (goal[1] / np.pi) * 180
)
)
batch = defaultdict(list)
for obs in observations:
for sensor in obs:
batch[sensor].append(to_tensor(obs[sensor]))
for sensor in batch:
batch[sensor] = torch.stack(batch[sensor], dim=0).to(
device=DEVICE, dtype=torch.float
)
if args.depth_model != "":
with torch.no_grad():
rgb_stretch = batch["rgb"].permute(0, 3, 1, 2) / 255.0
# FASTDEPTH expects a NCHW order
depth_stretch = d_model(rgb_stretch)
depth_stretch = torch.clamp(depth_stretch / MAX_DEPTH, 0, 1.0)
batch["depth"] = depth_stretch.permute(0, 2, 3, 1)
# torch.save(batch, "episode/timestep_{}.pt".format(timestep))
if args.map_plan_baseline is False:
with torch.no_grad():
_, actions, _, test_recurrent_hidden_states = model.act(
batch,
test_recurrent_hidden_states,
prev_actions,
not_done_masks,
deterministic=False,
)
prev_actions.copy_(actions)
else:
assert "rgb" in batch
assert "depth" in batch
assert batch["rgb"].shape[0] == 1
slam_batch_input = {}
slam_batch_input["rgb"] = batch["rgb"].numpy()[0]
slam_batch_input["depth"] = batch["depth"].numpy()[0]
slam_batch_input["pointgoal"] = batch[
"pointgoal_with_gps_compass"
].numpy()[0]
slam_action = model.act(slam_batch_input)
actions = torch.Tensor(
[old_new_action_mapping[slam_action]]
).unsqueeze(0)
simulation_action = actions[0].item()
reality_action = SIMULATOR_REALITY_ACTIONS[simulation_action]
print("reality_action:", reality_action)
# input("Press key to continue")
if reality_action != "stop":
observations = env.step(reality_action)
not_done_masks = torch.ones(num_processes, 1, device=DEVICE)
else:
print("STOP called, episode over.")
print("Distance to goal: {:.3f}m".format(goal[0]))
return
if __name__ == "__main__":
main()
```
#### File: habitat-api/evaluation/evaluate_simulation_coda_gen.py
```python
import matplotlib.pyplot as plt
import argparse
import os
from collections import defaultdict
import habitat
import numpy as np
import quaternion
import torch
from evaluate_reality import load_model
from gym.spaces.dict_space import Dict as SpaceDict
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.utils.visualizations.utils import (images_to_video,
observations_to_image)
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.env_utils import construct_envs
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.utils import batch_obs, generate_video
from habitat_baselines.config.default import get_config
from habitat_sim import geo
from habitat_sim.utils.common import quat_from_two_vectors, quat_rotate_vector
from PIL import Image
def quat_to_rad(rotation):
heading_vector = quaternion_rotate_vector(
rotation.inverse(), np.array([0, 0, -1])
)
phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
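    # phi is the heading angle of the agent's forward (-z) axis in the x-z plane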
return phi
def create_state(position, rotation):
rotation_mp3d_habitat = quat_from_two_vectors(geo.GRAVITY, np.array([0, 0, -1]))
pt_mp3d = quat_rotate_vector(rotation_mp3d_habitat, position) # That point in the mp3d scene mesh coordinate frame.
state_xyt = [pt_mp3d[0], pt_mp3d[1]]
theta = quat_to_rad(rotation)
state_xyt.append(theta)
return state_xyt
def create_traj_labels(input_arr):
r, c = input_arr.shape
# labels: d_x, d_y, cos_d_t, sin_d_t
diff = np.diff(input_arr, axis=0)
labels_arr = np.zeros((r-1, 4))
labels_arr[:, :2] = diff[:, :2]
labels_arr[:, 2] = np.cos(diff[:, 2])
labels_arr[:, 3] = np.sin(diff[:, 2])
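    # e.g. consecutive states (0, 0, 0) and (0.25, 0, pi/2) produce the
    # label row [0.25, 0.0, cos(pi/2), sin(pi/2)] = [0.25, 0.0, 0.0, 1.0]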
return labels_arr
def convert_embedding(input_arr_embed):
# SIMULATOR_REALITY_ACTIONS = {"stop": 0, "forward": 1 , "left": 2 , "right": 3}
ONE_HOT_ACTIONS = {"0": [0, 0, 0], "1": [0, 0, 1] , "2": [0, 1, 0] , "3": [1, 0, 0]}
r, c = input_arr_embed.shape
input_arr_oneHot = np.zeros((r, c+2))
input_arr_oneHot[:, :4] = input_arr_embed[:, :4]
for row in range(r):
input_arr_oneHot[row, 4:] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]
## if logging collisions
# input_arr_oneHot[row, 4:7] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]
# input_arr_embed[:, -1] = input_arr_embed[:, 5]
return input_arr_oneHot
def save_trajectory(data, datasplit, traj_dir, traj_ctr, datatype, embed_type=""):
pathend = datasplit + '_' + '%03d'%traj_ctr
if embed_type != "":
embed_type += "_"
filename = os.path.join(traj_dir, datatype + '_LRF_' + embed_type + pathend)
print('saving: ', filename)
np.save(filename, data[:, :])
np.savetxt(filename + '.csv', data[:, :], delimiter=",")
def create_labels_trajectory(labels_arr):
r, c = labels_arr.shape
# input embed: x, y, cost, sint, a
final_labels_arr = np.zeros((r, c+1))
## if logging collisions
# input_arr_embed = np.zeros((r, c+2))
final_labels_arr[:, :2] = labels_arr[:, :2]
final_labels_arr[:, 2] = np.cos(labels_arr[:, 2])
final_labels_arr[:, 3] = np.sin(labels_arr[:, 2])
return final_labels_arr
def create_input_trajectory(final_input_arr):
r, c = final_input_arr.shape
# input embed: x, y, cost, sint, a
input_arr_embed = np.zeros((r, c+1))
## if logging collisions
# input_arr_embed = np.zeros((r, c+2))
input_arr_embed[:, :2] = final_input_arr[:, :2]
input_arr_embed[:, 2] = np.cos(final_input_arr[:, 2])
input_arr_embed[:, 3] = np.sin(final_input_arr[:, 2])
input_arr_embed[:, 4] = final_input_arr[:, 3]
## if logging collisions
# input_arr_embed[:, 5] = final_input_arr[:, 4]
# input oneHot: x, y, cost, sint, a1, a2, a3
input_arr_oneHot = convert_embedding(input_arr_embed)
return input_arr_embed, input_arr_oneHot
def create_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return
def get_last_idx(dir_path):
f = sorted(os.listdir(dir_path))
if not f:
ctr = 0
else:
ctr = int(f[-1].split('.')[0].split('_')[-1]) +1
return ctr
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-path", type=str, required=True)
# parser.add_argument("--noisy", action="store_true")
parser.add_argument("--noise", type=str, required=True)
parser.add_argument("--save-imgs", action="store_true")
parser.add_argument("--save-traj", action="store_true")
parser.add_argument("--data-split", type=str, required=True)
parser.add_argument("--sensors", type=str, required=True)
parser.add_argument("--hidden-size", type=int, required=True)
parser.add_argument("--noise-type", type=str, required=True)
parser.add_argument(
"--normalize-visual-inputs", type=int, required=True, choices=[0, 1]
)
parser.add_argument("--depth-only", action="store_true")
parser.add_argument(
"--backbone",
type=str,
required=True,
choices=["resnet50", "se_resneXt50"],
)
parser.add_argument("--num-recurrent-layers", type=int, required=True)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
args = parser.parse_args()
# Check torch version
# vtorch = "1.2.0"
    # assert torch.__version__ == vtorch, "Please use torch {}".format(vtorch)
if args.noise_type == 'poisson_ilqr':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_ilqr.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_poisson.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise_type == 'speckle_mb':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_speckle_mb.yaml"
if args.gen:
cfg_file="habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_speckle_mb_gen.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_mb.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_speckle.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml"
elif args.noise_type == 'gaussian_proportional':
if args.noise == 'all':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml"
elif args.noise == 'actuation':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_proportional.yaml"
elif args.noise == 'sensors':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_gaussian.yaml"
elif args.noise == 'no_noise':
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml"
else:
print('no noise specified. using all noise')
cfg_file = "habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml"
config = get_config(
cfg_file, args.opts
)
    # datasplit is needed below even when trajectories are not being saved
    datasplit = args.data_split.split('_')[1]
    split = 'train'
    if datasplit == 'med':
        split = 'test'
if args.save_imgs:
split='train'
if args.noise!="no_noise":
depth_save_path = 'depth_' + config.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL + '_' + split
rgb_save_path = 'rgb_' + config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL + '_' + str(config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.intensity_constant) + '_' + split
else:
depth_save_path = 'depth_no_noise_' + split
rgb_save_path = 'rgb_no_noise_' + split
if args.save_traj:
if args.noise!="no_noise":
traj_save_path = 'traj_' + config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.CONTROLLER + '_' + str(config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER) + '_' + split
else:
traj_save_path = 'traj_no_noise_' + split
config.defrost()
config.TASK_CONFIG.TASK.BASE_STATE = habitat.Config()
config.TASK_CONFIG.TASK.BASE_STATE.TYPE = "BaseState"
# Add the measure to the list of measures in use
config.TASK_CONFIG.TASK.MEASUREMENTS.append("BASE_STATE")
if args.sensors == "":
config.SENSORS = []
else:
config.SENSORS = args.sensors.split(",")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("SOFT_SPL")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("EPISODE_DISTANCE")
config.freeze()
envs = construct_envs(config, get_env_class(config.ENV_NAME))
sensors_obs = envs.observation_spaces[0]
if args.depth_only:
config.defrost()
config.SENSORS=["DEPTH_SENSOR"]
config.freeze()
envs2 = construct_envs(config, get_env_class(config.ENV_NAME))
sensors_obs = envs2.observation_spaces[0]
device = (
torch.device("cuda:{}".format(config.TORCH_GPU_ID))
if torch.cuda.is_available()
else torch.device("cpu")
)
model = load_model(
path=args.model_path,
observation_space=sensors_obs,
# observation_space=envs.observation_spaces[0],
action_space=envs.action_spaces[0],
hidden_size=args.hidden_size,
normalize_visual_inputs=bool(args.normalize_visual_inputs),
backbone=args.backbone,
num_recurrent_layers=args.num_recurrent_layers,
device=device,
)
model.eval()
print('METRICS: ', config.TASK_CONFIG.TASK.MEASUREMENTS)
metric_name = "SPL"
metric_cfg = getattr(config.TASK_CONFIG.TASK, metric_name)
measure_type = baseline_registry.get_measure(metric_cfg.TYPE)
assert measure_type is not None, "invalid measurement type {}".format(
metric_cfg.TYPE
)
metric_uuid = measure_type(None, None)._get_uuid()
print('METRIC UUID: ', metric_uuid)
observations = envs.reset()
batch = batch_obs(observations, device)
current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)
test_recurrent_hidden_states = torch.zeros(
model.net.num_recurrent_layers,
config.NUM_PROCESSES,
args.hidden_size,
device=device,
)
prev_actions = torch.zeros(
config.NUM_PROCESSES, 1, device=device, dtype=torch.long
)
not_done_masks = torch.zeros(config.NUM_PROCESSES, 1, device=device)
stats_episodes = dict() # dict of dicts that stores stats per episode
stats_actions = defaultdict(int)
rgb_frames = [
[] for _ in range(config.NUM_PROCESSES)
] # type: List[List[np.ndarray]]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
sensor_path = 'sim_sensor_imgs_gen'
traj_path = 'sim_traj'
if args.save_imgs:
depth_dir = os.path.join(sensor_path, depth_save_path)
rgb_dir = os.path.join(sensor_path, rgb_save_path)
create_dir(depth_dir)
create_dir(rgb_dir)
img_ctr = get_last_idx(depth_dir)
if args.save_traj:
traj_dir = os.path.join(traj_path, traj_save_path)
create_dir(traj_dir)
traj_ctr = get_last_idx(traj_dir)
## not logging collisions
final_input_arr = np.array([0, 0, 0, 0])
## if logging collisions
# input_arr = np.array([0, 0, 0, 0, 0])
# final_input_arr = np.array([0, 0, 0, 0, 0])
tmp_labels_arr = np.array([0, 0, 0])
prev_base_state = [0, 0, 0]
num_actions = 0
# datasplit = args.data_split.split('_')[1]
print_once = True
called_stop = False
while (
len(stats_episodes) < config.TEST_EPISODE_COUNT and envs.num_envs > 0
):
current_episodes = envs.current_episodes()
if print_once:
print("Ep_id: ", current_episodes[0].episode_id, "Start_pos: ", current_episodes[0].start_position, current_episodes[0].start_rotation, "Goal_pos: ", current_episodes[0].goals[0].position)
print_once = False
with torch.no_grad():
_, actions, _, test_recurrent_hidden_states = model.act(
batch,
test_recurrent_hidden_states,
prev_actions,
not_done_masks,
deterministic=False,
)
prev_actions.copy_(actions)
outputs = envs.step([a[0].item() for a in actions])
num_actions +=1
for a in actions:
stats_actions[a[0].item()] += 1
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
batch = batch_obs(observations, device)
not_done_masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=device,
)
rewards = torch.tensor(
rewards, dtype=torch.float, device=device
).unsqueeze(1)
current_episode_reward += rewards
next_episodes = envs.current_episodes()
envs_to_pause = []
n_envs = envs.num_envs
for i in range(n_envs):
if (
next_episodes[i].scene_id,
next_episodes[i].episode_id,
) in stats_episodes:
envs_to_pause.append(i)
# x, y, t, a
input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()]
#input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()] + [int(infos[i]["collisions"]["is_collision"])]
curr_state = create_state(infos[i]["base_state"]['position'], infos[i]["base_state"]['rotation'])
delta_row = np.subtract(curr_state, prev_base_state)
prev_base_state = curr_state
print(input_row + [int(infos[i]["collisions"]["is_collision"])])
if int(infos[i]["collisions"]["is_collision"]) == 0:
final_input_arr = np.vstack((final_input_arr, input_row))
tmp_labels_arr = np.vstack((tmp_labels_arr, delta_row))
# plt.ioff()
# _ = plt.hist(observations[i]["depth"].flatten(), bins='auto')
# plt.savefig('hist.jpg')
# TODO: save only good trajectories
if args.save_imgs:
obz = observations[i]
depth_obs = obz["depth"]
depth_obs = np.squeeze(depth_obs)
depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode="L")
depth_img.save(os.path.join(depth_dir, "depth_" + "%05d"%img_ctr + ".jpg"), "JPEG")
rgb_obs = obz["rgb"]
rgb_img = Image.fromarray(rgb_obs, mode="RGB")
rgb_img.save(os.path.join(rgb_dir, "rgb_" + "%05d"%img_ctr + ".jpg"), "JPEG")
img_ctr +=1
# episode ended
if not_done_masks[i].item() == 0:
episode_stats = dict()
episode_stats[metric_uuid] = infos[i][metric_uuid]
episode_stats["success"] = int(infos[i][metric_uuid] > 0)
episode_stats["reward"] = current_episode_reward[i].item()
if actions[i][0].cpu().detach().tolist() == 0:
called_stop = True
# if infos[i]["collisions"] == 0:
# final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))
# final_labels_arr = np.vstack((final_labels_arr, labels_arr[2:-1,:]))
# final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))
# final_labels_arr = np.vstack((final_labels_arr, create_traj_labels(input_arr[2:, :])))
print(final_input_arr.ndim)
if final_input_arr.ndim > 1:
print("Final Shape: {}".format(final_input_arr[2:-1, :].shape))
input_arr_embed, input_arr_oneHot = create_input_trajectory(final_input_arr[2:-1, :])
final_labels_arr = create_labels_trajectory(tmp_labels_arr[2:-1, :])
if args.save_traj:
save_trajectory(input_arr_embed, datasplit, traj_dir, traj_ctr, 'input', embed_type="embed")
save_trajectory(input_arr_oneHot, datasplit, traj_dir, traj_ctr, 'input', embed_type="oneHot")
save_trajectory(final_labels_arr, datasplit, traj_dir, traj_ctr, 'labels', embed_type="")
traj_ctr +=1
print("# Actions: {}".format(num_actions))
print("# Collisions: {}".format(infos[i]["collisions"]["count"]))
print("Success: {}".format(episode_stats["success"]))
print("Agent Episode Distance: {}".format(infos[i]['episode_distance']['agent_episode_distance'])) #TODO
print("Final Distance to Goal: {}".format(infos[i]['episode_distance']['goal_distance'])) #TODO
print("SPL: {}".format(episode_stats[metric_uuid]))
print("Soft SPL: {}".format(infos[i]["softspl"]))
print("Called Stop: {}".format(called_stop))
current_episode_reward[i] = 0
## not logging collisions
final_input_arr = np.array([0, 0, 0, 0])
## if logging collisions
# input_arr = np.array([0, 0, 0, 0, 0])
# final_input_arr = np.array([0, 0, 0, 0, 0])
tmp_labels_arr = np.array([0, 0, 0])
prev_base_state = [0, 0, 0]
num_actions = 0
print_once = True
called_stop = False
# use scene_id + episode_id as unique id for storing stats
stats_episodes[
(
current_episodes[i].scene_id,
current_episodes[i].episode_id,
)
] = episode_stats
if len(config.VIDEO_OPTION) > 0:
metric_value = episode_stats[metric_uuid]
video_name = (
f"episode_{current_episodes[i].episode_id}"
f"_{metric_name}_{metric_value:.2f}"
)
images_to_video(
rgb_frames[i], config.VIDEO_DIR, video_name
)
rgb_frames[i] = []
print("Episodes finished: {}".format(len(stats_episodes)))
# episode continues
elif len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
rgb_frames[i].append(frame)
# pausing self.envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# indexing along the batch dimensions
test_recurrent_hidden_states = test_recurrent_hidden_states[
:, state_index
]
not_done_masks = not_done_masks[state_index]
current_episode_reward = current_episode_reward[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if len(config.VIDEO_OPTION) > 0:
rgb_frames = [rgb_frames[i] for i in state_index]
aggregated_stats = dict()
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = sum(
[v[stat_key] for v in stats_episodes.values()]
)
num_episodes = len(stats_episodes)
episode_reward_mean = aggregated_stats["reward"] / num_episodes
episode_metric_mean = aggregated_stats[metric_uuid] / num_episodes
episode_success_mean = aggregated_stats["success"] / num_episodes
print(f"Number of episodes: {num_episodes}")
print(f"Average episode reward: {episode_reward_mean:.6f}")
print(f"Average episode success: {episode_success_mean:.6f}")
print(f"Average episode {metric_uuid}: {episode_metric_mean:.6f}")
print("Stats actions:", stats_actions)
envs.close()
if __name__ == "__main__":
main()
```
#### File: datasets/vln/r2r_vln_dataset.py
```python
import gzip
import json
import os
from typing import List, Optional
from habitat.config import Config
from habitat.core.dataset import Dataset
from habitat.core.registry import registry
from habitat.datasets.utils import VocabDict
from habitat.tasks.nav.nav import NavigationGoal
from habitat.tasks.vln.vln import InstructionData, VLNEpisode
DEFAULT_SCENE_PATH_PREFIX = "data/scene_datasets/"
@registry.register_dataset(name="R2RVLN-v1")
class VLNDatasetV1(Dataset):
r"""Class inherited from Dataset that loads a Vision and Language
Navigation dataset.
"""
episodes: List[VLNEpisode]
instruction_vocab: VocabDict
@staticmethod
def check_config_paths_exist(config: Config) -> bool:
return os.path.exists(
config.DATA_PATH.format(split=config.SPLIT)
) and os.path.exists(config.SCENES_DIR)
def __init__(self, config: Optional[Config] = None) -> None:
self.episodes = []
if config is None:
return
dataset_filename = config.DATA_PATH.format(split=config.SPLIT)
with gzip.open(dataset_filename, "rt") as f:
self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
self.episodes = list(
filter(self.build_content_scenes_filter(config), self.episodes)
)
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
deserialized = json.loads(json_str)
self.instruction_vocab = VocabDict(
word_list=deserialized["instruction_vocab"]["word_list"]
)
for episode in deserialized["episodes"]:
episode = VLNEpisode(**episode)
if scenes_dir is not None:
if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
episode.scene_id = episode.scene_id[
len(DEFAULT_SCENE_PATH_PREFIX) :
]
episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
episode.instruction = InstructionData(**episode.instruction)
for g_index, goal in enumerate(episode.goals):
episode.goals[g_index] = NavigationGoal(**goal)
self.episodes.append(episode)
```
#### File: tasks/eqa/__init__.py
```python
from habitat.core.embodied_task import EmbodiedTask
from habitat.core.registry import registry
def _try_register_eqa_task():
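    # if the EQA task's dependencies are missing, register a stub task that
    # re-raises the original ImportError only when EQA-v0 is instantiated,
    # instead of failing at import time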
try:
from habitat.tasks.eqa.eqa import EQATask # noqa: F401
except ImportError as e:
eqatask_import_error = e
@registry.register_task(name="EQA-v0")
class EQATaskImportError(EmbodiedTask):
def __init__(self, *args, **kwargs):
raise eqatask_import_error
``` |
{
"source": "joannezhouyi/visual_textual_cross_retrieval",
"score": 2
} |
#### File: joannezhouyi/visual_textual_cross_retrieval/test.py
```python
import pickle
import os
import time
import shutil
import torch
import data
from vocab import Vocabulary # NOQA
from model import VSE
from evaluation import i2t, t2i, AverageMeter, LogCollector, encode_data
import logging
import tensorboard_logger as tb_logger
import argparse
def main():
from vocab import Vocabulary
import evaluation
#evaluation.evalrank("runs/coco_vse++/model_best.pth.tar", data_path="data", split="test")
evaluation.evalrank("runs/coco_vse++_vse/model_best.pth.tar", data_path="data", split="test")
if __name__ == '__main__':
main()
``` |
{
"source": "JoanNgatia/Just-Do-It",
"score": 3
} |
#### File: Just-Do-It/apiv1/permissions.py
```python
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""Define account permissions."""
def has_object_permission(self, request, view, obj):
"""Check user associated with request is same object as account.
Read permissions allowed to any request.
"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.creator == request.user
```
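A minimal sketch (hypothetical view, not part of the repo) of how `IsOwnerOrReadOnly` is typically attached to a DRF view so that only the creator can mutate an object:
```python
from rest_framework import generics

from apiv1.permissions import IsOwnerOrReadOnly


class ExampleDetail(generics.RetrieveUpdateDestroyAPIView):
    # reads are allowed for anyone; writes only when obj.creator == request.user
    permission_classes = (IsOwnerOrReadOnly,)
```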
#### File: apiv1/serializers/accountserializer.py
```python
from rest_framework import serializers
from bucketlist.models import Account
class AccountSerializer(serializers.ModelSerializer):
"""Define User serialization fields."""
class Meta:
"""Define metadata the serializer should use."""
model = Account
fields = ('username', 'password', 'tagline')
        # keep the password write-only so it is never serialized back out
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        """Override create to hash the password via set_password."""
user = Account(
username=validated_data['username'],
tagline=validated_data['tagline']
)
user.set_password(validated_data['password'])
user.save()
return user
```
#### File: apiv1/views/accounts.py
```python
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from bucketlist.models import Account
from apiv1.serializers import accountserializer
class AccountsList(generics.ListCreateAPIView):
"""Use DRF viewset to define Account CRUD methods.
Handle the URL to create and account and list all accounts.
URL : /api/v1/users/
Args:
To create a user:
required - username , password
optional - tagline
Returns:
Dictionary containing users' details inclusive of
username, tagline and bucketlists created.
"""
queryset = Account.objects.all()
serializer_class = accountserializer.AccountSerializer
def get_permissions(self):
"""Set access permissions to Account model."""
# allow only account owner to update or delete an account.
if self.request.method in permissions.SAFE_METHODS:
return (permissions.AllowAny(), )
# allow any user to create an account.
if self.request.method == 'POST':
return (permissions.AllowAny(), )
return(permissions.IsAuthenticated(), )
def create(self, request):
"""Override viewsets .save method to allow for password hashing."""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
Account.objects.create_user(**serializer.validated_data)
return Response(serializer.validated_data,
status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad Request',
'message': 'Account could not be created with given details.'
}, status=status.HTTP_400_BAD_REQUEST)
class AccountsDetail(generics.RetrieveAPIView):
"""Allow admin access to user details.
Handle the URL to list a particular users details.
URL : /api/v1/user/<user_id>
Args:
pk -- the user id lookup field from the URL
Returns:
Dictionary containing a user's details inclusive of
username, tagline and bucketlists created.
"""
queryset = Account.objects.all()
serializer_class = accountserializer.AccountSerializer
```
#### File: bucketlist/tests/test_approutes.py
```python
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from bucketlist.models import Account, Bucketlist, Bucketlistitem
class TestBucketlistView(TestCase):
"""Test django bucketlist views."""
# Include 1 user,4 bucketlists and 4 bucketlistitems as dummy data
fixtures = ['bucketlist.json']
def setUp(self):
"""Create base data for testing."""
self.client = Client()
self.user = Account.objects.create_user(
username='test',
password='<PASSWORD>'
)
self.user.set_password('<PASSWORD>')
self.user.save()
self.login = self.client.login(
username='test', password='<PASSWORD>')
self.bucketlist = Bucketlist.objects.create(
name='test_bucketlist', creator=self.user)
self.bucketlistitem = Bucketlistitem.objects.create(
name='test_bucketlistitem', bucketlist=self.bucketlist)
def tearDown(self):
"""Databse clean up after successful test run."""
Account.objects.all().delete()
Bucketlist.objects.all().delete()
def test_user_registration(self):
"""Test that a new user can get registered on the system."""
response = self.client.post(reverse('register'),
{'username': 'test',
'password': '<PASSWORD>',
'confirm_password': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
def test_user_login(self):
"""Test that a registered user can login."""
response = self.client.post(reverse('login'),
{'username': 'test',
'password': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
# Test that a non-registered user cannot login
response = self.client.post(reverse('login'),
{'username': 'not test',
'password': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/login')
def test_user_logout(self):
"""Test that a logged in user can logout."""
response = self.client.post(reverse('logout'))
self.assertEqual(response.status_code, 302)
def test_can_access_bucketlists_view(self):
"""Test that a logged in user can access their bucketlists."""
response = self.client.get(
reverse('all_bucketlists'))
self.assertEqual(response.status_code, 200)
def test_access_to_bucketlist_creation(self):
"""Test that a user can create a bucketlist."""
# successful creation
response = self.client.post(
reverse('all_bucketlists'), {'name': 'Lets go to the zoo'})
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlist.objects.count(), 6)
# unsuccessful creation
response = self.client.post(
reverse('all_bucketlists'), {'name': ''})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bucketlist')
def test_access_to_bucketlist_update(self):
"""Test that a user can edit a bucketlist."""
response = self.client.post(
reverse('single_bucketlist_edit',
kwargs={'pk': self.bucketlist.id}),
{'name': 'Lets go to the zoo'})
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlist.objects.count(), 5)
def test_access_to_bucketlist_deletion(self):
"""Test that a user can deletet a bucketlist."""
response = self.client.get(
reverse('single_bucketlist_delete',
kwargs={'pk': self.bucketlist.id}))
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlist.objects.count(), 4)
def test_access_to_bucketlistitems(self):
"""Test that a user can view items in their bucketlists."""
response = self.client.get(
reverse('bucketlistitems_get',
kwargs={'pk': self.bucketlist.id}))
self.assertEqual(response.status_code, 200)
def test_addition_of_bucketlist_items(self):
"""Test that a user can create a bucketlist item."""
response = self.client.post(
reverse('bucketlistitems_get', kwargs={'pk': self.bucketlist.id}),
{'name': 'MAdtraxx!!'})
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlistitem.objects.count(), 6)
# unsuccessful item creation
response = self.client.post(
reverse('bucketlistitems_get', kwargs={'pk': self.bucketlist.id}),
{'name': ''})
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlistitem.objects.count(), 6)
def test_edition_of_bucketlistitems(self):
"""Test that a user can change the name of a bucketlist item."""
response = self.client.post(
reverse('bucketlistitems_update',
kwargs={'pk': self.bucketlistitem.id,
'bucketlist': self.bucketlist.id}),
{'name': 'MAdtraxx!!'})
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlistitem.objects.count(), 5)
self.assertEqual(
Bucketlistitem.objects.get(id=self.bucketlistitem.id).name,
'MAdtraxx!!')
def test_deletion_of_bucketlistitem(self):
"""Test that a user can delete a bucektlist item."""
response = self.client.get(
reverse('bucketlistitems_delete',
kwargs={'pk': self.bucketlistitem.id,
'bucketlist': self.bucketlist.id}))
self.assertEqual(response.status_code, 302)
self.assertEqual(Bucketlistitem.objects.count(), 4)
def test_edition_of_bucektlistitem_status(self):
"""Test that a user can change the status of a bucketlist item."""
response = self.client.get(
reverse('bucketlistitems_status',
kwargs={'pk': self.bucketlistitem.id,
'bucketlist': self.bucketlist.id}))
self.assertEqual(response.status_code, 302)
self.assertTrue(
Bucketlistitem.objects.get(id=self.bucketlistitem.id).done)
```
#### File: bucketlist/views/authentication_view.py
```python
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.template import RequestContext
from bucketlist.forms.forms_authentication import RegistrationForm, LoginForm
class IndexView(TemplateView):
"""Base view where they will all inherit data from."""
template_name = 'bucketlists/dashboard.html'
def get_context_data(self, **kwargs):
"""Return dictionary representing passed in context."""
context = super(IndexView, self).get_context_data(**kwargs)
context['registrationform'] = RegistrationForm()
context['loginform'] = LoginForm()
return context
class RegistrationView(IndexView):
"""Define Registration view on template."""
form_class = RegistrationForm
def post(self, request, **kwargs):
"""Method to create a new user."""
# Check that method is post to access data passed
form = self.form_class(request.POST)
if form.is_valid():
# save data passed into the database
new_user = form.save()
new_user = authenticate(
username=request.POST['username'],
password=request.POST['password'])
login(request, new_user)
messages.success(
request, "You've been successfully registered!")
return redirect(
'/bucketlists',
context_instance=RequestContext(request)
)
else:
messages.error(
request, 'Oops there was a problem on registration!')
for error in form.errors.values():
messages.add_message(request, messages.ERROR, error[0])
return redirect(
'/register',
context_instance=RequestContext(request)
)
class LoginView(IndexView):
"""Define login on index template view."""
form_class = LoginForm
def post(self, request, **kwargs):
"""Method to login a registered user."""
form = self.form_class(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
login(request, user)
messages.success(
request, 'Glad to have you back!!')
return redirect(
'/bucketlists',
context_instance=RequestContext(request)
)
else:
messages.error(
request, 'Incorrect username or password!')
return redirect(
'/login',
context_instance=RequestContext(request)
)
else:
context = super(LoginView, self).get_context_data(**kwargs)
context['loginform'] = form
return render(request, self.template_name, context)
```
#### File: bucketlist/views/bucketlistitem_view.py
```python
from django.shortcuts import redirect
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.template import RequestContext
from bucketlist.models import Bucketlistitem, Bucketlist
from bucketlist.forms.forms_bucketlist import BucketlistItemForm
class LoginRequiredMixin(object):
"""Enforce login for particular views."""
@method_decorator(login_required(login_url='/'))
def dispatch(self, request, *args, **kwargs):
"""Add login required functionality to all decorated class views."""
return super(LoginRequiredMixin, self).dispatch(
request, *args, **kwargs)
class AllBucketlistitemsView(LoginRequiredMixin, TemplateView):
"""View to handle retrieval and creation of bucketlist items."""
template_name = 'bucketlists/bucketitems.html'
form_class = BucketlistItemForm
def get_context_data(self, **kwargs):
"""Return dictionary representing passed in context."""
context = super(
AllBucketlistitemsView, self).get_context_data(**kwargs)
bucketlist = kwargs['pk']
context['bucketlist'] = Bucketlist.objects.get(id=bucketlist)
context['bucketitems'] = Bucketlistitem.objects.filter(
bucketlist_id=kwargs['pk'])
context['bucketlistitemform'] = BucketlistItemForm()
return context
def post(self, request, **kwargs):
"""Method to create a new bucketlist."""
form = self.form_class(request.POST)
if form.is_valid():
item_name = request.POST.get('name')
new_bucketitem = Bucketlistitem(
name=item_name,
bucketlist=Bucketlist.objects.get(id=kwargs['pk']))
new_bucketitem.save()
messages.success(
request, 'New Bucketlistitem added successfully!')
return redirect(
'/bucketlists/' + kwargs['pk'] + '/items/',
context_instance=RequestContext(request)
)
else:
messages.error(
request, 'Error at creation!')
return redirect(
'/bucketlists/' + kwargs['pk'] + '/items/',
context_instance=RequestContext(request)
)
class BucketlistitemUpdate(LoginRequiredMixin, TemplateView):
"""View logic to handle bucketlistitem name edition."""
def post(self, request, **kwargs):
"""Retrieve new details from request body."""
bucketlist = kwargs['bucketlist']
bucketlistitem = Bucketlistitem.objects.filter(
id=kwargs['pk'], bucketlist_id=bucketlist).first()
bucketlistitem.name = request.POST.get('name')
bucketlistitem.save()
messages.success(
request, 'Bucketlistitem edited successfully!')
return redirect('/bucketlists/' + kwargs['bucketlist'] + '/items/',
context_instance=RequestContext(request))
class BucketlistItemStatus(LoginRequiredMixin, TemplateView):
"""View logic for marking item as done or not."""
def get(self, request, **kwargs):
"""Retrieve item id from url passed."""
bucketlistitem_id = kwargs['pk']
bucketlistitem = Bucketlistitem.objects.get(id=bucketlistitem_id)
        bucketlistitem.done = not bucketlistitem.done
bucketlistitem.save()
return redirect('/bucketlists/' + kwargs['bucketlist'] + '/items/',
context_instance=RequestContext(request))
class BucketlistitemDelete(LoginRequiredMixin, TemplateView):
"""View logic to handle bucketlistitem deletion."""
def get(self, request, **kwargs):
"""Retrieve bucketlist id from request body and delete it."""
bucketlist = kwargs['bucketlist']
bucketlistitem = Bucketlistitem.objects.filter(
id=kwargs['pk'], bucketlist_id=bucketlist).first()
bucketlistitem.delete()
messages.success(
request, 'Bucketlistitem has been deleted!')
return redirect('/bucketlists/' + kwargs['bucketlist'] + '/items/',
context_instance=RequestContext(request))
``` |
{
"source": "joannhsiao/Cryptograhpy",
"score": 4
} |
#### File: joannhsiao/Cryptograhpy/Kid-RSA.py
```python
option = input("[E]ncryption, [D]ecryption, or [Q]uit -- ")
def key_generation(a, b, a1, b1):
M = a * b - 1
e = a1 * M + a
d = b1 * M + b
    n = (e * d - 1) // M   # exact integer division: e*d - 1 is always a multiple of M
    return e, d, n
def encryption(a, b, a1, b1):
e, d, n = key_generation(a, b, a1, b1)
print("You may publish your public key (n,e) = (", n, ", ", e, ")")
print("and keep your private key (n,d) = (",n, ", ", d, ") secret.")
plaintext = input("Plaintext - ")
cipher = []
for i in range(len(plaintext)):
cipher.append(str((ord(plaintext[i]) * e) % n))
ciphertext = " ".join(cipher)
print(ciphertext)
def decryption():
private = input("Your private key (n, d), separated by a space or comma -- ").replace(",", " ")
n, d = list(map(int, private.split(" ")))
ciphertext = input("Ciphertext (integers separated by spaces) -- ")
cipher = list(map(int, ciphertext.split(" ")))
plain = []
for i in range(len(cipher)):
plain.append(str((cipher[i] * d) % n))
print(" ".join(plain))
plaintext = ""
for i in range(len(plain)):
plaintext += chr(int(plain[i]))
print("Plaintext - ", plaintext)
def main():
if option.upper() == "E":
ints = input("Input 4 integers a, b, a', b' -- ")
a, b, a1, b1 = list(map(int, ints.split(" ")))
encryption(a, b, a1, b1)
elif option.upper() == "D":
decryption()
else:
return
if __name__ == "__main__":
main()
```
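A quick numeric sanity check of the Kid-RSA identity e*d = 1 (mod n) that the script above relies on; the parameters a, b, a', b' below are illustrative values, not taken from the source:
```python
# Kid-RSA key generation with example parameters; encrypt-then-decrypt
# restores every message p < n because e*d = M*n + 1, i.e. (e*d) % n == 1.
a, b, a1, b1 = 5, 7, 3, 4
M = a * b - 1            # 34
e = a1 * M + a           # 107
d = b1 * M + b           # 143
n = (e * d - 1) // M     # 450
assert (e * d) % n == 1
for p in range(2, n):
    assert (p * e * d) % n == p   # round trip recovers the plaintext
print("public key:", (n, e), "private key:", (n, d))
```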
#### File: joannhsiao/Cryptograhpy/Merkle-Hellman Knapsack Cryptosystem.py
```python
import math
import sys
import random
random.seed(0)
def random_num(n):
cnt = 1
a = [0] * n
while cnt <= n:
num = random.randint(sum(a), sum(a)+1000)
a[cnt-1] = num
cnt += 1
return a
def generate_key(n):
print("Input a list of n superincreasing integers, separated by commas or spaces.")
print("If you simply press , we shall randomly generate one for you.")
W = input("W? ").replace(",", "")
if W == '':
A = random_num(n)
else:
A = list(map(int, W.split(" ")))
print("Please input an integer larger than ", sum(A))
q = int(input("q? "))
    while q <= sum(A):   # q must be strictly larger than sum(A)
print("Please input an integer larger than ", sum(A))
q = int(input("q? "))
print("Please input an integer which is relatively prime with q.")
r = int(input("r? "))
while math.gcd(q, r) != 1:
print("Please input an integer which is relatively prime with q.")
r = int(input("r? "))
B = [(a * r) % q for a in A]
print("=========================")
print("Announce your public key:")
print("n = ", n)
print("B = ", B)
print("q = ", q)
print("=========================")
def encryption(n):
print("Input a list of n integers, separated by commas or spaces.")
b = input("B? ").replace(",", "")
B = list(map(int, b.split(" ")))
plaintext = input("Plaintext - ")
cipher = []
for i in range(len(plaintext)):
Ascii = str(bin(ord(plaintext[i])))[2:].zfill(n)
for j in range(len(Ascii)):
cipher.append(Ascii[j])
if len(cipher) % n != 0:
for i in range(n-int(len(cipher)%n)):
cipher.append('0')
ciphertext = []
for i in range(0, len(cipher), n):
num = 0
for j in range(n):
num += int(cipher[i+j]) * B[j]
ciphertext.append(num)
print("Ciphertext: ", ciphertext)
def decryption(n):
print("Input a list of n superincreasing integers, separated by commas or spaces.")
print("If you simply press , we shall randomly generate one for you.")
W = input("W? ")
if W == "\n":
A = random_num(n)
else:
A = list(map(int, W.split(" ")))
print("Please input an integer larger than ", sum(A))
q = int(input("q? "))
    while q <= sum(A):   # q must be strictly larger than sum(A)
print("Please input an integer larger than ", sum(A))
q = int(input("q? "))
print("Please input an integer which is relatively prime with q.")
r = int(input("r? "))
while math.gcd(q, r) != 1:
print("Please input an integer which is relatively prime with q.")
r = int(input("r? "))
    ciphertext = input("Input ciphertext (separated by spaces) - ").replace(",", " ")
    cipher = list(map(int, ciphertext.split()))
x = pow(r, -1, q)
plaintext = []
for i in range(len(cipher)):
y = (cipher[i] * x) % q
tmp = [0] * n
for j in range(n-1, -1, -1):
if y >= A[j]:
tmp[j] = 1
y -= A[j]
plain = ""
for j in range(n):
plain += str(tmp[j])
plaintext.append(int(plain, 2))
print("Plaintext: ", plaintext)
Plaintext = []
for i in range(len(plaintext)):
Plaintext.append(chr(plaintext[i]))
print("".join(Plaintext))
def main():
option = input("[K]ey generation| [E]ncryption | [D]ecryption?")
print("Select the block size.")
n = int(input("n?"))
if option.upper() == "K":
generate_key(n)
elif option.upper() == "E":
encryption(n)
elif option.upper() == "D":
decryption(n)
else:
print("Follow the rule please!!")
if __name__ == '__main__':
main()
```
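A self-contained sketch of the core knapsack mechanics used above: the superincreasing property, the modular mask, and the greedy decode. The numbers are illustrative, not from the source:
```python
# Superincreasing private key: each element exceeds the sum of all before it,
# which is what makes the greedy subset-sum decode below unambiguous.
A = [2, 3, 7, 14, 30]
assert all(A[i] > sum(A[:i]) for i in range(len(A)))
q, r = 71, 43                       # q > sum(A), gcd(q, r) == 1
B = [(a * r) % q for a in A]        # public key
bits = [1, 0, 1, 1, 0]              # one plaintext block
c = sum(bit * w for bit, w in zip(bits, B))
y = (c * pow(r, -1, q)) % q         # strip the modular mask
decoded = []
for a in reversed(A):               # greedy decode, largest element first
    decoded.append(1 if y >= a else 0)
    if y >= a:
        y -= a
assert decoded[::-1] == bits
```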
#### File: joannhsiao/Cryptograhpy/Rail_Fence-encrypt.py
```python
import argparse
parser = argparse.ArgumentParser(description="Rail Fence Cipher")
parser.add_argument("key", help="Number of rows (rails)")
parser.add_argument("-c", "--comma", help="display spaces as commas ", action='store_true')
args = parser.parse_args()
print(__file__, args.key)
if args.comma:
print("Show a space as ','")
else:
print("Show a space as ' '")
plaintext = ""
while True:
try:
n = input().replace("\n", "")
plaintext += n
plaintext += " "
except EOFError:
break
plaintext = plaintext[0:-1].upper()
if args.comma:
plaintext = plaintext.replace(" ", ",")
length = len(plaintext)
def fetch(text, key, num, length):
result = ""
for i in range(length):
if i % num == key:
result += text[i]
if i % num == num - key and i % num != 0 and i % num != int(num/2):
result += text[i]
return result
key = int(args.key)
ciphertext = ""
if key == 1:
    # a single rail leaves the text unchanged (and avoids i % 0 in fetch)
    ciphertext = plaintext
else:
    cmd = (key - 1) * 2   # zig-zag cycle length
    for i in range(key):
        ciphertext += fetch(plaintext, i, cmd, length)
print(ciphertext)
```
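For reference, a decryption counterpart sketch that inverts the zig-zag read-off above (same cycle length 2*(key-1); this helper is not part of the source file):
```python
# Rebuild the zig-zag row index of every position, then refill the rows
# from the ciphertext in the same row-by-row order the encryption used.
def rail_fence_decrypt(ciphertext, key):
    if key == 1:
        return ciphertext
    cycle = (key - 1) * 2
    rows = [i % cycle if i % cycle < key else cycle - (i % cycle)
            for i in range(len(ciphertext))]
    result = [''] * len(ciphertext)
    pos = 0
    for row in range(key):
        for i, r in enumerate(rows):
            if r == row:
                result[i] = ciphertext[pos]
                pos += 1
    return ''.join(result)
```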
#### File: joannhsiao/Cryptograhpy/RC4-decrypt.py
```python
import sys
len_argv = len(sys.argv)
str_key = ""
for i in range(1, len_argv):
str_key += sys.argv[i]
def two_ascii(text):
result = []
for i in range(len(text)):
result.append(ord(text[i]))
return result
def shuffle_S(S, key):
j = 0
for i in range(len(S)):
j = (j + S[i] + key[i%len(key)]) % len(S)
S[i], S[j] = S[j], S[i]
return S
def generate_key_stream(S, text):
j = 0
i = 0
key_stream = []
for k in range(len(text)):
i = (i + 1) % len(S)
j = (j + S[i]) % len(S)
S[i], S[j] = S[j], S[i]
t = (S[i] + S[j]) % len(S)
key_stream.append(S[t])
return key_stream
def two_decimal(text):
result = []
for i in range(0, len(text), 2):
result.append(int(text[i:i+2], 16))
return result
def main():
key = two_ascii(str_key)
S = list(range(0, 256))
S = shuffle_S(S, key)
ciphertext = input()
cipher = two_decimal(ciphertext)
Key_stream = generate_key_stream(S, cipher)
plaintext = ""
for i in range(len(cipher)):
plaintext += chr(cipher[i] ^ Key_stream[i])
print(plaintext)
if __name__ == "__main__":
main()
```
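Because RC4 is a symmetric stream cipher, the same keystream both encrypts and decrypts. A compact round-trip check mirroring the KSA/PRGA functions above (example key and message):
```python
def rc4(key_bytes, data):
    S = list(range(256))
    j = 0
    for i in range(256):                        # key scheduling (KSA)
        j = (j + S[i] + key_bytes[i % len(key_bytes)]) % 256
        S[i], S[j] = S[j], S[i]
    i = j = 0
    out = bytearray()
    for byte in data:                           # keystream generation (PRGA) + XOR
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(byte ^ S[(S[i] + S[j]) % 256])
    return bytes(out)

key, msg = b"Key", b"Plaintext"
assert rc4(key, rc4(key, msg)) == msg           # XOR-ing twice restores the input
```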
#### File: joannhsiao/Cryptograhpy/RSA.py
```python
import math
import random
option = input("[E]ncryption, [D]ecryption, or [Q]uit -- ")
def random_choose():
prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
cnt1 = random.randint(0, len(prime_list)-1)
cnt2 = random.randint(0, len(prime_list)-1)
while cnt2 == cnt1:
cnt2 = random.randint(0, len(prime_list)-1)
return prime_list[cnt1], prime_list[cnt2]
def euclid(n):
result = []
for i in range(n):
if math.gcd(i, n) == 1:
result.append(str(i))
return result
def encryption():
print("2 3 5 7 11")
print("13 17 19 23 29")
print("31 37 41 43 47")
print("53 59 61 67 71")
print("73 79 83 89 97")
print("Select two prime numbers from the above, separated by a space.")
prime_num = input("Or press ENTER and I'll randomly select two for you -- ")
if prime_num == '':
p, q = random_choose()
else:
p, q = list(map(int, prime_num.split(" ")))
print("The two prime numbers are", p, "and", q, ".")
n = p * q
print("n = ", p, "*", q, "=", n)
    eu_n = euclid(n)
    num_eu = len(eu_n)   # Euler's totient phi(n): how many integers are coprime with n
    print("Possible values of e, which must be coprime with phi(n) =", num_eu, ":")
    print(" ".join(euclid(num_eu)))
e = int(input("Choose one -- "))
print("You may publish your public key (n,e) = (", n, ",", e, ")")
d = pow(e, -1, num_eu)
print("and keep your private key (n,d) = (", n, ",", d, ") secret.")
plaintext = input("Plaintext - ")
cipher = []
for i in range(len(plaintext)):
        cipher.append(str(pow(ord(plaintext[i]), e, n)))  # three-argument pow keeps the exponentiation modular
print(" ".join(cipher))
def decryption():
private = input("Your private key (n, d), separated by a space or comma -- ").replace(",", " ")
n, d = list(map(int, private.split(" ")))
ciphertext = input("Ciphertext (integers separated by spaces) -- ")
cipher = list(map(int, ciphertext.split(" ")))
plain = []
for i in range(len(cipher)):
        plain.append(str(pow(cipher[i], d, n)))  # modular exponentiation
plaintext = ""
print(" ".join(plain))
for i in range(len(plain)):
plaintext += chr(int(plain[i]))
print(plaintext)
def main():
if option.upper() == "E":
encryption()
elif option.upper() == "D":
decryption()
else:
return
if __name__ == "__main__":
main()
``` |
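A worked check of the arithmetic with two primes that appear in the script's table (e chosen by hand, everything else derived):
```python
p, q = 61, 53
n = p * q                  # 3233
phi = (p - 1) * (q - 1)    # 3120, equals len(euclid(n)) computed above
e = 17                     # coprime with phi
d = pow(e, -1, phi)        # 2753, the modular inverse of e
m = ord('A')
c = pow(m, e, n)           # encrypt
assert pow(c, d, n) == m   # decrypt recovers the character code
```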
{
"source": "joannhsiao/Real-or-not-NLP-with-disaster-tweets",
"score": 3
} |
#### File: joannhsiao/Real-or-not-NLP-with-disaster-tweets/real_or_not.py
```python
import numpy as np
import pandas as pd
import nltk
import re
import string
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from spellchecker import SpellChecker
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn import linear_model, model_selection
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
train_data = pd.read_csv("train.csv", encoding="L1")
test_data = pd.read_csv("test.csv")
# one hot vector
count_vectorizer = CountVectorizer()
def clean_text(text):
# make text lowercase
text = text.lower()
# remove square brackets
text = re.sub( '\[.*?\]' , '' , text)
# remove links
text = re.sub( 'https?://\S+|www\.\S+' , '' , text)
# remove <>
text = re.sub( '<.*?>+' , '' , text)
# remove punctuation
text = re.sub( '[%s]' % re.escape(string.punctuation),'' , text)
# remove \n
text = re.sub( '\n' , '' , text)
# remove numbers
text = re.sub( '\w*\d\w*' , '' , text)
    # remove special characters from the text column
    text = re.sub('Û|û|Å|å|Â|â|Ã|ã|Ê|ê|È|è|ï|Ï|Ì|ì|Ó|ó|Ò|ò|ª|ñ|_', '', text)
return text
train_data['text']=train_data['text'].apply(lambda x : clean_text(x))
#Tokenization
#print(train_data['text'].head())
train_data['text']=train_data['text'].apply(lambda x:word_tokenize(x))
#stop word
def remove_stopwords(words):
text=[]
for word in words:
if word not in stopwords.words('english'):
text.append(word)
return text
train_data['text'] = train_data['text'].apply(lambda x: remove_stopwords(x))
# get the part-of-speech tag
def get_wordnet_pos(tag):
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return None
# lemmatization (reduce words to their base form)
def Lemmatizing(word):
text=[]
tagged_sent = pos_tag(word)
wnl = WordNetLemmatizer()
for tag in tagged_sent:
wordnet_pos = (get_wordnet_pos(tag[1]) or wordnet.NOUN)
text.append(wnl.lemmatize(tag[0], pos=wordnet_pos))
return text
train_data['text']=train_data['text'].apply(lambda x : Lemmatizing(x))
# spelling corrected
def correct_spellings(words):
    spell = SpellChecker()
    corrected_text = []
    # unknown() expects an iterable of words; passing a bare string would
    # iterate over its characters instead
    misspelled_words = spell.unknown(words)
    for word in words:
        if word in misspelled_words:
            corrected_text.append(spell.correction(word))
        else:
            corrected_text.append(word)
    return corrected_text
train_data['text']=train_data['text'].apply(lambda x : correct_spellings(x))
#final text
def final_text(words):
return ' '.join(words)
train_data['text']=train_data['text'].apply(lambda x:final_text(x))
#print(train_data['text'].head())
# fit the vocabulary on the training text first, then transform the test text
X_train = count_vectorizer.fit_transform(train_data["text"])
X_test = count_vectorizer.transform(test_data["text"])
X_train_text = count_vectorizer.get_feature_names()
y_train = train_data["target"]
print(X_train.todense().shape)
#print(X_train.todense().shape)
model = [linear_model.SGDClassifier(loss='modified_huber', penalty='l1', alpha=1e-05, n_iter_no_change=5, random_state=42),
linear_model.LogisticRegression(C=50,multi_class='ovr', penalty='l2', tol=0.1,solver='sag'),
SVC(C=100, gamma=0.001, kernel='rbf', probability=True),
MultinomialNB()]
for clf in model:
scores = model_selection.cross_val_score(clf, X_train, y_train, cv=3, scoring="f1")
print(clf.__class__.__name__, scores)
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
print(conf_mx)
clf.fit(X_train, y_train)
sample_submission = pd.read_csv("sample_submission.csv")
sample_submission["target"] = clf.predict(X_test)
sample_submission.to_csv("submission.csv", index=False)
``` |
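A side note on the fit/transform ordering above: the vectorizer must learn its vocabulary from the training corpus before it can transform unseen text. A toy illustration (the strings are made up):
```python
from sklearn.feature_extraction.text import CountVectorizer

train_docs = ["fire near the bridge", "sunny picnic day"]
test_docs = ["bridge fire reported"]
vec = CountVectorizer()
X_tr = vec.fit_transform(train_docs)   # learns the vocabulary from train only
X_te = vec.transform(test_docs)        # reuses it; out-of-vocabulary words are dropped
print(X_tr.shape, X_te.shape)          # both share the same column space
```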
{
"source": "joannjacob/dashboard-for-socialmedia-trend",
"score": 2
} |
#### File: api/corona_tweet_analysis/views.py
```python
import mongoengine
from json import loads, dumps
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from corona_tweet_analysis.utils.base_view import BaseViewManager
from corona_tweet_analysis.utils.responses import send_response
from corona_tweet_analysis.utils.constants import SUCCESS, FAIL, INVALID_PARAMETERS, BAD_REQUEST, UNAUTHORIZED
from corona_tweet_analysis.models import TwitterData, Category, CoronaReport, Data
from corona_tweet_analysis import serializers
from rest_framework.authentication import TokenAuthentication
from rest_framework import permissions, generics
from rest_framework.response import Response
from rest_framework.decorators import permission_classes
from corona_tweet_analysis.serializers import TwitterDataSerializer, CategorySerializer
class CategoryView(generics.ListAPIView):
queryset = Category.objects.all()
serializer_class = CategorySerializer
class CoronaWorldReportView(generics.ListCreateAPIView):
http_method_names = ['get', 'put']
def get(self, request, *args, **kwargs):
corona_report = CoronaReport.objects.order_by('-created_at').first()
data = loads(corona_report.to_json())
world = {}
for country in data['data']:
if country['name'] == "World":
world = country
return Response({
'status': SUCCESS,
'data': world,
'created_at': corona_report.created_at
})
    def put(self, request, *args, **kwargs):
        # permission_classes assigned inside a method is just a local variable
        # and is never read by DRF, so the check is enforced manually here
        if not (request.user.is_authenticated and request.user.is_superuser):
raise PermissionDenied
corona_report = CoronaReport.objects.order_by('-created_at').first()
new_cases = request.query_params.get('new_cases')
new_deaths = request.query_params.get('new_deaths')
total_deaths = request.query_params.get('total_deaths')
total_cases = request.query_params.get('total_cases')
if not (new_cases or new_deaths or total_deaths or total_cases):
return send_response({'status': SUCCESS, 'message':'No update values were given'})
report_data = loads(corona_report.to_json())
data_objects_list = []
for data in report_data['data']:
if data['name'] == 'World':
data['new_cases'] = int(new_cases) if new_cases else data['new_cases']
data['new_deaths'] = int(new_deaths) if new_deaths else data['new_deaths']
data['total_deaths'] = int(total_deaths) if total_deaths else data['total_deaths']
data['total_cases'] = int(total_cases) if total_cases else data['total_cases']
data_obj = Data(name=data['name'], new_cases=data['new_cases'], new_deaths=data['new_deaths'],
total_deaths=data['total_deaths'], total_cases=data['total_cases'])
data_objects_list.append(data_obj)
new_report = CoronaReport(data=data_objects_list)
new_report.save()
return send_response({'status': SUCCESS, 'message':'Corona Report updated'})
class CoronaReportView(generics.ListCreateAPIView):
http_method_names = ['get', 'put']
def get(self, request, *args, **kwargs):
country = request.query_params.get('country')
if not country:
return send_response({'status': INVALID_PARAMETERS, 'message':'Country not sent'})
country_data_report = CoronaReport.objects(data__name=country).order_by('-created_at').first()
if not country_data_report:
return send_response({'status': INVALID_PARAMETERS, 'message':'Country not found'})
corona_report = CoronaReport.objects.order_by('-created_at').first()
report_data = loads(corona_report.to_json())
created_at = report_data['created_at']
data = {}
for country_data in report_data['data']:
if country_data['name'] == country:
data = country_data
return Response({
'status': SUCCESS,
'data': data,
'created_at': corona_report.created_at
})
    def put(self, request, *args, **kwargs):
        # permission_classes assigned inside a method is just a local variable
        # and is never read by DRF, so the check is enforced manually here
        if not (request.user.is_authenticated and request.user.is_superuser):
raise PermissionDenied
country = request.query_params.get('country')
if not country:
return send_response({'status': INVALID_PARAMETERS, 'message':'Country not sent'})
country_data_report = CoronaReport.objects(data__name=country).order_by('-created_at').first()
if not country_data_report:
return send_response({'status': INVALID_PARAMETERS, 'message':'Country not found'})
corona_report = CoronaReport.objects.order_by('-created_at').first()
new_cases = request.query_params.get('new_cases')
new_deaths = request.query_params.get('new_deaths')
total_deaths = request.query_params.get('total_deaths')
total_cases = request.query_params.get('total_cases')
if not (new_cases or new_deaths or total_deaths or total_cases):
return send_response({'status': SUCCESS, 'message':'No update values were given'})
report_data = loads(corona_report.to_json())
data_objects_list = []
for data in report_data['data']:
if data['name'] == country:
data['new_cases'] = int(new_cases) if new_cases else data['new_cases']
data['new_deaths'] = int(new_deaths) if new_deaths else data['new_deaths']
data['total_deaths'] = int(total_deaths) if total_deaths else data['total_deaths']
data['total_cases'] = int(total_cases) if total_cases else data['total_cases']
data_obj = Data(name=data['name'], new_cases=data['new_cases'], new_deaths=data['new_deaths'],
total_deaths=data['total_deaths'], total_cases=data['total_cases'])
data_objects_list.append(data_obj)
new_report = CoronaReport(data=data_objects_list)
new_report.save()
return send_response({'status': SUCCESS, 'message':'Corona Report updated'})
class TwitterDataView(generics.ListAPIView):
queryset = TwitterData.objects(is_spam__ne=True).order_by('-created_at', '-_id')
serializer_class = TwitterDataSerializer
def get(self, request, *args, **kwargs):
category = request.query_params.get('category')
if category:
category_obj = Category.objects(_id=category).first()
if not category_obj:
return send_response({'status': INVALID_PARAMETERS, 'message':'Category not found'})
else:
self.queryset = self.queryset(category=category).order_by('-created_at', '-_id')
return super().get(request, *args, **kwargs)
class SpamCountView(generics.ListCreateAPIView):
http_method_names = ['put']
queryset = TwitterData.objects.all()
serializer_class = TwitterDataSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = [permissions.IsAuthenticated]
def put(self, request, *args, **kwargs):
try:
tweet_id = request.query_params.get('tweet_id')
if not tweet_id:
return send_response({'status': INVALID_PARAMETERS, 'message':'Tweet id is required'})
tweet = TwitterData.objects(id=tweet_id).first()
if not tweet:
return send_response({'status': FAIL, 'message':'Tweet not found'})
# Handling spam tweets
spam_users = tweet.spam_users
spam_count = tweet.spam_count
is_spam = False
if request.user.email in spam_users:
                return send_response({'status': BAD_REQUEST, 'data': 'You have already marked this as spam'})
else:
spam_users.append(request.user.email)
spam_count = tweet.spam_count + 1
if len(spam_users) > 10 or request.user.is_superuser:
is_spam = True
tweet.update(spam_count=spam_count, is_spam=is_spam, spam_users=spam_users)
return send_response({'status': SUCCESS, 'data': 'Spam count updated'})
        except Exception as err:
            return send_response({'status': FAIL, 'message': str(err)})
class StatisticsView(generics.ListCreateAPIView):
queryset = TwitterData.objects.all()
serializer_class = TwitterDataSerializer
def get(self, request, *args, **kwargs):
try:
statistics_dict = {}
country_confirmed_dict = {}
            # get the number of infected cases for each country
            countries = TwitterData.objects(country__ne='--NA--').distinct('country')
            for country in countries:
                confirmed_count = TwitterData.objects(category='INFECTED', country=country).count()
                country_confirmed_dict[country] = confirmed_count
# Calculate the number of infected cases, deaths and recovery cases based on category
infected_count = TwitterData.objects(category='INFECTED').count()
death_count = TwitterData.objects(category='DEATH').count()
recovered_count = TwitterData.objects(category='RECOVERED').count()
statistics_dict['country_confirmed_cases'] = country_confirmed_dict
statistics_dict['infected_count'] = infected_count
statistics_dict['death_count'] = death_count
statistics_dict['recovered_count'] = recovered_count
return send_response({'status': SUCCESS, 'data': statistics_dict})
        except Exception as err:
            return send_response({'status': FAIL, 'message': str(err)})
``` |
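One design note on the views above: since `permission_classes` only takes effect when declared at class level, the superuser guard can be expressed declaratively. A sketch (the view name is hypothetical; DRF's built-in `IsAdminUser` checks `is_staff`, so an exact `is_superuser` guard needs a custom permission class):
```python
from rest_framework import generics
from rest_framework.permissions import BasePermission, IsAuthenticated

class IsSuperuser(BasePermission):
    """Allow access only to superusers."""
    def has_permission(self, request, view):
        return bool(request.user and request.user.is_superuser)

class AdminOnlyReportView(generics.ListCreateAPIView):
    # declared at class level, DRF enforces these before get()/put() run
    permission_classes = [IsAuthenticated, IsSuperuser]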
{
"source": "joannjacob/PySparkCli",
"score": 3
} |
#### File: examples/mllib_sample/heart_test.py
```python
import pyspark.sql.functions as F
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import StandardScaler
from pyspark.sql import *
import pandas as pd
from pyspark.ml.classification import LogisticRegression
def isSick(x):
if x in (3, 7):
return 0
else:
return 1
spark = SparkSession.builder.appName("Predict Heart Disease").getOrCreate()
cols = ('age',
'sex',
'chest pain',
'resting blood pressure',
'serum cholesterol',
'fasting blood sugar',
'resting electrocardiographic results',
'maximum heart rate achieved',
'exercise induced angina',
'ST depression induced by exercise relative to rest',
'the slope of the peak exercise ST segment',
'number of major vessels ',
'thal',
'last')
data = pd.read_csv('./datasets/heart.csv', delimiter=' ', names=cols)
data = data.iloc[:, 0:13]
data['isSick'] = data['thal'].apply(isSick)
df = spark.createDataFrame(data)
print("HELLOO", df)
print("\n")
features = ('age',
'sex',
'chest pain',
'resting blood pressure',
'serum cholesterol',
'fasting blood sugar',
'resting electrocardiographic results',
'maximum heart rate achieved',
'exercise induced angina',
'ST depression induced by exercise relative to rest',
'the slope of the peak exercise ST segment',
'number of major vessels ')
assembler = VectorAssembler(inputCols=features, outputCol="features")
raw_data = assembler.transform(df)
raw_data.select("features").show(truncate=False)
standardscaler = StandardScaler().setInputCol(
"features").setOutputCol("Scaled_features")
raw_data = standardscaler.fit(raw_data).transform(raw_data)
raw_data.select("features", "Scaled_features").show(5)
training, test = raw_data.randomSplit([0.5, 0.5], seed=12345)
lr = LogisticRegression(
labelCol="isSick", featuresCol="Scaled_features", maxIter=10)
model = lr.fit(training)
predict_train = model.transform(training)
predict_test = model.transform(test)
predict_test.select("isSick", "prediction").show(10)
print("Multinomial coefficients: " + str(model.coefficientMatrix))
print("Multinomial intercepts: " + str(model.interceptVector))
check = predict_test.withColumn('correct', F.when(
F.col('isSick') == F.col('prediction'), 1).otherwise(0))
check.groupby("correct").count().show()
```
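The script prints raw predictions but never scores them. A minimal follow-up sketch using Spark ML's built-in evaluator (assumes the `predict_test` DataFrame and `isSick` label column from above):
```python
from pyspark.ml.evaluation import BinaryClassificationEvaluator

evaluator = BinaryClassificationEvaluator(
    labelCol="isSick",
    rawPredictionCol="rawPrediction",   # column added by LogisticRegression
    metricName="areaUnderROC")
print("Test AUC:", evaluator.evaluate(predict_test))
```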
#### File: src/jobs/transformation_job.py
```python
import json
from pyspark.sql import SparkSession
# Our transformation function:
def process_tweets(tweet):
    json_tweet = json.loads(tweet)
    print(json_tweet)
    spark = SparkSession.builder.getOrCreate()
    # spark.read.json expects a path or an RDD of JSON strings, not a dict,
    # so wrap the raw JSON string in a one-element RDD before reading it
    data_rdd = spark.sparkContext.parallelize([tweet])
    transformed_data = spark.read.json(data_rdd).rdd.map(transformfunc)
    return transformed_data
def transformfunc(result):
favCount = 0
user = None
if result["user"]["followers_count"]:
if result["user"]['followers_count'] > favCount:
favCount = result["user"]['followers_count']
print(favCount)
user = result["user"]["name"]
return {"user": result['user']['name'], "location": result['user']['location'], "text": result["text"]}
def getSparkSessionInstance(sparkConf):
if ('sparkSessionSingletonInstance' not in globals()):
globals()['sparkSessionSingletonInstance'] = SparkSession\
.builder\
.config(conf=sparkConf)\
.getOrCreate()
return globals()['sparkSessionSingletonInstance']
```
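For clarity on the `spark.read.json` fix above: the reader accepts a path or an RDD of JSON strings, so a single parsed tweet has to be re-wrapped. A standalone sketch with a made-up tweet:
```python
import json
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
tweet = {"user": {"name": "alice", "location": "NYC", "followers_count": 10},
         "text": "hello"}
rdd = spark.sparkContext.parallelize([json.dumps(tweet)])   # RDD of JSON strings
df = spark.read.json(rdd)
df.show()
```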
#### File: project_name/tests/test_etl_job.py
```python
import unittest
import json
import sys
import os
# Add to system path for pysparkcli modules
SRC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../src/')
sys.path.append(os.path.join(SRC_PATH))
from pyspark.sql.functions import mean
from pysparkcli.core.admin import SparkBuilder
from jobs.etl_job import transform_data
class SparkETLTests(unittest.TestCase):
"""Test suite for transformation in etl_job.py
"""
def setUp(self):
"""Start Spark, define config and path to test data
"""
with open(SRC_PATH + "configs/etl_config.json", "r") as f:
self.config = json.loads(f.read())
self.spark = SparkBuilder("test").build_sc()
self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')
def tearDown(self):
"""Stop Spark
"""
self.spark.stop()
def test_transform_data(self):
"""Test data transformer.
Using small chunks of input data and expected output data, we
test the transformation step to make sure it's working as
expected.
"""
# assemble
input_data = (
self.spark
.read
.parquet(self.test_data_path + 'employees'))
expected_data = (
self.spark
.read
.parquet(self.test_data_path + 'employees_report'))
expected_cols = len(expected_data.columns)
expected_rows = expected_data.count()
expected_avg_steps = (
expected_data
.agg(mean('steps_to_desk').alias('avg_steps_to_desk'))
.collect()[0]
['avg_steps_to_desk'])
        # act
        data_transformed = transform_data(input_data, 21)
        cols = len(data_transformed.columns)
        rows = data_transformed.count()
        avg_steps = (
            data_transformed
            .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))
            .collect()[0]
            ['avg_steps_to_desk'])
# assert
self.assertEqual(expected_cols, cols)
self.assertEqual(expected_rows, rows)
self.assertEqual(expected_avg_steps, avg_steps)
self.assertTrue([col in expected_data.columns
for col in data_transformed.columns])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Joannna-qiaohui/traffic-condition-classification",
"score": 2
} |
#### File: Joannna-qiaohui/traffic-condition-classification/RNN.py
```python
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
SEQ_LENGTH = 4
VEC_LENGTH = 30720
BATCH_SIZE = 35
learning_rate = 0.000001
TRAIN = 1200
# load the labels
with open("/media/lscsc/export/qiaohui/pytorch/code/status.txt", "r", encoding="utf-8") as f:
text = f.read()
label_txt = []
for i in range(len(text)//3):
k = 3*i
label_txt.append(text[k])
data_x=[]
data_y=label_txt
# load the feature vector corresponding to each image
for i in range(1,1501):
if i < 10:
number = '00000' + str(i)
if i>=10 and i<=99:
number = '0000' + str(i)
if i>=100 and i < 1000:
number = '000' + str(i)
if i>=1000:
number = '00' + str(i)
for j in range(1,5):
pkl_file = open('/media/lscsc/export/qiaohui/new/vector/'+number+'/vector_'+str(j)+'.pkl',"rb")
vector = pickle.load(pkl_file)
data_x.append(vector)
# build the training set
def get_train_data(batch_size=4,time_step=4,train_begin=0,train_end=4*TRAIN):
#batch_index=[]
data_train=data_x[train_begin:train_end]
# normalized_train_data=(data_train-np.mean(data_train,axis=0))/np.std(data_train,axis=0) #标准化
train_x,train_y=[],[] #训练集
for i in range(int(len(data_train)/time_step)):
# if i % batch_size==0:
# batch_index.append(i)
x=data_train[4*i:4*i+time_step]
y=data_y[i]
train_x.append(x)
train_y.append(y)
# print normalized_train_data
#batch_index.append((len(data_train)-time_step))
return train_x,train_y
# rnn = nn.LSTM(3*5*2048,10,2)
# convert each group of arrays into one tensor of shape [4, 30720]
# layout:
# [img1
#  img2
#  img3
#  img4]
def turn_into_tensor(x):
    bag = []
    for i in range(len(x)):
        x_tensor = torch.zeros((4, 3*5*2048))   # one tensor per 4-image sequence
        for j in range(4):
            x_tensor[j] = torch.Tensor(x[i][j])
        bag.append(x_tensor)
    return bag
# build the packed training and validation sets
x_train,y_train = get_train_data()
x_train = turn_into_tensor(x_train)
x_test,y_test = get_train_data(train_begin=4*TRAIN,train_end=4*1500)
x_test = turn_into_tensor(x_test)
# wrap the data into Dataset objects
class Mydata(Dataset):
def __init__(self,whole_x_collection,whole_y_collection):
self.x = whole_x_collection
self.label = whole_y_collection
def __getitem__(self,idx):
tensor = self.x[idx]
label = self.label[idx]
return tensor,label
def __len__(self):
return (len(self.x))
train_set = Mydata(x_train,y_train)
test_set = Mydata(x_test,y_test)
train_set = DataLoader(train_set,batch_size=BATCH_SIZE,shuffle=True)
test_set = DataLoader(test_set,batch_size=BATCH_SIZE,shuffle=True)
# network model
class Classifier(nn.Module):
def __init__(self, kwargs):
super(Classifier, self).__init__()
self.lstm = nn.LSTM(input_size=kwargs['vector_length'], hidden_size=256, num_layers=2)
self.fc1 = nn.Linear(4 * 256, 1024)
self.fc2 = nn.Linear(1024, 512)
self.output = nn.Linear(512, kwargs['output_dim'])
self.dropout = nn.Dropout(0.1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
    def forward(self, input_tensor, ph, pc):
        feature, (h, c) = self.lstm(input_tensor, (ph, pc))
        feature = feature.permute((1, 0, 2))
        feature = torch.reshape(feature, (BATCH_SIZE, -1))
        x = self.relu(self.fc1(feature))
        x = self.dropout(x)
        x = self.relu(self.fc2(x))
        x = self.output(x)
        # return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so adding self.softmax here would squash the scores twice
        return x
cls = Classifier(
kwargs={'seq_length':4, 'vector_length':30720, 'output_dim':3}
)
# compute the mean, used for the average accuracy and loss
def averagenum(num):
    return sum(num) / len(num)
def train(dataloader):
model.train()
loss_bag = []
acc_bag = []
count = 0
for batch, (X, y) in enumerate(dataloader):
count+=1
if batch == len(dataloader.dataset)//BATCH_SIZE -1:
break
train_correct = 0
selected_label=np.array(y).astype(int)
selected_label=torch.from_numpy(selected_label)
selected_label = selected_label.cuda()
selected_tensor=X.cuda()
x = selected_tensor.permute(1,0,2)
ph0, pc0 = torch.zeros(size=(2, BATCH_SIZE, 256)).cuda(), torch.zeros(size=(2, BATCH_SIZE, 256)).cuda()
optimizer.zero_grad()
        output = model(x, ph0, pc0)  # forward pass: outputs for one batch
# selected_label = torch.autograd.Variable(selected_label.long()).cuda()
loss = criterion(output, selected_label)
        loss_bag.append(loss.item())  # CrossEntropyLoss already averages over the batch
loss.backward()
optimizer.step()
for j in range(len(selected_label)):
if output.max(1)[1][j] == selected_label[j]:
train_correct += 1
accu = train_correct/BATCH_SIZE
acc_bag.append(accu)
        if batch % 20 == 0:
            print(f"loss: {loss.item():>7f} [{batch:>5d}/{TRAIN//BATCH_SIZE:>5d}], Accuracy: {(100*(train_correct/BATCH_SIZE)):>0.1f}%")
avg_loss = averagenum(loss_bag)
avg_acc = averagenum(acc_bag)
print(f"Train loss: {avg_loss:>7f},Train accuracy:{(100*(avg_acc)):>0.1f}%")
# Add parameters' gradients to their values, multiplied by learning rate
# for p in model.parameters():
# p.data.add_(p.grad.data, alpha=-learning_rate)
return avg_loss,avg_acc
def test(dataloader):
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
count = 0
for batch, (X, y) in enumerate(dataloader):
count+=1
if batch == len(dataloader.dataset)//BATCH_SIZE -1:
break
selected_label=np.array(y).astype(int)
selected_label=torch.from_numpy(selected_label)
selected_label = selected_label.cuda()
selected_tensor=X.cuda()
x = selected_tensor.permute(1,0,2)
ph0, pc0 = torch.zeros(size=(2, BATCH_SIZE, 256)).cuda(), torch.zeros(size=(2, BATCH_SIZE, 256)).cuda()
            pred = model(x, ph0, pc0)  # forward pass: outputs for one batch
test_loss += criterion(pred,selected_label).item()
for i in range(len(selected_label)):
if pred.max(1)[1][i] == selected_label[i]:
correct += 1
# correct += (pred.argmax(1) == selected_label).type(torch.float).sum().item()
    test_loss /= count   # criterion already returns a per-batch mean
correct /= BATCH_SIZE*count
print(f"Test loss: {test_loss:>8f},Test Accuracy: {(100*correct):>0.1f}% \n")
return correct
model = cls
device = torch.device('cuda:0')
model.to(device)
criterion = nn.CrossEntropyLoss()
epochs = 300
current_accuracy = 0
filepath = os.path.join('/media/lscsc/export/qiaohui/new', 'checkpoint_model_epoch_{}.pth.tar')  # path for saving the best model
correct_bag = []
train_correct_bag=[]
train_loss_bag=[]
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [20, 30, 40, 50], 0.1, last_epoch=-1)
optimizer.zero_grad()
batch_number_collection = []
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
avg_loss,avg_acc = train(train_set)
train_correct_bag.append(avg_acc)
train_loss_bag.append(avg_loss)
scheduler.step()
new_correct = test(test_set)
correct_bag.append(new_correct)
    # checkpoint when the accuracy improves (note: keyed on train, not test, accuracy)
    if avg_acc > current_accuracy:
        torch.save(model, filepath)
        current_accuracy = avg_acc
# plots: train accuracy, train loss and test accuracy
plt.figure()
plt.plot(range(1,len(train_correct_bag)+1),train_correct_bag)
plt.title('Train Average Accuracy')
plt.savefig('/media/lscsc/export/qiaohui/new/Train Average Accuracy.jpg')
plt.figure()
plt.plot(range(1,len(train_loss_bag)+1),train_loss_bag)
plt.title('Train Average Loss')
plt.savefig('/media/lscsc/export/qiaohui/new/Train Average Loss.jpg')
plt.figure()
plt.plot(range(1,len(correct_bag)+1),correct_bag)
plt.title('Test Accuracy')
plt.savefig('/media/lscsc/export/qiaohui/new/Test Accuracy.jpg')
print("Done!")
``` |
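On the softmax fix in `Classifier.forward`: `nn.CrossEntropyLoss` applies log-softmax internally, so it must be fed raw logits. A short demonstration with made-up scores:
```python
import torch
import torch.nn as nn

ce = nn.CrossEntropyLoss()
logits = torch.tensor([[2.0, 0.5, -1.0]])
target = torch.tensor([0])
print(ce(logits, target).item())                         # correct usage: raw logits
print(ce(torch.softmax(logits, dim=1), target).item())   # double-squashed, misleading loss
```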
{
"source": "Joannn-li/RM_ICS_Debugging_Challenge",
"score": 3
} |
#### File: Joannn-li/RM_ICS_Debugging_Challenge/Joanna_ICS_Debugging_Challenge_01.py
```python
import random
mt = ['zombie', 'goblin', 'dragon']
class Monster:
    def __init__(self, x=18, y='Anthy', z='male', b=100):
self.age = x
self.name = y
self.gender = z
self.hitpoint = b
        if len(self.name) > 4:
            self.type = 'zombie'
        else:
self.type = 'goblin'
def readout(self):
print(self.name)
print(self.age)
print(self.gender)
print(self.type)
print(self.hitpoint)
c=Monster()
c.readout()
``` |
{
"source": "Joannsaj/blog",
"score": 2
} |
#### File: blog/app/request.py
```python
import urllib.request,json
from .models import Quote
# Getting the base url
base_url = None
def configure_request(app):
global base_url
base_url = app.config['QUOTE_BASE_URL']
def get_quotes():
with urllib.request.urlopen(base_url) as url:
get_quotes_data = url.read()
get_quotes_response = json.loads(get_quotes_data)
    quote_object = None
if get_quotes_response:
author = get_quotes_response.get('author')
id = get_quotes_response.get('id')
quote = get_quotes_response.get('quote')
permalink = get_quotes_response.get('permalink')
quote_object = Quote(author, id, quote, permalink)
return quote_object
```
#### File: blog/tests/test_blog.py
```python
import unittest
from app.models import Blog, User
from app import db
class TestBlog(unittest.TestCase):
def setUp(self):
self.user_James = User(username= 'James', email='<EMAIL>', bio= 'Hello I am James', profile_pic_path='app/static/photos', pass_secure= 'potato' )
self.new_blog = Blog(user= self.user_James, title= 'BLM', blog= 'Our Dear Lives Matter')
def test_check_instance_variables(self):
        self.assertEqual(self.new_blog.user, self.user_James)
        self.assertEqual(self.new_blog.title, 'BLM')
        self.assertEqual(self.new_blog.blog, 'Our Dear Lives Matter')
def test_save_blog(self):
self.new_blog.save_blog()
self.assertTrue(len(Blog.query.all())>0)
```
#### File: blog/tests/test_quote.py
```python
import unittest
from app.models import Quote
class QuoteTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Quote class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_quote = Quote('Joan',1234,'Python Must Be Crazy','https://something/something')
def test_instance(self):
self.assertTrue(isinstance(self.new_quote,Quote))
``` |
{
"source": "Joannsaj/gallery",
"score": 2
} |
#### File: gallery/pics/models.py
```python
from django.db import models
from cloudinary.models import CloudinaryField
class Location(models.Model):
location = models.CharField(max_length =30)
def __str__(self):
return self.location
    def save_location(self):
        self.save()
    def delete_location(self):
        self.delete()
class Category(models.Model):
category = models.CharField(max_length =30)
def __str__(self):
return self.category
    def save_category(self):
        self.save()
    def delete_category(self):
        self.delete()
class Photo(models.Model):
image = CloudinaryField('image')
name = models.CharField(max_length =30)
description = models.TextField()
location = models.ForeignKey(Location, on_delete= models.SET_NULL, null= True)
category = models.ForeignKey(Category, on_delete= models.CASCADE, default='')
def __str__(self):
return self.name
    def save_image(self):
        self.save()
    def delete_image(self):
        self.delete()
@classmethod
def images(cls):
images = cls.objects.all()
return images
@classmethod
def update_image(cls, id, image):
images = cls.objects.filter(id=id).update(image = image)
    @classmethod
    def get_image_by_id(cls, id):
        image = cls.objects.get(id=id)
        return image
@classmethod
def search_image(cls, search_term):
images = cls.objects.filter(category__category__icontains=search_term)
return images
    @classmethod
    def filter_by_location(cls, location):
        images = cls.objects.filter(location__location=location)
        return images
``` |
{
"source": "Joannsaj/impression",
"score": 3
} |
#### File: impression/tests/pitch_test.py
```python
import unittest
from app.models import Pitch,User
from app import db
class TestReview(unittest.TestCase):
def setUp(self):
self.user_James = User(username = 'James',password = '<PASSWORD>', email = '<EMAIL>')
self.new_pitch = Pitch(id=2, pitch_title='title', category='slogan', pitch ='This pitch is the best thing since sliced bread',user = self.user_James )
    def tearDown(self):
        Pitch.query.delete()
        User.query.delete()
    def test_check_instance_variables(self):
        self.assertEqual(self.new_pitch.id, 2)
        self.assertEqual(self.new_pitch.pitch_title, 'title')
        self.assertEqual(self.new_pitch.category, 'slogan')
        self.assertEqual(self.new_pitch.pitch, 'This pitch is the best thing since sliced bread')
        self.assertEqual(self.new_pitch.user, self.user_James)
def test_save_pitch(self):
self.new_pitch.save_pitch()
self.assertTrue(len(Pitch.query.all())>0)
``` |
{
"source": "Joannsaj/password",
"score": 4
} |
#### File: Joannsaj/password/credentials.py
```python
class Credentials:
"""
Class that generates new instances of credentials.
"""
credentials_list = [] #empty user list
def __init__(self,account,username,password):
self.account = account
self.username = username
self.password = password
def save_credentials(self):
'''
save_credentials method saves credentials objects into credentials_list
'''
Credentials.credentials_list.append(self)
def delete_credentials(self):
'''
delete_credentials method deletes a saved credential from the credentials_list
'''
Credentials.credentials_list.remove(self)
@classmethod
def find_by_account(cls,account):
'''
        Method that takes in an account name and returns the credentials that match it.
        Args:
            account: Account name to search for
        Returns:
            Credentials of the entry that matches the account.
'''
for credentials in cls.credentials_list:
if credentials.account == account:
return credentials
@classmethod
def display_credentials(cls):
'''
method that returns the credentials list
'''
return cls.credentials_list
``` |
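A short usage sketch of the class above (illustrative account values):
```python
cred = Credentials("twitter", "joan", "s3cret")
cred.save_credentials()
found = Credentials.find_by_account("twitter")
print(found.username if found else "not found")   # -> joan
cred.delete_credentials()
print(Credentials.display_credentials())          # -> []
```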
{
"source": "Joannsaj/projects",
"score": 2
} |
#### File: projects/projects/models.py
```python
from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
profile_pic = CloudinaryField('image', null=True)
contact = models.CharField(max_length=30)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
bio = models.TextField(null=True)
name = models.CharField(max_length=100 ,null=True)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
def __str__(self):
return self.bio
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Project(models.Model):
title = models.CharField(max_length=60)
project_image = CloudinaryField('image')
description = models.TextField()
owner = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='project')
created = models.DateTimeField(auto_now_add=True, null=True)
link = models.URLField(max_length=200)
def __str__(self):
return self.title
@classmethod
def projects(cls):
projects = cls.objects.all()
return projects
@classmethod
def search_project(cls,search_term ):
return cls.objects.filter(title__icontains=search_term).all()
class Rating(models.Model):
RATING_CHOICES = (
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
(7, 7),
(8, 8),
(9, 9),
(10, 10),
)
    design = models.DecimalField(choices=RATING_CHOICES, max_digits=4, decimal_places=2)  # max_digits=4 so a rating of 10.00 fits
    usability = models.DecimalField(choices=RATING_CHOICES, max_digits=4, decimal_places=2)
    content = models.DecimalField(choices=RATING_CHOICES, max_digits=4, decimal_places=2)
rater = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='rater')
    def __str__(self):
        return str(self.rater)
def overall(self):
avg_rating = (self.design + self.usability + self.content )/3
return avg_rating
``` |
{
"source": "joanpe/dPCA",
"score": 2
} |
#### File: dPCA/dualtask/data3task.py
```python
import numpy as np
import matplotlib.pylab as plt
import random
def get_inputs_outputs(n_batch, n_time, n_bits, gng_time, lamb,
delay_max, noise, mat_conv=[0, 1]):
    gng_time = 10  # note: overrides the gng_time argument; the distractor is fixed at t = 10
# Get random task type trials
task_choice = np.random.choice(3, size=n_batch)
inputs = []
outputs =[]
stim_conf = []
vec_tau = []
for ind_btch in range(n_batch):
if task_choice[ind_btch]==0:
# Get trial for Dualtask
inp, out, conf, tau = dual_task(n_time, n_bits, gng_time,
lamb, delay_max, noise,
mat_conv=[0, 1])
elif task_choice[ind_btch]==1:
# Get trial for DPA
inp, out, conf, tau = dpa(n_time, n_bits, gng_time, lamb,
delay_max, noise, mat_conv=[0, 1])
else:
# Get trial for GNG
inp, out, conf, tau = gng(n_time, n_bits, gng_time, lamb,
delay_max, noise, mat_conv=[0, 1])
# Add together all trials in a batch
inputs.append(inp)
outputs.append(out)
stim_conf.append(conf)
vec_tau.append(tau)
inputs = np.array(inputs)
outputs = np.array(outputs)
stim_conf = np.array(stim_conf)
stim_conf.reshape(n_batch, 4)
vec_tau = np.array(vec_tau)
vec_tau.reshape(n_batch, 1)
return {'inputs': inputs, 'output': outputs, 'task_choice': task_choice,
'stim_conf': stim_conf, 'vec_tau': vec_tau}
# Dual Task stimulus structure
def dual_task(n_time, n_bits, gng_time, lamb, delay_max, noise,
mat_conv=[0, 1]):
# inputs mat
inputs = np.zeros([n_time, n_bits])
# build dpa structure
dpa_stim1 = np.arange((n_bits-2)/2)
stim1_seq, choice1 = get_stims(dpa_stim1, 1)
dpa_stim2 = np.arange((n_bits-2)/2, (n_bits-2))
stim2_seq, choice2 = get_stims(dpa_stim2, 1)
# ground truth dpa
gt_dpa = choice1 == choice2
gt_dpa = gt_dpa*1
# build go-noGo task:
gng_stim = np.arange((n_bits-2), n_bits)
gng_stim_seq, gt_gng = get_stims(gng_stim, 1)
# DPA1 stimulus
inputs[1, stim1_seq] = 1
# dpa2 presented at delay gng_time + random delay between gng_time + 2
# and gng_time + 2 + delay_max. tau in range[0,9]
if delay_max == 0:
inputs[n_time-5, stim2_seq] = 1
tau = 0
else:
# tau= time at which dpa2 appears
tau = np.random.choice(delay_max, size=1)+gng_time+2
if tau < n_time:
inputs[tau, stim2_seq] = 1
else:
raise ValueError('Delay exceed trial time.')
# Parametrization of gng stimulus
inputs[gng_time-1, gng_stim_seq] = 1-lamb
# Example: S5 --> index 4, S1 --> index 0, mat_conv[S5] = 0
inputs[gng_time-1, mat_conv[gt_gng]] = lamb
# output (note that n_bits could actually be 1 here because we just
# need one decision. I kept it as it is for the flipFlop task
# to avoid issues in other parts of the algorithm)
outputs = np.zeros([n_time, n_bits])
outputs[n_time-1, 0] = gt_dpa
# distractor time = gng_time
outputs[gng_time, 0] = gt_gng
# Adding noise to the inputs
inputs += np.random.normal(scale=noise, size=inputs.shape)
# stim configuration
stim_conf = np.array([choice1, choice2, gt_gng,
gt_dpa])
return inputs, outputs, stim_conf, tau
# DPA task stimulus structure
def dpa(n_time, n_bits, gng_time, lamb, delay_max, noise,
mat_conv=[0, 1]):
gt_gng = 2
# inputs mat
inputs = np.zeros([n_time, n_bits])
# build dpa structure
dpa_stim1 = np.arange((n_bits-2)/2)
stim1_seq, choice1 = get_stims(dpa_stim1, 1)
dpa_stim2 = np.arange((n_bits-2)/2, (n_bits-2))
stim2_seq, choice2 = get_stims(dpa_stim2, 1)
# ground truth dpa
gt_dpa = choice1 == choice2
gt_dpa = gt_dpa*1
# DPA1 stimulus
inputs[1, stim1_seq] = 1
# dpa2 presented at delay gng_time + random delay between gng_time + 2
# and gng_time + 2 + delay_max. tau in range[0,9]
if delay_max == 0:
inputs[n_time-5, stim2_seq] = 1
tau = 0
else:
# tau= time at which dpa2 appears
tau = np.random.choice(delay_max, size=1)+gng_time+2
if tau < n_time:
inputs[tau, stim2_seq] = 1
else:
raise ValueError('Delay exceed trial time.')
# output (note that n_bits could actually be 1 here because we just
# need one decision. I kept it as it is for the flipFlop task
# to avoid issues in other parts of the algorithm)
outputs = np.zeros([n_time, n_bits])
outputs[n_time-1, 0] = gt_dpa
# Adding noise to the inputs
inputs += np.random.normal(scale=noise, size=inputs.shape)
# stim configuration
stim_conf = np.array([choice1, choice2, gt_gng,
gt_dpa])
return inputs, outputs, stim_conf, tau
# Go no Go task stimulus structure
def gng(n_time, n_bits, gng_time, lamb, delay_max, noise,
mat_conv=[0, 1]):
gt_dpa = 2
choice1, choice2 = 2, 2
# inputs mat
inputs = np.zeros([n_time, n_bits])
# build go-noGo task
gng_stim = np.arange((n_bits-2), n_bits)
gng_stim_seq, gt_gng = get_stims(gng_stim, 1)
# Parametrization of gng stimulus
inputs[gng_time-1, gng_stim_seq] = 1-lamb
# Example: S5 --> index 4, S1 --> index 0, mat_conv[S5] = 0
inputs[gng_time-1, mat_conv[gt_gng]] = lamb
# output (note that n_bits could actually be 1 here because we just
# need one decision. I kept it as it is for the flipFlop task
# to avoid issues in other parts of the algorithm)
outputs = np.zeros([n_time, n_bits])
# distractor time = gng_time
outputs[gng_time, 0] = gt_gng
# Adding noise to the inputs
inputs += np.random.normal(scale=noise, size=inputs.shape)
# Vector with the delays of dp2
tau = 0
# stim configuration
stim_conf = np.array([choice1, choice2, gt_gng,
gt_dpa])
return inputs, outputs, stim_conf, tau
def get_stims(stim, n_batch):
choice = np.random.choice(stim.shape[0])
# choice = np.concatenate((np.zeros(int(n_batch/2,)),
# np.ones(int(n_batch/2,)))).astype(int)
stim_seq = stim[choice].astype(int)
return stim_seq, choice
if __name__ == '__main__':
plt.close('all')
n_batch = 10
n_time = 8
n_bits = 6
gng_time = 3
example_trials = get_inputs_outputs(n_batch, n_time, n_bits, gng_time)
# print(example_trials['inputs'][0, :, :].T)
# print('----')
# print(example_trials['inputs'][1, :, :].T)
# print('----')
# print(example_trials['inputs'][2, :, :].T)
# print('----')
# print(example_trials['inputs'][3, :, :].T)
# print('----')
# print(example_trials['output'].shape)
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(np.squeeze(example_trials['inputs'][0, :, :].T), aspect='auto')
plt.subplot(2, 1, 2)
plt.plot(np.squeeze(example_trials['output'][0, :, 0]))
```
#### File: dPCA/dualtask/DualTask.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data
import data3task
from RecurrentWhisperer import RecurrentWhisperer
class DualTask(RecurrentWhisperer):
''' Class for training an RNN to perform the dual task described in
Zhang et al. 2018 bioRxiv
Task:
Briefly, the task is composed of two nested tasks: a delay pair
association task that requires the RNN to compare two stimuli
presented with a delay, and a simple Go-NoGo task that works as
a distractor.
This class generates synthetic data for the task via
generate_dualtask_trials(...).
Usage:
This class trains an RNN to generate the correct outputs given the
inputs of the dual-task. All that is needed to get started is to
construct a dualtask object and to call .train on that object:
# dict of hyperparameter key/value pairs
# (see 'Hyperparameters' section below)
hps = {...}
ff = DualTask(**hps)
ff.train()
Hyperparameters:
rnn_type: string specifying the architecture of the RNN. Currently
must be one of {'vanilla', 'gru', 'lstm'}. Default: 'vanilla'.
n_hidden: int specifying the number of hidden units in the RNN.
Default: 24.
data_hps: dict containing hyperparameters for generating synthetic
data. Contains the following keys:
'n_batch': int specifying the number of synthetic trials to use
per training batch (i.e., for one gradient step). Default: 1024.
'n_time': int specifying the duration of each synthetic trial
(measured in timesteps). Default: 32.
'n_bits': int specifying the number of input channels into the
FlipFlop device (which will also be the number of output channels).
Default: 6 (corresponding to the six stimuli).
'gng_time': time at which the go-noGo stimulus will be presented.
Should be smaller than n_time
'noise': std of gaussian noise added independently to each channel.
'lamb': parametrization of the stimulus S5, S6. Default: 0
        'delay_max': Maximum delay of appearance of the dpa2 stimulus.
        Default: 5
log_dir: string specifying the top-level directory for saving various
training runs (where each training run is specified by a different set
of hyperparameter settings). When tuning hyperparameters, log_dir is
meant to be constant across models. Default: '/tmp/dualtask_logs/'.
n_trials_plot: int specifying the number of synthetic trials to plot
per visualization update. Default: 4.
'''
@staticmethod
def _default_hash_hyperparameters():
'''Defines default hyperparameters, specific to DualTask, for the set
of hyperparameters that are hashed to define a directory structure for
easily managing multiple runs of the RNN training (i.e., using
different hyperparameter settings). Additional default hyperparameters
are defined in RecurrentWhisperer (from which DualTask inherits).
Args:
None.
Returns:
dict of hyperparameters.
'''
return {
'rnn_type': 'vanilla',
'n_hidden': 24,
'data_hps': {
'n_batch': 1028,
'n_time': 32,
'n_bits': 6,
'gng_time': 10,
'noise': 0.1,
'lamb': 0.25,
'delay_max': 5}
}
@staticmethod
def _default_non_hash_hyperparameters():
'''Defines default hyperparameters, specific to DualTask, for the set
of hyperparameters that are NOT hashed. Additional default
hyperparameters are defined in RecurrentWhisperer (from which DualTask
inherits).
Args:
None.
Returns:
dict of hyperparameters.
'''
return {
'log_dir': '/tmp/dualtask_logs/',
'n_trials_plot': 6,
}
def _setup_model(self):
'''Defines an RNN in Tensorflow.
See docstring in RecurrentWhisperer.
'''
tf.reset_default_graph()
hps = self.hps
n_hidden = hps.n_hidden
data_hps = hps.data_hps
n_batch = data_hps['n_batch']
n_time = data_hps['n_time']
n_inputs = data_hps['n_bits']
n_output = n_inputs
gng_time = data_hps['gng_time']
# Data handling
self.inputs_bxtxd = tf.placeholder(tf.float32,
[n_batch, n_time, n_inputs])
self.output_bxtxd = tf.placeholder(tf.float32,
[n_batch, n_time, n_output])
self.task = tf.placeholder(tf.float32, n_batch)
# RNN
if hps.rnn_type == 'vanilla':
self.rnn_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
elif hps.rnn_type == 'gru':
self.rnn_cell = tf.nn.rnn_cell.GRUCell(n_hidden)
elif hps.rnn_type == 'lstm':
self.rnn_cell = tf.nn.rnn_cell.LSTMCell(n_hidden)
else:
raise ValueError('Hyperparameter rnn_type must be one of '
'[vanilla, gru, lstm] but was %s' % hps.rnn_type)
initial_state = self.rnn_cell.zero_state(n_batch, dtype=tf.float32)
self.hidden_bxtxd, _ = tf.nn.dynamic_rnn(self.rnn_cell,
self.inputs_bxtxd,
initial_state=initial_state)
# # add noise to hidden states (neurons) with:
# self.noise = tf.constant(0.2, dtype='float32')
# self.hidden_bxtxd += tf.random.normal(shape=tf.shape(
# self.hidden_bxtxd), stddev=self.noise, dtype=tf.float32)
# Readout from RNN
np_W_out, np_b_out = self._np_init_weight_matrix(n_hidden, n_output)
self.W_out = tf.Variable(np_W_out, dtype=tf.float32)
self.b_out = tf.Variable(np_b_out, dtype=tf.float32)
self.pred_output_bxtxd = tf.tensordot(self.hidden_bxtxd,
self.W_out, axes=1) + self.b_out
# Loss
# self.loss =\
# tf.nn.sigmoid_cross_entropy_with_logits(labels=self.output_bxtxd,
# logits=self.pred_output_bxtxd)
# COMPUTE LOSS IN ALL TIMESTEPS
self.loss = tf.reduce_mean(tf.squared_difference(
self.output_bxtxd, self.pred_output_bxtxd))
# MEASURE OF ACCURACY
comparison_dpa = tf.equal(tf.round(
self.pred_output_bxtxd[:, n_time-1, 0]),
self.output_bxtxd[:, n_time-1, 0])
self.acc_dpa = tf.reduce_mean(tf.cast(comparison_dpa, tf.float32))
comparison_gng = tf.equal(tf.round(
self.pred_output_bxtxd[:, 10, 0]),
self.output_bxtxd[:, 10, 0])
self.acc_gng = tf.reduce_mean(tf.cast(comparison_gng, tf.float32))
# Measure of accuracy for each of the task: dual, DPA, GNG
if gng_time == -1:
task = self.task
zeros = tf.zeros(n_batch, tf.float32)
ones = tf.ones(n_batch, tf.float32)
twos = ones*2
# Dual
mask = tf.equal(task, zeros)
out = self.output_bxtxd[:, n_time-1, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, n_time-1, 0]
self.pred_out_dpa_dual = tf.boolean_mask(pred_out, mask)
self.comparison_dpa_dual = tf.equal(tf.round(self.pred_out_dpa_dual), out_dual)
self.acc_dpa_dual = tf.reduce_mean(tf.cast(self.comparison_dpa_dual,
tf.float32))
out = self.output_bxtxd[:, 10, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, 10, 0]
pred_out_dual = tf.boolean_mask(pred_out, mask)
self.comparison_gng_dual = tf.equal(tf.round(pred_out_dual), out_dual)
self.acc_gng_dual = tf.reduce_mean(tf.cast(self.comparison_gng_dual,
tf.float32))
# DPA alone
mask = tf.equal(task, ones)
out = self.output_bxtxd[:, n_time-1, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, n_time-1, 0]
self.pred_out_dpa_dpa = tf.boolean_mask(pred_out, mask)
self.comparison_dpa_dpa = tf.equal(tf.round(self.pred_out_dpa_dpa), out_dual)
self.acc_dpa_dpa = tf.reduce_mean(tf.cast(self.comparison_dpa_dpa,
tf.float32))
out = self.output_bxtxd[:, 10, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, 10, 0]
pred_out_dual = tf.boolean_mask(pred_out, mask)
self.comparison_gng_dpa = tf.equal(tf.round(pred_out_dual), out_dual)
self.acc_gng_dpa = tf.reduce_mean(tf.cast(self.comparison_gng_dpa,
tf.float32))
# GNG alone
mask = tf.equal(task, twos)
out = self.output_bxtxd[:, n_time-1, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, n_time-1, 0]
pred_out_dual = tf.boolean_mask(pred_out, mask)
self.comparison_dpa_gng = tf.equal(tf.round(pred_out_dual), out_dual)
self.acc_dpa_gng = tf.reduce_mean(tf.cast(self.comparison_dpa_gng,
tf.float32))
out = self.output_bxtxd[:, 10, 0]
out_dual = tf.boolean_mask(out, mask)
pred_out = self.pred_output_bxtxd[:, 10, 0]
pred_out_dual = tf.boolean_mask(pred_out, mask)
self.comparison_gng_gng = tf.equal(tf.round(pred_out_dual), out_dual)
self.acc_gng_gng = tf.reduce_mean(tf.cast(self.comparison_gng_gng,
tf.float32))
def _setup_saver(self):
'''See docstring in RecurrentWhisperer.'''
self.saver = tf.train.Saver(tf.global_variables(),
max_to_keep=self.hps.max_ckpt_to_keep)
def _setup_training(self, train_data, valid_data):
'''Does nothing. Required by RecurrentWhisperer.'''
pass
def _train_batch(self, batch_data):
'''Performs a training step over a single batch of data.
Args:
batch_data: dict containing one training batch of data. Contains
the following key/value pairs:
'inputs': [n_batch x n_time x n_bits] numpy array specifying
the inputs to the RNN.
'outputs': [n_batch x n_time x n_bits] numpy array specifying
the correct output responses to the 'inputs.'
Returns:
summary: dict containing the following summary key/value pairs
from the training step:
            'loss': scalar float evaluation of the loss function over the
data batch.
'grad_global_norm': scalar float evaluation of the norm of
the gradient of the loss function with respect to all trainable
variables, taken over the data batch.
'''
hps = self.hps
data_hps = hps.data_hps
n_batch = data_hps['n_batch']
gng_time = data_hps['gng_time']
ops_to_eval = [self.train_op,
self.grad_global_norm,
self.loss,
self.merged_opt_summary,
self.acc_dpa,
self.acc_gng]
if gng_time == -1:
            # note: the per-task splits built below are not fed to the graph;
            # task separation is handled by the boolean masks in _setup_model
            inputs_dual = []
outputs_dual = []
inputs_dpa = []
outputs_dpa = []
inputs_gng = []
outputs_gng = []
for ind_btch in range(n_batch):
task_choice = batch_data['task_choice']
inputs = batch_data['inputs'][ind_btch, :, :]
outputs = batch_data['output'][ind_btch, :, :]
if task_choice[ind_btch]==0:
# Get trial for Dualtask
inputs_dual.append(inputs)
outputs_dual.append(outputs)
elif task_choice[ind_btch]==1:
# Get trial for DPA
inputs_dpa.append(inputs)
outputs_dpa.append(outputs)
else:
# Get trial for GNG
inputs_gng.append(inputs)
outputs_gng.append(outputs)
inputs_dual = np.array(inputs_dual)
outputs_dual = np.array(outputs_dual)
inputs_dpa = np.array(inputs_dpa)
outputs_dpa = np.array(outputs_dpa)
inputs_gng = np.array(inputs_gng)
outputs_gng = np.array(outputs_gng)
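            # NOTE: the per-task input/output splits assembled above are not fed
            # to the graph below; the accuracy ops select trials of each task
            # via the task mask instead.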
# Get accuracy for dual task only trials
acc_eval = [self.acc_dpa_dual, self.acc_gng_dual]
feed_dict = dict()
feed_dict[self.inputs_bxtxd] = batch_data['inputs']
feed_dict[self.output_bxtxd] = batch_data['output']
feed_dict[self.learning_rate] = self.adaptive_learning_rate()
feed_dict[self.grad_norm_clip_val] = self.adaptive_grad_norm_clip()
feed_dict[self.task] = task_choice
[ev_acc_dpa_dual,
ev_acc_gng_dual] = self.session.run(acc_eval, feed_dict=feed_dict)
# Get accuracy for DPA task only trials
acc_eval = [self.acc_dpa_dpa, self.acc_gng_dpa]
feed_dict = dict()
feed_dict[self.inputs_bxtxd] = batch_data['inputs']
feed_dict[self.output_bxtxd] = batch_data['output']
feed_dict[self.learning_rate] = self.adaptive_learning_rate()
feed_dict[self.grad_norm_clip_val] = self.adaptive_grad_norm_clip()
feed_dict[self.task] = task_choice
[ev_acc_dpa_dpa,
ev_acc_gng_dpa] = self.session.run(acc_eval, feed_dict=feed_dict)
# Get accuracy for GNG task only trials
acc_eval = [self.acc_dpa_gng, self.acc_gng_gng]
feed_dict = dict()
feed_dict[self.inputs_bxtxd] = batch_data['inputs']
feed_dict[self.output_bxtxd] = batch_data['output']
feed_dict[self.learning_rate] = self.adaptive_learning_rate()
feed_dict[self.grad_norm_clip_val] = self.adaptive_grad_norm_clip()
feed_dict[self.task] = task_choice
[ev_acc_dpa_gng,
ev_acc_gng_gng] = self.session.run(acc_eval, feed_dict=feed_dict)
#General accuracy
feed_dict = dict()
feed_dict[self.inputs_bxtxd] = batch_data['inputs']
feed_dict[self.output_bxtxd] = batch_data['output']
feed_dict[self.learning_rate] = self.adaptive_learning_rate()
feed_dict[self.grad_norm_clip_val] = self.adaptive_grad_norm_clip()
[ev_train_op,
ev_grad_global_norm,
ev_loss,
ev_merged_opt_summary,
ev_acc_dpa,
ev_acc_gng] = self.session.run(ops_to_eval, feed_dict=feed_dict)
if self.hps.do_save_tensorboard_events:
if self._epoch() == 0:
                '''Hack to prevent throwing off the vertical axis on the
                Tensorboard figure for grad_norm_clip_val (grad_norm_clip_val
                is initialized to an enormous number to prevent clipping
                before we know the scale of the gradients).'''
feed_dict[self.grad_norm_clip_val] = np.nan
ev_merged_opt_summary = \
self.session.run(self.merged_opt_summary, feed_dict)
self.writer.add_summary(ev_merged_opt_summary, self._step())
summary = {'loss': ev_loss, 'grad_global_norm': ev_grad_global_norm,
'acc_dpa': ev_acc_dpa, 'acc_gng': ev_acc_gng,
'acc_dpa_dual': ev_acc_dpa_dual, 'acc_gng_dual': ev_acc_gng_dual,
'acc_dpa_dpa': ev_acc_dpa_dpa, 'acc_gng_dpa': ev_acc_gng_dpa,
'acc_dpa_gng': ev_acc_dpa_gng, 'acc_gng_gng': ev_acc_gng_gng}
return summary
def predict(self, batch_data, do_predict_full_LSTM_state=False):
'''Runs the RNN given its inputs.
Args:
batch_data:
dict containing the key 'inputs': [n_batch x n_time x n_bits]
numpy array specifying the inputs to the RNN.
do_predict_full_LSTM_state (optional): bool indicating, if the RNN
is an LSTM, whether to return the concatenated hidden and cell
states (True) or simply the hidden states (False). Default: False.
Returns:
predictions: dict containing the following key/value pairs:
'state': [n_batch x n_time x n_states] numpy array containing
the activations of the RNN units in response to the inputs.
Here, n_states is the dimensionality of the hidden state,
which, depending on the RNN architecture and
do_predict_full_LSTM_state, may or may not include LSTM cell
states.
'output': [n_batch x n_time x n_bits] numpy array containing
the readouts from the RNN.
'''
if do_predict_full_LSTM_state:
return self._predict_with_LSTM_cell_states(batch_data)
else:
ops_to_eval = [self.hidden_bxtxd, self.pred_output_bxtxd,
self.acc_dpa, self.acc_gng, self.acc_dpa_dual,
self.acc_dpa_gng, self.acc_dpa_dpa,
self.acc_gng_gng, self.comparison_dpa_dual,
self.comparison_dpa_dpa, self.pred_out_dpa_dual,
self.pred_out_dpa_dpa]
feed_dict = dict()
feed_dict[self.inputs_bxtxd] = batch_data['inputs']
feed_dict[self.output_bxtxd] = batch_data['output']
feed_dict[self.task] = batch_data['task_choice']
[ev_hidden_bxtxd,
ev_pred_output_bxtxd,
ev_pred_acc_dpa,
ev_pred_acc_gng,
ev_pred_acc_dpa_dual,
ev_pred_acc_gng_dual,
ev_pred_acc_dpa_dpa,
ev_pred_acc_gng_gng,
vec_pred_acc_dpa_dual,
vec_pred_acc_dpa_dpa,
pred_out_dual,
pred_out_dpa] = self.session.run(ops_to_eval, feed_dict=feed_dict)
predictions = {
'state': ev_hidden_bxtxd,
'output': ev_pred_output_bxtxd,
'ev_acc_dpa': ev_pred_acc_dpa,
'ev_acc_gng': ev_pred_acc_gng,
'ev_acc_dpa_dual': ev_pred_acc_dpa_dual,
'ev_acc_gng_dual': ev_pred_acc_gng_dual,
'ev_acc_dpa_dpa': ev_pred_acc_dpa_dpa,
'ev_acc_gng_gng': ev_pred_acc_gng_gng,
'vec_acc_dpa_dual': vec_pred_acc_dpa_dual,
'vec_acc_dpa_dpa': vec_pred_acc_dpa_dpa,
'pred_out_dual': pred_out_dual,
'pred_out_dpa': pred_out_dpa}
return predictions
def _predict_with_LSTM_cell_states(self, batch_data):
'''Runs the RNN given its inputs.
The following is added for execution only when LSTM predictions are
needed for both the hidden and cell states. Tensorflow does not make
it easy to access the cell states via dynamic_rnn.
Args:
batch_data: as specified by predict.
Returns:
predictions: as specified by predict.
'''
hps = self.hps
if hps.rnn_type != 'lstm':
return self.predict(batch_data)
n_hidden = hps.n_hidden
[n_batch, n_time, n_bits] = batch_data['inputs'].shape
initial_state = self.rnn_cell.zero_state(n_batch, dtype=tf.float32)
''' Add ops to the graph for getting the complete LSTM state
(i.e., hidden and cell) at every timestep.'''
self.full_state_list = []
for t in range(n_time):
input_ = self.inputs_bxtxd[:, t, :]
if t == 0:
full_state_t_minus_1 = initial_state
else:
full_state_t_minus_1 = self.full_state_list[-1]
_, full_state_bxd = self.rnn_cell(input_, full_state_t_minus_1)
self.full_state_list.append(full_state_bxd)
'''Evaluate those ops'''
ops_to_eval = [self.full_state_list, self.pred_output_bxtxd]
feed_dict = {self.inputs_bxtxd: batch_data['inputs']}
ev_full_state_list, ev_pred_output_bxtxd = \
self.session.run(ops_to_eval, feed_dict=feed_dict)
'''Package the results'''
h = np.zeros([n_batch, n_time, n_hidden]) # hidden states: bxtxd
c = np.zeros([n_batch, n_time, n_hidden]) # cell states: bxtxd
for t in range(n_time):
h[:, t, :] = ev_full_state_list[t].h
c[:, t, :] = ev_full_state_list[t].c
ev_LSTMCellState = tf.nn.rnn_cell.LSTMStateTuple(h=h, c=c)
predictions = {
'state': ev_LSTMCellState,
'output': ev_pred_output_bxtxd
}
return predictions
def _get_data_batches(self, train_data):
'''See docstring in RecurrentWhisperer.'''
return [self.generate_dualtask_trials()]
def _get_batch_size(self, batch_data):
'''See docstring in RecurrentWhisperer.'''
return batch_data['inputs'].shape[0]
def generate_dualtask_trials(self):
'''Generates synthetic data (i.e., ground truth trials) for the
dual task. See comments following DualTask class definition for a
description of the input-output relationship in the task.
Args:
None.
Returns:
dict containing 'inputs' and 'outputs'.
'inputs': [n_batch x n_time x n_bits] numpy array containing
input pulses.
'outputs': [n_batch x n_time x n_bits] numpy array specifying
the correct behavior of the Dual task device.
'''
data_hps = self.hps.data_hps
n_batch = data_hps['n_batch']
n_time = data_hps['n_time']
n_bits = data_hps['n_bits']
gng_time = data_hps['gng_time']
lamb = data_hps['lamb']
delay_max = data_hps['delay_max']
noise = data_hps['noise']
if gng_time==-1:
dataset = data3task.get_inputs_outputs(n_batch, n_time, n_bits,
gng_time, lamb, delay_max,
noise)
else:
dataset = data.get_inputs_outputs(n_batch, n_time, n_bits,
gng_time, lamb, delay_max, noise)
return dataset
def _update_visualizations(self, train_data=None, valid_data=None):
'''See docstring in RecurrentWhisperer.'''
data = self.generate_dualtask_trials()
# self.plot_trials(data)
def _setup_visualizations(self):
'''See docstring in RecurrentWhisperer.'''
#
# def plot_trials(self, data, start_time=0, stop_time=None):
# '''Plots example trials, complete with input pulses, correct outputs,
# and RNN-predicted outputs.
#
# Args:
# data: dict as returned by generate_dualtask_trials.
#
# start_time (optional): int specifying the first timestep to plot.
# Default: 0.
#
# stop_time (optional): int specifying the last timestep to plot.
# Default: n_time.
#
# Returns:
# None.
# '''
# hps = self.hps
# n_batch = self.hps.data_hps['n_batch']
# n_time = self.hps.data_hps['n_time']
# gng_time = self.hps.data_hps['gng_time']
## n_plot = np.min([hps.n_trials_plot, n_batch])
# n_plot = 36
# dpa2_time = data['vec_tau']
# if gng_time==-1:
# task_type = data['task_choice']
# else:
# task_type = 0
#
# f = plt.figure(self.fig.number)
# plt.clf()
#
# inputs = data['inputs']
# output = data['output']
# predictions = self.predict(data)
# pred_output = predictions['output']
# ev_acc_dpa = predictions['ev_acc_dpa']
# ev_acc_dpa_dual = predictions['ev_acc_dpa_dual']
# ev_acc_gng_dual = predictions['ev_acc_gng_dual']
# ev_acc_dpa_dpa = predictions['ev_acc_dpa_dpa']
# ev_acc_gng_gng = predictions['ev_acc_gng_gng']
# vec_acc_dpa_dual = predictions['vec_acc_dpa_dual']
# vec_acc_dpa_dpa = predictions['vec_acc_dpa_dpa']
# pred_out_dual = predictions['pred_out_dual']
# pred_out_dpa = predictions['pred_out_dpa']
#
# if stop_time is None:
# stop_time = n_time
#
# time_idx = range(start_time, stop_time)
#
# for trial_idx in range(n_plot):
# plt.subplot(6, 6, trial_idx+1)
# if n_plot == 1:
# plt.title('Example trial', fontweight='bold')
# else:
# if gng_time==-1:
# if task_type[trial_idx] == 0:
# plt.title('Dual-task | Acc DPA %d | Pred %.4e | Out %.2e' %
# (vec_acc_dpa_dual[np.where(np.where(task_type==0)[0]==trial_idx)[0]],
# pred_output[trial_idx, n_time-1, 0],
# output[trial_idx, n_time-1, 0]), fontweight='bold')
# elif task_type[trial_idx] == 1:
# plt.title('DPA task | Acc DPA %d | Pred %.4e | Out %.2e' %
# (vec_acc_dpa_dpa[np.where(np.where(task_type==1)[0]==trial_idx)[0]],
# pred_output[trial_idx, n_time-1, 0],
# output[trial_idx, n_time-1, 0]),
# fontweight='bold')
# else:
# plt.title('GNG task | Acc GNG %d' %
# (ev_acc_gng_gng),
# fontweight='bold')
# else:
# plt.title('Example trial %d | Acc %d' % (trial_idx + 1,
# ev_acc_dpa),
# fontweight='bold')
#
# self._plot_single_trial(
# inputs[trial_idx, time_idx, :],
# output[trial_idx, time_idx, :],
# pred_output[trial_idx, time_idx, :])
#
# # Only plot x-axis ticks and labels on the bottom subplot
# if trial_idx < (n_plot-1):
# plt.xticks([])
# else:
# plt.xlabel('Timestep', fontweight='bold')
#
# f = plt.gcf()
## plt.ion()
## plt.show()
## plt.pause(1e-10)
# return f
#
# @staticmethod
# def _plot_single_trial(input_txd, output_txd, pred_output_txd):
#
# VERTICAL_SPACING = 2.5
# [n_time, n_bits] = input_txd.shape
# tt = range(n_time)
#
# y_ticks = [VERTICAL_SPACING*bit_idx for bit_idx in range(n_bits)]
# y_tick_labels = ['S %d' % (bit_idx+1) for bit_idx in range(n_bits)]
# plt.yticks(y_ticks, y_tick_labels, fontweight='bold')
# for bit_idx in range(n_bits):
#
# vertical_offset = VERTICAL_SPACING*bit_idx
#
# # Input pulses
# plt.fill_between(
# tt,
# vertical_offset + input_txd[:, bit_idx],
# vertical_offset,
# step='mid',
# color='gray')
#
# # Correct outputs
# plt.step(
# tt,
# vertical_offset + output_txd[:, bit_idx],
# where='mid',
# linewidth=2,
# color='cyan')
#
# if bit_idx == 0:
#                # RNN outputs
# plt.step(
# tt,
# vertical_offset + pred_output_txd[:, 0],
# where='mid',
# color='purple',
# linewidth=1.5,
# linestyle='--')
#
# plt.xlim(-1, n_time)
# plt.ylim(-1, n_bits*2+2)
```
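The per-task accuracy ops defined above all follow one mask-and-compare pattern: select the trials belonging to one task with `tf.boolean_mask`, round the readout, and average the matches. A minimal standalone sketch of that pattern (TF1-style, with made-up placeholder tensors; this is not the trained model itself):

```python
import tensorflow as tf

# Made-up stand-ins for the graph tensors: per-trial task labels, targets,
# and the DPA readout at the final timestep.
task = tf.constant([0., 1., 2., 0.], tf.float32)    # 0 = dual, 1 = DPA, 2 = GNG
target = tf.constant([1., 0., 1., 1.], tf.float32)  # correct responses
predicted = tf.constant([0.9, 0.2, 0.4, 0.1], tf.float32)

# Select one task's trials, round the readout, and average the matches.
mask = tf.equal(task, tf.zeros_like(task))          # dual-task trials only
correct = tf.equal(tf.round(tf.boolean_mask(predicted, mask)),
                   tf.boolean_mask(target, mask))
acc_dpa_dual = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    print(sess.run(acc_dpa_dual))                   # 0.5: one of two dual trials correct
```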
#### File: dPCA/dualtask/plots.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import random
# from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
PATH_SAVE = '/home/joan/dPCA/python/dPCA/dualtask/'
PATH_LOAD = '/home/joan/cluster_home/dPCA/python/dPCA/dualtask/'
sys.path.insert(0, PATH_LOAD)
# Noise range for the input to the RNN
noise_rng = np.array([0.0])
noise = noise_rng[0]
#noise_rng = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
# Time of appearance of the go/no-go task. 0 for no task. If gng_rng = [-1],
# each trial is randomly drawn from the dual task, DPA alone, or GNG alone.
gng_rng = np.array(-1)
gng = gng_rng
#gng_rng = np.array([0, 10])
lamb = np.array([0.0])
l = lamb[0]
#lamb = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1])
delay_max = np.array([0])
delay = delay_max[0]
num_neurons = np.array([64])
neuron = num_neurons[0]
# number of RNN instances
INST = 50
#Number of example plots to show
n_plot = 36
load_dir = os.path.join(PATH_LOAD, 'data_trainedwithnoise')
save_dir = os.path.join(PATH_SAVE, 'Figures_noise_' + str(noise))
if os.path.isdir(save_dir) is False:
os.mkdir(save_dir)
#data = np.load(os.path.join(load_dir, 'data_' + str(gng) + '_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1])
# + '_neu' + str(num_neurons[0])
# + '-' + str(num_neurons[-1]) + '.npz'))
#Plot example trials of the RNN
def plot_trials(inputs, output, pred_output, vec_acc_dpa_dpa, vec_acc_dpa_dual,
task_type, start_time=0, stop_time=None):
'''Plots example trials, complete with input pulses, correct outputs,
and RNN-predicted outputs.
Args:
data: dict as returned by generate_dualtask_trials.
start_time (optional): int specifying the first timestep to plot.
Default: 0.
stop_time (optional): int specifying the last timestep to plot.
Default: n_time.
Returns:
None.
'''
n_time = 20
gng_time = gng
# n_plot = np.min([hps.n_trials_plot, n_batch])
if stop_time is None:
stop_time = n_time
time_idx = range(start_time, stop_time)
for trial_idx in range(n_plot):
        plt.subplot(n_plot//6, n_plot//6, trial_idx+1)
if n_plot == 1:
plt.title('Example trial', fontweight='bold')
else:
if gng_time==-1:
if task_type[trial_idx] == 0:
plt.title('Dual-task | Acc DPA %d | Pred %.4e | Out %.2e' %
(vec_acc_dpa_dual[np.where(np.where(task_type==0)[0]==trial_idx)[0]],
pred_output[trial_idx, n_time-1, 0],
output[trial_idx, n_time-1, 0]), fontweight='bold')
elif task_type[trial_idx] == 1:
plt.title('DPA task | Acc DPA %d | Pred %.4e | Out %.2e' %
(vec_acc_dpa_dpa[np.where(np.where(task_type==1)[0]==trial_idx)[0]],
pred_output[trial_idx, n_time-1, 0],
output[trial_idx, n_time-1, 0]),
fontweight='bold')
else:
plt.title('GNG task | Acc GNG 1', fontweight='bold')
            else:
                # ev_acc_dpa is not passed into plot_trials, so report only the
                # trial index on this (single-task) code path.
                plt.title('Example trial %d' % (trial_idx + 1),
                          fontweight='bold')
_plot_single_trial(
inputs[trial_idx, time_idx, :],
output[trial_idx, time_idx, :],
pred_output[trial_idx, time_idx, :])
# Only plot x-axis ticks and labels on the bottom subplot
if trial_idx < (n_plot-1):
plt.xticks([])
else:
plt.xlabel('Timestep', fontweight='bold')
f = plt.gcf()
# plt.ion()
# plt.show()
# plt.pause(1e-10)
return f
def _plot_single_trial(input_txd, output_txd, pred_output_txd):
VERTICAL_SPACING = 2.5
[n_time, n_bits] = input_txd.shape
tt = range(n_time)
y_ticks = [VERTICAL_SPACING*bit_idx for bit_idx in range(n_bits)]
y_tick_labels = ['S %d' % (bit_idx+1) for bit_idx in range(n_bits)]
plt.yticks(y_ticks, y_tick_labels, fontweight='bold')
for bit_idx in range(n_bits):
vertical_offset = VERTICAL_SPACING*bit_idx
# Input pulses
plt.fill_between(
tt,
vertical_offset + input_txd[:, bit_idx],
vertical_offset,
step='mid',
color='gray')
# Correct outputs
plt.step(
tt,
vertical_offset + output_txd[:, bit_idx],
where='mid',
linewidth=2,
color='cyan')
if bit_idx == 0:
            # RNN outputs
plt.step(
tt,
vertical_offset + pred_output_txd[:, 0],
where='mid',
color='purple',
linewidth=1.5,
linestyle='--')
plt.xlim(-1, n_time)
plt.ylim(-1, n_bits*2+2)
# Plot example trials
#
#for inst in range(INST):
#
# FIG_WIDTH = n_plot # inches
# FIX_HEIGHT = 9 # inches
# fig = plt.figure(figsize=(FIG_WIDTH, FIX_HEIGHT),
# tight_layout=True)
# if gng==-1:
# task_type = data['task'][inst]
# else:
# task_type = 0
#
# f = plt.figure(fig.number)
# plt.clf()
#
# inputs = data['inputs'][inst]
# output = data['output'][inst]
# pred_output = data['pred_out'][inst]
# ev_acc_dpa = data['acc'][0][1][inst]
# ev_acc_dpa_dual = data['acc'][0][3][inst]
# ev_acc_gng_dual = data['acc'][0][4][inst]
# ev_acc_dpa_dpa = data['acc'][0][5][inst]
# ev_acc_gng_gng = data['acc'][0][6][inst]
# vec_acc_dpa_dual = data['acc'][0][7][inst]
# vec_acc_dpa_dpa = data['acc'][0][8][inst]
# f = plot_trials(inputs, output, pred_output, vec_acc_dpa_dpa,
# vec_acc_dpa_dual, task_type)
#
# task_dir = os.path.join(save_dir, 'task_plots_noise' + str(noise) + '_lamb'
# + str(lamb))
# plot_dir = os.path.join(save_dir, 'count_plots_noise' + str(noise) + '_lamb'
# + str(lamb))
# if os.path.isdir(plot_dir) is False:
# os.mkdir(task_dir)
# os.mkdir(plot_dir)
# f.savefig(os.path.join(task_dir, 'Inst' + str(inst) + '.png'))
# else:
# f.savefig(os.path.join(task_dir, 'Inst' + str(inst) + '.png'))
#
# plt.close()
'''Plot mean data together'''
## Plots for number of neurons against accuracy
## Loading the data for task without distractor
#plt.figure()
#
#for l in lamb:
# for delay in delay_max:
# data = np.load(fig_dir + '/data_0_' + str(l) + '_' + str(delay) +
# '_i' + str(INST) + '_n' + str(noise_rng[0]) + '-' +
# str(noise_rng[-1]) + '_neu' + str(num_neurons[0])
# + '-' + str(num_neurons[-1]) + '.npz')
# data = data['acc']
#
# # Compute the mean accuracy across instances
# mean_acc = []
# std = []
# for n in range(num_neurons.shape[0]):
# mean_acc.append(np.mean(data[n][1]))
# std.append(np.std(data[n][1]))
# plt.scatter(np.repeat(num_neurons[n], INST), data[n][1], marker='.', color='b')
#
# # Plot with error bars of the accuracy / loss
# plt.plot(num_neurons, mean_acc, marker='+', ms=15, color='b',
# label='DPA accuracy dpa gng0 lamb' + str(l))
#
## Loading data for task with distractor
#for l in lamb:
# for delay in delay_max:
# data10 = np.load(fig_dir + '/data_10_' + str(l) + '_' + str(delay) +
# '_i' + str(INST) + '_n' + str(noise_rng[0]) + '-' +
# str(noise_rng[-1]) + '_neu' + str(num_neurons[0])
# + '-' + str(num_neurons[-1]) + '.npz')
# data10 = data10['acc']
#
# # Compute the mean accuracy across instances
# mean_acc = []
# std = []
# for n in range(num_neurons.shape[0]):
# mean_acc.append(np.mean(data10[n][1]))
# std.append(np.std(data10[n][1]))
# plt.scatter(np.repeat(num_neurons[n], INST), data10[n][1], marker='.', color='r')
#
# # Plot with error bars of the accuracy / loss
# plt.plot(num_neurons, mean_acc, marker='+', ms=15, color='r',
# label='DPA accuracy gng10 lamb' + str(l))
#
#plt.xlabel('Number of neurons')
#plt.ylabel('Mean accuracy')
#plt.legend()
#plt.show()
#
#if os.path.isdir(fig_dir) is False:
# os.mkdir(fig_dir)
# plt.savefig(os.path.join(fig_dir, 'mean_acc_neurons_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '.png'))
#else:
# plt.savefig(os.path.join(fig_dir, 'mean_acc_neurons_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '.png'))
#
## Plots for lambda against accuracy
## Loading the data for both tasks
#f = plt.figure()
#mean_acc_dpa0 = []
#mean_acc_gng0 = []
#std_dpa0 = []
#std_gng0 =[]
#mean_acc_dpa10 = []
#mean_acc_gng10 = []
#std_dpa10 = []
#std_gng10 =[]
#for gng in gng_rng:
# for l in lamb:
# for delay in delay_max:
# data = np.load(fig_dir + '/data_' + str(gng) + '_' + str(l) + '_'
# + str(delay) + '_i' + str(INST) + '_n'
# + str(noise_rng[0]) + '-' + str(noise_rng[-1])
# + '_neu' + str(num_neurons[0])
# + '-' + str(num_neurons[-1]) + '.npz')
# datal = data['acc'][0]
## datal.append(data['acc'])
#
# # Compute the mean accuracy across instances
# if gng > 0:
# mean_acc_dpa10.append(np.mean(datal[1]))
# std_dpa10.append(np.std(datal[1]))
# mean_acc_gng10.append(np.mean(datal[2]))
# std_gng10.append(np.std(datal[2]))
# else:
# mean_acc_dpa0.append(np.mean(datal[1]))
# std_dpa0.append(np.std(datal[1]))
# mean_acc_gng0.append(np.mean(datal[2]))
# std_gng0.append(np.std(datal[2]))
#
## Plot with error bars of the accuracy / loss
## plt.errorbar(l, mean_acc_dpa, yerr=std_dpa, marker='+',
## label='DPA with gng' + str(gng))
##plt.plot(lamb, mean_acc_dpa10, color='r', label='DPA with distractor')
#plt.errorbar(lamb+0.02, mean_acc_dpa10, yerr=std_dpa10, marker='+', color='r', label='DPA with distractor')
## plt.errorbar(l, mean_acc_gng, yerr=std_gng, marker='+',
## label='GNG with gng' + str(gng))
##plt.plot(lamb, mean_acc_dpa0, color='g', label='DPA no distractor')
#plt.errorbar(lamb-0.02, mean_acc_dpa0, yerr=std_dpa0, marker='+', color='g', label='DPA no distractor')
#
##plt.plot(lamb, mean_acc_gng10, color='k', label='GNG')
#plt.errorbar(lamb, mean_acc_gng10, yerr=std_gng10, marker='+', color='k', label='GNG')
#
#plt.xlabel('Parametrization')
#plt.ylabel('Mean accuracy')
#plt.legend()
#plt.show()
#
#if os.path.isdir(fig_dir) is False:
# os.mkdir(fig_dir)
# plt.savefig(os.path.join(fig_dir, 'mean_ acc_lambda_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '_neu'
# + str(num_neurons[0]) + '-'
# + str(num_neurons[-1]) + '.png'))
#else:
# plt.savefig(os.path.join(fig_dir, 'mean_acc_lambda_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '_neu'
# + str(num_neurons[0]) + '-'
# + str(num_neurons[-1]) + '.png'))
#
## Plots for noise against accuracy
## Loading data for both tasks
#f = plt.figure()
#for gng in gng_rng:
# for l in lamb:
# for delay in delay_max:
# data10 = np.load(fig_dir + '/data_' + str(gng) + '_' + str(l)
# + '_' + str(delay) + '_i' + str(INST) + '_n'
# + str(noise_rng[0]) + '-' +
# str(noise_rng[-1]) + '_neu' + str(num_neurons[0])
# + '-' + str(num_neurons[-1]) + '.npz')
# data10 = data10['acc']
#
# # Compute the mean accuracy across instances
# mean_acc = []
# std = []
# for n in range(noise_rng.shape[0]):
# mean_acc.append(np.mean(data10[n][1]))
# std.append(np.std(data10[n][1]))
#
# # Plot with error bars of the accuracy / loss
# plt.errorbar(noise_rng, mean_acc, yerr=std, marker='+',
# label='DPA accuracy dpa gng' + str(l))
#
#
#plt.xlabel('Noise')
#plt.ylabel('Mean accuracy')
#plt.legend()
#plt.show()
#
#if os.path.isdir(fig_dir) is False:
# os.mkdir(fig_dir)
# plt.savefig(os.path.join(fig_dir, 'mean_ acc_noise_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '_neu'
# + str(num_neurons[0]) + '-'
# + str(num_neurons[-1]) + '.png'))
#else:
# plt.savefig(os.path.join(fig_dir, 'mean_acc_noise_'
# + str(l) + '_' + str(delay)
# + '_i' + str(INST) + '_n' + str(noise_rng[0])
# + '-' + str(noise_rng[-1]) + '_neu'
# + str(num_neurons[0]) + '-'
# + str(num_neurons[-1]) + '.png'))
#
#
# Plots for accuracy depending on task.
# General accuracy
# Plots for accuracy depending on task.
#Accuracy across training
noise = noise_rng[0]
plt.figure()
label_added = False
epochslist = []
for i in range(INST):
data = np.load(PATH_LOAD + '/logs_-1/lamb0.0/noise' + str(noise) +
'/delay0/neurons64/inst' + str(i) +
'/9afbb8777a/accuracies.npz')
acc_dpa = data['acc_dpa']
acc_gng = data['acc_gng']
acc_dpa_dual = data['acc_dpa_dual']
acc_gng_dual = data['acc_gng_dual']
acc_dpa_dpa = data['acc_dpa_dpa']
acc_gng_gng = data['acc_gng_gng']
n_epochs = data['n_epochs']
epochslist.append(n_epochs//10)
epochs = np.arange(n_epochs//10)
if not label_added:
plt.plot(epochs, acc_dpa_dual, label='Dual DPA', color='r')
plt.plot(epochs, acc_gng_dual, label='Dual GNG', color='b')
plt.plot(epochs, acc_dpa_dpa, label='DPA DPA', color='g')
plt.plot(epochs, acc_gng_gng, label='GNG GNG', color='cyan')
label_added = True
else:
plt.plot(epochs, acc_dpa_dual, color='r')
plt.plot(epochs, acc_gng_dual, color='b')
plt.plot(epochs, acc_dpa_dpa, color='g')
plt.plot(epochs, acc_gng_gng, color='cyan')
#plt.xlim([0, 100])
plt.legend()
plt.xticks(np.arange(0, max(epochslist), step=10))
plt.xlabel('Epoch/10')
plt.ylabel('Accuracy')
fig = plt.gcf()
# plt.show()
fig.savefig(os.path.join(save_dir, 'acc_across_train_inst' + str(INST) +
'_noise_' + str(noise) + '.png'))
plt.close()
#Plot for the mean accuracy of DPA and dual with all trials
acc_dpa_dual = []
acc_gng_dual = []
acc_dpa_dpa = []
acc_gng_gng = []
n_epochs = []
for i in range(INST):
    # TODO: find as before
data = np.load(PATH_LOAD + '/logs_-1/lamb0.0/noise' + str(noise) +
'/delay0/neurons64/inst' + str(i) +
'/9afbb8777a/accuracies.npz')
acc = data['acc_dpa_dual']
acc_dpa_dual.append(acc)
acc = data['acc_gng_dual']
acc_gng_dual.append(acc)
acc = data['acc_dpa_dpa']
acc_dpa_dpa.append(acc)
acc = data['acc_gng_gng']
acc_gng_gng.append(acc)
n = data['n_epochs']
n_epochs.append(n)
min_epochs = np.min(tuple(n_epochs[i] for i in range(INST)))//10
acc_dpa_dualstack = acc_dpa_dual[0][0:min_epochs]
acc_gng_dualstack = acc_gng_dual[0][0:min_epochs]
acc_dpa_dpastack = acc_dpa_dpa[0][0:min_epochs]
acc_gng_gngstack = acc_gng_gng[0][0:min_epochs]
for i in range(INST-1):
acc_dpa_dualstack = np.column_stack((acc_dpa_dualstack,
acc_dpa_dual[i+1][0:min_epochs]))
acc_gng_dualstack = np.column_stack((acc_gng_dualstack,
acc_gng_dual[i+1][0:min_epochs]))
acc_dpa_dpastack = np.column_stack((acc_dpa_dpastack,
acc_dpa_dpa[i+1][0:min_epochs]))
acc_gng_gngstack = np.column_stack((acc_gng_gngstack,
acc_gng_gng[i+1][0:min_epochs]))
acc_dpa_dualmean = np.mean(acc_dpa_dualstack, axis=1)
acc_gng_dualmean = np.mean(acc_gng_dualstack, axis=1)
acc_dpa_dpamean = np.mean(acc_dpa_dpastack, axis=1)
acc_gng_gngmean = np.mean(acc_gng_gngstack, axis=1)
epochs = np.arange(min_epochs)
plt.figure()
plt.plot(epochs, acc_dpa_dualmean, label='dual DPA', color='r', linewidth=3)
plt.plot(epochs, acc_dpa_dpamean, label='DPA DPA', color='g', linewidth=3)
plt.plot(epochs, acc_gng_dualmean, label='dual GNG', color='c', linewidth=3)
plt.plot(epochs, acc_gng_gngmean, label='GNG GNG', color='b', linewidth=3)
for i in range(INST):
plt.plot(epochs, acc_dpa_dualstack[:, i], color='r', alpha=0.1)
plt.plot(epochs, acc_dpa_dpastack[:, i], color='g', alpha=0.1)
plt.plot(epochs, acc_gng_dualstack[:, i], color='c', alpha=0.1)
plt.plot(epochs, acc_gng_gngstack[:, i], color='b', alpha=0.1)
#plt.xlim([0, 100])
plt.legend(loc='upper right', bbox_to_anchor=(1.1,1))
plt.xticks(np.arange(0, min_epochs, step=20))
plt.xlabel('Epoch/10')
plt.ylabel('Mean accuracy')
fig = plt.gcf()
#plt.show()
fig.savefig(os.path.join(save_dir, 'mean_acc_across_train' + str(noise) + '.png'))
plt.close()
# Plot accuracy of DPA in dual task against accuracy of DPA in dpa alone with numbers
data = np.load(os.path.join(load_dir, 'data_-1_0.0_0_i50_n' + str(noise) + '-' +
str(noise) + '_neu64-64.npz'))
dual_acc = data['acc'][0][3]
dpa_acc = data['acc'][0][5]
n = np.arange(INST)
f, ax = plt.subplots()
ax.scatter(dpa_acc, dual_acc, color='b', s=5)
ax.plot([0.4, 1], [0.4, 1], ls='--', color='grey')
for i, num in enumerate(n):
plt.annotate(num, (dpa_acc[i], dual_acc[i]))
plt.xlabel('DPA acc')
plt.ylabel('dual DPA acc')
f.savefig(os.path.join(save_dir, 'dpa_vs_dual_accnumber_' + str(noise) + '.png'))
# Plot accuracy of DPA in dual task against accuracy of DPA in dpa alone without numbers
f, ax = plt.subplots()
ax.scatter(dpa_acc, dual_acc, color='b', s=5)
mask_good = (dual_acc > 0.5) & (dpa_acc > 0.5)
ax.scatter(dpa_acc[mask_good], dual_acc[mask_good], color='g', s=5)
mask_perfect = (dual_acc == 1) & (dpa_acc == 1)
ax.scatter(dpa_acc[mask_perfect], dual_acc[mask_perfect], color='r', s=5)
ax.plot([0.4, 1], [0.4, 1], ls='--', color='grey')
plt.xlabel('DPA acc')
plt.ylabel('dual DPA acc')
f.savefig(os.path.join(save_dir, 'dpa_vs_dual_acc_' + str(noise) + '.png'))
# Bar plot acc dual vs acc dpa
plt.figure()
x = np.arange(2)
means = [np.mean(dual_acc), np.mean(dpa_acc)]
plt.bar(x, means, color='b', width=0.3)
plt.xticks(x, ('Dual-task', 'DPA task'))
plt.title('Mean accuracy')
plt.savefig(os.path.join(save_dir, 'mean_acc_bar' + str(noise) + '.png'))
plt.close('all')
# Count how many stimulus pairs (s1-s3/s4 or s2-s3/s4) are correct
# in the conditions where s5 or s6 appears during the distractor
task = data['task']
stim = data['stim_conf']
stim_dual = []
stim_dpa = []
acc_dual = []
acc_dpa = []
for i in range(INST):
stim_dual.append(stim[i, task[i, :]==0])
stim_dpa.append(stim[i, task[i, :]==1])
acc_dual.append(data['acc'][0][7][i]*1)
acc_dpa.append(data['acc'][0][8][i]*1)
matdual_inst = []
matdpa_inst = []
for i in range(INST):
matdual = np.zeros((2, 2, 2))
for gng in range(2):
matdpa = np.zeros((2, 2))
for dpa1 in range(2):
for dpa2 in range(2):
ind_dual = np.logical_and.reduce((stim_dual[i][:, 0]==dpa1,
stim_dual[i][:, 1]==dpa2,
stim_dual[i][:, 2]==gng))
matdual[dpa1, dpa2, gng] = np.sum(acc_dual[i][ind_dual])
ind_dpa = np.logical_and(stim_dpa[i][:, 0]==dpa1,
stim_dpa[i][:, 1]==dpa2)
matdpa[dpa1, dpa2] = np.sum(acc_dpa[i][ind_dpa])
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(9.75, 3))
plt.subplots_adjust(wspace=0.4)
matdualS5_perc = np.divide(matdual[:, :, 0], np.sum(matdual[:, :, 0]))
im = ax[0].imshow(matdualS5_perc, cmap='GnBu', vmin=0, vmax=1)
ax[0].set_title('Dual-task trials \n Stimulus G2 appears')
ax[0].set_xticklabels(['', 'T1', 'T2', ''])
ax[0].set_yticklabels(['', 'S1', '', 'S2', ''])
# fig.colorbar(im)
matdualS6_perc = np.divide(matdual[:, :, 1], np.sum(matdual[:, :, 1]))
im2 = ax[1].imshow(matdualS6_perc, cmap='GnBu', vmin=0, vmax=1)
ax[1].set_title('Dual-task trials \n Stimulus G1 appears')
# plt.colorbar()
ax[1].set_xticklabels(['', 'T1', 'T2', ''])
ax[1].set_yticklabels(['', 'S1', '', 'S2', ''])
matdpa_perc = np.divide(matdpa, np.sum(matdpa))
im3 = ax[2].imshow(matdpa_perc, cmap='GnBu', vmin=0, vmax=1)
ax[2].set_title('DPA task trials \n ')
# plt.colorbar()
ax[2].set_xticklabels(['', 'T1', 'T2', ''])
ax[2].set_yticklabels(['', 'S1', '', 'S2', ''])
fig.colorbar(im, ax=ax.ravel().tolist(), shrink=0.7)
plot_dir = os.path.join(save_dir, 'count_plots_noise' + str(noise) + '_lamb'
+ str(lamb))
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
plt.savefig(os.path.join(plot_dir, 'Inst' + str(i) + '.png'))
else:
plt.savefig(os.path.join(plot_dir, 'Inst' + str(i) + '.png'))
plt.close()
matdual_inst.append(matdual)
matdpa_inst.append(matdpa)
```
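The `column_stack` loop above that aggregates per-instance accuracy curves can equivalently be written with a single `np.stack` call. A small sketch with hypothetical curves (random stand-ins for the loaded accuracy arrays):

```python
import numpy as np

# Hypothetical per-instance accuracy curves (one array per RNN instance).
min_epochs = 5
curves = [np.random.rand(20) for _ in range(3)]

# rows = epochs, columns = instances, as in the column_stack loop above
stacked = np.stack([c[:min_epochs] for c in curves], axis=1)
mean_curve = np.mean(stacked, axis=1)
print(stacked.shape, mean_curve.shape)   # (5, 3) (5,)
```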
#### File: dPCA/dualtask/run_Parallel.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import pdb
import sys
import os
import random
# from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
PATH = '/home/joanpe/dPCA/python/dPCA/dualtask/'
sys.path.insert(0, PATH)
from DualTask import DualTask
#from dPCA import dPCA
from joblib import Parallel, delayed
import multiprocessing
from matplotlib import cm
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
# *****************************************************************************
# STEP 1: Train RNNs to solve the dual task *********************************
# *****************************************************************************
# Noise range for the input to the RNN
noise_rng = np.array([0.0])
#noise_rng = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
# Time of appearance of the go/no-go task. 0 for no task. If gng_rng = [-1],
# each trial is randomly drawn from the dual task, DPA alone, or GNG alone.
gng_rng = np.array(-1)
#gng_rng = np.array([0, 10])
lamb = np.array([0.0])
#lamb = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1])
delay_max = np.array([0])
num_neurons = np.array([64])
# number of RNN instances
INST = 50
def trainDualTask(noise, gng, inst, lamb, delay, neuron):
'''Train an RNN with a given noise and compute the value of the accuracy
of its predictions'''
# Hyperparameters for AdaptiveLearningRate
alr_hps = {'initial_rate': 0.1}
# Hyperparameters for DualTask
# See DualTask.py for detailed descriptions.
hps = {
'rnn_type': 'vanilla',
'n_hidden': neuron,
'min_loss': 1e-6, # 1e-4
'min_learning_rate': 1e-5,
'max_n_epochs': 5000,
'do_restart_run': False,
'log_dir': './logs_' + str(gng) + '/lamb' + str(lamb) + '/noise' +
str(noise) + '/delay' + str(delay) + '/neurons' +
str(neuron) + '/inst' + str(inst),
'data_hps': {
'n_batch': 2048,
'n_time': 20,
'n_bits': 6,
'noise': noise,
'gng_time': gng,
'lamb': lamb,
            'delay_max': delay},
'alr_hps': alr_hps
}
# Create DualTask object
dt = DualTask(**hps)
# Train the RNN instance for the specific noise
dt.train()
# Get inputs and outputs from example trials
example_trials = dt.generate_dualtask_trials()
is_lstm = dt.hps.rnn_type == 'lstm'
# Compute RNN predictions from example trials
example_predictions = dt.predict(example_trials,
do_predict_full_LSTM_state=is_lstm)
# Accuracy of the predictions
acc_dpa = example_predictions['ev_acc_dpa']
acc_gng = example_predictions['ev_acc_gng']
state = example_predictions['state']
acc_dpa_dual = example_predictions['ev_acc_dpa_dual']
acc_gng_dual = example_predictions['ev_acc_gng_dual']
acc_dpa_dpa = example_predictions['ev_acc_dpa_dpa']
acc_gng_gng = example_predictions['ev_acc_gng_gng']
pred_out = example_predictions['output']
inputs = example_trials['inputs']
outputs = example_trials['output']
stim_conf = example_trials['stim_conf']
vec_tau = example_trials['vec_tau']
vec_acc_dual = example_predictions['vec_acc_dpa_dual']
vec_acc_dpa = example_predictions['vec_acc_dpa_dpa']
if gng == -1:
task_type = example_trials['task_choice']
else:
task_type = 0
stim_conf = example_trials['stim_conf']
return [acc_dpa, acc_gng, state, task_type, acc_dpa_dual, acc_gng_dual,
acc_dpa_dpa, acc_gng_gng, vec_acc_dual, vec_acc_dpa,
pred_out, inputs, outputs, stim_conf, vec_tau]
# Condition in which each trial is randomly assigned one of the 3 tasks
if gng_rng == -1:
    # Train various RNNs with different noise
#for gng in gng_rng:
gng = gng_rng
acc = []
state = []
for l in lamb:
for delay in delay_max:
for noise in noise_rng:
for neuron in num_neurons:
numcores = multiprocessing.cpu_count()
ops = Parallel(n_jobs=numcores)(delayed(
trainDualTask)(noise, gng, inst, l, delay,
neuron) for inst in range(INST))
# Save data in a list
acc_dpa = []
acc_gng = []
task = []
acc_dpa_dual = []
acc_gng_dual = []
acc_dpa_dpa = []
acc_gng_gng = []
inputs = []
output = []
pred_out = []
stim_conf = []
vec_tau = []
vec_acc_dual = []
vec_acc_dpa = []
stim_conf = []
for i in range(INST):
acc_dpa.append(ops[i][0])
acc_gng.append(ops[i][1])
state.append([noise, ops[i][2]])
task.append(ops[i][3])
acc_dpa_dual.append(ops[i][4])
acc_gng_dual.append(ops[i][5])
acc_dpa_dpa.append(ops[i][6])
acc_gng_gng.append(ops[i][7])
vec_acc_dual.append(ops[i][8])
vec_acc_dpa.append(ops[i][9])
pred_out.append(ops[i][10])
output.append(ops[i][12])
inputs.append(ops[i][11])
stim_conf.append(ops[i][13])
vec_tau.append(ops[i][14])
acc.append([noise, acc_dpa, acc_gng, acc_dpa_dual,
acc_gng_dual, acc_dpa_dpa, acc_gng_gng,
vec_acc_dual, vec_acc_dpa])
# save data and figure
data = {'acc': acc, 'state': state, 'task': task, 'inputs': inputs,
'output': output, 'pred_out': pred_out,
'stim_conf': stim_conf, 'vec_tau': vec_tau}
fig_dir = os.path.join(PATH, 'data_trainedwithnoise')
if os.path.isdir(fig_dir) is False:
os.mkdir(fig_dir)
np.savez(os.path.join(fig_dir, 'data_' + str(gng) + '_'
+ str(l) + '_' + str(delay)
+ '_i' + str(INST) + '_n' + str(noise_rng[0])
+ '-' + str(noise_rng[-1])
+ '_neu' + str(num_neurons[0])
+ '-' + str(num_neurons[-1])), **data)
else:
np.savez(os.path.join(fig_dir, 'data_' + str(gng) + '_'
+ str(l) + '_' + str(delay)
+ '_i' + str(INST) + '_n' + str(noise_rng[0])
+ '-' + str(noise_rng[-1])
+ '_neu' + str(num_neurons[0])
+ '-' + str(num_neurons[-1])), **data)
# Runs DPA + GNG task
else:
    # Train various RNNs with different noise
for gng in gng_rng:
for l in lamb:
acc = []
state = []
for delay in delay_max:
for noise in noise_rng:
for neuron in num_neurons:
numcores = multiprocessing.cpu_count()
ops = Parallel(n_jobs=numcores)(delayed(
trainDualTask)(noise, gng, inst, l, delay,
neuron) for inst in range(INST))
# Save data in a list
NOISE = np.repeat(l, INST)
acc_dpa = []
acc_gng = []
task_type = []
for i in range(INST):
acc_dpa.append(ops[i][0])
acc_gng.append(ops[i][1])
state.append([noise, ops[i][2]])
acc.append([noise, acc_dpa, acc_gng])
# save data and figure
data = {'acc': acc, 'state': state}
fig_dir = os.path.join(PATH, 'data_trainedwithnoise')
if os.path.isdir(fig_dir) is False:
os.mkdir(fig_dir)
np.savez(os.path.join(fig_dir, 'data_' + str(gng) + '_'
+ str(l) + '_' + str(delay)
+ '_i' + str(INST) + '_n' + str(noise_rng[0])
+ '-' + str(noise_rng[-1])
+ '_neu' + str(num_neurons[0])
+ '-' + str(num_neurons[-1])), **data)
else:
np.savez(os.path.join(fig_dir, 'data_' + str(gng) + '_'
+ str(l) + '_' + str(delay)
+ '_i' + str(INST) + '_n' + str(noise_rng[0])
+ '-' + str(noise_rng[-1])
+ '_neu' + str(num_neurons[0])
+ '-' + str(num_neurons[-1])), **data)
``` |
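The script fans the 50 instances out over all cores with joblib, collecting one result list per instance. The pattern in isolation, with a trivial stand-in for `trainDualTask`:

```python
from joblib import Parallel, delayed
import multiprocessing

def train_stub(inst):
    # Stand-in for trainDualTask: just return something per instance.
    return [inst, inst ** 2]

numcores = multiprocessing.cpu_count()
ops = Parallel(n_jobs=numcores)(delayed(train_stub)(inst) for inst in range(4))
print(ops)   # [[0, 0], [1, 1], [2, 4], [3, 9]]
```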
{
"source": "JoanPuig/PyFIT",
"score": 2
} |
#### File: PyFIT/examples/example_decode_fit_file.py
```python
from FIT.decoder import Decoder
def main():
# This sample code shows how to decode a FIT file into a File object
# A File object is a low level representation of the FIT file
# Modify to fit your directory setup
file_name = './data/FIT/MY_ACTIVITY_FILE.fit'
file = Decoder.decode_fit_file(file_name)
print(file)
if __name__ == "__main__":
main()
```
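For most uses the higher-level `Decoder.decode_fit_messages` (defined in `FIT/decoder.py` below) is more convenient than the raw `File` object, since it returns typed `Message` objects. A sketch of the same example using it, assuming the code-generation step has been run so that `FIT.types` exists:

```python
from FIT.decoder import Decoder

def main():
    # Modify to fit your directory setup
    file_name = './data/FIT/MY_ACTIVITY_FILE.fit'
    # Returns a tuple of typed Message objects instead of raw records
    messages = Decoder.decode_fit_messages(file_name)
    for message in messages:
        print(message)

if __name__ == "__main__":
    main()
```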
#### File: PyFIT/FIT/decoder.py
```python
import importlib
import warnings
from typing import Dict, Union, Optional, Tuple, Any
from FIT.base_types import UnsignedInt8, UnsignedInt16, UnsignedInt32, UnsignedInt64, BASE_TYPE_NUMBER_TO_CLASS
from FIT.model import MessageDefinition, File, FileHeader, Record, NormalRecordHeader, CompressedTimestampRecordHeader, FieldDefinition, Architecture, RecordField, MessageContent, Message, UndocumentedMessage, ManufacturerSpecificMessage, \
UndocumentedMessageField, DeveloperMessageField
import numpy as np
class FITFileContentError(Exception):
pass
class FITFileContentWarning(Warning):
pass
class UnsupportedFITFeature(Exception):
pass
class FITGeneratedCodeNotFoundError(Exception):
pass
class CRCCalculator:
CRC_TABLE = [
UnsignedInt16(0x0000),
UnsignedInt16(0xCC01),
UnsignedInt16(0xD801),
UnsignedInt16(0x1400),
UnsignedInt16(0xF001),
UnsignedInt16(0x3C00),
UnsignedInt16(0x2800),
UnsignedInt16(0xE401),
UnsignedInt16(0xA001),
UnsignedInt16(0x6C00),
UnsignedInt16(0x7800),
UnsignedInt16(0xB401),
UnsignedInt16(0x5000),
UnsignedInt16(0x9C01),
UnsignedInt16(0x8801),
UnsignedInt16(0x4400),
]
x000F = UnsignedInt16(0x000F)
x0FFF = UnsignedInt16(0x0FFF)
def __init__(self):
self.current = UnsignedInt16(0)
def reset(self) -> None:
self.current = UnsignedInt16(0)
def new_byte(self, byte) -> None:
crc = self.current
tmp = CRCCalculator.CRC_TABLE[crc & CRCCalculator.x000F] # tmp = crc_table[crc & 0xF];
crc = (crc >> 4) & CRCCalculator.x0FFF # crc = (crc >> 4) & 0x0FFF;
crc = crc ^ tmp ^ CRCCalculator.CRC_TABLE[byte & CRCCalculator.x000F] # crc = crc ^ tmp ^ crc_table[byte & 0xF];
tmp = CRCCalculator.CRC_TABLE[crc & CRCCalculator.x000F] # tmp = crc_table[crc & 0xF];
crc = (crc >> 4) & CRCCalculator.x0FFF # crc = (crc >> 4) & 0x0FFF;
crc = crc ^ tmp ^ CRCCalculator.CRC_TABLE[(byte >> 4) & CRCCalculator.x000F] # crc = crc ^ tmp ^ crc_table[(byte >> 4) & 0xF];
self.current = crc
class ByteReader:
bytes_read: int
crc_calculator: CRCCalculator
raw_bytes: Union[bytearray, bytes]
def __init__(self, raw_bytes: bytes):
self.bytes_read = 0
self.raw_bytes = raw_bytes
self.crc_calculator = CRCCalculator()
def read_byte(self) -> UnsignedInt8:
if self.bytes_left() == 0:
raise FITFileContentError('Unexpected end of file encountered')
byte = UnsignedInt8(self.raw_bytes[self.bytes_read])
self.bytes_read = self.bytes_read + 1
self.crc_calculator.new_byte(byte)
return byte
def read_double_byte(self) -> UnsignedInt16:
return UnsignedInt16(np.array([self.read_byte() for _ in range(0, 2)]).view(UnsignedInt16)[0])
def read_quad_byte(self) -> UnsignedInt32:
return UnsignedInt32(np.array([self.read_byte() for _ in range(0, 4)]).view(UnsignedInt32)[0])
    def read_octo_byte(self) -> UnsignedInt64:
        return UnsignedInt64(np.array([self.read_byte() for _ in range(0, 8)]).view(UnsignedInt64)[0])
def read_bytes(self, count: int) -> bytes:
return bytes([self.read_byte() for _ in range(0, count)])
def bytes_left(self):
return len(self.raw_bytes) - self.bytes_read
class Decoder:
TIMESTAMP_FIELD_NUMBER = 253
MESSAGE_INDEX_FIELD_NUMBER = 254
PART_INDEX_FIELD_NUMBER = 250
IS_COMPRESSED_TIMESTAMP_HEADER_POSITION = 8 - 1
IS_DEFINITION_MESSAGE_POSITION = 7 - 1
HAS_DEVELOPER_DATA_POSITION = 6 - 1
RESERVED_BIT_POSITION = 5 - 1
reader: ByteReader
most_recent_timestamp: Optional[UnsignedInt32]
message_definitions: Dict[int, MessageDefinition]
def __init__(self, reader: ByteReader):
self.reader = reader
self.message_definitions = {}
self.most_recent_timestamp = None
def decode_file(self) -> File:
header = self.decode_file_header()
records = self.decode_records(header.data_size)
crc = self.decode_crc(False)
return File(header, records, crc)
def decode_file_header(self) -> FileHeader:
header_size = self.reader.read_byte()
protocol_version = self.reader.read_byte()
profile_version = self.reader.read_double_byte()
data_size = self.reader.read_quad_byte()
data_type = ''.join([chr(self.reader.read_byte()) for _ in range(0, 4)])
if header_size not in [12, 14]:
raise FITFileContentError(f'Invalid header size, Expected: 12 or 14, read: {header_size}')
if data_type != '.FIT':
raise FITFileContentError(f'Invalid header text. Expected: ".FIT", read: "{data_type}"')
if header_size == 14:
crc = self.decode_crc(True)
else:
crc = None
return FileHeader(header_size, protocol_version, profile_version, data_size, data_type, crc)
def decode_records(self, data_size: UnsignedInt32) -> Tuple[Record]:
initial_bytes_read = self.reader.bytes_read
records = []
while self.reader.bytes_read - initial_bytes_read < data_size:
records.append(self.decode_record())
return tuple(records)
def decode_record(self) -> Record:
header_byte = self.reader.read_byte()
is_compressed_timestamp_header = Decoder.bit_get(header_byte, Decoder.IS_COMPRESSED_TIMESTAMP_HEADER_POSITION)
if is_compressed_timestamp_header:
header = self.decode_compressed_timestamp_record_header(header_byte)
else:
header = self.decode_normal_record_header(header_byte)
if header.is_definition_message:
content = self.decode_message_definition(header)
else:
content = self.decode_message_content(header)
return Record(header, content)
def decode_normal_record_header(self, header: UnsignedInt8) -> NormalRecordHeader:
is_definition_message = Decoder.bit_get(header, Decoder.IS_DEFINITION_MESSAGE_POSITION)
has_developer_data = Decoder.bit_get(header, Decoder.HAS_DEVELOPER_DATA_POSITION)
reserved_bit = Decoder.bit_get(header, Decoder.RESERVED_BIT_POSITION)
if reserved_bit:
raise FITFileContentError('Reserved bit on record header is 1, expected 0')
local_message_type = header & UnsignedInt8(15) # 1st to 4th bits
return NormalRecordHeader(True, is_definition_message, has_developer_data, local_message_type)
def decode_compressed_timestamp_record_header(self, header_byte: UnsignedInt8) -> CompressedTimestampRecordHeader:
        local_message_type = (header_byte >> 5) & 0x3  # 6th to 7th bits
        time_offset = header_byte & 0x1F  # 1st to 5th bits
return CompressedTimestampRecordHeader(False, False, False, local_message_type, time_offset, self.most_recent_timestamp)
def decode_field_definition(self) -> FieldDefinition:
number = self.reader.read_byte()
size = self.reader.read_byte()
type_byte = self.reader.read_byte()
endian_ability = Decoder.bit_get(type_byte, 8 - 1)
base_type = type_byte & UnsignedInt8(31) # 1st to 5th bits
reserved_bits = type_byte & UnsignedInt8(96) # 6th to 7th bits
if reserved_bits:
raise FITFileContentError(f'Invalid FieldDefinition reserved bits, expected 0, read {reserved_bits}')
return FieldDefinition(number, size, endian_ability, base_type)
def decode_message_definition(self, header: NormalRecordHeader) -> MessageDefinition:
reserved_byte = self.reader.read_byte()
if reserved_byte:
raise FITFileContentError('Reserved byte after record header is not 0')
architecture = Architecture(self.reader.read_byte())
global_message_number = UnsignedInt16.from_bytes(self.reader.read_bytes(2))
number_of_fields = self.reader.read_byte()
field_definitions = tuple([self.decode_field_definition() for _ in range(0, number_of_fields)])
number_of_developer_fields = self.reader.read_byte() if header.has_developer_data else 0
developer_field_definitions = tuple([self.decode_field_definition() for _ in range(0, number_of_developer_fields)])
definition = MessageDefinition(reserved_byte, architecture, global_message_number, field_definitions, developer_field_definitions)
self.message_definitions[header.local_message_type] = definition
return definition
def decode_field(self, field_definition: FieldDefinition) -> RecordField:
raw_bytes = self.reader.read_bytes(field_definition.size) # TODO endianness
type_class = BASE_TYPE_NUMBER_TO_CLASS[field_definition.base_type]
decoded_value = type_class.from_bytes(raw_bytes)
        if field_definition.number == Decoder.MESSAGE_INDEX_FIELD_NUMBER:
            if field_definition.base_type != UnsignedInt16.metadata().base_type_number:
                raise FITFileContentError(f'Message Index field number {Decoder.MESSAGE_INDEX_FIELD_NUMBER} is expected to be of type {UnsignedInt16.__name__}, {type_class.__name__} found')
        if field_definition.number == Decoder.PART_INDEX_FIELD_NUMBER:
            if field_definition.base_type != UnsignedInt32.metadata().base_type_number:
                raise FITFileContentError(f'Part Index field number {Decoder.PART_INDEX_FIELD_NUMBER} is expected to be of type {UnsignedInt32.__name__}, {type_class.__name__} found')
        if field_definition.number == Decoder.TIMESTAMP_FIELD_NUMBER:
            if field_definition.base_type != UnsignedInt32.metadata().base_type_number:
                raise FITFileContentError(f'Timestamp field number {Decoder.TIMESTAMP_FIELD_NUMBER} is expected to be of type {UnsignedInt32.__name__}, {type_class.__name__} found')
self.most_recent_timestamp = decoded_value
return RecordField(decoded_value)
def decode_message_content(self, header: NormalRecordHeader) -> MessageContent:
message_definition = self.message_definitions.get(header.local_message_type)
if not message_definition:
raise FITFileContentError(f'Unable to find local message type definition {header.local_message_type}')
fields = tuple([self.decode_field(field_definition) for field_definition in message_definition.field_definitions])
developer_fields = tuple([self.decode_field(developer_field_definition) for developer_field_definition in message_definition.developer_field_definitions])
return MessageContent(fields, developer_fields)
def decode_crc(self, allow_zero) -> UnsignedInt16:
computed_crc = self.reader.crc_calculator.current
expected_crc = self.reader.read_double_byte()
self.reader.crc_calculator.reset()
if allow_zero and expected_crc == UnsignedInt16(0):
return expected_crc
if computed_crc != expected_crc:
raise FITFileContentError(f'Invalid CRC. Expected: {expected_crc}, computed: {computed_crc}')
return expected_crc
@staticmethod
def bit_get(byte: UnsignedInt8, position: int) -> bool:
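        # e.g. bit_get(UnsignedInt8(64), Decoder.IS_DEFINITION_MESSAGE_POSITION)
        # is True, since positions are 0-indexed from the least significant bit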
return byte & (1 << position) > 0
@staticmethod
def decode_fit_file(file_name: str) -> File:
        # Reads the binary data of the .FIT file
        with open(file_name, "rb") as fit_file:
            file_bytes = fit_file.read()
# Constructs a ByteReader and Decoder object
byte_reader = ByteReader(file_bytes)
decoder = Decoder(byte_reader)
# Decodes the file
return decoder.decode_file()
@staticmethod
def decode_fit_messages(file_name: str, error_on_undocumented_message: bool = False, error_on_undocumented_field: bool = False, error_on_invalid_enum_value: bool = False) -> Tuple[Message]:
# Reads the FIT file
file = Decoder.decode_fit_file(file_name)
try:
from FIT.types import MesgNum
except ModuleNotFoundError:
raise FITGeneratedCodeNotFoundError('Unable to load FIT.types, make sure you have generated the code first')
messages = []
definitions = {}
warned_undocumented_msg_num = []
warned_manufacturer_specific_messages = []
warned_undocumented_fields = []
for record in file.records:
if isinstance(record.content, MessageDefinition):
definitions[record.header.local_message_type] = record.content
global_message_number = record.content.global_message_number
if global_message_number not in MesgNum._value2member_map_:
is_manufacturer_specific = MesgNum.MfgRangeMin.value <= global_message_number <= MesgNum.MfgRangeMax.value
if is_manufacturer_specific:
warning_message = f'DefinitionMessage references MesgNum {global_message_number} which is manufacturer specific'
if warning_message not in warned_manufacturer_specific_messages:
warnings.warn(warning_message, FITFileContentWarning)
warned_manufacturer_specific_messages.append(warning_message)
else:
error_message = f'DefinitionMessage references MesgNum {global_message_number} which is not documented'
if error_on_undocumented_message:
raise FITFileContentError(error_message)
else:
if error_message not in warned_undocumented_msg_num:
warnings.warn(error_message, FITFileContentWarning)
warned_undocumented_msg_num.append(error_message)
elif isinstance(record.content, MessageContent):
local_message_type = record.header.local_message_type
if local_message_type not in definitions:
raise FITFileContentError(f'Local message type {local_message_type} has not been previously defined')
message_definition = definitions[local_message_type]
is_manufacturer_specific = MesgNum.MfgRangeMin.value <= message_definition.global_message_number <= MesgNum.MfgRangeMax.value
if is_manufacturer_specific:
message_class = ManufacturerSpecificMessage # TODO custom manufacturer specific messages
class_name = ManufacturerSpecificMessage.__name__
else:
if message_definition.global_message_number in MesgNum._value2member_map_:
global_message_number = MesgNum(message_definition.global_message_number)
mod = importlib.import_module('FIT.messages')
message_class = getattr(mod, global_message_number.name)
class_name = global_message_number.name
else:
message_class = UndocumentedMessage
class_name = UndocumentedMessage.__name__
developer_fields = Decoder.extract_developer_fields(record, message_definition, error_on_invalid_enum_value)
expected_field_numbers = message_class.expected_field_numbers()
undocumented_fields = Decoder.extract_undocumented_fields(record.content, message_definition, expected_field_numbers, error_on_invalid_enum_value)
fields = Decoder.extract_fields(record.content, message_definition, expected_field_numbers)
message = message_class.from_extracted_fields(fields, developer_fields, undocumented_fields, error_on_invalid_enum_value)
for undocumented_field in message.undocumented_fields:
error_message = f'{class_name} message has undocumented field number {undocumented_field.definition.number}'
if error_on_undocumented_field:
raise FITFileContentError(error_message)
else:
if error_message not in warned_undocumented_fields:
warnings.warn(error_message, FITFileContentWarning)
warned_undocumented_fields.append(error_message)
messages.append(message)
else:
raise FITFileContentError(f'Unexpected record type: {type(record)}')
return tuple(messages)
@staticmethod
def extract_developer_fields(record: Record, message_definition: MessageDefinition, error_on_invalid_enum_value: bool = True) -> Tuple[DeveloperMessageField]:
developer_fields = []
for developer_field in record.content.developer_fields:
pass # TODO developer fields from record
return tuple(developer_fields)
@staticmethod
def extract_undocumented_fields(content: MessageContent, definition: MessageDefinition, expected_field_numbers: Tuple[int] = (), error_on_invalid_enum_value: bool = True) -> Tuple[UndocumentedMessageField]:
undocumented = []
for field_id, (field_position, field_definition) in definition.mapped_field_definitions().items():
if field_id not in expected_field_numbers:
undocumented.append(UndocumentedMessageField(field_definition, content.fields[field_position].value))
return tuple(undocumented)
@staticmethod
def extract_fields(content: MessageContent, definition: MessageDefinition, expected_field_numbers: Tuple[int]) -> Dict[UnsignedInt8, Any]:
# TODO compressed timestamp
extracted_fields = {}
field_number_to_index_map = definition.mapped_field_definitions()
for field_number in expected_field_numbers:
if field_number in field_number_to_index_map:
extracted_fields[field_number] = content.fields[field_number_to_index_map[field_number][0]].value
else:
extracted_fields[field_number] = None
return extracted_fields
@staticmethod
def cast_value(value, new_type, error_on_invalid_enum_value: bool):
if value is None:
return None
if isinstance(value, tuple):
return tuple(Decoder.cast_value(v, new_type, error_on_invalid_enum_value) for v in value)
try:
casted = new_type(value)
except ValueError:
            if error_on_invalid_enum_value:
                raise
else:
casted = new_type.Invalid
return casted
```
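A minimal sketch of how the `CRCCalculator` above can be used to verify a file's trailing CRC, assuming `raw` holds an entire FIT file read as bytes with the little-endian CRC in its final two bytes (the helper name is hypothetical):

```python
import numpy as np
from FIT.decoder import CRCCalculator

def check_trailing_crc(raw: bytes) -> bool:
    # Feed every byte except the two trailing CRC bytes to the calculator
    calc = CRCCalculator()
    for b in raw[:-2]:
        calc.new_byte(b)
    # The stored CRC is a little-endian unsigned 16-bit value
    stored = np.frombuffer(raw[-2:], dtype='<u2')[0]
    return calc.current == stored
```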
#### File: PyFIT/FIT/model.py
```python
import functools
from dataclasses import dataclass
from enum import Enum
from typing import Tuple, Dict, Union
from FIT.base_types import BaseType, UnsignedInt8, UnsignedInt16, UnsignedInt32
class Architecture(Enum):
LittleEndian = UnsignedInt8(0)
BigEndian = UnsignedInt8(1)
@dataclass(frozen=True)
class RecordHeader:
is_normal_header: bool
is_definition_message: bool
has_developer_data: bool
local_message_type: UnsignedInt8
@dataclass(frozen=True)
class NormalRecordHeader(RecordHeader):
pass
@dataclass(frozen=True)
class CompressedTimestampRecordHeader(RecordHeader):
time_offset: UnsignedInt8
    previous_timestamp: UnsignedInt32
@dataclass(frozen=True)
class RecordContent:
pass
@dataclass(frozen=True)
class RecordField:
value: BaseType
@dataclass(frozen=True)
class FieldDefinition:
number: UnsignedInt8
size: UnsignedInt8
endian_ability: bool
base_type: UnsignedInt8
@dataclass(frozen=True)
class MessageDefinition(RecordContent):
reserved_byte: UnsignedInt8
architecture: Architecture
global_message_number: UnsignedInt16
field_definitions: Tuple[FieldDefinition]
developer_field_definitions: Tuple[FieldDefinition]
@functools.lru_cache(1)
def mapped_field_definitions(self) -> Dict[UnsignedInt8, Tuple[int, FieldDefinition]]:
return {definition.number: (i, definition) for i, definition in enumerate(self.field_definitions)}
@functools.lru_cache(1)
def mapped_developer_field_definitions(self) -> Dict[UnsignedInt8, Tuple[int, FieldDefinition]]:
return {definition.number: (i, definition) for i, definition in enumerate(self.developer_field_definitions)}
def field_definition(self, number: UnsignedInt8) -> Tuple[int, FieldDefinition]:
return self.mapped_field_definitions()[number]
def developer_field_definition(self, number: UnsignedInt8) -> Tuple[int, FieldDefinition]:
return self.mapped_developer_field_definitions()[number]
@dataclass(frozen=True)
class MessageContent(RecordContent):
fields: Tuple[RecordField]
developer_fields: Tuple[RecordField]
@dataclass(frozen=True)
class Record:
header: RecordHeader
content: RecordContent
@dataclass(frozen=True)
class FileHeader:
header_size: UnsignedInt8
protocol_version: UnsignedInt8
profile_version: UnsignedInt16
data_size: UnsignedInt32
data_type: str
crc: UnsignedInt16
@dataclass(frozen=True)
class File:
header: FileHeader
records: Tuple[Record]
crc: UnsignedInt16
@dataclass(frozen=True)
class DeveloperMessageField:
definition: FieldDefinition
value: BaseType
@dataclass(frozen=True)
class UndocumentedMessageField:
definition: FieldDefinition
value: BaseType
@dataclass(frozen=True)
class Message:
developer_fields: Tuple[DeveloperMessageField]
undocumented_fields: Tuple[UndocumentedMessageField]
    def __str__(self):
        last_fields = ['developer_fields', 'undocumented_fields']
        new_last_fields = []
        fields = list(self.__dataclass_fields__.keys())
        for last_field in last_fields:
            if last_field in fields:
                fields.remove(last_field)
                new_last_fields.append(last_field)
        field_strs = []
        for k in fields:
            field_val = self.__dict__[k]
            if field_val is not None:
                if isinstance(field_val, tuple) and len(field_val) == 0:
                    continue
                field_strs.append(k + '=' + str(field_val))
        fields_str = ", ".join(field_strs)
        if self.undocumented_fields is not None and len(self.undocumented_fields) > 0:
            undocumented_field_strs = [str(field) for field in self.undocumented_fields]
            undocumented_fields_str = f'undocumented_fields=[{", ".join(undocumented_field_strs)}]'
            if len(fields_str) > 0:
                fields_str = fields_str + ', ' + undocumented_fields_str
            else:
                fields_str = undocumented_fields_str
        return f'{type(self).__name__}({fields_str})'
@dataclass(frozen=True)
class ManufacturerSpecificMessage(Message):
@staticmethod
def expected_field_numbers() -> Tuple[int]:
return ()
@staticmethod
def from_extracted_fields(extracted_fields, developer_fields: Tuple[DeveloperMessageField], undocumented_fields: Tuple[UndocumentedMessageField], error_on_invalid_enum_value: bool) -> "ManufacturerSpecificMessage":
return ManufacturerSpecificMessage(developer_fields, undocumented_fields)
@dataclass(frozen=True)
class UndocumentedMessage(Message):
@staticmethod
def expected_field_numbers() -> Tuple[int]:
return ()
@staticmethod
def from_extracted_fields(extracted_fields, developer_fields: Tuple[DeveloperMessageField], undocumented_fields: Tuple[UndocumentedMessageField], error_on_invalid_enum_value: bool) -> "UndocumentedMessage":
return UndocumentedMessage(developer_fields, undocumented_fields)
@dataclass(frozen=True)
class FieldMetadata:
name: str
type: str
#array: str
#components: str
scale: int
offset: int
units: str
#bits: int
#accumulated: Union[str, int]
@dataclass(frozen=True)
class DynamicFieldMetadata(FieldMetadata):
ref_field_name: str
ref_field_value: str
@dataclass(frozen=True)
class NormalFieldMetadata(FieldMetadata):
number: int
@dataclass(frozen=True)
class MessageMetadata:
fields_metadata: Tuple[FieldMetadata]
@functools.lru_cache(1)
def field_numbers(self) -> Tuple[int]:
return tuple([field_metadata.number for field_metadata in self.fields_metadata if isinstance(field_metadata, NormalFieldMetadata)])
@functools.lru_cache(1)
def field_names(self) -> Tuple[str]:
return tuple([field_metadata.name for field_metadata in self.fields_metadata])
``` |
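A short sketch of how a `MessageDefinition`'s cached lookup helpers are meant to be used, with hypothetical field definitions (the field numbers and base-type codes here are illustrative, not taken from a real file):

```python
from FIT.base_types import UnsignedInt8, UnsignedInt16
from FIT.model import Architecture, FieldDefinition, MessageDefinition

# Two hypothetical field definitions: number 253 (timestamp) and number 0
fields = (
    FieldDefinition(UnsignedInt8(253), UnsignedInt8(4), False, UnsignedInt8(6)),
    FieldDefinition(UnsignedInt8(0), UnsignedInt8(1), False, UnsignedInt8(0)),
)
definition = MessageDefinition(UnsignedInt8(0), Architecture.LittleEndian,
                               UnsignedInt16(20), fields, ())

# Cached number -> (position, definition) lookup
position, field = definition.field_definition(UnsignedInt8(253))
print(position, field.size)   # 0 4
```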