_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q278300
|
create_record_and_pid
|
test
|
def create_record_and_pid(data):
"""Create the deposit record metadata and persistent identifier.
:param data: Raw JSON dump of the deposit.
:type data: dict
:returns: A deposit object and its pid
:rtype: (`invenio_records.api.Record`,
`invenio_pidstore.models.PersistentIdentifier`)
"""
from invenio_records.api import Record
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
RecordIdentifier
deposit = Record.create(data=data)
created = arrow.get(data['_p']['created']).datetime
deposit.model.created = created.replace(tzinfo=None)
depid = deposit['_p']['id']
pid = PersistentIdentifier.create(
pid_type='depid',
pid_value=str(depid),
object_type='rec',
object_uuid=str(deposit.id),
status=PIDStatus.REGISTERED
)
if RecordIdentifier.query.get(int(depid)) is None:
RecordIdentifier.insert(int(depid))
deposit.commit()
return deposit, pid
|
python
|
{
"resource": ""
}
|
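The persistent identifier and creation date above are derived from the ``_p`` sub-dictionary of the dump. A minimal usage sketch, assuming an Invenio application context and ``arrow`` imported at module level; the field values are hypothetical:

data = {
    '_p': {'id': 12345, 'created': '2016-01-01T12:00:00+00:00'},
    'title': 'My deposit',  # hypothetical payload field, stored verbatim in the record
}
deposit, pid = create_record_and_pid(data)
assert pid.pid_value == '12345'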
q278301
|
_loadrecord
|
test
|
def _loadrecord(record_dump, source_type, eager=False):
"""Load a single record into the database.
:param record_dump: Record dump.
:type record_dump: dict
:param source_type: 'json' or 'marcxml'
:param eager: If ``True`` execute the task synchronously.
"""
if eager:
import_record.s(record_dump, source_type=source_type).apply(throw=True)
elif current_migrator.records_post_task:
chain(
import_record.s(record_dump, source_type=source_type),
current_migrator.records_post_task.s()
)()
else:
import_record.delay(record_dump, source_type=source_type)
|
python
|
{
"resource": ""
}
|
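A sketch of the dispatch modes of ``_loadrecord``; the dump item is hypothetical and ``import_record`` is the Celery task this module is assumed to provide:

record_dump = {'recid': 1, 'record': [], 'files': []}  # hypothetical dump item
_loadrecord(record_dump, 'marcxml', eager=True)  # synchronous, exceptions propagate
_loadrecord(record_dump, 'marcxml')              # queued via Celery (the default)

When ``current_migrator.records_post_task`` is set, the non-eager path instead chains the import task with that post-processing task.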
q278302
|
loadrecords
|
test
|
def loadrecords(sources, source_type, recid):
"""Load records migration dump."""
    # If a specific recid was requested, scan each dump for it and load it eagerly.
if recid is not None:
for source in sources:
records = json.load(source)
for item in records:
if str(item['recid']) == str(recid):
_loadrecord(item, source_type, eager=True)
click.echo("Record '{recid}' loaded.".format(recid=recid))
return
click.echo("Record '{recid}' not found.".format(recid=recid))
else:
for idx, source in enumerate(sources, 1):
click.echo('Loading dump {0} of {1} ({2})'.format(
idx, len(sources), source.name))
data = json.load(source)
with click.progressbar(data) as records:
for item in records:
_loadrecord(item, source_type)
|
python
|
{
"resource": ""
}
|
q278303
|
inspectrecords
|
test
|
def inspectrecords(sources, recid, entity=None):
"""Inspect records in a migration dump."""
for idx, source in enumerate(sources, 1):
click.echo('Loading dump {0} of {1} ({2})'.format(idx, len(sources),
source.name))
data = json.load(source)
# Just print record identifiers if none are selected.
if not recid:
click.secho('Record identifiers', fg='green')
total = 0
for r in (d['recid'] for d in data):
click.echo(r)
total += 1
click.echo('{0} records found in dump.'.format(total))
return
data = list(filter(lambda d: d['recid'] == recid, data))
if not data:
click.secho("Record not found.", fg='yellow')
return
for record in data:
if entity is None:
click.echo(json.dumps(record, indent=2))
if entity == 'files':
click.secho('Files', fg='green')
click.echo(
json.dumps(record['files'], indent=2))
if entity == 'json':
click.secho('Records (JSON)', fg='green')
for revision in record['record']:
click.secho('Revision {0}'.format(
revision['modification_datetime']), fg='yellow')
click.echo(json.dumps(revision['json'], indent=2))
            if entity == 'marcxml':
                click.secho('Records (MARCXML)', fg='green')
                for revision in record['record']:
                    click.secho(
                        'Revision {0}'.format(
                            revision['modification_datetime']),
                        fg='yellow')
                    click.echo(revision['marcxml'])
|
python
|
{
"resource": ""
}
|
q278304
|
loadcommon
|
test
|
def loadcommon(sources, load_task, asynchronous=True, predicate=None,
task_args=None, task_kwargs=None):
"""Common helper function for load simple objects.
.. note::
Keyword arguments ``task_args`` and ``task_kwargs`` are passed to the
``load_task`` function as ``*task_args`` and ``**task_kwargs``.
.. note::
The `predicate` argument is used as a predicate function to load only
a *single* item from across all dumps (this CLI function will return
after loading the item). This is primarily used for debugging of
the *dirty* data within the dump. The `predicate` should be a function
with a signature ``f(dict) -> bool``, i.e. taking a single parameter
(an item from the dump) and return ``True`` if the item
should be loaded. See the ``loaddeposit`` for a concrete example.
:param sources: JSON source files with dumps
:type sources: list of str (filepaths)
:param load_task: Shared task which loads the dump.
:type load_task: function
:param asynchronous: Flag for serial or asynchronous execution of the task.
:type asynchronous: bool
:param predicate: Predicate for selecting only a single item from the dump.
:type predicate: function
:param task_args: positional arguments passed to the task.
:type task_args: tuple
:param task_kwargs: named arguments passed to the task.
:type task_kwargs: dict
"""
# resolve the defaults for task_args and task_kwargs
task_args = tuple() if task_args is None else task_args
task_kwargs = dict() if task_kwargs is None else task_kwargs
click.echo('Loading dumps started.')
for idx, source in enumerate(sources, 1):
click.echo('Opening dump file {0} of {1} ({2})'.format(
idx, len(sources), source.name))
data = json.load(source)
with click.progressbar(data) as data_bar:
for d in data_bar:
# Load a single item from the dump
if predicate is not None:
if predicate(d):
load_task.s(d, *task_args, **task_kwargs).apply(
throw=True)
click.echo("Loaded a single record.")
return
# Load dumps normally
else:
if asynchronous:
load_task.s(d, *task_args, **task_kwargs).apply_async()
else:
load_task.s(d, *task_args, **task_kwargs).apply(
throw=True)
|
python
|
{
"resource": ""
}
|
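A sketch of the single-item debugging mode described in the docstring, mirroring the pattern ``loaddeposit`` uses further below; ``sources`` and ``load_task`` are assumed to be set up as for a normal run:

def pred(item):
    # hypothetical selector: load only the dump item with id 42
    return item.get('id') == 42

loadcommon(sources, load_task, predicate=pred, asynchronous=False)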
q278305
|
loadcommunities
|
test
|
def loadcommunities(sources, logos_dir):
"""Load communities."""
from invenio_migrator.tasks.communities import load_community
loadcommon(sources, load_community, task_args=(logos_dir, ))
|
python
|
{
"resource": ""
}
|
q278306
|
loadusers
|
test
|
def loadusers(sources):
"""Load users."""
from .tasks.users import load_user
    # Cannot be executed asynchronously due to duplicate emails and usernames,
    # which can create a race condition.
loadcommon(sources, load_user, asynchronous=False)
|
python
|
{
"resource": ""
}
|
q278307
|
loaddeposit
|
test
|
def loaddeposit(sources, depid):
"""Load deposit.
Usage:
invenio dumps loaddeposit ~/data/deposit_dump_*.json
invenio dumps loaddeposit -d 12345 ~/data/deposit_dump_*.json
"""
from .tasks.deposit import load_deposit
if depid is not None:
def pred(dep):
return int(dep["_p"]["id"]) == depid
loadcommon(sources, load_deposit, predicate=pred, asynchronous=False)
else:
loadcommon(sources, load_deposit)
|
python
|
{
"resource": ""
}
|
q278308
|
get_profiler_statistics
|
test
|
def get_profiler_statistics(sort="cum_time", count=20, strip_dirs=True):
"""Return profiler statistics.
:param str sort: dictionary key to sort by
:param int|None count: the number of results to return, None returns all results.
:param bool strip_dirs: if True strip the directory, otherwise return the full path
"""
json_stats = []
pstats = yappi.convert2pstats(yappi.get_func_stats())
if strip_dirs:
pstats.strip_dirs()
    for func, func_stat in pstats.stats.items():  # items() for Python 3 compatibility
path, line, func_name = func
cc, num_calls, total_time, cum_time, callers = func_stat
json_stats.append({
"path": path,
"line": line,
"func_name": func_name,
"num_calls": num_calls,
"total_time": total_time,
"total_time_per_call": total_time/num_calls if total_time else 0,
"cum_time": cum_time,
"cum_time_per_call": cum_time/num_calls if cum_time else 0
})
return sorted(json_stats, key=itemgetter(sort), reverse=True)[:count]
|
python
|
{
"resource": ""
}
|
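A usage sketch: statistics must first be collected via yappi; ``do_some_work`` is a hypothetical workload:

import yappi

yappi.start()
do_some_work()
yappi.stop()
for stat in get_profiler_statistics(sort='total_time', count=5):
    print(stat['func_name'], stat['cum_time'])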
q278309
|
main
|
test
|
def main(port=8888):
"""Run as sample test server."""
import tornado.ioloop
routes = [] + TornadoProfiler().get_routes()
app = tornado.web.Application(routes)
app.listen(port)
tornado.ioloop.IOLoop.current().start()
|
python
|
{
"resource": ""
}
|
q278310
|
CProfileStatsDumpHandler.post
|
test
|
def post(self):
"""Dump current profiler statistics into a file."""
filename = self.get_argument('filename', 'dump.prof')
CProfileWrapper.profiler.dump_stats(filename)
self.finish()
|
python
|
{
"resource": ""
}
|
q278311
|
CProfileStatsHandler.delete
|
test
|
def delete(self):
"""Clear profiler statistics."""
CProfileWrapper.profiler.create_stats()
self.enable()
self.set_status(204)
self.finish()
|
python
|
{
"resource": ""
}
|
q278312
|
CProfileHandler.delete
|
test
|
def delete(self):
"""Stop the profiler."""
CProfileWrapper.profiler.disable()
self.running = False
self.set_status(204)
self.finish()
|
python
|
{
"resource": ""
}
|
q278313
|
CProfileHandler.get
|
test
|
def get(self):
"""Check if the profiler is running."""
self.write({"running": self.running})
self.set_status(200)
self.finish()
|
python
|
{
"resource": ""
}
|
q278314
|
disable_timestamp
|
test
|
def disable_timestamp(method):
"""Disable timestamp update per method."""
@wraps(method)
def wrapper(*args, **kwargs):
result = None
with correct_date():
result = method(*args, **kwargs)
return result
return wrapper
|
python
|
{
"resource": ""
}
|
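``correct_date`` is assumed to be a context manager that suppresses automatic timestamp updates while the wrapped method runs. A minimal sketch with a hypothetical no-op stand-in:

from contextlib import contextmanager
from functools import wraps

@contextmanager
def correct_date():
    yield  # a real implementation would disable the timestamp listener here

@disable_timestamp
def touch_record(record):
    record['title'] = 'updated without bumping the timestamp'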
q278315
|
load_user
|
test
|
def load_user(data):
"""Load user from data dump.
    NOTE: This task takes into account the possible duplication of emails and
    usernames, hence it should be called synchronously.
    In case of a collision it will raise UserEmailExistsError or
    UserUsernameExistsError if the email or username already exists in
    the database. The caller of this task should take care to resolve those
    collisions beforehand or after catching an exception.
:param data: Dictionary containing user data.
:type data: dict
"""
from invenio_accounts.models import User
from invenio_userprofiles.api import UserProfile
email = data['email'].strip()
if User.query.filter_by(email=email).count() > 0:
raise UserEmailExistsError(
"User email '{email}' already exists.".format(email=email))
last_login = None
if data['last_login']:
last_login = arrow.get(data['last_login']).datetime
confirmed_at = None
if data['note'] == '1':
confirmed_at = datetime.utcnow()
salt = data['password_salt']
checksum = data['password']
if not checksum:
new_password = None
# Test if password hash is in Modular Crypt Format
elif checksum.startswith('$'):
new_password = checksum
else:
new_password = str.join('$', ['', u'invenio-aes', salt, checksum])
with db.session.begin_nested():
obj = User(
id=data['id'],
password=new_password,
email=email,
confirmed_at=confirmed_at,
last_login_at=last_login,
active=(data['note'] != '0'),
)
db.session.add(obj)
nickname = data['nickname'].strip()
overwritten_username = ('username' in data and 'displayname' in data)
# NOTE: 'username' and 'displayname' will exist in data dump only
# if it was inserted there after dumping. It normally should not come from
# Invenio 1.x or 2.x data dumper script. In such case, those values will
# have precedence over the 'nickname' field.
if nickname or overwritten_username:
p = UserProfile(user=obj)
p.full_name = data.get('full_name', '').strip()
if overwritten_username:
p._username = data['username'].lower()
p._displayname = data['displayname']
elif nickname:
if UserProfile.query.filter(
UserProfile._username == nickname.lower()).count() > 0:
raise UserUsernameExistsError(
"Username '{username}' already exists.".format(
username=nickname))
try:
p.username = nickname
except ValueError:
current_app.logger.warn(
u'Invalid username {0} for user_id {1}'.format(
nickname, data['id']))
p._username = nickname.lower()
p._displayname = nickname
db.session.add(p)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278316
|
calc_translations_parallel
|
test
|
def calc_translations_parallel(images):
"""Calculate image translations in parallel.
Parameters
----------
images : ImageCollection
Images as instance of ImageCollection.
Returns
-------
2d array, (ty, tx)
ty and tx is translation to previous image in respectively
x or y direction.
"""
w = Parallel(n_jobs=_CPUS)
res = w(delayed(images.translation)(img) for img in images)
# save results to Image object, as Parallel is spawning another process
for i,translation in enumerate(res):
images[i].translation = translation
return np.array(res)
|
python
|
{
"resource": ""
}
|
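The ``Parallel``/``delayed`` fan-out idiom used above, in isolation: joblib returns results in input order, which is why they can be written back onto the original ``images`` afterwards.

from joblib import Parallel, delayed

# Fan a pure function out over a sequence; results come back in order.
squares = Parallel(n_jobs=2)(delayed(pow)(i, 2) for i in range(4))
assert squares == [0, 1, 4, 9]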
q278317
|
stitch
|
test
|
def stitch(images):
"""Stitch regular spaced images.
Parameters
----------
images : ImageCollection or list of tuple(path, row, column)
Each image-tuple should contain path, row and column. Row 0,
column 0 is top left image.
Example:
>>> images = [('1.png', 0, 0), ('2.png', 0, 1)]
Returns
-------
tuple (stitched, offset)
Stitched image and registered offset (y, x).
"""
    if not isinstance(images, ImageCollection):
images = ImageCollection(images)
calc_translations_parallel(images)
_translation_warn(images)
yoffset, xoffset = images.median_translation()
if xoffset != yoffset:
warn('yoffset != xoffset: %s != %s' % (yoffset, xoffset))
# assume all images have the same shape
y, x = imread(images[0].path).shape
height = y*len(images.rows) + yoffset*(len(images.rows)-1)
width = x*len(images.cols) + xoffset*(len(images.cols)-1)
# last dimension is number of images on top of each other
    merged = np.zeros((height, width, 2), dtype=int)  # np.int was removed in NumPy 1.24
for image in images:
r, c = image.row, image.col
mask = _merge_slice(r, c, y, x, yoffset, xoffset)
# last dim is used for averaging the seam
img = _add_ones_dim(imread(image.path))
merged[mask] += img
# average seam, possible improvement: use gradient
merged[..., 0] /= merged[..., 1]
return merged[..., 0].astype(np.uint8), (yoffset, xoffset)
|
python
|
{
"resource": ""
}
|
q278318
|
_add_ones_dim
|
test
|
def _add_ones_dim(arr):
"Adds a dimensions with ones to array."
arr = arr[..., np.newaxis]
return np.concatenate((arr, np.ones_like(arr)), axis=-1)
|
python
|
{
"resource": ""
}
|
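A quick demonstration of the shape change; the appended ones channel later acts as a per-pixel counter that ``stitch`` divides by to average overlapping seams:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = _add_ones_dim(a)
assert b.shape == (2, 2, 2)
assert (b[..., 0] == a).all() and (b[..., 1] == 1).all()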
q278319
|
RecordDumpLoader.create
|
test
|
def create(cls, dump):
"""Create record based on dump."""
# If 'record' is not present, just create the PID
if not dump.data.get('record'):
try:
PersistentIdentifier.get(pid_type='recid',
pid_value=dump.recid)
except PIDDoesNotExistError:
PersistentIdentifier.create(
'recid', dump.recid,
status=PIDStatus.RESERVED
)
db.session.commit()
return None
dump.prepare_revisions()
dump.prepare_pids()
dump.prepare_files()
# Create or update?
existing_files = []
if dump.record:
existing_files = dump.record.get('_files', [])
record = cls.update_record(revisions=dump.revisions,
created=dump.created,
record=dump.record)
pids = dump.missing_pids
else:
record = cls.create_record(dump)
pids = dump.pids
if pids:
cls.create_pids(record.id, pids)
if dump.files:
cls.create_files(record, dump.files, existing_files)
# Update files.
if dump.is_deleted(record):
cls.delete_record(record)
return record
|
python
|
{
"resource": ""
}
|
q278320
|
RecordDumpLoader.create_record
|
test
|
def create_record(cls, dump):
"""Create a new record from dump."""
# Reserve record identifier, create record and recid pid in one
# operation.
timestamp, data = dump.latest
record = Record.create(data)
record.model.created = dump.created.replace(tzinfo=None)
record.model.updated = timestamp.replace(tzinfo=None)
RecordIdentifier.insert(dump.recid)
PersistentIdentifier.create(
pid_type='recid',
pid_value=str(dump.recid),
object_type='rec',
object_uuid=str(record.id),
status=PIDStatus.REGISTERED
)
db.session.commit()
return cls.update_record(revisions=dump.rest, record=record,
created=dump.created)
|
python
|
{
"resource": ""
}
|
q278321
|
RecordDumpLoader.update_record
|
test
|
def update_record(cls, revisions, created, record):
"""Update an existing record."""
for timestamp, revision in revisions:
record.model.json = revision
record.model.created = created.replace(tzinfo=None)
record.model.updated = timestamp.replace(tzinfo=None)
db.session.commit()
return Record(record.model.json, model=record.model)
|
python
|
{
"resource": ""
}
|
q278322
|
RecordDumpLoader.create_pids
|
test
|
def create_pids(cls, record_uuid, pids):
"""Create persistent identifiers."""
for p in pids:
PersistentIdentifier.create(
pid_type=p.pid_type,
pid_value=p.pid_value,
pid_provider=p.provider.pid_provider if p.provider else None,
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED,
)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278323
|
RecordDumpLoader.delete_record
|
test
|
def delete_record(cls, record):
"""Delete a record and it's persistent identifiers."""
record.delete()
PersistentIdentifier.query.filter_by(
object_type='rec', object_uuid=record.id,
).update({PersistentIdentifier.status: PIDStatus.DELETED})
cls.delete_buckets(record)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278324
|
RecordDumpLoader.create_files
|
test
|
def create_files(cls, record, files, existing_files):
"""Create files.
This method is currently limited to a single bucket per record.
"""
default_bucket = None
# Look for bucket id in existing files.
for f in existing_files:
if 'bucket' in f:
default_bucket = f['bucket']
break
# Create a bucket in default location if none is found.
if default_bucket is None:
b = Bucket.create()
BucketTag.create(b, 'record', str(record.id))
default_bucket = str(b.id)
db.session.commit()
else:
b = Bucket.get(default_bucket)
record['_files'] = []
for key, meta in files.items():
obj = cls.create_file(b, key, meta)
ext = splitext(obj.key)[1].lower()
if ext.startswith('.'):
ext = ext[1:]
record['_files'].append(dict(
bucket=str(obj.bucket.id),
key=obj.key,
version_id=str(obj.version_id),
size=obj.file.size,
checksum=obj.file.checksum,
type=ext,
))
db.session.add(
RecordsBuckets(record_id=record.id, bucket_id=b.id)
)
record.commit()
db.session.commit()
return [b]
|
python
|
{
"resource": ""
}
|
q278325
|
RecordDumpLoader.create_file
|
test
|
def create_file(self, bucket, key, file_versions):
"""Create a single file with all versions."""
objs = []
for file_ver in file_versions:
f = FileInstance.create().set_uri(
file_ver['full_path'],
file_ver['size'],
'md5:{0}'.format(file_ver['checksum']),
)
obj = ObjectVersion.create(bucket, key).set_file(f)
obj.created = arrow.get(
file_ver['creation_date']).datetime.replace(tzinfo=None)
objs.append(obj)
# Set head version
db.session.commit()
return objs[-1]
|
python
|
{
"resource": ""
}
|
q278326
|
RecordDumpLoader.delete_buckets
|
test
|
def delete_buckets(cls, record):
"""Delete the bucket."""
files = record.get('_files', [])
buckets = set()
for f in files:
buckets.add(f.get('bucket'))
for b_id in buckets:
b = Bucket.get(b_id)
b.deleted = True
|
python
|
{
"resource": ""
}
|
q278327
|
RecordDump.missing_pids
|
test
|
def missing_pids(self):
"""Filter persistent identifiers."""
missing = []
for p in self.pids:
try:
PersistentIdentifier.get(p.pid_type, p.pid_value)
except PIDDoesNotExistError:
missing.append(p)
return missing
|
python
|
{
"resource": ""
}
|
q278328
|
RecordDump.prepare_revisions
|
test
|
def prepare_revisions(self):
"""Prepare data."""
# Prepare revisions
self.revisions = []
it = [self.data['record'][0]] if self.latest_only \
else self.data['record']
for i in it:
self.revisions.append(self._prepare_revision(i))
|
python
|
{
"resource": ""
}
|
q278329
|
RecordDump.prepare_files
|
test
|
def prepare_files(self):
"""Get files from data dump."""
# Prepare files
files = {}
for f in self.data['files']:
k = f['full_name']
if k not in files:
files[k] = []
files[k].append(f)
# Sort versions
for k in files.keys():
files[k].sort(key=lambda x: x['version'])
self.files = files
|
python
|
{
"resource": ""
}
|
q278330
|
RecordDump.prepare_pids
|
test
|
def prepare_pids(self):
"""Prepare persistent identifiers."""
self.pids = []
for fetcher in self.pid_fetchers:
val = fetcher(None, self.revisions[-1][1])
if val:
self.pids.append(val)
|
python
|
{
"resource": ""
}
|
q278331
|
RecordDump.is_deleted
|
test
|
def is_deleted(self, record=None):
"""Check if record is deleted."""
record = record or self.revisions[-1][1]
return any(
col == 'deleted'
for col in record.get('collections', [])
)
|
python
|
{
"resource": ""
}
|
q278332
|
load_community
|
test
|
def load_community(data, logos_dir):
"""Load community from data dump.
:param data: Dictionary containing community data.
:type data: dict
:param logos_dir: Path to a local directory with community logos.
:type logos_dir: str
"""
from invenio_communities.models import Community
from invenio_communities.utils import save_and_validate_logo
logo_ext_washed = logo_ext_wash(data['logo_ext'])
c = Community(
id=data['id'],
id_user=data['id_user'],
title=data['title'],
description=data['description'],
page=data['page'],
curation_policy=data['curation_policy'],
last_record_accepted=iso2dt_or_none(data['last_record_accepted']),
logo_ext=logo_ext_washed,
ranking=data['ranking'],
fixed_points=data['fixed_points'],
created=iso2dt(data['created']),
updated=iso2dt(data['last_modified']),
)
logo_path = join(logos_dir, "{0}.{1}".format(c.id, logo_ext_washed))
db.session.add(c)
if isfile(logo_path):
with open(logo_path, 'rb') as fp:
save_and_validate_logo(fp, logo_path, c.id)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278333
|
load_featured
|
test
|
def load_featured(data):
"""Load community featuring from data dump.
:param data: Dictionary containing community featuring data.
:type data: dict
"""
from invenio_communities.models import FeaturedCommunity
obj = FeaturedCommunity(id=data['id'],
id_community=data['id_community'],
start_date=iso2dt(data['start_date']))
db.session.add(obj)
db.session.commit()
|
python
|
{
"resource": ""
}
|
q278334
|
dump
|
test
|
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags):
"""Dump data from Invenio legacy."""
init_app_context()
file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing)
kwargs = dict((f.strip('-').replace('-', '_'), True) for f in thing_flags)
try:
thing_func = collect_things_entry_points()[thing]
except KeyError:
        raise click.Abort(
            '{0} is not in the list of available things to migrate: '
            '{1}'.format(thing, collect_things_entry_points()))
click.echo("Querying {0}...".format(thing))
count, items = thing_func.get(query, from_date, limit=limit, **kwargs)
progress_i = 0 # Progress bar counter
click.echo("Dumping {0}...".format(thing))
with click.progressbar(length=count) as bar:
for i, chunk_ids in enumerate(grouper(items, chunk_size)):
with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp:
fp.write("[\n")
for _id in chunk_ids:
try:
json.dump(
thing_func.dump(_id, from_date, **kwargs),
fp,
default=set_serializer
)
fp.write(",")
except Exception as e:
                        click.secho("Failed dump {0} {1} ({2})".format(
                            thing, _id, e), fg='red')
progress_i += 1
bar.update(progress_i)
# Strip trailing comma.
fp.seek(fp.tell()-1)
fp.write("\n]")
|
python
|
{
"resource": ""
}
|
q278335
|
check
|
test
|
def check(thing):
"""Check data in Invenio legacy."""
init_app_context()
try:
thing_func = collect_things_entry_points()[thing]
except KeyError:
        raise click.Abort(
            '{0} is not in the list of available things to migrate: '
            '{1}'.format(thing, collect_things_entry_points()))
click.echo("Querying {0}...".format(thing))
count, items = thing_func.get_check()
i = 0
click.echo("Checking {0}...".format(thing))
with click.progressbar(length=count) as bar:
for _id in items:
thing_func.check(_id)
i += 1
bar.update(i)
|
python
|
{
"resource": ""
}
|
q278336
|
BasicWidget.delete
|
test
|
def delete(self):
"""
Deletes resources of this widget that require manual cleanup.
Currently removes all actions, event handlers and the background.
The background itself should automatically remove all vertex lists to avoid visual artifacts.
Note that this method is currently experimental, as it seems to have a memory leak.
"""
# TODO: fix memory leak upon widget deletion
del self.bg.widget
del self.bg
#self.clickable=False
del self._pos
del self._size
self.actions = {}
        for e_type, e_handlers in self.peng.eventHandlers.items():
            to_del = []
            for e_handler in e_handlers:
                # Weird workaround due to implementation details of WeakMethod
                if isinstance(e_handler, weakref.ref):
                    if super(weakref.WeakMethod, e_handler).__call__() is self:
                        to_del.append(e_handler)
                elif e_handler is self:
                    to_del.append(e_handler)
            for d in to_del:
                try:
                    del e_handlers[e_handlers.index(d)]
                except Exception:
                    # Could not delete the handler; a memory leak may occur
                    import traceback; traceback.print_exc()
|
python
|
{
"resource": ""
}
|
q278337
|
v_magnitude
|
test
|
def v_magnitude(v):
"""
Simple vector helper function returning the length of a vector.
``v`` may be any vector, with any number of dimensions
"""
return math.sqrt(sum(v[i]*v[i] for i in range(len(v))))
|
python
|
{
"resource": ""
}
|
q278338
|
v_normalize
|
test
|
def v_normalize(v):
"""
Normalizes the given vector.
The vector given may have any number of dimensions.
"""
vmag = v_magnitude(v)
return [ v[i]/vmag for i in range(len(v)) ]
|
python
|
{
"resource": ""
}
|
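A quick check of the two vector helpers; note that ``v_normalize`` will raise ``ZeroDivisionError`` for a zero vector:

v = [3.0, 4.0]
assert v_magnitude(v) == 5.0
assert v_normalize(v) == [0.6, 0.8]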
q278339
|
Material.transformTexCoords
|
test
|
def transformTexCoords(self,data,texcoords,dims=2):
"""
Transforms the given texture coordinates using the internal texture coordinates.
Currently, the dimensionality of the input texture coordinates must always be 2 and the output is 3-dimensional with the last coordinate always being zero.
The given texture coordinates are fitted to the internal texture coordinates. Note that values higher than 1 or lower than 0 may result in unexpected visual glitches.
The length of the given texture coordinates should be divisible by the dimensionality.
"""
assert dims==2 # TODO
out = []
origcoords = self.tex_coords
min_u,min_v = origcoords[0],origcoords[1]
max_u,max_v = origcoords[6],origcoords[7]
diff_u,diff_v = max_u-min_u, max_v-min_v
itexcoords = iter(texcoords)
for u,v in zip(itexcoords,itexcoords): # Based on http://stackoverflow.com/a/5389547/3490549
out_u = min_u+(diff_u*u)
out_v = min_v+(diff_v*v)
out.extend((out_u,out_v,0))
return out
|
python
|
{
"resource": ""
}
|
q278340
|
Bone.ensureBones
|
test
|
def ensureBones(self,data):
"""
Helper method ensuring per-entity bone data has been properly initialized.
Should be called at the start of every method accessing per-entity data.
``data`` is the entity to check in dictionary form.
"""
if "_bones" not in data:
data["_bones"]={}
if self.name not in data["_bones"]:
data["_bones"][self.name]={"rot":self.start_rot[:],"length":self.blength}
|
python
|
{
"resource": ""
}
|
q278341
|
Bone.setLength
|
test
|
def setLength(self,data,blength):
"""
Sets the length of this bone on the given entity.
``data`` is the entity to modify in dictionary form.
``blength`` is the new length of the bone.
"""
self.ensureBones(data)
data["_bones"][self.name]["length"]=blength
|
python
|
{
"resource": ""
}
|
q278342
|
Bone.setParent
|
test
|
def setParent(self,parent):
"""
Sets the parent of this bone for all entities.
Note that this method must be called before many other methods to ensure internal state has been initialized.
This method also registers this bone as a child of its parent.
"""
self.parent = parent
self.parent.child_bones[self.name]=self
|
python
|
{
"resource": ""
}
|
q278343
|
Bone.getPivotPoint
|
test
|
def getPivotPoint(self,data):
"""
Returns the point this bone pivots around on the given entity.
This method works recursively by calling its parent and then adding its own offset.
The resulting coordinate is relative to the entity, not the world.
"""
ppos = self.parent.getPivotPoint(data)
rot = self.parent.getRot(data)
length = self.parent.getLength(data)
out = calcSphereCoordinates(ppos,length,rot)
return out
|
python
|
{
"resource": ""
}
|
q278344
|
Animation.startAnimation
|
test
|
def startAnimation(self,data,jumptype):
"""
Callback that is called to initialize this animation on a specific actor.
Internally sets the ``_anidata`` key of the given dict ``data``\ .
``jumptype`` is either ``jump`` or ``animate`` to define how to switch to this animation.
"""
data["_anidata"]={}
adata = data["_anidata"]
adata["keyframe"]=0
adata["last_tick"]=time.time()
adata["jumptype"]=jumptype
adata["phase"]="transition"
|
python
|
{
"resource": ""
}
|
q278345
|
JSONModelGroup.set_state
|
test
|
def set_state(self):
"""
Sets the state required for this actor.
Currently translates the matrix to the position of the actor.
"""
x,y,z = self.obj.pos
glTranslatef(x,y,z)
|
python
|
{
"resource": ""
}
|
q278346
|
JSONModelGroup.unset_state
|
test
|
def unset_state(self):
"""
Resets the state required for this actor to the default state.
Currently resets the matrix to its previous translation.
"""
x,y,z = self.obj.pos
glTranslatef(-x,-y,-z)
|
python
|
{
"resource": ""
}
|
q278347
|
JSONRegionGroup.set_state
|
test
|
def set_state(self):
"""
Sets the state required for this vertex region.
Currently binds and enables the texture of the material of the region.
"""
glEnable(self.region.material.target)
glBindTexture(self.region.material.target, self.region.material.id)
self.region.bone.setRotate(self.data)
|
python
|
{
"resource": ""
}
|
q278348
|
JSONRegionGroup.unset_state
|
test
|
def unset_state(self):
"""
Resets the state required for this actor to the default state.
Currently only disables the target of the texture of the material, it may still be bound.
"""
glDisable(self.region.material.target)
self.region.bone.unsetRotate(self.data)
|
python
|
{
"resource": ""
}
|
q278349
|
Model.ensureModelData
|
test
|
def ensureModelData(self,obj):
"""
Ensures that the given ``obj`` has been initialized to be used with this model.
If the object is found to not be initialized, it will be initialized.
"""
if not hasattr(obj,"_modeldata"):
self.create(obj,cache=True)
if "_modelcache" not in obj._modeldata:
# Assume all initialization is missing, simply reinitialize
self.create(obj,cache=True)
|
python
|
{
"resource": ""
}
|
q278350
|
Model.redraw
|
test
|
def redraw(self,obj):
"""
Redraws the model of the given object.
Note that currently this method probably won't change any data since all movement and animation is done through pyglet groups.
"""
self.ensureModelData(obj)
data = obj._modeldata
vlists = data["_modelcache"]["vlists"]
for name,region in self.modeldata["regions"].items():
vlists[name].vertices = region.getVertices(data)
if region.enable_tex:
vlists[name].tex_coords = region.getTexCoords(data)
|
python
|
{
"resource": ""
}
|
q278351
|
Model.draw
|
test
|
def draw(self,obj):
"""
Actually draws the model of the given object to the render target.
Note that if the batch used for this object already existed, drawing will be skipped as the batch should be drawn by the owner of it.
"""
self.ensureModelData(obj)
data = obj._modeldata
if data.get("_manual_render",False):
obj.batch3d.draw()
|
python
|
{
"resource": ""
}
|
q278352
|
Actor.setModel
|
test
|
def setModel(self,model):
"""
Sets the model this actor should use when drawing.
This method also automatically initializes the new model and removes the old, if any.
"""
if self.model is not None:
self.model.cleanup(self)
self.model = model
model.create(self)
|
python
|
{
"resource": ""
}
|
q278353
|
XunitDestination.write_reports
|
test
|
def write_reports(self, relative_path, suite_name, reports,
package_name=None):
"""write the collection of reports to the given path"""
dest_path = self.reserve_file(relative_path)
with open(dest_path, 'wb') as outf:
outf.write(toxml(reports, suite_name, package_name=package_name))
return dest_path
|
python
|
{
"resource": ""
}
|
q278354
|
toxml
|
test
|
def toxml(test_reports, suite_name,
hostname=gethostname(), package_name="tests"):
"""convert test reports into an xml file"""
testsuites = et.Element("testsuites")
testsuite = et.SubElement(testsuites, "testsuite")
test_count = len(test_reports)
    if test_count < 1:
        raise ValueError('there must be at least one test report')
error_count = len([r for r in test_reports if r.errors])
failure_count = len([r for r in test_reports if r.failures])
ts = test_reports[0].start_ts
start_timestamp = datetime.fromtimestamp(ts).isoformat()
total_duration = test_reports[-1].end_ts - test_reports[0].start_ts
def quote_attribute(value):
return value if value is not None else "(null)"
testsuite.attrib = dict(
id="0",
errors=str(error_count),
failures=str(failure_count),
tests=str(test_count),
hostname=quote_attribute(hostname),
timestamp=quote_attribute(start_timestamp),
time="%f" % total_duration,
name=quote_attribute(suite_name),
package=quote_attribute(package_name),
)
for r in test_reports:
test_name = r.name
test_duration = r.end_ts - r.start_ts
class_name = r.src_location
testcase = et.SubElement(testsuite, "testcase")
testcase.attrib = dict(
name=test_name,
classname=quote_attribute(class_name),
time="%f" % test_duration,
)
if r.errors or r.failures:
if r.failures:
failure = et.SubElement(testcase, "failure")
failure.attrib = dict(
type="exception",
message=quote_attribute('\n'.join(['%s' % e for e in r.failures])),
)
else:
error = et.SubElement(testcase, "error")
error.attrib = dict(
type="exception",
message=quote_attribute('\n'.join(['%s' % e for e in r.errors])),
)
return et.tostring(testsuites, encoding="utf-8")
|
python
|
{
"resource": ""
}
|
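A minimal usage sketch; the report type is a hypothetical stand-in, since ``toxml`` only reads the six attributes below:

from collections import namedtuple

# Hypothetical stand-in for the report objects expected by toxml.
Report = namedtuple('Report', 'name src_location start_ts end_ts errors failures')
r = Report('test_ok', 'tests/test_x.py', 0.0, 0.5, errors=[], failures=[])
xml = toxml([r], 'my-suite', package_name='demo')
assert b'<testsuites>' in xml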
q278355
|
PengWindow.addMenu
|
test
|
def addMenu(self,menu):
"""
Adds a menu to the list of menus.
"""
# If there is no menu selected currently, this menu will automatically be made active.
# Add the line above to the docstring if fixed
self.menus[menu.name]=menu
self.peng.sendEvent("peng3d:window.menu.add",{"peng":self.peng,"window":self,"menu":menu})
|
python
|
{
"resource": ""
}
|
q278356
|
Label.redraw_label
|
test
|
def redraw_label(self):
"""
Re-draws the text by calculating its position.
Currently, the text will always be centered on the position of the label.
"""
# Convenience variables
sx,sy = self.size
x,y = self.pos
# Label position
self._label.font_name = self.font_name
self._label.font_size = self.font_size
self._label.font_color = self.font_color
self._label.x = int(x+sx/2.)
self._label.y = int(y+sy/2.)
self._label.width = self.size[0]
self._label.height = self.size[1]
self._label._update()
|
python
|
{
"resource": ""
}
|
q278357
|
TextInput.redraw_label
|
test
|
def redraw_label(self):
"""
Re-draws the label by calculating its position.
Currently, the label will always be centered on the position of the label.
"""
# Convenience variables
sx,sy = self.size
x,y = self.pos
# Label position
x = x+self.bg.border[0]
y = y+sy/2.-self._text.font_size/4.
w = self.size[0]
h = self.size[1]
self._text.x,self._text.y = x,y
self._text.width,self._text.height=w,h
self._default.x,self._default.y = x,y
self._default.width,self._default.height=w,h
self._text._update() # Needed to prevent the label from drifting to the top-left after resizing by odd amounts
self._default._update()
|
python
|
{
"resource": ""
}
|
q278358
|
SubMenu.draw
|
test
|
def draw(self):
"""
Draws the submenu and its background.
Note that this leaves the OpenGL state set to 2d drawing.
"""
# Sets the OpenGL state for 2D-Drawing
self.window.set2d()
# Draws the background
if isinstance(self.bg,Layer):
self.bg._draw()
elif hasattr(self.bg,"draw") and callable(self.bg.draw):
self.bg.draw()
elif isinstance(self.bg,list) or isinstance(self.bg,tuple):
self.bg_vlist.draw(GL_QUADS)
elif callable(self.bg):
self.bg()
elif isinstance(self.bg,Background):
# The background will be drawn via the batch
if not self.bg.initialized:
self.bg.init_bg()
self.bg.redraw_bg()
self.bg.initialized=True
elif self.bg=="blank":
pass
else:
raise TypeError("Unknown background type")
# In case the background modified relevant state
self.window.set2d()
# Check that all widgets that need redrawing have been redrawn
for widget in self.widgets.values():
if widget.do_redraw:
widget.on_redraw()
widget.do_redraw = False
# Actually draw the content
self.batch2d.draw()
# Call custom draw methods where needed
for widget in self.widgets.values():
widget.draw()
|
python
|
{
"resource": ""
}
|
q278359
|
SubMenu.delWidget
|
test
|
def delWidget(self,widget):
"""
Deletes the widget by the given name.
Note that this feature is currently experimental as there seems to be a memory leak with this method.
"""
# TODO: fix memory leak upon widget deletion
#print("*"*50)
#print("Start delWidget")
if isinstance(widget,BasicWidget):
widget = widget.name
if widget not in self.widgets:
return
w = self.widgets[widget]
#print("refs: %s"%sys.getrefcount(w))
w.delete()
del self.widgets[widget]
del widget
#w_wref = weakref.ref(w)
#print("GC: GARBAGE")
#print(gc.garbage)
#print("Widget Info")
#print(w_wref())
#import objgraph
#print("Objgraph")
#objgraph.show_refs([w], filename='./mem_widget.png')
#print("refs: %s"%sys.getrefcount(w))
#w_r = gc.get_referrers(w)
#print("GC: REFS")
#for w_ref in w_r:
# print(repr(w_ref)[:512])
#print("GC: END")
#print("len: %s"%len(w_r))
#del w_ref,w_r
#print("after del %s"%w_wref())
#print("refs: %s"%sys.getrefcount(w))
del w
|
python
|
{
"resource": ""
}
|
q278360
|
Checkbox.redraw_label
|
test
|
def redraw_label(self):
"""
Re-calculates the position of the Label.
"""
# Convenience variables
sx,sy = self.size
x,y = self.pos
# Label position
self._label.anchor_x = "left"
self._label.x = x+sx/2.+sx
self._label.y = y+sy/2.+sy*.15
self._label._update()
|
python
|
{
"resource": ""
}
|
q278361
|
EgoMouseRotationalController.registerEventHandlers
|
test
|
def registerEventHandlers(self):
"""
Registers the motion and drag handlers.
        Note that because of the way pyglet treats mouse dragging, there is also a handler registered to the on_mouse_drag event.
"""
self.world.registerEventHandler("on_mouse_motion",self.on_mouse_motion)
self.world.registerEventHandler("on_mouse_drag",self.on_mouse_drag)
|
python
|
{
"resource": ""
}
|
q278362
|
BasicFlightController.registerEventHandlers
|
test
|
def registerEventHandlers(self):
"""
Registers the up and down handlers.
        Also registers a scheduled function every 60th of a second, causing pyglet to redraw your window at 60 fps.
"""
# Crouch/fly down
self.peng.keybinds.add(self.peng.cfg["controls.controls.crouch"],"peng3d:actor.%s.player.controls.crouch"%self.actor.uuid,self.on_crouch_down,False)
# Jump/fly up
self.peng.keybinds.add(self.peng.cfg["controls.controls.jump"],"peng3d:actor.%s.player.controls.jump"%self.actor.uuid,self.on_jump_down,False)
pyglet.clock.schedule_interval(self.update,1.0/60)
|
python
|
{
"resource": ""
}
|
q278363
|
DialogSubMenu.add_label_main
|
test
|
def add_label_main(self,label_main):
"""
Adds the main label of the dialog.
This widget can be triggered by setting the label ``label_main`` to a string.
This widget will be centered on the screen.
"""
# Main Label
self.wlabel_main = text.Label("label_main",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2-bw/2,sh/2-bh/2),
size=[0,0],
label=label_main,
            #multiline=True, # TODO: implement multiline dialog
)
self.wlabel_main.size = lambda sw,sh: (sw,self.wlabel_main._label.font_size)
self.addWidget(self.wlabel_main)
|
python
|
{
"resource": ""
}
|
q278364
|
DialogSubMenu.add_btn_ok
|
test
|
def add_btn_ok(self,label_ok):
"""
Adds an OK button to allow the user to exit the dialog.
This widget can be triggered by setting the label ``label_ok`` to a string.
        This widget will be mostly centered on the screen, but offset below
        the main label by double its height.
"""
# OK Button
self.wbtn_ok = button.Button("btn_ok",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2-bw/2,sh/2-bh/2-bh*2),
size=[0,0],
label=label_ok,
borderstyle=self.borderstyle
)
self.wbtn_ok.size = lambda sw,sh: (self.wbtn_ok._label.font_size*8,self.wbtn_ok._label.font_size*2)
self.addWidget(self.wbtn_ok)
def f():
self.doAction("click_ok")
self.exitDialog()
self.wbtn_ok.addAction("click",f)
|
python
|
{
"resource": ""
}
|
q278365
|
DialogSubMenu.exitDialog
|
test
|
def exitDialog(self):
"""
Helper method that exits the dialog.
This method will cause the previously active submenu to activate.
"""
if self.prev_submenu is not None:
# change back to the previous submenu
# could in theory form a stack if one dialog opens another
self.menu.changeSubMenu(self.prev_submenu)
self.prev_submenu = None
|
python
|
{
"resource": ""
}
|
q278366
|
ConfirmSubMenu.add_btn_confirm
|
test
|
def add_btn_confirm(self,label_confirm):
"""
Adds a confirm button to let the user confirm whatever action they were presented with.
This widget can be triggered by setting the label ``label_confirm`` to a string.
This widget will be positioned slightly below the main label and to the left
of the cancel button.
"""
# Confirm Button
self.wbtn_confirm = button.Button("btn_confirm",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2-bw-4,sh/2-bh/2-bh*2),
size=[0,0],
label=label_confirm,
borderstyle=self.borderstyle
)
self.wbtn_confirm.size = lambda sw,sh: (self.wbtn_confirm._label.font_size*8,self.wbtn_confirm._label.font_size*2)
self.addWidget(self.wbtn_confirm)
def f():
self.doAction("confirm")
self.exitDialog()
self.wbtn_confirm.addAction("click",f)
|
python
|
{
"resource": ""
}
|
q278367
|
ConfirmSubMenu.add_btn_cancel
|
test
|
def add_btn_cancel(self,label_cancel):
"""
Adds a cancel button to let the user cancel whatever choice they were given.
This widget can be triggered by setting the label ``label_cancel`` to a string.
This widget will be positioned slightly below the main label and to the right
of the confirm button.
"""
# Cancel Button
self.wbtn_cancel = button.Button("btn_cancel",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2+4,sh/2-bh/2-bh*2),
size=[0,0],
label=label_cancel,
borderstyle=self.borderstyle
)
self.wbtn_cancel.size = lambda sw,sh: (self.wbtn_cancel._label.font_size*8,self.wbtn_cancel._label.font_size*2)
self.addWidget(self.wbtn_cancel)
def f():
self.doAction("cancel")
self.exitDialog()
self.wbtn_cancel.addAction("click",f)
|
python
|
{
"resource": ""
}
|
q278368
|
ProgressSubMenu.update_progressbar
|
test
|
def update_progressbar(self):
"""
Updates the progressbar by re-calculating the label.
It is not required to manually call this method since setting any of the
properties of this class will automatically trigger a re-calculation.
"""
n,nmin,nmax = self.wprogressbar.n,self.wprogressbar.nmin,self.wprogressbar.nmax
if (nmax-nmin)==0:
percent = 0 # prevents ZeroDivisionError
else:
percent = max(min((n-nmin)/(nmax-nmin),1.),0.)*100
dat = {"value":round(n,4),"n":round(n,4),"nmin":round(nmin,4),"nmax":round(nmax,4),"percent":round(percent,4),"p":round(percent,4)}
txt = self._label_progressbar.format(**dat)
self.wprogresslabel.label = txt
|
python
|
{
"resource": ""
}
|
q278369
|
World.render3d
|
test
|
def render3d(self,view=None):
"""
Renders the world in 3d-mode.
If you want to render custom terrain, you may override this method. Be careful that you still call the original method or else actors may not be rendered.
"""
for actor in self.actors.values():
actor.render(view)
|
python
|
{
"resource": ""
}
|
q278370
|
StaticWorld.render3d
|
test
|
def render3d(self,view=None):
"""
Renders the world.
"""
super(StaticWorld,self).render3d(view)
self.batch3d.draw()
|
python
|
{
"resource": ""
}
|
q278371
|
Recorder.step
|
test
|
def step(self, step_name):
"""Start a new step. returns a context manager which allows you to
report an error"""
@contextmanager
def step_context(step_name):
if self.event_receiver.current_case is not None:
raise Exception('cannot open a step within a step')
self.event_receiver.begin_case(step_name, self.now_seconds(), self.name)
try:
yield self.event_receiver
except:
etype, evalue, tb = sys.exc_info()
self.event_receiver.error('%r' % [etype, evalue, tb])
raise
finally:
self.event_receiver.end_case(step_name, self.now_seconds())
return step_context(step_name)
|
python
|
{
"resource": ""
}
|
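A usage sketch, assuming ``recorder`` is an initialized ``Recorder``: the context manager yields the event receiver, and any exception raised inside the block is recorded as an error before being re-raised.

with recorder.step('connect to database') as events:
    connect()                      # hypothetical step body
    events.error('retried once')   # errors can also be reported manually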
q278372
|
ResourceManager.resourceExists
|
test
|
def resourceExists(self,name,ext=""):
"""
Returns whether or not the resource with the given name and extension exists.
        This does not necessarily mean that the resource is meaningful; it simply signals that the file exists.
"""
return os.path.exists(self.resourceNameToPath(name,ext))
|
python
|
{
"resource": ""
}
|
q278373
|
ResourceManager.addCategory
|
test
|
def addCategory(self,name):
"""
Adds a new texture category with the given name.
If the category already exists, it will be overridden.
"""
self.categories[name]={}
self.categoriesTexCache[name]={}
self.categoriesTexBin[name]=pyglet.image.atlas.TextureBin(self.texsize,self.texsize)
self.peng.sendEvent("peng3d:rsrc.category.add",{"peng":self.peng,"category":name})
|
python
|
{
"resource": ""
}
|
q278374
|
ResourceManager.getMissingTexture
|
test
|
def getMissingTexture(self):
"""
Returns a texture to be used as a placeholder for missing textures.
A default missing texture file is provided in the assets folder of the source distribution.
        It consists of a simple checkerboard pattern of purple and black; this image may be copied to any project using peng3d for similar behavior.
If this texture cannot be found, a pattern is created in-memory, simply a solid square of purple.
This texture will also be cached separately from other textures.
"""
if self.missingTexture is None:
if self.resourceExists(self.missingtexturename,".png"):
self.missingTexture = pyglet.image.load(self.resourceNameToPath(self.missingtexturename,".png"))
return self.missingTexture
else: # Falls back to create pattern in-memory
self.missingTexture = pyglet.image.create(1,1,pyglet.image.SolidColorImagePattern([255,0,255,255]))
return self.missingTexture
else:
return self.missingTexture
|
python
|
{
"resource": ""
}
|
q278375
|
ResourceManager.getModel
|
test
|
def getModel(self,name):
"""
Gets the model object by the given name.
If it was loaded previously, a cached version will be returned.
If it was not loaded, it will be loaded and inserted into the cache.
"""
if name in self.modelobjcache:
return self.modelobjcache[name]
return self.loadModel(name)
|
python
|
{
"resource": ""
}
|
q278376
|
ResourceManager.loadModel
|
test
|
def loadModel(self,name):
"""
Loads the model of the given name.
The model will also be inserted into the cache.
"""
m = model.Model(self.peng,self,name)
self.modelobjcache[name]=m
self.peng.sendEvent("peng3d:rsrc.model.load",{"peng":self.peng,"name":name})
return m
|
python
|
{
"resource": ""
}
|
q278377
|
ResourceManager.getModelData
|
test
|
def getModelData(self,name):
"""
Gets the model data associated with the given name.
If it was loaded, a cached copy will be returned.
        If it was not loaded, it will be loaded and cached.
"""
if name in self.modelcache:
return self.modelcache[name]
return self.loadModelData(name)
|
python
|
{
"resource": ""
}
|
q278378
|
ResourceManager.loadModelData
|
test
|
def loadModelData(self,name):
"""
Loads the model data of the given name.
The model file must always be a .json file.
"""
path = self.resourceNameToPath(name,".json")
try:
data = json.load(open(path,"r"))
except Exception:
# Temporary
print("Exception during model load: ")
import traceback;traceback.print_exc()
return {}# will probably cause other exceptions later on, TODO
out = {}
if data.get("version",1)==1:
# Currently only one version, basic future-proofing
# This version should get incremented with breaking changes to the structure
# Materials
out["materials"]={}
for name,matdata in data.get("materials",{}).items():
m = model.Material(self,name,matdata)
out["materials"][name]=m
out["default_material"]=out["materials"][data.get("default_material",list(out["materials"].keys())[0])]
# Bones
out["bones"]={"__root__":model.RootBone(self,"__root__",{"start_rot":[0,0],"length":0})}
for name,bonedata in data.get("bones",{}).items():
b = model.Bone(self,name,bonedata)
out["bones"][name]=b
for name,bone in out["bones"].items():
if name == "__root__":
continue
bone.setParent(out["bones"][bone.bonedata["parent"]])
# Regions
out["regions"]={}
for name,regdata in data.get("regions",{}).items():
r = model.Region(self,name,regdata)
r.material = out["materials"][regdata.get("material",out["default_material"])]
r.bone = out["bones"][regdata.get("bone","__root__")]
out["bones"][regdata.get("bone","__root__")].addRegion(r)
out["regions"][name]=r
# Animations
out["animations"]={}
out["animations"]["static"]=model.Animation(self,"static",{"type":"static","bones":{}})
for name,anidata in data.get("animations",{}).items():
a = model.Animation(self,name,anidata)
a.setBones(out["bones"])
out["animations"][name]=a
out["default_animation"]=out["animations"][data.get("default_animation",out["animations"]["static"])]
else:
raise ValueError("Unknown version %s of model '%s'"%(data.get("version",1),name))
self.modelcache[name]=out
return out
|
python
|
{
"resource": ""
}
|
q278379
|
Container.addWidget
|
test
|
def addWidget(self,widget):
"""
Adds a widget to this container.
Note that trying to add the Container to itself will be ignored.
"""
if self is widget: # Prevents being able to add the container to itself, causing a recursion loop on redraw
return
self.widgets[widget.name]=widget
|
python
|
{
"resource": ""
}
|
q278380
|
Container.draw
|
test
|
def draw(self):
"""
Draws the submenu and its background.
Note that this leaves the OpenGL state set to 2d drawing and may modify the scissor settings.
"""
if not self.visible:
# Simple visibility check, has to be tested to see if it works properly
return
if not isinstance(self.submenu,Container):
glEnable(GL_SCISSOR_TEST)
glScissor(*self.pos+self.size)
SubMenu.draw(self)
if not isinstance(self.submenu,Container):
glDisable(GL_SCISSOR_TEST)
|
python
|
{
"resource": ""
}
|
q278381
|
Container.on_redraw
|
test
|
def on_redraw(self):
"""
Redraws the background and any child widgets.
"""
x,y = self.pos
sx,sy = self.size
self.bg_vlist.vertices = [x,y, x+sx,y, x+sx,y+sy, x,y+sy]
self.stencil_vlist.vertices = [x,y, x+sx,y, x+sx,y+sy, x,y+sy]
if isinstance(self.bg,Background):
if not self.bg.initialized:
self.bg.init_bg()
self.bg.initialized=True
self.bg.redraw_bg()
|
python
|
{
"resource": ""
}
|
q278382
|
ScrollableContainer.on_redraw
|
test
|
def on_redraw(self):
"""
Redraws the background and contents, including scrollbar.
This method will also check the scrollbar for any movement and will be automatically called on movement of the slider.
"""
n = self._scrollbar.n
self.offset_y = -n # Causes the content to move in the opposite direction of the slider
# Size of scrollbar
sx=24 # Currently constant, TODO: add dynamic sx of scrollbar
sy=self.size[1]
# Pos of scrollbar
x=self.size[0]-sx
y=0 # Currently constant, TODO: add dynamic y-pos of scrollbar
# Dynamic pos/size may be added via align/lambda/etc.
        # Note that the values are written to the _* variant of the attribute to avoid 3 unnecessary redraws
self._scrollbar._size = sx,sy
self._scrollbar._pos = x,y
self._scrollbar._nmax = self.content_height
super(ScrollableContainer,self).on_redraw()
|
python
|
{
"resource": ""
}
|
q278383
|
mouse_aabb
|
test
|
def mouse_aabb(mpos,size,pos):
"""
AABB Collision checker that can be used for most axis-aligned collisions.
Intended for use in widgets to check if the mouse is within the bounds of a particular widget.
"""
return pos[0]<=mpos[0]<=pos[0]+size[0] and pos[1]<=mpos[1]<=pos[1]+size[1]
|
python
|
{
"resource": ""
}
|
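A quick demonstration of the inclusive bounds check for a hypothetical 100x50 widget anchored at (10, 10):

assert mouse_aabb((60, 35), (100, 50), (10, 10))        # well inside
assert mouse_aabb((10, 10), (100, 50), (10, 10))        # the boundary counts as a hit
assert not mouse_aabb((111, 35), (100, 50), (10, 10))   # just outside to the right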
q278384
|
Slider.p
|
test
|
def p(self):
"""
Helper property containing the percentage this slider is "filled".
This property is read-only.
"""
return (self.n-self.nmin)/max((self.nmax-self.nmin),1)
|
python
|
{
"resource": ""
}
|
q278385
|
Menu.addLayer
|
test
|
def addLayer(self,layer,z=-1):
"""
Adds a new layer to the stack, optionally at the specified z-value.
``layer`` must be an instance of Layer or subclasses.
``z`` can be used to override the index of the layer in the stack. Defaults to ``-1`` for appending.
"""
# Adds a new layer to the stack, optionally at the specified z-value
# The z-value is the index this layer should be inserted in, or -1 for appending
if not isinstance(layer,Layer):
raise TypeError("layer must be an instance of Layer!")
if z==-1:
self.layers.append(layer)
else:
self.layers.insert(z,layer)
|
python
|
{
"resource": ""
}
|
q278386
|
_get_region
|
test
|
def _get_region(self, buffer, start, count):
'''Map a buffer region using this attribute as an accessor.
The returned region can be modified as if the buffer was a contiguous
array of this attribute (though it may actually be interleaved or
otherwise non-contiguous).
The returned region consists of a contiguous array of component
data elements. For example, if this attribute uses 3 floats per
vertex, and the `count` parameter is 4, the number of floats mapped
will be ``3 * 4 = 12``.
:Parameters:
`buffer` : `AbstractMappable`
The buffer to map.
`start` : int
Offset of the first vertex to map.
`count` : int
Number of vertices to map
:rtype: `AbstractBufferRegion`
'''
byte_start = self.stride * start
byte_size = self.stride * count
array_count = self.count * count
if self.stride == self.size or not array_count:
# non-interleaved
ptr_type = ctypes.POINTER(self.c_type * array_count)
return buffer.get_region(byte_start, byte_size, ptr_type)
else:
# interleaved
byte_start += self.offset
byte_size -= self.offset
elem_stride = self.stride // ctypes.sizeof(self.c_type)
elem_offset = self.offset // ctypes.sizeof(self.c_type)
ptr_type = ctypes.POINTER(
self.c_type * int((count * elem_stride - elem_offset)))
region = buffer.get_region(byte_start, byte_size, ptr_type)
return vertexbuffer.IndirectArrayRegion(
region, array_count, self.count, elem_stride)
|
python
|
{
"resource": ""
}
|
q278387
|
_draw
|
test
|
def _draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawArrays(mode, vertex_list.start, vertex_list.count)
else:
starts, sizes = self.allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawArrays(mode, starts[0], int(sizes[0]))
elif gl_info.have_version(1, 4):
starts = (GLint * primcount)(*starts)
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawArrays(mode, starts, sizes, primcount)
else:
for start, size in zip(starts, sizes):
glDrawArrays(mode, start, size)
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib()
|
python
|
{
"resource": ""
}
|
q278388
|
ActionDispatcher.addAction
|
test
|
def addAction(self,action,func,*args,**kwargs):
"""
Adds a callback to the specified action.
All other positional and keyword arguments will be stored and passed to the function upon activation.
"""
if not hasattr(self,"actions"):
self.actions = {}
if action not in self.actions:
self.actions[action] = []
self.actions[action].append((func,args,kwargs))
|
python
|
{
"resource": ""
}
|
q278389
|
ActionDispatcher.doAction
|
test
|
def doAction(self,action):
"""
Helper method that calls all callbacks registered for the given action.
"""
if not hasattr(self,"actions"):
return
for f,args,kwargs in self.actions.get(action,[]):
f(*args,**kwargs)
|
python
|
{
"resource": ""
}
|
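A minimal sketch of the dispatcher pair above: extra arguments given at registration are passed back to the callback on dispatch, and unknown actions are silently ignored.

class Button(ActionDispatcher):  # hypothetical minimal subclass
    pass

b = Button()
b.addAction('click', print, 'button was clicked')
b.doAction('click')   # prints: button was clicked
b.doAction('hover')   # no callbacks registered, silently ignored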
q278390
|
SmartRegistry.register
|
test
|
def register(self,name,force_id=None):
"""
Registers a name to the registry.
``name`` is the name of the object and must be a string.
``force_id`` can be optionally set to override the automatic ID generation
and force a specific ID.
Note that using ``force_id`` is discouraged, since it may cause problems when ``reuse_ids`` is false.
"""
with self.registry_lock:
if force_id is None:
new_id = self.genNewID()
else:
new_id = force_id
self._data["reg"][new_id]=name
return new_id
|
python
|
{
"resource": ""
}
|
q278391
|
LayeredWidget.addLayer
|
test
|
def addLayer(self,layer,z_index=None):
"""
Adds the given layer at the given Z Index.
If ``z_index`` is not given, the Z Index specified by the layer will be used.
"""
if z_index is None:
z_index = layer.z_index
i = 0
for l,z in self.layers:
if z>z_index:
break
i+=1
self._layers[layer.name]=layer
self.layers.insert(i,[layer,z_index])
|
python
|
{
"resource": ""
}
|
q278392
|
LayeredWidget.draw
|
test
|
def draw(self):
"""
Draws all layers of this LayeredWidget.
        This should normally be unnecessary, since it is recommended that layers use Vertex Lists instead of OpenGL Immediate Mode.
"""
super(LayeredWidget,self).draw()
for layer,_ in self.layers:
layer._draw()
|
python
|
{
"resource": ""
}
|
q278393
|
LayeredWidget.delete
|
test
|
def delete(self):
"""
Deletes all layers within this LayeredWidget before deleting itself.
Recommended to call if you are removing the widget, but not yet exiting the interpreter.
"""
for layer,_ in self.layers:
layer.delete()
self.layers = []
self._layers = {}
super(LayeredWidget,self).delete()
|
python
|
{
"resource": ""
}
|
q278394
|
WidgetLayer.border
|
test
|
def border(self):
"""
Property to be used for setting and getting the border of the layer.
Note that setting this property causes an immediate redraw.
"""
if callable(self._border):
return util.WatchingList(self._border(*(self.widget.pos+self.widget.size)),self._wlredraw_border)
else:
return util.WatchingList(self._border,self._wlredraw_border)
|
python
|
{
"resource": ""
}
|
q278395
|
WidgetLayer.offset
|
test
|
def offset(self):
"""
Property to be used for setting and getting the offset of the layer.
Note that setting this property causes an immediate redraw.
"""
if callable(self._offset):
return util.WatchingList(self._offset(*(self.widget.pos+self.widget.size)),self._wlredraw_offset)
else:
return util.WatchingList(self._offset,self._wlredraw_offset)
|
python
|
{
"resource": ""
}
|
q278396
|
WidgetLayer.getSize
|
test
|
def getSize(self):
"""
Returns the size of the layer, with the border size already subtracted.
"""
return self.widget.size[0]-self.border[0]*2,self.widget.size[1]-self.border[1]*2
|
python
|
{
"resource": ""
}
|
q278397
|
read_h5
|
test
|
def read_h5(hdfstore, group = ""):
"""
DEPRECATED
Reads a mesh saved in the HDF5 format.
"""
    hdf = hdfstore  # the body consistently uses ``hdf``; alias the parameter
    m = Mesh()
m.elements.data = hdf["elements/connectivity"]
m.nodes.data = hdf["nodes/xyz"]
for key in hdf.keys():
if key.startswith("/nodes/sets"):
k = key.replace("/nodes/sets/", "")
m.nodes.sets[k] = set(hdf[key])
if key.startswith("/elements/sets"):
k = key.replace("/elements/sets/", "")
m.elements.sets[k] = set(hdf[key])
if key.startswith("/elements/surfaces"):
k = key.replace("/elements/surfaces/", "")
m.elements.surfaces[k] = hdf[key]
if key.startswith("/fields/"):
if key.endswith("/metadata"):
tag = key.split("/")[2]
f = Field()
f.metadata = hdf["fields/{0}/metadata".format(tag)]
            f.data = hdf["fields/{0}/data".format(tag)]  # presumably the payload; the original assigned metadata twice
f.master = m
m.add_field(tag, f)
hdf.close()
return m
|
python
|
{
"resource": ""
}
|
q278398
|
_make_conn
|
test
|
def _make_conn(shape):
"""
Connectivity builder using Numba for speed boost.
"""
shape = np.array(shape)
Ne = shape.prod()
if len(shape) == 2:
nx, ny = np.array(shape) +1
conn = np.zeros((Ne, 4), dtype = np.int32)
counter = 0
pattern = np.array([0,1,1+nx,nx])
for j in range(shape[1]):
for i in range(shape[0]):
conn[counter] = pattern + 1 + i + j*nx
counter += 1
if len(shape) == 3:
nx, ny, nz = np.array(shape) +1
conn = np.zeros((Ne, 8), dtype = np.int32)
counter = 0
pattern = np.array([0,1,1+nx,nx,nx*ny,1+nx*ny,1+(nx+1)*ny,(nx+1)*ny])
for k in range(shape[2]):
for j in range(shape[1]):
for i in range(shape[0]):
conn[counter] = pattern + 1 + i + j*nx+ k*nx*ny
counter += 1
return conn
|
python
|
{
"resource": ""
}
|
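A small check of the 2D branch: a 2x2 element grid lives on a 3x3 node lattice with 1-based, row-major node numbers, so the first quad connects nodes 1-2-5-4.

conn = _make_conn((2, 2))
assert conn.shape == (4, 4)
assert list(conn[0]) == [1, 2, 5, 4]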
q278399
|
Mesh.set_fields
|
test
|
def set_fields(self, fields = None, **kwargs):
"""
Sets the fields.
"""
self.fields = []
        if fields is not None:
for field in fields:
self.fields.append(field)
|
python
|
{
"resource": ""
}
|