<SYSTEM_TASK:> Recalculate the projection, hash_key, and range_key for the given index. <END_TASK> <USER_TASK:> Description: def refresh_index(meta, index) -> None: """Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh """
# All projections include model + index keys projection_keys = set.union(meta.keys, index.keys) proj = index.projection mode = proj["mode"] if mode == "keys": proj["included"] = projection_keys elif mode == "all": proj["included"] = meta.columns elif mode == "include": # pragma: no branch if all(isinstance(p, str) for p in proj["included"]): proj["included"] = set(meta.columns_by_name[n] for n in proj["included"]) else: proj["included"] = set(proj["included"]) proj["included"].update(projection_keys) if proj["strict"]: proj["available"] = proj["included"] else: proj["available"] = meta.columns
<SYSTEM_TASK:> Unconditionally remove any columns or indexes bound to the given name or dynamo_name. <END_TASK> <USER_TASK:> Description: def unbind(meta, name=None, dynamo_name=None) -> None: """Unconditionally remove any columns or indexes bound to the given name or dynamo_name. .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key=email) for dynamo_name in ("id", "e", "by_email"): bloop.models.unbind(User.Meta, dynamo_name=dynamo_name) assert not User.Meta.columns assert not User.Meta.indexes assert not User.Meta.keys .. warning:: This method does not pre- or post- validate the model with the requested changes. You are responsible for ensuring the model still has a hash key, that required columns exist for each index, etc. :param meta: model.Meta to remove the columns or indexes from :param name: column or index name to unbind by. Default is None. :param dynamo_name: column or index name to unbind by. Default is None. """
if name is not None: columns = {x for x in meta.columns if x.name == name} indexes = {x for x in meta.indexes if x.name == name} elif dynamo_name is not None: columns = {x for x in meta.columns if x.dynamo_name == dynamo_name} indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name} else: raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta") # Nothing in bloop should allow name or dynamo_name # collisions to exist, so this is either a bug or # the user manually hacked up meta. assert len(columns) <= 1 assert len(indexes) <= 1 assert not (columns and indexes) if columns: [column] = columns meta.columns.remove(column) # If these don't line up, there's likely a bug in bloop # or the user manually hacked up columns_by_name expect_same = meta.columns_by_name[column.name] assert expect_same is column meta.columns_by_name.pop(column.name) if column in meta.keys: meta.keys.remove(column) if meta.hash_key is column: meta.hash_key = None if meta.range_key is column: meta.range_key = None delattr(meta.model, column.name) if indexes: [index] = indexes meta.indexes.remove(index) if index in meta.gsis: meta.gsis.remove(index) if index in meta.lsis: meta.lsis.remove(index) delattr(meta.model, index.name)
<SYSTEM_TASK:> Returns True if the actual index is a valid superset of the expected index <END_TASK> <USER_TASK:> Description: def is_valid_superset(actual_projection, index): """Returns True if the actual index is a valid superset of the expected index"""
projection_type = actual_projection["ProjectionType"] if projection_type == "ALL": return True meta = index.model.Meta # all index types provide index keys and model keys provides = set.union(meta.keys, index.keys) if projection_type == "KEYS_ONLY": pass elif projection_type == "INCLUDE": # pragma: no branch (unknown projections break loud) by_dynamo_name = {column.dynamo_name: column for column in meta.columns} provides.update( by_dynamo_name[name] for name in actual_projection["NonKeyAttributes"] if name in by_dynamo_name # ignore columns the projection provides if the model doesn't care about them ) else: logger.info(f"unexpected index ProjectionType '{projection_type}'") return False expects = index.projection["included"] return provides.issuperset(expects)
<SYSTEM_TASK:> Save an object to DynamoDB. <END_TASK> <USER_TASK:> Description: def save_item(self, item): """Save an object to DynamoDB. :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
try:
    self.dynamodb_client.update_item(**item)
except botocore.exceptions.ClientError as error:
    handle_constraint_violation(error)
<SYSTEM_TASK:> Delete an object in DynamoDB. <END_TASK> <USER_TASK:> Description: def delete_item(self, item): """Delete an object in DynamoDB. :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
try:
    self.dynamodb_client.delete_item(**item)
except botocore.exceptions.ClientError as error:
    handle_constraint_violation(error)
<SYSTEM_TASK:> Loads any number of items in chunks, handling continuation tokens. <END_TASK> <USER_TASK:> Description: def load_items(self, items): """Loads any number of items in chunks, handling continuation tokens. :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`. """
loaded_items = {}
requests = collections.deque(create_batch_get_chunks(items))
while requests:
    request = requests.pop()
    try:
        response = self.dynamodb_client.batch_get_item(RequestItems=request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while loading items.") from error

    # Accumulate results
    for table_name, table_items in response.get("Responses", {}).items():
        loaded_items.setdefault(table_name, []).extend(table_items)

    # Push additional request onto the deque.
    # "UnprocessedKeys" is {} if this request is done
    if response["UnprocessedKeys"]:
        requests.append(response["UnprocessedKeys"])
return loaded_items
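``create_batch_get_chunks`` is referenced above but not shown in this excerpt. A minimal sketch of such a helper, assuming it splits a RequestItems-style dict into chunks of at most 100 keys (the ``batch_get_item`` limit), might look like this:

# Hypothetical sketch only -- the real create_batch_get_chunks is not shown here.
# Assumes `items` maps table names to {"Keys": [...], ...} request dicts and
# that DynamoDB's batch_get_item accepts at most 100 keys per request.
MAX_BATCH_SIZE = 100

def create_batch_get_chunks(items):
    chunks, current, count = [], {}, 0
    for table_name, table_request in items.items():
        for key in table_request["Keys"]:
            if count == MAX_BATCH_SIZE:
                chunks.append(current)
                current, count = {}, 0
            table_chunk = current.setdefault(
                table_name, {**table_request, "Keys": []})
            table_chunk["Keys"].append(key)
            count += 1
    if current:
        chunks.append(current)
    return chunks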
<SYSTEM_TASK:> Create the model's table. Returns True if the table is being created, False otherwise. <END_TASK> <USER_TASK:> Description: def create_table(self, table_name, model): """Create the model's table. Returns True if the table is being created, False otherwise. Does not wait for the table to create, and does not validate an existing table. Will not raise "ResourceInUseException" if the table exists or is being created. :param str table_name: The name of the table to create for the model. :param model: The :class:`~bloop.models.BaseModel` to create the table for. :return: True if the table is being created, False if the table exists :rtype: bool """
table = create_table_request(table_name, model) try: self.dynamodb_client.create_table(**table) is_creating = True except botocore.exceptions.ClientError as error: handle_table_exists(error, model) is_creating = False return is_creating
<SYSTEM_TASK:> Polls until the table is ready, then returns the description from the first call that found it ready. <END_TASK> <USER_TASK:> Description: def describe_table(self, table_name): """ Polls until the table is ready, then returns the description from the first call that found it ready. The returned dict is standardized to ensure all fields are present, even when empty or across different DynamoDB API versions. TTL information is also inserted. :param table_name: The name of the table to describe :return: The (sanitized) result of DescribeTable["Table"] :rtype: dict """
if table_name in self._tables: return self._tables[table_name] status, description = None, {} calls = 0 while status is not ready: calls += 1 try: description = self.dynamodb_client.describe_table(TableName=table_name)["Table"] except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing table.") from error status = simple_table_status(description) logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls)) try: ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing ttl.") from error try: backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing continuous backups.") from error description["TimeToLiveDescription"] = { "AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"), "TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"), } description["ContinuousBackupsDescription"] = { "ContinuousBackupsStatus": _read_field( backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"), } table = self._tables[table_name] = sanitize_table_description(description) return table
<SYSTEM_TASK:> Polls until a creating table is ready, then verifies the description against the model's requirements. <END_TASK> <USER_TASK:> Description: def validate_table(self, table_name, model): """Polls until a creating table is ready, then verifies the description against the model's requirements. The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly the same. The table must have a stream if the model expects one, but not the other way around. When read or write units are not specified for the model or any GSI, the existing values will always pass validation. :param str table_name: The name of the table to validate the model against. :param model: The :class:`~bloop.models.BaseModel` to validate the table of. :raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model. """
actual = self.describe_table(table_name) if not compare_tables(model, actual): raise TableMismatch("The expected and actual tables for {!r} do not match.".format(model.__name__)) # Fill in values that Meta doesn't know ahead of time (such as arns). # These won't be populated unless Meta explicitly cares about the value if model.Meta.stream: stream_arn = model.Meta.stream["arn"] = actual["LatestStreamArn"] logger.debug(f"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response") if model.Meta.ttl: ttl_enabled = actual["TimeToLiveDescription"]["TimeToLiveStatus"].lower() == "enabled" model.Meta.ttl["enabled"] = ttl_enabled logger.debug(f"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response") # Fill in meta values that the table didn't care about (eg. billing=None) if model.Meta.encryption is None: sse_enabled = actual["SSEDescription"]["Status"].lower() == "enabled" model.Meta.encryption = {"enabled": sse_enabled} logger.debug( f"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response") if model.Meta.backups is None: backups = actual["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED" model.Meta.backups = {"enabled": backups} logger.debug(f"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response") if model.Meta.billing is None: billing_mode = { "PAY_PER_REQUEST": "on_demand", "PROVISIONED": "provisioned" }[actual["BillingModeSummary"]["BillingMode"]] model.Meta.billing = {"mode": billing_mode} logger.debug(f"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response") if model.Meta.read_units is None: read_units = model.Meta.read_units = actual["ProvisionedThroughput"]["ReadCapacityUnits"] logger.debug( f"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response") if model.Meta.write_units is None: write_units = model.Meta.write_units = actual["ProvisionedThroughput"]["WriteCapacityUnits"] logger.debug( f"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response") # Replace any ``None`` values for read_units, write_units in GSIs with their actual values gsis = {index["IndexName"]: index for index in actual["GlobalSecondaryIndexes"]} for index in model.Meta.gsis: read_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["ReadCapacityUnits"] write_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["WriteCapacityUnits"] if index.read_units is None: index.read_units = read_units logger.debug( f"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response") if index.write_units is None: index.write_units = write_units logger.debug( f"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response")
<SYSTEM_TASK:> Only allows == against query_on.hash_key <END_TASK> <USER_TASK:> Description: def check_hash_key(query_on, key): """Only allows == against query_on.hash_key"""
return (
    isinstance(key, BaseCondition) and
    (key.operation == "==") and
    (key.column is query_on.hash_key)
)
<SYSTEM_TASK:> BeginsWith, Between, or any Comparison except '!=' against query_on.range_key <END_TASK> <USER_TASK:> Description: def check_range_key(query_on, key): """BeginsWith, Between, or any Comparison except '!=' against query_on.range_key"""
return (
    isinstance(key, BaseCondition) and
    key.operation in ("begins_with", "between", "<", ">", "<=", ">=", "==") and
    key.column is query_on.range_key
)
<SYSTEM_TASK:> Number of items that have been loaded from DynamoDB so far, including buffered items. <END_TASK> <USER_TASK:> Description: def count(self): """Number of items that have been loaded from DynamoDB so far, including buffered items."""
if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._count
<SYSTEM_TASK:> Number of items that DynamoDB evaluated, before any filter was applied. <END_TASK> <USER_TASK:> Description: def scanned(self): """Number of items that DynamoDB evaluated, before any filter was applied."""
if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._scanned
<SYSTEM_TASK:> Reset to the initial state, clearing the buffer and zeroing count and scanned. <END_TASK> <USER_TASK:> Description: def reset(self): """Reset to the initial state, clearing the buffer and zeroing count and scanned."""
self.buffer.clear()
self._count = 0
self._scanned = 0
self._exhausted = False
self.request.pop("ExclusiveStartKey", None)
<SYSTEM_TASK:> Create a new PreparedTransaction that can be committed. <END_TASK> <USER_TASK:> Description: def prepare(self): """ Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. code-block:: python >>> engine = Engine() >>> tx = WriteTransaction(engine) >>> prepared = tx.prepare() >>> prepared.commit() # automatically calls commit when exiting >>> with WriteTransaction(engine) as tx: ... # modify the transaction here ... pass >>> # tx commits here :return: """
tx = PreparedTransaction() tx.prepare( engine=self.engine, mode=self.mode, items=self._items, ) return tx
<SYSTEM_TASK:> Create a unique transaction id and dump the items into a cached request object. <END_TASK> <USER_TASK:> Description: def prepare(self, engine, mode, items) -> None: """ Create a unique transaction id and dump the items into a cached request object. """
self.tx_id = str(uuid.uuid4()).replace("-", "")
self.engine = engine
self.mode = mode
self.items = items
self._prepare_request()
<SYSTEM_TASK:> Commit the transaction with a fixed transaction id. <END_TASK> <USER_TASK:> Description: def commit(self) -> None: """ Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call. """
now = datetime.now(timezone.utc)
if self.first_commit_at is None:
    self.first_commit_at = now

if self.mode == "r":
    response = self.engine.session.transaction_read(self._request)
elif self.mode == "w":
    if now - self.first_commit_at > MAX_TOKEN_LIFETIME:
        raise TransactionTokenExpired
    response = self.engine.session.transaction_write(self._request, self.tx_id)
else:
    raise ValueError(f"unrecognized mode {self.mode}")

self._handle_response(response)
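``MAX_TOKEN_LIFETIME`` is not defined in this excerpt; a plausible definition, assuming the 10-minute idempotency window described in the docstring, would be:

from datetime import timedelta

# Assumption: DynamoDB honours a write transaction's client request token
# for roughly 10 minutes, as described in the commit() docstring above.
MAX_TOKEN_LIFETIME = timedelta(minutes=10)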
<SYSTEM_TASK:> Add one or more objects to be loaded in this transaction. <END_TASK> <USER_TASK:> Description: def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """
self._extend([TxItem.new("get", obj) for obj in objs]) return self
<SYSTEM_TASK:> Add a condition which must be met for the transaction to commit. <END_TASK> <USER_TASK:> Description: def check(self, obj, condition) -> "WriteTransaction": """ Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining """
self._extend([TxItem.new("check", obj, condition)]) return self
<SYSTEM_TASK:> Add one or more objects to be saved in this transaction. <END_TASK> <USER_TASK:> Description: def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction": """ Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match. :return: this transaction for chaining """
self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs]) return self
<SYSTEM_TASK:> Produces a numpy array of integers which encode <END_TASK> <USER_TASK:> Description: def encode(self, cube_dimensions): """ Produces a numpy array of integers which encode the supplied cube dimensions. """
return np.asarray([getattr(cube_dimensions[d], s) for d in self._dimensions for s in self._schema], dtype=np.int32)
<SYSTEM_TASK:> Produce a list of dictionaries for each dimension in this transcoder <END_TASK> <USER_TASK:> Description: def decode(self, descriptor): """ Produce a list of dictionaries for each dimension in this transcoder """
i = iter(descriptor)
n = len(self._schema)

# Add the name key to our schema
schema = self._schema + ('name',)

# For each dimension, the generator takes n items off the iterator
# wrapping the descriptor, making a tuple with the dimension
# name appended
tuple_gen = (tuple(itertools.islice(i, n)) + (d,)
             for d in self._dimensions)

# Generate dictionary by mapping schema keys to generated tuples
return [{k: v for k, v in zip(schema, t)} for t in tuple_gen]
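A standalone illustration of the decode logic, using hypothetical schema and dimension names in place of the transcoder instance:

import itertools

# Hypothetical schema/dimensions for illustration only
_schema = ('global_size', 'lower_extent', 'upper_extent')
_dimensions = ('ntime', 'nchan')

descriptor = [100, 0, 10,   # ntime
              64, 0, 64]    # nchan

i, n = iter(descriptor), len(_schema)
schema = _schema + ('name',)
tuple_gen = (tuple(itertools.islice(i, n)) + (d,) for d in _dimensions)
print([dict(zip(schema, t)) for t in tuple_gen])
# [{'global_size': 100, 'lower_extent': 0, 'upper_extent': 10, 'name': 'ntime'},
#  {'global_size': 64, 'lower_extent': 0, 'upper_extent': 64, 'name': 'nchan'}]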
<SYSTEM_TASK:> Downloads and installs cub into mb_inc_path <END_TASK> <USER_TASK:> Description: def install_cub(mb_inc_path): """ Downloads and installs cub into mb_inc_path """
cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip' cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d' cub_version_str = 'Current release: v1.6.4 (12/06/2016)' cub_zip_file = 'cub.zip' cub_zip_dir = 'cub-1.6.4' cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir) cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub') cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh') cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' ) # Check for a reasonably valid install cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str) if cub_installed: log.info("NVIDIA cub installation found " "at '{}'".format(cub_new_unzipped_path)) return log.info("No NVIDIA cub installation found") # Do we already have a valid cub zip file have_valid_cub_file = (os.path.exists(cub_zip_file) and os.path.isfile(cub_zip_file) and sha_hash_file(cub_zip_file) == cub_sha_hash) if have_valid_cub_file: log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file)) # Download if we don't have a valid file else: log.info("Downloading cub archive '{}'".format(cub_url)) dl_cub(cub_url, cub_zip_file) cub_file_sha_hash = sha_hash_file(cub_zip_file) # Compare against our supplied hash if cub_sha_hash != cub_file_sha_hash: msg = ('Hash of file %s downloaded from %s ' 'is %s and does not match the expected ' 'hash of %s. Please manually download ' 'as per the README.md instructions.') % ( cub_zip_file, cub_url, cub_file_sha_hash, cub_sha_hash) raise InstallCubException(msg) # Unzip into montblanc/include/cub with zipfile.ZipFile(cub_zip_file, 'r') as zip_file: # Remove any existing installs shutil.rmtree(cub_unzipped_path, ignore_errors=True) shutil.rmtree(cub_new_unzipped_path, ignore_errors=True) # Unzip zip_file.extractall(mb_inc_path) # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir shutil.move(cub_unzipped_path, cub_new_unzipped_path) log.info("NVIDIA cub archive unzipped into '{}'".format( cub_new_unzipped_path)) there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str) if not there: raise InstallCubException(reason)
<SYSTEM_TASK:> Create an extension that builds the custom tensorflow ops <END_TASK> <USER_TASK:> Description: def create_tensorflow_extension(nvcc_settings, device_info): """ Create an extension that builds the custom tensorflow ops """
import tensorflow as tf import glob use_cuda = (bool(nvcc_settings['cuda_available']) and tf.test.is_built_with_cuda()) # Source and includes source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops') sources = glob.glob(os.path.join(source_path, '*.cpp')) # Header dependencies depends = glob.glob(os.path.join(source_path, '*.h')) # Include directories tf_inc = tf.sysconfig.get_include() include_dirs = [os.path.join('montblanc', 'include'), source_path] include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")] # Libraries library_dirs = [tf.sysconfig.get_lib()] libraries = ['tensorflow_framework'] extra_link_args = ['-fPIC', '-fopenmp', '-g0'] # Macros define_macros = [ ('_MWAITXINTRIN_H_INCLUDED', None), ('_FORCE_INLINES', None), ('_GLIBCXX_USE_CXX11_ABI', 0)] # Common flags flags = ['-std=c++11'] gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2'] gcc_flags += ['-march=native', '-mtune=native'] nvcc_flags = flags + [] # Add cuda specific build information, if it is available if use_cuda: # CUDA source files sources += glob.glob(os.path.join(source_path, '*.cu')) # CUDA include directories include_dirs += nvcc_settings['include_dirs'] # CUDA header dependencies depends += glob.glob(os.path.join(source_path, '*.cuh')) # CUDA libraries library_dirs += nvcc_settings['library_dirs'] libraries += nvcc_settings['libraries'] # Flags nvcc_flags += ['-x', 'cu'] nvcc_flags += ['--compiler-options', '"-fPIC"'] # --gpu-architecture=sm_xy flags nvcc_flags += cuda_architecture_flags(device_info) # Ideally this would be set in define_macros, but # this must be set differently for gcc and nvcc nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)] return Extension(tensorflow_extension_name, sources=sources, include_dirs=include_dirs, depends=depends, library_dirs=library_dirs, libraries=libraries, define_macros=define_macros, # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler_for_nvcc() above extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags }, extra_link_args=extra_link_args, )
<SYSTEM_TASK:> Supply point source lm coordinates to montblanc <END_TASK> <USER_TASK:> Description: def point_lm(self, context): """ Supply point source lm coordinates to montblanc """
# Shape (npsrc, 2)
(ls, us), _ = context.array_extents(context.name)
return np.asarray(lm_coords[ls:us], dtype=context.dtype)
<SYSTEM_TASK:> Supply point source stokes parameters to montblanc <END_TASK> <USER_TASK:> Description: def point_stokes(self, context): """ Supply point source stokes parameters to montblanc """
# Shape (npsrc, ntime, 4) (ls, us), (lt, ut), (l, u) = context.array_extents(context.name) data = np.empty(context.shape, context.dtype) data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:] return data
<SYSTEM_TASK:> Supply UVW antenna coordinates to montblanc <END_TASK> <USER_TASK:> Description: def uvw(self, context): """ Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3) (lt, ut), (la, ua), (l, u) = context.array_extents(context.name) # Create empty UVW coordinates data = np.empty(context.shape, context.dtype) data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index data[:,:,1] = 0 # V = 0 data[:,:,2] = 0 # W = 0 return data
<SYSTEM_TASK:> Compute the number of baselines for the <END_TASK> <USER_TASK:> Description: def nr_of_baselines(na, auto_correlations=False): """ Compute the number of baselines for the given number of antenna. Can specify whether auto-correlations should be taken into account """
m = (na-1) if auto_correlations is False else (na+1)
return (na*m)//2
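For example:

# 7 antennas give 7*6/2 = 21 baselines, or 7*8/2 = 28 with auto-correlations
assert nr_of_baselines(7) == 21
assert nr_of_baselines(7, auto_correlations=True) == 28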
<SYSTEM_TASK:> Compute the number of antenna for the <END_TASK> <USER_TASK:> Description: def nr_of_antenna(nbl, auto_correlations=False): """ Compute the number of antenna for the given number of baselines. Can specify whether auto-correlations should be taken into account """
t = 1 if auto_correlations is False else -1
return int(t + math.sqrt(1 + 8*nbl)) // 2
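This inverts nr_of_baselines, for example:

# 21 baselines (28 with auto-correlations) correspond to 7 antennas
assert nr_of_antenna(21) == 7
assert nr_of_antenna(28, auto_correlations=True) == 7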
<SYSTEM_TASK:> Estimates the memory in bytes required for an array of the supplied shape and dtype <END_TASK> <USER_TASK:> Description: def array_bytes(shape, dtype): """ Estimates the memory in bytes required for an array of the supplied shape and dtype """
return np.product(shape)*np.dtype(dtype).itemsize
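For example:

import numpy as np

# 100 x 64 x 4 complex64 values at 8 bytes each: 204800 bytes
assert array_bytes((100, 64, 4), np.complex64) == 100*64*4*8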
<SYSTEM_TASK:> Returns a random array of the same shape and type as the <END_TASK> <USER_TASK:> Description: def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """
if ary is not None:
    shape, dtype = ary.shape, ary.dtype
elif shape is None or dtype is None:
    raise ValueError((
        'random_like(ary, shape, dtype) must be supplied '
        'with either an array argument, or the shape and dtype '
        'of the desired random array.'))

if np.issubdtype(dtype, np.complexfloating):
    return (np.random.random(size=shape) +
            np.random.random(size=shape)*1j).astype(dtype)
else:
    return np.random.random(size=shape).astype(dtype)
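For example:

import numpy as np

# Mirror an existing array's shape and dtype...
a = np.empty((4, 3), dtype=np.complex64)
b = random_like(a)
assert b.shape == (4, 3) and b.dtype == np.complex64

# ...or request a shape and dtype directly
c = random_like(shape=(10,), dtype=np.float32)
assert c.shape == (10,) and c.dtype == np.float32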
<SYSTEM_TASK:> Return a flatten version of the nested argument <END_TASK> <USER_TASK:> Description: def flatten(nested): """ Return a flatten version of the nested argument """
flat_return = list()

def __inner_flat(nested, flat):
    for i in nested:
        __inner_flat(i, flat) if isinstance(i, list) else flat.append(i)
    return flat

__inner_flat(nested, flat_return)

return flat_return
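For example:

assert flatten([1, [2, [3, 4]], [5]]) == [1, 2, 3, 4, 5]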
<SYSTEM_TASK:> Return the number of bytes required by an array <END_TASK> <USER_TASK:> Description: def dict_array_bytes(ary, template): """ Return the number of bytes required by an array Arguments --------------- ary : dict Dictionary representation of an array template : dict A dictionary of key-values, used to replace any string values in the array with concrete integral values Returns ----------- The number of bytes required to represent the array. """
shape = shape_from_str_tuple(ary['shape'], template) dtype = dtype_from_str(ary['dtype'], template) return array_bytes(shape, dtype)
<SYSTEM_TASK:> Return the number of bytes required by <END_TASK> <USER_TASK:> Description: def dict_array_bytes_required(arrays, template): """ Return the number of bytes required by a dictionary of arrays. Arguments --------------- arrays : list A list of dictionaries defining the arrays template : dict A dictionary of key-values, used to replace any string values in the arrays with concrete integral values Returns ----------- The number of bytes required to represent all the arrays. """
return np.sum([dict_array_bytes(ary, template) for ary in arrays])
<SYSTEM_TASK:> Returns the number of timesteps possible, given the registered arrays <END_TASK> <USER_TASK:> Description: def viable_dim_config(bytes_available, arrays, template, dim_ord, nsolvers=1): """ Returns the number of timesteps possible, given the registered arrays and a memory budget defined by bytes_available Arguments ---------------- bytes_available : int The memory budget, or available number of bytes for solving the problem. arrays : list List of dictionaries describing the arrays template : dict Dictionary containing key-values that will be used to replace any string representations of dimensions and types. slvr.template_dict() will return something suitable. dim_ord : list list of dimension string names that the problem should be subdivided by. e.g. ['ntime', 'nbl', 'nchan']. Multiple dimensions can be reduced simultaneously using the following syntax 'nbl&na'. This is mostly useful for the baseline-antenna equivalence. nsolvers : int Number of solvers to budget for. Defaults to one. Returns ---------- A tuple (boolean, dict). The boolean is True if the problem can fit within the supplied budget, False otherwise. The dictionary contains the reduced dimensions as key and the reduced size as value. e.g. (True, { 'ntime' : 1, 'nbl' : 1 }) For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try to fit an ntime x nbl x nchan problem into the available number of bytes. If this is not possible, it will first set ntime=1, and then try to fit a 1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan problem. One can specify reductions for specific dimensions. For e.g. ['ntime=20', 'nbl=1&na=2', 'nchan=50%'] will reduce ntime to 20, but no lower. nbl=1&na=2 sets both nbl and na to 1 and 2 in the same operation respectively. nchan=50% will continuously halve the nchan dimension until it reaches a value of 1. """
if not isinstance(dim_ord, list): raise TypeError('dim_ord should be a list') # Don't accept non-negative memory budgets if bytes_available < 0: bytes_available = 0 modified_dims = {} T = template.copy() bytes_used = dict_array_bytes_required(arrays, T)*nsolvers # While more bytes are used than are available, set # dimensions to one in the order specified by the # dim_ord argument. while bytes_used > bytes_available: try: dims = dim_ord.pop(0) montblanc.log.debug('Applying reduction {s}. ' 'Bytes available: {a} used: {u}'.format( s=dims, a=fmt_bytes(bytes_available), u=fmt_bytes(bytes_used))) dims = dims.strip().split('&') except IndexError: # No more dimensions available for reducing # the problem size. Unable to fit the problem # within the specified memory budget return False, modified_dims # Can't fit everything into memory, # Lower dimensions and re-evaluate for dim in dims: match = re.match(__DIM_REDUCTION_RE, dim) if not match: raise ValueError( "{d} is an invalid dimension reduction string " "Valid strings are for e.g. " "'ntime', 'ntime=20' or 'ntime=20%'" .format(d=dim)) dim_name = match.group('name') dim_value = match.group('value') dim_percent = match.group('percent') dim_value = 1 if dim_value is None else int(dim_value) # Attempt reduction by a percentage if dim_percent == '%': dim_value = int(T[dim_name] * int(dim_value) / 100.0) if dim_value < 1: # This can't be reduced any further dim_value = 1 else: # Allows another attempt at reduction # by percentage on this dimension dim_ord.insert(0, dim) # Apply the dimension reduction if T[dim_name] > dim_value: modified_dims[dim_name] = dim_value T[dim_name] = dim_value else: montblanc.log.info(('Ignored reduction of {d} ' 'of size {s} to {v}. ').format( d=dim_name, s=T[dim_name], v=dim_value)) bytes_used = dict_array_bytes_required(arrays, T)*nsolvers return True, modified_dims
<SYSTEM_TASK:> Substitutes string values in the supplied shape parameter <END_TASK> <USER_TASK:> Description: def shape_from_str_tuple(sshape, variables, ignore=None): """ Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/string composed of integers and strings. The strings should relate to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3) """
if ignore is None: ignore = [] if not isinstance(sshape, tuple) and not isinstance(sshape, list): raise TypeError, 'sshape argument must be a tuple or list' if not isinstance(ignore, list): raise TypeError, 'ignore argument must be a list' return tuple([int(eval_expr(v,variables)) if isinstance(v,str) else int(v) for v in sshape if v not in ignore])
<SYSTEM_TASK:> Shape a list of lists into the appropriate shape and data type <END_TASK> <USER_TASK:> Description: def shape_list(l,shape,dtype): """ Shape a list of lists into the appropriate shape and data type """
return np.array(l, dtype=dtype).reshape(shape)
<SYSTEM_TASK:> Return a function defining the conversion process between two NumPy <END_TASK> <USER_TASK:> Description: def array_convert_function(sshape_one, sshape_two, variables): """ Return a function defining the conversion process between two NumPy arrays of different shapes """
if not isinstance(sshape_one, tuple): sshape_one = (sshape_one,) if not isinstance(sshape_two, tuple): sshape_two = (sshape_two,) s_one = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_one]) s_two = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_two]) if len(s_one) != len(s_two): raise ValueError, ('Flattened shapes %s and %s '\ 'do not have the same length. ' 'Original shapes were %s and %s') % \ (s_one, s_two, sshape_one, sshape_two) # Reason about the transpose t_idx = tuple([s_one.index(v) for v in s_two]) # Figure out the actual numeric shape values to use n_one = shape_from_str_tuple(s_one, variables) n_two = [eval_expr(d,variables) if isinstance(d,str) else d for d in sshape_two] def f(ary): return np.reshape(ary, n_one).transpose(t_idx).reshape(n_two) return f
<SYSTEM_TASK:> Redistribute threads from the Z dimension towards the X dimension. <END_TASK> <USER_TASK:> Description: def redistribute_threads(blockdimx, blockdimy, blockdimz, dimx, dimy, dimz): """ Redistribute threads from the Z dimension towards the X dimension. Also clamp number of threads to the problem dimension size, if necessary """
# Shift threads from the z dimension # into the y dimension while blockdimz > dimz: tmp = blockdimz // 2 if tmp < dimz: break blockdimy *= 2 blockdimz = tmp # Shift threads from the y dimension # into the x dimension while blockdimy > dimy: tmp = blockdimy // 2 if tmp < dimy: break blockdimx *= 2 blockdimy = tmp # Clamp the block dimensions # if necessary if dimx < blockdimx: blockdimx = dimx if dimy < blockdimy: blockdimy = dimy if dimz < blockdimz: blockdimz = dimz return blockdimx, blockdimy, blockdimz
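For example, a (32, 8, 4) block applied to a problem of size (1000, 1000, 1) folds the z threads into y and is then clamped:

assert redistribute_threads(32, 8, 4, 1000, 1000, 1) == (32, 32, 1)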
<SYSTEM_TASK:> Register the default dimensions for a RIME solver <END_TASK> <USER_TASK:> Description: def register_default_dimensions(cube, slvr_cfg): """ Register the default dimensions for a RIME solver """
import montblanc.src_types as mbs

# Pull out the configuration options for the basics
autocor = slvr_cfg['auto_correlations']

ntime = 10
na = 7
nbands = 1
nchan = 16
npol = 4

# Infer number of baselines from number of antenna,
nbl = nr_of_baselines(na, autocor)

if not npol == 4:
    raise ValueError("npol set to {}, but only 4 polarisations "
                     "are currently supported.".format(npol))

# Register these dimensions on this solver.
cube.register_dimension('ntime', ntime,
                        description="Timesteps")
cube.register_dimension('na', na,
                        description="Antenna")
cube.register_dimension('nbands', nbands,
                        description="Bands")
cube.register_dimension('nchan', nchan,
                        description="Channels")
cube.register_dimension('npol', npol,
                        description="Polarisations")
cube.register_dimension('nbl', nbl,
                        description="Baselines")

# Register dependent dimensions
cube.register_dimension('npolchan', nchan*npol,
                        description='Polarised channels')
cube.register_dimension('nvis', ntime*nbl*nchan,
                        description='Visibilities')

# Convert the source types, and their numbers
# to their number variables and numbers
# { 'point':10 } => { 'npsrc':10 }
src_cfg = default_sources()
src_nr_vars = sources_to_nr_vars(src_cfg)

# Sum to get the total number of sources
cube.register_dimension('nsrc', sum(src_nr_vars.itervalues()),
                        description="Sources (Total)")

# Register the individual source types
for nr_var, nr_of_src in src_nr_vars.iteritems():
    cube.register_dimension(nr_var, nr_of_src,
                            description='{} sources'.format(mbs.SOURCE_DIM_TYPES[nr_var]))
<SYSTEM_TASK:> Hack to get IP address from the interface <END_TASK> <USER_TASK:> Description: def get_ip_address(ifname): """ Hack to get IP address from the interface """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

return socket.inet_ntoa(fcntl.ioctl(
    s.fileno(),
    0x8915,  # SIOCGIFADDR
    struct.pack('256s', ifname[:15])
)[20:24])
<SYSTEM_TASK:> Find nvcc and the CUDA installation <END_TASK> <USER_TASK:> Description: def nvcc_compiler_settings(): """ Find nvcc and the CUDA installation """
search_paths = os.environ.get('PATH', '').split(os.pathsep) nvcc_path = find_in_path('nvcc', search_paths) default_cuda_path = os.path.join('usr', 'local', 'cuda') cuda_path = os.environ.get('CUDA_PATH', default_cuda_path) nvcc_found = os.path.exists(nvcc_path) cuda_path_found = os.path.exists(cuda_path) # Can't find either NVCC or some CUDA_PATH if not nvcc_found and not cuda_path_found: raise InspectCudaException("Neither nvcc '{}' " "or the CUDA_PATH '{}' were found!".format( nvcc_path, cuda_path)) # No NVCC, try find it in the CUDA_PATH if not nvcc_found: log.warn("nvcc compiler not found at '{}'. " "Searching within the CUDA_PATH '{}'" .format(nvcc_path, cuda_path)) bin_dir = os.path.join(cuda_path, 'bin') nvcc_path = find_in_path('nvcc', bin_dir) nvcc_found = os.path.exists(nvcc_path) if not nvcc_found: raise InspectCudaException("nvcc not found in '{}' " "or under the CUDA_PATH at '{}' " .format(search_paths, cuda_path)) # No CUDA_PATH found, infer it from NVCC if not cuda_path_found: cuda_path = os.path.normpath( os.path.join(os.path.dirname(nvcc_path), "..")) log.warn("CUDA_PATH not found, inferring it as '{}' " "from the nvcc location '{}'".format( cuda_path, nvcc_path)) cuda_path_found = True # Set up the compiler settings include_dirs = [] library_dirs = [] define_macros = [] if cuda_path_found: include_dirs.append(os.path.join(cuda_path, 'include')) if sys.platform == 'win32': library_dirs.append(os.path.join(cuda_path, 'bin')) library_dirs.append(os.path.join(cuda_path, 'lib', 'x64')) else: library_dirs.append(os.path.join(cuda_path, 'lib64')) library_dirs.append(os.path.join(cuda_path, 'lib')) if sys.platform == 'darwin': library_dirs.append(os.path.join(default_cuda_path, 'lib')) return { 'cuda_available' : True, 'nvcc_path' : nvcc_path, 'include_dirs': include_dirs, 'library_dirs': library_dirs, 'define_macros': define_macros, 'libraries' : ['cudart', 'cuda'], 'language': 'c++', }
<SYSTEM_TASK:> Returns a dictionary suitable for templating strings with <END_TASK> <USER_TASK:> Description: def template_dict(self): """ Returns a dictionary suitable for templating strings with properties and dimensions related to this Solver object. Used in templated GPU kernels. """
slvr = self D = { # Constants 'LIGHTSPEED': montblanc.constants.C, } # Map any types D.update(self.type_dict()) # Update with dimensions D.update(self.dim_local_size_dict()) # Add any registered properties to the dictionary for p in self._properties.itervalues(): D[p.name] = getattr(self, p.name) return D
<SYSTEM_TASK:> Factory function that produces a RIME solver <END_TASK> <USER_TASK:> Description: def rime_solver(slvr_cfg): """ Factory function that produces a RIME solver """
from montblanc.impl.rime.tensorflow.RimeSolver import RimeSolver
return RimeSolver(slvr_cfg)
<SYSTEM_TASK:> Computes parallactic angles per timestep for the given <END_TASK> <USER_TASK:> Description: def parallactic_angles(times, antenna_positions, field_centre): """ Computes parallactic angles per timestep for the given reference antenna position and field centre. Arguments: times: ndarray Array of unique times with shape (ntime,), obtained from TIME column of MS table antenna_positions: ndarray of shape (na, 3) Antenna positions, obtained from POSITION column of MS ANTENNA sub-table field_centre : ndarray of shape (2,) Field centre, should be obtained from MS PHASE_DIR Returns: An array of parallactic angles per time-step """
import pyrap.quanta as pq try: # Create direction measure for the zenith zenith = pm.direction('AZEL','0deg','90deg') except AttributeError as e: if pm is None: raise ImportError("python-casacore import failed") raise # Create position measures for each antenna reference_positions = [pm.position('itrf', *(pq.quantity(x,'m') for x in pos)) for pos in antenna_positions] # Compute field centre in radians fc_rad = pm.direction('J2000', *(pq.quantity(f,'rad') for f in field_centre)) return np.asarray([ # Set current time as the reference frame pm.do_frame(pm.epoch("UTC", pq.quantity(t, "s"))) and [ # Set antenna position as the reference frame pm.do_frame(rp) and pm.posangle(fc_rad, zenith).get_value("rad") for rp in reference_positions ] for t in times])
<SYSTEM_TASK:> Caches constant arrays associated with an array name. <END_TASK> <USER_TASK:> Description: def constant_cache(method): """ Caches constant arrays associated with an array name. The intent of this decorator is to avoid the cost of recreating and storing many arrays of constant data, especially data created by np.zeros or np.ones. Instead, a single array of the first given shape is created and any further requests for constant data of the same (or smaller) shape are served from the cache. Requests for larger shapes or different types are regarded as a cache miss and will result in replacement of the existing cache value. """
@functools.wraps(method) def wrapper(self, context): # Defer to method if no caching is enabled if not self._is_cached: return method(self, context) name = context.name cached = self._constant_cache.get(name, None) # No cached value, call method and return if cached is None: data = self._constant_cache[name] = method(self, context) return data # Can we just slice the existing cache entry? # 1. Are all context.shape's entries less than or equal # to the shape of the cached data? # 2. Do they have the same dtype? cached_ok = (cached.dtype == context.dtype and all(l <= r for l,r in zip(context.shape, cached.shape))) # Need to return something bigger or a different type if not cached_ok: data = self._constant_cache[name] = method(self, context) return data # Otherwise slice the cached data return cached[tuple(slice(0, s) for s in context.shape)] f = wrapper f.__decorator__ = constant_cache.__name__ return f
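A hypothetical provider using the decorator might look like this; the context object is assumed to expose name, shape and dtype as in the data sources above:

import numpy as np

# Sketch only: a provider whose constant (zeroed) data is served from the cache
class ZeroSourceProvider(object):
    def __init__(self, cache=True):
        self._is_cached = cache
        self._constant_cache = {}

    @constant_cache
    def point_stokes(self, context):
        # Recomputed only on a cache miss (larger shape or different dtype)
        return np.zeros(context.shape, context.dtype)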
<SYSTEM_TASK:> Caches chunks of default data. <END_TASK> <USER_TASK:> Description: def chunk_cache(method): """ Caches chunks of default data. This decorator caches generated default data so as to avoid recomputing it on a subsequent queries to the provider. """
@functools.wraps(method) def wrapper(self, context): # Defer to the method if no caching is enabled if not self._is_cached: return method(self, context) # Construct the key for the given index name = context.name idx = context.array_extents(name) key = tuple(i for t in idx for i in t) # Access the sub-cache for this array array_cache = self._chunk_cache[name] # Cache miss, call the function if key not in array_cache: array_cache[key] = method(self, context) return array_cache[key] f = wrapper f.__decorator__ = chunk_cache.__name__ return f
<SYSTEM_TASK:> Create a DefaultsSourceProvider object. This provides default <END_TASK> <USER_TASK:> Description: def _create_defaults_source_provider(cube, data_source): """ Create a DefaultsSourceProvider object. This provides default data sources for each array defined on the hypercube. The data sources may either by obtained from the arrays 'default' data source or the 'test' data source. """
from montblanc.impl.rime.tensorflow.sources import ( find_sources, DEFAULT_ARGSPEC) from montblanc.impl.rime.tensorflow.sources import constant_cache # Obtain default data sources for each array, # Just take from defaults if test data isn't specified staging_area_data_source = ('default' if not data_source == 'test' else data_source) cache = True default_prov = DefaultsSourceProvider(cache=cache) # Create data sources on the source provider from # the cube array data sources for n, a in cube.arrays().iteritems(): # Unnecessary for temporary arrays if 'temporary' in a.tags: continue # Obtain the data source data_source = a.get(staging_area_data_source) # Array marked as constant, decorate the data source # with a constant caching decorator if cache is True and 'constant' in a.tags: data_source = constant_cache(data_source) method = types.MethodType(data_source, default_prov) setattr(default_prov, n, method) def _sources(self): """ Override the sources method to also handle lambdas that look like lambda s, c: ..., as defined in the config module """ try: return self._sources except AttributeError: self._sources = find_sources(self, [DEFAULT_ARGSPEC] + [['s', 'c']]) return self._sources # Monkey patch the sources method default_prov.sources = types.MethodType(_sources, default_prov) return default_prov
<SYSTEM_TASK:> Supply data to the data sink <END_TASK> <USER_TASK:> Description: def _supply_data(data_sink, context): """ Supply data to the data sink """
try: data_sink.sink(context) except Exception as e: ex = ValueError("An exception occurred while " "supplying data to data sink '{ds}'\n\n" "{e}\n\n" "{help}".format(ds=context.name, e=str(e), help=context.help())) raise ex, None, sys.exc_info()[2]
<SYSTEM_TASK:> Sets up the hypercube given a solver configuration <END_TASK> <USER_TASK:> Description: def _setup_hypercube(cube, slvr_cfg): """ Sets up the hypercube given a solver configuration """
mbu.register_default_dimensions(cube, slvr_cfg) # Configure the dimensions of the beam cube cube.register_dimension('beam_lw', 2, description='E Beam cube l width') cube.register_dimension('beam_mh', 2, description='E Beam cube m height') cube.register_dimension('beam_nud', 2, description='E Beam cube nu depth') # ========================================= # Register hypercube Arrays and Properties # ========================================= from montblanc.impl.rime.tensorflow.config import (A, P) def _massage_dtypes(A, T): def _massage_dtype_in_dict(D): new_dict = D.copy() new_dict['dtype'] = mbu.dtype_from_str(D['dtype'], T) return new_dict return [_massage_dtype_in_dict(D) for D in A] dtype = slvr_cfg['dtype'] is_f32 = dtype == 'float' T = { 'ft' : np.float32 if is_f32 else np.float64, 'ct' : np.complex64 if is_f32 else np.complex128, 'int' : int, } cube.register_properties(_massage_dtypes(P, T)) cube.register_arrays(_massage_dtypes(A, T))
<SYSTEM_TASK:> Partition data sources into <END_TASK> <USER_TASK:> Description: def _partition(iter_dims, data_sources): """ Partition data sources into 1. Dictionary of data sources associated with radio sources. 2. List of data sources to feed multiple times. 3. List of data sources to feed once. """
src_nr_vars = set(source_var_types().values()) iter_dims = set(iter_dims) src_data_sources = collections.defaultdict(list) feed_many = [] feed_once = [] for ds in data_sources: # Is this data source associated with # a radio source (point, gaussian, etc.?) src_int = src_nr_vars.intersection(ds.shape) if len(src_int) > 1: raise ValueError("Data source '{}' contains multiple " "source types '{}'".format(ds.name, src_int)) elif len(src_int) == 1: # Yep, record appropriately and iterate src_data_sources[src_int.pop()].append(ds) continue # Are we feeding this data source multiple times # (Does it possess dimensions on which we iterate?) if len(iter_dims.intersection(ds.shape)) > 0: feed_many.append(ds) continue # Assume this is a data source that we only feed once feed_once.append(ds) return src_data_sources, feed_many, feed_once
<SYSTEM_TASK:> Implementation of staging_area feeding <END_TASK> <USER_TASK:> Description: def _feed_impl(self, cube, data_sources, data_sinks, global_iter_args): """ Implementation of staging_area feeding """
session = self._tf_session FD = self._tf_feed_data LSA = FD.local # Get source strides out before the local sizes are modified during # the source loops below src_types = LSA.sources.keys() src_strides = [int(i) for i in cube.dim_extent_size(*src_types)] src_staging_areas = [[LSA.sources[t][s] for t in src_types] for s in range(self._nr_of_shards)] compute_feed_dict = { ph: cube.dim_global_size(n) for n, ph in FD.src_ph_vars.iteritems() } compute_feed_dict.update({ ph: getattr(cube, n) for n, ph in FD.property_ph_vars.iteritems() }) chunks_fed = 0 which_shard = itertools.cycle([self._shard(d,s) for s in range(self._shards_per_device) for d, dev in enumerate(self._devices)]) while True: try: # Get the descriptor describing a portion of the RIME result = session.run(LSA.descriptor.get_op) descriptor = result['descriptor'] except tf.errors.OutOfRangeError as e: montblanc.log.exception("Descriptor reading exception") # Quit if EOF if descriptor[0] == -1: break # Make it read-only so we can hash the contents descriptor.flags.writeable = False # Find indices of the emptiest staging_areas and, by implication # the shard with the least work assigned to it emptiest_staging_areas = np.argsort(self._inputs_waiting.get()) shard = emptiest_staging_areas[0] shard = which_shard.next() feed_f = self._feed_executors[shard].submit(self._feed_actual, data_sources.copy(), cube.copy(), descriptor, shard, src_types, src_strides, src_staging_areas[shard], global_iter_args) compute_f = self._compute_executors[shard].submit(self._compute, compute_feed_dict, shard) consume_f = self._consumer_executor.submit(self._consume, data_sinks.copy(), cube.copy(), global_iter_args) self._inputs_waiting.increment(shard) yield (feed_f, compute_f, consume_f) chunks_fed += 1 montblanc.log.info("Done feeding {n} chunks.".format(n=chunks_fed))
<SYSTEM_TASK:> Call the tensorflow compute <END_TASK> <USER_TASK:> Description: def _compute(self, feed_dict, shard): """ Call the tensorflow compute """
try: descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict) self._inputs_waiting.decrement(shard) except Exception as e: montblanc.log.exception("Compute Exception") raise
<SYSTEM_TASK:> Produces a SolverConfiguration object, inherited from <END_TASK> <USER_TASK:> Description: def rime_solver_cfg(**kwargs): """ Produces a SolverConfiguration object, inherited from a simple python dict, and containing the options required to configure the RIME Solver. Keyword arguments ----------------- Any keyword arguments are inserted into the returned dict. Returns ------- A SolverConfiguration object. """
from configuration import (load_config, config_validator, raise_validator_errors) def _merge_copy(d1, d2): return { k: _merge_copy(d1[k], d2[k]) if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict) else d2[k] for k in d2 } try: cfg_file = kwargs.pop('cfg_file') except KeyError as e: slvr_cfg = kwargs else: cfg = load_config(cfg_file) slvr_cfg = _merge_copy(cfg, kwargs) # Validate the configuration, raising any errors validator = config_validator() validator.validate(slvr_cfg) raise_validator_errors(validator) return validator.document
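A usage sketch; the option names below are taken from their usage elsewhere in this code, and the authoritative set is defined by the configuration schema:

# Illustrative only -- valid options are determined by config_validator()
slvr_cfg = rime_solver_cfg(dtype='float',
                           auto_correlations=False,
                           data_source='default')
slvr = rime_solver(slvr_cfg)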
<SYSTEM_TASK:> Initialise the object by generating appropriate filenames, <END_TASK> <USER_TASK:> Description: def _initialise(self, feed_type="linear"): """ Initialise the object by generating appropriate filenames, opening associated file handles and inspecting the FITS axes of these files. """
self._filenames = filenames = _create_filenames(self._filename_schema, feed_type) self._files = files = _open_fits_files(filenames) self._axes = axes = _create_axes(filenames, files) self._dim_indices = dim_indices = l_ax, m_ax, f_ax = tuple( axes.iaxis(d) for d in self._fits_dims) # Complain if we can't find required axes for i, ax in zip(dim_indices, self._fits_dims): if i == -1: raise ValueError("'%s' axis not found!" % ax) self._cube_extents = _cube_extents(axes, l_ax, m_ax, f_ax, self._l_sign, self._m_sign) self._shape = tuple(axes.naxis[d] for d in dim_indices) + (4,) self._beam_freq_map = axes.grid[f_ax] # Now describe our dimension sizes self._dim_updates = [(n, axes.naxis[i]) for n, i in zip(self._beam_dims, dim_indices)] self._initialised = True
<SYSTEM_TASK:> Decorator for caching data source return values <END_TASK> <USER_TASK:> Description: def _cache(method): """ Decorator for caching data source return values Create a key index for the proxied array in the context. Iterate over the array shape descriptor e.g. (ntime, nbl, 3) returning tuples containing the lower and upper extents of string dimensions. Takes (0, d) in the case of an integer dimensions. """
@functools.wraps(method) def memoizer(self, context): # Construct the key for the given index idx = context.array_extents(context.name) key = tuple(i for t in idx for i in t) with self._lock: # Access the sub-cache for this data source array_cache = self._cache[context.name] # Cache miss, call the data source if key not in array_cache: array_cache[key] = method(context) return array_cache[key] return memoizer
<SYSTEM_TASK:> Decorator returning a method that proxies a data source. <END_TASK> <USER_TASK:> Description: def _proxy(method): """ Decorator returning a method that proxies a data source. """
@functools.wraps(method) def memoizer(self, context): return method(context) return memoizer
<SYSTEM_TASK:> Perform any logic on solution start <END_TASK> <USER_TASK:> Description: def start(self, start_context): """ Perform any logic on solution start """
for p in self._providers: p.start(start_context) if self._clear_start: self.clear_cache()
<SYSTEM_TASK:> Perform any logic on solution stop <END_TASK> <USER_TASK:> Description: def stop(self, stop_context): """ Perform any logic on solution stop """
for p in self._providers: p.stop(stop_context) if self._clear_stop: self.clear_cache()
<SYSTEM_TASK:> Load the tensorflow library <END_TASK> <USER_TASK:> Description: def load_tf_lib(): """ Load the tensorflow library """
from os.path import join as pjoin import pkg_resources import tensorflow as tf path = pjoin('ext', 'rime.so') rime_lib_path = pkg_resources.resource_filename("montblanc", path) return tf.load_op_library(rime_lib_path)
<SYSTEM_TASK:> Raise any errors associated with the validator. <END_TASK> <USER_TASK:> Description: def raise_validator_errors(validator): """ Raise any errors associated with the validator. Parameters ---------- validator : :class:`cerberus.Validator` Validator Raises ------ ValueError Raised if errors existed on `validator`. Message describing each error and information associated with the configuration option causing the error. """
if len(validator._errors) == 0: return def _path_str(path, name=None): """ String of the document/schema path. `cfg["foo"]["bar"]` """ L = [name] if name is not None else [] L.extend('["%s"]' % p for p in path) return "".join(L) def _path_leaf(path, dicts): """ Dictionary Leaf of the schema/document given the path """ for p in path: dicts = dicts[p] return dicts wrap = partial(textwrap.wrap, initial_indent=' '*4, subsequent_indent=' '*8) msg = ["There were configuration errors:"] for e in validator._errors: schema_leaf = _path_leaf(e.document_path, validator.schema) doc_str = _path_str(e.document_path, "cfg") msg.append("Invalid configuration option %s == '%s'." % (doc_str, e.value)) try: otype = schema_leaf["type"] msg.extend(wrap("Type must be '%s'." % otype)) except KeyError: pass try: allowed = schema_leaf["allowed"] msg.extend(wrap("Allowed values are '%s'." % allowed)) except KeyError: pass try: description = schema_leaf["__description__"] msg.extend(wrap("Description: %s" % description)) except KeyError: pass raise ValueError("\n".join(msg))
<SYSTEM_TASK:> Take a multiline text and indent it as a block <END_TASK> <USER_TASK:> Description: def indented(text, level, indent=2): """Take a multiline text and indent it as a block"""
return "\n".join("%s%s" % (level * indent * " ", s) for s in text.splitlines())
<SYSTEM_TASK:> Put curly brackets round an indented text <END_TASK> <USER_TASK:> Description: def dumped(text, level, indent=2): """Put curly brackets round an indented text"""
return indented("{\n%s\n}" % indented(text, level + 1, indent) or "None", level, indent) + "\n"
<SYSTEM_TASK:> Perform a shell-based file copy. Copying in <END_TASK> <USER_TASK:> Description: def copy_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """
return _file_operation( shellcon.FO_COPY, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
<SYSTEM_TASK:> Perform a shell-based file move. Moving in <END_TASK> <USER_TASK:> Description: def move_file(
    source_path,
    target_path,
    allow_undo=True,
    no_confirm=False,
    rename_on_collision=True,
    silent=False,
    extra_flags=0,
    hWnd=None
):
    """Perform a shell-based file move. Moving in
    this way allows the possibility of undo, auto-renaming,
    and showing the "flying file" animation during the move.

    The default options allow for undo, don't automatically
    clobber on a name clash, automatically rename on collision
    and display the animation.
    """
return _file_operation( shellcon.FO_MOVE, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
<SYSTEM_TASK:> Perform a shell-based file rename. Renaming in <END_TASK> <USER_TASK:> Description: def rename_file(
    source_path,
    target_path,
    allow_undo=True,
    no_confirm=False,
    rename_on_collision=True,
    silent=False,
    extra_flags=0,
    hWnd=None
):
    """Perform a shell-based file rename. Renaming in
    this way allows the possibility of undo, auto-renaming,
    and showing the "flying file" animation during the rename.

    The default options allow for undo, don't automatically
    clobber on a name clash, automatically rename on collision
    and display the animation.
    """
return _file_operation( shellcon.FO_RENAME, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
<SYSTEM_TASK:> Perform a shell-based file delete. Deleting in <END_TASK> <USER_TASK:> Description: def delete_file(
    source_path,
    allow_undo=True,
    no_confirm=False,
    silent=False,
    extra_flags=0,
    hWnd=None
):
    """Perform a shell-based file delete. Deleting in
    this way uses the system recycle bin, allows the
    possibility of undo, and shows the "flying file"
    animation during the delete.

    The default options allow for undo, ask for confirmation
    and display the animation.
    """
return _file_operation( shellcon.FO_DELETE, source_path, None, allow_undo, no_confirm, False, silent, extra_flags, hWnd )
<SYSTEM_TASK:> Given a list of arrays to feed in fed_arrays, return <END_TASK> <USER_TASK:> Description: def _get_queue_types(fed_arrays, data_sources): """ Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary """
try: return [data_sources[n].dtype for n in fed_arrays] except KeyError as e: raise ValueError("Array '{k}' has no data source!" .format(k=e.message)), None, sys.exc_info()[2]
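For illustration, with a hypothetical data source descriptor carrying a ``dtype`` field (only that attribute is used above):

import numpy as np
from collections import namedtuple

# Hypothetical descriptor; the real data_sources values only need a .dtype
DataSource = namedtuple('DataSource', ['source', 'dtype'])

data_sources = {
    'uvw': DataSource(lambda ctx: None, np.float64),
    'frequency': DataSource(lambda ctx: None, np.float64),
}

# [np.float64, np.float64]
queue_types = _get_queue_types(['uvw', 'frequency'], data_sources)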
<SYSTEM_TASK:> Parses a string containing assign statements <END_TASK> <USER_TASK:> Description: def parse_python_assigns(assign_str):
    """
    Parses a string containing assign statements into a dictionary.

    .. code-block:: python

        h5 = katdal.open('123456789.h5')

        kwargs = parse_python_assigns("spw=3; scans=[1,2];"
                                      "targets='bpcal,radec';"
                                      "channels=slice(0,2048)")

        h5.select(**kwargs)

    Parameters
    ----------
    assign_str: str
        Assignment string. Should only contain assignment statements
        assigning python literals or builtin function calls to variable names.
        Multiple assignment statements should be separated by semi-colons.

    Returns
    -------
    dict
        Dictionary { name: value } containing assignment results.
    """
    if not assign_str:
        return {}

    def _eval_value(stmt_value):
        # If the statement value is a call to a builtin, try evaluate it
        if isinstance(stmt_value, ast.Call):
            func_name = stmt_value.func.id

            if func_name not in _BUILTIN_WHITELIST:
                raise ValueError("Function '%s' in '%s' is not builtin. "
                                 "Available builtins: '%s'" %
                                 (func_name, assign_str, list(_BUILTIN_WHITELIST)))

            # Recursively pass arguments through this same function
            if stmt_value.args is not None:
                args = tuple(_eval_value(a) for a in stmt_value.args)
            else:
                args = ()

            # Recursively pass keyword arguments through this same function
            if stmt_value.keywords is not None:
                kwargs = {kw.arg : _eval_value(kw.value) for kw
                          in stmt_value.keywords}
            else:
                kwargs = {}

            return getattr(__builtin__, func_name)(*args, **kwargs)
        # Try a literal eval
        else:
            return ast.literal_eval(stmt_value)

    # Variable dictionary
    variables = {}

    # Parse the assignment string
    stmts = ast.parse(assign_str, mode='single').body

    for i, stmt in enumerate(stmts):
        if not isinstance(stmt, ast.Assign):
            raise ValueError("Statement %d in '%s' is not a "
                             "variable assignment." % (i, assign_str))

        # Evaluate the assignment's right-hand side
        values = _eval_value(stmt.value)

        # "a = b = c" => targets 'a' and 'b' with 'c' as result
        for target in stmt.targets:
            # a = 2
            if isinstance(target, ast.Name):
                variables[target.id] = values

            # Tuple/List unpacking case
            # (a, b) = 2
            elif isinstance(target, (ast.Tuple, ast.List)):
                # Require all tuple/list elements to be variable names,
                # although anything else is probably a syntax error
                if not all(isinstance(e, ast.Name) for e in target.elts):
                    raise ValueError("Tuple unpacking in assignment %d "
                                     "in expression '%s' failed as not all "
                                     "tuple contents are variable names."
                                     % (i, assign_str))

                # Promote for zip and length checking
                if not isinstance(values, (tuple, list)):
                    elements = (values,)
                else:
                    elements = values

                if not len(target.elts) == len(elements):
                    raise ValueError("Unpacking '%s' into a tuple/list in "
                                     "assignment %d of expression '%s' failed. "
                                     "The number of tuple elements did not match "
                                     "the number of values."
                                     % (values, i, assign_str))

                # Unpack
                for variable, value in zip(target.elts, elements):
                    variables[variable.id] = value
            else:
                raise TypeError("'%s' types are not supported "
                                "as assignment targets." % type(target))

    return variables
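The docstring example covers plain keyword assignments; the sketch below also exercises the tuple-unpacking and builtin-call branches handled above, assuming ``slice`` is in the builtin whitelist as the docstring example suggests (variable names are arbitrary):

kwargs = parse_python_assigns("pol, nchan = 'full', 4096; channels=slice(0, 4096)")
assert kwargs == {'pol': 'full', 'nchan': 4096, 'channels': slice(0, 4096)}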
<SYSTEM_TASK:> numba implementation of antenna_uvw <END_TASK> <USER_TASK:> Description: def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna): """ numba implementation of antenna_uvw """
if antenna1.ndim != 1: raise ValueError("antenna1 shape should be (row,)") if antenna2.ndim != 1: raise ValueError("antenna2 shape should be (row,)") if uvw.ndim != 2 or uvw.shape[1] != 3: raise ValueError("uvw shape should be (row, 3)") if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]): raise ValueError("First dimension of uvw, antenna1 " "and antenna2 do not match") if chunks.ndim != 1: raise ValueError("chunks shape should be (utime,)") if nr_of_antenna < 1: raise ValueError("nr_of_antenna < 1") ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3) antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype) start = 0 for ci, chunk in enumerate(chunks): end = start + chunk # one pass should be enough! _antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end) start = end return antenna_uvw
<SYSTEM_TASK:> Raises an informative error for missing antenna <END_TASK> <USER_TASK:> Description: def _raise_missing_antenna_errors(ant_uvw, max_err): """ Raises an informative error for missing antenna """
# Find antenna uvw coordinates where any UVW component was nan # nan + real == nan problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2)) problem_str = [] for c, a in zip(*problems): problem_str.append("[chunk %d antenna %d]" % (c, a)) # Exit early if len(problem_str) >= max_err: break # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna were missing"] + problem_str raise AntennaMissingError('\n'.join(problem_str))
<SYSTEM_TASK:> Computes per-antenna UVW coordinates from baseline ``uvw``, <END_TASK> <USER_TASK:> Description: def antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna, check_missing=False, check_decomposition=False, max_err=100):
    """
    Computes per-antenna UVW coordinates from baseline ``uvw``,
    ``antenna1`` and ``antenna2`` coordinates logically grouped
    into baseline chunks.

    The example below illustrates two baseline chunks
    of size 6 and 5, respectively.

    .. code-block:: python

        uvw = ...
        ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32)
        ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32)
        chunks = np.array([6, 5], dtype=np.int32)

        ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4)

    The first antenna of the first baseline of a chunk is chosen as the
    origin of the antenna coordinate system, while the second antenna is
    set to the negative of the baseline UVW coordinate. Subsequent antenna
    UVW coordinates are iteratively derived from the first two coordinates.
    Thus, the baseline indices need not be properly ordered (within the chunk).

    If it is not possible to derive coordinates for an antenna,
    its coordinates will be set to nan.

    Parameters
    ----------
    uvw : np.ndarray
        Baseline UVW coordinates of shape (row, 3)
    antenna1 : np.ndarray
        Baseline first antenna of shape (row,)
    antenna2 : np.ndarray
        Baseline second antenna of shape (row,)
    chunks : np.ndarray
        Number of baselines per unique timestep with shape (chunks,)
        :code:`np.sum(chunks) == row` should hold.
    nr_of_antenna : int
        Total number of antenna in the solution.
    check_missing (optional) : bool
        If ``True`` raises an exception if it was not possible
        to compute UVW coordinates for all antenna (i.e. some were nan).
        Defaults to ``False``.
    check_decomposition (optional) : bool
        If ``True``, checks that the antenna decomposition accurately
        reproduces the coordinates in ``uvw``, or that
        :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]`
        where ``s`` and ``e`` are the start and end rows
        of chunk ``c`` respectively. Defaults to ``False``.
    max_err (optional) : integer
        Maximum number of errors when checking for missing antenna
        or inaccurate decompositions. Defaults to ``100``.

    Returns
    -------
    np.ndarray
        Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3)
    """
ant_uvw = _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna) if check_missing: _raise_missing_antenna_errors(ant_uvw, max_err=max_err) if check_decomposition: _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err=max_err) return ant_uvw
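A self-consistent toy example of the decomposition property that ``check_decomposition`` verifies; the antenna positions are fabricated, so only the relationship between the arrays matters.

import numpy as np

# Fabricated per-antenna coordinates for a single timestep
ant_pos = np.array([[0.0,  0.0, 0.0],
                    [10.0, 2.0, 0.0],
                    [-5.0, 7.0, 1.0],
                    [3.0, -4.0, 2.0]])

ant1 = np.array([0, 0, 0, 1, 1, 2], dtype=np.int32)
ant2 = np.array([1, 2, 3, 2, 3, 3], dtype=np.int32)
chunks = np.array([6], dtype=np.int32)

# Baseline UVW coordinates consistent with the antenna positions above
uvw = ant_pos[ant1] - ant_pos[ant2]

ant_uvw = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4,
                      check_missing=True, check_decomposition=True)

# Differences of per-antenna coordinates reproduce the baseline coordinates
assert np.allclose(ant_uvw[0, ant1, :] - ant_uvw[0, ant2, :], uvw)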
<SYSTEM_TASK:> Returns a dictionary mapping source types <END_TASK> <USER_TASK:> Description: def default_sources(**kwargs): """ Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0} """
S = OrderedDict() total = 0 invalid_types = [t for t in kwargs.keys() if t not in SOURCE_VAR_TYPES] for t in invalid_types: montblanc.log.warning('Source type %s is not yet ' 'implemented in montblanc. ' 'Valid source types are %s' % (t, SOURCE_VAR_TYPES.keys())) # Zero all source types for k, v in SOURCE_VAR_TYPES.iteritems(): # Try get the number of sources for this source # from the kwargs value = kwargs.get(k, 0) try: value = int(value) except ValueError: raise TypeError(('Supplied value %s ' 'for source %s cannot be ' 'converted to an integer') % \ (value, k)) total += value S[k] = value # Add a point source if no others exist if total == 0: S[POINT_TYPE] = 1 return S
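For instance, assuming the registered source types are the 'point', 'gaussian' and 'sersic' types named in the docstring:

srcs = default_sources(point=10, gaussian=20)
# OrderedDict([('point', 10), ('gaussian', 20), ('sersic', 0)])

# With no counts supplied, a single point source is substituted
# so the solver always has at least one source to work with
assert default_sources()['point'] == 1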
<SYSTEM_TASK:> Converts a source type to number of sources mapping into <END_TASK> <USER_TASK:> Description: def sources_to_nr_vars(sources): """ Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 } """
sources = default_sources(**sources) try: return OrderedDict((SOURCE_VAR_TYPES[name], nr) for name, nr in sources.iteritems()) except KeyError as e: raise KeyError(( 'No source type ''%s'' is ' 'registered. Valid source types ' 'are %s') % (e, SOURCE_VAR_TYPES.keys()))
<SYSTEM_TASK:> Given a range of source numbers, as well as a dictionary <END_TASK> <USER_TASK:> Description: def source_range_slices(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type. """
return OrderedDict((k, slice(s,e,1)) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
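A hedged illustration, assuming ``source_range_tuple`` (not shown here) maps a global source range onto per-type local ranges in declaration order:

from collections import OrderedDict

nr_var_dict = OrderedDict([('npsrc', 10), ('ngsrc', 20), ('nssrc', 0)])

# Global sources 5..25 would cover the last 5 point sources and the
# first 15 gaussians, so the result should resemble
#   OrderedDict([('npsrc', slice(5, 10, 1)),
#                ('ngsrc', slice(0, 15, 1)),
#                ('nssrc', slice(0, 0, 1))])
slices = source_range_slices(5, 25, nr_var_dict)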
<SYSTEM_TASK:> Return a lm coordinate array to montblanc <END_TASK> <USER_TASK:> Description: def point_lm(self, context): """ Return a lm coordinate array to montblanc """
lm = np.empty(context.shape, context.dtype) # Print the array schema montblanc.log.info(context.array_schema.shape) # Print the space of iteration montblanc.log.info(context.iter_args) (ls, us) = context.dim_extents('npsrc') lm[:,0] = 0.0008 lm[:,1] = 0.0036 lm[:,:] = 0 return lm
<SYSTEM_TASK:> Return a stokes parameter array to montblanc <END_TASK> <USER_TASK:> Description: def point_stokes(self, context): """ Return a stokes parameter array to montblanc """
stokes = np.empty(context.shape, context.dtype) stokes[:,:,0] = 1 stokes[:,:,1:4] = 0 return stokes
<SYSTEM_TASK:> Return a reference frequency array to montblanc <END_TASK> <USER_TASK:> Description: def ref_frequency(self, context): """ Return a reference frequency array to montblanc """
ref_freq = np.empty(context.shape, context.dtype) ref_freq[:] = 1.415e9 return ref_freq
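Taken together, the three data sources above would typically live on a single source provider. The sketch below follows the layout of montblanc's TensorFlow examples; the import path and the ``name`` method are assumptions based on those examples rather than on this document.

import numpy as np
from montblanc.impl.rime.tensorflow.sources import SourceProvider

class ExampleSourceProvider(SourceProvider):
    """Supplies point source coordinates, fluxes and reference frequencies."""

    def name(self):
        return "example-sources"

    def point_lm(self, context):
        return np.zeros(context.shape, context.dtype)

    def point_stokes(self, context):
        stokes = np.zeros(context.shape, context.dtype)
        stokes[:, :, 0] = 1  # unpolarised 1 Jy sources
        return stokes

    def ref_frequency(self, context):
        return np.full(context.shape, 1.415e9, dtype=context.dtype)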
<SYSTEM_TASK:> Update this authorization. <END_TASK> <USER_TASK:> Description: def update(self, scopes=[], add_scopes=[], rm_scopes=[], note='', note_url=''): """Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool """
success = False json = None if scopes: d = {'scopes': scopes} json = self._json(self._post(self._api, data=d), 200) if add_scopes: d = {'add_scopes': add_scopes} json = self._json(self._post(self._api, data=d), 200) if rm_scopes: d = {'remove_scopes': rm_scopes} json = self._json(self._post(self._api, data=d), 200) if note or note_url: d = {'note': note, 'note_url': note_url} json = self._json(self._post(self._api, data=d), 200) if json: self._update_(json) success = True return success
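A usage sketch with github3.py 0.x; the authorization id and scope names are illustrative.

import github3

# Authorization endpoints require basic authentication
gh = github3.login('octocat', password='secret')

auth = gh.authorization(123456)
auth.update(add_scopes=['repo'], rm_scopes=['gist'],
            note='now with repository access')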
<SYSTEM_TASK:> Iterate over the labels for every issue associated with this <END_TASK> <USER_TASK:> Description: def iter_labels(self, number=-1, etag=None): """Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s """
url = self._build_url('labels', base_url=self._api) return self._iter(int(number), url, Label, etag=etag)
<SYSTEM_TASK:> Build a record of called functions using the trace mechanism <END_TASK> <USER_TASK:> Description: def _trace(self, frame, event, arg): """ Build a record of called functions using the trace mechanism """
# Return if this is not a function call if event != 'call': return # Filter calling and called functions by module names src_mod = current_module_name(frame.f_back) dst_mod = current_module_name(frame) # Avoid tracing the tracer (specifically, call from # ContextCallTracer.__exit__ to CallTracer.stop) if src_mod == __modulename__ or dst_mod == __modulename__: return # Apply source and destination module filters if not self.srcmodflt.match(src_mod): return if not self.dstmodflt.match(dst_mod): return # Get calling and called functions src_func = current_function(frame.f_back) dst_func = current_function(frame) # Filter calling and called functions by qnames if not self.srcqnmflt.match(function_qname(src_func)): return if not self.dstqnmflt.match(function_qname(dst_func)): return # Get calling and called function full names src_name = function_fqname(src_func) dst_name = function_fqname(dst_func) # Modify full function names if necessary if self.fnmsub is not None: src_name = re.sub(self.fnmsub[0], self.fnmsub[1], src_name) dst_name = re.sub(self.fnmsub[0], self.fnmsub[1], dst_name) # Update calling function count if src_func is not None: if src_name in self.fncts: self.fncts[src_name][0] += 1 else: self.fncts[src_name] = [1, 0] # Update called function count if dst_func is not None and src_func is not None: if dst_name in self.fncts: self.fncts[dst_name][1] += 1 else: self.fncts[dst_name] = [0, 1] # Update caller/calling pair count if dst_func is not None and src_func is not None: key = (src_name, dst_name) if key in self.calls: self.calls[key] += 1 else: self.calls[key] = 1
<SYSTEM_TASK:> Default colour generating function <END_TASK> <USER_TASK:> Description: def _clrgen(n, h0, hr): """Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings """
n0 = n if n == 1 else n-1 clst = ['%f,%f,%f' % (h0 + hr*hi/n0, 0.35, 0.85) for hi in range(n)] return clst
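For example, three colours spread over a third of the hue circle:

_clrgen(3, h0=0.33, hr=0.33)
# ['0.330000,0.350000,0.850000',
#  '0.495000,0.350000,0.850000',
#  '0.660000,0.350000,0.850000']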
<SYSTEM_TASK:> Create a review comment on this pull request. <END_TASK> <USER_TASK:> Description: def create_review_comment(self, body, commit_id, path, position): """Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """
url = self._build_url('comments', base_url=self._api) data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': int(position)} json = self._json(self._post(url, data=data), 201) return ReviewComment(json, self) if json else None
<SYSTEM_TASK:> Checks to see if the pull request was merged. <END_TASK> <USER_TASK:> Description: def is_merged(self): """Checks to see if the pull request was merged. :returns: bool """
url = self._build_url('merge', base_url=self._api) return self._boolean(self._get(url), 204, 404)
<SYSTEM_TASK:> Iterate over the comments on this pull request. <END_TASK> <USER_TASK:> Description: def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """
url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
<SYSTEM_TASK:> Iterate over the files associated with this pull request. <END_TASK> <USER_TASK:> Description: def iter_files(self, number=-1, etag=None): """Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s """
url = self._build_url('files', base_url=self._api) return self._iter(int(number), url, PullFile, etag=etag)
<SYSTEM_TASK:> Iterate over the issue comments on this pull request. <END_TASK> <USER_TASK:> Description: def iter_issue_comments(self, number=-1, etag=None): """Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s """
url = self._build_url(base_url=self.links['comments']) return self._iter(int(number), url, IssueComment, etag=etag)
<SYSTEM_TASK:> Merge this pull request. <END_TASK> <USER_TASK:> Description: def merge(self, commit_message='', sha=None):
    """Merge this pull request.

    :param str commit_message: (optional), message to be used for the merge
        commit
    :param str sha: (optional), SHA that the head of the pull request must
        match in order for the merge to be performed
    :returns: bool
    """
parameters = {'commit_message': commit_message} if sha: parameters['sha'] = sha url = self._build_url('merge', base_url=self._api) json = self._json(self._put(url, data=dumps(parameters)), 200) self.merge_commit_sha = json['sha'] return json['merged']
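A usage sketch; the repository and pull request identifiers are illustrative.

import github3

gh = github3.login('octocat', password='secret')
repo = gh.repository('octocat', 'hello-world')
pull = repo.pull_request(37)

if not pull.is_merged():
    # Require the branch head to still be at the reviewed commit
    pull.merge('Merge reviewed changes', sha=pull.head.sha)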
<SYSTEM_TASK:> Update this pull request. <END_TASK> <USER_TASK:> Description: def update(self, title=None, body=None, state=None): """Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool """
data = {'title': title, 'body': body, 'state': state} json = None self._remove_none(data) if data: json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
<SYSTEM_TASK:> Reply to this review comment with a new review comment. <END_TASK> <USER_TASK:> Description: def reply(self, body): """Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """
url = self._build_url('comments', base_url=self.pull_request_url) index = self._api.rfind('/') + 1 in_reply_to = self._api[index:] json = self._json(self._post(url, data={ 'body': body, 'in_reply_to': in_reply_to }), 201) return ReviewComment(json, self) if json else None
<SYSTEM_TASK:> Add ``login`` to this team. <END_TASK> <USER_TASK:> Description: def add_member(self, login): """Add ``login`` to this team. :returns: bool """
warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._put(url), 204, 404)
<SYSTEM_TASK:> Add ``repo`` to this team. <END_TASK> <USER_TASK:> Description: def add_repo(self, repo): """Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool """
url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._put(url), 204, 404)
<SYSTEM_TASK:> Edit this team. <END_TASK> <USER_TASK:> Description: def edit(self, name, permission=''): """Edit this team. :param str name: (required) :param str permission: (optional), ('pull', 'push', 'admin') :returns: bool """
if name: data = {'name': name, 'permission': permission} json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
<SYSTEM_TASK:> Checks if this team has access to ``repo`` <END_TASK> <USER_TASK:> Description: def has_repo(self, repo): """Checks if this team has access to ``repo`` :param str repo: (required), form: 'user/repo' :returns: bool """
url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._get(url), 204, 404)
<SYSTEM_TASK:> Invite the user to join this team. <END_TASK> <USER_TASK:> Description: def invite(self, username): """Invite the user to join this team. This returns a dictionary like so:: {'state': 'pending', 'url': 'https://api.github.com/teams/...'} :param str username: (required), user to invite to join this team. :returns: dictionary """
url = self._build_url('memberships', username, base_url=self._api) return self._json(self._put(url), 200)
<SYSTEM_TASK:> Retrieve the membership information for the user. <END_TASK> <USER_TASK:> Description: def membership_for(self, username): """Retrieve the membership information for the user. :param str username: (required), name of the user :returns: dictionary """
url = self._build_url('memberships', username, base_url=self._api) json = self._json(self._get(url), 200) return json or {}
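A combined usage sketch for the invitation and membership methods above; the organization, team and user names are illustrative.

import github3

gh = github3.login('octocat', password='secret')
org = gh.organization('my-org')

teams = list(org.iter_teams())
team = teams[0]   # first team, purely for illustration

team.invite('hubot')
membership = team.membership_for('hubot')
# membership.get('state') is 'pending' until the invitation is accepted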
<SYSTEM_TASK:> Remove ``login`` from this team. <END_TASK> <USER_TASK:> Description: def remove_member(self, login): """Remove ``login`` from this team. :param str login: (required), login of the member to remove :returns: bool """
warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
<SYSTEM_TASK:> Revoke this user's team membership. <END_TASK> <USER_TASK:> Description: def revoke_membership(self, username): """Revoke this user's team membership. :param str username: (required), name of the team member :returns: bool """
url = self._build_url('memberships', username, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
<SYSTEM_TASK:> Remove ``repo`` from this team. <END_TASK> <USER_TASK:> Description: def remove_repo(self, repo): """Remove ``repo`` from this team. :param str repo: (required), form: 'user/repo' :returns: bool """
url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
<SYSTEM_TASK:> Add ``login`` to ``team`` and thereby to this organization. <END_TASK> <USER_TASK:> Description: def add_member(self, login, team): """Add ``login`` to ``team`` and thereby to this organization. .. warning:: This method is no longer valid. To add a member to a team, you must now retrieve the team directly, and use the ``invite`` method. Any user that is to be added to an organization, must be added to a team as per the GitHub api. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the user when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_member`` on that team directly. :param str login: (required), login name of the user to be added :param str team: (required), team name :returns: bool """
warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) for t in self.iter_teams(): if team == t.name: return t.add_member(login) return False