| column | type | lengths |
| --- | --- | --- |
| in_source_id | string | 13 to 58 |
| issue | string | 3 to 241k |
| before_files | list | 0 to 3 items |
| after_files | list | 0 to 3 items |
| pr_diff | string | 109 to 107M |
pytorch__torchdynamo-1205
debug saving pre- and post-fusion IR doesn't work

Running the repro in #1178 with TORCHINDUCTOR_TRACE=1 results in

```
  File "/scratch/ngimel/work/repos/torchdynamo/torchinductor/debug.py", line 304, in ir_post_fusion
    self._write_ir("ir_post_fusion.txt", nodes)
  File "/scratch/ngimel/work/repos/torchdynamo/torchinductor/debug.py", line 309, in _write_ir
    fd.write(node.debug_str())
  File "/scratch/ngimel/work/repos/torchdynamo/torchinductor/scheduler.py", line 83, in debug_str
    f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
  File "/scratch/ngimel/work/repos/torchdynamo/torchinductor/scheduler.py", line 38, in pformat
    result = pprint.pformat(obj, indent=4)
  File "/scratch/ngimel/work/env/lib/python3.9/pprint.py", line 58, in pformat
    return PrettyPrinter(indent=indent, width=width, depth=depth,
  File "/scratch/ngimel/work/env/lib/python3.9/pprint.py", line 153, in pformat
    self._format(object, sio, 0, 0, {}, 0)
  File "/scratch/ngimel/work/env/lib/python3.9/pprint.py", line 176, in _format
    p(self, object, stream, indent, allowance, context, level + 1)
  File "/scratch/ngimel/work/env/lib/python3.9/pprint.py", line 248, in _pprint_set
    object = sorted(object, key=_safe_key)
  File "/scratch/ngimel/work/env/lib/python3.9/site-packages/sympy/core/relational.py", line 511, in __bool__
    raise TypeError("cannot determine truth value of Relational")
TypeError: cannot determine truth value of Relational
```

(similar for pre_fusion)
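The failure is reproducible outside torchinductor. Below is a minimal sketch assuming only `pprint` and `sympy`; the symbol names and expressions are illustrative stand-ins for the scheduler's dependency index expressions, not the actual MemoryDep objects. Once the set's repr is too wide for one line, pprint sorts the elements, sympy's rich comparison returns an unevaluated Relational, and taking its truth value raises the TypeError above. Pre-sorting the set by its string form, as the updated `pformat` in `after_files` below does, avoids the symbolic comparison entirely.

```python
import pprint
import sympy

# Illustrative stand-ins for the sympy index expressions held by the
# scheduler's dependency sets (not the real MemoryDep objects).
i0, i1, i2, i3 = sympy.symbols("index_var_0 index_var_1 index_var_2 index_var_3")
deps = {i0 * 128 + i1, i2 * 64 + i3, i0 + i3 * 32, i1 * 256 + i2}

try:
    # The repr is wider than pprint's default width, so _pprint_set sorts
    # the elements; bool() of the resulting Relational then raises.
    pprint.pformat(deps, indent=4)
except TypeError as err:
    print(err)  # cannot determine truth value of Relational

# Workaround applied by the fix: sort by string form before formatting.
print(pprint.pformat(sorted(deps, key=str), indent=4))
```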
[ { "content": "import collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport os\nimport pprint\nimport textwrap\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Union\n\nimport numpy as np\nimport sympy\nimport torch\n\nfrom torchdynamo.utils import dynamo_timed\n\nfrom . import config\nfrom . import dependencies\nfrom . import ir\nfrom .codegen.triton_template import should_use_template\nfrom .codegen.triton_template import template_can_fuse\nfrom .codegen.triton_template import template_codegen\nfrom .dependencies import MemoryDep\nfrom .dependencies import StarDep\nfrom .sizevars import SimplifyIndexing\nfrom .utils import cache_on_self\nfrom .utils import cmp\nfrom .virtualized import V\n\nlog = logging.getLogger(__name__)\n\n\ndef pformat(obj):\n result = pprint.pformat(obj, indent=4)\n if \"\\n\" in result:\n return f\"\\n{textwrap.indent(result, ' '*4)}\"\n return result\n\n\nclass OutputNode:\n def __init__(self, dep):\n self.unmet_dependencies = {dep}\n self.inverse_users = []\n\n def is_reduction(self):\n return False\n\n def get_alias_names(self):\n return ()\n\n def get_name(self):\n return \"OUTPUT\"\n\n __repr__ = get_name\n\n\nclass BaseSchedulerNode:\n def __init__(self, scheduler: \"Scheduler\", node: ir.Buffer):\n self.scheduler: \"Scheduler\" = scheduler\n self.node: ir.Buffer = node\n self.users: Optional[List[NodeUser]] = None\n self.inverse_users: List[BaseSchedulerNode] = []\n self.set_read_writes(node.get_read_writes())\n self.recursive_predecessors: Optional[Set[str]] = None\n self.min_order: Optional[int] = None\n self.max_order: Optional[int] = None\n self.last_usage: Set[str] = None # buffers that won't be used after this kernel\n\n def __repr__(self):\n return f\"{type(self).__name__}(name={self.get_name()!r})\"\n\n def debug_str(self):\n \"\"\"Longer form printout for trace logs\"\"\"\n name = self.get_name()\n lines = [\n f\"{name}: {type(self).__name__}({type(self.node).__name__})\",\n f\"{name}.writes = {pformat(self.read_writes.writes)}\",\n f\"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}\",\n f\"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}\",\n ]\n try:\n lines += [\n self.debug_str_extra(),\n ]\n except Exception:\n log.warning(\"Ignoring error in debug_str()\", exc_info=True)\n return \"\\n\".join(lines).rstrip()\n\n def debug_str_extra(self):\n return \"\"\n\n def log_details(self):\n log.info(\n \"%s: unmet_dependencies = %s, writes = %s\",\n self,\n self.unmet_dependencies,\n self.read_writes.writes,\n )\n\n def update_mutated_names(self, renames: Dict[str, str]):\n self.set_read_writes(self.read_writes.rename(renames))\n\n def add_mutation_dep(self, name):\n self.set_read_writes(self.read_writes.with_read(name))\n\n def set_users(self, users: List[\"NodeUser\"]):\n # deduplicate\n result: Dict[int, NodeUser] = {}\n for use in users:\n if id(use.node) in result:\n result[id(use.node)] = NodeUser(\n use.node, result[id(use.node)].can_inplace and use.can_inplace\n )\n else:\n result[id(use.node)] = use\n self.users = list(result.values())\n\n def get_aliases(self):\n return self.node.get_alias_names()\n\n def get_mutations(self):\n return self.node.get_mutation_names()\n\n def set_read_writes(self, rw: dependencies.ReadWrites):\n self.read_writes: dependencies.ReadWrites = rw\n self.unmet_dependencies = self.read_writes.reads\n self.prune_deps()\n\n def used_buffer_names(self) -> 
Set[str]:\n return {\n dep.name\n for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes)\n }\n\n def prune_deps(self):\n self.unmet_dependencies = {\n dep\n for dep in self.unmet_dependencies\n if dep.name not in self.scheduler.available_buffer_names\n }\n\n def get_name(self) -> str:\n return self.node.get_name()\n\n def get_first_name(self) -> str:\n return self.get_name()\n\n def get_names(self) -> Set[str]:\n return set([self.get_name()])\n\n def get_nodes(self) -> List[\"BaseSchedulerNode\"]:\n return [self]\n\n def get_device(self):\n return self.node.get_device()\n\n def is_reduction(self):\n return False\n\n def is_template(self):\n return False\n\n def is_extern(self):\n return False\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n return False\n\n def allocate(self):\n if self.node.should_allocate() or should_use_template(self.node):\n # if self.node should allocate or\n # if self.node is generated by TritonKernelTemplates\n # because Triton kernel could not allocate tensor itself\n V.graph.wrapper_code.codegen_allocation(self.node)\n\n def can_free(self):\n for use in self.users:\n if isinstance(use.node, OutputNode):\n return False\n return True\n\n\nclass ExternKernelSchedulerNode(BaseSchedulerNode):\n def debug_str_extra(self):\n return f\"{self.get_name()}.node.kernel = {getattr(self.node, 'kernel', None)}\"\n\n def is_extern(self):\n return True\n\n\nclass TemplateSchedulerNode(BaseSchedulerNode):\n def __init__(self, scheduler: \"Scheduler\", node: ir.ExternKernel, group_fn):\n super().__init__(scheduler, node)\n (self._sizes, self._stride) = node.get_group_stride()\n self.group = (node.get_device(), group_fn(self._sizes))\n self.set_read_writes(node.get_read_writes())\n self.update_dep_type()\n\n def is_template(self):\n return True\n\n def update_dep_type(self):\n assert len(self.read_writes.writes) == 1\n write = self.read_writes.writes.pop()\n if isinstance(write, StarDep):\n name = write.name\n canonicalized_index, canonicalized_size = self.node.canonicalize()\n new_dep = MemoryDep(name, canonicalized_index, canonicalized_size)\n self.read_writes.writes.add(new_dep)\n else:\n self.read_writes.writes.add(write)\n\n def get_ranges(self):\n return self._sizes\n\n\nclass NopKernelSchedulerNode(BaseSchedulerNode):\n pass\n\n\nclass SchedulerNode(BaseSchedulerNode):\n def __init__(self, scheduler: \"Scheduler\", node: ir.ComputedBuffer, group_fn):\n super().__init__(scheduler, node)\n (\n self._sizes,\n self._body,\n ) = node.simplify_and_reorder()\n\n self.group = (node.get_device(), group_fn(self._sizes))\n\n self.set_read_writes(\n dependencies.extract_read_writes(self._body, *self._sizes, normalize=True)\n )\n if self.is_reduction():\n # reduction has last (reduced) dim in its sizes, and some\n # downstream dependencies get confused by it\n self.read_writes.writes = self.read_writes.writes | {\n w.strip_last_size() for w in self.read_writes.writes\n }\n # reduction not on the last dim swaps the sizes, and downstream\n # dependencies expect unswapped\n # TODO swapping sizes doesn't work, leads to\n # File \"/scratch/ngimel/work/repos/torchdynamo/torchinductor/sizevars.py\", line 130, in guard_equals\n # if len(right.free_symbols) < len(left.free_symbols):\n # AttributeError: 'int' object has no attribute 'free_symbols'\n # even though memory dep looks correct\n # self.read_writes.writes = self.read_writes.writes | {\n # w.maybe_swap_sizes() for w in self.read_writes.writes\n # }\n\n def debug_str_extra(self):\n name = self.get_name()\n 
lines = [\n f\"{name}.group.device = {self.group[0]}\",\n f\"{name}.group.iteration = {self.group[1]}\",\n f\"{name}.sizes = {self._sizes}\",\n ]\n if self.get_aliases():\n lines.append(f\"{name}.aliases = {pformat(self.get_aliases())}\")\n if self.get_mutations():\n lines.append(f\"{name}.mutations = {pformat(self.get_mutations())}\")\n if isinstance(self._body, ir.LoopBody):\n lines.append(f\"class {name}_loop_body:\")\n lines.append(textwrap.indent(self._body.debug_str(), \" \"))\n return \"\\n\".join(lines)\n\n def get_ranges(self):\n return self._sizes\n\n def is_reduction(self):\n return bool(self.node.data.get_reduction_type())\n\n def allocate(self):\n if (\n not self.node.should_allocate()\n or self.node.get_alias_names()\n or self.node.get_mutation_names()\n ):\n return super().allocate()\n\n if config.inplace_buffers:\n assert False, \"https://github.com/pytorch/torchdynamo/issues/823\"\n \"\"\"\n for read in self.read_writes.reads:\n input_node: BaseSchedulerNode = self.scheduler.name_to_node.get(\n read.name\n )\n if input_node and V.graph.wrapper_code.can_reuse(input_node):\n remaining_uses = [\n x\n for x in input_node.users\n if x.node.get_name()\n not in self.scheduler.available_buffer_names\n ]\n if (\n len(remaining_uses) == 1\n and remaining_uses[0].can_inplace\n and remaining_uses[0].node is self\n ):\n V.graph.wrapper_code.codegen_inplace_reuse(\n input_node.node, self.node\n )\n V.kernel.args.make_inplace(\n input_node.get_name(), self.get_name()\n )\n return\n \"\"\"\n super().allocate()\n\n def run(self, *index_vars):\n self.mark_run()\n self.codegen(index_vars)\n\n def mark_run(self):\n self.allocate()\n\n def codegen(self, index_vars):\n sizes = self._sizes\n assert sum(map(len, sizes)) == sum(map(len, index_vars))\n var_ranges = dict(\n zip(\n itertools.chain.from_iterable(index_vars),\n itertools.chain.from_iterable(sizes),\n )\n )\n try:\n with V.set_ops_handler(\n SimplifyIndexing(V.get_ops_handler(), var_ranges)\n ), V.kernel.set_current_node(self):\n self._body(*index_vars)\n except Exception:\n log.fatal(\"Error in codegen for %s\", self.node)\n raise\n\n def pointwise_read_writes(self):\n \"\"\"\n Get the memory dependencies in the non-reduction axis.\n \"\"\"\n sizes, reduction_sizes = self._sizes\n\n def fn(index):\n return self._body(index, [sympy.Integer(0) for _ in reduction_sizes])\n\n return dependencies.extract_read_writes(fn, sizes)\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n if self.get_aliases():\n return False\n if len(self.read_writes.writes) == 1 and hasattr(read_dep, \"index\"):\n write_dep = next(iter(self.read_writes.writes))\n return read_dep.index == write_dep.index and read_dep.size == write_dep.size\n return False\n\n\nclass FusedSchedulerNode(BaseSchedulerNode):\n \"\"\"\n This is a \"fake\" scheduler node that represents a group of scheduler nodes\n that are meant to be fused together. 
The way it does this is by maintaining\n its unmet dependencies as the union of its constituent nodes.\n \"\"\"\n\n @classmethod\n def fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n assert node1.scheduler is node2.scheduler\n return cls(node1.scheduler, node1.get_nodes() + node2.get_nodes())\n\n def __init__(self, scheduler: \"Scheduler\", snodes: List[SchedulerNode]):\n # NB: No need to call super().__init__() because we don't need to re-use any of its logic.\n self.snodes = snodes\n self.scheduler = scheduler\n self.node = None # type: ignore[assignment]\n self.users = None\n self.inverse_users = []\n self.group = max(snodes, key=lambda x: int(x.is_reduction())).group\n self.recursive_predecessors = functools.reduce(\n set.union, [x.recursive_predecessors for x in snodes]\n )\n self.set_read_writes(\n functools.reduce(\n dependencies.ReadWrites.merge, [x.read_writes for x in snodes]\n )\n )\n names = set(self.get_names())\n self.unmet_dependencies = {\n dep\n for dep in functools.reduce(\n set.union, [x.unmet_dependencies for x in snodes]\n )\n if dep.name not in names\n } - self.read_writes.writes\n self.min_order = min([x.min_order for x in self.snodes])\n self.max_order = max([x.max_order for x in self.snodes])\n\n @cache_on_self\n def get_name(self) -> str:\n return \"_\".join([x.get_name() for x in self.snodes])\n\n def get_first_name(self) -> str:\n return self.snodes[0].get_name()\n\n @cache_on_self\n def get_names(self) -> Set[str]:\n return functools.reduce(set.union, [x.get_names() for x in self.snodes])\n\n def debug_str_extra(self):\n return (\n f\"{self.get_name()}.snodes = {pformat([x.get_name() for x in self.snodes])}\"\n )\n\n @cache_on_self\n def used_buffer_names(self) -> Set[str]:\n return functools.reduce(set.union, [x.used_buffer_names() for x in self.snodes])\n\n def get_nodes(self) -> List[BaseSchedulerNode]:\n return self.snodes\n\n def __repr__(self):\n return f\"{type(self).__name__}(nodes={self.get_name()})\"\n\n @cache_on_self\n def is_reduction(self):\n return any(x.is_reduction() for x in self.snodes)\n\n @cache_on_self\n def is_template(self):\n return any(x.is_template() for x in self.snodes)\n\n def get_device(self):\n return self.group[0]\n\n # None of these need to be implemented, as a FusedSchedulerNode is just an\n # abstraction for scheduling purposes\n def update_mutated_names(self, renames: Dict[str, str]):\n raise NotImplementedError\n\n def add_mutation_dep(self, name):\n raise NotImplementedError\n\n def set_users(self, users: List[\"NodeUser\"]):\n raise NotImplementedError\n\n def get_aliases(self):\n raise NotImplementedError\n\n def get_mutations(self):\n raise NotImplementedError\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n raise NotImplementedError\n\n def allocate(self):\n raise NotImplementedError\n\n def can_free(self):\n raise NotImplementedError\n\n\ndef pick_loop_order(stride_lengths, sizes, priority_idx=[]):\n \"\"\"\n A heuristic to decide loop iteration orders. 
This has not been well\n tuned and may be something we should autotune.\n \"\"\"\n\n @functools.cmp_to_key\n def index_cmp(a, b):\n if sizes[a] == 1 or sizes[b] == 1:\n # 1-sizes don't matter, just move them to the end\n return cmp(sizes[a] == 1, sizes[b] == 1)\n\n a_first = np.logical_or(\n stride_lengths[:, b] == 0, stride_lengths[:, a] < stride_lengths[:, b]\n ).all()\n b_first = np.logical_or(\n stride_lengths[:, a] == 0, stride_lengths[:, a] > stride_lengths[:, b]\n ).all()\n\n if a_first and not b_first:\n return -1\n if b_first and not a_first:\n return 1\n\n # otherwise contiguous\n return cmp(b, a)\n\n order = list(reversed(range(stride_lengths.shape[1])))\n if len(priority_idx) > 0:\n # if we have priority node, only use that node's order\n stride_lengths = stride_lengths[priority_idx]\n if config.pick_loop_orders:\n order.sort(key=index_cmp)\n return order\n\n\[email protected]\nclass NodeUser:\n node: BaseSchedulerNode\n can_inplace: bool = False\n\n def get_name(self):\n return self.node.get_name()\n\n\nclass Scheduler:\n @dynamo_timed\n def __init__(self, nodes):\n super(Scheduler, self).__init__()\n self.backends = {}\n\n self.nodes = []\n self.available_buffer_names = {\n *V.graph.graph_inputs.keys(),\n *V.graph.constants.keys(),\n }\n for node in nodes:\n assert (\n node.origins is not None\n ), \"All nodes passed to scheduling must have an origin\"\n if node.is_no_op():\n self.nodes.append(NopKernelSchedulerNode(self, node))\n elif isinstance(node, ir.ComputedBuffer):\n group_fn = self.get_backend(node.get_device()).group_fn\n self.nodes.append(SchedulerNode(self, node, group_fn))\n elif isinstance(node, ir.ExternKernel) and should_use_template(node):\n group_fn = self.get_backend(node.get_device()).group_fn\n self.nodes.append(TemplateSchedulerNode(self, node, group_fn))\n elif isinstance(node, ir.ExternKernel):\n self.nodes.append(ExternKernelSchedulerNode(self, node))\n else:\n assert False, node\n # some new constants could have been created above\n self.available_buffer_names.update(V.graph.constants.keys())\n for node in self.nodes:\n node.prune_deps()\n\n self.name_to_node = {node.get_name(): node for node in self.nodes}\n self.name_to_fused_node = None # set in fuse_nods()\n\n # we handle mutation by renaming modified versions of the same\n # buffer in the dependency graph to prevent cycles.\n # mutation_renames: tracks the current name for a given buffer\n # (changed once per mutation)\n self.mutation_real_name = {}\n # mutation_real_name: maps back to the original name for codegen\n self.mutation_renames = {}\n\n self.compute_dependencies()\n self.topological_sort_schedule()\n self.compute_predecessors()\n self.dead_node_elimination()\n\n V.debug.ir_pre_fusion(self.nodes)\n self.num_orig_nodes = len(self.nodes)\n self.name_to_fused_node = {n.get_name(): n for n in self.nodes}\n self.fuse_nodes()\n self.compute_last_usage()\n V.debug.ir_post_fusion(self.nodes)\n V.debug.graph_diagram(self.nodes)\n self.debug_draw_graph()\n\n # used during codegen:\n self.current_device = None\n self.buffer_names_to_free = set()\n self.buffer_names_no_longer_needed = set()\n\n def debug_draw_graph(self):\n \"\"\"Generate an image of the graph for debugging\"\"\"\n if os.environ.get(\"INDUCTOR_WRITE_SCHEDULER_GRAPH\", None) == \"1\":\n from .debug import draw_buffers\n\n draw_buffers(self.nodes, print_graph=True)\n\n def debug_print_nodes(self, label):\n if log.isEnabledFor(logging.INFO):\n log.info(\"%s:\", label)\n for node in self.nodes:\n node.log_details()\n\n def 
compute_dependencies(self):\n \"\"\"\n Create dependency edges between nodes, handling aliasing and\n mutation properly.\n \"\"\"\n name_to_users = collections.defaultdict(list)\n\n # handle aliasing by using python aliasing in name_to_users\n # if foo aliases bar then we will make name_to_users[\"foo\"] point\n # to the same python list as name_to_users[\"bar\"]\n for node1 in self.nodes:\n node1_name = node1.get_name()\n for node2_name in node1.get_aliases():\n if node1_name in name_to_users and node2_name in name_to_users:\n # merge the two\n list1 = name_to_users[node1_name]\n list2 = name_to_users[node2_name]\n combined = list1 + list2\n for key in name_to_users.keys():\n if name_to_users[key] is list1 or name_to_users[key] is list2:\n name_to_users[key] = combined\n elif node1_name in name_to_users:\n name_to_users[node2_name] = name_to_users[node1_name]\n else:\n name_to_users[node1_name] = name_to_users[node2_name]\n\n def rename(n):\n if n in self.mutation_renames:\n return rename(self.mutation_renames[n])\n return n\n\n def dep_closure(node_name):\n reachable_names = {node_name}\n node = self.name_to_node[node_name]\n write_dep = list(node.read_writes.writes)[0]\n for read_dep in node.read_writes.reads:\n if (\n read_dep.name in self.name_to_node\n and read_dep.index == write_dep.index\n and read_dep.size == write_dep.size\n ):\n reachable_names.update(dep_closure(read_dep.name))\n return reachable_names\n\n def add_user(used_by_name, user_node, can_inplace=False):\n name_to_users[rename(used_by_name)].append(NodeUser(user_node, can_inplace))\n\n for node in self.nodes:\n # a node will mutate either 0 or 1 buffers\n for alt_name in node.get_mutations():\n alt_name = rename(alt_name)\n # this node must run after the prior writer\n add_user(alt_name, node)\n node.add_mutation_dep(alt_name)\n for other_node in name_to_users[alt_name]:\n # this node must run after all prior readers\n other_name = rename(other_node.get_name())\n known_dep_node_names = dep_closure(node.get_name())\n if other_name not in known_dep_node_names:\n # If this node alreay directly or indirectly depends on other_node,\n # we don't need to insert an extra StarDep.\n node.add_mutation_dep(other_name)\n add_user(other_name, node)\n\n # add normal non-mutation dependencies\n for read in node.read_writes.reads:\n add_user(read.name, node, node.can_inplace(read))\n\n node.update_mutated_names(self.mutation_renames)\n\n # update our renaming scheme for the next iteration\n for alt_name in node.get_mutations():\n self.mutation_renames[rename(alt_name)] = node.get_name()\n self.mutation_renames[alt_name] = node.get_name()\n self.mutation_real_name[node.get_name()] = self.mutation_real_name.get(\n alt_name, alt_name\n )\n\n # make sure outputs aren't dead-code-eliminated\n for node_name in V.graph.get_output_names():\n add_user(node_name, OutputNode(StarDep(node_name)))\n\n # make sure input mutation isn't dead-code-eliminated\n for name in self.mutation_renames:\n if name in V.graph.graph_inputs:\n add_user(name, OutputNode(StarDep(name)))\n V.graph.mutated_inputs.add(name)\n\n # copy users information onto the nodes\n for node in self.nodes:\n node.set_users(name_to_users[node.get_name()])\n\n # populate inverse_users\n for node in self.nodes:\n for user in node.users:\n user.node.inverse_users.append(node)\n\n def dead_node_elimination(self):\n \"\"\"\n Remove any nodes without users\n \"\"\"\n updated_nodes = []\n for node in self.nodes:\n if node.users:\n updated_nodes.append(node)\n else:\n # dead code\n 
log.debug(\"removed dead node: %s\", node.get_name())\n V.graph.removed_buffers.add(node.get_name())\n self.nodes = updated_nodes\n\n def topological_sort_schedule(self):\n \"\"\"\n Ensure self.nodes is in topologically sorted order\n \"\"\"\n seen = set()\n name_to_node = dict()\n result = []\n\n def visit(n):\n if n not in seen:\n seen.add(n)\n for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):\n visit(name_to_node[dep.name])\n result.append(n)\n\n for node in self.nodes:\n for name in node.get_names():\n name_to_node[name] = node\n for node in self.nodes:\n visit(node)\n self.nodes = result\n\n def compute_predecessors(self):\n \"\"\"\n Populate each node.recursive_predecessors\n \"\"\"\n # note self.nodes is topologically sorted\n name_to_predecessors = {}\n for node in self.nodes:\n recursive_predecessors = set()\n for dep in node.unmet_dependencies:\n recursive_predecessors.add(dep.name)\n recursive_predecessors |= name_to_predecessors[dep.name]\n name_to_predecessors[node.get_name()] = recursive_predecessors\n node.recursive_predecessors = recursive_predecessors\n\n for order, node in enumerate(self.nodes):\n node.min_order = order\n node.max_order = order\n\n def fuse_nodes(self):\n \"\"\"\n Mutates self.nodes to combine nodes into FusedSchedulerNodes.\n \"\"\"\n for _ in range(10):\n old_len = len(self.nodes)\n self.fuse_nodes_once()\n if len(self.nodes) == old_len:\n break\n\n def fuse_nodes_once(self):\n \"\"\"\n Mutates self.nodes to combine nodes into FusedSchedulerNodes.\n\n This relies on two key functions to control the logic:\n - self.can_fuses(): checks if a fusion is legal\n - self.score_fusion(): assigns priority to a given fusion\n \"\"\"\n fused_nodes = set(self.nodes)\n for node1, node2 in self.get_possible_fusions():\n node1 = self.name_to_fused_node[node1.get_first_name()]\n node2 = self.name_to_fused_node[node2.get_first_name()]\n if self.can_fuse(node1, node2) and not self.will_fusion_create_cycle(\n node1, node2\n ):\n node3 = FusedSchedulerNode.fuse(node1, node2)\n fused_nodes.remove(node1)\n fused_nodes.remove(node2)\n fused_nodes.add(node3)\n self.name_to_fused_node.update(\n {n.get_name(): node3 for n in node3.get_nodes()}\n )\n self.nodes = sorted(fused_nodes, key=lambda x: x.min_order)\n self.topological_sort_schedule()\n\n def get_possible_fusions(self):\n \"\"\"\n Helper to find all legal fusion opportunities, sorted by self.score_fusion()\n \"\"\"\n possible_fusions = []\n seen = set()\n\n def check_all_pairs(nodes):\n for node1_index, node1 in enumerate(nodes):\n for node2 in nodes[node1_index + 1 :]:\n key = (node1, node2)\n if key in seen:\n continue\n seen.add(key)\n\n if self.can_fuse(node1, node2):\n possible_fusions.append(key)\n elif node2.is_template() and self.can_fuse(node2, node1):\n # epilogue fusions are order dependent\n possible_fusions.append((node2, node1))\n\n buffer_names_grouping = collections.defaultdict(list)\n for node in self.nodes:\n for buf in node.used_buffer_names():\n buffer_names_grouping[buf].append(node)\n for node_grouping in buffer_names_grouping.values():\n check_all_pairs(node_grouping)\n\n if config.aggressive_fusion:\n group_grouping = collections.defaultdict(list)\n for node in self.nodes:\n group = getattr(node, \"group\", None)\n if group:\n group_grouping[group].append(node)\n for node_grouping in group_grouping.values():\n check_all_pairs(node_grouping)\n\n return sorted(possible_fusions, key=self.score_fusion_key, reverse=True)\n\n def will_fusion_create_cycle(self, node1, node2):\n \"\"\"Finds 
whether there's a path from src to dst caused indirectly by fusion\"\"\"\n\n def check(node):\n if isinstance(node, FusedSchedulerNode) and node not in visited:\n visited.add(node)\n return bool(combined_names & node.recursive_predecessors) or any(\n check(self.name_to_fused_node[n])\n for n in node.recursive_predecessors - combined_predecessors\n )\n return False\n\n visited = set()\n combined_names = node1.get_names() | node2.get_names()\n combined_predecessors = (\n node1.recursive_predecessors | node2.recursive_predecessors\n ) - combined_names\n return any(check(self.name_to_fused_node[n]) for n in combined_predecessors)\n\n def can_fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n \"\"\"\n Determine if it is possible to combine node1 and node2 into a\n single fused node.\n \"\"\"\n if node1 is node2:\n return False\n if (\n isinstance(node1, (ExternKernelSchedulerNode, NopKernelSchedulerNode))\n and not node1.is_template()\n ):\n return False\n if (\n isinstance(node2, (ExternKernelSchedulerNode, NopKernelSchedulerNode))\n and not node2.is_template()\n ):\n return False\n if node2.get_names() & node1.recursive_predecessors:\n return False # node2 must go before node1\n if node2.is_template():\n return False # only epilogues\n\n device = node1.get_device()\n if device != node2.get_device():\n return False # wrong device\n\n no_shared_data = self.score_fusion_memory(node1, node2) == 0\n if no_shared_data and (\n not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()\n ):\n return False # heuristic not needed for correctness\n\n if len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size:\n return False # heuristic not needed for correctness\n\n if node1.get_names() & node2.recursive_predecessors:\n # node2 depends on node1 outputs\n if not self.can_fuse_vertical(node1, node2):\n return False\n if node1.is_template():\n return template_can_fuse(node1, node2)\n return self.get_backend(device).can_fuse_vertical(node1, node2)\n else: # nodes don't depend on each other, but may have common reads\n if node1.is_template():\n return False\n return self.get_backend(device).can_fuse_horizontal(node1, node2)\n\n def can_fuse_vertical(self, node1, node2):\n \"\"\"\n Check if it is legal to fuse a consumer (node2) into a producer (node1).\n\n We can fuse them if all the reads of node2 either match\n corresponding writes in node1, or are written by nodes that can\n be scheduled before the fusion of node1 and node2.\n \"\"\"\n node1_names = node1.get_names()\n remaining_deps = {\n dep.name for dep in node2.unmet_dependencies - node1.read_writes.writes\n }\n if remaining_deps & node1_names:\n # MemoryDeps didn't match and read different locations of the same buffer.\n # Examples here include:\n # - MemoryDep(\"foo\", x) != MemoryDep(\"foo\", x + 1)\n # - MemoryDep(\"foo\", x) != StarDep(\"foo\")\n return False\n for name in remaining_deps:\n if node1_names & self.name_to_fused_node[name].recursive_predecessors:\n return False\n return True\n\n def score_fusion(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n \"\"\"\n Assign a score (higher comes first) to the fusion of node1\n and node2. 
When different fusions conflict with each other,\n this is the way we decide what order to run them in.\n\n Our current score is based on:\n - Estimate of the saved memory operations\n - Fusions closer together in original order\n \"\"\"\n memory_score = self.score_fusion_memory(node1, node1)\n proximity_score = -max(\n abs(node1.min_order - node2.max_order),\n abs(node2.min_order - node1.max_order),\n )\n return (\n node1.is_reduction() == node2.is_reduction() and memory_score > 0,\n memory_score,\n proximity_score,\n )\n\n def score_fusion_memory(self, node1, node2):\n \"\"\"\n The first term in our fusion score that estimates number of saved memory operations.\n \"\"\"\n common_memory_deps = (node1.read_writes.reads | node1.read_writes.writes) & (\n node2.read_writes.reads | node2.read_writes.writes\n )\n return sum(dep.numel_hint() for dep in common_memory_deps)\n\n def score_fusion_key(self, nodes):\n \"\"\"\n Shim for list.sort(key=...)\n \"\"\"\n node1, node2 = nodes\n return self.score_fusion(node1, node2)\n\n def compute_last_usage(self):\n \"\"\"\n Populate node.last_usage\n \"\"\"\n\n future_used_buffers = set()\n for node_name in V.graph.get_output_names():\n future_used_buffers.add(node_name)\n\n for node in reversed(self.nodes):\n used_buffers = node.used_buffer_names()\n used_buffers = {self.mutation_real_name.get(k, k) for k in used_buffers}\n node.last_usage = used_buffers - future_used_buffers\n future_used_buffers.update(used_buffers)\n\n def free_buffers(self):\n \"\"\"Free any buffers that are no longer needed\"\"\"\n for name in sorted(self.buffer_names_to_free - V.graph.removed_buffers):\n if name in self.name_to_node:\n node = self.name_to_node[name]\n if node.can_free():\n V.graph.wrapper_code.codegen_free(node.node)\n self.buffer_names_to_free.clear()\n\n def remove_kernel_local_buffers(self):\n \"\"\"\n Any buffers that are both created and have a last use in the\n same kernel can be removed.\n \"\"\"\n for name in V.kernel.store_buffer_names & self.buffer_names_no_longer_needed:\n if (\n name not in V.kernel.must_keep_buffers\n and name not in V.kernel.args.input_buffers\n and name not in self.mutation_renames\n and name not in self.mutation_real_name\n ):\n self.remove_buffer(name)\n\n def remove_buffer(self, name):\n # Assign a special value instead of deleting the entry\n # because we still rely on output_buffers's length to\n # generate unique arg name.\n log.debug(\"remove_buffer(%r)\", name)\n V.kernel.args.output_buffers[name] = \"REMOVED\"\n V.graph.removed_buffers.add(name)\n\n def flush(self):\n for backend in self.backends.values():\n backend.flush()\n self.free_buffers()\n\n def codegen_extern_call(self, scheduler_node: ExternKernelSchedulerNode):\n assert isinstance(scheduler_node, ExternKernelSchedulerNode)\n scheduler_node.allocate()\n node = scheduler_node.node\n node.codegen(V.graph.wrapper_code)\n self.free_buffers()\n\n def codegen_template_call(\n self, scheduler_node: Union[FusedSchedulerNode, TemplateSchedulerNode]\n ):\n node, *epilogue = scheduler_node.get_nodes()\n node.allocate()\n template_codegen(self, node, epilogue)\n self.free_buffers()\n\n def create_backend(self, device: torch.device):\n assert (\n device.type != \"cuda\" or device.index is not None\n ), f\"{device} should have been normalized in lowering\"\n V.graph.device_types.add(device.type)\n if device.type == \"cpu\":\n from .codegen.cpp import CppScheduling\n\n return CppScheduling(self)\n else:\n from .codegen.triton import TritonScheduling\n\n return 
TritonScheduling(self)\n\n def get_backend(self, device: torch.device):\n if device not in self.backends:\n self.backends[device] = self.create_backend(device)\n return self.backends[device]\n\n @dynamo_timed\n def codegen(self):\n for node in self.nodes:\n self.buffer_names_no_longer_needed.update(node.last_usage)\n\n if not isinstance(node, NopKernelSchedulerNode):\n device = node.get_device()\n if (\n device != self.current_device\n or node.is_extern()\n or node.is_template()\n ):\n self.flush()\n self.current_device = device\n\n self.buffer_names_to_free.update(node.last_usage)\n\n if node.is_template():\n self.codegen_template_call(node)\n elif node.is_extern():\n self.codegen_extern_call(node)\n elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):\n self.get_backend(device).codegen_nodes(node.get_nodes())\n else:\n assert isinstance(node, NopKernelSchedulerNode)\n node.allocate()\n\n self.flush()\n", "path": "torchinductor/scheduler.py" } ]
[ { "content": "import collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport os\nimport pprint\nimport textwrap\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Union\n\nimport numpy as np\nimport sympy\nimport torch\n\nfrom torchdynamo.utils import dynamo_timed\n\nfrom . import config\nfrom . import dependencies\nfrom . import ir\nfrom .codegen.triton_template import should_use_template\nfrom .codegen.triton_template import template_can_fuse\nfrom .codegen.triton_template import template_codegen\nfrom .dependencies import MemoryDep\nfrom .dependencies import StarDep\nfrom .sizevars import SimplifyIndexing\nfrom .utils import cache_on_self\nfrom .utils import cmp\nfrom .virtualized import V\n\nlog = logging.getLogger(__name__)\n\n\ndef pformat(obj):\n if isinstance(obj, set):\n # pformat has trouble with sets of sympy exprs\n obj = sorted(obj, key=str)\n result = pprint.pformat(obj, indent=4)\n if \"\\n\" in result:\n return f\"\\n{textwrap.indent(result, ' '*4)}\"\n return result\n\n\nclass OutputNode:\n def __init__(self, dep):\n self.unmet_dependencies = {dep}\n self.inverse_users = []\n\n def is_reduction(self):\n return False\n\n def get_alias_names(self):\n return ()\n\n def get_name(self):\n return \"OUTPUT\"\n\n __repr__ = get_name\n\n\nclass BaseSchedulerNode:\n def __init__(self, scheduler: \"Scheduler\", node: ir.Buffer):\n self.scheduler: \"Scheduler\" = scheduler\n self.node: ir.Buffer = node\n self.users: Optional[List[NodeUser]] = None\n self.inverse_users: List[BaseSchedulerNode] = []\n self.set_read_writes(node.get_read_writes())\n self.recursive_predecessors: Optional[Set[str]] = None\n self.min_order: Optional[int] = None\n self.max_order: Optional[int] = None\n self.last_usage: Set[str] = None # buffers that won't be used after this kernel\n\n def __repr__(self):\n return f\"{type(self).__name__}(name={self.get_name()!r})\"\n\n def debug_str(self):\n \"\"\"Longer form printout for trace logs\"\"\"\n name = self.get_name()\n lines = [\n f\"{name}: {type(self).__name__}({type(self.node).__name__})\",\n f\"{name}.writes = {pformat(self.read_writes.writes)}\",\n f\"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}\",\n f\"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}\",\n ]\n try:\n lines += [\n self.debug_str_extra(),\n ]\n except Exception:\n log.warning(\"Ignoring error in debug_str()\", exc_info=True)\n return \"\\n\".join(lines).rstrip()\n\n def debug_str_extra(self):\n return \"\"\n\n def log_details(self):\n log.info(\n \"%s: unmet_dependencies = %s, writes = %s\",\n self,\n self.unmet_dependencies,\n self.read_writes.writes,\n )\n\n def update_mutated_names(self, renames: Dict[str, str]):\n self.set_read_writes(self.read_writes.rename(renames))\n\n def add_mutation_dep(self, name):\n self.set_read_writes(self.read_writes.with_read(name))\n\n def set_users(self, users: List[\"NodeUser\"]):\n # deduplicate\n result: Dict[int, NodeUser] = {}\n for use in users:\n if id(use.node) in result:\n result[id(use.node)] = NodeUser(\n use.node, result[id(use.node)].can_inplace and use.can_inplace\n )\n else:\n result[id(use.node)] = use\n self.users = list(result.values())\n\n def get_aliases(self):\n return self.node.get_alias_names()\n\n def get_mutations(self):\n return self.node.get_mutation_names()\n\n def set_read_writes(self, rw: dependencies.ReadWrites):\n self.read_writes: dependencies.ReadWrites = rw\n 
self.unmet_dependencies = self.read_writes.reads\n self.prune_deps()\n\n def used_buffer_names(self) -> Set[str]:\n return {\n dep.name\n for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes)\n }\n\n def prune_deps(self):\n self.unmet_dependencies = {\n dep\n for dep in self.unmet_dependencies\n if dep.name not in self.scheduler.available_buffer_names\n }\n\n def get_name(self) -> str:\n return self.node.get_name()\n\n def get_first_name(self) -> str:\n return self.get_name()\n\n def get_names(self) -> Set[str]:\n return set([self.get_name()])\n\n def get_nodes(self) -> List[\"BaseSchedulerNode\"]:\n return [self]\n\n def get_device(self):\n return self.node.get_device()\n\n def is_reduction(self):\n return False\n\n def is_template(self):\n return False\n\n def is_extern(self):\n return False\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n return False\n\n def allocate(self):\n if self.node.should_allocate() or should_use_template(self.node):\n # if self.node should allocate or\n # if self.node is generated by TritonKernelTemplates\n # because Triton kernel could not allocate tensor itself\n V.graph.wrapper_code.codegen_allocation(self.node)\n\n def can_free(self):\n for use in self.users:\n if isinstance(use.node, OutputNode):\n return False\n return True\n\n\nclass ExternKernelSchedulerNode(BaseSchedulerNode):\n def debug_str_extra(self):\n return f\"{self.get_name()}.node.kernel = {getattr(self.node, 'kernel', None)}\"\n\n def is_extern(self):\n return True\n\n\nclass TemplateSchedulerNode(BaseSchedulerNode):\n def __init__(self, scheduler: \"Scheduler\", node: ir.ExternKernel, group_fn):\n super().__init__(scheduler, node)\n (self._sizes, self._stride) = node.get_group_stride()\n self.group = (node.get_device(), group_fn(self._sizes))\n self.set_read_writes(node.get_read_writes())\n self.update_dep_type()\n\n def is_template(self):\n return True\n\n def update_dep_type(self):\n assert len(self.read_writes.writes) == 1\n write = self.read_writes.writes.pop()\n if isinstance(write, StarDep):\n name = write.name\n canonicalized_index, canonicalized_size = self.node.canonicalize()\n new_dep = MemoryDep(name, canonicalized_index, canonicalized_size)\n self.read_writes.writes.add(new_dep)\n else:\n self.read_writes.writes.add(write)\n\n def get_ranges(self):\n return self._sizes\n\n\nclass NopKernelSchedulerNode(BaseSchedulerNode):\n pass\n\n\nclass SchedulerNode(BaseSchedulerNode):\n def __init__(self, scheduler: \"Scheduler\", node: ir.ComputedBuffer, group_fn):\n super().__init__(scheduler, node)\n (\n self._sizes,\n self._body,\n ) = node.simplify_and_reorder()\n\n self.group = (node.get_device(), group_fn(self._sizes))\n\n self.set_read_writes(\n dependencies.extract_read_writes(self._body, *self._sizes, normalize=True)\n )\n if self.is_reduction():\n # reduction has last (reduced) dim in its sizes, and some\n # downstream dependencies get confused by it\n self.read_writes.writes = self.read_writes.writes | {\n w.strip_last_size() for w in self.read_writes.writes\n }\n # reduction not on the last dim swaps the sizes, and downstream\n # dependencies expect unswapped\n # TODO swapping sizes doesn't work, leads to\n # File \"/scratch/ngimel/work/repos/torchdynamo/torchinductor/sizevars.py\", line 130, in guard_equals\n # if len(right.free_symbols) < len(left.free_symbols):\n # AttributeError: 'int' object has no attribute 'free_symbols'\n # even though memory dep looks correct\n # self.read_writes.writes = self.read_writes.writes | {\n # 
w.maybe_swap_sizes() for w in self.read_writes.writes\n # }\n\n def debug_str_extra(self):\n name = self.get_name()\n lines = [\n f\"{name}.group.device = {self.group[0]}\",\n f\"{name}.group.iteration = {self.group[1]}\",\n f\"{name}.sizes = {self._sizes}\",\n ]\n if self.get_aliases():\n lines.append(f\"{name}.aliases = {pformat(self.get_aliases())}\")\n if self.get_mutations():\n lines.append(f\"{name}.mutations = {pformat(self.get_mutations())}\")\n if isinstance(self._body, ir.LoopBody):\n lines.append(f\"class {name}_loop_body:\")\n lines.append(textwrap.indent(self._body.debug_str(), \" \"))\n return \"\\n\".join(lines)\n\n def get_ranges(self):\n return self._sizes\n\n def is_reduction(self):\n return bool(self.node.data.get_reduction_type())\n\n def allocate(self):\n if (\n not self.node.should_allocate()\n or self.node.get_alias_names()\n or self.node.get_mutation_names()\n ):\n return super().allocate()\n\n if config.inplace_buffers:\n assert False, \"https://github.com/pytorch/torchdynamo/issues/823\"\n \"\"\"\n for read in self.read_writes.reads:\n input_node: BaseSchedulerNode = self.scheduler.name_to_node.get(\n read.name\n )\n if input_node and V.graph.wrapper_code.can_reuse(input_node):\n remaining_uses = [\n x\n for x in input_node.users\n if x.node.get_name()\n not in self.scheduler.available_buffer_names\n ]\n if (\n len(remaining_uses) == 1\n and remaining_uses[0].can_inplace\n and remaining_uses[0].node is self\n ):\n V.graph.wrapper_code.codegen_inplace_reuse(\n input_node.node, self.node\n )\n V.kernel.args.make_inplace(\n input_node.get_name(), self.get_name()\n )\n return\n \"\"\"\n super().allocate()\n\n def run(self, *index_vars):\n self.mark_run()\n self.codegen(index_vars)\n\n def mark_run(self):\n self.allocate()\n\n def codegen(self, index_vars):\n sizes = self._sizes\n assert sum(map(len, sizes)) == sum(map(len, index_vars))\n var_ranges = dict(\n zip(\n itertools.chain.from_iterable(index_vars),\n itertools.chain.from_iterable(sizes),\n )\n )\n try:\n with V.set_ops_handler(\n SimplifyIndexing(V.get_ops_handler(), var_ranges)\n ), V.kernel.set_current_node(self):\n self._body(*index_vars)\n except Exception:\n log.fatal(\"Error in codegen for %s\", self.node)\n raise\n\n def pointwise_read_writes(self):\n \"\"\"\n Get the memory dependencies in the non-reduction axis.\n \"\"\"\n sizes, reduction_sizes = self._sizes\n\n def fn(index):\n return self._body(index, [sympy.Integer(0) for _ in reduction_sizes])\n\n return dependencies.extract_read_writes(fn, sizes)\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n if self.get_aliases():\n return False\n if len(self.read_writes.writes) == 1 and hasattr(read_dep, \"index\"):\n write_dep = next(iter(self.read_writes.writes))\n return read_dep.index == write_dep.index and read_dep.size == write_dep.size\n return False\n\n\nclass FusedSchedulerNode(BaseSchedulerNode):\n \"\"\"\n This is a \"fake\" scheduler node that represents a group of scheduler nodes\n that are meant to be fused together. 
The way it does this is by maintaining\n its unmet dependencies as the union of its constituent nodes.\n \"\"\"\n\n @classmethod\n def fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n assert node1.scheduler is node2.scheduler\n return cls(node1.scheduler, node1.get_nodes() + node2.get_nodes())\n\n def __init__(self, scheduler: \"Scheduler\", snodes: List[SchedulerNode]):\n # NB: No need to call super().__init__() because we don't need to re-use any of its logic.\n self.snodes = snodes\n self.scheduler = scheduler\n self.node = None # type: ignore[assignment]\n self.users = None\n self.inverse_users = []\n self.group = max(snodes, key=lambda x: int(x.is_reduction())).group\n self.recursive_predecessors = functools.reduce(\n set.union, [x.recursive_predecessors for x in snodes]\n )\n self.set_read_writes(\n functools.reduce(\n dependencies.ReadWrites.merge, [x.read_writes for x in snodes]\n )\n )\n names = set(self.get_names())\n self.unmet_dependencies = {\n dep\n for dep in functools.reduce(\n set.union, [x.unmet_dependencies for x in snodes]\n )\n if dep.name not in names\n } - self.read_writes.writes\n self.min_order = min([x.min_order for x in self.snodes])\n self.max_order = max([x.max_order for x in self.snodes])\n\n @cache_on_self\n def get_name(self) -> str:\n return \"_\".join([x.get_name() for x in self.snodes])\n\n def get_first_name(self) -> str:\n return self.snodes[0].get_name()\n\n @cache_on_self\n def get_names(self) -> Set[str]:\n return functools.reduce(set.union, [x.get_names() for x in self.snodes])\n\n def debug_str_extra(self):\n return (\n f\"{self.get_name()}.snodes = {pformat([x.get_name() for x in self.snodes])}\"\n )\n\n @cache_on_self\n def used_buffer_names(self) -> Set[str]:\n return functools.reduce(set.union, [x.used_buffer_names() for x in self.snodes])\n\n def get_nodes(self) -> List[BaseSchedulerNode]:\n return self.snodes\n\n def __repr__(self):\n return f\"{type(self).__name__}(nodes={self.get_name()})\"\n\n @cache_on_self\n def is_reduction(self):\n return any(x.is_reduction() for x in self.snodes)\n\n @cache_on_self\n def is_template(self):\n return any(x.is_template() for x in self.snodes)\n\n def get_device(self):\n return self.group[0]\n\n # None of these need to be implemented, as a FusedSchedulerNode is just an\n # abstraction for scheduling purposes\n def update_mutated_names(self, renames: Dict[str, str]):\n raise NotImplementedError\n\n def add_mutation_dep(self, name):\n raise NotImplementedError\n\n def set_users(self, users: List[\"NodeUser\"]):\n raise NotImplementedError\n\n def get_aliases(self):\n raise NotImplementedError\n\n def get_mutations(self):\n raise NotImplementedError\n\n def can_inplace(self, read_dep: dependencies.MemoryDep):\n raise NotImplementedError\n\n def allocate(self):\n raise NotImplementedError\n\n def can_free(self):\n raise NotImplementedError\n\n\ndef pick_loop_order(stride_lengths, sizes, priority_idx=[]):\n \"\"\"\n A heuristic to decide loop iteration orders. 
This has not been well\n tuned and may be something we should autotune.\n \"\"\"\n\n @functools.cmp_to_key\n def index_cmp(a, b):\n if sizes[a] == 1 or sizes[b] == 1:\n # 1-sizes don't matter, just move them to the end\n return cmp(sizes[a] == 1, sizes[b] == 1)\n\n a_first = np.logical_or(\n stride_lengths[:, b] == 0, stride_lengths[:, a] < stride_lengths[:, b]\n ).all()\n b_first = np.logical_or(\n stride_lengths[:, a] == 0, stride_lengths[:, a] > stride_lengths[:, b]\n ).all()\n\n if a_first and not b_first:\n return -1\n if b_first and not a_first:\n return 1\n\n # otherwise contiguous\n return cmp(b, a)\n\n order = list(reversed(range(stride_lengths.shape[1])))\n if len(priority_idx) > 0:\n # if we have priority node, only use that node's order\n stride_lengths = stride_lengths[priority_idx]\n if config.pick_loop_orders:\n order.sort(key=index_cmp)\n return order\n\n\[email protected]\nclass NodeUser:\n node: BaseSchedulerNode\n can_inplace: bool = False\n\n def get_name(self):\n return self.node.get_name()\n\n\nclass Scheduler:\n @dynamo_timed\n def __init__(self, nodes):\n super(Scheduler, self).__init__()\n self.backends = {}\n\n self.nodes = []\n self.available_buffer_names = {\n *V.graph.graph_inputs.keys(),\n *V.graph.constants.keys(),\n }\n for node in nodes:\n assert (\n node.origins is not None\n ), \"All nodes passed to scheduling must have an origin\"\n if node.is_no_op():\n self.nodes.append(NopKernelSchedulerNode(self, node))\n elif isinstance(node, ir.ComputedBuffer):\n group_fn = self.get_backend(node.get_device()).group_fn\n self.nodes.append(SchedulerNode(self, node, group_fn))\n elif isinstance(node, ir.ExternKernel) and should_use_template(node):\n group_fn = self.get_backend(node.get_device()).group_fn\n self.nodes.append(TemplateSchedulerNode(self, node, group_fn))\n elif isinstance(node, ir.ExternKernel):\n self.nodes.append(ExternKernelSchedulerNode(self, node))\n else:\n assert False, node\n # some new constants could have been created above\n self.available_buffer_names.update(V.graph.constants.keys())\n for node in self.nodes:\n node.prune_deps()\n\n self.name_to_node = {node.get_name(): node for node in self.nodes}\n self.name_to_fused_node = None # set in fuse_nods()\n\n # we handle mutation by renaming modified versions of the same\n # buffer in the dependency graph to prevent cycles.\n # mutation_renames: tracks the current name for a given buffer\n # (changed once per mutation)\n self.mutation_real_name = {}\n # mutation_real_name: maps back to the original name for codegen\n self.mutation_renames = {}\n\n self.compute_dependencies()\n self.topological_sort_schedule()\n self.compute_predecessors()\n self.dead_node_elimination()\n\n V.debug.ir_pre_fusion(self.nodes)\n self.num_orig_nodes = len(self.nodes)\n self.name_to_fused_node = {n.get_name(): n for n in self.nodes}\n self.fuse_nodes()\n self.compute_last_usage()\n V.debug.ir_post_fusion(self.nodes)\n V.debug.graph_diagram(self.nodes)\n self.debug_draw_graph()\n\n # used during codegen:\n self.current_device = None\n self.buffer_names_to_free = set()\n self.buffer_names_no_longer_needed = set()\n\n def debug_draw_graph(self):\n \"\"\"Generate an image of the graph for debugging\"\"\"\n if os.environ.get(\"INDUCTOR_WRITE_SCHEDULER_GRAPH\", None) == \"1\":\n from .debug import draw_buffers\n\n draw_buffers(self.nodes, print_graph=True)\n\n def debug_print_nodes(self, label):\n if log.isEnabledFor(logging.INFO):\n log.info(\"%s:\", label)\n for node in self.nodes:\n node.log_details()\n\n def 
compute_dependencies(self):\n \"\"\"\n Create dependency edges between nodes, handling aliasing and\n mutation properly.\n \"\"\"\n name_to_users = collections.defaultdict(list)\n\n # handle aliasing by using python aliasing in name_to_users\n # if foo aliases bar then we will make name_to_users[\"foo\"] point\n # to the same python list as name_to_users[\"bar\"]\n for node1 in self.nodes:\n node1_name = node1.get_name()\n for node2_name in node1.get_aliases():\n if node1_name in name_to_users and node2_name in name_to_users:\n # merge the two\n list1 = name_to_users[node1_name]\n list2 = name_to_users[node2_name]\n combined = list1 + list2\n for key in name_to_users.keys():\n if name_to_users[key] is list1 or name_to_users[key] is list2:\n name_to_users[key] = combined\n elif node1_name in name_to_users:\n name_to_users[node2_name] = name_to_users[node1_name]\n else:\n name_to_users[node1_name] = name_to_users[node2_name]\n\n def rename(n):\n if n in self.mutation_renames:\n return rename(self.mutation_renames[n])\n return n\n\n def dep_closure(node_name):\n reachable_names = {node_name}\n node = self.name_to_node[node_name]\n write_dep = list(node.read_writes.writes)[0]\n for read_dep in node.read_writes.reads:\n if (\n read_dep.name in self.name_to_node\n and read_dep.index == write_dep.index\n and read_dep.size == write_dep.size\n ):\n reachable_names.update(dep_closure(read_dep.name))\n return reachable_names\n\n def add_user(used_by_name, user_node, can_inplace=False):\n name_to_users[rename(used_by_name)].append(NodeUser(user_node, can_inplace))\n\n for node in self.nodes:\n # a node will mutate either 0 or 1 buffers\n for alt_name in node.get_mutations():\n alt_name = rename(alt_name)\n # this node must run after the prior writer\n add_user(alt_name, node)\n node.add_mutation_dep(alt_name)\n for other_node in name_to_users[alt_name]:\n # this node must run after all prior readers\n other_name = rename(other_node.get_name())\n known_dep_node_names = dep_closure(node.get_name())\n if other_name not in known_dep_node_names:\n # If this node alreay directly or indirectly depends on other_node,\n # we don't need to insert an extra StarDep.\n node.add_mutation_dep(other_name)\n add_user(other_name, node)\n\n # add normal non-mutation dependencies\n for read in node.read_writes.reads:\n add_user(read.name, node, node.can_inplace(read))\n\n node.update_mutated_names(self.mutation_renames)\n\n # update our renaming scheme for the next iteration\n for alt_name in node.get_mutations():\n self.mutation_renames[rename(alt_name)] = node.get_name()\n self.mutation_renames[alt_name] = node.get_name()\n self.mutation_real_name[node.get_name()] = self.mutation_real_name.get(\n alt_name, alt_name\n )\n\n # make sure outputs aren't dead-code-eliminated\n for node_name in V.graph.get_output_names():\n add_user(node_name, OutputNode(StarDep(node_name)))\n\n # make sure input mutation isn't dead-code-eliminated\n for name in self.mutation_renames:\n if name in V.graph.graph_inputs:\n add_user(name, OutputNode(StarDep(name)))\n V.graph.mutated_inputs.add(name)\n\n # copy users information onto the nodes\n for node in self.nodes:\n node.set_users(name_to_users[node.get_name()])\n\n # populate inverse_users\n for node in self.nodes:\n for user in node.users:\n user.node.inverse_users.append(node)\n\n def dead_node_elimination(self):\n \"\"\"\n Remove any nodes without users\n \"\"\"\n updated_nodes = []\n for node in self.nodes:\n if node.users:\n updated_nodes.append(node)\n else:\n # dead code\n 
log.debug(\"removed dead node: %s\", node.get_name())\n V.graph.removed_buffers.add(node.get_name())\n self.nodes = updated_nodes\n\n def topological_sort_schedule(self):\n \"\"\"\n Ensure self.nodes is in topologically sorted order\n \"\"\"\n seen = set()\n name_to_node = dict()\n result = []\n\n def visit(n):\n if n not in seen:\n seen.add(n)\n for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):\n visit(name_to_node[dep.name])\n result.append(n)\n\n for node in self.nodes:\n for name in node.get_names():\n name_to_node[name] = node\n for node in self.nodes:\n visit(node)\n self.nodes = result\n\n def compute_predecessors(self):\n \"\"\"\n Populate each node.recursive_predecessors\n \"\"\"\n # note self.nodes is topologically sorted\n name_to_predecessors = {}\n for node in self.nodes:\n recursive_predecessors = set()\n for dep in node.unmet_dependencies:\n recursive_predecessors.add(dep.name)\n recursive_predecessors |= name_to_predecessors[dep.name]\n name_to_predecessors[node.get_name()] = recursive_predecessors\n node.recursive_predecessors = recursive_predecessors\n\n for order, node in enumerate(self.nodes):\n node.min_order = order\n node.max_order = order\n\n def fuse_nodes(self):\n \"\"\"\n Mutates self.nodes to combine nodes into FusedSchedulerNodes.\n \"\"\"\n for _ in range(10):\n old_len = len(self.nodes)\n self.fuse_nodes_once()\n if len(self.nodes) == old_len:\n break\n\n def fuse_nodes_once(self):\n \"\"\"\n Mutates self.nodes to combine nodes into FusedSchedulerNodes.\n\n This relies on two key functions to control the logic:\n - self.can_fuses(): checks if a fusion is legal\n - self.score_fusion(): assigns priority to a given fusion\n \"\"\"\n fused_nodes = set(self.nodes)\n for node1, node2 in self.get_possible_fusions():\n node1 = self.name_to_fused_node[node1.get_first_name()]\n node2 = self.name_to_fused_node[node2.get_first_name()]\n if self.can_fuse(node1, node2) and not self.will_fusion_create_cycle(\n node1, node2\n ):\n node3 = FusedSchedulerNode.fuse(node1, node2)\n fused_nodes.remove(node1)\n fused_nodes.remove(node2)\n fused_nodes.add(node3)\n self.name_to_fused_node.update(\n {n.get_name(): node3 for n in node3.get_nodes()}\n )\n self.nodes = sorted(fused_nodes, key=lambda x: x.min_order)\n self.topological_sort_schedule()\n\n def get_possible_fusions(self):\n \"\"\"\n Helper to find all legal fusion opportunities, sorted by self.score_fusion()\n \"\"\"\n possible_fusions = []\n seen = set()\n\n def check_all_pairs(nodes):\n for node1_index, node1 in enumerate(nodes):\n for node2 in nodes[node1_index + 1 :]:\n key = (node1, node2)\n if key in seen:\n continue\n seen.add(key)\n\n if self.can_fuse(node1, node2):\n possible_fusions.append(key)\n elif node2.is_template() and self.can_fuse(node2, node1):\n # epilogue fusions are order dependent\n possible_fusions.append((node2, node1))\n\n buffer_names_grouping = collections.defaultdict(list)\n for node in self.nodes:\n for buf in node.used_buffer_names():\n buffer_names_grouping[buf].append(node)\n for node_grouping in buffer_names_grouping.values():\n check_all_pairs(node_grouping)\n\n if config.aggressive_fusion:\n group_grouping = collections.defaultdict(list)\n for node in self.nodes:\n group = getattr(node, \"group\", None)\n if group:\n group_grouping[group].append(node)\n for node_grouping in group_grouping.values():\n check_all_pairs(node_grouping)\n\n return sorted(possible_fusions, key=self.score_fusion_key, reverse=True)\n\n def will_fusion_create_cycle(self, node1, node2):\n \"\"\"Finds 
whether there's a path from src to dst caused indirectly by fusion\"\"\"\n\n def check(node):\n if isinstance(node, FusedSchedulerNode) and node not in visited:\n visited.add(node)\n return bool(combined_names & node.recursive_predecessors) or any(\n check(self.name_to_fused_node[n])\n for n in node.recursive_predecessors - combined_predecessors\n )\n return False\n\n visited = set()\n combined_names = node1.get_names() | node2.get_names()\n combined_predecessors = (\n node1.recursive_predecessors | node2.recursive_predecessors\n ) - combined_names\n return any(check(self.name_to_fused_node[n]) for n in combined_predecessors)\n\n def can_fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n \"\"\"\n Determine if it is possible to combine node1 and node2 into a\n single fused node.\n \"\"\"\n if node1 is node2:\n return False\n if (\n isinstance(node1, (ExternKernelSchedulerNode, NopKernelSchedulerNode))\n and not node1.is_template()\n ):\n return False\n if (\n isinstance(node2, (ExternKernelSchedulerNode, NopKernelSchedulerNode))\n and not node2.is_template()\n ):\n return False\n if node2.get_names() & node1.recursive_predecessors:\n return False # node2 must go before node1\n if node2.is_template():\n return False # only epilogues\n\n device = node1.get_device()\n if device != node2.get_device():\n return False # wrong device\n\n no_shared_data = self.score_fusion_memory(node1, node2) == 0\n if no_shared_data and (\n not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()\n ):\n return False # heuristic not needed for correctness\n\n if len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size:\n return False # heuristic not needed for correctness\n\n if node1.get_names() & node2.recursive_predecessors:\n # node2 depends on node1 outputs\n if not self.can_fuse_vertical(node1, node2):\n return False\n if node1.is_template():\n return template_can_fuse(node1, node2)\n return self.get_backend(device).can_fuse_vertical(node1, node2)\n else: # nodes don't depend on each other, but may have common reads\n if node1.is_template():\n return False\n return self.get_backend(device).can_fuse_horizontal(node1, node2)\n\n def can_fuse_vertical(self, node1, node2):\n \"\"\"\n Check if it is legal to fuse a consumer (node2) into a producer (node1).\n\n We can fuse them if all the reads of node2 either match\n corresponding writes in node1, or are written by nodes that can\n be scheduled before the fusion of node1 and node2.\n \"\"\"\n node1_names = node1.get_names()\n remaining_deps = {\n dep.name for dep in node2.unmet_dependencies - node1.read_writes.writes\n }\n if remaining_deps & node1_names:\n # MemoryDeps didn't match and read different locations of the same buffer.\n # Examples here include:\n # - MemoryDep(\"foo\", x) != MemoryDep(\"foo\", x + 1)\n # - MemoryDep(\"foo\", x) != StarDep(\"foo\")\n return False\n for name in remaining_deps:\n if node1_names & self.name_to_fused_node[name].recursive_predecessors:\n return False\n return True\n\n def score_fusion(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):\n \"\"\"\n Assign a score (higher comes first) to the fusion of node1\n and node2. 
When different fusions conflict with each other,\n this is the way we decide what order to run them in.\n\n Our current score is based on:\n - Estimate of the saved memory operations\n - Fusions closer together in original order\n \"\"\"\n memory_score = self.score_fusion_memory(node1, node1)\n proximity_score = -max(\n abs(node1.min_order - node2.max_order),\n abs(node2.min_order - node1.max_order),\n )\n return (\n node1.is_reduction() == node2.is_reduction() and memory_score > 0,\n memory_score,\n proximity_score,\n )\n\n def score_fusion_memory(self, node1, node2):\n \"\"\"\n The first term in our fusion score that estimates number of saved memory operations.\n \"\"\"\n common_memory_deps = (node1.read_writes.reads | node1.read_writes.writes) & (\n node2.read_writes.reads | node2.read_writes.writes\n )\n return sum(dep.numel_hint() for dep in common_memory_deps)\n\n def score_fusion_key(self, nodes):\n \"\"\"\n Shim for list.sort(key=...)\n \"\"\"\n node1, node2 = nodes\n return self.score_fusion(node1, node2)\n\n def compute_last_usage(self):\n \"\"\"\n Populate node.last_usage\n \"\"\"\n\n future_used_buffers = set()\n for node_name in V.graph.get_output_names():\n future_used_buffers.add(node_name)\n\n for node in reversed(self.nodes):\n used_buffers = node.used_buffer_names()\n used_buffers = {self.mutation_real_name.get(k, k) for k in used_buffers}\n node.last_usage = used_buffers - future_used_buffers\n future_used_buffers.update(used_buffers)\n\n def free_buffers(self):\n \"\"\"Free any buffers that are no longer needed\"\"\"\n for name in sorted(self.buffer_names_to_free - V.graph.removed_buffers):\n if name in self.name_to_node:\n node = self.name_to_node[name]\n if node.can_free():\n V.graph.wrapper_code.codegen_free(node.node)\n self.buffer_names_to_free.clear()\n\n def remove_kernel_local_buffers(self):\n \"\"\"\n Any buffers that are both created and have a last use in the\n same kernel can be removed.\n \"\"\"\n for name in V.kernel.store_buffer_names & self.buffer_names_no_longer_needed:\n if (\n name not in V.kernel.must_keep_buffers\n and name not in V.kernel.args.input_buffers\n and name not in self.mutation_renames\n and name not in self.mutation_real_name\n ):\n self.remove_buffer(name)\n\n def remove_buffer(self, name):\n # Assign a special value instead of deleting the entry\n # because we still rely on output_buffers's length to\n # generate unique arg name.\n log.debug(\"remove_buffer(%r)\", name)\n V.kernel.args.output_buffers[name] = \"REMOVED\"\n V.graph.removed_buffers.add(name)\n\n def flush(self):\n for backend in self.backends.values():\n backend.flush()\n self.free_buffers()\n\n def codegen_extern_call(self, scheduler_node: ExternKernelSchedulerNode):\n assert isinstance(scheduler_node, ExternKernelSchedulerNode)\n scheduler_node.allocate()\n node = scheduler_node.node\n node.codegen(V.graph.wrapper_code)\n self.free_buffers()\n\n def codegen_template_call(\n self, scheduler_node: Union[FusedSchedulerNode, TemplateSchedulerNode]\n ):\n node, *epilogue = scheduler_node.get_nodes()\n node.allocate()\n template_codegen(self, node, epilogue)\n self.free_buffers()\n\n def create_backend(self, device: torch.device):\n assert (\n device.type != \"cuda\" or device.index is not None\n ), f\"{device} should have been normalized in lowering\"\n V.graph.device_types.add(device.type)\n if device.type == \"cpu\":\n from .codegen.cpp import CppScheduling\n\n return CppScheduling(self)\n else:\n from .codegen.triton import TritonScheduling\n\n return 
TritonScheduling(self)\n\n def get_backend(self, device: torch.device):\n if device not in self.backends:\n self.backends[device] = self.create_backend(device)\n return self.backends[device]\n\n @dynamo_timed\n def codegen(self):\n for node in self.nodes:\n self.buffer_names_no_longer_needed.update(node.last_usage)\n\n if not isinstance(node, NopKernelSchedulerNode):\n device = node.get_device()\n if (\n device != self.current_device\n or node.is_extern()\n or node.is_template()\n ):\n self.flush()\n self.current_device = device\n\n self.buffer_names_to_free.update(node.last_usage)\n\n if node.is_template():\n self.codegen_template_call(node)\n elif node.is_extern():\n self.codegen_extern_call(node)\n elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):\n self.get_backend(device).codegen_nodes(node.get_nodes())\n else:\n assert isinstance(node, NopKernelSchedulerNode)\n node.allocate()\n\n self.flush()\n", "path": "torchinductor/scheduler.py" } ]
diff --git a/torchinductor/scheduler.py b/torchinductor/scheduler.py index 24588a9484..fa0d5e5bda 100644 --- a/torchinductor/scheduler.py +++ b/torchinductor/scheduler.py @@ -35,6 +35,9 @@ def pformat(obj): + if isinstance(obj, set): + # pformat has trouble with sets of sympy exprs + obj = sorted(obj, key=str) result = pprint.pformat(obj, indent=4) if "\n" in result: return f"\n{textwrap.indent(result, ' '*4)}"
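The one-line guard in the diff above works because Python cannot order sympy expressions, which is exactly what `pprint`'s set formatter attempts before printing a long set. The sketch below is a standalone illustration, not torchinductor code: it assumes only that `sympy` is installed and uses throwaway symbols in place of the scheduler's dependency objects.

```python
import pprint
import sympy

x, y = sympy.symbols("x y")

# Comparing symbolic expressions yields an unevaluated Relational, and sorted()
# cannot coerce that result to a bool, so the sort fails.
try:
    sorted({x, y, x * y})
except TypeError as exc:
    print(exc)  # cannot determine truth value of Relational

# pprint's set formatter sorts long sets the same way before printing them.
# Sorting on the string form first avoids the symbolic comparison entirely and
# yields a deterministic, printable list, which is what the patched pformat does.
deps = {x + i * y for i in range(20)}  # stand-in for a set of read/write deps
print(pprint.pformat(sorted(deps, key=str), indent=4))
```

Sorting by `str` is not a semantic ordering, but for a debug dump, determinism and printability are all that matter.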
ray-project__ray-5169
[autoscaler] GCP error missing required parameter body <!-- General questions should be asked on the mailing list [email protected]. Questions about how to use Ray should be asked on [StackOverflow](https://stackoverflow.com/questions/tagged/ray). Before submitting an issue, please fill out the following form. --> ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux - **Ray installed from (source or binary)**: - **Ray version**: 0.6.2 - **Python version**: 3.6 - **Exact command to reproduce**: ray up gcp_trainer.yaml <!-- You can obtain the Ray version with python -c "import ray; print(ray.__version__)" --> ### Describe the problem <!-- Describe the problem clearly here. --> Code worked well until today (no update). I got an error at the beginning after the getIamPolicy function in /autoscaler/gcp/config.py. I have all the rights / permissions in my GCP. The yaml file is similar to the example-full.yaml ### Source code / logs <!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. --> file_cache is unavailable when using oauth2client >= 4.0.0 Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect from google.appengine.api import memcache ModuleNotFoundError: No module named 'google.appengine' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module> from oauth2client.contrib.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module> from oauth2client.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 41, in autodetect from . import file_cache File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 41, in <module> 'file_cache is unavailable when using oauth2client >= 4.0.0') ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 URL being requested: GET https://www.googleapis.com/discovery/v1/apis/cloudresourcemanager/v1/rest /opt/tools/anaconda3/lib/python3.6/site-packages/google/auth/_default.py:66: UserWarning: Your application has authenticated using end user credentials from Google Cloud SDK. We recommend that most server applications use service accounts instead. If your application continues to use end user credentials from Cloud SDK, you might receive a "quota exceeded" or "API not enabled" error. 
For more information about service accounts, see https://cloud.google.com/docs/authentication/ warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING) file_cache is unavailable when using oauth2client >= 4.0.0 Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect from google.appengine.api import memcache ModuleNotFoundError: No module named 'google.appengine' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module> from oauth2client.contrib.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module> from oauth2client.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 41, in autodetect from . import file_cache File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 41, in <module> 'file_cache is unavailable when using oauth2client >= 4.0.0') ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 URL being requested: GET https://www.googleapis.com/discovery/v1/apis/iam/v1/rest /opt/tools/anaconda3/lib/python3.6/site-packages/google/auth/_default.py:66: UserWarning: Your application has authenticated using end user credentials from Google Cloud SDK. We recommend that most server applications use service accounts instead. If your application continues to use end user credentials from Cloud SDK, you might receive a "quota exceeded" or "API not enabled" error. For more information about service accounts, see https://cloud.google.com/docs/authentication/ warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING) file_cache is unavailable when using oauth2client >= 4.0.0 Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect from google.appengine.api import memcache ModuleNotFoundError: No module named 'google.appengine' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module> from oauth2client.contrib.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module> from oauth2client.locked_file import LockedFile ModuleNotFoundError: No module named 'oauth2client.locked_file' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/__init__.py", line 41, in autodetect from . 
import file_cache File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery_cache/file_cache.py", line 41, in <module> 'file_cache is unavailable when using oauth2client >= 4.0.0') ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 URL being requested: GET https://www.googleapis.com/discovery/v1/apis/compute/v1/rest /opt/tools/anaconda3/lib/python3.6/site-packages/google/auth/_default.py:66: UserWarning: Your application has authenticated using end user credentials from Google Cloud SDK. We recommend that most server applications use service accounts instead. If your application continues to use end user credentials from Cloud SDK, you might receive a "quota exceeded" or "API not enabled" error. For more information about service accounts, see https://cloud.google.com/docs/authentication/ warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING) URL being requested: GET https://cloudresourcemanager.googleapis.com/v1/projects/d-dls-dlsi?alt=json URL being requested: GET https://iam.googleapis.com/v1/projects/d-dls-dlsi/serviceAccounts/[email protected]?alt=json Traceback (most recent call last): File "/opt/tools/anaconda3/bin/ray", line 10, in <module> sys.exit(main()) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 744, in main return cli() File "/opt/tools/anaconda3/lib/python3.6/site-packages/click/core.py", line 722, in __call__ return self.main(*args, **kwargs) File "/opt/tools/anaconda3/lib/python3.6/site-packages/click/core.py", line 697, in main rv = self.invoke(ctx) File "/opt/tools/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/tools/anaconda3/lib/python3.6/site-packages/click/core.py", line 895, in invoke return ctx.invoke(self.callback, **ctx.params) File "/opt/tools/anaconda3/lib/python3.6/site-packages/click/core.py", line 535, in invoke return callback(*args, **kwargs) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 463, in create_or_update no_restart, restart_only, yes, cluster_name) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/autoscaler/commands.py", line 43, in create_or_update_cluster config = _bootstrap_config(config) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/autoscaler/commands.py", line 65, in _bootstrap_config resolved_config = bootstrap_config(config) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/autoscaler/gcp/config.py", line 109, in bootstrap_gcp config = _configure_iam_role(config) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/autoscaler/gcp/config.py", line 169, in _configure_iam_role _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES) File "/opt/tools/anaconda3/lib/python3.6/site-packages/ray/autoscaler/gcp/config.py", line 381, in _add_iam_policy_binding policy = crm.projects().getIamPolicy(resource=project_id).execute() File "/opt/tools/anaconda3/lib/python3.6/site-packages/googleapiclient/discovery.py", line 730, in method raise TypeError('Missing required parameter "%s"' % name) TypeError: Missing required parameter "body"
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport time\n\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.backends import default_backend\nfrom googleapiclient import discovery, errors\n\nlogger = logging.getLogger(__name__)\n\ncrm = discovery.build(\"cloudresourcemanager\", \"v1\")\niam = discovery.build(\"iam\", \"v1\")\ncompute = discovery.build(\"compute\", \"v1\")\n\nVERSION = \"v1\"\n\nRAY = \"ray-autoscaler\"\nDEFAULT_SERVICE_ACCOUNT_ID = RAY + \"-sa-\" + VERSION\nSERVICE_ACCOUNT_EMAIL_TEMPLATE = (\n \"{account_id}@{project_id}.iam.gserviceaccount.com\")\nDEFAULT_SERVICE_ACCOUNT_CONFIG = {\n \"displayName\": \"Ray Autoscaler Service Account ({})\".format(VERSION),\n}\nDEFAULT_SERVICE_ACCOUNT_ROLES = (\"roles/storage.objectAdmin\",\n \"roles/compute.admin\")\n\nMAX_POLLS = 12\nPOLL_INTERVAL = 5\n\n\ndef wait_for_crm_operation(operation):\n \"\"\"Poll for cloud resource manager operation until finished.\"\"\"\n logger.info(\"wait_for_crm_operation: \"\n \"Waiting for operation {} to finish...\".format(operation))\n\n for _ in range(MAX_POLLS):\n result = crm.operations().get(name=operation[\"name\"]).execute()\n if \"error\" in result:\n raise Exception(result[\"error\"])\n\n if \"done\" in result and result[\"done\"]:\n logger.info(\"wait_for_crm_operation: Operation done.\")\n break\n\n time.sleep(POLL_INTERVAL)\n\n return result\n\n\ndef wait_for_compute_global_operation(project_name, operation):\n \"\"\"Poll for global compute operation until finished.\"\"\"\n logger.info(\"wait_for_compute_global_operation: \"\n \"Waiting for operation {} to finish...\".format(\n operation[\"name\"]))\n\n for _ in range(MAX_POLLS):\n result = compute.globalOperations().get(\n project=project_name,\n operation=operation[\"name\"],\n ).execute()\n if \"error\" in result:\n raise Exception(result[\"error\"])\n\n if result[\"status\"] == \"DONE\":\n logger.info(\"wait_for_compute_global_operation: \"\n \"Operation done.\")\n break\n\n time.sleep(POLL_INTERVAL)\n\n return result\n\n\ndef key_pair_name(i, region, project_id, ssh_user):\n \"\"\"Returns the ith default gcp_key_pair_name.\"\"\"\n key_name = \"{}_gcp_{}_{}_{}\".format(RAY, region, project_id, ssh_user, i)\n return key_name\n\n\ndef key_pair_paths(key_name):\n \"\"\"Returns public and private key paths for a given key_name.\"\"\"\n public_key_path = os.path.expanduser(\"~/.ssh/{}.pub\".format(key_name))\n private_key_path = os.path.expanduser(\"~/.ssh/{}.pem\".format(key_name))\n return public_key_path, private_key_path\n\n\ndef generate_rsa_key_pair():\n \"\"\"Create public and private ssh-keys.\"\"\"\n\n key = rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048)\n\n public_key = key.public_key().public_bytes(\n serialization.Encoding.OpenSSH,\n serialization.PublicFormat.OpenSSH).decode(\"utf-8\")\n\n pem = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()).decode(\"utf-8\")\n\n return public_key, pem\n\n\ndef bootstrap_gcp(config):\n config = _configure_project(config)\n config = _configure_iam_role(config)\n config = _configure_key_pair(config)\n config = _configure_subnet(config)\n\n return config\n\n\ndef _configure_project(config):\n \"\"\"Setup a Google Cloud Platform 
Project.\n\n Google Compute Platform organizes all the resources, such as storage\n buckets, users, and instances under projects. This is different from\n aws ec2 where everything is global.\n \"\"\"\n project_id = config[\"provider\"].get(\"project_id\")\n assert config[\"provider\"][\"project_id\"] is not None, (\n \"'project_id' must be set in the 'provider' section of the autoscaler\"\n \" config. Notice that the project id must be globally unique.\")\n project = _get_project(project_id)\n\n if project is None:\n # Project not found, try creating it\n _create_project(project_id)\n project = _get_project(project_id)\n\n assert project is not None, \"Failed to create project\"\n assert project[\"lifecycleState\"] == \"ACTIVE\", (\n \"Project status needs to be ACTIVE, got {}\".format(\n project[\"lifecycleState\"]))\n\n config[\"provider\"][\"project_id\"] = project[\"projectId\"]\n\n return config\n\n\ndef _configure_iam_role(config):\n \"\"\"Setup a gcp service account with IAM roles.\n\n Creates a gcp service acconut and binds IAM roles which allow it to control\n control storage/compute services. Specifically, the head node needs to have\n an IAM role that allows it to create further gce instances and store items\n in google cloud storage.\n\n TODO: Allow the name/id of the service account to be configured\n \"\"\"\n email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=DEFAULT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"])\n service_account = _get_service_account(email, config)\n\n if service_account is None:\n logger.info(\"_configure_iam_role: \"\n \"Creating new service account {}\".format(\n DEFAULT_SERVICE_ACCOUNT_ID))\n\n service_account = _create_service_account(\n DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)\n\n assert service_account is not None, \"Failed to create service account\"\n\n _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)\n\n config[\"head_node\"][\"serviceAccounts\"] = [{\n \"email\": service_account[\"email\"],\n # NOTE: The amount of access is determined by the scope + IAM\n # role of the service account. Even if the cloud-platform scope\n # gives (scope) access to the whole cloud-platform, the service\n # account is limited by the IAM rights specified below.\n \"scopes\": [\"https://www.googleapis.com/auth/cloud-platform\"]\n }]\n\n return config\n\n\ndef _configure_key_pair(config):\n \"\"\"Configure SSH access, using an existing key pair if possible.\n\n Creates a project-wide ssh key that can be used to access all the instances\n unless explicitly prohibited by instance config.\n\n The ssh-keys created by ray are of format:\n\n [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]\n\n where:\n\n [USERNAME] is the user for the SSH key, specified in the config.\n [KEY_VALUE] is the public SSH key value.\n \"\"\"\n\n if \"ssh_private_key\" in config[\"auth\"]:\n return config\n\n ssh_user = config[\"auth\"][\"ssh_user\"]\n\n project = compute.projects().get(\n project=config[\"provider\"][\"project_id\"]).execute()\n\n # Key pairs associated with project meta data. 
The key pairs are general,\n # and not just ssh keys.\n ssh_keys_str = next(\n (item for item in project[\"commonInstanceMetadata\"].get(\"items\", [])\n if item[\"key\"] == \"ssh-keys\"), {}).get(\"value\", \"\")\n\n ssh_keys = ssh_keys_str.split(\"\\n\") if ssh_keys_str else []\n\n # Try a few times to get or create a good key pair.\n key_found = False\n for i in range(10):\n key_name = key_pair_name(i, config[\"provider\"][\"region\"],\n config[\"provider\"][\"project_id\"], ssh_user)\n public_key_path, private_key_path = key_pair_paths(key_name)\n\n for ssh_key in ssh_keys:\n key_parts = ssh_key.split(\" \")\n if len(key_parts) != 3:\n continue\n\n if key_parts[2] == ssh_user and os.path.exists(private_key_path):\n # Found a key\n key_found = True\n break\n\n # Create a key since it doesn't exist locally or in GCP\n if not key_found and not os.path.exists(private_key_path):\n logger.info(\"_configure_key_pair: \"\n \"Creating new key pair {}\".format(key_name))\n public_key, private_key = generate_rsa_key_pair()\n\n _create_project_ssh_key_pair(project, public_key, ssh_user)\n\n with open(private_key_path, \"w\") as f:\n f.write(private_key)\n os.chmod(private_key_path, 0o600)\n\n with open(public_key_path, \"w\") as f:\n f.write(public_key)\n\n key_found = True\n\n break\n\n if key_found:\n break\n\n assert key_found, \"SSH keypair for user {} not found for {}\".format(\n ssh_user, private_key_path)\n assert os.path.exists(private_key_path), (\n \"Private key file {} not found for user {}\"\n \"\".format(private_key_path, ssh_user))\n\n logger.info(\"_configure_key_pair: \"\n \"Private key not specified in config, using\"\n \"{}\".format(private_key_path))\n\n config[\"auth\"][\"ssh_private_key\"] = private_key_path\n\n return config\n\n\ndef _configure_subnet(config):\n \"\"\"Pick a reasonable subnet if not specified by the config.\"\"\"\n\n # Rationale: avoid subnet lookup if the network is already\n # completely manually configured\n if (\"networkInterfaces\" in config[\"head_node\"]\n and \"networkInterfaces\" in config[\"worker_nodes\"]):\n return config\n\n subnets = _list_subnets(config)\n\n if not subnets:\n raise NotImplementedError(\"Should be able to create subnet.\")\n\n # TODO: make sure that we have usable subnet. Maybe call\n # compute.subnetworks().listUsable? 
For some reason it didn't\n # work out-of-the-box\n default_subnet = subnets[0]\n\n if \"networkInterfaces\" not in config[\"head_node\"]:\n config[\"head_node\"][\"networkInterfaces\"] = [{\n \"subnetwork\": default_subnet[\"selfLink\"],\n \"accessConfigs\": [{\n \"name\": \"External NAT\",\n \"type\": \"ONE_TO_ONE_NAT\",\n }],\n }]\n\n if \"networkInterfaces\" not in config[\"worker_nodes\"]:\n config[\"worker_nodes\"][\"networkInterfaces\"] = [{\n \"subnetwork\": default_subnet[\"selfLink\"],\n \"accessConfigs\": [{\n \"name\": \"External NAT\",\n \"type\": \"ONE_TO_ONE_NAT\",\n }],\n }]\n\n return config\n\n\ndef _list_subnets(config):\n response = compute.subnetworks().list(\n project=config[\"provider\"][\"project_id\"],\n region=config[\"provider\"][\"region\"]).execute()\n\n return response[\"items\"]\n\n\ndef _get_subnet(config, subnet_id):\n subnet = compute.subnetworks().get(\n project=config[\"provider\"][\"project_id\"],\n region=config[\"provider\"][\"region\"],\n subnetwork=subnet_id,\n ).execute()\n\n return subnet\n\n\ndef _get_project(project_id):\n try:\n project = crm.projects().get(projectId=project_id).execute()\n except errors.HttpError as e:\n if e.resp.status != 403:\n raise\n project = None\n\n return project\n\n\ndef _create_project(project_id):\n operation = crm.projects().create(body={\n \"projectId\": project_id,\n \"name\": project_id\n }).execute()\n\n result = wait_for_crm_operation(operation)\n\n return result\n\n\ndef _get_service_account(account, config):\n project_id = config[\"provider\"][\"project_id\"]\n full_name = (\"projects/{project_id}/serviceAccounts/{account}\"\n \"\".format(project_id=project_id, account=account))\n try:\n service_account = iam.projects().serviceAccounts().get(\n name=full_name).execute()\n except errors.HttpError as e:\n if e.resp.status != 404:\n raise\n service_account = None\n\n return service_account\n\n\ndef _create_service_account(account_id, account_config, config):\n project_id = config[\"provider\"][\"project_id\"]\n\n service_account = iam.projects().serviceAccounts().create(\n name=\"projects/{project_id}\".format(project_id=project_id),\n body={\n \"accountId\": account_id,\n \"serviceAccount\": account_config,\n }).execute()\n\n return service_account\n\n\ndef _add_iam_policy_binding(service_account, roles):\n \"\"\"Add new IAM roles for the service account.\"\"\"\n project_id = service_account[\"projectId\"]\n email = service_account[\"email\"]\n member_id = \"serviceAccount:\" + email\n\n policy = crm.projects().getIamPolicy(resource=project_id).execute()\n\n already_configured = True\n\n for role in roles:\n role_exists = False\n for binding in policy[\"bindings\"]:\n if binding[\"role\"] == role:\n if member_id not in binding[\"members\"]:\n binding[\"members\"].append(member_id)\n already_configured = False\n role_exists = True\n\n if not role_exists:\n already_configured = False\n policy[\"bindings\"].append({\n \"members\": [member_id],\n \"role\": role,\n })\n\n if already_configured:\n # In some managed environments, an admin needs to grant the\n # roles, so only call setIamPolicy if needed.\n return\n\n result = crm.projects().setIamPolicy(\n resource=project_id, body={\n \"policy\": policy,\n }).execute()\n\n return result\n\n\ndef _create_project_ssh_key_pair(project, public_key, ssh_user):\n \"\"\"Inserts an ssh-key into project commonInstanceMetadata\"\"\"\n\n key_parts = public_key.split(\" \")\n\n # Sanity checks to make sure that the generated key matches expectation\n assert len(key_parts) == 
2, key_parts\n assert key_parts[0] == \"ssh-rsa\", key_parts\n\n new_ssh_meta = \"{ssh_user}:ssh-rsa {key_value} {ssh_user}\".format(\n ssh_user=ssh_user, key_value=key_parts[1])\n\n common_instance_metadata = project[\"commonInstanceMetadata\"]\n items = common_instance_metadata.get(\"items\", [])\n\n ssh_keys_i = next(\n (i for i, item in enumerate(items) if item[\"key\"] == \"ssh-keys\"), None)\n\n if ssh_keys_i is None:\n items.append({\"key\": \"ssh-keys\", \"value\": new_ssh_meta})\n else:\n ssh_keys = items[ssh_keys_i]\n ssh_keys[\"value\"] += \"\\n\" + new_ssh_meta\n items[ssh_keys_i] = ssh_keys\n\n common_instance_metadata[\"items\"] = items\n\n operation = compute.projects().setCommonInstanceMetadata(\n project=project[\"name\"], body=common_instance_metadata).execute()\n\n response = wait_for_compute_global_operation(project[\"name\"], operation)\n\n return response\n", "path": "python/ray/autoscaler/gcp/config.py" } ]
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport time\n\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.backends import default_backend\nfrom googleapiclient import discovery, errors\n\nlogger = logging.getLogger(__name__)\n\ncrm = discovery.build(\"cloudresourcemanager\", \"v1\")\niam = discovery.build(\"iam\", \"v1\")\ncompute = discovery.build(\"compute\", \"v1\")\n\nVERSION = \"v1\"\n\nRAY = \"ray-autoscaler\"\nDEFAULT_SERVICE_ACCOUNT_ID = RAY + \"-sa-\" + VERSION\nSERVICE_ACCOUNT_EMAIL_TEMPLATE = (\n \"{account_id}@{project_id}.iam.gserviceaccount.com\")\nDEFAULT_SERVICE_ACCOUNT_CONFIG = {\n \"displayName\": \"Ray Autoscaler Service Account ({})\".format(VERSION),\n}\nDEFAULT_SERVICE_ACCOUNT_ROLES = (\"roles/storage.objectAdmin\",\n \"roles/compute.admin\")\n\nMAX_POLLS = 12\nPOLL_INTERVAL = 5\n\n\ndef wait_for_crm_operation(operation):\n \"\"\"Poll for cloud resource manager operation until finished.\"\"\"\n logger.info(\"wait_for_crm_operation: \"\n \"Waiting for operation {} to finish...\".format(operation))\n\n for _ in range(MAX_POLLS):\n result = crm.operations().get(name=operation[\"name\"]).execute()\n if \"error\" in result:\n raise Exception(result[\"error\"])\n\n if \"done\" in result and result[\"done\"]:\n logger.info(\"wait_for_crm_operation: Operation done.\")\n break\n\n time.sleep(POLL_INTERVAL)\n\n return result\n\n\ndef wait_for_compute_global_operation(project_name, operation):\n \"\"\"Poll for global compute operation until finished.\"\"\"\n logger.info(\"wait_for_compute_global_operation: \"\n \"Waiting for operation {} to finish...\".format(\n operation[\"name\"]))\n\n for _ in range(MAX_POLLS):\n result = compute.globalOperations().get(\n project=project_name,\n operation=operation[\"name\"],\n ).execute()\n if \"error\" in result:\n raise Exception(result[\"error\"])\n\n if result[\"status\"] == \"DONE\":\n logger.info(\"wait_for_compute_global_operation: \"\n \"Operation done.\")\n break\n\n time.sleep(POLL_INTERVAL)\n\n return result\n\n\ndef key_pair_name(i, region, project_id, ssh_user):\n \"\"\"Returns the ith default gcp_key_pair_name.\"\"\"\n key_name = \"{}_gcp_{}_{}_{}\".format(RAY, region, project_id, ssh_user, i)\n return key_name\n\n\ndef key_pair_paths(key_name):\n \"\"\"Returns public and private key paths for a given key_name.\"\"\"\n public_key_path = os.path.expanduser(\"~/.ssh/{}.pub\".format(key_name))\n private_key_path = os.path.expanduser(\"~/.ssh/{}.pem\".format(key_name))\n return public_key_path, private_key_path\n\n\ndef generate_rsa_key_pair():\n \"\"\"Create public and private ssh-keys.\"\"\"\n\n key = rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048)\n\n public_key = key.public_key().public_bytes(\n serialization.Encoding.OpenSSH,\n serialization.PublicFormat.OpenSSH).decode(\"utf-8\")\n\n pem = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()).decode(\"utf-8\")\n\n return public_key, pem\n\n\ndef bootstrap_gcp(config):\n config = _configure_project(config)\n config = _configure_iam_role(config)\n config = _configure_key_pair(config)\n config = _configure_subnet(config)\n\n return config\n\n\ndef _configure_project(config):\n \"\"\"Setup a Google Cloud Platform 
Project.\n\n Google Compute Platform organizes all the resources, such as storage\n buckets, users, and instances under projects. This is different from\n aws ec2 where everything is global.\n \"\"\"\n project_id = config[\"provider\"].get(\"project_id\")\n assert config[\"provider\"][\"project_id\"] is not None, (\n \"'project_id' must be set in the 'provider' section of the autoscaler\"\n \" config. Notice that the project id must be globally unique.\")\n project = _get_project(project_id)\n\n if project is None:\n # Project not found, try creating it\n _create_project(project_id)\n project = _get_project(project_id)\n\n assert project is not None, \"Failed to create project\"\n assert project[\"lifecycleState\"] == \"ACTIVE\", (\n \"Project status needs to be ACTIVE, got {}\".format(\n project[\"lifecycleState\"]))\n\n config[\"provider\"][\"project_id\"] = project[\"projectId\"]\n\n return config\n\n\ndef _configure_iam_role(config):\n \"\"\"Setup a gcp service account with IAM roles.\n\n Creates a gcp service acconut and binds IAM roles which allow it to control\n control storage/compute services. Specifically, the head node needs to have\n an IAM role that allows it to create further gce instances and store items\n in google cloud storage.\n\n TODO: Allow the name/id of the service account to be configured\n \"\"\"\n email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=DEFAULT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"])\n service_account = _get_service_account(email, config)\n\n if service_account is None:\n logger.info(\"_configure_iam_role: \"\n \"Creating new service account {}\".format(\n DEFAULT_SERVICE_ACCOUNT_ID))\n\n service_account = _create_service_account(\n DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)\n\n assert service_account is not None, \"Failed to create service account\"\n\n _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)\n\n config[\"head_node\"][\"serviceAccounts\"] = [{\n \"email\": service_account[\"email\"],\n # NOTE: The amount of access is determined by the scope + IAM\n # role of the service account. Even if the cloud-platform scope\n # gives (scope) access to the whole cloud-platform, the service\n # account is limited by the IAM rights specified below.\n \"scopes\": [\"https://www.googleapis.com/auth/cloud-platform\"]\n }]\n\n return config\n\n\ndef _configure_key_pair(config):\n \"\"\"Configure SSH access, using an existing key pair if possible.\n\n Creates a project-wide ssh key that can be used to access all the instances\n unless explicitly prohibited by instance config.\n\n The ssh-keys created by ray are of format:\n\n [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]\n\n where:\n\n [USERNAME] is the user for the SSH key, specified in the config.\n [KEY_VALUE] is the public SSH key value.\n \"\"\"\n\n if \"ssh_private_key\" in config[\"auth\"]:\n return config\n\n ssh_user = config[\"auth\"][\"ssh_user\"]\n\n project = compute.projects().get(\n project=config[\"provider\"][\"project_id\"]).execute()\n\n # Key pairs associated with project meta data. 
The key pairs are general,\n # and not just ssh keys.\n ssh_keys_str = next(\n (item for item in project[\"commonInstanceMetadata\"].get(\"items\", [])\n if item[\"key\"] == \"ssh-keys\"), {}).get(\"value\", \"\")\n\n ssh_keys = ssh_keys_str.split(\"\\n\") if ssh_keys_str else []\n\n # Try a few times to get or create a good key pair.\n key_found = False\n for i in range(10):\n key_name = key_pair_name(i, config[\"provider\"][\"region\"],\n config[\"provider\"][\"project_id\"], ssh_user)\n public_key_path, private_key_path = key_pair_paths(key_name)\n\n for ssh_key in ssh_keys:\n key_parts = ssh_key.split(\" \")\n if len(key_parts) != 3:\n continue\n\n if key_parts[2] == ssh_user and os.path.exists(private_key_path):\n # Found a key\n key_found = True\n break\n\n # Create a key since it doesn't exist locally or in GCP\n if not key_found and not os.path.exists(private_key_path):\n logger.info(\"_configure_key_pair: \"\n \"Creating new key pair {}\".format(key_name))\n public_key, private_key = generate_rsa_key_pair()\n\n _create_project_ssh_key_pair(project, public_key, ssh_user)\n\n with open(private_key_path, \"w\") as f:\n f.write(private_key)\n os.chmod(private_key_path, 0o600)\n\n with open(public_key_path, \"w\") as f:\n f.write(public_key)\n\n key_found = True\n\n break\n\n if key_found:\n break\n\n assert key_found, \"SSH keypair for user {} not found for {}\".format(\n ssh_user, private_key_path)\n assert os.path.exists(private_key_path), (\n \"Private key file {} not found for user {}\"\n \"\".format(private_key_path, ssh_user))\n\n logger.info(\"_configure_key_pair: \"\n \"Private key not specified in config, using\"\n \"{}\".format(private_key_path))\n\n config[\"auth\"][\"ssh_private_key\"] = private_key_path\n\n return config\n\n\ndef _configure_subnet(config):\n \"\"\"Pick a reasonable subnet if not specified by the config.\"\"\"\n\n # Rationale: avoid subnet lookup if the network is already\n # completely manually configured\n if (\"networkInterfaces\" in config[\"head_node\"]\n and \"networkInterfaces\" in config[\"worker_nodes\"]):\n return config\n\n subnets = _list_subnets(config)\n\n if not subnets:\n raise NotImplementedError(\"Should be able to create subnet.\")\n\n # TODO: make sure that we have usable subnet. Maybe call\n # compute.subnetworks().listUsable? 
For some reason it didn't\n # work out-of-the-box\n default_subnet = subnets[0]\n\n if \"networkInterfaces\" not in config[\"head_node\"]:\n config[\"head_node\"][\"networkInterfaces\"] = [{\n \"subnetwork\": default_subnet[\"selfLink\"],\n \"accessConfigs\": [{\n \"name\": \"External NAT\",\n \"type\": \"ONE_TO_ONE_NAT\",\n }],\n }]\n\n if \"networkInterfaces\" not in config[\"worker_nodes\"]:\n config[\"worker_nodes\"][\"networkInterfaces\"] = [{\n \"subnetwork\": default_subnet[\"selfLink\"],\n \"accessConfigs\": [{\n \"name\": \"External NAT\",\n \"type\": \"ONE_TO_ONE_NAT\",\n }],\n }]\n\n return config\n\n\ndef _list_subnets(config):\n response = compute.subnetworks().list(\n project=config[\"provider\"][\"project_id\"],\n region=config[\"provider\"][\"region\"]).execute()\n\n return response[\"items\"]\n\n\ndef _get_subnet(config, subnet_id):\n subnet = compute.subnetworks().get(\n project=config[\"provider\"][\"project_id\"],\n region=config[\"provider\"][\"region\"],\n subnetwork=subnet_id,\n ).execute()\n\n return subnet\n\n\ndef _get_project(project_id):\n try:\n project = crm.projects().get(projectId=project_id).execute()\n except errors.HttpError as e:\n if e.resp.status != 403:\n raise\n project = None\n\n return project\n\n\ndef _create_project(project_id):\n operation = crm.projects().create(body={\n \"projectId\": project_id,\n \"name\": project_id\n }).execute()\n\n result = wait_for_crm_operation(operation)\n\n return result\n\n\ndef _get_service_account(account, config):\n project_id = config[\"provider\"][\"project_id\"]\n full_name = (\"projects/{project_id}/serviceAccounts/{account}\"\n \"\".format(project_id=project_id, account=account))\n try:\n service_account = iam.projects().serviceAccounts().get(\n name=full_name).execute()\n except errors.HttpError as e:\n if e.resp.status != 404:\n raise\n service_account = None\n\n return service_account\n\n\ndef _create_service_account(account_id, account_config, config):\n project_id = config[\"provider\"][\"project_id\"]\n\n service_account = iam.projects().serviceAccounts().create(\n name=\"projects/{project_id}\".format(project_id=project_id),\n body={\n \"accountId\": account_id,\n \"serviceAccount\": account_config,\n }).execute()\n\n return service_account\n\n\ndef _add_iam_policy_binding(service_account, roles):\n \"\"\"Add new IAM roles for the service account.\"\"\"\n project_id = service_account[\"projectId\"]\n email = service_account[\"email\"]\n member_id = \"serviceAccount:\" + email\n\n policy = crm.projects().getIamPolicy(\n resource=project_id, body={}).execute()\n\n already_configured = True\n\n for role in roles:\n role_exists = False\n for binding in policy[\"bindings\"]:\n if binding[\"role\"] == role:\n if member_id not in binding[\"members\"]:\n binding[\"members\"].append(member_id)\n already_configured = False\n role_exists = True\n\n if not role_exists:\n already_configured = False\n policy[\"bindings\"].append({\n \"members\": [member_id],\n \"role\": role,\n })\n\n if already_configured:\n # In some managed environments, an admin needs to grant the\n # roles, so only call setIamPolicy if needed.\n return\n\n result = crm.projects().setIamPolicy(\n resource=project_id, body={\n \"policy\": policy,\n }).execute()\n\n return result\n\n\ndef _create_project_ssh_key_pair(project, public_key, ssh_user):\n \"\"\"Inserts an ssh-key into project commonInstanceMetadata\"\"\"\n\n key_parts = public_key.split(\" \")\n\n # Sanity checks to make sure that the generated key matches expectation\n assert 
len(key_parts) == 2, key_parts\n assert key_parts[0] == \"ssh-rsa\", key_parts\n\n new_ssh_meta = \"{ssh_user}:ssh-rsa {key_value} {ssh_user}\".format(\n ssh_user=ssh_user, key_value=key_parts[1])\n\n common_instance_metadata = project[\"commonInstanceMetadata\"]\n items = common_instance_metadata.get(\"items\", [])\n\n ssh_keys_i = next(\n (i for i, item in enumerate(items) if item[\"key\"] == \"ssh-keys\"), None)\n\n if ssh_keys_i is None:\n items.append({\"key\": \"ssh-keys\", \"value\": new_ssh_meta})\n else:\n ssh_keys = items[ssh_keys_i]\n ssh_keys[\"value\"] += \"\\n\" + new_ssh_meta\n items[ssh_keys_i] = ssh_keys\n\n common_instance_metadata[\"items\"] = items\n\n operation = compute.projects().setCommonInstanceMetadata(\n project=project[\"name\"], body=common_instance_metadata).execute()\n\n response = wait_for_compute_global_operation(project[\"name\"], operation)\n\n return response\n", "path": "python/ray/autoscaler/gcp/config.py" } ]
diff --git a/python/ray/autoscaler/gcp/config.py b/python/ray/autoscaler/gcp/config.py index d165fb8b7b09..ba5026700e51 100644 --- a/python/ray/autoscaler/gcp/config.py +++ b/python/ray/autoscaler/gcp/config.py @@ -383,7 +383,8 @@ def _add_iam_policy_binding(service_account, roles): email = service_account["email"] member_id = "serviceAccount:" + email - policy = crm.projects().getIamPolicy(resource=project_id).execute() + policy = crm.projects().getIamPolicy( + resource=project_id, body={}).execute() already_configured = True
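The change above accommodates the generated client now treating the request `body` as required for `projects().getIamPolicy`, which is what the `TypeError: Missing required parameter "body"` in the traceback reports. Below is a hedged sketch of the call shape only: the project id is a placeholder, and actually running it needs the `google-api-python-client` package plus working Google application-default credentials.

```python
from googleapiclient import discovery

crm = discovery.build("cloudresourcemanager", "v1")
project_id = "my-example-project"  # placeholder, not a real project

# Old call shape: with newer discovery documents this raises
#   TypeError: Missing required parameter "body"
# policy = crm.projects().getIamPolicy(resource=project_id).execute()

# Fixed call shape: pass an (empty) request body explicitly, as in the diff.
policy = crm.projects().getIamPolicy(resource=project_id, body={}).execute()

for binding in policy.get("bindings", []):
    print(binding["role"], len(binding.get("members", [])))
```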
googleapis__google-auth-library-python-913
Setuptools as dependency is problematic w/ pip-tools https://github.com/googleapis/google-auth-library-python/commit/908da752d01fef728bd5cb3eb5b13f2b5c335e51 (#322) added `setuptools` as a dependency in this package. However, the [pip-tools](https://github.com/jazzband/pip-tools) package that's commonly used for pinning dependencies considers `setuptools` an unsafe dependency to have in a project at all (as discussed in #492), and as such doesn't save it in the pinned requirements file at all. Since `google-auth` depends on Setuptools but a version couldn't have been pinned in the requirements, we're seeing ``` Collecting setuptools>=40.3.0 (from google-auth==1.19.1->our-proprietary-package==0.31.1) Downloading https://files.pythonhosted.org/packages/b0/8b/379494d7dbd3854aa7b85b216cb0af54edcb7fce7d086ba3e35522a713cf/setuptools-50.0.0-py3-none-any.whl (783kB) ``` which wreaks havoc on Ubuntu 16.04 + Python 3.5 machines due to https://github.com/pypa/setuptools/issues/2352 / https://github.com/pypa/setuptools/issues/2350 / https://github.com/pypa/setuptools/issues/2356 ... The workaround is to add `--allow-unsafe` or manually pin `setuptools`, but is the requirement _actually_ necessary in this package? No other package in the 48-line `requirements.txt` for this particular project would have required a version of `setuptools`. #### Environment details - OS: Ubuntu 16.04 - Python version: 3.5 - pip version: irrelevant - `google-auth` version: 1.19.1 #### Steps to reproduce 1. Install `google-auth` on an Ubuntu 16.04 machine 2. It installs `setuptools==50.0.0` 3. https://github.com/pypa/setuptools/issues/2352 and friends
[ { "content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 301e99643..44c512128 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ 'rsa>=3.1.4,<5; python_version >= "3.6"', # install enum34 to support 2.7. enum34 only works up to python version 3.3. 'enum34>=1.1.10; python_version < "3.4"', - "setuptools>=40.3.0", "six>=1.9.0", )
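Since the complaint is about what the published package declares rather than about runtime behaviour, one low-stakes way to verify the effect is to read the installed distribution's metadata. This is a hedged check, not part of the library: it assumes Python 3.8+ (for `importlib.metadata`) and an installed google-auth release that includes the change above.

```python
from importlib.metadata import requires, version

print("google-auth", version("google-auth"))

declared = requires("google-auth") or []
setuptools_pins = [req for req in declared if req.lower().startswith("setuptools")]

# With a release containing the diff above, this list should be empty, so
# pip-tools no longer has to pin setuptools (or be run with --allow-unsafe)
# just to satisfy google-auth.
print(setuptools_pins)
```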
encode__uvicorn-1269
Shutdown process is broken in 0.15 ### Checklist <!-- Please make sure you check all these items before submitting your bug report. --> - [x] The bug is reproducible against the latest release and/or `master`. - [x] There are no similar issues or pull requests to fix it yet. ### Describe the bug My FastAPI ASGI server cannot shutdown properly with uvicorn==0.15 while it can with 0.14 ### To reproduce Setup minimal FastAPI app and add some functions with logs(prints) to shutdown event ### Expected behavior You see all logs(prints) from functions on shutdown ### Actual behavior Get `ASGI 'lifespan' protocol appears unsupported.` without --lifespan on Get error trace with --lifespan on ### Debugging material uvicorn scheduler.main:app --host=0.0.0.0 --port ${WEB_PORT:-8000} --reload --lifespan on INFO: Will watch for changes in these directories: ['/home/dmytro/storage/chimplie/projects/raok-main/raok-scheduler'] INFO: Uvicorn running on http://0.0.0.0:8004 (Press CTRL+C to quit) INFO: Started reloader process [177653] using statreload INFO: Started server process [177655] INFO: Waiting for application startup. INFO: Tortoise-ORM started, {'default': <tortoise.backends.asyncpg.client.AsyncpgDBClient object at 0x7f63d4a10e50>}, {'models': {'Task': <class 'scheduler.models.task.Task'>, 'Aerich': <class 'aerich.models.Aerich'>}} INFO: Application startup complete. ^CINFO: Shutting down INFO: Finished server process [177655] ERROR: Exception in 'lifespan' protocol Traceback (most recent call last): File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/uvicorn/lifespan/on.py", line 84, in main await app(scope, self.receive, self.send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__ return await self.app(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/fastapi/applications.py", line 199, in __call__ await super().__call__(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/applications.py", line 112, in __call__ await self.middleware_stack(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/middleware/errors.py", line 146, in __call__ await self.app(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/middleware/cors.py", line 70, in __call__ await self.app(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/exceptions.py", line 58, in __call__ await self.app(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/routing.py", line 569, in __call__ await self.lifespan(scope, receive, send) File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/starlette/routing.py", line 544, in lifespan await receive() File "/home/dmytro/.local/share/virtualenvs/raok-scheduler-hpGGYNLi/lib/python3.8/site-packages/uvicorn/lifespan/on.py", line 135, in receive return await self.receive_queue.get() File "/usr/lib64/python3.8/asyncio/queues.py", line 163, in get await getter asyncio.exceptions.CancelledError INFO: Stopping reloader process [177653] 
![image](https://user-images.githubusercontent.com/29193926/129881063-c363dcd2-ed99-45cd-9cda-271d78c05ce8.png) ### Environment - Fedora 34 / Python 3.8 / Uvicorn version: 0.15 - bug, 0.14 - ok - command to run: `uvicorn main:app --host=0.0.0.0 --port 8000 --reload` Revert "When receiving a SIGTERM supervisors should terminate their processes before joining them" Reverts encode/uvicorn#1069 Closes #1160 I've taken my time to understand the situation. I'm going to explain it and then suggest a solution to the previous issue. Right now, the issue on #1160 (`CancelledError`) is caused because the `CTRL + C` sends a `SIGINT (2)` to both parent and child processes, and given that we merged #1069, we have that the parent is also sending a `SIGTERM (15)` to the child. In other words, the child is receiving two signals, and the way `uvicorn` deals with multiple signals (two) is to forcefully exit the process. To be more precise, when we press `CTRL + C` we send a `kill` signal to the process group, not to a single process. Ok. Now, let's go back to the original issue: if we send `SIGINT` to the parent process, it doesn't terminate the children. And that's expected, because `kill -2 <uvicorn_pid>` will only send a signal to the `<uvicorn_pid>`. The solution here would be to use `kill -2 -<uvicorn_pid>`, which sends a signal to the process group instead of only the parent. That being said, this solves the issue that #1069 (`process.terminate()`) solved, but we also avoid #1160 (`CancelledError`). Reference: https://stackoverflow.com/a/392155
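The distinction the description draws between `kill -2 <pid>` and `kill -2 -<pid>` maps directly onto the standard library. The sketch below is a Unix-only illustration with a caller-supplied pid; it is not code from uvicorn itself.

```python
import os
import signal

def signal_parent_only(pid: int, sig: signal.Signals = signal.SIGINT) -> None:
    """Like `kill -2 <pid>`: only the targeted process (e.g. the reloader)
    receives the signal; worker children are not signalled directly."""
    os.kill(pid, sig)

def signal_process_group(pid: int, sig: signal.Signals = signal.SIGINT) -> None:
    """Like `kill -2 -<pgid>` or pressing Ctrl+C in the owning terminal: every
    process in the group receives the signal exactly once, so no process takes
    the second-signal, force-exit path."""
    os.killpg(os.getpgid(pid), sig)
```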
[ { "content": "import asyncio\nimport logging\nimport os\nimport platform\nimport signal\nimport socket\nimport sys\nimport threading\nimport time\nfrom email.utils import formatdate\nfrom types import FrameType\nfrom typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple, Union\n\nimport click\n\nfrom uvicorn._handlers.http import handle_http\nfrom uvicorn.config import Config\n\nif TYPE_CHECKING:\n from uvicorn.protocols.http.h11_impl import H11Protocol\n from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol\n from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol\n from uvicorn.protocols.websockets.wsproto_impl import WSProtocol\n\n Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol]\n\nif sys.platform != \"win32\":\n from asyncio import start_unix_server as _start_unix_server\nelse:\n\n async def _start_unix_server(*args: Any, **kwargs: Any) -> Any:\n raise NotImplementedError(\"Cannot start a unix server on win32\")\n\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self) -> None:\n self.total_requests = 0\n self.connections: Set[\"Protocols\"] = set()\n self.tasks: Set[asyncio.Task] = set()\n self.default_headers: List[Tuple[bytes, bytes]] = []\n\n\nclass Server:\n def __init__(self, config: Config) -> None:\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n self.last_notified = 0.0\n\n def run(self, sockets: Optional[List[socket.socket]] = None) -> None:\n self.config.setup_event_loop()\n if sys.version_info >= (3, 7):\n return asyncio.run(self.serve(sockets=sockets))\n return asyncio.get_event_loop().run_until_complete(self.serve(sockets=sockets))\n\n async def serve(self, sockets: Optional[List[socket.socket]] = None) -> None:\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.lifespan = config.lifespan_class(config)\n\n self.install_signal_handlers()\n\n message = \"Started server process [%d]\"\n color_message = \"Started server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n await self.startup(sockets=sockets)\n if self.should_exit:\n return\n await self.main_loop()\n await self.shutdown(sockets=sockets)\n\n message = \"Finished server process [%d]\"\n color_message = \"Finished server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n async def startup(self, sockets: list = None) -> None:\n await self.lifespan.startup()\n if self.lifespan.should_exit:\n self.should_exit = True\n return\n\n config = self.config\n\n async def handler(\n reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n await handle_http(\n reader, writer, server_state=self.server_state, config=config\n )\n\n if sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n\n def _share_socket(sock: socket.SocketType) -> socket.SocketType:\n # Windows requires the socket be explicitly shared across\n # multiple workers (processes).\n from socket import fromshare # type: ignore\n\n 
sock_data = sock.share(os.getpid()) # type: ignore\n return fromshare(sock_data)\n\n self.servers = []\n for sock in sockets:\n if config.workers > 1 and platform.system() == \"Windows\":\n sock = _share_socket(sock)\n server = await asyncio.start_server(\n handler, sock=sock, ssl=config.ssl, backlog=config.backlog\n )\n self.servers.append(server)\n listeners = sockets\n\n elif config.fd is not None:\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await asyncio.start_server(\n handler, sock=sock, ssl=config.ssl, backlog=config.backlog\n )\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n elif config.uds is not None:\n # Create a socket using UNIX domain socket.\n uds_perms = 0o666\n if os.path.exists(config.uds):\n uds_perms = os.stat(config.uds).st_mode\n server = await _start_unix_server(\n handler, path=config.uds, ssl=config.ssl, backlog=config.backlog\n )\n os.chmod(config.uds, uds_perms)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n else:\n # Standard case. Create a socket from a host/port pair.\n try:\n server = await asyncio.start_server(\n handler,\n host=config.host,\n port=config.port,\n ssl=config.ssl,\n backlog=config.backlog,\n )\n except OSError as exc:\n logger.error(exc)\n await self.lifespan.shutdown()\n sys.exit(1)\n\n assert server.sockets is not None\n listeners = server.sockets\n self.servers = [server]\n\n if sockets is None:\n self._log_started_message(listeners)\n else:\n # We're most likely running multiple workers, so a message has already been\n # logged by `config.bind_socket()`.\n pass\n\n self.started = True\n\n def _log_started_message(self, listeners: List[socket.SocketType]) -> None:\n config = self.config\n\n if config.fd is not None:\n sock = listeners[0]\n logger.info(\n \"Uvicorn running on socket %s (Press CTRL+C to quit)\",\n sock.getsockname(),\n )\n\n elif config.uds is not None:\n logger.info(\n \"Uvicorn running on unix socket %s (Press CTRL+C to quit)\", config.uds\n )\n\n else:\n addr_format = \"%s://%s:%d\"\n host = \"0.0.0.0\" if config.host is None else config.host\n if \":\" in host:\n # It's an IPv6 address.\n addr_format = \"%s://[%s]:%d\"\n\n port = config.port\n if port == 0:\n port = listeners[0].getsockname()[1]\n\n protocol_name = \"https\" if config.ssl else \"http\"\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = (\n \"Uvicorn running on \"\n + click.style(addr_format, bold=True)\n + \" (Press CTRL+C to quit)\"\n )\n logger.info(\n message,\n protocol_name,\n host,\n port,\n extra={\"color_message\": color_message},\n )\n\n async def main_loop(self) -> None:\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter: int) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n\n if self.config.date_header:\n date_header = [(b\"date\", current_date)]\n else:\n date_header = []\n\n self.server_state.default_headers = (\n date_header + self.config.encoded_headers\n )\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if current_time - 
self.last_notified > self.config.timeout_notify:\n self.last_notified = current_time\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self, sockets: Optional[List[socket.socket]] = None) -> None:\n logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n for server in self.servers:\n server.close()\n for sock in sockets or []:\n sock.close()\n for server in self.servers:\n await server.wait_closed()\n\n # Request shutdown on all existing connections.\n for connection in list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n def install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError: # pragma: no cover\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig: signal.Signals, frame: FrameType) -> None:\n\n if self.should_exit:\n self.force_exit = True\n else:\n self.should_exit = True\n", "path": "uvicorn/server.py" } ]
[ { "content": "import asyncio\nimport logging\nimport os\nimport platform\nimport signal\nimport socket\nimport sys\nimport threading\nimport time\nfrom email.utils import formatdate\nfrom types import FrameType\nfrom typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple, Union\n\nimport click\n\nfrom uvicorn._handlers.http import handle_http\nfrom uvicorn.config import Config\n\nif TYPE_CHECKING:\n from uvicorn.protocols.http.h11_impl import H11Protocol\n from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol\n from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol\n from uvicorn.protocols.websockets.wsproto_impl import WSProtocol\n\n Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol]\n\nif sys.platform != \"win32\":\n from asyncio import start_unix_server as _start_unix_server\nelse:\n\n async def _start_unix_server(*args: Any, **kwargs: Any) -> Any:\n raise NotImplementedError(\"Cannot start a unix server on win32\")\n\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self) -> None:\n self.total_requests = 0\n self.connections: Set[\"Protocols\"] = set()\n self.tasks: Set[asyncio.Task] = set()\n self.default_headers: List[Tuple[bytes, bytes]] = []\n\n\nclass Server:\n def __init__(self, config: Config) -> None:\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n self.last_notified = 0.0\n\n def run(self, sockets: Optional[List[socket.socket]] = None) -> None:\n self.config.setup_event_loop()\n if sys.version_info >= (3, 7):\n return asyncio.run(self.serve(sockets=sockets))\n return asyncio.get_event_loop().run_until_complete(self.serve(sockets=sockets))\n\n async def serve(self, sockets: Optional[List[socket.socket]] = None) -> None:\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.lifespan = config.lifespan_class(config)\n\n self.install_signal_handlers()\n\n message = \"Started server process [%d]\"\n color_message = \"Started server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n await self.startup(sockets=sockets)\n if self.should_exit:\n return\n await self.main_loop()\n await self.shutdown(sockets=sockets)\n\n message = \"Finished server process [%d]\"\n color_message = \"Finished server process [\" + click.style(\"%d\", fg=\"cyan\") + \"]\"\n logger.info(message, process_id, extra={\"color_message\": color_message})\n\n async def startup(self, sockets: list = None) -> None:\n await self.lifespan.startup()\n if self.lifespan.should_exit:\n self.should_exit = True\n return\n\n config = self.config\n\n async def handler(\n reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n await handle_http(\n reader, writer, server_state=self.server_state, config=config\n )\n\n if sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n\n def _share_socket(sock: socket.SocketType) -> socket.SocketType:\n # Windows requires the socket be explicitly shared across\n # multiple workers (processes).\n from socket import fromshare # type: ignore\n\n 
sock_data = sock.share(os.getpid()) # type: ignore\n return fromshare(sock_data)\n\n self.servers = []\n for sock in sockets:\n if config.workers > 1 and platform.system() == \"Windows\":\n sock = _share_socket(sock)\n server = await asyncio.start_server(\n handler, sock=sock, ssl=config.ssl, backlog=config.backlog\n )\n self.servers.append(server)\n listeners = sockets\n\n elif config.fd is not None:\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await asyncio.start_server(\n handler, sock=sock, ssl=config.ssl, backlog=config.backlog\n )\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n elif config.uds is not None:\n # Create a socket using UNIX domain socket.\n uds_perms = 0o666\n if os.path.exists(config.uds):\n uds_perms = os.stat(config.uds).st_mode\n server = await _start_unix_server(\n handler, path=config.uds, ssl=config.ssl, backlog=config.backlog\n )\n os.chmod(config.uds, uds_perms)\n assert server.sockets is not None # mypy\n listeners = server.sockets\n self.servers = [server]\n\n else:\n # Standard case. Create a socket from a host/port pair.\n try:\n server = await asyncio.start_server(\n handler,\n host=config.host,\n port=config.port,\n ssl=config.ssl,\n backlog=config.backlog,\n )\n except OSError as exc:\n logger.error(exc)\n await self.lifespan.shutdown()\n sys.exit(1)\n\n assert server.sockets is not None\n listeners = server.sockets\n self.servers = [server]\n\n if sockets is None:\n self._log_started_message(listeners)\n else:\n # We're most likely running multiple workers, so a message has already been\n # logged by `config.bind_socket()`.\n pass\n\n self.started = True\n\n def _log_started_message(self, listeners: List[socket.SocketType]) -> None:\n config = self.config\n\n if config.fd is not None:\n sock = listeners[0]\n logger.info(\n \"Uvicorn running on socket %s (Press CTRL+C to quit)\",\n sock.getsockname(),\n )\n\n elif config.uds is not None:\n logger.info(\n \"Uvicorn running on unix socket %s (Press CTRL+C to quit)\", config.uds\n )\n\n else:\n addr_format = \"%s://%s:%d\"\n host = \"0.0.0.0\" if config.host is None else config.host\n if \":\" in host:\n # It's an IPv6 address.\n addr_format = \"%s://[%s]:%d\"\n\n port = config.port\n if port == 0:\n port = listeners[0].getsockname()[1]\n\n protocol_name = \"https\" if config.ssl else \"http\"\n message = f\"Uvicorn running on {addr_format} (Press CTRL+C to quit)\"\n color_message = (\n \"Uvicorn running on \"\n + click.style(addr_format, bold=True)\n + \" (Press CTRL+C to quit)\"\n )\n logger.info(\n message,\n protocol_name,\n host,\n port,\n extra={\"color_message\": color_message},\n )\n\n async def main_loop(self) -> None:\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter: int) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n\n if self.config.date_header:\n date_header = [(b\"date\", current_date)]\n else:\n date_header = []\n\n self.server_state.default_headers = (\n date_header + self.config.encoded_headers\n )\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if current_time - 
self.last_notified > self.config.timeout_notify:\n self.last_notified = current_time\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self, sockets: Optional[List[socket.socket]] = None) -> None:\n logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n for server in self.servers:\n server.close()\n for sock in sockets or []:\n sock.close()\n for server in self.servers:\n await server.wait_closed()\n\n # Request shutdown on all existing connections.\n for connection in list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n def install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError: # pragma: no cover\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig: signal.Signals, frame: FrameType) -> None:\n\n if self.should_exit and sig == signal.SIGINT:\n self.force_exit = True\n else:\n self.should_exit = True\n", "path": "uvicorn/server.py" } ]
diff --git a/uvicorn/server.py b/uvicorn/server.py index 6aa694cae..bc5a4fddf 100644 --- a/uvicorn/server.py +++ b/uvicorn/server.py @@ -310,7 +310,7 @@ def install_signal_handlers(self) -> None: def handle_exit(self, sig: signal.Signals, frame: FrameType) -> None: - if self.should_exit: + if self.should_exit and sig == signal.SIGINT: self.force_exit = True else: self.should_exit = True
projectmesa__mesa-989
Color patches takes a long time to load

When you hit "reset" to load the color patches example, it takes a long time to load. Not sure why. As a result, I thought it was broken.

To recreate...

```
cd examples/color_patches
python run.py
```

Wait for the patches to load when the browser window pops up, OR hit reset and wait for the color patches to load.

This is roughly what it should look like...

<img width="407" alt="screen shot 2018-04-01 at 10 03 33 pm" src="https://user-images.githubusercontent.com/166734/38180194-95c2acb0-35f8-11e8-8c1b-8bd7a6d25098.png">
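Judging from the patch included later in this record, the slow load most likely comes from sizing the model in canvas pixels rather than grid cells, so the model builds a grid two orders of magnitude larger than the one being drawn. A rough sketch of the arithmetic (constants taken from the example's `server.py`; the variable names below are illustrative, not identifiers from the example):

```python
# Grid dimensions are in cells; canvas dimensions are in pixels.
grid_rows, grid_cols, cell_size = 50, 25, 10
canvas_width = grid_rows * cell_size   # 500 px
canvas_height = grid_cols * cell_size  # 250 px

# The ColorPatches model should receive the cell counts ...
model_kwargs = {"width": grid_rows, "height": grid_cols}         # 50 x 25 = 1,250 cells
# ... whereas passing the pixel sizes makes it build a grid 100x larger:
wrong_kwargs = {"width": canvas_width, "height": canvas_height}  # 500 x 250 = 125,000 cells
```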
[ { "content": "\"\"\"\nhandles the definition of the canvas parameters and\nthe drawing of the model representation on the canvas\n\"\"\"\n# import webbrowser\n\nfrom mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import ColorPatches\n\n_COLORS = [\n \"Aqua\",\n \"Blue\",\n \"Fuchsia\",\n \"Gray\",\n \"Green\",\n \"Lime\",\n \"Maroon\",\n \"Navy\",\n \"Olive\",\n \"Orange\",\n \"Purple\",\n \"Red\",\n \"Silver\",\n \"Teal\",\n \"White\",\n \"Yellow\",\n]\n\n\ngrid_rows = 50\ngrid_cols = 25\ncell_size = 10\ncanvas_width = grid_rows * cell_size\ncanvas_height = grid_cols * cell_size\n\n\ndef color_patch_draw(cell):\n \"\"\"\n This function is registered with the visualization server to be called\n each tick to indicate how to draw the cell in its current state.\n\n :param cell: the cell in the simulation\n\n :return: the portrayal dictionary.\n\n \"\"\"\n assert cell is not None\n portrayal = {\"Shape\": \"rect\", \"w\": 1, \"h\": 1, \"Filled\": \"true\", \"Layer\": 0}\n portrayal[\"x\"] = cell.get_row()\n portrayal[\"y\"] = cell.get_col()\n portrayal[\"Color\"] = _COLORS[cell.get_state()]\n return portrayal\n\n\ncanvas_element = CanvasGrid(\n color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height\n)\n\nserver = ModularServer(\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n {\"width\": canvas_width, \"height\": canvas_height},\n)\n\n# webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "path": "examples/color_patches/color_patches/server.py" } ]
[ { "content": "\"\"\"\nhandles the definition of the canvas parameters and\nthe drawing of the model representation on the canvas\n\"\"\"\n# import webbrowser\n\nfrom mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .model import ColorPatches\n\n_COLORS = [\n \"Aqua\",\n \"Blue\",\n \"Fuchsia\",\n \"Gray\",\n \"Green\",\n \"Lime\",\n \"Maroon\",\n \"Navy\",\n \"Olive\",\n \"Orange\",\n \"Purple\",\n \"Red\",\n \"Silver\",\n \"Teal\",\n \"White\",\n \"Yellow\",\n]\n\n\ngrid_rows = 50\ngrid_cols = 25\ncell_size = 10\ncanvas_width = grid_rows * cell_size\ncanvas_height = grid_cols * cell_size\n\n\ndef color_patch_draw(cell):\n \"\"\"\n This function is registered with the visualization server to be called\n each tick to indicate how to draw the cell in its current state.\n\n :param cell: the cell in the simulation\n\n :return: the portrayal dictionary.\n\n \"\"\"\n assert cell is not None\n portrayal = {\"Shape\": \"rect\", \"w\": 1, \"h\": 1, \"Filled\": \"true\", \"Layer\": 0}\n portrayal[\"x\"] = cell.get_row()\n portrayal[\"y\"] = cell.get_col()\n portrayal[\"Color\"] = _COLORS[cell.get_state()]\n return portrayal\n\n\ncanvas_element = CanvasGrid(\n color_patch_draw, grid_rows, grid_cols, canvas_width, canvas_height\n)\n\nserver = ModularServer(\n ColorPatches,\n [canvas_element],\n \"Color Patches\",\n {\"width\": grid_rows, \"height\": grid_cols},\n)\n\n# webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable\n", "path": "examples/color_patches/color_patches/server.py" } ]
diff --git a/examples/color_patches/color_patches/server.py b/examples/color_patches/color_patches/server.py index 711c31304bb..e0636c2492b 100644 --- a/examples/color_patches/color_patches/server.py +++ b/examples/color_patches/color_patches/server.py @@ -62,7 +62,7 @@ def color_patch_draw(cell): ColorPatches, [canvas_element], "Color Patches", - {"width": canvas_width, "height": canvas_height}, + {"width": grid_rows, "height": grid_cols}, ) # webbrowser.open('http://127.0.0.1:8521') # TODO: make this configurable
getnikola__nikola-2363
output of 'nikola auto' not visible with light themes

I use the solarized-light theme in my (gnome) terminal, and when I e.g. run 'nikola build', most of the output is not clearly visible - see the shot: http://pasteboard.co/1pIJ9kw9.png. So I wonder if something can be done to make it more UI-friendly for light color themes?
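A minimal sketch of one way to address this, mirroring the opt-out that the patch in this record adds: colorize stderr only when it is a TTY, the platform is not Windows, and the user has not set a `NIKOLA_MONO` environment variable, so people on light terminal themes can run e.g. `NIKOLA_MONO=1 nikola build` to get plain output.

```python
import os
import sys

# Colorful output is opt-out: setting NIKOLA_MONO to any value disables it.
colorful = (
    sys.stderr.isatty()
    and os.name != "nt"
    and os.getenv("NIKOLA_MONO") is None
)
```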
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"The main function of Nikola.\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nfrom collections import defaultdict\nimport os\nimport shutil\ntry:\n import readline # NOQA\nexcept ImportError:\n pass # This is only so raw_input/input does nicer things if it's available\nimport sys\nimport traceback\n\nfrom doit.loader import generate_tasks\nfrom doit.cmd_base import TaskLoader\nfrom doit.reporter import ExecutedOnlyReporter\nfrom doit.doit_cmd import DoitMain\nfrom doit.cmd_help import Help as DoitHelp\nfrom doit.cmd_run import Run as DoitRun\nfrom doit.cmd_clean import Clean as DoitClean\nfrom doit.cmd_completion import TabCompletion\nfrom doit.cmd_auto import Auto as DoitAuto\nfrom logbook import NullHandler\nfrom blinker import signal\n\nfrom . import __version__\nfrom .plugin_categories import Command\nfrom .nikola import Nikola\nfrom .utils import sys_decode, sys_encode, get_root_dir, req_missing, LOGGER, STRICT_HANDLER, STDERR_HANDLER, ColorfulStderrHandler\n\nif sys.version_info[0] == 3:\n import importlib.machinery\nelse:\n import imp\n\nconfig = {}\n\n# DO NOT USE unless you know what you are doing!\n_RETURN_DOITNIKOLA = False\n\n\ndef main(args=None):\n \"\"\"Run Nikola.\"\"\"\n colorful = False\n if sys.stderr.isatty() and os.name != 'nt':\n colorful = True\n\n ColorfulStderrHandler._colorful = colorful\n\n if args is None:\n args = sys.argv[1:]\n\n oargs = args\n args = [sys_decode(arg) for arg in args]\n\n conf_filename = 'conf.py'\n conf_filename_bytes = b'conf.py'\n conf_filename_changed = False\n for index, arg in enumerate(args):\n if arg[:7] == '--conf=':\n del args[index]\n del oargs[index]\n conf_filename = arg[7:]\n conf_filename_bytes = sys_encode(arg[7:])\n conf_filename_changed = True\n break\n\n quiet = False\n strict = False\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n strict = True\n if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:\n NullHandler().push_application()\n quiet = True\n if not quiet and not strict:\n NullHandler().push_application()\n STDERR_HANDLER[0].push_application()\n\n global config\n\n original_cwd = os.getcwd()\n\n # Those commands do not require a `conf.py`. 
(Issue #1132)\n # Moreover, actually having one somewhere in the tree can be bad, putting\n # the output of that command (the new site) in an unknown directory that is\n # not the current working directory. (does not apply to `version`)\n argname = args[0] if len(args) > 0 else None\n if argname and argname not in ['init', 'version'] and not argname.startswith('import_'):\n root = get_root_dir()\n if root:\n os.chdir(root)\n # Help and imports don't require config, but can use one if it exists\n needs_config_file = (argname != 'help') and not argname.startswith('import_')\n else:\n needs_config_file = False\n\n sys.path.append('')\n try:\n if sys.version_info[0] == 3:\n loader = importlib.machinery.SourceFileLoader(\"conf\", conf_filename)\n conf = loader.load_module()\n else:\n conf = imp.load_source(\"conf\", conf_filename_bytes)\n config = conf.__dict__\n except Exception:\n if os.path.exists(conf_filename):\n msg = traceback.format_exc(0)\n LOGGER.error('\"{0}\" cannot be parsed.\\n{1}'.format(conf_filename, msg))\n return 1\n elif needs_config_file and conf_filename_changed:\n LOGGER.error('Cannot find configuration file \"{0}\".'.format(conf_filename))\n return 1\n config = {}\n\n if conf_filename_changed:\n LOGGER.info(\"Using config file '{0}'\".format(conf_filename))\n\n invariant = False\n\n if len(args) > 0 and args[0] == 'build' and '--invariant' in args:\n try:\n import freezegun\n freeze = freezegun.freeze_time(\"2038-01-01\")\n freeze.start()\n invariant = True\n except ImportError:\n req_missing(['freezegun'], 'perform invariant builds')\n\n if config:\n if os.path.exists('plugins') and not os.path.exists('plugins/__init__.py'):\n with open('plugins/__init__.py', 'w') as fh:\n fh.write('# Plugin modules go here.')\n\n config['__colorful__'] = colorful\n config['__invariant__'] = invariant\n config['__quiet__'] = quiet\n config['__configuration_filename__'] = conf_filename\n config['__cwd__'] = original_cwd\n site = Nikola(**config)\n DN = DoitNikola(site, quiet)\n if _RETURN_DOITNIKOLA:\n return DN\n _ = DN.run(oargs)\n\n if site.invariant:\n freeze.stop()\n return _\n\n\nclass Help(DoitHelp):\n \"\"\"Show Nikola usage.\"\"\"\n\n @staticmethod\n def print_usage(cmds):\n \"\"\"Print nikola \"usage\" (basic help) instructions.\"\"\"\n # Remove 'run'. Nikola uses 'build', though we support 'run' for\n # people used to it (eg. doit users).\n # WARNING: 'run' is the vanilla doit command, without support for\n # --strict, --invariant and --quiet.\n del cmds['run']\n\n print(\"Nikola is a tool to create static websites and blogs. 
For full documentation and more information, please visit https://getnikola.com/\\n\\n\")\n print(\"Available commands:\")\n for cmd_name in sorted(cmds.keys()):\n cmd = cmds[cmd_name]\n print(\" nikola {:20s} {}\".format(cmd_name, cmd.doc_purpose))\n print(\"\")\n print(\" nikola help show help / reference\")\n print(\" nikola help <command> show command usage\")\n print(\" nikola help <task-name> show task usage\")\n\n\nclass Build(DoitRun):\n \"\"\"Expose \"run\" command as \"build\" for backwards compatibility.\"\"\"\n\n def __init__(self, *args, **kw):\n \"\"\"Initialize Build.\"\"\"\n opts = list(self.cmd_options)\n opts.append(\n {\n 'name': 'strict',\n 'long': 'strict',\n 'default': False,\n 'type': bool,\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n opts.append(\n {\n 'name': 'invariant',\n 'long': 'invariant',\n 'default': False,\n 'type': bool,\n 'help': \"Generate invariant output (for testing only!).\",\n }\n )\n opts.append(\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Run quietly.\",\n }\n )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n\n\nclass Clean(DoitClean):\n \"\"\"Clean site, including the cache directory.\"\"\"\n\n def clean_tasks(self, tasks, dryrun):\n \"\"\"Clean tasks.\"\"\"\n if not dryrun and config:\n cache_folder = config.get('CACHE_FOLDER', 'cache')\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n return super(Clean, self).clean_tasks(tasks, dryrun)\n\n# Nikola has its own \"auto\" commands that uses livereload.\n# Expose original doit \"auto\" command as \"doit_auto\".\nDoitAuto.name = 'doit_auto'\n\n\nclass NikolaTaskLoader(TaskLoader):\n \"\"\"Nikola-specific task loader.\"\"\"\n\n def __init__(self, nikola, quiet=False):\n \"\"\"Initialize the loader.\"\"\"\n self.nikola = nikola\n self.quiet = quiet\n\n def load_tasks(self, cmd, opt_values, pos_args):\n \"\"\"Load Nikola tasks.\"\"\"\n if self.quiet:\n DOIT_CONFIG = {\n 'verbosity': 0,\n 'reporter': 'zero',\n }\n else:\n DOIT_CONFIG = {\n 'reporter': ExecutedOnlyReporter,\n 'outfile': sys.stderr,\n }\n DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']\n DOIT_CONFIG.update(self.nikola._doit_config)\n tasks = generate_tasks(\n 'render_site',\n self.nikola.gen_tasks('render_site', \"Task\", 'Group of tasks to render the site.'))\n latetasks = generate_tasks(\n 'post_render',\n self.nikola.gen_tasks('post_render', \"LateTask\", 'Group of tasks to be executed after site is rendered.'))\n signal('initialized').send(self.nikola)\n return tasks + latetasks, DOIT_CONFIG\n\n\nclass DoitNikola(DoitMain):\n \"\"\"Nikola-specific implementation of DoitMain.\"\"\"\n\n # overwite help command\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean, DoitAuto]\n TASK_LOADER = NikolaTaskLoader\n\n def __init__(self, nikola, quiet=False):\n \"\"\"Initialzie DoitNikola.\"\"\"\n super(DoitNikola, self).__init__()\n self.nikola = nikola\n nikola.doit = self\n self.task_loader = self.TASK_LOADER(nikola, quiet)\n\n def get_cmds(self):\n \"\"\"Get commands.\"\"\"\n # core doit commands\n cmds = DoitMain.get_cmds(self)\n # load nikola commands\n for name, cmd in self.nikola._commands.items():\n cmds[name] = cmd\n return cmds\n\n def run(self, cmd_args):\n \"\"\"Run Nikola.\"\"\"\n args = self.process_args(cmd_args)\n args = [sys_decode(arg) for arg in args]\n\n if len(args) == 0:\n cmd_args = ['help']\n args = ['help']\n\n if '--help' in args or '-h' in args:\n new_cmd_args = 
['help'] + cmd_args\n new_args = ['help'] + args\n\n cmd_args = []\n args = []\n\n for arg in new_cmd_args:\n if arg not in ('--help', '-h'):\n cmd_args.append(arg)\n for arg in new_args:\n if arg not in ('--help', '-h'):\n args.append(arg)\n\n if args[0] == 'help':\n self.nikola.init_plugins(commands_only=True)\n elif args[0] == 'plugin':\n self.nikola.init_plugins(load_all=True)\n else:\n self.nikola.init_plugins()\n\n sub_cmds = self.get_cmds()\n\n if any(arg in (\"--version\", '-V') for arg in args):\n cmd_args = ['version']\n args = ['version']\n if args[0] not in sub_cmds.keys():\n LOGGER.error(\"Unknown command {0}\".format(args[0]))\n sugg = defaultdict(list)\n sub_filtered = (i for i in sub_cmds.keys() if i != 'run')\n for c in sub_filtered:\n d = levenshtein(c, args[0])\n sugg[d].append(c)\n if sugg.keys():\n best_sugg = sugg[min(sugg.keys())]\n if len(best_sugg) == 1:\n LOGGER.info('Did you mean \"{}\"?'.format(best_sugg[0]))\n else:\n LOGGER.info('Did you mean \"{}\" or \"{}\"?'.format('\", \"'.join(best_sugg[:-1]), best_sugg[-1]))\n return 3\n\n if not sub_cmds[args[0]] in (Help, TabCompletion) and not isinstance(sub_cmds[args[0]], Command):\n if not self.nikola.configured:\n LOGGER.error(\"This command needs to run inside an \"\n \"existing Nikola site.\")\n return 3\n return super(DoitNikola, self).run(cmd_args)\n\n @staticmethod\n def print_version():\n \"\"\"Print Nikola version.\"\"\"\n print(\"Nikola v\" + __version__)\n\n\ndef levenshtein(s1, s2):\n u\"\"\"Calculate the Levenshtein distance of two strings.\n\n Implementation from Wikibooks:\n https://en.wikibooks.org/w/index.php?title=Algorithm_Implementation/Strings/Levenshtein_distance&oldid=2974448#Python\n Copyright © The Wikibooks contributors (CC BY-SA/fair use citation); edited to match coding style and add an exception.\n \"\"\"\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n # j+1 instead of j since previous_row and current_row are one character longer than s2\n insertions = previous_row[j + 1] + 1\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n", "path": "nikola/__main__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"The main function of Nikola.\"\"\"\n\nfrom __future__ import print_function, unicode_literals\nfrom collections import defaultdict\nimport os\nimport shutil\ntry:\n import readline # NOQA\nexcept ImportError:\n pass # This is only so raw_input/input does nicer things if it's available\nimport sys\nimport traceback\n\nfrom doit.loader import generate_tasks\nfrom doit.cmd_base import TaskLoader\nfrom doit.reporter import ExecutedOnlyReporter\nfrom doit.doit_cmd import DoitMain\nfrom doit.cmd_help import Help as DoitHelp\nfrom doit.cmd_run import Run as DoitRun\nfrom doit.cmd_clean import Clean as DoitClean\nfrom doit.cmd_completion import TabCompletion\nfrom doit.cmd_auto import Auto as DoitAuto\nfrom logbook import NullHandler\nfrom blinker import signal\n\nfrom . import __version__\nfrom .plugin_categories import Command\nfrom .nikola import Nikola\nfrom .utils import sys_decode, sys_encode, get_root_dir, req_missing, LOGGER, STRICT_HANDLER, STDERR_HANDLER, ColorfulStderrHandler\n\nif sys.version_info[0] == 3:\n import importlib.machinery\nelse:\n import imp\n\nconfig = {}\n\n# DO NOT USE unless you know what you are doing!\n_RETURN_DOITNIKOLA = False\n\n\ndef main(args=None):\n \"\"\"Run Nikola.\"\"\"\n colorful = False\n if sys.stderr.isatty() and os.name != 'nt' and os.getenv('NIKOLA_MONO') is None:\n colorful = True\n\n ColorfulStderrHandler._colorful = colorful\n\n if args is None:\n args = sys.argv[1:]\n\n oargs = args\n args = [sys_decode(arg) for arg in args]\n\n conf_filename = 'conf.py'\n conf_filename_bytes = b'conf.py'\n conf_filename_changed = False\n for index, arg in enumerate(args):\n if arg[:7] == '--conf=':\n del args[index]\n del oargs[index]\n conf_filename = arg[7:]\n conf_filename_bytes = sys_encode(arg[7:])\n conf_filename_changed = True\n break\n\n quiet = False\n strict = False\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n strict = True\n if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:\n NullHandler().push_application()\n quiet = True\n if not quiet and not strict:\n NullHandler().push_application()\n STDERR_HANDLER[0].push_application()\n\n global config\n\n original_cwd = os.getcwd()\n\n # Those commands do not require a `conf.py`. 
(Issue #1132)\n # Moreover, actually having one somewhere in the tree can be bad, putting\n # the output of that command (the new site) in an unknown directory that is\n # not the current working directory. (does not apply to `version`)\n argname = args[0] if len(args) > 0 else None\n if argname and argname not in ['init', 'version'] and not argname.startswith('import_'):\n root = get_root_dir()\n if root:\n os.chdir(root)\n # Help and imports don't require config, but can use one if it exists\n needs_config_file = (argname != 'help') and not argname.startswith('import_')\n else:\n needs_config_file = False\n\n sys.path.append('')\n try:\n if sys.version_info[0] == 3:\n loader = importlib.machinery.SourceFileLoader(\"conf\", conf_filename)\n conf = loader.load_module()\n else:\n conf = imp.load_source(\"conf\", conf_filename_bytes)\n config = conf.__dict__\n except Exception:\n if os.path.exists(conf_filename):\n msg = traceback.format_exc(0)\n LOGGER.error('\"{0}\" cannot be parsed.\\n{1}'.format(conf_filename, msg))\n return 1\n elif needs_config_file and conf_filename_changed:\n LOGGER.error('Cannot find configuration file \"{0}\".'.format(conf_filename))\n return 1\n config = {}\n\n if conf_filename_changed:\n LOGGER.info(\"Using config file '{0}'\".format(conf_filename))\n\n invariant = False\n\n if len(args) > 0 and args[0] == 'build' and '--invariant' in args:\n try:\n import freezegun\n freeze = freezegun.freeze_time(\"2038-01-01\")\n freeze.start()\n invariant = True\n except ImportError:\n req_missing(['freezegun'], 'perform invariant builds')\n\n if config:\n if os.path.exists('plugins') and not os.path.exists('plugins/__init__.py'):\n with open('plugins/__init__.py', 'w') as fh:\n fh.write('# Plugin modules go here.')\n\n config['__colorful__'] = colorful\n config['__invariant__'] = invariant\n config['__quiet__'] = quiet\n config['__configuration_filename__'] = conf_filename\n config['__cwd__'] = original_cwd\n site = Nikola(**config)\n DN = DoitNikola(site, quiet)\n if _RETURN_DOITNIKOLA:\n return DN\n _ = DN.run(oargs)\n\n if site.invariant:\n freeze.stop()\n return _\n\n\nclass Help(DoitHelp):\n \"\"\"Show Nikola usage.\"\"\"\n\n @staticmethod\n def print_usage(cmds):\n \"\"\"Print nikola \"usage\" (basic help) instructions.\"\"\"\n # Remove 'run'. Nikola uses 'build', though we support 'run' for\n # people used to it (eg. doit users).\n # WARNING: 'run' is the vanilla doit command, without support for\n # --strict, --invariant and --quiet.\n del cmds['run']\n\n print(\"Nikola is a tool to create static websites and blogs. 
For full documentation and more information, please visit https://getnikola.com/\\n\\n\")\n print(\"Available commands:\")\n for cmd_name in sorted(cmds.keys()):\n cmd = cmds[cmd_name]\n print(\" nikola {:20s} {}\".format(cmd_name, cmd.doc_purpose))\n print(\"\")\n print(\" nikola help show help / reference\")\n print(\" nikola help <command> show command usage\")\n print(\" nikola help <task-name> show task usage\")\n\n\nclass Build(DoitRun):\n \"\"\"Expose \"run\" command as \"build\" for backwards compatibility.\"\"\"\n\n def __init__(self, *args, **kw):\n \"\"\"Initialize Build.\"\"\"\n opts = list(self.cmd_options)\n opts.append(\n {\n 'name': 'strict',\n 'long': 'strict',\n 'default': False,\n 'type': bool,\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n opts.append(\n {\n 'name': 'invariant',\n 'long': 'invariant',\n 'default': False,\n 'type': bool,\n 'help': \"Generate invariant output (for testing only!).\",\n }\n )\n opts.append(\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Run quietly.\",\n }\n )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n\n\nclass Clean(DoitClean):\n \"\"\"Clean site, including the cache directory.\"\"\"\n\n def clean_tasks(self, tasks, dryrun):\n \"\"\"Clean tasks.\"\"\"\n if not dryrun and config:\n cache_folder = config.get('CACHE_FOLDER', 'cache')\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n return super(Clean, self).clean_tasks(tasks, dryrun)\n\n# Nikola has its own \"auto\" commands that uses livereload.\n# Expose original doit \"auto\" command as \"doit_auto\".\nDoitAuto.name = 'doit_auto'\n\n\nclass NikolaTaskLoader(TaskLoader):\n \"\"\"Nikola-specific task loader.\"\"\"\n\n def __init__(self, nikola, quiet=False):\n \"\"\"Initialize the loader.\"\"\"\n self.nikola = nikola\n self.quiet = quiet\n\n def load_tasks(self, cmd, opt_values, pos_args):\n \"\"\"Load Nikola tasks.\"\"\"\n if self.quiet:\n DOIT_CONFIG = {\n 'verbosity': 0,\n 'reporter': 'zero',\n }\n else:\n DOIT_CONFIG = {\n 'reporter': ExecutedOnlyReporter,\n 'outfile': sys.stderr,\n }\n DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']\n DOIT_CONFIG.update(self.nikola._doit_config)\n tasks = generate_tasks(\n 'render_site',\n self.nikola.gen_tasks('render_site', \"Task\", 'Group of tasks to render the site.'))\n latetasks = generate_tasks(\n 'post_render',\n self.nikola.gen_tasks('post_render', \"LateTask\", 'Group of tasks to be executed after site is rendered.'))\n signal('initialized').send(self.nikola)\n return tasks + latetasks, DOIT_CONFIG\n\n\nclass DoitNikola(DoitMain):\n \"\"\"Nikola-specific implementation of DoitMain.\"\"\"\n\n # overwite help command\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean, DoitAuto]\n TASK_LOADER = NikolaTaskLoader\n\n def __init__(self, nikola, quiet=False):\n \"\"\"Initialzie DoitNikola.\"\"\"\n super(DoitNikola, self).__init__()\n self.nikola = nikola\n nikola.doit = self\n self.task_loader = self.TASK_LOADER(nikola, quiet)\n\n def get_cmds(self):\n \"\"\"Get commands.\"\"\"\n # core doit commands\n cmds = DoitMain.get_cmds(self)\n # load nikola commands\n for name, cmd in self.nikola._commands.items():\n cmds[name] = cmd\n return cmds\n\n def run(self, cmd_args):\n \"\"\"Run Nikola.\"\"\"\n args = self.process_args(cmd_args)\n args = [sys_decode(arg) for arg in args]\n\n if len(args) == 0:\n cmd_args = ['help']\n args = ['help']\n\n if '--help' in args or '-h' in args:\n new_cmd_args = 
['help'] + cmd_args\n new_args = ['help'] + args\n\n cmd_args = []\n args = []\n\n for arg in new_cmd_args:\n if arg not in ('--help', '-h'):\n cmd_args.append(arg)\n for arg in new_args:\n if arg not in ('--help', '-h'):\n args.append(arg)\n\n if args[0] == 'help':\n self.nikola.init_plugins(commands_only=True)\n elif args[0] == 'plugin':\n self.nikola.init_plugins(load_all=True)\n else:\n self.nikola.init_plugins()\n\n sub_cmds = self.get_cmds()\n\n if any(arg in (\"--version\", '-V') for arg in args):\n cmd_args = ['version']\n args = ['version']\n if args[0] not in sub_cmds.keys():\n LOGGER.error(\"Unknown command {0}\".format(args[0]))\n sugg = defaultdict(list)\n sub_filtered = (i for i in sub_cmds.keys() if i != 'run')\n for c in sub_filtered:\n d = levenshtein(c, args[0])\n sugg[d].append(c)\n if sugg.keys():\n best_sugg = sugg[min(sugg.keys())]\n if len(best_sugg) == 1:\n LOGGER.info('Did you mean \"{}\"?'.format(best_sugg[0]))\n else:\n LOGGER.info('Did you mean \"{}\" or \"{}\"?'.format('\", \"'.join(best_sugg[:-1]), best_sugg[-1]))\n return 3\n\n if not sub_cmds[args[0]] in (Help, TabCompletion) and not isinstance(sub_cmds[args[0]], Command):\n if not self.nikola.configured:\n LOGGER.error(\"This command needs to run inside an \"\n \"existing Nikola site.\")\n return 3\n return super(DoitNikola, self).run(cmd_args)\n\n @staticmethod\n def print_version():\n \"\"\"Print Nikola version.\"\"\"\n print(\"Nikola v\" + __version__)\n\n\ndef levenshtein(s1, s2):\n u\"\"\"Calculate the Levenshtein distance of two strings.\n\n Implementation from Wikibooks:\n https://en.wikibooks.org/w/index.php?title=Algorithm_Implementation/Strings/Levenshtein_distance&oldid=2974448#Python\n Copyright © The Wikibooks contributors (CC BY-SA/fair use citation); edited to match coding style and add an exception.\n \"\"\"\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n # j+1 instead of j since previous_row and current_row are one character longer than s2\n insertions = previous_row[j + 1] + 1\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n", "path": "nikola/__main__.py" } ]
diff --git a/CHANGES.txt b/CHANGES.txt index b51bf68dd6..c221b36f34 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -4,6 +4,7 @@ New in master Features -------- +* Option to disable color output using NIKOLA_MONO envvar (Issue #2360) * Improved locale detection in Windows (Issue #2343) * Added ``enclosure_length`` meta field for better interop (Issue #2338) * New Lithuanian translation by Antanas Lasys diff --git a/nikola/__main__.py b/nikola/__main__.py index 92a009f557..f002768a4f 100644 --- a/nikola/__main__.py +++ b/nikola/__main__.py @@ -68,7 +68,7 @@ def main(args=None): """Run Nikola.""" colorful = False - if sys.stderr.isatty() and os.name != 'nt': + if sys.stderr.isatty() and os.name != 'nt' and os.getenv('NIKOLA_MONO') is None: colorful = True ColorfulStderrHandler._colorful = colorful
django-json-api__django-rest-framework-json-api-831
Drop end of life Django versions (2.1, 1.11)

This has been discussed before, but it is good to have a single dedicated issue for dropping these versions and adding the corresponding features :smile:

See https://www.djangoproject.com/download/ for the release schedule.
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.10,<3.12',\n 'django>=1.11,<3.1',\n ],\n extras_require={\n 'django-polymorphic': ['django-polymorphic>=2.0'],\n 'django-filter': ['django-filter>=2.0']\n },\n setup_requires=wheel,\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.10,<3.12',\n 'django>=2.2,<3.1',\n ],\n extras_require={\n 'django-polymorphic': ['django-polymorphic>=2.0'],\n 'django-filter': ['django-filter>=2.0']\n },\n setup_requires=wheel,\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/.travis.yml b/.travis.yml index 301ed0cc..ad495df9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,11 +5,6 @@ cache: pip # Favor explicit over implicit and use an explicit build matrix. matrix: allow_failures: - - env: TOXENV=py35-django111-drfmaster - - env: TOXENV=py36-django111-drfmaster - - env: TOXENV=py35-django21-drfmaster - - env: TOXENV=py36-django21-drfmaster - - env: TOXENV=py37-django21-drfmaster - env: TOXENV=py35-django22-drfmaster - env: TOXENV=py36-django22-drfmaster - env: TOXENV=py37-django22-drfmaster @@ -24,18 +19,6 @@ matrix: - python: 3.6 env: TOXENV=docs - - python: 3.5 - env: TOXENV=py35-django111-drf310 - - python: 3.5 - env: TOXENV=py35-django111-drf311 - - python: 3.5 - env: TOXENV=py35-django111-drfmaster - - python: 3.5 - env: TOXENV=py35-django21-drf310 - - python: 3.5 - env: TOXENV=py35-django21-drf311 - - python: 3.5 - env: TOXENV=py35-django21-drfmaster - python: 3.5 env: TOXENV=py35-django22-drf310 - python: 3.5 @@ -43,18 +26,6 @@ matrix: - python: 3.5 env: TOXENV=py35-django22-drfmaster - - python: 3.6 - env: TOXENV=py36-django111-drf310 - - python: 3.6 - env: TOXENV=py36-django111-drf311 - - python: 3.6 - env: TOXENV=py36-django111-drfmaster - - python: 3.6 - env: TOXENV=py36-django21-drf310 - - python: 3.6 - env: TOXENV=py36-django21-drf311 - - python: 3.6 - env: TOXENV=py36-django21-drfmaster - python: 3.6 env: TOXENV=py36-django22-drf310 - python: 3.6 @@ -66,12 +37,6 @@ matrix: - python: 3.6 env: TOXENV=py36-django30-drfmaster - - python: 3.7 - env: TOXENV=py37-django21-drf310 - - python: 3.7 - env: TOXENV=py37-django21-drf311 - - python: 3.7 - env: TOXENV=py37-django21-drfmaster - python: 3.7 env: TOXENV=py37-django22-drf310 - python: 3.7 diff --git a/CHANGELOG.md b/CHANGELOG.md index dcd05605..f4b110ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 Note that in line with [Django REST Framework policy](http://www.django-rest-framework.org/topics/release-notes/), any parts of the framework not mentioned in the documentation should generally be considered private API, and may be subject to change. +## [Unreleased] + +### Removed + +* Removed support for Django 1.11. +* Removed support for Django 2.1. + ## [3.2.0] - 2020-08-26 +This is the last release supporting Django 1.11 and Django 2.1. + ### Added * Added support for serializing nested serializers as attribute json value introducing setting `JSON_API_SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE` diff --git a/README.rst b/README.rst index bc52bbf5..07f18a8d 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ Requirements ------------ 1. Python (3.5, 3.6, 3.7, 3.8) -2. Django (1.11, 2.1, 2.2, 3.0) +2. Django (2.2, 3.0) 3. Django REST Framework (3.10, 3.11) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/docs/getting-started.md b/docs/getting-started.md index 58768e39..39ef6a88 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -52,7 +52,7 @@ like the following: ## Requirements 1. Python (3.5, 3.6, 3.7, 3.8) -2. Django (1.11, 2.1, 2.2, 3.0) +2. Django (2.2, 3.0) 3. Django REST Framework (3.10, 3.11) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. 
diff --git a/setup.py b/setup.py index d37c66f3..42d7d8c4 100755 --- a/setup.py +++ b/setup.py @@ -91,7 +91,7 @@ def get_package_data(package): install_requires=[ 'inflection>=0.3.0', 'djangorestframework>=3.10,<3.12', - 'django>=1.11,<3.1', + 'django>=2.2,<3.1', ], extras_require={ 'django-polymorphic': ['django-polymorphic>=2.0'], diff --git a/tox.ini b/tox.ini index 04b970ac..58956ee5 100644 --- a/tox.ini +++ b/tox.ini @@ -1,15 +1,12 @@ [tox] envlist = - py{35,36}-django{111}-drf{310,311,master}, - py{35,36,37}-django{21,22}-drf{310,311,master}, + py{35,36,37}-django22-drf{310,311,master}, py38-django22-drf{311,master}, - py{36,37,38}-django{30}-drf{311,master}, + py{36,37,38}-django30-drf{311,master}, lint,docs [testenv] deps = - django111: Django>=1.11,<1.12 - django21: Django>=2.1,<2.2 django22: Django>=2.2,<2.3 django30: Django>=3.0,<3.1 drf310: djangorestframework>=3.10.2,<3.11
googleapis__python-bigquery-648
chore: use paths for --cov arguments in noxfile

https://github.com/googleapis/python-bigquery/blob/6a48e80bc7d347f381b181f4cf81fef105d0ad0d/noxfile.py#L80-L81

This is needed to pull https://github.com/googleapis/synthtool/pull/859 from the templates.
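The two referenced noxfile lines currently pass dotted module names to pytest-cov; the templates change switches them to filesystem paths, which pytest-cov also accepts. A hedged sketch of what the updated `session.run` call inside the noxfile's `default()` session presumably looks like after pulling the template (the exact values come from synthtool, not from this record):

```python
    # Run py.test against the unit tests, measuring coverage by path.
    session.run(
        "py.test",
        "--quiet",
        "--cov=google/cloud/bigquery",  # path form of google.cloud.bigquery
        "--cov=tests/unit",             # path form of tests.unit
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
```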
[ { "content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n 
session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py" } ]
[ { "content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google/cloud/bigquery\",\n \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n 
session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py" } ]
diff --git a/noxfile.py b/noxfile.py index 7ba081660..654bbd093 100644 --- a/noxfile.py +++ b/noxfile.py @@ -77,8 +77,8 @@ def default(session, install_extras=True): session.run( "py.test", "--quiet", - "--cov=google.cloud.bigquery", - "--cov=tests.unit", + "--cov=google/cloud/bigquery", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=",
bookwyrm-social__bookwyrm-695
Remove the "Cover" text from the alt text of book covers if one is present Currently, when a book cover is present and is displayed, it's alt text consists of the book title, the text *Cover*, edition name, and the first published date. For example, via VoiceOver under Safari: ``` image The Night Circus cover (Hardcover, 2011) ``` The fact that this is a cover image is redundant, because users are already notified about the presence of an image. In this case, the textual data is actually more important, e.g. book title and edition name, since the presence of a cover does not add more information when using a screen reader. The expected result, via VoiceOver (and possibly other screen readers as well) is: ``` image The Night Circus (Hardcover, 2011) ```
[ { "content": "''' database schema for books and shelves '''\nimport re\n\nfrom django.db import models\nfrom model_utils.managers import InheritanceManager\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .activitypub_mixin import OrderedCollectionPageMixin, ObjectMixin\nfrom .base_model import BookWyrmModel\nfrom . import fields\n\nclass BookDataModel(ObjectMixin, BookWyrmModel):\n ''' fields shared between editable book data (books, works, authors) '''\n origin_id = models.CharField(max_length=255, null=True, blank=True)\n openlibrary_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n librarything_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n goodreads_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n\n last_edited_by = models.ForeignKey(\n 'User', on_delete=models.PROTECT, null=True)\n\n class Meta:\n ''' can't initialize this model, that wouldn't make sense '''\n abstract = True\n\n def save(self, *args, **kwargs):\n ''' ensure that the remote_id is within this instance '''\n if self.id:\n self.remote_id = self.get_remote_id()\n else:\n self.origin_id = self.remote_id\n self.remote_id = None\n return super().save(*args, **kwargs)\n\n\nclass Book(BookDataModel):\n ''' a generic book, which can mean either an edition or a work '''\n connector = models.ForeignKey(\n 'Connector', on_delete=models.PROTECT, null=True)\n\n # book/work metadata\n title = fields.CharField(max_length=255)\n sort_title = fields.CharField(max_length=255, blank=True, null=True)\n subtitle = fields.CharField(max_length=255, blank=True, null=True)\n description = fields.HtmlField(blank=True, null=True)\n languages = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n series = fields.CharField(max_length=255, blank=True, null=True)\n series_number = fields.CharField(max_length=255, blank=True, null=True)\n subjects = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n subject_places = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n authors = fields.ManyToManyField('Author')\n cover = fields.ImageField(\n upload_to='covers/', blank=True, null=True, alt_field='alt_text')\n first_published_date = fields.DateTimeField(blank=True, null=True)\n published_date = fields.DateTimeField(blank=True, null=True)\n\n objects = InheritanceManager()\n\n @property\n def author_text(self):\n ''' format a list of authors '''\n return ', '.join(a.name for a in self.authors.all())\n\n @property\n def latest_readthrough(self):\n ''' most recent readthrough activity '''\n return self.readthrough_set.order_by('-updated_date').first()\n\n @property\n def edition_info(self):\n ''' properties of this edition, as a string '''\n items = [\n self.physical_format if hasattr(self, 'physical_format') else None,\n self.languages[0] + ' language' if self.languages and \\\n self.languages[0] != 'English' else None,\n str(self.published_date.year) if self.published_date else None,\n ]\n return ', '.join(i for i in items if i)\n\n @property\n def alt_text(self):\n ''' image alt test '''\n text = '%s cover' % self.title\n if self.edition_info:\n text += ' (%s)' % self.edition_info\n return text\n\n def save(self, *args, **kwargs):\n ''' can't be abstract for query reasons, but you shouldn't USE it '''\n if not isinstance(self, Edition) and not isinstance(self, 
Work):\n raise ValueError('Books should be added as Editions or Works')\n return super().save(*args, **kwargs)\n\n def get_remote_id(self):\n ''' editions and works both use \"book\" instead of model_name '''\n return 'https://%s/book/%d' % (DOMAIN, self.id)\n\n def __repr__(self):\n return \"<{} key={!r} title={!r}>\".format(\n self.__class__,\n self.openlibrary_key,\n self.title,\n )\n\n\nclass Work(OrderedCollectionPageMixin, Book):\n ''' a work (an abstract concept of a book that manifests in an edition) '''\n # library of congress catalog control number\n lccn = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n # this has to be nullable but should never be null\n default_edition = fields.ForeignKey(\n 'Edition',\n on_delete=models.PROTECT,\n null=True,\n load_remote=False\n )\n\n def save(self, *args, **kwargs):\n ''' set some fields on the edition object '''\n # set rank\n for edition in self.editions.all():\n edition.save()\n return super().save(*args, **kwargs)\n\n def get_default_edition(self):\n ''' in case the default edition is not set '''\n return self.default_edition or self.editions.order_by(\n '-edition_rank'\n ).first()\n\n def to_edition_list(self, **kwargs):\n ''' an ordered collection of editions '''\n return self.to_ordered_collection(\n self.editions.order_by('-edition_rank').all(),\n remote_id='%s/editions' % self.remote_id,\n **kwargs\n )\n\n activity_serializer = activitypub.Work\n serialize_reverse_fields = [('editions', 'editions', '-edition_rank')]\n deserialize_reverse_fields = [('editions', 'editions')]\n\n\nclass Edition(Book):\n ''' an edition of a book '''\n # these identifiers only apply to editions, not works\n isbn_10 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n isbn_13 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n oclc_number = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n asin = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n pages = fields.IntegerField(blank=True, null=True)\n physical_format = fields.CharField(max_length=255, blank=True, null=True)\n publishers = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n shelves = models.ManyToManyField(\n 'Shelf',\n symmetrical=False,\n through='ShelfBook',\n through_fields=('book', 'shelf')\n )\n parent_work = fields.ForeignKey(\n 'Work', on_delete=models.PROTECT, null=True,\n related_name='editions', activitypub_field='work')\n edition_rank = fields.IntegerField(default=0)\n\n activity_serializer = activitypub.Edition\n name_field = 'title'\n\n def get_rank(self):\n ''' calculate how complete the data is on this edition '''\n if self.parent_work and self.parent_work.default_edition == self:\n # default edition has the highest rank\n return 20\n rank = 0\n rank += int(bool(self.cover)) * 3\n rank += int(bool(self.isbn_13))\n rank += int(bool(self.isbn_10))\n rank += int(bool(self.oclc_number))\n rank += int(bool(self.pages))\n rank += int(bool(self.physical_format))\n rank += int(bool(self.description))\n # max rank is 9\n return rank\n\n def save(self, *args, **kwargs):\n ''' set some fields on the edition object '''\n # calculate isbn 10/13\n if self.isbn_13 and self.isbn_13[:3] == '978' and not self.isbn_10:\n self.isbn_10 = isbn_13_to_10(self.isbn_13)\n if self.isbn_10 and not self.isbn_13:\n self.isbn_13 = isbn_10_to_13(self.isbn_10)\n\n # set rank\n 
self.edition_rank = self.get_rank()\n\n return super().save(*args, **kwargs)\n\n\ndef isbn_10_to_13(isbn_10):\n ''' convert an isbn 10 into an isbn 13 '''\n isbn_10 = re.sub(r'[^0-9X]', '', isbn_10)\n # drop the last character of the isbn 10 number (the original checkdigit)\n converted = isbn_10[:9]\n # add \"978\" to the front\n converted = '978' + converted\n # add a check digit to the end\n # multiply the odd digits by 1 and the even digits by 3 and sum them\n try:\n checksum = sum(int(i) for i in converted[::2]) + \\\n sum(int(i) * 3 for i in converted[1::2])\n except ValueError:\n return None\n # add the checksum mod 10 to the end\n checkdigit = checksum % 10\n if checkdigit != 0:\n checkdigit = 10 - checkdigit\n return converted + str(checkdigit)\n\n\ndef isbn_13_to_10(isbn_13):\n ''' convert isbn 13 to 10, if possible '''\n if isbn_13[:3] != '978':\n return None\n\n isbn_13 = re.sub(r'[^0-9X]', '', isbn_13)\n\n # remove '978' and old checkdigit\n converted = isbn_13[3:-1]\n # calculate checkdigit\n # multiple each digit by 10,9,8.. successively and sum them\n try:\n checksum = sum(int(d) * (10 - idx) for (idx, d) in enumerate(converted))\n except ValueError:\n return None\n checkdigit = checksum % 11\n checkdigit = 11 - checkdigit\n if checkdigit == 10:\n checkdigit = 'X'\n return converted + str(checkdigit)\n", "path": "bookwyrm/models/book.py" } ]
[ { "content": "''' database schema for books and shelves '''\nimport re\n\nfrom django.db import models\nfrom model_utils.managers import InheritanceManager\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .activitypub_mixin import OrderedCollectionPageMixin, ObjectMixin\nfrom .base_model import BookWyrmModel\nfrom . import fields\n\nclass BookDataModel(ObjectMixin, BookWyrmModel):\n ''' fields shared between editable book data (books, works, authors) '''\n origin_id = models.CharField(max_length=255, null=True, blank=True)\n openlibrary_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n librarything_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n goodreads_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n\n last_edited_by = models.ForeignKey(\n 'User', on_delete=models.PROTECT, null=True)\n\n class Meta:\n ''' can't initialize this model, that wouldn't make sense '''\n abstract = True\n\n def save(self, *args, **kwargs):\n ''' ensure that the remote_id is within this instance '''\n if self.id:\n self.remote_id = self.get_remote_id()\n else:\n self.origin_id = self.remote_id\n self.remote_id = None\n return super().save(*args, **kwargs)\n\n\nclass Book(BookDataModel):\n ''' a generic book, which can mean either an edition or a work '''\n connector = models.ForeignKey(\n 'Connector', on_delete=models.PROTECT, null=True)\n\n # book/work metadata\n title = fields.CharField(max_length=255)\n sort_title = fields.CharField(max_length=255, blank=True, null=True)\n subtitle = fields.CharField(max_length=255, blank=True, null=True)\n description = fields.HtmlField(blank=True, null=True)\n languages = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n series = fields.CharField(max_length=255, blank=True, null=True)\n series_number = fields.CharField(max_length=255, blank=True, null=True)\n subjects = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n subject_places = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n authors = fields.ManyToManyField('Author')\n cover = fields.ImageField(\n upload_to='covers/', blank=True, null=True, alt_field='alt_text')\n first_published_date = fields.DateTimeField(blank=True, null=True)\n published_date = fields.DateTimeField(blank=True, null=True)\n\n objects = InheritanceManager()\n\n @property\n def author_text(self):\n ''' format a list of authors '''\n return ', '.join(a.name for a in self.authors.all())\n\n @property\n def latest_readthrough(self):\n ''' most recent readthrough activity '''\n return self.readthrough_set.order_by('-updated_date').first()\n\n @property\n def edition_info(self):\n ''' properties of this edition, as a string '''\n items = [\n self.physical_format if hasattr(self, 'physical_format') else None,\n self.languages[0] + ' language' if self.languages and \\\n self.languages[0] != 'English' else None,\n str(self.published_date.year) if self.published_date else None,\n ]\n return ', '.join(i for i in items if i)\n\n @property\n def alt_text(self):\n ''' image alt test '''\n text = '%s' % self.title\n if self.edition_info:\n text += ' (%s)' % self.edition_info\n return text\n\n def save(self, *args, **kwargs):\n ''' can't be abstract for query reasons, but you shouldn't USE it '''\n if not isinstance(self, Edition) and not isinstance(self, Work):\n 
raise ValueError('Books should be added as Editions or Works')\n return super().save(*args, **kwargs)\n\n def get_remote_id(self):\n ''' editions and works both use \"book\" instead of model_name '''\n return 'https://%s/book/%d' % (DOMAIN, self.id)\n\n def __repr__(self):\n return \"<{} key={!r} title={!r}>\".format(\n self.__class__,\n self.openlibrary_key,\n self.title,\n )\n\n\nclass Work(OrderedCollectionPageMixin, Book):\n ''' a work (an abstract concept of a book that manifests in an edition) '''\n # library of congress catalog control number\n lccn = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n # this has to be nullable but should never be null\n default_edition = fields.ForeignKey(\n 'Edition',\n on_delete=models.PROTECT,\n null=True,\n load_remote=False\n )\n\n def save(self, *args, **kwargs):\n ''' set some fields on the edition object '''\n # set rank\n for edition in self.editions.all():\n edition.save()\n return super().save(*args, **kwargs)\n\n def get_default_edition(self):\n ''' in case the default edition is not set '''\n return self.default_edition or self.editions.order_by(\n '-edition_rank'\n ).first()\n\n def to_edition_list(self, **kwargs):\n ''' an ordered collection of editions '''\n return self.to_ordered_collection(\n self.editions.order_by('-edition_rank').all(),\n remote_id='%s/editions' % self.remote_id,\n **kwargs\n )\n\n activity_serializer = activitypub.Work\n serialize_reverse_fields = [('editions', 'editions', '-edition_rank')]\n deserialize_reverse_fields = [('editions', 'editions')]\n\n\nclass Edition(Book):\n ''' an edition of a book '''\n # these identifiers only apply to editions, not works\n isbn_10 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n isbn_13 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n oclc_number = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n asin = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True)\n pages = fields.IntegerField(blank=True, null=True)\n physical_format = fields.CharField(max_length=255, blank=True, null=True)\n publishers = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n shelves = models.ManyToManyField(\n 'Shelf',\n symmetrical=False,\n through='ShelfBook',\n through_fields=('book', 'shelf')\n )\n parent_work = fields.ForeignKey(\n 'Work', on_delete=models.PROTECT, null=True,\n related_name='editions', activitypub_field='work')\n edition_rank = fields.IntegerField(default=0)\n\n activity_serializer = activitypub.Edition\n name_field = 'title'\n\n def get_rank(self):\n ''' calculate how complete the data is on this edition '''\n if self.parent_work and self.parent_work.default_edition == self:\n # default edition has the highest rank\n return 20\n rank = 0\n rank += int(bool(self.cover)) * 3\n rank += int(bool(self.isbn_13))\n rank += int(bool(self.isbn_10))\n rank += int(bool(self.oclc_number))\n rank += int(bool(self.pages))\n rank += int(bool(self.physical_format))\n rank += int(bool(self.description))\n # max rank is 9\n return rank\n\n def save(self, *args, **kwargs):\n ''' set some fields on the edition object '''\n # calculate isbn 10/13\n if self.isbn_13 and self.isbn_13[:3] == '978' and not self.isbn_10:\n self.isbn_10 = isbn_13_to_10(self.isbn_13)\n if self.isbn_10 and not self.isbn_13:\n self.isbn_13 = isbn_10_to_13(self.isbn_10)\n\n # set rank\n self.edition_rank = 
self.get_rank()\n\n return super().save(*args, **kwargs)\n\n\ndef isbn_10_to_13(isbn_10):\n ''' convert an isbn 10 into an isbn 13 '''\n isbn_10 = re.sub(r'[^0-9X]', '', isbn_10)\n # drop the last character of the isbn 10 number (the original checkdigit)\n converted = isbn_10[:9]\n # add \"978\" to the front\n converted = '978' + converted\n # add a check digit to the end\n # multiply the odd digits by 1 and the even digits by 3 and sum them\n try:\n checksum = sum(int(i) for i in converted[::2]) + \\\n sum(int(i) * 3 for i in converted[1::2])\n except ValueError:\n return None\n # add the checksum mod 10 to the end\n checkdigit = checksum % 10\n if checkdigit != 0:\n checkdigit = 10 - checkdigit\n return converted + str(checkdigit)\n\n\ndef isbn_13_to_10(isbn_13):\n ''' convert isbn 13 to 10, if possible '''\n if isbn_13[:3] != '978':\n return None\n\n isbn_13 = re.sub(r'[^0-9X]', '', isbn_13)\n\n # remove '978' and old checkdigit\n converted = isbn_13[3:-1]\n # calculate checkdigit\n # multiple each digit by 10,9,8.. successively and sum them\n try:\n checksum = sum(int(d) * (10 - idx) for (idx, d) in enumerate(converted))\n except ValueError:\n return None\n checkdigit = checksum % 11\n checkdigit = 11 - checkdigit\n if checkdigit == 10:\n checkdigit = 'X'\n return converted + str(checkdigit)\n", "path": "bookwyrm/models/book.py" } ]
diff --git a/bookwyrm/models/book.py b/bookwyrm/models/book.py index f1f2083033..6a1a18b1ea 100644 --- a/bookwyrm/models/book.py +++ b/bookwyrm/models/book.py @@ -91,7 +91,7 @@ def edition_info(self): @property def alt_text(self): ''' image alt test ''' - text = '%s cover' % self.title + text = '%s' % self.title if self.edition_info: text += ' (%s)' % self.edition_info return text diff --git a/bookwyrm/tests/models/test_book_model.py b/bookwyrm/tests/models/test_book_model.py index 98d6d446ef..b4a099d050 100644 --- a/bookwyrm/tests/models/test_book_model.py +++ b/bookwyrm/tests/models/test_book_model.py @@ -81,7 +81,7 @@ def test_get_edition_info(self): book.save() self.assertEqual(book.edition_info, 'worm, Glorbish language, 2020') self.assertEqual( - book.alt_text, 'Test Edition cover (worm, Glorbish language, 2020)') + book.alt_text, 'Test Edition (worm, Glorbish language, 2020)') def test_get_rank(self): diff --git a/bookwyrm/tests/models/test_status_model.py b/bookwyrm/tests/models/test_status_model.py index c6911b6df9..29be5c0724 100644 --- a/bookwyrm/tests/models/test_status_model.py +++ b/bookwyrm/tests/models/test_status_model.py @@ -150,7 +150,7 @@ def test_generated_note_to_pure_activity(self, _): self.assertEqual(activity['attachment'][0].url, 'https://%s%s' % \ (settings.DOMAIN, self.book.cover.url)) self.assertEqual( - activity['attachment'][0].name, 'Test Edition cover') + activity['attachment'][0].name, 'Test Edition') def test_comment_to_activity(self, _): ''' subclass of the base model version with a "pure" serializer ''' @@ -177,7 +177,7 @@ def test_comment_to_pure_activity(self, _): self.assertEqual(activity['attachment'][0].url, 'https://%s%s' % \ (settings.DOMAIN, self.book.cover.url)) self.assertEqual( - activity['attachment'][0].name, 'Test Edition cover') + activity['attachment'][0].name, 'Test Edition') def test_quotation_to_activity(self, _): ''' subclass of the base model version with a "pure" serializer ''' @@ -207,7 +207,7 @@ def test_quotation_to_pure_activity(self, _): self.assertEqual(activity['attachment'][0].url, 'https://%s%s' % \ (settings.DOMAIN, self.book.cover.url)) self.assertEqual( - activity['attachment'][0].name, 'Test Edition cover') + activity['attachment'][0].name, 'Test Edition') def test_review_to_activity(self, _): ''' subclass of the base model version with a "pure" serializer ''' @@ -238,7 +238,7 @@ def test_review_to_pure_activity(self, _): self.assertEqual(activity['attachment'][0].url, 'https://%s%s' % \ (settings.DOMAIN, self.book.cover.url)) self.assertEqual( - activity['attachment'][0].name, 'Test Edition cover') + activity['attachment'][0].name, 'Test Edition') def test_favorite(self, _): ''' fav a status '''
apache__airflow-16108
Could not get scheduler_job_id **Apache Airflow version:** 2.0.0 **Kubernetes version (if you are using kubernetes) (use kubectl version):** 1.18.3 **Environment:** Cloud provider or hardware configuration: AWS **What happened:** When trying to run a DAG, it gets scheduled, but task is never run. When attempting to run task manually, it shows an error: ``` Something bad has happened. Please consider letting us know by creating a bug report using GitHub. Python version: 3.8.7 Airflow version: 2.0.0 Node: airflow-web-ffdd89d6-h98vj ------------------------------------------------------------------------------- Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/usr/local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated return func(*args, **kwargs) File "/usr/local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper return f(*args, **kwargs) File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 1366, in run executor.start() File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start raise AirflowException("Could not get scheduler_job_id") airflow.exceptions.AirflowException: Could not get scheduler_job_id ``` **What you expected to happen:** The task to be run successfully without **How to reproduce it:** Haven't pinpointed what causes the issue, besides an attempted upgrade from Airflow 1.10.14 to Airflow 2.0.0 **Anything else we need to know:** This error is encountered in an upgrade of Airflow from 1.10.14 to Airflow 2.0.0 EDIT: Formatted to fit the issue template
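The traceback ends in `KubernetesExecutor.start()`, which raises when the executor cannot determine a scheduler job id. An abbreviated sketch of the fix direction taken in the patched `_run_task_by_executor` (full file below): assign a placeholder `job_id` before starting the executor; the trailing calls are elided here.

```python
# Abbreviated sketch of the patched code path (see task_command.py below).
from airflow.executors.executor_loader import ExecutorLoader

executor = ExecutorLoader.get_default_executor()
executor.job_id = "manual"  # placeholder id for runs not driven by a scheduler job
executor.start()            # no longer raises "Could not get scheduler_job_id"
# ...queue_task_instance(...), heartbeat(), end() follow as in the full command.
```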
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Task sub-commands\"\"\"\nimport importlib\nimport json\nimport logging\nimport os\nimport textwrap\nfrom contextlib import contextmanager, redirect_stderr, redirect_stdout\nfrom typing import List\n\nfrom airflow import settings\nfrom airflow.cli.simple_table import AirflowConsole\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors.executor_loader import ExecutorLoader\nfrom airflow.jobs.local_task_job import LocalTaskJob\nfrom airflow.models import DagPickle, TaskInstance\nfrom airflow.models.dag import DAG\nfrom airflow.ti_deps.dep_context import DepContext\nfrom airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import (\n get_dag,\n get_dag_by_file_location,\n get_dag_by_pickle,\n get_dags,\n suppress_logs_and_warning,\n)\nfrom airflow.utils.log.logging_mixin import StreamLogWriter\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import create_session\n\n\ndef _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None:\n \"\"\"\n Runs the task in one of 3 modes\n\n - using LocalTaskJob\n - as raw task\n - by executor\n \"\"\"\n if args.local and args.raw:\n raise AirflowException(\n \"Option --raw and --local are mutually exclusive. \"\n \"Please remove one option to execute the command.\"\n )\n if args.local:\n _run_task_by_local_task_job(args, ti)\n elif args.raw:\n _run_raw_task(args, ti)\n else:\n _run_task_by_executor(args, dag, ti)\n\n\ndef _run_task_by_executor(args, dag, ti):\n \"\"\"\n Sends the task to the executor for execution. 
This can result in the task being started by another host\n if the executor implementation does\n \"\"\"\n pickle_id = None\n if args.ship_dag:\n try:\n # Running remotely, so pickling the DAG\n with create_session() as session:\n pickle = DagPickle(dag)\n session.add(pickle)\n pickle_id = pickle.id\n # TODO: This should be written to a log\n print(f'Pickled dag {dag} as pickle_id: {pickle_id}')\n except Exception as e:\n print('Could not pickle the DAG')\n print(e)\n raise e\n executor = ExecutorLoader.get_default_executor()\n executor.start()\n print(\"Sending to executor.\")\n executor.queue_task_instance(\n ti,\n mark_success=args.mark_success,\n pickle_id=pickle_id,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool,\n )\n executor.heartbeat()\n executor.end()\n\n\ndef _run_task_by_local_task_job(args, ti):\n \"\"\"Run LocalTaskJob, which monitors the raw task execution process\"\"\"\n run_job = LocalTaskJob(\n task_instance=ti,\n mark_success=args.mark_success,\n pickle_id=args.pickle,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool,\n )\n try:\n run_job.run()\n\n finally:\n if args.shut_down_logging:\n logging.shutdown()\n\n\nRAW_TASK_UNSUPPORTED_OPTION = [\n \"ignore_all_dependencies\",\n \"ignore_depends_on_past\",\n \"ignore_dependencies\",\n \"force\",\n]\n\n\ndef _run_raw_task(args, ti: TaskInstance) -> None:\n \"\"\"Runs the main task handling code\"\"\"\n unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]\n\n if unsupported_options:\n raise AirflowException(\n \"Option --raw does not work with some of the other options on this command. You \"\n \"can't use --raw option and the following options: {}. You provided the option {}. 
\"\n \"Delete it to execute the command\".format(\n \", \".join(f\"--{o}\" for o in RAW_TASK_UNSUPPORTED_OPTION),\n \", \".join(f\"--{o}\" for o in unsupported_options),\n )\n )\n ti._run_raw_task( # pylint: disable=protected-access\n mark_success=args.mark_success,\n job_id=args.job_id,\n pool=args.pool,\n error_file=args.error_file,\n )\n\n\n@contextmanager\ndef _capture_task_logs(ti):\n \"\"\"Manage logging context for a task run\n\n - Replace the root logger configuration with the airflow.task configuration\n so we can capture logs from any custom loggers used in the task.\n\n - Redirect stdout and stderr to the task instance log, as INFO and WARNING\n level messages, respectively.\n\n \"\"\"\n modify = not settings.DONOT_MODIFY_HANDLERS\n\n if modify:\n root_logger, task_logger = logging.getLogger(), logging.getLogger('airflow.task')\n\n orig_level = root_logger.level\n root_logger.setLevel(task_logger.level)\n orig_handlers = root_logger.handlers.copy()\n root_logger.handlers[:] = task_logger.handlers\n\n try:\n info_writer = StreamLogWriter(ti.log, logging.INFO)\n warning_writer = StreamLogWriter(ti.log, logging.WARNING)\n\n with redirect_stdout(info_writer), redirect_stderr(warning_writer):\n yield\n\n finally:\n if modify:\n # Restore the root logger to its original state.\n root_logger.setLevel(orig_level)\n root_logger.handlers[:] = orig_handlers\n\n\n@cli_utils.action_logging\ndef task_run(args, dag=None):\n \"\"\"Runs a single task instance\"\"\"\n # Load custom airflow config\n if args.cfg_path:\n with open(args.cfg_path) as conf_file:\n conf_dict = json.load(conf_file)\n\n if os.path.exists(args.cfg_path):\n os.remove(args.cfg_path)\n\n conf.read_dict(conf_dict, source=args.cfg_path)\n settings.configure_vars()\n\n settings.MASK_SECRETS_IN_LOGS = True\n\n # IMPORTANT, have to use the NullPool, otherwise, each \"run\" command may leave\n # behind multiple open sleeping connections while heartbeating, which could\n # easily exceed the database connection limit when\n # processing hundreds of simultaneous tasks.\n settings.configure_orm(disable_connection_pool=True)\n\n if dag and args.pickle:\n raise AirflowException(\"You cannot use the --pickle option when using DAG.cli() method.\")\n elif args.pickle:\n print(f'Loading pickle id: {args.pickle}')\n dag = get_dag_by_pickle(args.pickle)\n elif not dag:\n dag = get_dag(args.subdir, args.dag_id)\n else:\n # Use DAG from parameter\n pass\n\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.refresh_from_db()\n ti.init_run_context(raw=args.raw)\n\n hostname = get_hostname()\n\n print(f\"Running {ti} on host {hostname}\")\n\n if args.interactive:\n _run_task_by_selected_method(args, dag, ti)\n else:\n with _capture_task_logs(ti):\n _run_task_by_selected_method(args, dag, ti)\n\n\n@cli_utils.action_logging\ndef task_failed_deps(args):\n \"\"\"\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. 
why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(f\"{dep.dep_name}: {dep.reason}\")\n else:\n print(\"Task instance dependencies are all met.\")\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_state(args):\n \"\"\"\n Returns the state of a TaskInstance at the command line.\n >>> airflow tasks state tutorial sleep 2015-01-01\n success\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n print(ti.current_state())\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_list(args, dag=None):\n \"\"\"Lists the tasks within a DAG at the command line\"\"\"\n dag = dag or get_dag(args.subdir, args.dag_id)\n if args.tree:\n dag.tree_view()\n else:\n tasks = sorted(t.task_id for t in dag.tasks)\n print(\"\\n\".join(tasks))\n\n\nSUPPORTED_DEBUGGER_MODULES: List[str] = [\n \"pudb\",\n \"web_pdb\",\n \"ipdb\",\n \"pdb\",\n]\n\n\ndef _guess_debugger():\n \"\"\"\n Trying to guess the debugger used by the user. When it doesn't find any user-installed debugger,\n returns ``pdb``.\n\n List of supported debuggers:\n\n * `pudb <https://github.com/inducer/pudb>`__\n * `web_pdb <https://github.com/romanvm/python-web-pdb>`__\n * `ipdb <https://github.com/gotcha/ipdb>`__\n * `pdb <https://docs.python.org/3/library/pdb.html>`__\n \"\"\"\n for mod in SUPPORTED_DEBUGGER_MODULES:\n try:\n return importlib.import_module(mod)\n except ImportError:\n continue\n return importlib.import_module(\"pdb\")\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_states_for_dag_run(args):\n \"\"\"Get the status of all task instances in a DagRun\"\"\"\n with create_session() as session:\n tis = (\n session.query(\n TaskInstance.dag_id,\n TaskInstance.execution_date,\n TaskInstance.task_id,\n TaskInstance.state,\n TaskInstance.start_date,\n TaskInstance.end_date,\n )\n .filter(TaskInstance.dag_id == args.dag_id, TaskInstance.execution_date == args.execution_date)\n .all()\n )\n\n if len(tis) == 0:\n raise AirflowException(\"DagRun does not exist.\")\n\n AirflowConsole().print_as(\n data=tis,\n output=args.output,\n mapper=lambda ti: {\n \"dag_id\": ti.dag_id,\n \"execution_date\": ti.execution_date.isoformat(),\n \"task_id\": ti.task_id,\n \"state\": ti.state,\n \"start_date\": ti.start_date.isoformat() if ti.start_date else \"\",\n \"end_date\": ti.end_date.isoformat() if ti.end_date else \"\",\n },\n )\n\n\n@cli_utils.action_logging\ndef task_test(args, dag=None):\n \"\"\"Tests task for a given dag_id\"\"\"\n # We want to log output from operators etc to show up here. 
Normally\n # airflow.task would redirect to a file, but here we want it to propagate\n # up to the normal airflow handler.\n\n settings.MASK_SECRETS_IN_LOGS = True\n\n handlers = logging.getLogger('airflow.task').handlers\n already_has_stream_handler = False\n for handler in handlers:\n already_has_stream_handler = isinstance(handler, logging.StreamHandler)\n if already_has_stream_handler:\n break\n if not already_has_stream_handler:\n logging.getLogger('airflow.task').propagate = True\n\n env_vars = {'AIRFLOW_TEST_MODE': 'True'}\n if args.env_vars:\n env_vars.update(args.env_vars)\n os.environ.update(env_vars)\n\n dag = dag or get_dag(args.subdir, args.dag_id)\n\n task = dag.get_task(task_id=args.task_id)\n # Add CLI provided task_params to task.params\n if args.task_params:\n passed_in_params = json.loads(args.task_params)\n task.params.update(passed_in_params)\n ti = TaskInstance(task, args.execution_date)\n\n try:\n if args.dry_run:\n ti.dry_run()\n else:\n ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)\n except Exception: # pylint: disable=broad-except\n if args.post_mortem:\n debugger = _guess_debugger()\n debugger.post_mortem()\n else:\n raise\n finally:\n if not already_has_stream_handler:\n # Make sure to reset back to normal. When run for CLI this doesn't\n # matter, but it does for test suite\n logging.getLogger('airflow.task').propagate = False\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_render(args):\n \"\"\"Renders and displays templated fields for a given task\"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.render_templates()\n for attr in task.__class__.template_fields:\n print(\n textwrap.dedent(\n f\"\"\" # ----------------------------------------------------------\n # property: {attr}\n # ----------------------------------------------------------\n {getattr(task, attr)}\n \"\"\"\n )\n )\n\n\n@cli_utils.action_logging\ndef task_clear(args):\n \"\"\"Clears all task instances or only those matched by regex for a DAG(s)\"\"\"\n logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)\n\n if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:\n dags = get_dag_by_file_location(args.dag_id)\n else:\n # todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?\n dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)\n\n if args.task_regex:\n for idx, dag in enumerate(dags):\n dags[idx] = dag.partial_subset(\n task_ids_or_regex=args.task_regex,\n include_downstream=args.downstream,\n include_upstream=args.upstream,\n )\n\n DAG.clear_dags(\n dags,\n start_date=args.start_date,\n end_date=args.end_date,\n only_failed=args.only_failed,\n only_running=args.only_running,\n confirm_prompt=not args.yes,\n include_subdags=not args.exclude_subdags,\n include_parentdag=not args.exclude_parentdag,\n )\n", "path": "airflow/cli/commands/task_command.py" } ]
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Task sub-commands\"\"\"\nimport importlib\nimport json\nimport logging\nimport os\nimport textwrap\nfrom contextlib import contextmanager, redirect_stderr, redirect_stdout\nfrom typing import List\n\nfrom airflow import settings\nfrom airflow.cli.simple_table import AirflowConsole\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors.executor_loader import ExecutorLoader\nfrom airflow.jobs.local_task_job import LocalTaskJob\nfrom airflow.models import DagPickle, TaskInstance\nfrom airflow.models.dag import DAG\nfrom airflow.ti_deps.dep_context import DepContext\nfrom airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import (\n get_dag,\n get_dag_by_file_location,\n get_dag_by_pickle,\n get_dags,\n suppress_logs_and_warning,\n)\nfrom airflow.utils.log.logging_mixin import StreamLogWriter\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import create_session\n\n\ndef _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None:\n \"\"\"\n Runs the task in one of 3 modes\n\n - using LocalTaskJob\n - as raw task\n - by executor\n \"\"\"\n if args.local and args.raw:\n raise AirflowException(\n \"Option --raw and --local are mutually exclusive. \"\n \"Please remove one option to execute the command.\"\n )\n if args.local:\n _run_task_by_local_task_job(args, ti)\n elif args.raw:\n _run_raw_task(args, ti)\n else:\n _run_task_by_executor(args, dag, ti)\n\n\ndef _run_task_by_executor(args, dag, ti):\n \"\"\"\n Sends the task to the executor for execution. 
This can result in the task being started by another host\n if the executor implementation does\n \"\"\"\n pickle_id = None\n if args.ship_dag:\n try:\n # Running remotely, so pickling the DAG\n with create_session() as session:\n pickle = DagPickle(dag)\n session.add(pickle)\n pickle_id = pickle.id\n # TODO: This should be written to a log\n print(f'Pickled dag {dag} as pickle_id: {pickle_id}')\n except Exception as e:\n print('Could not pickle the DAG')\n print(e)\n raise e\n executor = ExecutorLoader.get_default_executor()\n executor.job_id = \"manual\"\n executor.start()\n print(\"Sending to executor.\")\n executor.queue_task_instance(\n ti,\n mark_success=args.mark_success,\n pickle_id=pickle_id,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool,\n )\n executor.heartbeat()\n executor.end()\n\n\ndef _run_task_by_local_task_job(args, ti):\n \"\"\"Run LocalTaskJob, which monitors the raw task execution process\"\"\"\n run_job = LocalTaskJob(\n task_instance=ti,\n mark_success=args.mark_success,\n pickle_id=args.pickle,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool,\n )\n try:\n run_job.run()\n\n finally:\n if args.shut_down_logging:\n logging.shutdown()\n\n\nRAW_TASK_UNSUPPORTED_OPTION = [\n \"ignore_all_dependencies\",\n \"ignore_depends_on_past\",\n \"ignore_dependencies\",\n \"force\",\n]\n\n\ndef _run_raw_task(args, ti: TaskInstance) -> None:\n \"\"\"Runs the main task handling code\"\"\"\n unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]\n\n if unsupported_options:\n raise AirflowException(\n \"Option --raw does not work with some of the other options on this command. You \"\n \"can't use --raw option and the following options: {}. You provided the option {}. 
\"\n \"Delete it to execute the command\".format(\n \", \".join(f\"--{o}\" for o in RAW_TASK_UNSUPPORTED_OPTION),\n \", \".join(f\"--{o}\" for o in unsupported_options),\n )\n )\n ti._run_raw_task( # pylint: disable=protected-access\n mark_success=args.mark_success,\n job_id=args.job_id,\n pool=args.pool,\n error_file=args.error_file,\n )\n\n\n@contextmanager\ndef _capture_task_logs(ti):\n \"\"\"Manage logging context for a task run\n\n - Replace the root logger configuration with the airflow.task configuration\n so we can capture logs from any custom loggers used in the task.\n\n - Redirect stdout and stderr to the task instance log, as INFO and WARNING\n level messages, respectively.\n\n \"\"\"\n modify = not settings.DONOT_MODIFY_HANDLERS\n\n if modify:\n root_logger, task_logger = logging.getLogger(), logging.getLogger('airflow.task')\n\n orig_level = root_logger.level\n root_logger.setLevel(task_logger.level)\n orig_handlers = root_logger.handlers.copy()\n root_logger.handlers[:] = task_logger.handlers\n\n try:\n info_writer = StreamLogWriter(ti.log, logging.INFO)\n warning_writer = StreamLogWriter(ti.log, logging.WARNING)\n\n with redirect_stdout(info_writer), redirect_stderr(warning_writer):\n yield\n\n finally:\n if modify:\n # Restore the root logger to its original state.\n root_logger.setLevel(orig_level)\n root_logger.handlers[:] = orig_handlers\n\n\n@cli_utils.action_logging\ndef task_run(args, dag=None):\n \"\"\"Runs a single task instance\"\"\"\n # Load custom airflow config\n if args.cfg_path:\n with open(args.cfg_path) as conf_file:\n conf_dict = json.load(conf_file)\n\n if os.path.exists(args.cfg_path):\n os.remove(args.cfg_path)\n\n conf.read_dict(conf_dict, source=args.cfg_path)\n settings.configure_vars()\n\n settings.MASK_SECRETS_IN_LOGS = True\n\n # IMPORTANT, have to use the NullPool, otherwise, each \"run\" command may leave\n # behind multiple open sleeping connections while heartbeating, which could\n # easily exceed the database connection limit when\n # processing hundreds of simultaneous tasks.\n settings.configure_orm(disable_connection_pool=True)\n\n if dag and args.pickle:\n raise AirflowException(\"You cannot use the --pickle option when using DAG.cli() method.\")\n elif args.pickle:\n print(f'Loading pickle id: {args.pickle}')\n dag = get_dag_by_pickle(args.pickle)\n elif not dag:\n dag = get_dag(args.subdir, args.dag_id)\n else:\n # Use DAG from parameter\n pass\n\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.refresh_from_db()\n ti.init_run_context(raw=args.raw)\n\n hostname = get_hostname()\n\n print(f\"Running {ti} on host {hostname}\")\n\n if args.interactive:\n _run_task_by_selected_method(args, dag, ti)\n else:\n with _capture_task_logs(ti):\n _run_task_by_selected_method(args, dag, ti)\n\n\n@cli_utils.action_logging\ndef task_failed_deps(args):\n \"\"\"\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. 
why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(f\"{dep.dep_name}: {dep.reason}\")\n else:\n print(\"Task instance dependencies are all met.\")\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_state(args):\n \"\"\"\n Returns the state of a TaskInstance at the command line.\n >>> airflow tasks state tutorial sleep 2015-01-01\n success\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n print(ti.current_state())\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_list(args, dag=None):\n \"\"\"Lists the tasks within a DAG at the command line\"\"\"\n dag = dag or get_dag(args.subdir, args.dag_id)\n if args.tree:\n dag.tree_view()\n else:\n tasks = sorted(t.task_id for t in dag.tasks)\n print(\"\\n\".join(tasks))\n\n\nSUPPORTED_DEBUGGER_MODULES: List[str] = [\n \"pudb\",\n \"web_pdb\",\n \"ipdb\",\n \"pdb\",\n]\n\n\ndef _guess_debugger():\n \"\"\"\n Trying to guess the debugger used by the user. When it doesn't find any user-installed debugger,\n returns ``pdb``.\n\n List of supported debuggers:\n\n * `pudb <https://github.com/inducer/pudb>`__\n * `web_pdb <https://github.com/romanvm/python-web-pdb>`__\n * `ipdb <https://github.com/gotcha/ipdb>`__\n * `pdb <https://docs.python.org/3/library/pdb.html>`__\n \"\"\"\n for mod in SUPPORTED_DEBUGGER_MODULES:\n try:\n return importlib.import_module(mod)\n except ImportError:\n continue\n return importlib.import_module(\"pdb\")\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_states_for_dag_run(args):\n \"\"\"Get the status of all task instances in a DagRun\"\"\"\n with create_session() as session:\n tis = (\n session.query(\n TaskInstance.dag_id,\n TaskInstance.execution_date,\n TaskInstance.task_id,\n TaskInstance.state,\n TaskInstance.start_date,\n TaskInstance.end_date,\n )\n .filter(TaskInstance.dag_id == args.dag_id, TaskInstance.execution_date == args.execution_date)\n .all()\n )\n\n if len(tis) == 0:\n raise AirflowException(\"DagRun does not exist.\")\n\n AirflowConsole().print_as(\n data=tis,\n output=args.output,\n mapper=lambda ti: {\n \"dag_id\": ti.dag_id,\n \"execution_date\": ti.execution_date.isoformat(),\n \"task_id\": ti.task_id,\n \"state\": ti.state,\n \"start_date\": ti.start_date.isoformat() if ti.start_date else \"\",\n \"end_date\": ti.end_date.isoformat() if ti.end_date else \"\",\n },\n )\n\n\n@cli_utils.action_logging\ndef task_test(args, dag=None):\n \"\"\"Tests task for a given dag_id\"\"\"\n # We want to log output from operators etc to show up here. 
Normally\n # airflow.task would redirect to a file, but here we want it to propagate\n # up to the normal airflow handler.\n\n settings.MASK_SECRETS_IN_LOGS = True\n\n handlers = logging.getLogger('airflow.task').handlers\n already_has_stream_handler = False\n for handler in handlers:\n already_has_stream_handler = isinstance(handler, logging.StreamHandler)\n if already_has_stream_handler:\n break\n if not already_has_stream_handler:\n logging.getLogger('airflow.task').propagate = True\n\n env_vars = {'AIRFLOW_TEST_MODE': 'True'}\n if args.env_vars:\n env_vars.update(args.env_vars)\n os.environ.update(env_vars)\n\n dag = dag or get_dag(args.subdir, args.dag_id)\n\n task = dag.get_task(task_id=args.task_id)\n # Add CLI provided task_params to task.params\n if args.task_params:\n passed_in_params = json.loads(args.task_params)\n task.params.update(passed_in_params)\n ti = TaskInstance(task, args.execution_date)\n\n try:\n if args.dry_run:\n ti.dry_run()\n else:\n ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)\n except Exception: # pylint: disable=broad-except\n if args.post_mortem:\n debugger = _guess_debugger()\n debugger.post_mortem()\n else:\n raise\n finally:\n if not already_has_stream_handler:\n # Make sure to reset back to normal. When run for CLI this doesn't\n # matter, but it does for test suite\n logging.getLogger('airflow.task').propagate = False\n\n\n@cli_utils.action_logging\n@suppress_logs_and_warning\ndef task_render(args):\n \"\"\"Renders and displays templated fields for a given task\"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.render_templates()\n for attr in task.__class__.template_fields:\n print(\n textwrap.dedent(\n f\"\"\" # ----------------------------------------------------------\n # property: {attr}\n # ----------------------------------------------------------\n {getattr(task, attr)}\n \"\"\"\n )\n )\n\n\n@cli_utils.action_logging\ndef task_clear(args):\n \"\"\"Clears all task instances or only those matched by regex for a DAG(s)\"\"\"\n logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)\n\n if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:\n dags = get_dag_by_file_location(args.dag_id)\n else:\n # todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?\n dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)\n\n if args.task_regex:\n for idx, dag in enumerate(dags):\n dags[idx] = dag.partial_subset(\n task_ids_or_regex=args.task_regex,\n include_downstream=args.downstream,\n include_upstream=args.upstream,\n )\n\n DAG.clear_dags(\n dags,\n start_date=args.start_date,\n end_date=args.end_date,\n only_failed=args.only_failed,\n only_running=args.only_running,\n confirm_prompt=not args.yes,\n include_subdags=not args.exclude_subdags,\n include_parentdag=not args.exclude_parentdag,\n )\n", "path": "airflow/cli/commands/task_command.py" } ]
diff --git a/airflow/cli/commands/task_command.py b/airflow/cli/commands/task_command.py index 6c05d7776d3ea..c0cfb03f66258 100644 --- a/airflow/cli/commands/task_command.py +++ b/airflow/cli/commands/task_command.py @@ -88,6 +88,7 @@ def _run_task_by_executor(args, dag, ti): print(e) raise e executor = ExecutorLoader.get_default_executor() + executor.job_id = "manual" executor.start() print("Sending to executor.") executor.queue_task_instance(
LMFDB__lmfdb-5179
PIP dependencies
We have several deprecated dependencies that we should fix ASAP
```
flask<=1.1.4
markupsafe<=2.0.1
itsdangerous<=2.0.1
```
In particular, these old pins prevent using lmfdb in an environment with jupyterlab installed, which is something we would like to have working in the short term.
[ { "content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\napp.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py" } ]
diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py index fbb8b9cb60..d7f659084b 100644 --- a/lmfdb/local_fields/__init__.py +++ b/lmfdb/local_fields/__init__.py @@ -27,7 +27,6 @@ def redirect_local(): app.register_blueprint(local_fields_page, url_prefix="/padicField") -app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField") # API2 has been disabled for now #from lmfdb.api2.searchers import register_search_function diff --git a/requirements.txt b/requirements.txt index d50709e588..b226f06ebd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,11 @@ bcrypt cython -flask<=1.1.4 +flask flask-cache flask-login flask-markdown -markupsafe<=2.0.1 -itsdangerous<=2.0.1 +markupsafe +itsdangerous psycopg2-binary pyflakes pytest
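Besides unpinning the requirements, the diff above drops the second `register_blueprint()` call for the same blueprint. That removal is consistent with Flask 2.x, which, to my understanding, rejects registering one blueprint twice unless each registration gets a unique name. A hedged sketch of the `name=` alternative (this is not what the PR does, only an illustration of the newer Flask API):

```python
# Sketch only, assuming Flask >= 2.0.1: the same blueprint can be mounted under
# two URL prefixes if the second registration is given a distinct name.
from flask import Blueprint, Flask

app = Flask(__name__)
local_fields_page = Blueprint("local_fields", __name__)

app.register_blueprint(local_fields_page, url_prefix="/padicField")
app.register_blueprint(
    local_fields_page, url_prefix="/LocalNumberField", name="local_fields_legacy"
)
```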
ansible-collections__community.general-6695
read_csv - Key 'Name' was not found in the CSV header fields
##### SUMMARY
The `read_csv` module fails to identify a field, yet displays the field in the list of available fields.

##### ISSUE TYPE
- Bug Report

##### COMPONENT NAME
read_csv

##### ANSIBLE VERSION
```
ansible 2.9.10
  config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg
  configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']
  ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible
  executable location = /home/anton/.local/bin/ansible
  python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
```

##### CONFIGURATION
```
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first

[defaults]
host_key_checking = False
host_key_check = False
ansible_python_interpreter=/usr/bin/python3
log_path = ./ansible.log
#bin_ansible_callbacks=True
#stdout_callback = debug

# some basic default values...
library = ./library

# additional paths to search for roles in, colon separated
roles_path = ./roles

[ssh_connection]
# ssh arguments to use
ssh_args = -o StrictHostKeyChecking=no
timeout=60
```

##### OS / ENVIRONMENT
Ubuntu 20.04

##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
---
- name: Right-size VMs
  gather_facts: false
  hosts: all
  connection: local
  tasks:
    # Read a CSV file and access the first item
    - name: Read users from CSV file and return a list
      read_csv:
        path: "files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv"
        key: Name
      register: users

    - debug:
        msg: 'User {{ users.list.2.Name}}'
        # msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'
        # msg: "{{ users }}"
```
<!--- HINT: You can paste gist.github.com links for larger files -->

##### EXPECTED RESULTS
Expected to be able to read CSV values by column name (field), as described in the module documentation.

##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python"}, "changed": false, "msg": "Key 'Name' was not found in the CSV header fields: Name, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter"}
```
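The error message lists `Name` among the header fields while the lookup for `Name` still fails, which is the classic signature of a UTF-8 byte-order mark at the start of the file: the first fieldname is really `\ufeffName`. A minimal reproduction sketch outside Ansible, with made-up data:

```python
# Minimal reproduction sketch (data is made up): a BOM at the start of the
# header row becomes part of the first fieldname, so 'Name' appears to be in
# the header while the real dictionary key is '\ufeffName'.
import csv
from io import StringIO

data = "\ufeffName,Configured-vCPU,ReclaimablevCPUs\nvm01,8,4\n"
reader = csv.DictReader(StringIO(data))
row = next(reader)

print(reader.fieldnames)     # ['\ufeffName', 'Configured-vCPU', 'ReclaimablevCPUs']
print("Name" in row)         # False -- this is why the module reports the key as missing
print(row["\ufeffName"])     # 'vm01'
```

The module-util change shown further down strips a leading BOM before handing the text to `csv.DictReader`, which makes the plain `Name` key work again.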
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n\n data = to_native(data, errors='surrogate_or_strict')\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n if data.startswith(BOM):\n data = data[len(BOM):]\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py" } ]
diff --git a/changelogs/fragments/6662-csv-bom.yml b/changelogs/fragments/6662-csv-bom.yml new file mode 100644 index 00000000000..e9c617219c8 --- /dev/null +++ b/changelogs/fragments/6662-csv-bom.yml @@ -0,0 +1,2 @@ +bugfixes: + - csv module utils - detects and remove unicode BOM markers from incoming CSV content (https://github.com/ansible-collections/community.general/pull/6662). diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 50d2cb38686..200548a46da 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -55,8 +55,10 @@ class unix_dialect(csv.Dialect): def read_csv(data, dialect, fieldnames=None): - + BOM = to_native(u'\ufeff') data = to_native(data, errors='surrogate_or_strict') + if data.startswith(BOM): + data = data[len(BOM):] if PY3: fake_fh = StringIO(data) diff --git a/tests/integration/targets/read_csv/meta/main.yml b/tests/integration/targets/read_csv/meta/main.yml new file mode 100644 index 00000000000..982de6eb035 --- /dev/null +++ b/tests/integration/targets/read_csv/meta/main.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/read_csv/tasks/main.yml b/tests/integration/targets/read_csv/tasks/main.yml index a21af95182e..c09349dd5b8 100644 --- a/tests/integration/targets/read_csv/tasks/main.yml +++ b/tests/integration/targets/read_csv/tasks/main.yml @@ -11,16 +11,16 @@ # Create basic CSV file - name: Create unique CSV file copy: - content: | + content: &users_content | name,uid,gid,gecos dag,500,500,Dag Wieërs jeroen,501,500,Jeroen Hoekx - dest: users_unique.csv + dest: "{{ remote_tmp_dir }}/users_unique.csv" # Read a CSV file and access user 'dag' - name: Read users from CSV file and return a dictionary read_csv: - path: users_unique.csv + path: "{{ remote_tmp_dir }}/users_unique.csv" key: name register: users_unique @@ -35,10 +35,10 @@ - users_unique.dict.jeroen.uid == '501' - users_unique.dict.jeroen.gid == '500' - # Read a CSV file and access the first item +# Read a CSV file and access the first item - name: Read users from CSV file and return a list read_csv: - path: users_unique.csv + path: "{{ remote_tmp_dir }}/users_unique.csv" register: users_unique - assert: @@ -61,12 +61,12 @@ dag;500;500;Dag Wieërs jeroen;501;500;Jeroen Hoekx dag;502;500;Dag Wieers - dest: users_nonunique.csv + dest: "{{ remote_tmp_dir }}/users_nonunique.csv" # Read a CSV file and access user 'dag' - name: Read users from CSV file and return a dictionary read_csv: - path: users_nonunique.csv + path: "{{ remote_tmp_dir }}/users_nonunique.csv" key: name unique: false delimiter: ';' @@ -87,7 +87,7 @@ # Read a CSV file using an non-existing dialect - name: Read users from CSV file and return a dictionary read_csv: - path: users_nonunique.csv + path: "{{ remote_tmp_dir }}/users_nonunique.csv" dialect: placebo register: users_placebo ignore_errors: true @@ -104,12 +104,12 @@ content: | dag,500,500,Dag Wieërs jeroen,501,500,Jeroen Hoekx - dest: users_noheader.csv + dest: "{{ remote_tmp_dir }}/users_noheader.csv" # Read a CSV file and access user 'dag' - name: Read users from CSV file and return a dictionary read_csv: - path: users_noheader.csv + path: "{{ remote_tmp_dir }}/users_noheader.csv" key: name fieldnames: name,uid,gid,gecos register: users_noheader @@ -133,12 +133,12 @@ name,uid,gid,gecos 
dag,500,500,Dag Wieërs jeroen,501,500,"Jeroen"Hoekx" - dest: users_broken.csv + dest: "{{ remote_tmp_dir }}/users_broken.csv" # Read a broken CSV file using strict - name: Read users from a broken CSV file read_csv: - path: users_broken.csv + path: "{{ remote_tmp_dir }}/users_broken.csv" key: name strict: true register: users_broken @@ -148,3 +148,29 @@ that: - users_broken is failed - "'Unable to process file' in users_broken.msg" + +# Create basic CSV file with BOM +- name: Create unique CSV file with BOM + copy: + content: "{{ bom + content }}" + dest: "{{ remote_tmp_dir }}/users_bom.csv" + vars: + content: *users_content + bom: "{{ '\ufeff' }}" + + # Read a CSV file and access the first item +- name: Read users from CSV file and return a list + read_csv: + path: "{{ remote_tmp_dir }}/users_bom.csv" + register: users_bom + +- assert: + that: + - users_bom.list.0.name == 'dag' + - users_bom.list.0.gecos == 'Dag Wieërs' + - users_bom.list.0.uid == '500' + - users_bom.list.0.gid == '500' + - users_bom.list.1.name == 'jeroen' + - users_bom.list.1.gecos == 'Jeroen Hoekx' + - users_bom.list.1.uid == '501' + - users_bom.list.1.gid == '500'
netbox-community__netbox-12012
Support for multiple remote auth backends
### NetBox version
v3.4.6

### Feature type
New functionality

### Proposed functionality
Currently, [REMOTE_AUTH_BACKEND](https://docs.netbox.dev/en/stable/configuration/remote-authentication/#remote_auth_backend) only supports a single auth backend. Modify REMOTE_AUTH_BACKEND to accept an iterable, which may reference one or more Django authentication backends. Backends would then be attempted in sequence when authenticating a user.

### Use case
Valid NetBox users may not all exist in the same authentication directory or service, e.g. multiple AD forests.

**Note**: For multiple AD domains within a single forest, point `AUTH_LDAP_SERVER_URI` to the AD Global Catalog port 3269 as described in the [docs](https://docs.netbox.dev/en/stable/installation/6-ldap/#general-server-configuration).

As Django [supports multiple auth backends](https://django-auth-ldap.readthedocs.io/en/latest/multiconfig.html), extending this functionality to NetBox seems feasible. This has come up several times in discussion:

- [Multiple Authentication methods at the same time, meaning Ldap OR SSO possible?](https://github.com/netbox-community/netbox/discussions/10733)
- [Multiple LDAP sources Docker](https://github.com/netbox-community/netbox/discussions/5457)
- [Multiple remote auth backends](https://github.com/netbox-community/netbox/discussions/9479)
- [Multiple LDAP error](https://github.com/netbox-community/netbox/discussions/9360)

### Database changes
None

### External dependencies
None
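A minimal sketch of the proposed normalization, not the actual NetBox implementation: the settings logic (see how `AUTHENTICATION_BACKENDS` is built in the `settings.py` shown below) could accept either a single dotted path or an iterable of paths, and Django would then try the backends in order. The backend paths used here are illustrative.

```python
# Sketch only: normalize REMOTE_AUTH_BACKEND so a plain string keeps working
# while an iterable expands into multiple backends, tried in sequence by Django.
REMOTE_AUTH_BACKEND = (
    'django_auth_ldap.backend.LDAPBackend',      # e.g. first AD forest (illustrative)
    'netbox.authentication.RemoteUserBackend',   # fallback backend
)

if isinstance(REMOTE_AUTH_BACKEND, str):
    _remote_backends = [REMOTE_AUTH_BACKEND]
else:
    _remote_backends = list(REMOTE_AUTH_BACKEND)

AUTHENTICATION_BACKENDS = [
    *_remote_backends,
    'netbox.authentication.ObjectPermissionBackend',
]
```

Existing single-string configurations would keep working because a lone string is simply wrapped into a one-element list.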
[ { "content": "import hashlib\nimport importlib\nimport importlib.util\nimport os\nimport platform\nimport sys\nimport warnings\nfrom urllib.parse import urlsplit\n\nimport django\nimport sentry_sdk\nfrom django.contrib.messages import constants as messages\nfrom django.core.exceptions import ImproperlyConfigured, ValidationError\nfrom django.core.validators import URLValidator\nfrom django.utils.encoding import force_str\nfrom extras.plugins import PluginConfig\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom netbox.config import PARAMS\nfrom netbox.constants import RQ_QUEUE_DEFAULT, RQ_QUEUE_HIGH, RQ_QUEUE_LOW\n\n\n#\n# Environment setup\n#\n\nVERSION = '3.4.7-dev'\n\n# Hostname\nHOSTNAME = platform.node()\n\n# Set the base directory two levels up\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Validate Python version\nif sys.version_info < (3, 8):\n raise RuntimeError(\n f\"NetBox requires Python 3.8 or later. (Currently installed: Python {platform.python_version()})\"\n )\n\nDEFAULT_SENTRY_DSN = 'https://[email protected]/6396485'\n\n#\n# Configuration import\n#\n\n# Import configuration parameters\nconfig_path = os.getenv('NETBOX_CONFIGURATION', 'netbox.configuration')\ntry:\n configuration = importlib.import_module(config_path)\nexcept ModuleNotFoundError as e:\n if getattr(e, 'name') == config_path:\n raise ImproperlyConfigured(\n f\"Specified configuration module ({config_path}) not found. Please define netbox/netbox/configuration.py \"\n f\"per the documentation, or specify an alternate module in the NETBOX_CONFIGURATION environment variable.\"\n )\n raise\n\n# Enforce required configuration parameters\nfor parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:\n if not hasattr(configuration, parameter):\n raise ImproperlyConfigured(f\"Required parameter {parameter} is missing from configuration.\")\n\n# Set required parameters\nALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')\nDATABASE = getattr(configuration, 'DATABASE')\nREDIS = getattr(configuration, 'REDIS')\nSECRET_KEY = getattr(configuration, 'SECRET_KEY')\n\n# Calculate a unique deployment ID from the secret key\nDEPLOYMENT_ID = hashlib.sha256(SECRET_KEY.encode('utf-8')).hexdigest()[:16]\n\n# Set static config parameters\nADMINS = getattr(configuration, 'ADMINS', [])\nALLOW_TOKEN_RETRIEVAL = getattr(configuration, 'ALLOW_TOKEN_RETRIEVAL', True)\nAUTH_PASSWORD_VALIDATORS = getattr(configuration, 'AUTH_PASSWORD_VALIDATORS', [])\nBASE_PATH = getattr(configuration, 'BASE_PATH', '')\nif BASE_PATH:\n BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only\nCSRF_COOKIE_PATH = LANGUAGE_COOKIE_PATH = SESSION_COOKIE_PATH = f'/{BASE_PATH.rstrip(\"/\")}'\nCORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)\nCORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])\nCORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])\nCSRF_COOKIE_NAME = getattr(configuration, 'CSRF_COOKIE_NAME', 'csrftoken')\nCSRF_TRUSTED_ORIGINS = getattr(configuration, 'CSRF_TRUSTED_ORIGINS', [])\nDATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')\nDATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')\nDEBUG = getattr(configuration, 'DEBUG', False)\nDEVELOPER = getattr(configuration, 'DEVELOPER', False)\nDOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))\nEMAIL = getattr(configuration, 'EMAIL', {})\nEXEMPT_VIEW_PERMISSIONS = 
getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])\nFIELD_CHOICES = getattr(configuration, 'FIELD_CHOICES', {})\nFILE_UPLOAD_MAX_MEMORY_SIZE = getattr(configuration, 'FILE_UPLOAD_MAX_MEMORY_SIZE', 2621440)\nHTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)\nINTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))\nJINJA2_FILTERS = getattr(configuration, 'JINJA2_FILTERS', {})\nLANGUAGE_CODE = getattr(configuration, 'DEFAULT_LANGUAGE', 'en-us')\nLOGGING = getattr(configuration, 'LOGGING', {})\nLOGIN_PERSISTENCE = getattr(configuration, 'LOGIN_PERSISTENCE', False)\nLOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)\nLOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)\nLOGOUT_REDIRECT_URL = getattr(configuration, 'LOGOUT_REDIRECT_URL', 'home')\nMEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')\nMETRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)\nPLUGINS = getattr(configuration, 'PLUGINS', [])\nPLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})\nQUEUE_MAPPINGS = getattr(configuration, 'QUEUE_MAPPINGS', {})\nRELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)\nREMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)\nREMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')\nREMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])\nREMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})\nREMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)\nREMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')\nREMOTE_AUTH_GROUP_HEADER = getattr(configuration, 'REMOTE_AUTH_GROUP_HEADER', 'HTTP_REMOTE_USER_GROUP')\nREMOTE_AUTH_GROUP_SYNC_ENABLED = getattr(configuration, 'REMOTE_AUTH_GROUP_SYNC_ENABLED', False)\nREMOTE_AUTH_SUPERUSER_GROUPS = getattr(configuration, 'REMOTE_AUTH_SUPERUSER_GROUPS', [])\nREMOTE_AUTH_SUPERUSERS = getattr(configuration, 'REMOTE_AUTH_SUPERUSERS', [])\nREMOTE_AUTH_STAFF_GROUPS = getattr(configuration, 'REMOTE_AUTH_STAFF_GROUPS', [])\nREMOTE_AUTH_STAFF_USERS = getattr(configuration, 'REMOTE_AUTH_STAFF_USERS', [])\nREMOTE_AUTH_GROUP_SEPARATOR = getattr(configuration, 'REMOTE_AUTH_GROUP_SEPARATOR', '|')\nREPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')\nRQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)\nSCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')\nSEARCH_BACKEND = getattr(configuration, 'SEARCH_BACKEND', 'netbox.search.backends.CachedValueSearchBackend')\nSENTRY_DSN = getattr(configuration, 'SENTRY_DSN', DEFAULT_SENTRY_DSN)\nSENTRY_ENABLED = getattr(configuration, 'SENTRY_ENABLED', False)\nSENTRY_SAMPLE_RATE = getattr(configuration, 'SENTRY_SAMPLE_RATE', 1.0)\nSENTRY_TRACES_SAMPLE_RATE = getattr(configuration, 'SENTRY_TRACES_SAMPLE_RATE', 0)\nSENTRY_TAGS = getattr(configuration, 'SENTRY_TAGS', {})\nSESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)\nSESSION_COOKIE_NAME = getattr(configuration, 'SESSION_COOKIE_NAME', 'sessionid')\nSHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')\nSHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')\nSHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')\nSTORAGE_BACKEND = 
getattr(configuration, 'STORAGE_BACKEND', None)\nSTORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})\nTIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')\nTIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')\nENABLE_LOCALIZATION = getattr(configuration, 'ENABLE_LOCALIZATION', False)\n\n# Check for hard-coded dynamic config parameters\nfor param in PARAMS:\n if hasattr(configuration, param.name):\n globals()[param.name] = getattr(configuration, param.name)\n\n# Validate update repo URL and timeout\nif RELEASE_CHECK_URL:\n validator = URLValidator(\n message=(\n \"RELEASE_CHECK_URL must be a valid API URL. Example: \"\n \"https://api.github.com/repos/netbox-community/netbox\"\n )\n )\n try:\n validator(RELEASE_CHECK_URL)\n except ValidationError as err:\n raise ImproperlyConfigured(str(err))\n\n\n#\n# Database\n#\n\n# Only PostgreSQL is supported\nif METRICS_ENABLED:\n DATABASE.update({\n 'ENGINE': 'django_prometheus.db.backends.postgresql'\n })\nelse:\n DATABASE.update({\n 'ENGINE': 'django.db.backends.postgresql'\n })\n\nDATABASES = {\n 'default': DATABASE,\n}\n\n\n#\n# Media storage\n#\n\nif STORAGE_BACKEND is not None:\n DEFAULT_FILE_STORAGE = STORAGE_BACKEND\n\n # django-storages\n if STORAGE_BACKEND.startswith('storages.'):\n\n try:\n import storages.utils # type: ignore\n except ModuleNotFoundError as e:\n if getattr(e, 'name') == 'storages':\n raise ImproperlyConfigured(\n f\"STORAGE_BACKEND is set to {STORAGE_BACKEND} but django-storages is not present. It can be \"\n f\"installed by running 'pip install django-storages'.\"\n )\n raise e\n\n # Monkey-patch django-storages to fetch settings from STORAGE_CONFIG\n def _setting(name, default=None):\n if name in STORAGE_CONFIG:\n return STORAGE_CONFIG[name]\n return globals().get(name, default)\n storages.utils.setting = _setting\n\nif STORAGE_CONFIG and STORAGE_BACKEND is None:\n warnings.warn(\n \"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. 
STORAGE_CONFIG will be \"\n \"ignored.\"\n )\n\n\n#\n# Redis\n#\n\n# Background task queuing\nif 'tasks' not in REDIS:\n raise ImproperlyConfigured(\n \"REDIS section in configuration.py is missing the 'tasks' subsection.\"\n )\nTASKS_REDIS = REDIS['tasks']\nTASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')\nTASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)\nTASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])\nTASKS_REDIS_USING_SENTINEL = all([\n isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),\n len(TASKS_REDIS_SENTINELS) > 0\n])\nTASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')\nTASKS_REDIS_SENTINEL_TIMEOUT = TASKS_REDIS.get('SENTINEL_TIMEOUT', 10)\nTASKS_REDIS_USERNAME = TASKS_REDIS.get('USERNAME', '')\nTASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')\nTASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)\nTASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)\nTASKS_REDIS_SKIP_TLS_VERIFY = TASKS_REDIS.get('INSECURE_SKIP_TLS_VERIFY', False)\nTASKS_REDIS_CA_CERT_PATH = TASKS_REDIS.get('CA_CERT_PATH', False)\n\n# Caching\nif 'caching' not in REDIS:\n raise ImproperlyConfigured(\n \"REDIS section in configuration.py is missing caching subsection.\"\n )\nCACHING_REDIS_HOST = REDIS['caching'].get('HOST', 'localhost')\nCACHING_REDIS_PORT = REDIS['caching'].get('PORT', 6379)\nCACHING_REDIS_DATABASE = REDIS['caching'].get('DATABASE', 0)\nCACHING_REDIS_USERNAME = REDIS['caching'].get('USERNAME', '')\nCACHING_REDIS_USERNAME_HOST = '@'.join(filter(None, [CACHING_REDIS_USERNAME, CACHING_REDIS_HOST]))\nCACHING_REDIS_PASSWORD = REDIS['caching'].get('PASSWORD', '')\nCACHING_REDIS_SENTINELS = REDIS['caching'].get('SENTINELS', [])\nCACHING_REDIS_SENTINEL_SERVICE = REDIS['caching'].get('SENTINEL_SERVICE', 'default')\nCACHING_REDIS_PROTO = 'rediss' if REDIS['caching'].get('SSL', False) else 'redis'\nCACHING_REDIS_SKIP_TLS_VERIFY = REDIS['caching'].get('INSECURE_SKIP_TLS_VERIFY', False)\nCACHING_REDIS_CA_CERT_PATH = REDIS['caching'].get('CA_CERT_PATH', False)\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_USERNAME_HOST}:{CACHING_REDIS_PORT}/{CACHING_REDIS_DATABASE}',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n 'PASSWORD': CACHING_REDIS_PASSWORD,\n }\n }\n}\n\n\nif CACHING_REDIS_SENTINELS:\n DJANGO_REDIS_CONNECTION_FACTORY = 'django_redis.pool.SentinelConnectionFactory'\n CACHES['default']['LOCATION'] = f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_SENTINEL_SERVICE}/{CACHING_REDIS_DATABASE}'\n CACHES['default']['OPTIONS']['CLIENT_CLASS'] = 'django_redis.client.SentinelClient'\n CACHES['default']['OPTIONS']['SENTINELS'] = CACHING_REDIS_SENTINELS\nif CACHING_REDIS_SKIP_TLS_VERIFY:\n CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})\n CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_cert_reqs'] = False\nif CACHING_REDIS_CA_CERT_PATH:\n CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})\n CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_ca_certs'] = CACHING_REDIS_CA_CERT_PATH\n\n#\n# Sessions\n#\n\nif LOGIN_TIMEOUT is not None:\n # Django default is 1209600 seconds (14 days)\n SESSION_COOKIE_AGE = LOGIN_TIMEOUT\nSESSION_SAVE_EVERY_REQUEST = bool(LOGIN_PERSISTENCE)\nif SESSION_FILE_PATH is not None:\n SESSION_ENGINE = 'django.contrib.sessions.backends.file'\n\n\n#\n# Email\n#\n\nEMAIL_HOST = EMAIL.get('SERVER')\nEMAIL_HOST_USER = EMAIL.get('USERNAME')\nEMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')\nEMAIL_PORT = 
EMAIL.get('PORT', 25)\nEMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')\nEMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')\nEMAIL_SUBJECT_PREFIX = '[NetBox] '\nEMAIL_USE_SSL = EMAIL.get('USE_SSL', False)\nEMAIL_USE_TLS = EMAIL.get('USE_TLS', False)\nEMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)\nSERVER_EMAIL = EMAIL.get('FROM_EMAIL')\n\n\n#\n# Django\n#\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'corsheaders',\n 'debug_toolbar',\n 'graphiql_debug_toolbar',\n 'django_filters',\n 'django_tables2',\n 'django_prometheus',\n 'graphene_django',\n 'mptt',\n 'rest_framework',\n 'social_django',\n 'taggit',\n 'timezone_field',\n 'circuits',\n 'dcim',\n 'ipam',\n 'extras',\n 'tenancy',\n 'users',\n 'utilities',\n 'virtualization',\n 'wireless',\n 'django_rq', # Must come after extras to allow overriding management commands\n 'drf_yasg',\n]\n\n# Middleware\nMIDDLEWARE = [\n 'graphiql_debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django_prometheus.middleware.PrometheusBeforeMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'netbox.middleware.ExceptionHandlingMiddleware',\n 'netbox.middleware.RemoteUserMiddleware',\n 'netbox.middleware.LoginRequiredMiddleware',\n 'netbox.middleware.DynamicConfigMiddleware',\n 'netbox.middleware.APIVersionMiddleware',\n 'netbox.middleware.ObjectChangeMiddleware',\n 'django_prometheus.middleware.PrometheusAfterMiddleware',\n]\n\nif not ENABLE_LOCALIZATION:\n MIDDLEWARE.remove(\"django.middleware.locale.LocaleMiddleware\")\n\nROOT_URLCONF = 'netbox.urls'\n\nTEMPLATES_DIR = BASE_DIR + '/templates'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATES_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'builtins': [\n 'utilities.templatetags.builtins.filters',\n 'utilities.templatetags.builtins.tags',\n ],\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.template.context_processors.media',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'netbox.context_processors.settings_and_registry',\n ],\n },\n },\n]\n\n# Set up authentication backends\nAUTHENTICATION_BACKENDS = [\n REMOTE_AUTH_BACKEND,\n 'netbox.authentication.ObjectPermissionBackend',\n]\n\n# Time zones\nUSE_TZ = True\n\n# WSGI\nWSGI_APPLICATION = 'netbox.wsgi.application'\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nUSE_X_FORWARDED_HOST = True\nX_FRAME_OPTIONS = 'SAMEORIGIN'\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_ROOT = BASE_DIR + '/static'\nSTATIC_URL = f'/{BASE_PATH}static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'project-static', 'dist'),\n os.path.join(BASE_DIR, 'project-static', 'img'),\n os.path.join(BASE_DIR, 'project-static', 'js'),\n ('docs', os.path.join(BASE_DIR, 'project-static', 'docs')), # Prefix with /docs\n)\n\n# Media\nMEDIA_URL = '/{}media/'.format(BASE_PATH)\n\n# Disable 
default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n# Messages\nMESSAGE_TAGS = {\n messages.ERROR: 'danger',\n}\n\n# Authentication URLs\nLOGIN_URL = f'/{BASE_PATH}login/'\nLOGIN_REDIRECT_URL = f'/{BASE_PATH}'\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nTEST_RUNNER = \"django_rich.test.RichRunner\"\n\n# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted\n# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.\nEXEMPT_EXCLUDE_MODELS = (\n ('auth', 'group'),\n ('auth', 'user'),\n ('users', 'objectpermission'),\n)\n\n# All URLs starting with a string listed here are exempt from login enforcement\nEXEMPT_PATHS = (\n f'/{BASE_PATH}api/',\n f'/{BASE_PATH}graphql/',\n f'/{BASE_PATH}login/',\n f'/{BASE_PATH}oauth/',\n f'/{BASE_PATH}metrics',\n)\n\nSERIALIZATION_MODULES = {\n 'json': 'utilities.serializers.json',\n}\n\n\n#\n# Sentry\n#\n\nif SENTRY_ENABLED:\n if not SENTRY_DSN:\n raise ImproperlyConfigured(\"SENTRY_ENABLED is True but SENTRY_DSN has not been defined.\")\n # If using the default DSN, force sampling rates\n if SENTRY_DSN == DEFAULT_SENTRY_DSN:\n SENTRY_SAMPLE_RATE = 1.0\n SENTRY_TRACES_SAMPLE_RATE = 0\n # Initialize the SDK\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n release=VERSION,\n integrations=[DjangoIntegration()],\n sample_rate=SENTRY_SAMPLE_RATE,\n traces_sample_rate=SENTRY_TRACES_SAMPLE_RATE,\n send_default_pii=True,\n http_proxy=HTTP_PROXIES.get('http') if HTTP_PROXIES else None,\n https_proxy=HTTP_PROXIES.get('https') if HTTP_PROXIES else None\n )\n # Assign any configured tags\n for k, v in SENTRY_TAGS.items():\n sentry_sdk.set_tag(k, v)\n # If using the default DSN, append a unique deployment ID tag for error correlation\n if SENTRY_DSN == DEFAULT_SENTRY_DSN:\n sentry_sdk.set_tag('netbox.deployment_id', DEPLOYMENT_ID)\n\n\n#\n# Django social auth\n#\n\nSOCIAL_AUTH_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'netbox.authentication.user_default_groups_handler',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n)\n\n# Load all SOCIAL_AUTH_* settings from the user configuration\nfor param in dir(configuration):\n if param.startswith('SOCIAL_AUTH_'):\n globals()[param] = getattr(configuration, param)\n\n# Force usage of PostgreSQL's JSONB field for extra data\nSOCIAL_AUTH_JSONFIELD_ENABLED = True\nSOCIAL_AUTH_CLEAN_USERNAME_FUNCTION = 'users.utils.clean_username'\n\n#\n# Django Prometheus\n#\n\nPROMETHEUS_EXPORT_MIGRATIONS = False\n\n\n#\n# Django filters\n#\n\nFILTERS_NULL_CHOICE_LABEL = 'None'\nFILTERS_NULL_CHOICE_VALUE = 'null'\n\n\n#\n# Django REST framework (API)\n#\n\nREST_FRAMEWORK_VERSION = '.'.join(VERSION.split('-')[0].split('.')[:2]) # Use major.minor as API version\nREST_FRAMEWORK = {\n 'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],\n 'COERCE_DECIMAL_TO_STRING': False,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'netbox.api.authentication.TokenAuthentication',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 
'rest_framework.filters.OrderingFilter',\n ),\n 'DEFAULT_METADATA_CLASS': 'netbox.api.metadata.BulkOperationMetadata',\n 'DEFAULT_PAGINATION_CLASS': 'netbox.api.pagination.OptionalLimitOffsetPagination',\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'netbox.api.authentication.TokenPermissions',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'netbox.api.renderers.FormlessBrowsableAPIRenderer',\n ),\n 'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',\n 'SCHEMA_COERCE_METHOD_NAMES': {\n # Default mappings\n 'retrieve': 'read',\n 'destroy': 'delete',\n # Custom operations\n 'bulk_destroy': 'bulk_delete',\n },\n 'VIEW_NAME_FUNCTION': 'utilities.api.get_view_name',\n}\n\n\n#\n# Graphene\n#\n\nGRAPHENE = {\n # Avoids naming collision on models with 'type' field; see\n # https://github.com/graphql-python/graphene-django/issues/185\n 'DJANGO_CHOICE_FIELD_ENUM_V3_NAMING': True,\n}\n\n\n#\n# drf_yasg (OpenAPI/Swagger)\n#\n\nSWAGGER_SETTINGS = {\n 'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',\n 'DEFAULT_FIELD_INSPECTORS': [\n 'utilities.custom_inspectors.CustomFieldsDataFieldInspector',\n 'utilities.custom_inspectors.NullableBooleanFieldInspector',\n 'utilities.custom_inspectors.ChoiceFieldInspector',\n 'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',\n 'drf_yasg.inspectors.CamelCaseJSONFilter',\n 'drf_yasg.inspectors.ReferencingSerializerInspector',\n 'drf_yasg.inspectors.RelatedFieldInspector',\n 'drf_yasg.inspectors.ChoiceFieldInspector',\n 'drf_yasg.inspectors.FileFieldInspector',\n 'drf_yasg.inspectors.DictFieldInspector',\n 'drf_yasg.inspectors.JSONFieldInspector',\n 'drf_yasg.inspectors.SerializerMethodFieldInspector',\n 'drf_yasg.inspectors.SimpleFieldInspector',\n 'drf_yasg.inspectors.StringDefaultFieldInspector',\n ],\n 'DEFAULT_FILTER_INSPECTORS': [\n 'drf_yasg.inspectors.CoreAPICompatInspector',\n ],\n 'DEFAULT_INFO': 'netbox.urls.openapi_info',\n 'DEFAULT_MODEL_DEPTH': 1,\n 'DEFAULT_PAGINATOR_INSPECTORS': [\n 'utilities.custom_inspectors.NullablePaginatorInspector',\n 'drf_yasg.inspectors.DjangoRestResponsePagination',\n 'drf_yasg.inspectors.CoreAPICompatInspector',\n ],\n 'SECURITY_DEFINITIONS': {\n 'Bearer': {\n 'type': 'apiKey',\n 'name': 'Authorization',\n 'in': 'header',\n }\n },\n 'VALIDATOR_URL': None,\n}\n\n\n#\n# Django RQ (Webhooks backend)\n#\n\nif TASKS_REDIS_USING_SENTINEL:\n RQ_PARAMS = {\n 'SENTINELS': TASKS_REDIS_SENTINELS,\n 'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,\n 'SOCKET_TIMEOUT': None,\n 'CONNECTION_KWARGS': {\n 'socket_connect_timeout': TASKS_REDIS_SENTINEL_TIMEOUT\n },\n }\nelse:\n RQ_PARAMS = {\n 'HOST': TASKS_REDIS_HOST,\n 'PORT': TASKS_REDIS_PORT,\n 'SSL': TASKS_REDIS_SSL,\n 'SSL_CERT_REQS': None if TASKS_REDIS_SKIP_TLS_VERIFY else 'required',\n }\nRQ_PARAMS.update({\n 'DB': TASKS_REDIS_DATABASE,\n 'USERNAME': TASKS_REDIS_USERNAME,\n 'PASSWORD': TASKS_REDIS_PASSWORD,\n 'DEFAULT_TIMEOUT': RQ_DEFAULT_TIMEOUT,\n})\n\nif TASKS_REDIS_CA_CERT_PATH:\n RQ_PARAMS.setdefault('REDIS_CLIENT_KWARGS', {})\n RQ_PARAMS['REDIS_CLIENT_KWARGS']['ssl_ca_certs'] = TASKS_REDIS_CA_CERT_PATH\n\nRQ_QUEUES = {\n RQ_QUEUE_HIGH: RQ_PARAMS,\n RQ_QUEUE_DEFAULT: RQ_PARAMS,\n RQ_QUEUE_LOW: RQ_PARAMS,\n}\n\n# Add any queues defined in QUEUE_MAPPINGS\nRQ_QUEUES.update({\n queue: RQ_PARAMS for queue in 
set(QUEUE_MAPPINGS.values()) if queue not in RQ_QUEUES\n})\n\n#\n# Localization\n#\n\nif not ENABLE_LOCALIZATION:\n USE_I18N = False\n USE_L10N = False\n\n#\n# Plugins\n#\n\nfor plugin_name in PLUGINS:\n # Import plugin module\n try:\n plugin = importlib.import_module(plugin_name)\n except ModuleNotFoundError as e:\n if getattr(e, 'name') == plugin_name:\n raise ImproperlyConfigured(\n \"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the \"\n \"correct Python environment.\".format(plugin_name)\n )\n raise e\n\n # Determine plugin config and add to INSTALLED_APPS.\n try:\n plugin_config: PluginConfig = plugin.config\n except AttributeError:\n raise ImproperlyConfigured(\n \"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file \"\n \"and point to the PluginConfig subclass.\".format(plugin_name)\n )\n\n plugin_module = \"{}.{}\".format(plugin_config.__module__, plugin_config.__name__) # type: ignore\n\n # Gather additional apps to load alongside this plugin\n django_apps = plugin_config.django_apps\n if plugin_name in django_apps:\n django_apps.pop(plugin_name)\n if plugin_module not in django_apps:\n django_apps.append(plugin_module)\n\n # Test if we can import all modules (or its parent, for PluginConfigs and AppConfigs)\n for app in django_apps:\n if \".\" in app:\n parts = app.split(\".\")\n spec = importlib.util.find_spec(\".\".join(parts[:-1]))\n else:\n spec = importlib.util.find_spec(app)\n if spec is None:\n raise ImproperlyConfigured(\n f\"Failed to load django_apps specified by plugin {plugin_name}: {django_apps} \"\n f\"The module {app} cannot be imported. Check that the necessary package has been \"\n \"installed within the correct Python environment.\"\n )\n\n INSTALLED_APPS.extend(django_apps)\n\n # Preserve uniqueness of the INSTALLED_APPS list, we keep the last occurence\n sorted_apps = reversed(list(dict.fromkeys(reversed(INSTALLED_APPS))))\n INSTALLED_APPS = list(sorted_apps)\n\n # Validate user-provided configuration settings and assign defaults\n if plugin_name not in PLUGINS_CONFIG:\n PLUGINS_CONFIG[plugin_name] = {}\n plugin_config.validate(PLUGINS_CONFIG[plugin_name], VERSION)\n\n # Add middleware\n plugin_middleware = plugin_config.middleware\n if plugin_middleware and type(plugin_middleware) in (list, tuple):\n MIDDLEWARE.extend(plugin_middleware)\n\n # Create RQ queues dedicated to the plugin\n # we use the plugin name as a prefix for queue name's defined in the plugin config\n # ex: mysuperplugin.mysuperqueue1\n if type(plugin_config.queues) is not list:\n raise ImproperlyConfigured(\n \"Plugin {} queues must be a list.\".format(plugin_name)\n )\n RQ_QUEUES.update({\n f\"{plugin_name}.{queue}\": RQ_PARAMS for queue in plugin_config.queues\n })\n", "path": "netbox/netbox/settings.py" } ]
[ { "content": "import hashlib\nimport importlib\nimport importlib.util\nimport os\nimport platform\nimport sys\nimport warnings\nfrom urllib.parse import urlsplit\n\nimport django\nimport sentry_sdk\nfrom django.contrib.messages import constants as messages\nfrom django.core.exceptions import ImproperlyConfigured, ValidationError\nfrom django.core.validators import URLValidator\nfrom django.utils.encoding import force_str\nfrom extras.plugins import PluginConfig\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom netbox.config import PARAMS\nfrom netbox.constants import RQ_QUEUE_DEFAULT, RQ_QUEUE_HIGH, RQ_QUEUE_LOW\n\n\n#\n# Environment setup\n#\n\nVERSION = '3.4.7-dev'\n\n# Hostname\nHOSTNAME = platform.node()\n\n# Set the base directory two levels up\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Validate Python version\nif sys.version_info < (3, 8):\n raise RuntimeError(\n f\"NetBox requires Python 3.8 or later. (Currently installed: Python {platform.python_version()})\"\n )\n\nDEFAULT_SENTRY_DSN = 'https://[email protected]/6396485'\n\n#\n# Configuration import\n#\n\n# Import configuration parameters\nconfig_path = os.getenv('NETBOX_CONFIGURATION', 'netbox.configuration')\ntry:\n configuration = importlib.import_module(config_path)\nexcept ModuleNotFoundError as e:\n if getattr(e, 'name') == config_path:\n raise ImproperlyConfigured(\n f\"Specified configuration module ({config_path}) not found. Please define netbox/netbox/configuration.py \"\n f\"per the documentation, or specify an alternate module in the NETBOX_CONFIGURATION environment variable.\"\n )\n raise\n\n# Enforce required configuration parameters\nfor parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:\n if not hasattr(configuration, parameter):\n raise ImproperlyConfigured(f\"Required parameter {parameter} is missing from configuration.\")\n\n# Set required parameters\nALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')\nDATABASE = getattr(configuration, 'DATABASE')\nREDIS = getattr(configuration, 'REDIS')\nSECRET_KEY = getattr(configuration, 'SECRET_KEY')\n\n# Calculate a unique deployment ID from the secret key\nDEPLOYMENT_ID = hashlib.sha256(SECRET_KEY.encode('utf-8')).hexdigest()[:16]\n\n# Set static config parameters\nADMINS = getattr(configuration, 'ADMINS', [])\nALLOW_TOKEN_RETRIEVAL = getattr(configuration, 'ALLOW_TOKEN_RETRIEVAL', True)\nAUTH_PASSWORD_VALIDATORS = getattr(configuration, 'AUTH_PASSWORD_VALIDATORS', [])\nBASE_PATH = getattr(configuration, 'BASE_PATH', '')\nif BASE_PATH:\n BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only\nCSRF_COOKIE_PATH = LANGUAGE_COOKIE_PATH = SESSION_COOKIE_PATH = f'/{BASE_PATH.rstrip(\"/\")}'\nCORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)\nCORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])\nCORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])\nCSRF_COOKIE_NAME = getattr(configuration, 'CSRF_COOKIE_NAME', 'csrftoken')\nCSRF_TRUSTED_ORIGINS = getattr(configuration, 'CSRF_TRUSTED_ORIGINS', [])\nDATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')\nDATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')\nDEBUG = getattr(configuration, 'DEBUG', False)\nDEVELOPER = getattr(configuration, 'DEVELOPER', False)\nDOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))\nEMAIL = getattr(configuration, 'EMAIL', {})\nEXEMPT_VIEW_PERMISSIONS = 
getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])\nFIELD_CHOICES = getattr(configuration, 'FIELD_CHOICES', {})\nFILE_UPLOAD_MAX_MEMORY_SIZE = getattr(configuration, 'FILE_UPLOAD_MAX_MEMORY_SIZE', 2621440)\nHTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)\nINTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))\nJINJA2_FILTERS = getattr(configuration, 'JINJA2_FILTERS', {})\nLANGUAGE_CODE = getattr(configuration, 'DEFAULT_LANGUAGE', 'en-us')\nLOGGING = getattr(configuration, 'LOGGING', {})\nLOGIN_PERSISTENCE = getattr(configuration, 'LOGIN_PERSISTENCE', False)\nLOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)\nLOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)\nLOGOUT_REDIRECT_URL = getattr(configuration, 'LOGOUT_REDIRECT_URL', 'home')\nMEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')\nMETRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)\nPLUGINS = getattr(configuration, 'PLUGINS', [])\nPLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})\nQUEUE_MAPPINGS = getattr(configuration, 'QUEUE_MAPPINGS', {})\nRELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)\nREMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)\nREMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')\nREMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])\nREMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})\nREMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)\nREMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')\nREMOTE_AUTH_GROUP_HEADER = getattr(configuration, 'REMOTE_AUTH_GROUP_HEADER', 'HTTP_REMOTE_USER_GROUP')\nREMOTE_AUTH_GROUP_SYNC_ENABLED = getattr(configuration, 'REMOTE_AUTH_GROUP_SYNC_ENABLED', False)\nREMOTE_AUTH_SUPERUSER_GROUPS = getattr(configuration, 'REMOTE_AUTH_SUPERUSER_GROUPS', [])\nREMOTE_AUTH_SUPERUSERS = getattr(configuration, 'REMOTE_AUTH_SUPERUSERS', [])\nREMOTE_AUTH_STAFF_GROUPS = getattr(configuration, 'REMOTE_AUTH_STAFF_GROUPS', [])\nREMOTE_AUTH_STAFF_USERS = getattr(configuration, 'REMOTE_AUTH_STAFF_USERS', [])\nREMOTE_AUTH_GROUP_SEPARATOR = getattr(configuration, 'REMOTE_AUTH_GROUP_SEPARATOR', '|')\nREPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')\nRQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)\nSCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')\nSEARCH_BACKEND = getattr(configuration, 'SEARCH_BACKEND', 'netbox.search.backends.CachedValueSearchBackend')\nSENTRY_DSN = getattr(configuration, 'SENTRY_DSN', DEFAULT_SENTRY_DSN)\nSENTRY_ENABLED = getattr(configuration, 'SENTRY_ENABLED', False)\nSENTRY_SAMPLE_RATE = getattr(configuration, 'SENTRY_SAMPLE_RATE', 1.0)\nSENTRY_TRACES_SAMPLE_RATE = getattr(configuration, 'SENTRY_TRACES_SAMPLE_RATE', 0)\nSENTRY_TAGS = getattr(configuration, 'SENTRY_TAGS', {})\nSESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)\nSESSION_COOKIE_NAME = getattr(configuration, 'SESSION_COOKIE_NAME', 'sessionid')\nSHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')\nSHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')\nSHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')\nSTORAGE_BACKEND = 
getattr(configuration, 'STORAGE_BACKEND', None)\nSTORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})\nTIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')\nTIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')\nENABLE_LOCALIZATION = getattr(configuration, 'ENABLE_LOCALIZATION', False)\n\n# Check for hard-coded dynamic config parameters\nfor param in PARAMS:\n if hasattr(configuration, param.name):\n globals()[param.name] = getattr(configuration, param.name)\n\n# Validate update repo URL and timeout\nif RELEASE_CHECK_URL:\n validator = URLValidator(\n message=(\n \"RELEASE_CHECK_URL must be a valid API URL. Example: \"\n \"https://api.github.com/repos/netbox-community/netbox\"\n )\n )\n try:\n validator(RELEASE_CHECK_URL)\n except ValidationError as err:\n raise ImproperlyConfigured(str(err))\n\n\n#\n# Database\n#\n\n# Only PostgreSQL is supported\nif METRICS_ENABLED:\n DATABASE.update({\n 'ENGINE': 'django_prometheus.db.backends.postgresql'\n })\nelse:\n DATABASE.update({\n 'ENGINE': 'django.db.backends.postgresql'\n })\n\nDATABASES = {\n 'default': DATABASE,\n}\n\n\n#\n# Media storage\n#\n\nif STORAGE_BACKEND is not None:\n DEFAULT_FILE_STORAGE = STORAGE_BACKEND\n\n # django-storages\n if STORAGE_BACKEND.startswith('storages.'):\n\n try:\n import storages.utils # type: ignore\n except ModuleNotFoundError as e:\n if getattr(e, 'name') == 'storages':\n raise ImproperlyConfigured(\n f\"STORAGE_BACKEND is set to {STORAGE_BACKEND} but django-storages is not present. It can be \"\n f\"installed by running 'pip install django-storages'.\"\n )\n raise e\n\n # Monkey-patch django-storages to fetch settings from STORAGE_CONFIG\n def _setting(name, default=None):\n if name in STORAGE_CONFIG:\n return STORAGE_CONFIG[name]\n return globals().get(name, default)\n storages.utils.setting = _setting\n\nif STORAGE_CONFIG and STORAGE_BACKEND is None:\n warnings.warn(\n \"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. 
STORAGE_CONFIG will be \"\n \"ignored.\"\n )\n\n\n#\n# Redis\n#\n\n# Background task queuing\nif 'tasks' not in REDIS:\n raise ImproperlyConfigured(\n \"REDIS section in configuration.py is missing the 'tasks' subsection.\"\n )\nTASKS_REDIS = REDIS['tasks']\nTASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')\nTASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)\nTASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])\nTASKS_REDIS_USING_SENTINEL = all([\n isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),\n len(TASKS_REDIS_SENTINELS) > 0\n])\nTASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')\nTASKS_REDIS_SENTINEL_TIMEOUT = TASKS_REDIS.get('SENTINEL_TIMEOUT', 10)\nTASKS_REDIS_USERNAME = TASKS_REDIS.get('USERNAME', '')\nTASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')\nTASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)\nTASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)\nTASKS_REDIS_SKIP_TLS_VERIFY = TASKS_REDIS.get('INSECURE_SKIP_TLS_VERIFY', False)\nTASKS_REDIS_CA_CERT_PATH = TASKS_REDIS.get('CA_CERT_PATH', False)\n\n# Caching\nif 'caching' not in REDIS:\n raise ImproperlyConfigured(\n \"REDIS section in configuration.py is missing caching subsection.\"\n )\nCACHING_REDIS_HOST = REDIS['caching'].get('HOST', 'localhost')\nCACHING_REDIS_PORT = REDIS['caching'].get('PORT', 6379)\nCACHING_REDIS_DATABASE = REDIS['caching'].get('DATABASE', 0)\nCACHING_REDIS_USERNAME = REDIS['caching'].get('USERNAME', '')\nCACHING_REDIS_USERNAME_HOST = '@'.join(filter(None, [CACHING_REDIS_USERNAME, CACHING_REDIS_HOST]))\nCACHING_REDIS_PASSWORD = REDIS['caching'].get('PASSWORD', '')\nCACHING_REDIS_SENTINELS = REDIS['caching'].get('SENTINELS', [])\nCACHING_REDIS_SENTINEL_SERVICE = REDIS['caching'].get('SENTINEL_SERVICE', 'default')\nCACHING_REDIS_PROTO = 'rediss' if REDIS['caching'].get('SSL', False) else 'redis'\nCACHING_REDIS_SKIP_TLS_VERIFY = REDIS['caching'].get('INSECURE_SKIP_TLS_VERIFY', False)\nCACHING_REDIS_CA_CERT_PATH = REDIS['caching'].get('CA_CERT_PATH', False)\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_USERNAME_HOST}:{CACHING_REDIS_PORT}/{CACHING_REDIS_DATABASE}',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n 'PASSWORD': CACHING_REDIS_PASSWORD,\n }\n }\n}\n\n\nif CACHING_REDIS_SENTINELS:\n DJANGO_REDIS_CONNECTION_FACTORY = 'django_redis.pool.SentinelConnectionFactory'\n CACHES['default']['LOCATION'] = f'{CACHING_REDIS_PROTO}://{CACHING_REDIS_SENTINEL_SERVICE}/{CACHING_REDIS_DATABASE}'\n CACHES['default']['OPTIONS']['CLIENT_CLASS'] = 'django_redis.client.SentinelClient'\n CACHES['default']['OPTIONS']['SENTINELS'] = CACHING_REDIS_SENTINELS\nif CACHING_REDIS_SKIP_TLS_VERIFY:\n CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})\n CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_cert_reqs'] = False\nif CACHING_REDIS_CA_CERT_PATH:\n CACHES['default']['OPTIONS'].setdefault('CONNECTION_POOL_KWARGS', {})\n CACHES['default']['OPTIONS']['CONNECTION_POOL_KWARGS']['ssl_ca_certs'] = CACHING_REDIS_CA_CERT_PATH\n\n#\n# Sessions\n#\n\nif LOGIN_TIMEOUT is not None:\n # Django default is 1209600 seconds (14 days)\n SESSION_COOKIE_AGE = LOGIN_TIMEOUT\nSESSION_SAVE_EVERY_REQUEST = bool(LOGIN_PERSISTENCE)\nif SESSION_FILE_PATH is not None:\n SESSION_ENGINE = 'django.contrib.sessions.backends.file'\n\n\n#\n# Email\n#\n\nEMAIL_HOST = EMAIL.get('SERVER')\nEMAIL_HOST_USER = EMAIL.get('USERNAME')\nEMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')\nEMAIL_PORT = 
EMAIL.get('PORT', 25)\nEMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')\nEMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')\nEMAIL_SUBJECT_PREFIX = '[NetBox] '\nEMAIL_USE_SSL = EMAIL.get('USE_SSL', False)\nEMAIL_USE_TLS = EMAIL.get('USE_TLS', False)\nEMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)\nSERVER_EMAIL = EMAIL.get('FROM_EMAIL')\n\n\n#\n# Django\n#\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'corsheaders',\n 'debug_toolbar',\n 'graphiql_debug_toolbar',\n 'django_filters',\n 'django_tables2',\n 'django_prometheus',\n 'graphene_django',\n 'mptt',\n 'rest_framework',\n 'social_django',\n 'taggit',\n 'timezone_field',\n 'circuits',\n 'dcim',\n 'ipam',\n 'extras',\n 'tenancy',\n 'users',\n 'utilities',\n 'virtualization',\n 'wireless',\n 'django_rq', # Must come after extras to allow overriding management commands\n 'drf_yasg',\n]\n\n# Middleware\nMIDDLEWARE = [\n 'graphiql_debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django_prometheus.middleware.PrometheusBeforeMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'netbox.middleware.ExceptionHandlingMiddleware',\n 'netbox.middleware.RemoteUserMiddleware',\n 'netbox.middleware.LoginRequiredMiddleware',\n 'netbox.middleware.DynamicConfigMiddleware',\n 'netbox.middleware.APIVersionMiddleware',\n 'netbox.middleware.ObjectChangeMiddleware',\n 'django_prometheus.middleware.PrometheusAfterMiddleware',\n]\n\nif not ENABLE_LOCALIZATION:\n MIDDLEWARE.remove(\"django.middleware.locale.LocaleMiddleware\")\n\nROOT_URLCONF = 'netbox.urls'\n\nTEMPLATES_DIR = BASE_DIR + '/templates'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATES_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'builtins': [\n 'utilities.templatetags.builtins.filters',\n 'utilities.templatetags.builtins.tags',\n ],\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.template.context_processors.media',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'netbox.context_processors.settings_and_registry',\n ],\n },\n },\n]\n\n# Set up authentication backends\nif type(REMOTE_AUTH_BACKEND) not in (list, tuple):\n REMOTE_AUTH_BACKEND = [REMOTE_AUTH_BACKEND]\nAUTHENTICATION_BACKENDS = [\n *REMOTE_AUTH_BACKEND,\n 'netbox.authentication.ObjectPermissionBackend',\n]\n\n# Time zones\nUSE_TZ = True\n\n# WSGI\nWSGI_APPLICATION = 'netbox.wsgi.application'\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nUSE_X_FORWARDED_HOST = True\nX_FRAME_OPTIONS = 'SAMEORIGIN'\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_ROOT = BASE_DIR + '/static'\nSTATIC_URL = f'/{BASE_PATH}static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'project-static', 'dist'),\n os.path.join(BASE_DIR, 'project-static', 'img'),\n os.path.join(BASE_DIR, 'project-static', 'js'),\n ('docs', os.path.join(BASE_DIR, 'project-static', 
'docs')), # Prefix with /docs\n)\n\n# Media\nMEDIA_URL = '/{}media/'.format(BASE_PATH)\n\n# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n# Messages\nMESSAGE_TAGS = {\n messages.ERROR: 'danger',\n}\n\n# Authentication URLs\nLOGIN_URL = f'/{BASE_PATH}login/'\nLOGIN_REDIRECT_URL = f'/{BASE_PATH}'\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nTEST_RUNNER = \"django_rich.test.RichRunner\"\n\n# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted\n# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.\nEXEMPT_EXCLUDE_MODELS = (\n ('auth', 'group'),\n ('auth', 'user'),\n ('users', 'objectpermission'),\n)\n\n# All URLs starting with a string listed here are exempt from login enforcement\nEXEMPT_PATHS = (\n f'/{BASE_PATH}api/',\n f'/{BASE_PATH}graphql/',\n f'/{BASE_PATH}login/',\n f'/{BASE_PATH}oauth/',\n f'/{BASE_PATH}metrics',\n)\n\nSERIALIZATION_MODULES = {\n 'json': 'utilities.serializers.json',\n}\n\n\n#\n# Sentry\n#\n\nif SENTRY_ENABLED:\n if not SENTRY_DSN:\n raise ImproperlyConfigured(\"SENTRY_ENABLED is True but SENTRY_DSN has not been defined.\")\n # If using the default DSN, force sampling rates\n if SENTRY_DSN == DEFAULT_SENTRY_DSN:\n SENTRY_SAMPLE_RATE = 1.0\n SENTRY_TRACES_SAMPLE_RATE = 0\n # Initialize the SDK\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n release=VERSION,\n integrations=[DjangoIntegration()],\n sample_rate=SENTRY_SAMPLE_RATE,\n traces_sample_rate=SENTRY_TRACES_SAMPLE_RATE,\n send_default_pii=True,\n http_proxy=HTTP_PROXIES.get('http') if HTTP_PROXIES else None,\n https_proxy=HTTP_PROXIES.get('https') if HTTP_PROXIES else None\n )\n # Assign any configured tags\n for k, v in SENTRY_TAGS.items():\n sentry_sdk.set_tag(k, v)\n # If using the default DSN, append a unique deployment ID tag for error correlation\n if SENTRY_DSN == DEFAULT_SENTRY_DSN:\n sentry_sdk.set_tag('netbox.deployment_id', DEPLOYMENT_ID)\n\n\n#\n# Django social auth\n#\n\nSOCIAL_AUTH_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'netbox.authentication.user_default_groups_handler',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n)\n\n# Load all SOCIAL_AUTH_* settings from the user configuration\nfor param in dir(configuration):\n if param.startswith('SOCIAL_AUTH_'):\n globals()[param] = getattr(configuration, param)\n\n# Force usage of PostgreSQL's JSONB field for extra data\nSOCIAL_AUTH_JSONFIELD_ENABLED = True\nSOCIAL_AUTH_CLEAN_USERNAME_FUNCTION = 'users.utils.clean_username'\n\n#\n# Django Prometheus\n#\n\nPROMETHEUS_EXPORT_MIGRATIONS = False\n\n\n#\n# Django filters\n#\n\nFILTERS_NULL_CHOICE_LABEL = 'None'\nFILTERS_NULL_CHOICE_VALUE = 'null'\n\n\n#\n# Django REST framework (API)\n#\n\nREST_FRAMEWORK_VERSION = '.'.join(VERSION.split('-')[0].split('.')[:2]) # Use major.minor as API version\nREST_FRAMEWORK = {\n 'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],\n 'COERCE_DECIMAL_TO_STRING': False,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'netbox.api.authentication.TokenAuthentication',\n ),\n 
'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'DEFAULT_METADATA_CLASS': 'netbox.api.metadata.BulkOperationMetadata',\n 'DEFAULT_PAGINATION_CLASS': 'netbox.api.pagination.OptionalLimitOffsetPagination',\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'netbox.api.authentication.TokenPermissions',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'netbox.api.renderers.FormlessBrowsableAPIRenderer',\n ),\n 'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',\n 'SCHEMA_COERCE_METHOD_NAMES': {\n # Default mappings\n 'retrieve': 'read',\n 'destroy': 'delete',\n # Custom operations\n 'bulk_destroy': 'bulk_delete',\n },\n 'VIEW_NAME_FUNCTION': 'utilities.api.get_view_name',\n}\n\n\n#\n# Graphene\n#\n\nGRAPHENE = {\n # Avoids naming collision on models with 'type' field; see\n # https://github.com/graphql-python/graphene-django/issues/185\n 'DJANGO_CHOICE_FIELD_ENUM_V3_NAMING': True,\n}\n\n\n#\n# drf_yasg (OpenAPI/Swagger)\n#\n\nSWAGGER_SETTINGS = {\n 'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',\n 'DEFAULT_FIELD_INSPECTORS': [\n 'utilities.custom_inspectors.CustomFieldsDataFieldInspector',\n 'utilities.custom_inspectors.NullableBooleanFieldInspector',\n 'utilities.custom_inspectors.ChoiceFieldInspector',\n 'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',\n 'drf_yasg.inspectors.CamelCaseJSONFilter',\n 'drf_yasg.inspectors.ReferencingSerializerInspector',\n 'drf_yasg.inspectors.RelatedFieldInspector',\n 'drf_yasg.inspectors.ChoiceFieldInspector',\n 'drf_yasg.inspectors.FileFieldInspector',\n 'drf_yasg.inspectors.DictFieldInspector',\n 'drf_yasg.inspectors.JSONFieldInspector',\n 'drf_yasg.inspectors.SerializerMethodFieldInspector',\n 'drf_yasg.inspectors.SimpleFieldInspector',\n 'drf_yasg.inspectors.StringDefaultFieldInspector',\n ],\n 'DEFAULT_FILTER_INSPECTORS': [\n 'drf_yasg.inspectors.CoreAPICompatInspector',\n ],\n 'DEFAULT_INFO': 'netbox.urls.openapi_info',\n 'DEFAULT_MODEL_DEPTH': 1,\n 'DEFAULT_PAGINATOR_INSPECTORS': [\n 'utilities.custom_inspectors.NullablePaginatorInspector',\n 'drf_yasg.inspectors.DjangoRestResponsePagination',\n 'drf_yasg.inspectors.CoreAPICompatInspector',\n ],\n 'SECURITY_DEFINITIONS': {\n 'Bearer': {\n 'type': 'apiKey',\n 'name': 'Authorization',\n 'in': 'header',\n }\n },\n 'VALIDATOR_URL': None,\n}\n\n\n#\n# Django RQ (Webhooks backend)\n#\n\nif TASKS_REDIS_USING_SENTINEL:\n RQ_PARAMS = {\n 'SENTINELS': TASKS_REDIS_SENTINELS,\n 'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,\n 'SOCKET_TIMEOUT': None,\n 'CONNECTION_KWARGS': {\n 'socket_connect_timeout': TASKS_REDIS_SENTINEL_TIMEOUT\n },\n }\nelse:\n RQ_PARAMS = {\n 'HOST': TASKS_REDIS_HOST,\n 'PORT': TASKS_REDIS_PORT,\n 'SSL': TASKS_REDIS_SSL,\n 'SSL_CERT_REQS': None if TASKS_REDIS_SKIP_TLS_VERIFY else 'required',\n }\nRQ_PARAMS.update({\n 'DB': TASKS_REDIS_DATABASE,\n 'USERNAME': TASKS_REDIS_USERNAME,\n 'PASSWORD': TASKS_REDIS_PASSWORD,\n 'DEFAULT_TIMEOUT': RQ_DEFAULT_TIMEOUT,\n})\n\nif TASKS_REDIS_CA_CERT_PATH:\n RQ_PARAMS.setdefault('REDIS_CLIENT_KWARGS', {})\n RQ_PARAMS['REDIS_CLIENT_KWARGS']['ssl_ca_certs'] = TASKS_REDIS_CA_CERT_PATH\n\nRQ_QUEUES = {\n RQ_QUEUE_HIGH: RQ_PARAMS,\n RQ_QUEUE_DEFAULT: RQ_PARAMS,\n RQ_QUEUE_LOW: RQ_PARAMS,\n}\n\n# Add any queues defined in 
QUEUE_MAPPINGS\nRQ_QUEUES.update({\n queue: RQ_PARAMS for queue in set(QUEUE_MAPPINGS.values()) if queue not in RQ_QUEUES\n})\n\n#\n# Localization\n#\n\nif not ENABLE_LOCALIZATION:\n USE_I18N = False\n USE_L10N = False\n\n#\n# Plugins\n#\n\nfor plugin_name in PLUGINS:\n # Import plugin module\n try:\n plugin = importlib.import_module(plugin_name)\n except ModuleNotFoundError as e:\n if getattr(e, 'name') == plugin_name:\n raise ImproperlyConfigured(\n \"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the \"\n \"correct Python environment.\".format(plugin_name)\n )\n raise e\n\n # Determine plugin config and add to INSTALLED_APPS.\n try:\n plugin_config: PluginConfig = plugin.config\n except AttributeError:\n raise ImproperlyConfigured(\n \"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file \"\n \"and point to the PluginConfig subclass.\".format(plugin_name)\n )\n\n plugin_module = \"{}.{}\".format(plugin_config.__module__, plugin_config.__name__) # type: ignore\n\n # Gather additional apps to load alongside this plugin\n django_apps = plugin_config.django_apps\n if plugin_name in django_apps:\n django_apps.pop(plugin_name)\n if plugin_module not in django_apps:\n django_apps.append(plugin_module)\n\n # Test if we can import all modules (or its parent, for PluginConfigs and AppConfigs)\n for app in django_apps:\n if \".\" in app:\n parts = app.split(\".\")\n spec = importlib.util.find_spec(\".\".join(parts[:-1]))\n else:\n spec = importlib.util.find_spec(app)\n if spec is None:\n raise ImproperlyConfigured(\n f\"Failed to load django_apps specified by plugin {plugin_name}: {django_apps} \"\n f\"The module {app} cannot be imported. Check that the necessary package has been \"\n \"installed within the correct Python environment.\"\n )\n\n INSTALLED_APPS.extend(django_apps)\n\n # Preserve uniqueness of the INSTALLED_APPS list, we keep the last occurence\n sorted_apps = reversed(list(dict.fromkeys(reversed(INSTALLED_APPS))))\n INSTALLED_APPS = list(sorted_apps)\n\n # Validate user-provided configuration settings and assign defaults\n if plugin_name not in PLUGINS_CONFIG:\n PLUGINS_CONFIG[plugin_name] = {}\n plugin_config.validate(PLUGINS_CONFIG[plugin_name], VERSION)\n\n # Add middleware\n plugin_middleware = plugin_config.middleware\n if plugin_middleware and type(plugin_middleware) in (list, tuple):\n MIDDLEWARE.extend(plugin_middleware)\n\n # Create RQ queues dedicated to the plugin\n # we use the plugin name as a prefix for queue name's defined in the plugin config\n # ex: mysuperplugin.mysuperqueue1\n if type(plugin_config.queues) is not list:\n raise ImproperlyConfigured(\n \"Plugin {} queues must be a list.\".format(plugin_name)\n )\n RQ_QUEUES.update({\n f\"{plugin_name}.{queue}\": RQ_PARAMS for queue in plugin_config.queues\n })\n", "path": "netbox/netbox/settings.py" } ]
diff --git a/docs/configuration/remote-authentication.md b/docs/configuration/remote-authentication.md index 07adf5c6a24..1fda8d0d35d 100644 --- a/docs/configuration/remote-authentication.md +++ b/docs/configuration/remote-authentication.md @@ -16,7 +16,7 @@ If true, NetBox will automatically create local accounts for users authenticated Default: `'netbox.authentication.RemoteUserBackend'` -This is the Python path to the custom [Django authentication backend](https://docs.djangoproject.com/en/stable/topics/auth/customizing/) to use for external user authentication. NetBox provides two built-in backends (listed below), though custom authentication backends may also be provided by other packages or plugins. +This is the Python path to the custom [Django authentication backend](https://docs.djangoproject.com/en/stable/topics/auth/customizing/) to use for external user authentication. NetBox provides two built-in backends (listed below), though custom authentication backends may also be provided by other packages or plugins. Provide a string for a single backend, or an iterable for multiple backends, which will be attempted in the order given. * `netbox.authentication.RemoteUserBackend` * `netbox.authentication.LDAPBackend` diff --git a/netbox/netbox/settings.py b/netbox/netbox/settings.py index 45fe3284104..cf2b06a8b25 100644 --- a/netbox/netbox/settings.py +++ b/netbox/netbox/settings.py @@ -396,8 +396,10 @@ def _setting(name, default=None): ] # Set up authentication backends +if type(REMOTE_AUTH_BACKEND) not in (list, tuple): + REMOTE_AUTH_BACKEND = [REMOTE_AUTH_BACKEND] AUTHENTICATION_BACKENDS = [ - REMOTE_AUTH_BACKEND, + *REMOTE_AUTH_BACKEND, 'netbox.authentication.ObjectPermissionBackend', ]
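The documentation and settings changes above let `REMOTE_AUTH_BACKEND` be either a single dotted path or an iterable of paths attempted in the order given. A minimal sketch of the corresponding `configuration.py` entry (values are illustrative, taken from the two built-in backends named in the documentation snippet, not from the PR itself):

```python
# configuration.py -- illustrative values only
REMOTE_AUTH_ENABLED = True

# A single backend may still be given as a string; with the change above,
# a list or tuple is also accepted and its entries are tried in order.
REMOTE_AUTH_BACKEND = [
    'netbox.authentication.RemoteUserBackend',  # built-in HTTP-header backend
    'netbox.authentication.LDAPBackend',        # built-in LDAP backend
]
```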
sbi-dev__sbi-674
small bug about the variable clipped_batch_size in SNRE

I believe this line of code https://github.com/mackelab/sbi/blob/4bcba0568ba408156eab28328a562425fb8ba89d/sbi/inference/snre/snre_base.py#L172 should read `clipped_batch_size = min(training_batch_size, val_loader.batch_size)`. Is that correct or am I fundamentally misunderstanding something about what this variable does?
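The distinction the report hinges on: for a PyTorch `DataLoader`, `len(loader)` is the number of mini-batches it yields per epoch, while `loader.batch_size` is the size of each mini-batch, so clamping `num_atoms` against the former uses a much smaller bound whenever the validation set spans only a few batches. A small self-contained sketch (the sizes are made up for illustration):

```python
import torch
from torch.utils import data

# 90 validation samples served in mini-batches of 50.
theta = torch.randn(90, 3)
x = torch.randn(90, 2)
val_loader = data.DataLoader(data.TensorDataset(theta, x), batch_size=50)

print(len(val_loader))        # 2  -> number of mini-batches yielded per epoch
print(val_loader.batch_size)  # 50 -> size of each mini-batch

# min(training_batch_size, len(val_loader)) would clamp num_atoms to 2 here,
# whereas min(training_batch_size, val_loader.batch_size) keeps it at 50.
```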
[ { "content": "from abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, Optional, Union\n\nimport torch\nfrom torch import Tensor, eye, nn, ones, optim\nfrom torch.distributions import Distribution\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nfrom torch.utils import data\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nfrom sbi import utils as utils\nfrom sbi.inference.base import NeuralInference\nfrom sbi.inference.posteriors import MCMCPosterior, RejectionPosterior, VIPosterior\nfrom sbi.inference.potentials import ratio_estimator_based_potential\nfrom sbi.utils import (\n check_estimator_arg,\n check_prior,\n clamp_and_warn,\n validate_theta_and_x,\n x_shape_from_simulation,\n)\nfrom sbi.utils.sbiutils import mask_sims_from_prior\n\n\nclass RatioEstimator(NeuralInference, ABC):\n def __init__(\n self,\n prior: Optional[Distribution] = None,\n classifier: Union[str, Callable] = \"resnet\",\n device: str = \"cpu\",\n logging_level: Union[int, str] = \"warning\",\n summary_writer: Optional[SummaryWriter] = None,\n show_progress_bars: bool = True,\n ):\n r\"\"\"Sequential Neural Ratio Estimation.\n\n We implement two inference methods in the respective subclasses.\n\n - SNRE_A / AALR is limited to `num_atoms=2`, but allows for density evaluation\n when training for one round.\n - SNRE_B / SRE can use more than two atoms, potentially boosting performance,\n but allows for posterior evaluation **only up to a normalizing constant**,\n even when training only one round.\n\n Args:\n classifier: Classifier trained to approximate likelihood ratios. If it is\n a string, use a pre-configured network of the provided type (one of\n linear, mlp, resnet). Alternatively, a function that builds a custom\n neural network can be provided. The function will be called with the\n first batch of simulations (theta, x), which can thus be used for shape\n inference and potentially for z-scoring. It needs to return a PyTorch\n `nn.Module` implementing the classifier.\n\n See docstring of `NeuralInference` class for all other arguments.\n \"\"\"\n\n super().__init__(\n prior=prior,\n device=device,\n logging_level=logging_level,\n summary_writer=summary_writer,\n show_progress_bars=show_progress_bars,\n )\n\n # As detailed in the docstring, `density_estimator` is either a string or\n # a callable. The function creating the neural network is attached to\n # `_build_neural_net`. It will be called in the first round and receive\n # thetas and xs as inputs, so that they can be used for shape inference and\n # potentially for z-scoring.\n check_estimator_arg(classifier)\n if isinstance(classifier, str):\n self._build_neural_net = utils.classifier_nn(model=classifier)\n else:\n self._build_neural_net = classifier\n\n # Ratio-based-specific summary_writer fields.\n self._summary.update({\"mcmc_times\": []}) # type: ignore\n\n def append_simulations(\n self,\n theta: Tensor,\n x: Tensor,\n from_round: int = 0,\n ) -> \"RatioEstimator\":\n r\"\"\"Store parameters and simulation outputs to use them for later training.\n\n Data are stored as entries in lists for each type of variable (parameter/data).\n\n Stores $\\theta$, $x$, prior_masks (indicating if simulations are coming from the\n prior or not) and an index indicating which round the batch of simulations came\n from.\n\n Args:\n theta: Parameter sets.\n x: Simulation outputs.\n from_round: Which round the data stemmed from. 
Round 0 means from the prior.\n With default settings, this is not used at all for `SNRE`. Only when\n the user later on requests `.train(discard_prior_samples=True)`, we\n use these indices to find which training data stemmed from the prior.\n\n Returns:\n NeuralInference object (returned so that this function is chainable).\n \"\"\"\n\n theta, x = validate_theta_and_x(theta, x, training_device=self._device)\n\n self._theta_roundwise.append(theta)\n self._x_roundwise.append(x)\n self._prior_masks.append(mask_sims_from_prior(int(from_round), theta.size(0)))\n self._data_round_index.append(int(from_round))\n\n return self\n\n def train(\n self,\n num_atoms: int = 10,\n training_batch_size: int = 50,\n learning_rate: float = 5e-4,\n validation_fraction: float = 0.1,\n stop_after_epochs: int = 20,\n max_num_epochs: int = 2**31 - 1,\n clip_max_norm: Optional[float] = 5.0,\n exclude_invalid_x: bool = True,\n resume_training: bool = False,\n discard_prior_samples: bool = False,\n retrain_from_scratch: bool = False,\n show_train_summary: bool = False,\n dataloader_kwargs: Optional[Dict] = None,\n ) -> nn.Module:\n r\"\"\"Return classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n\n Args:\n num_atoms: Number of atoms to use for classification.\n exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`\n during training. Expect errors, silent or explicit, when `False`.\n resume_training: Can be used in case training time is limited, e.g. on a\n cluster. If `True`, the split between train and validation set, the\n optimizer, the number of epochs, and the best validation log-prob will\n be restored from the last time `.train()` was called.\n discard_prior_samples: Whether to discard samples simulated in round 1, i.e.\n from the prior. Training may be sped up by ignoring such less targeted\n samples.\n retrain_from_scratch: Whether to retrain the conditional density\n estimator for the posterior from scratch each round.\n dataloader_kwargs: Additional or updated kwargs to be passed to the training\n and validation dataloaders (like, e.g., a collate_fn).\n\n Returns:\n Classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n \"\"\"\n\n # Starting index for the training set (1 = discard round-0 samples).\n start_idx = int(discard_prior_samples and self._round > 0)\n # Load data from most recent round.\n self._round = max(self._data_round_index)\n theta, x, _ = self.get_simulations(\n start_idx, exclude_invalid_x, warn_on_invalid=True\n )\n\n # Dataset is shared for training and validation loaders.\n dataset = data.TensorDataset(theta, x)\n\n train_loader, val_loader = self.get_dataloaders(\n dataset,\n training_batch_size,\n validation_fraction,\n resume_training,\n dataloader_kwargs=dataloader_kwargs,\n )\n\n clipped_batch_size = min(training_batch_size, len(val_loader))\n\n num_atoms = int(\n clamp_and_warn(\n \"num_atoms\", num_atoms, min_val=2, max_val=clipped_batch_size\n )\n )\n\n # First round or if retraining from scratch:\n # Call the `self._build_neural_net` with the rounds' thetas and xs as\n # arguments, which will build the neural network\n # This is passed into NeuralPosterior, to create a neural posterior which\n # can `sample()` and `log_prob()`. 
The network is accessible via `.net`.\n if self._neural_net is None or retrain_from_scratch:\n self._neural_net = self._build_neural_net(\n theta[self.train_indices], x[self.train_indices]\n )\n self._x_shape = x_shape_from_simulation(x)\n\n self._neural_net.to(self._device)\n\n if not resume_training:\n self.optimizer = optim.Adam(\n list(self._neural_net.parameters()),\n lr=learning_rate,\n )\n self.epoch, self._val_log_prob = 0, float(\"-Inf\")\n\n while self.epoch <= max_num_epochs and not self._converged(\n self.epoch, stop_after_epochs\n ):\n\n # Train for a single epoch.\n self._neural_net.train()\n train_log_probs_sum = 0\n for batch in train_loader:\n self.optimizer.zero_grad()\n theta_batch, x_batch = (\n batch[0].to(self._device),\n batch[1].to(self._device),\n )\n\n train_losses = self._loss(theta_batch, x_batch, num_atoms)\n train_loss = torch.mean(train_losses)\n train_log_probs_sum -= train_losses.sum().item()\n\n train_loss.backward()\n if clip_max_norm is not None:\n clip_grad_norm_(\n self._neural_net.parameters(),\n max_norm=clip_max_norm,\n )\n self.optimizer.step()\n\n self.epoch += 1\n\n train_log_prob_average = train_log_probs_sum / (\n len(train_loader) * train_loader.batch_size # type: ignore\n )\n self._summary[\"train_log_probs\"].append(train_log_prob_average)\n\n # Calculate validation performance.\n self._neural_net.eval()\n val_log_prob_sum = 0\n with torch.no_grad():\n for batch in val_loader:\n theta_batch, x_batch = (\n batch[0].to(self._device),\n batch[1].to(self._device),\n )\n val_losses = self._loss(theta_batch, x_batch, num_atoms)\n val_log_prob_sum -= val_losses.sum().item()\n # Take mean over all validation samples.\n self._val_log_prob = val_log_prob_sum / (\n len(val_loader) * val_loader.batch_size # type: ignore\n )\n # Log validation log prob for every epoch.\n self._summary[\"validation_log_probs\"].append(self._val_log_prob)\n\n self._maybe_show_progress(self._show_progress_bars, self.epoch)\n\n self._report_convergence_at_end(self.epoch, stop_after_epochs, max_num_epochs)\n\n # Update summary.\n self._summary[\"epochs\"].append(self.epoch)\n self._summary[\"best_validation_log_probs\"].append(self._best_val_log_prob)\n\n # Update TensorBoard and summary dict.\n self._summarize(\n round_=self._round,\n x_o=None,\n theta_bank=theta,\n x_bank=x,\n )\n\n # Update description for progress bar.\n if show_train_summary:\n print(self._describe_round(self._round, self._summary))\n\n # Avoid keeping the gradients in the resulting network, which can\n # cause memory leakage when benchmarking.\n self._neural_net.zero_grad(set_to_none=True)\n\n return deepcopy(self._neural_net)\n\n def _classifier_logits(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n \"\"\"Return logits obtained through classifier forward pass.\n\n The logits are obtained from atomic sets of (theta,x) pairs.\n \"\"\"\n batch_size = theta.shape[0]\n repeated_x = utils.repeat_rows(x, num_atoms)\n\n # Choose `1` or `num_atoms - 1` thetas from the rest of the batch for each x.\n probs = ones(batch_size, batch_size) * (1 - eye(batch_size)) / (batch_size - 1)\n\n choices = torch.multinomial(probs, num_samples=num_atoms - 1, replacement=False)\n\n contrasting_theta = theta[choices]\n\n atomic_theta = torch.cat((theta[:, None, :], contrasting_theta), dim=1).reshape(\n batch_size * num_atoms, -1\n )\n\n return self._neural_net([atomic_theta, repeated_x])\n\n @abstractmethod\n def _loss(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n raise NotImplementedError\n\n def 
build_posterior(\n self,\n density_estimator: Optional[nn.Module] = None,\n prior: Optional[Distribution] = None,\n sample_with: str = \"mcmc\",\n mcmc_method: str = \"slice_np\",\n vi_method: str = \"rKL\",\n mcmc_parameters: Dict[str, Any] = {},\n vi_parameters: Dict[str, Any] = {},\n rejection_sampling_parameters: Dict[str, Any] = {},\n ) -> Union[MCMCPosterior, RejectionPosterior, VIPosterior]:\n r\"\"\"Build posterior from the neural density estimator.\n\n SNRE trains a neural network to approximate likelihood ratios. The\n posterior wraps the trained network such that one can directly evaluate the\n unnormalized posterior log probability $p(\\theta|x) \\propto p(x|\\theta) \\cdot\n p(\\theta)$ and draw samples from the posterior with MCMC or rejection sampling.\n Note that, in the case of single-round SNRE_A / AALR, it is possible to\n evaluate the log-probability of the **normalized** posterior, but sampling\n still requires MCMC (or rejection sampling).\n\n Args:\n density_estimator: The density estimator that the posterior is based on.\n If `None`, use the latest neural density estimator that was trained.\n prior: Prior distribution.\n sample_with: Method to use for sampling from the posterior. Must be one of\n [`mcmc` | `rejection` | `vi`].\n mcmc_method: Method used for MCMC sampling, one of `slice_np`, `slice`,\n `hmc`, `nuts`. Currently defaults to `slice_np` for a custom numpy\n implementation of slice sampling; select `hmc`, `nuts` or `slice` for\n Pyro-based sampling.\n vi_method: Method used for VI, one of [`rKL`, `fKL`, `IW`, `alpha`]. Note\n that some of the methods admit a `mode seeking` property (e.g. rKL)\n whereas some admit a `mass covering` one (e.g fKL).\n mcmc_parameters: Additional kwargs passed to `MCMCPosterior`.\n vi_parameters: Additional kwargs passed to `VIPosterior`.\n rejection_sampling_parameters: Additional kwargs passed to\n `RejectionPosterior`.\n\n Returns:\n Posterior $p(\\theta|x)$ with `.sample()` and `.log_prob()` methods\n (the returned log-probability is unnormalized).\n \"\"\"\n if prior is None:\n assert (\n self._prior is not None\n ), \"\"\"You did not pass a prior. 
You have to pass the prior either at\n initialization `inference = SNRE(prior)` or to `.build_posterior\n (prior=prior)`.\"\"\"\n prior = self._prior\n else:\n check_prior(prior)\n\n if density_estimator is None:\n ratio_estimator = self._neural_net\n # If internal net is used device is defined.\n device = self._device\n else:\n ratio_estimator = density_estimator\n # Otherwise, infer it from the device of the net parameters.\n device = next(density_estimator.parameters()).device.type\n\n potential_fn, theta_transform = ratio_estimator_based_potential(\n ratio_estimator=ratio_estimator, prior=prior, x_o=None\n )\n\n if sample_with == \"mcmc\":\n self._posterior = MCMCPosterior(\n potential_fn=potential_fn,\n theta_transform=theta_transform,\n proposal=prior,\n method=mcmc_method,\n device=device,\n x_shape=self._x_shape,\n **mcmc_parameters,\n )\n elif sample_with == \"rejection\":\n self._posterior = RejectionPosterior(\n potential_fn=potential_fn,\n proposal=prior,\n device=device,\n x_shape=self._x_shape,\n **rejection_sampling_parameters,\n )\n elif sample_with == \"vi\":\n self._posterior = VIPosterior(\n potential_fn=potential_fn,\n theta_transform=theta_transform,\n prior=prior, # type: ignore\n vi_method=vi_method,\n device=device,\n x_shape=self._x_shape,\n **vi_parameters,\n )\n else:\n raise NotImplementedError\n\n # Store models at end of each round.\n self._model_bank.append(deepcopy(self._posterior))\n\n return deepcopy(self._posterior)\n", "path": "sbi/inference/snre/snre_base.py" } ]
[ { "content": "from abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, Optional, Union\n\nimport torch\nfrom torch import Tensor, eye, nn, ones, optim\nfrom torch.distributions import Distribution\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nfrom torch.utils import data\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nfrom sbi import utils as utils\nfrom sbi.inference.base import NeuralInference\nfrom sbi.inference.posteriors import MCMCPosterior, RejectionPosterior, VIPosterior\nfrom sbi.inference.potentials import ratio_estimator_based_potential\nfrom sbi.utils import (\n check_estimator_arg,\n check_prior,\n clamp_and_warn,\n validate_theta_and_x,\n x_shape_from_simulation,\n)\nfrom sbi.utils.sbiutils import mask_sims_from_prior\n\n\nclass RatioEstimator(NeuralInference, ABC):\n def __init__(\n self,\n prior: Optional[Distribution] = None,\n classifier: Union[str, Callable] = \"resnet\",\n device: str = \"cpu\",\n logging_level: Union[int, str] = \"warning\",\n summary_writer: Optional[SummaryWriter] = None,\n show_progress_bars: bool = True,\n ):\n r\"\"\"Sequential Neural Ratio Estimation.\n\n We implement two inference methods in the respective subclasses.\n\n - SNRE_A / AALR is limited to `num_atoms=2`, but allows for density evaluation\n when training for one round.\n - SNRE_B / SRE can use more than two atoms, potentially boosting performance,\n but allows for posterior evaluation **only up to a normalizing constant**,\n even when training only one round.\n\n Args:\n classifier: Classifier trained to approximate likelihood ratios. If it is\n a string, use a pre-configured network of the provided type (one of\n linear, mlp, resnet). Alternatively, a function that builds a custom\n neural network can be provided. The function will be called with the\n first batch of simulations (theta, x), which can thus be used for shape\n inference and potentially for z-scoring. It needs to return a PyTorch\n `nn.Module` implementing the classifier.\n\n See docstring of `NeuralInference` class for all other arguments.\n \"\"\"\n\n super().__init__(\n prior=prior,\n device=device,\n logging_level=logging_level,\n summary_writer=summary_writer,\n show_progress_bars=show_progress_bars,\n )\n\n # As detailed in the docstring, `density_estimator` is either a string or\n # a callable. The function creating the neural network is attached to\n # `_build_neural_net`. It will be called in the first round and receive\n # thetas and xs as inputs, so that they can be used for shape inference and\n # potentially for z-scoring.\n check_estimator_arg(classifier)\n if isinstance(classifier, str):\n self._build_neural_net = utils.classifier_nn(model=classifier)\n else:\n self._build_neural_net = classifier\n\n # Ratio-based-specific summary_writer fields.\n self._summary.update({\"mcmc_times\": []}) # type: ignore\n\n def append_simulations(\n self,\n theta: Tensor,\n x: Tensor,\n from_round: int = 0,\n ) -> \"RatioEstimator\":\n r\"\"\"Store parameters and simulation outputs to use them for later training.\n\n Data are stored as entries in lists for each type of variable (parameter/data).\n\n Stores $\\theta$, $x$, prior_masks (indicating if simulations are coming from the\n prior or not) and an index indicating which round the batch of simulations came\n from.\n\n Args:\n theta: Parameter sets.\n x: Simulation outputs.\n from_round: Which round the data stemmed from. 
Round 0 means from the prior.\n With default settings, this is not used at all for `SNRE`. Only when\n the user later on requests `.train(discard_prior_samples=True)`, we\n use these indices to find which training data stemmed from the prior.\n\n Returns:\n NeuralInference object (returned so that this function is chainable).\n \"\"\"\n\n theta, x = validate_theta_and_x(theta, x, training_device=self._device)\n\n self._theta_roundwise.append(theta)\n self._x_roundwise.append(x)\n self._prior_masks.append(mask_sims_from_prior(int(from_round), theta.size(0)))\n self._data_round_index.append(int(from_round))\n\n return self\n\n def train(\n self,\n num_atoms: int = 10,\n training_batch_size: int = 50,\n learning_rate: float = 5e-4,\n validation_fraction: float = 0.1,\n stop_after_epochs: int = 20,\n max_num_epochs: int = 2**31 - 1,\n clip_max_norm: Optional[float] = 5.0,\n exclude_invalid_x: bool = True,\n resume_training: bool = False,\n discard_prior_samples: bool = False,\n retrain_from_scratch: bool = False,\n show_train_summary: bool = False,\n dataloader_kwargs: Optional[Dict] = None,\n ) -> nn.Module:\n r\"\"\"Return classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n\n Args:\n num_atoms: Number of atoms to use for classification.\n exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`\n during training. Expect errors, silent or explicit, when `False`.\n resume_training: Can be used in case training time is limited, e.g. on a\n cluster. If `True`, the split between train and validation set, the\n optimizer, the number of epochs, and the best validation log-prob will\n be restored from the last time `.train()` was called.\n discard_prior_samples: Whether to discard samples simulated in round 1, i.e.\n from the prior. Training may be sped up by ignoring such less targeted\n samples.\n retrain_from_scratch: Whether to retrain the conditional density\n estimator for the posterior from scratch each round.\n dataloader_kwargs: Additional or updated kwargs to be passed to the training\n and validation dataloaders (like, e.g., a collate_fn).\n\n Returns:\n Classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n \"\"\"\n\n # Starting index for the training set (1 = discard round-0 samples).\n start_idx = int(discard_prior_samples and self._round > 0)\n # Load data from most recent round.\n self._round = max(self._data_round_index)\n theta, x, _ = self.get_simulations(\n start_idx, exclude_invalid_x, warn_on_invalid=True\n )\n\n # Dataset is shared for training and validation loaders.\n dataset = data.TensorDataset(theta, x)\n\n train_loader, val_loader = self.get_dataloaders(\n dataset,\n training_batch_size,\n validation_fraction,\n resume_training,\n dataloader_kwargs=dataloader_kwargs,\n )\n\n clipped_batch_size = min(training_batch_size, val_loader.batch_size) # type: ignore\n\n num_atoms = int(\n clamp_and_warn(\n \"num_atoms\", num_atoms, min_val=2, max_val=clipped_batch_size\n )\n )\n\n # First round or if retraining from scratch:\n # Call the `self._build_neural_net` with the rounds' thetas and xs as\n # arguments, which will build the neural network\n # This is passed into NeuralPosterior, to create a neural posterior which\n # can `sample()` and `log_prob()`. 
The network is accessible via `.net`.\n if self._neural_net is None or retrain_from_scratch:\n self._neural_net = self._build_neural_net(\n theta[self.train_indices], x[self.train_indices]\n )\n self._x_shape = x_shape_from_simulation(x)\n\n self._neural_net.to(self._device)\n\n if not resume_training:\n self.optimizer = optim.Adam(\n list(self._neural_net.parameters()),\n lr=learning_rate,\n )\n self.epoch, self._val_log_prob = 0, float(\"-Inf\")\n\n while self.epoch <= max_num_epochs and not self._converged(\n self.epoch, stop_after_epochs\n ):\n\n # Train for a single epoch.\n self._neural_net.train()\n train_log_probs_sum = 0\n for batch in train_loader:\n self.optimizer.zero_grad()\n theta_batch, x_batch = (\n batch[0].to(self._device),\n batch[1].to(self._device),\n )\n\n train_losses = self._loss(theta_batch, x_batch, num_atoms)\n train_loss = torch.mean(train_losses)\n train_log_probs_sum -= train_losses.sum().item()\n\n train_loss.backward()\n if clip_max_norm is not None:\n clip_grad_norm_(\n self._neural_net.parameters(),\n max_norm=clip_max_norm,\n )\n self.optimizer.step()\n\n self.epoch += 1\n\n train_log_prob_average = train_log_probs_sum / (\n len(train_loader) * train_loader.batch_size # type: ignore\n )\n self._summary[\"train_log_probs\"].append(train_log_prob_average)\n\n # Calculate validation performance.\n self._neural_net.eval()\n val_log_prob_sum = 0\n with torch.no_grad():\n for batch in val_loader:\n theta_batch, x_batch = (\n batch[0].to(self._device),\n batch[1].to(self._device),\n )\n val_losses = self._loss(theta_batch, x_batch, num_atoms)\n val_log_prob_sum -= val_losses.sum().item()\n # Take mean over all validation samples.\n self._val_log_prob = val_log_prob_sum / (\n len(val_loader) * val_loader.batch_size # type: ignore\n )\n # Log validation log prob for every epoch.\n self._summary[\"validation_log_probs\"].append(self._val_log_prob)\n\n self._maybe_show_progress(self._show_progress_bars, self.epoch)\n\n self._report_convergence_at_end(self.epoch, stop_after_epochs, max_num_epochs)\n\n # Update summary.\n self._summary[\"epochs\"].append(self.epoch)\n self._summary[\"best_validation_log_probs\"].append(self._best_val_log_prob)\n\n # Update TensorBoard and summary dict.\n self._summarize(\n round_=self._round,\n x_o=None,\n theta_bank=theta,\n x_bank=x,\n )\n\n # Update description for progress bar.\n if show_train_summary:\n print(self._describe_round(self._round, self._summary))\n\n # Avoid keeping the gradients in the resulting network, which can\n # cause memory leakage when benchmarking.\n self._neural_net.zero_grad(set_to_none=True)\n\n return deepcopy(self._neural_net)\n\n def _classifier_logits(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n \"\"\"Return logits obtained through classifier forward pass.\n\n The logits are obtained from atomic sets of (theta,x) pairs.\n \"\"\"\n batch_size = theta.shape[0]\n repeated_x = utils.repeat_rows(x, num_atoms)\n\n # Choose `1` or `num_atoms - 1` thetas from the rest of the batch for each x.\n probs = ones(batch_size, batch_size) * (1 - eye(batch_size)) / (batch_size - 1)\n\n choices = torch.multinomial(probs, num_samples=num_atoms - 1, replacement=False)\n\n contrasting_theta = theta[choices]\n\n atomic_theta = torch.cat((theta[:, None, :], contrasting_theta), dim=1).reshape(\n batch_size * num_atoms, -1\n )\n\n return self._neural_net([atomic_theta, repeated_x])\n\n @abstractmethod\n def _loss(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n raise NotImplementedError\n\n def 
build_posterior(\n self,\n density_estimator: Optional[nn.Module] = None,\n prior: Optional[Distribution] = None,\n sample_with: str = \"mcmc\",\n mcmc_method: str = \"slice_np\",\n vi_method: str = \"rKL\",\n mcmc_parameters: Dict[str, Any] = {},\n vi_parameters: Dict[str, Any] = {},\n rejection_sampling_parameters: Dict[str, Any] = {},\n ) -> Union[MCMCPosterior, RejectionPosterior, VIPosterior]:\n r\"\"\"Build posterior from the neural density estimator.\n\n SNRE trains a neural network to approximate likelihood ratios. The\n posterior wraps the trained network such that one can directly evaluate the\n unnormalized posterior log probability $p(\\theta|x) \\propto p(x|\\theta) \\cdot\n p(\\theta)$ and draw samples from the posterior with MCMC or rejection sampling.\n Note that, in the case of single-round SNRE_A / AALR, it is possible to\n evaluate the log-probability of the **normalized** posterior, but sampling\n still requires MCMC (or rejection sampling).\n\n Args:\n density_estimator: The density estimator that the posterior is based on.\n If `None`, use the latest neural density estimator that was trained.\n prior: Prior distribution.\n sample_with: Method to use for sampling from the posterior. Must be one of\n [`mcmc` | `rejection` | `vi`].\n mcmc_method: Method used for MCMC sampling, one of `slice_np`, `slice`,\n `hmc`, `nuts`. Currently defaults to `slice_np` for a custom numpy\n implementation of slice sampling; select `hmc`, `nuts` or `slice` for\n Pyro-based sampling.\n vi_method: Method used for VI, one of [`rKL`, `fKL`, `IW`, `alpha`]. Note\n that some of the methods admit a `mode seeking` property (e.g. rKL)\n whereas some admit a `mass covering` one (e.g fKL).\n mcmc_parameters: Additional kwargs passed to `MCMCPosterior`.\n vi_parameters: Additional kwargs passed to `VIPosterior`.\n rejection_sampling_parameters: Additional kwargs passed to\n `RejectionPosterior`.\n\n Returns:\n Posterior $p(\\theta|x)$ with `.sample()` and `.log_prob()` methods\n (the returned log-probability is unnormalized).\n \"\"\"\n if prior is None:\n assert (\n self._prior is not None\n ), \"\"\"You did not pass a prior. 
You have to pass the prior either at\n initialization `inference = SNRE(prior)` or to `.build_posterior\n (prior=prior)`.\"\"\"\n prior = self._prior\n else:\n check_prior(prior)\n\n if density_estimator is None:\n ratio_estimator = self._neural_net\n # If internal net is used device is defined.\n device = self._device\n else:\n ratio_estimator = density_estimator\n # Otherwise, infer it from the device of the net parameters.\n device = next(density_estimator.parameters()).device.type\n\n potential_fn, theta_transform = ratio_estimator_based_potential(\n ratio_estimator=ratio_estimator, prior=prior, x_o=None\n )\n\n if sample_with == \"mcmc\":\n self._posterior = MCMCPosterior(\n potential_fn=potential_fn,\n theta_transform=theta_transform,\n proposal=prior,\n method=mcmc_method,\n device=device,\n x_shape=self._x_shape,\n **mcmc_parameters,\n )\n elif sample_with == \"rejection\":\n self._posterior = RejectionPosterior(\n potential_fn=potential_fn,\n proposal=prior,\n device=device,\n x_shape=self._x_shape,\n **rejection_sampling_parameters,\n )\n elif sample_with == \"vi\":\n self._posterior = VIPosterior(\n potential_fn=potential_fn,\n theta_transform=theta_transform,\n prior=prior, # type: ignore\n vi_method=vi_method,\n device=device,\n x_shape=self._x_shape,\n **vi_parameters,\n )\n else:\n raise NotImplementedError\n\n # Store models at end of each round.\n self._model_bank.append(deepcopy(self._posterior))\n\n return deepcopy(self._posterior)\n", "path": "sbi/inference/snre/snre_base.py" } ]
diff --git a/sbi/inference/snre/snre_base.py b/sbi/inference/snre/snre_base.py index cc9be42d4..09898527e 100644 --- a/sbi/inference/snre/snre_base.py +++ b/sbi/inference/snre/snre_base.py @@ -169,7 +169,7 @@ def train( dataloader_kwargs=dataloader_kwargs, ) - clipped_batch_size = min(training_batch_size, len(val_loader)) + clipped_batch_size = min(training_batch_size, val_loader.batch_size) # type: ignore num_atoms = int( clamp_and_warn(
getredash__redash-1119
User should be able to delete an Alert

Alerts can't be removed via the UI; as a workaround, SQL like the following has to be run directly against the database:

```sql
delete from alerts where id = 〜
```
[ { "content": "import time\n\nfrom flask import request\nfrom funcy import project\n\nfrom redash import models\nfrom redash.permissions import require_access, require_admin_or_owner, view_only, require_permission\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404\n\n\nclass AlertResource(BaseResource):\n def get(self, alert_id):\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n return alert.to_dict()\n\n def post(self, alert_id):\n req = request.get_json(True)\n params = project(req, ('options', 'name', 'query_id', 'rearm'))\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_admin_or_owner(alert.user.id)\n\n if 'query_id' in params:\n params['query'] = params.pop('query_id')\n\n alert.update_instance(**params)\n\n self.record_event({\n 'action': 'edit',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n\nclass AlertListResource(BaseResource):\n def post(self):\n req = request.get_json(True)\n require_fields(req, ('options', 'name', 'query_id'))\n\n query = models.Query.get_by_id_and_org(req['query_id'], self.current_org)\n require_access(query.groups, self.current_user, view_only)\n\n alert = models.Alert.create(\n name=req['name'],\n query=query,\n user=self.current_user,\n options=req['options']\n )\n\n self.record_event({\n 'action': 'create',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n @require_permission('list_alerts')\n def get(self):\n return [alert.to_dict() for alert in models.Alert.all(groups=self.current_user.groups)]\n\n\nclass AlertSubscriptionListResource(BaseResource):\n def post(self, alert_id):\n req = request.get_json(True)\n\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n kwargs = {'alert': alert, 'user': self.current_user}\n\n if 'destination_id' in req:\n destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org)\n kwargs['destination'] = destination\n\n subscription = models.AlertSubscription.create(**kwargs)\n\n self.record_event({\n 'action': 'subscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert',\n 'destination': req.get('destination_id')\n })\n\n return subscription.to_dict()\n\n def get(self, alert_id):\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n\n subscriptions = models.AlertSubscription.all(alert_id)\n return [s.to_dict() for s in subscriptions]\n\n\nclass AlertSubscriptionResource(BaseResource):\n def delete(self, alert_id, subscriber_id):\n \n subscription = get_object_or_404(models.AlertSubscription.get_by_id, subscriber_id)\n require_admin_or_owner(subscription.user.id)\n subscription.delete_instance()\n\n self.record_event({\n 'action': 'unsubscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert'\n })\n\n", "path": "redash/handlers/alerts.py" } ]
[ { "content": "import time\n\nfrom flask import request\nfrom funcy import project\n\nfrom redash import models\nfrom redash.permissions import require_access, require_admin_or_owner, view_only, require_permission\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404\n\n\nclass AlertResource(BaseResource):\n def get(self, alert_id):\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n return alert.to_dict()\n\n def post(self, alert_id):\n req = request.get_json(True)\n params = project(req, ('options', 'name', 'query_id', 'rearm'))\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_admin_or_owner(alert.user.id)\n\n if 'query_id' in params:\n params['query'] = params.pop('query_id')\n\n alert.update_instance(**params)\n\n self.record_event({\n 'action': 'edit',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n def delete(self, alert_id):\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_admin_or_owner(alert.user.id)\n alert.delete_instance(recursive=True)\n\n\nclass AlertListResource(BaseResource):\n def post(self):\n req = request.get_json(True)\n require_fields(req, ('options', 'name', 'query_id'))\n\n query = models.Query.get_by_id_and_org(req['query_id'], self.current_org)\n require_access(query.groups, self.current_user, view_only)\n\n alert = models.Alert.create(\n name=req['name'],\n query=query,\n user=self.current_user,\n options=req['options']\n )\n\n self.record_event({\n 'action': 'create',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n @require_permission('list_alerts')\n def get(self):\n return [alert.to_dict() for alert in models.Alert.all(groups=self.current_user.groups)]\n\n\nclass AlertSubscriptionListResource(BaseResource):\n def post(self, alert_id):\n req = request.get_json(True)\n\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n kwargs = {'alert': alert, 'user': self.current_user}\n\n if 'destination_id' in req:\n destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org)\n kwargs['destination'] = destination\n\n subscription = models.AlertSubscription.create(**kwargs)\n\n self.record_event({\n 'action': 'subscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert',\n 'destination': req.get('destination_id')\n })\n\n return subscription.to_dict()\n\n def get(self, alert_id):\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n\n subscriptions = models.AlertSubscription.all(alert_id)\n return [s.to_dict() for s in subscriptions]\n\n\nclass AlertSubscriptionResource(BaseResource):\n def delete(self, alert_id, subscriber_id):\n \n subscription = get_object_or_404(models.AlertSubscription.get_by_id, subscriber_id)\n require_admin_or_owner(subscription.user.id)\n subscription.delete_instance()\n\n self.record_event({\n 'action': 'unsubscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert'\n })\n\n", "path": "redash/handlers/alerts.py" } ]
diff --git a/rd_ui/app/scripts/controllers/alerts.js b/rd_ui/app/scripts/controllers/alerts.js index 342792785a..c444e388e1 100644 --- a/rd_ui/app/scripts/controllers/alerts.js +++ b/rd_ui/app/scripts/controllers/alerts.js @@ -67,10 +67,12 @@ if ($scope.alertId === "new") { $scope.alert = new Alert({options: {}}); + $scope.canEdit = true; } else { $scope.alert = Alert.get({id: $scope.alertId}, function(alert) { $scope.onQuerySelected(new Query($scope.alert.query)); }); + $scope.canEdit = currentUser.canEdit($scope.alert); } $scope.ops = ['greater than', 'less than', 'equals']; @@ -110,6 +112,15 @@ }); }; + $scope.delete = function() { + $scope.alert.$delete(function() { + $location.path('/alerts'); + growl.addSuccessMessage("Alert deleted."); + }, function() { + growl.addErrorMessage("Failed deleting alert."); + }); + } + }; angular.module('redash.directives').directive('alertSubscriptions', ['$q', '$sce', 'AlertSubscription', 'Destination', 'growl', function ($q, $sce, AlertSubscription, Destination, growl) { diff --git a/rd_ui/app/views/alerts/edit.html b/rd_ui/app/views/alerts/edit.html index 79b439a258..fbc6c5021a 100644 --- a/rd_ui/app/views/alerts/edit.html +++ b/rd_ui/app/views/alerts/edit.html @@ -7,10 +7,10 @@ <div class="container"> <div class="row bg-white p-10"> <div class="col-md-8"> - <form name="alertForm" ng-submit="saveChanges()" class="form"> + <form name="alertForm" class="form"> <div class="form-group"> <label>Query</label> - <ui-select ng-model="alert.query" reset-search-input="false" on-select="onQuerySelected($item)"> + <ui-select ng-model="alert.query" reset-search-input="false" on-select="onQuerySelected($item)" ng-disabled="!canEdit"> <ui-select-match placeholder="Search a query by name">{{$select.selected.name}}</ui-select-match> <ui-select-choices repeat="q in queries" refresh="searchQueries($select.search)" @@ -22,7 +22,7 @@ <div class="form-group" ng-show="selectedQuery"> <label>Name</label> - <input type="string" placeholder="{{getDefaultName()}}" class="form-control" ng-model="alert.name"> + <input type="string" placeholder="{{getDefaultName()}}" class="form-control" ng-model="alert.name" ng-disabled="!canEdit"> </div> <div ng-show="queryResult" class="form-horizontal"> @@ -30,7 +30,7 @@ <label class="control-label col-md-2">Value column</label> <div class="col-md-4"> <select ng-options="name for name in queryResult.getColumnNames()" ng-model="alert.options.column" - class="form-control"></select> + class="form-control" ng-disabled="!canEdit"></select> </div> <label class="control-label col-md-2">Value</label> <div class="col-md-4"> @@ -40,24 +40,25 @@ <div class="form-group"> <label class="control-label col-md-2">Op</label> <div class="col-md-4"> - <select ng-options="name for name in ops" ng-model="alert.options.op" class="form-control"></select> + <select ng-options="name for name in ops" ng-model="alert.options.op" class="form-control" ng-disabled="!canEdit"></select> </div> <label class="control-label col-md-2">Reference</label> <div class="col-md-4"> - <input type="number" step="any" class="form-control" ng-model="alert.options.value" placeholder="reference value" + <input type="number" step="any" class="form-control" ng-model="alert.options.value" placeholder="reference value" ng-disabled="!canEdit" required/> </div> </div> <div class="form-group"> <label class="control-label col-md-2">Rearm seconds</label> <div class="col-md-4"> - <input type="number" class="form-control" ng-model="alert.rearm"/> + <input type="number" class="form-control" 
ng-model="alert.rearm" ng-disabled="!canEdit"/> </div> </div> </div> - <div class="form-group"> - <button class="btn btn-primary" ng-disabled="!alertForm.$valid">Save</button> + <div class="form-group" ng-if="canEdit"> + <button class="btn btn-primary" ng-disabled="!alertForm.$valid" ng-click="saveChanges()">Save</button> + <button class="btn btn-danger" ng-if="alert.id" ng-click="delete()">Delete</button> </div> </form> </div> diff --git a/redash/handlers/alerts.py b/redash/handlers/alerts.py index 8b066d11a7..714dc8f730 100644 --- a/redash/handlers/alerts.py +++ b/redash/handlers/alerts.py @@ -34,6 +34,11 @@ def post(self, alert_id): return alert.to_dict() + def delete(self, alert_id): + alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org) + require_admin_or_owner(alert.user.id) + alert.delete_instance(recursive=True) + class AlertListResource(BaseResource): def post(self): diff --git a/tests/factories.py b/tests/factories.py index c91c584293..b6d7cf3055 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -187,7 +187,8 @@ def create_alert(self, **kwargs): def create_alert_subscription(self, **kwargs): args = { - 'user': self.user + 'user': self.user, + 'alert': self.create_alert() } args.update(**kwargs) diff --git a/tests/handlers/test_alerts.py b/tests/handlers/test_alerts.py index 46c10e310a..0053fbc10c 100644 --- a/tests/handlers/test_alerts.py +++ b/tests/handlers/test_alerts.py @@ -1,8 +1,5 @@ from tests import BaseTestCase -from tests.factories import org_factory -from tests.handlers import authenticated_user, json_request -from redash.wsgi import app -from redash.models import AlertSubscription +from redash.models import AlertSubscription, Alert class TestAlertResourceGet(BaseTestCase): @@ -30,6 +27,36 @@ def test_returns_404_if_admin_from_another_org(self): self.assertEqual(rv.status_code, 404) +class TestAlertResourceDelete(BaseTestCase): + def test_removes_alert_and_subscriptions(self): + subscription = self.factory.create_alert_subscription() + alert = subscription.alert + + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id)) + self.assertEqual(rv.status_code, 200) + + self.assertRaises(Alert.DoesNotExist, Alert.get_by_id, subscription.alert.id) + self.assertRaises(AlertSubscription.DoesNotExist, AlertSubscription.get_by_id, subscription.id) + + def test_returns_403_if_not_allowed(self): + alert = self.factory.create_alert() + + user = self.factory.create_user() + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=user) + self.assertEqual(rv.status_code, 403) + + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=self.factory.create_admin()) + self.assertEqual(rv.status_code, 200) + + def test_returns_404_for_unauthorized_users(self): + alert = self.factory.create_alert() + + second_org = self.factory.create_org() + second_org_admin = self.factory.create_admin(org=second_org) + rv = self.make_request('delete', "/api/alerts/{}".format(alert.id), user=second_org_admin) + self.assertEqual(rv.status_code, 404) + + class TestAlertListPost(BaseTestCase): def test_returns_200_if_has_access_to_query(self): query = self.factory.create_query()
microsoft__torchgeo-1755
SustainBenchCropYield download doesn't work

### Description

Downloading the SustainBenchCropYield dataset doesn't work as expected.

### Steps to reproduce

```
ds = SustainBenchCropYield("data/", download=True)
```

This downloads a file called `soybeans` and then fails while unzipping `soybeans.zip`. It works if you rename the downloaded file to `.zip` and unzip it manually.

### Version

0.6.0.dev0
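For context, a rough sketch of the manual workaround described above, assuming the partially downloaded `soybeans` file is still sitting in the data root; renaming simply gives the archive the `.zip` extension that the extraction step expects.

```python
import os

from torchgeo.datasets import SustainBenchCropYield

root = "data/"  # same root as in the reproduction above

# The download step saves the archive as "<root>/soybeans" (no extension),
# while extraction looks for "<root>/soybeans.zip", hence the failure.
src = os.path.join(root, "soybeans")
if os.path.exists(src) and not os.path.exists(src + ".zip"):
    os.rename(src, src + ".zip")

# With the archive renamed, re-creating the dataset extracts it normally.
ds = SustainBenchCropYield(root, download=False)
```

The patch below removes the need for this workaround by downloading the archive with the `.zip` suffix in the first place.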
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"SustainBench Crop Yield dataset.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass SustainBenchCropYield(NonGeoDataset):\n \"\"\"SustainBench Crop Yield Dataset.\n\n This dataset contains MODIS band histograms and soybean yield\n estimates for selected counties in the USA, Argentina and Brazil.\n The dataset is part of the\n `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_\n datasets for tackling the UN Sustainable Development Goals (SDGs).\n\n Dataset Format:\n\n * .npz files of stacked samples\n\n Dataset Features:\n\n * input histogram of 7 surface reflectance and 2 surface temperature\n bands from MODIS pixel values in 32 ranges across 32 timesteps\n resulting in 32x32x9 input images\n * regression target value of soybean yield in metric tonnes per\n harvested hectare\n\n If you use this dataset in your research, please cite:\n\n * https://doi.org/10.1145/3209811.3212707\n * https://doi.org/10.1609/aaai.v31i1.11172\n\n .. versionadded:: 0.5\n \"\"\" # noqa: E501\n\n valid_countries = [\"usa\", \"brazil\", \"argentina\"]\n\n md5 = \"c2794e59512c897d9bea77b112848122\"\n\n url = \"https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link\" # noqa: E501\n\n dir = \"soybeans\"\n\n valid_splits = [\"train\", \"dev\", \"test\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n countries: list[str] = [\"usa\"],\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"dev\", or \"test\"\n countries: which countries to include in the dataset\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``countries`` contains invalid countries or if ``split``\n is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert set(countries).issubset(\n self.valid_countries\n ), f\"Please choose a subset of these valid countried: {self.valid_countries}.\"\n self.countries = countries\n\n assert (\n split in self.valid_splits\n ), f\"Pleas choose one of these valid data splits {self.valid_splits}.\"\n self.split = split\n\n self.root = root\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n self.collection = self.retrieve_collection()\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.collection)\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n input_file_path, sample_idx = self.collection[index]\n\n sample: dict[str, Tensor] = {\n \"image\": 
self._load_image(input_file_path, sample_idx)\n }\n sample.update(self._load_features(input_file_path, sample_idx))\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def _load_image(self, path: str, sample_idx: int) -> Tensor:\n \"\"\"Load input image.\n\n Args:\n path: path to input npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n input image as tensor\n \"\"\"\n arr = np.load(path)[\"data\"][sample_idx]\n # return [channel, height, width]\n return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n\n def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:\n \"\"\"Load features value.\n\n Args:\n path: path to image npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n target regression value\n \"\"\"\n target_file_path = path.replace(\"_hists\", \"_yields\")\n target = np.load(target_file_path)[\"data\"][sample_idx]\n\n years_file_path = path.replace(\"_hists\", \"_years\")\n year = int(np.load(years_file_path)[\"data\"][sample_idx])\n\n ndvi_file_path = path.replace(\"_hists\", \"_ndvi\")\n ndvi = np.load(ndvi_file_path)[\"data\"][sample_idx]\n\n features = {\n \"label\": torch.tensor(target).to(torch.float32),\n \"year\": torch.tensor(year),\n \"ndvi\": torch.from_numpy(ndvi).to(dtype=torch.float32),\n }\n return features\n\n def retrieve_collection(self) -> list[tuple[str, int]]:\n \"\"\"Retrieve the collection.\n\n Returns:\n path and index to dataset samples\n \"\"\"\n collection = []\n for country in self.countries:\n file_path = os.path.join(\n self.root, self.dir, country, f\"{self.split}_hists.npz\"\n )\n npz_file = np.load(file_path)\n num_data_points = npz_file[\"data\"].shape[0]\n for idx in range(num_data_points):\n collection.append((file_path, idx))\n\n return collection\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n zipfile_path = os.path.join(self.root, self.dir) + \".zip\"\n extract_archive(zipfile_path, self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n band_idx: int = 0,\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample return by :meth:`__getitem__`\n band_idx: which of the nine histograms to index\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional suptitle to use for figure\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n \"\"\"\n image, label = sample[\"image\"], sample[\"label\"].item()\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = sample[\"prediction\"].item()\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n\n 
ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])\n ax.axis(\"off\")\n\n if show_titles:\n title = f\"Label: {label:.3f}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction:.3f}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/sustainbench_crop_yield.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"SustainBench Crop Yield dataset.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass SustainBenchCropYield(NonGeoDataset):\n \"\"\"SustainBench Crop Yield Dataset.\n\n This dataset contains MODIS band histograms and soybean yield\n estimates for selected counties in the USA, Argentina and Brazil.\n The dataset is part of the\n `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_\n datasets for tackling the UN Sustainable Development Goals (SDGs).\n\n Dataset Format:\n\n * .npz files of stacked samples\n\n Dataset Features:\n\n * input histogram of 7 surface reflectance and 2 surface temperature\n bands from MODIS pixel values in 32 ranges across 32 timesteps\n resulting in 32x32x9 input images\n * regression target value of soybean yield in metric tonnes per\n harvested hectare\n\n If you use this dataset in your research, please cite:\n\n * https://doi.org/10.1145/3209811.3212707\n * https://doi.org/10.1609/aaai.v31i1.11172\n\n .. versionadded:: 0.5\n \"\"\" # noqa: E501\n\n valid_countries = [\"usa\", \"brazil\", \"argentina\"]\n\n md5 = \"c2794e59512c897d9bea77b112848122\"\n\n url = \"https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link\" # noqa: E501\n\n dir = \"soybeans\"\n\n valid_splits = [\"train\", \"dev\", \"test\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n countries: list[str] = [\"usa\"],\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"dev\", or \"test\"\n countries: which countries to include in the dataset\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``countries`` contains invalid countries or if ``split``\n is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert set(countries).issubset(\n self.valid_countries\n ), f\"Please choose a subset of these valid countried: {self.valid_countries}.\"\n self.countries = countries\n\n assert (\n split in self.valid_splits\n ), f\"Pleas choose one of these valid data splits {self.valid_splits}.\"\n self.split = split\n\n self.root = root\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n self.collection = self.retrieve_collection()\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.collection)\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n input_file_path, sample_idx = self.collection[index]\n\n sample: dict[str, Tensor] = {\n \"image\": 
self._load_image(input_file_path, sample_idx)\n }\n sample.update(self._load_features(input_file_path, sample_idx))\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def _load_image(self, path: str, sample_idx: int) -> Tensor:\n \"\"\"Load input image.\n\n Args:\n path: path to input npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n input image as tensor\n \"\"\"\n arr = np.load(path)[\"data\"][sample_idx]\n # return [channel, height, width]\n return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n\n def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:\n \"\"\"Load features value.\n\n Args:\n path: path to image npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n target regression value\n \"\"\"\n target_file_path = path.replace(\"_hists\", \"_yields\")\n target = np.load(target_file_path)[\"data\"][sample_idx]\n\n years_file_path = path.replace(\"_hists\", \"_years\")\n year = int(np.load(years_file_path)[\"data\"][sample_idx])\n\n ndvi_file_path = path.replace(\"_hists\", \"_ndvi\")\n ndvi = np.load(ndvi_file_path)[\"data\"][sample_idx]\n\n features = {\n \"label\": torch.tensor(target).to(torch.float32),\n \"year\": torch.tensor(year),\n \"ndvi\": torch.from_numpy(ndvi).to(dtype=torch.float32),\n }\n return features\n\n def retrieve_collection(self) -> list[tuple[str, int]]:\n \"\"\"Retrieve the collection.\n\n Returns:\n path and index to dataset samples\n \"\"\"\n collection = []\n for country in self.countries:\n file_path = os.path.join(\n self.root, self.dir, country, f\"{self.split}_hists.npz\"\n )\n npz_file = np.load(file_path)\n num_data_points = npz_file[\"data\"].shape[0]\n for idx in range(num_data_points):\n collection.append((file_path, idx))\n\n return collection\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.dir + \".zip\",\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n zipfile_path = os.path.join(self.root, self.dir) + \".zip\"\n extract_archive(zipfile_path, self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n band_idx: int = 0,\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample return by :meth:`__getitem__`\n band_idx: which of the nine histograms to index\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional suptitle to use for figure\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n \"\"\"\n image, label = sample[\"image\"], sample[\"label\"].item()\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = sample[\"prediction\"].item()\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n\n 
ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])\n ax.axis(\"off\")\n\n if show_titles:\n title = f\"Label: {label:.3f}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction:.3f}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/sustainbench_crop_yield.py" } ]
diff --git a/torchgeo/datasets/sustainbench_crop_yield.py b/torchgeo/datasets/sustainbench_crop_yield.py index 5dca4e6d969..564524f9479 100644 --- a/torchgeo/datasets/sustainbench_crop_yield.py +++ b/torchgeo/datasets/sustainbench_crop_yield.py @@ -211,7 +211,7 @@ def _download(self) -> None: download_url( self.url, self.root, - filename=self.dir, + filename=self.dir + ".zip", md5=self.md5 if self.checksum else None, ) self._extract()
geopandas__geopandas-28
Minor typo?

It looks like line 121 in geoseries.py (in function `_series_unary_op`) should read:

``` python
return Series([getattr(geom, op) for geom in self],
```

rather than

``` python
return GeoSeries([getattr(geom, op) for geom in self],
```

This is a very minor bug, so I didn't bother with a PR, but am happy to do one if that helps.
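To make the intent concrete, here is a small sketch of the expected behavior, mirroring the regression test added in the patch below; the polygons and values are illustrative only. Unary ops such as `.area` produce plain floats, so the result should be an ordinary `pandas.Series` rather than a `GeoSeries` wrapping non-geometry values.

```python
from pandas import Series
from shapely.geometry import Polygon

from geopandas import GeoSeries

g = GeoSeries([
    Polygon([(0, 0), (1, 0), (1, 1)]),          # triangle, area 0.5
    Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),  # unit square, area 1.0
])

areas = g.area
# With the one-line fix, the scalar results come back as a plain pandas Series.
assert type(areas) is Series
print(list(areas))  # [0.5, 1.0]
```

Before the fix, the same call returned a `GeoSeries` of floats, which is exactly what the added test guards against.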
[ { "content": "from warnings import warn\nfrom functools import partial\n\nimport numpy as np\nfrom pandas import Series, DataFrame\n\nimport pyproj\nfrom shapely.geometry import shape, Polygon, Point\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.geometry.base import BaseGeometry\nfrom shapely.ops import cascaded_union, unary_union, transform\nimport fiona\nfrom fiona.crs import from_epsg\n\nfrom plotting import plot_series\n\nEMPTY_COLLECTION = GeometryCollection()\nEMPTY_POLYGON = Polygon()\nEMPTY_POINT = Point()\n\n\n\n\ndef _is_empty(x):\n try:\n return x.is_empty\n except:\n return False\n\n\ndef _is_geometry(x):\n return isinstance(x, BaseGeometry)\n\n\nclass GeoSeries(Series):\n \"\"\"A Series object designed to store shapely geometry objects.\"\"\"\n\n def __new__(cls, *args, **kwargs):\n kwargs.pop('crs', None)\n arr = Series.__new__(cls, *args, **kwargs)\n if type(arr) is GeoSeries:\n return arr\n else:\n return arr.view(GeoSeries)\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n super(GeoSeries, self).__init__(*args, **kwargs)\n self.crs = crs\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoSeries from a file\n \n Parameters\n ----------\n \n filename : str\n File path or file handle to read from. Depending on which kwargs\n are included, the content of filename may vary, see:\n http://toblerity.github.io/fiona/README.html#usage\n for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to \n access multi-layer data, data stored within archives (zip files),\n etc.\n \n \"\"\"\n geoms = []\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n for rec in f:\n geoms.append(shape(rec['geometry']))\n g = GeoSeries(geoms)\n g.crs = crs\n return g\n\n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n from geopandas import GeoDataFrame\n data = GeoDataFrame({\"geometry\": self,\n \"id\":self.index.values},\n index=self.index)\n data.crs = self.crs\n data.to_file(filename, driver, **kwargs)\n \n #\n # Internal methods\n #\n\n def _geo_op(self, other, op):\n \"\"\"Operation that returns a GeoSeries\"\"\"\n if isinstance(other, GeoSeries):\n if self.crs != other.crs:\n warn('GeoSeries crs mismatch: {} and {}'.format(self.crs, other.crs))\n this, other = self.align(other)\n return GeoSeries([getattr(s[0], op)(s[1]) for s in zip(this, other)],\n index=this.index, crs=self.crs)\n else:\n return GeoSeries([getattr(s, op)(other) for s in self],\n index=self.index, crs=self.crs)\n\n # TODO: think about merging with _geo_op\n def _series_op(self, other, op, **kwargs):\n \"\"\"Geometric operation that returns a pandas Series\"\"\"\n if isinstance(other, GeoSeries):\n this, other = self.align(other)\n return Series([getattr(s[0], op)(s[1], **kwargs) for s in zip(this, other)],\n index=this.index)\n else:\n return Series([getattr(s, op)(other, **kwargs) for s in self],\n index=self.index)\n\n def _geo_unary_op(self, op):\n \"\"\"Unary operation that returns a GeoSeries\"\"\"\n return GeoSeries([getattr(geom, op) for geom in self],\n index=self.index, crs=self.crs)\n\n def _series_unary_op(self, op):\n \"\"\"Unary operation that returns a Series\"\"\"\n return GeoSeries([getattr(geom, op) for geom in self],\n index=self.index)\n\n #\n # Implementation of Shapely methods\n #\n\n #\n # Unary operations that return a Series\n #\n\n @property\n def area(self):\n \"\"\"Return the area of each geometry in the 
GeoSeries\"\"\"\n return self._series_unary_op('area')\n\n @property\n def geom_type(self):\n \"\"\"Return the geometry type of each geometry in the GeoSeries\"\"\"\n return self._series_unary_op('geom_type')\n\n @property\n def type(self):\n \"\"\"Return the geometry type of each geometry in the GeoSeries\"\"\"\n return self.geom_type\n\n @property\n def length(self):\n \"\"\"Return the length of each geometry in the GeoSeries\"\"\"\n return self._series_unary_op('length')\n\n @property\n def is_valid(self):\n \"\"\"Return True for each valid geometry, else False\"\"\"\n return self._series_unary_op('is_valid')\n\n @property\n def is_empty(self):\n \"\"\"Return True for each empty geometry, False for non-empty\"\"\"\n return self._series_unary_op('is_empty')\n\n @property\n def is_simple(self):\n \"\"\"Return True for each simple geometry, else False\"\"\"\n return self._series_unary_op('is_simple')\n\n @property\n def is_ring(self):\n \"\"\"Return True for each geometry that is a closed ring, else False\"\"\"\n # operates on the exterior, so can't use _series_unary_op()\n return Series([geom.exterior.is_ring for geom in self],\n index=self.index)\n\n #\n # Unary operations that return a GeoSeries\n #\n\n @property\n def boundary(self):\n \"\"\"Return the bounding geometry for each geometry\"\"\"\n return self._geo_unary_op('boundary')\n\n @property\n def centroid(self):\n \"\"\"Return the centroid of each geometry in the GeoSeries\"\"\"\n return self._geo_unary_op('centroid')\n\n @property\n def convex_hull(self):\n \"\"\"Return the convex hull of each geometry\"\"\"\n return self._geo_unary_op('convex_hull')\n\n @property\n def envelope(self):\n \"\"\"Return a bounding rectangle for each geometry\"\"\"\n return self._geo_unary_op('envelope')\n\n @property\n def exterior(self):\n \"\"\"Return the outer boundary of each polygon\"\"\"\n # TODO: return empty geometry for non-polygons\n return self._geo_unary_op('exterior')\n\n @property\n def interiors(self):\n \"\"\"Return the interior rings of each polygon\"\"\"\n # TODO: return empty list or None for non-polygons\n return self._geo_unary_op('interiors')\n\n def representative_point(self):\n \"\"\"Return a GeoSeries of points guaranteed to be in each geometry\"\"\"\n return GeoSeries([geom.representative_point() for geom in self],\n index=self.index)\n\n #\n # Reduction operations that return a Shapely geometry\n #\n\n @property\n def cascaded_union(self):\n \"\"\"Deprecated: Return the unary_union of all geometries\"\"\"\n return cascaded_union(self.values)\n\n @property\n def unary_union(self):\n \"\"\"Return the union of all geometries\"\"\"\n return unary_union(self.values)\n\n #\n # Binary operations that return a GeoSeries\n #\n\n def difference(self, other):\n \"\"\"Return the set-theoretic difference of each geometry with *other*\"\"\"\n return self._geo_op(other, 'difference')\n\n def symmetric_difference(self, other):\n \"\"\"Return the symmetric difference of each geometry with *other*\"\"\"\n return self._geo_op(other, 'symmetric_difference')\n\n def union(self, other):\n \"\"\"Return the set-theoretic union of each geometry with *other*\"\"\"\n return self._geo_op(other, 'union')\n\n def intersection(self, other):\n \"\"\"Return the set-theoretic intersection of each geometry with *other*\"\"\"\n return self._geo_op(other, 'intersection')\n\n #\n # Binary operations that return a pandas Series\n #\n\n def contains(self, other):\n \"\"\"Return True for all geometries that contain *other*, else False\"\"\"\n return 
self._series_op(other, 'contains')\n\n def equals(self, other):\n \"\"\"Return True for all geometries that equal *other*, else False\"\"\"\n return self._series_op(other, 'equals')\n\n def almost_equals(self, other, decimal=6):\n \"\"\"Return True for all geometries that is approximately equal to *other*, else False\"\"\"\n # TODO: pass precision argument\n return self._series_op(other, 'almost_equals', decimal=decimal)\n\n def equals_exact(self, other, tolerance):\n \"\"\"Return True for all geometries that equal *other* to a given tolerance, else False\"\"\"\n # TODO: pass tolerance argument.\n return self._series_op(other, 'equals_exact', tolerance=tolerance)\n\n def crosses(self, other):\n \"\"\"Return True for all geometries that cross *other*, else False\"\"\"\n return self._series_op(other, 'crosses')\n\n def disjoint(self, other):\n \"\"\"Return True for all geometries that are disjoint with *other*, else False\"\"\"\n return self._series_op(other, 'disjoint')\n\n def intersects(self, other):\n \"\"\"Return True for all geometries that intersect *other*, else False\"\"\"\n return self._series_op(other, 'intersects')\n\n def overlaps(self, other):\n \"\"\"Return True for all geometries that overlap *other*, else False\"\"\"\n return self._series_op(other, 'overlaps')\n\n def touches(self, other):\n \"\"\"Return True for all geometries that touch *other*, else False\"\"\"\n return self._series_op(other, 'touches')\n\n def within(self, other):\n \"\"\"Return True for all geometries that are within *other*, else False\"\"\"\n return self._series_op(other, 'within')\n\n def distance(self, other):\n \"\"\"Return distance of each geometry to *other*\"\"\"\n return self._series_op(other, 'distance')\n\n #\n # Other operations\n #\n\n # should this return bounds for entire series, or elementwise?\n @property\n def bounds(self):\n \"\"\"Return a DataFrame of minx, miny, maxx, maxy values of geometry objects\"\"\"\n bounds = np.array([geom.bounds for geom in self])\n return DataFrame(bounds,\n columns=['minx', 'miny', 'maxx', 'maxy'],\n index=self.index)\n\n def buffer(self, distance, resolution=16):\n return GeoSeries([geom.buffer(distance, resolution) for geom in self],\n index=self.index)\n\n def simplify(self, *args, **kwargs):\n return Series([geom.simplify(*args, **kwargs) for geom in self],\n index=self.index)\n\n def interpolate(self):\n raise NotImplementedError\n\n def relate(self, other):\n raise NotImplementedError\n\n def project(self, *args, **kwargs):\n raise NotImplementedError\n\n #\n # Implement standard operators for GeoSeries\n #\n\n def __contains__(self, other):\n \"\"\"Allow tests of the form \"geom in s\"\n\n Tests whether a GeoSeries contains a geometry.\n\n Note: This is not the same as the geometric method \"contains\".\n \"\"\"\n if isinstance(other, BaseGeometry):\n return np.any(self.equals(other))\n else:\n return False\n\n def __xor__(self, other):\n \"\"\"Implement ^ operator as for builtin set type\"\"\"\n return self.symmetric_difference(other)\n\n def __or__(self, other):\n \"\"\"Implement | operator as for builtin set type\"\"\"\n return self.union(other)\n\n def __and__(self, other):\n \"\"\"Implement & operator as for builtin set type\"\"\"\n return self.intersection(other)\n\n def __sub__(self, other):\n \"\"\"Implement - operator as for builtin set type\"\"\"\n return self.difference(other)\n\n #\n # Implement pandas methods\n #\n\n def _wrapped_pandas_method(self, mtd, *args, **kwargs):\n \"\"\"Wrap a generic pandas method to ensure it returns a 
GeoSeries\"\"\"\n val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)\n if type(val) == Series:\n val.__class__ = GeoSeries\n val.crs = self.crs\n return val\n\n def __getitem__(self, key):\n return self._wrapped_pandas_method('__getitem__', key)\n\n def __getslice__(self, i, j):\n return self._wrapped_pandas_method('__getslice__', i, j)\n\n def order(self, *args, **kwargs):\n return self._wrapped_pandas_method('order', *args, **kwargs)\n\n def sort_index(self, *args, **kwargs):\n return self._wrapped_pandas_method('sort_index', *args, **kwargs)\n\n def take(self, *args, **kwargs):\n return self._wrapped_pandas_method('take', *args, **kwargs)\n\n def select(self, *args, **kwargs):\n return self._wrapped_pandas_method('select', *args, **kwargs)\n\n @property\n def _can_hold_na(self):\n return False\n\n def copy(self, order='C'):\n \"\"\"Return new GeoSeries with copy of underlying values\n\n Returns\n -------\n cp : GeoSeries\n \"\"\"\n return GeoSeries(self.values.copy(order), index=self.index,\n name=self.name)\n\n def isnull(self):\n \"\"\"Null values in a GeoSeries are represented by empty geometric objects\"\"\"\n non_geo_null = super(GeoSeries, self).isnull()\n val = self.apply(_is_empty)\n return np.logical_or(non_geo_null, val)\n\n def fillna(self, value=EMPTY_POLYGON, method=None, inplace=False,\n limit=None):\n \"\"\"Fill NA/NaN values with a geometry (empty polygon by default).\n\n \"method\" is currently not implemented for GeoSeries.\n \"\"\"\n if method is not None:\n raise NotImplementedError('Fill method is currently not implemented for GeoSeries')\n if isinstance(value, BaseGeometry):\n result = self.copy() if not inplace else self\n mask = self.isnull()\n result[mask] = value\n if not inplace:\n return GeoSeries(result)\n else:\n raise ValueError('Non-geometric fill values not allowed for GeoSeries')\n\n def align(self, other, join='outer', level=None, copy=True,\n fill_value=EMPTY_POLYGON, method=None, limit=None):\n left, right = super(GeoSeries, self).align(other, join=join,\n level=level, copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit)\n return GeoSeries(left), GeoSeries(right)\n\n def plot(self, *args, **kwargs):\n return plot_series(self, *args, **kwargs)\n\n #\n # Additional methods\n #\n\n def to_crs(self, crs=None, epsg=None):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if self.crs is None:\n raise ValueError('Cannot transform naive geometries. '\n 'Please set a crs on the object first.')\n if crs is None:\n try:\n crs = from_epsg(epsg)\n except TypeError:\n raise TypeError('Must set either crs or epsg for output.')\n proj_in = pyproj.Proj(preserve_units=True, **self.crs)\n proj_out = pyproj.Proj(preserve_units=True, **crs)\n project = partial(pyproj.transform, proj_in, proj_out)\n result = self.apply(lambda geom: transform(project, geom))\n result.__class__ = GeoSeries\n result.crs = crs\n return result\n", "path": "geopandas/geoseries.py" } ]
[ { "content": "from warnings import warn\nfrom functools import partial\n\nimport numpy as np\nfrom pandas import Series, DataFrame\n\nimport pyproj\nfrom shapely.geometry import shape, Polygon, Point\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.geometry.base import BaseGeometry\nfrom shapely.ops import cascaded_union, unary_union, transform\nimport fiona\nfrom fiona.crs import from_epsg\n\nfrom plotting import plot_series\n\nEMPTY_COLLECTION = GeometryCollection()\nEMPTY_POLYGON = Polygon()\nEMPTY_POINT = Point()\n\n\n\n\ndef _is_empty(x):\n try:\n return x.is_empty\n except:\n return False\n\n\ndef _is_geometry(x):\n return isinstance(x, BaseGeometry)\n\n\nclass GeoSeries(Series):\n \"\"\"A Series object designed to store shapely geometry objects.\"\"\"\n\n def __new__(cls, *args, **kwargs):\n kwargs.pop('crs', None)\n arr = Series.__new__(cls, *args, **kwargs)\n if type(arr) is GeoSeries:\n return arr\n else:\n return arr.view(GeoSeries)\n\n def __init__(self, *args, **kwargs):\n crs = kwargs.pop('crs', None)\n super(GeoSeries, self).__init__(*args, **kwargs)\n self.crs = crs\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"\n Alternate constructor to create a GeoSeries from a file\n \n Parameters\n ----------\n \n filename : str\n File path or file handle to read from. Depending on which kwargs\n are included, the content of filename may vary, see:\n http://toblerity.github.io/fiona/README.html#usage\n for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to \n access multi-layer data, data stored within archives (zip files),\n etc.\n \n \"\"\"\n geoms = []\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n for rec in f:\n geoms.append(shape(rec['geometry']))\n g = GeoSeries(geoms)\n g.crs = crs\n return g\n\n def to_file(self, filename, driver=\"ESRI Shapefile\", **kwargs):\n from geopandas import GeoDataFrame\n data = GeoDataFrame({\"geometry\": self,\n \"id\":self.index.values},\n index=self.index)\n data.crs = self.crs\n data.to_file(filename, driver, **kwargs)\n \n #\n # Internal methods\n #\n\n def _geo_op(self, other, op):\n \"\"\"Operation that returns a GeoSeries\"\"\"\n if isinstance(other, GeoSeries):\n if self.crs != other.crs:\n warn('GeoSeries crs mismatch: {} and {}'.format(self.crs, other.crs))\n this, other = self.align(other)\n return GeoSeries([getattr(s[0], op)(s[1]) for s in zip(this, other)],\n index=this.index, crs=self.crs)\n else:\n return GeoSeries([getattr(s, op)(other) for s in self],\n index=self.index, crs=self.crs)\n\n # TODO: think about merging with _geo_op\n def _series_op(self, other, op, **kwargs):\n \"\"\"Geometric operation that returns a pandas Series\"\"\"\n if isinstance(other, GeoSeries):\n this, other = self.align(other)\n return Series([getattr(s[0], op)(s[1], **kwargs) for s in zip(this, other)],\n index=this.index)\n else:\n return Series([getattr(s, op)(other, **kwargs) for s in self],\n index=self.index)\n\n def _geo_unary_op(self, op):\n \"\"\"Unary operation that returns a GeoSeries\"\"\"\n return GeoSeries([getattr(geom, op) for geom in self],\n index=self.index, crs=self.crs)\n\n def _series_unary_op(self, op):\n \"\"\"Unary operation that returns a Series\"\"\"\n return Series([getattr(geom, op) for geom in self],\n index=self.index)\n\n #\n # Implementation of Shapely methods\n #\n\n #\n # Unary operations that return a Series\n #\n\n @property\n def area(self):\n \"\"\"Return the area of each geometry in the 
GeoSeries\"\"\"\n return self._series_unary_op('area')\n\n @property\n def geom_type(self):\n \"\"\"Return the geometry type of each geometry in the GeoSeries\"\"\"\n return self._series_unary_op('geom_type')\n\n @property\n def type(self):\n \"\"\"Return the geometry type of each geometry in the GeoSeries\"\"\"\n return self.geom_type\n\n @property\n def length(self):\n \"\"\"Return the length of each geometry in the GeoSeries\"\"\"\n return self._series_unary_op('length')\n\n @property\n def is_valid(self):\n \"\"\"Return True for each valid geometry, else False\"\"\"\n return self._series_unary_op('is_valid')\n\n @property\n def is_empty(self):\n \"\"\"Return True for each empty geometry, False for non-empty\"\"\"\n return self._series_unary_op('is_empty')\n\n @property\n def is_simple(self):\n \"\"\"Return True for each simple geometry, else False\"\"\"\n return self._series_unary_op('is_simple')\n\n @property\n def is_ring(self):\n \"\"\"Return True for each geometry that is a closed ring, else False\"\"\"\n # operates on the exterior, so can't use _series_unary_op()\n return Series([geom.exterior.is_ring for geom in self],\n index=self.index)\n\n #\n # Unary operations that return a GeoSeries\n #\n\n @property\n def boundary(self):\n \"\"\"Return the bounding geometry for each geometry\"\"\"\n return self._geo_unary_op('boundary')\n\n @property\n def centroid(self):\n \"\"\"Return the centroid of each geometry in the GeoSeries\"\"\"\n return self._geo_unary_op('centroid')\n\n @property\n def convex_hull(self):\n \"\"\"Return the convex hull of each geometry\"\"\"\n return self._geo_unary_op('convex_hull')\n\n @property\n def envelope(self):\n \"\"\"Return a bounding rectangle for each geometry\"\"\"\n return self._geo_unary_op('envelope')\n\n @property\n def exterior(self):\n \"\"\"Return the outer boundary of each polygon\"\"\"\n # TODO: return empty geometry for non-polygons\n return self._geo_unary_op('exterior')\n\n @property\n def interiors(self):\n \"\"\"Return the interior rings of each polygon\"\"\"\n # TODO: return empty list or None for non-polygons\n return self._geo_unary_op('interiors')\n\n def representative_point(self):\n \"\"\"Return a GeoSeries of points guaranteed to be in each geometry\"\"\"\n return GeoSeries([geom.representative_point() for geom in self],\n index=self.index)\n\n #\n # Reduction operations that return a Shapely geometry\n #\n\n @property\n def cascaded_union(self):\n \"\"\"Deprecated: Return the unary_union of all geometries\"\"\"\n return cascaded_union(self.values)\n\n @property\n def unary_union(self):\n \"\"\"Return the union of all geometries\"\"\"\n return unary_union(self.values)\n\n #\n # Binary operations that return a GeoSeries\n #\n\n def difference(self, other):\n \"\"\"Return the set-theoretic difference of each geometry with *other*\"\"\"\n return self._geo_op(other, 'difference')\n\n def symmetric_difference(self, other):\n \"\"\"Return the symmetric difference of each geometry with *other*\"\"\"\n return self._geo_op(other, 'symmetric_difference')\n\n def union(self, other):\n \"\"\"Return the set-theoretic union of each geometry with *other*\"\"\"\n return self._geo_op(other, 'union')\n\n def intersection(self, other):\n \"\"\"Return the set-theoretic intersection of each geometry with *other*\"\"\"\n return self._geo_op(other, 'intersection')\n\n #\n # Binary operations that return a pandas Series\n #\n\n def contains(self, other):\n \"\"\"Return True for all geometries that contain *other*, else False\"\"\"\n return 
self._series_op(other, 'contains')\n\n def equals(self, other):\n \"\"\"Return True for all geometries that equal *other*, else False\"\"\"\n return self._series_op(other, 'equals')\n\n def almost_equals(self, other, decimal=6):\n \"\"\"Return True for all geometries that is approximately equal to *other*, else False\"\"\"\n # TODO: pass precision argument\n return self._series_op(other, 'almost_equals', decimal=decimal)\n\n def equals_exact(self, other, tolerance):\n \"\"\"Return True for all geometries that equal *other* to a given tolerance, else False\"\"\"\n # TODO: pass tolerance argument.\n return self._series_op(other, 'equals_exact', tolerance=tolerance)\n\n def crosses(self, other):\n \"\"\"Return True for all geometries that cross *other*, else False\"\"\"\n return self._series_op(other, 'crosses')\n\n def disjoint(self, other):\n \"\"\"Return True for all geometries that are disjoint with *other*, else False\"\"\"\n return self._series_op(other, 'disjoint')\n\n def intersects(self, other):\n \"\"\"Return True for all geometries that intersect *other*, else False\"\"\"\n return self._series_op(other, 'intersects')\n\n def overlaps(self, other):\n \"\"\"Return True for all geometries that overlap *other*, else False\"\"\"\n return self._series_op(other, 'overlaps')\n\n def touches(self, other):\n \"\"\"Return True for all geometries that touch *other*, else False\"\"\"\n return self._series_op(other, 'touches')\n\n def within(self, other):\n \"\"\"Return True for all geometries that are within *other*, else False\"\"\"\n return self._series_op(other, 'within')\n\n def distance(self, other):\n \"\"\"Return distance of each geometry to *other*\"\"\"\n return self._series_op(other, 'distance')\n\n #\n # Other operations\n #\n\n # should this return bounds for entire series, or elementwise?\n @property\n def bounds(self):\n \"\"\"Return a DataFrame of minx, miny, maxx, maxy values of geometry objects\"\"\"\n bounds = np.array([geom.bounds for geom in self])\n return DataFrame(bounds,\n columns=['minx', 'miny', 'maxx', 'maxy'],\n index=self.index)\n\n def buffer(self, distance, resolution=16):\n return GeoSeries([geom.buffer(distance, resolution) for geom in self],\n index=self.index)\n\n def simplify(self, *args, **kwargs):\n return Series([geom.simplify(*args, **kwargs) for geom in self],\n index=self.index)\n\n def interpolate(self):\n raise NotImplementedError\n\n def relate(self, other):\n raise NotImplementedError\n\n def project(self, *args, **kwargs):\n raise NotImplementedError\n\n #\n # Implement standard operators for GeoSeries\n #\n\n def __contains__(self, other):\n \"\"\"Allow tests of the form \"geom in s\"\n\n Tests whether a GeoSeries contains a geometry.\n\n Note: This is not the same as the geometric method \"contains\".\n \"\"\"\n if isinstance(other, BaseGeometry):\n return np.any(self.equals(other))\n else:\n return False\n\n def __xor__(self, other):\n \"\"\"Implement ^ operator as for builtin set type\"\"\"\n return self.symmetric_difference(other)\n\n def __or__(self, other):\n \"\"\"Implement | operator as for builtin set type\"\"\"\n return self.union(other)\n\n def __and__(self, other):\n \"\"\"Implement & operator as for builtin set type\"\"\"\n return self.intersection(other)\n\n def __sub__(self, other):\n \"\"\"Implement - operator as for builtin set type\"\"\"\n return self.difference(other)\n\n #\n # Implement pandas methods\n #\n\n def _wrapped_pandas_method(self, mtd, *args, **kwargs):\n \"\"\"Wrap a generic pandas method to ensure it returns a 
GeoSeries\"\"\"\n val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)\n if type(val) == Series:\n val.__class__ = GeoSeries\n val.crs = self.crs\n return val\n\n def __getitem__(self, key):\n return self._wrapped_pandas_method('__getitem__', key)\n\n def __getslice__(self, i, j):\n return self._wrapped_pandas_method('__getslice__', i, j)\n\n def order(self, *args, **kwargs):\n return self._wrapped_pandas_method('order', *args, **kwargs)\n\n def sort_index(self, *args, **kwargs):\n return self._wrapped_pandas_method('sort_index', *args, **kwargs)\n\n def take(self, *args, **kwargs):\n return self._wrapped_pandas_method('take', *args, **kwargs)\n\n def select(self, *args, **kwargs):\n return self._wrapped_pandas_method('select', *args, **kwargs)\n\n @property\n def _can_hold_na(self):\n return False\n\n def copy(self, order='C'):\n \"\"\"Return new GeoSeries with copy of underlying values\n\n Returns\n -------\n cp : GeoSeries\n \"\"\"\n return GeoSeries(self.values.copy(order), index=self.index,\n name=self.name)\n\n def isnull(self):\n \"\"\"Null values in a GeoSeries are represented by empty geometric objects\"\"\"\n non_geo_null = super(GeoSeries, self).isnull()\n val = self.apply(_is_empty)\n return np.logical_or(non_geo_null, val)\n\n def fillna(self, value=EMPTY_POLYGON, method=None, inplace=False,\n limit=None):\n \"\"\"Fill NA/NaN values with a geometry (empty polygon by default).\n\n \"method\" is currently not implemented for GeoSeries.\n \"\"\"\n if method is not None:\n raise NotImplementedError('Fill method is currently not implemented for GeoSeries')\n if isinstance(value, BaseGeometry):\n result = self.copy() if not inplace else self\n mask = self.isnull()\n result[mask] = value\n if not inplace:\n return GeoSeries(result)\n else:\n raise ValueError('Non-geometric fill values not allowed for GeoSeries')\n\n def align(self, other, join='outer', level=None, copy=True,\n fill_value=EMPTY_POLYGON, method=None, limit=None):\n left, right = super(GeoSeries, self).align(other, join=join,\n level=level, copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit)\n return GeoSeries(left), GeoSeries(right)\n\n def plot(self, *args, **kwargs):\n return plot_series(self, *args, **kwargs)\n\n #\n # Additional methods\n #\n\n def to_crs(self, crs=None, epsg=None):\n \"\"\"Transform geometries to a new coordinate reference system\n\n This method will transform all points in all objects. It has\n no notion or projecting entire geometries. All segments\n joining points are assumed to be lines in the current\n projection, not geodesics. Objects crossing the dateline (or\n other projection boundary) will have undesirable behavior.\n \"\"\"\n if self.crs is None:\n raise ValueError('Cannot transform naive geometries. '\n 'Please set a crs on the object first.')\n if crs is None:\n try:\n crs = from_epsg(epsg)\n except TypeError:\n raise TypeError('Must set either crs or epsg for output.')\n proj_in = pyproj.Proj(preserve_units=True, **self.crs)\n proj_out = pyproj.Proj(preserve_units=True, **crs)\n project = partial(pyproj.transform, proj_in, proj_out)\n result = self.apply(lambda geom: transform(project, geom))\n result.__class__ = GeoSeries\n result.crs = crs\n return result\n", "path": "geopandas/geoseries.py" } ]
diff --git a/geopandas/geoseries.py b/geopandas/geoseries.py index 6f95b08f43..30de157553 100644 --- a/geopandas/geoseries.py +++ b/geopandas/geoseries.py @@ -118,7 +118,7 @@ def _geo_unary_op(self, op): def _series_unary_op(self, op): """Unary operation that returns a Series""" - return GeoSeries([getattr(geom, op) for geom in self], + return Series([getattr(geom, op) for geom in self], index=self.index) # diff --git a/tests/test_geoseries.py b/tests/test_geoseries.py index 7d9b4abf9e..f0298d6095 100644 --- a/tests/test_geoseries.py +++ b/tests/test_geoseries.py @@ -1,6 +1,7 @@ import unittest import numpy as np from numpy.testing import assert_array_equal +from pandas import Series from shapely.geometry import Polygon, Point, LineString from shapely.geometry.base import BaseGeometry from geopandas import GeoSeries @@ -38,6 +39,7 @@ def setUp(self): crs={'init': 'epsg:4326', 'no_defs': True}) def test_area(self): + assert type(self.g1.area) is Series assert_array_equal(self.g1.area.values, np.array([0.5, 1.0])) def test_in(self):
nautobot__nautobot-5051
GraphiQL interface save query function giving error

### Environment
* Nautobot version (Docker tag too if applicable): 1.6.2
* Python version: 3.9.16
* Database platform, version: postgres
* Middleware(s): n/a

### Steps to Reproduce
1. Have an existing saved GraphQL query (initially created in an earlier version of Nautobot).
2. Open the GraphiQL interface.
3. Update the query with a new value, or delete a value.
4. Attempt to save the query and observe the error.

*Note:* the saved query editor in the primary Nautobot GUI works.

### Expected Behavior
The query saves successfully.

### Observed Behavior
An error:
<img width="815" alt="Screenshot 2023-10-04 at 21 26 48" src="https://github.com/nautobot/nautobot/assets/10200477/2f1f2b2e-f94e-412e-9be4-d2e5c6b8a9c1">
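As a point of comparison with the failing GraphiQL save, here is a hedged sketch of updating the same saved query through Nautobot's REST API; the base URL, token, query UUID, and even the `/api/extras/graphql-queries/` path are assumptions made for illustration, not details taken from the report.

```python
import requests

NAUTOBOT_URL = "https://nautobot.example.com"  # hypothetical instance
TOKEN = "0123456789abcdef0123456789abcdef"     # hypothetical API token
query_id = "11111111-2222-3333-4444-555555555555"  # hypothetical saved-query UUID

# PATCH the saved GraphQLQuery record directly; this exercises the
# GraphQLQuerySerializer shown below rather than the GraphiQL save view.
resp = requests.patch(
    f"{NAUTOBOT_URL}/api/extras/graphql-queries/{query_id}/",
    headers={
        "Authorization": f"Token {TOKEN}",
        "Content-Type": "application/json",
    },
    json={"query": "query { locations { name } }"},
)
resp.raise_for_status()
print(resp.json()["query"])
```

If this succeeds while the GraphiQL Save button still errors, the problem is confined to the GraphiQL save path rather than to the serializer's basic validation.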
[ { "content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom nautobot.core.api import (\n BaseModelSerializer,\n ChoiceField,\n ContentTypeField,\n CustomFieldModelSerializerMixin,\n NautobotModelSerializer,\n NotesSerializerMixin,\n RelationshipModelSerializerMixin,\n ValidatedModelSerializer,\n)\nfrom nautobot.core.api.exceptions import SerializerNotFound\nfrom nautobot.core.api.serializers import PolymorphicProxySerializer\nfrom nautobot.core.api.utils import (\n get_nested_serializer_depth,\n nested_serializers_for_models,\n return_nested_serializer_data_based_on_depth,\n)\nfrom nautobot.core.models.utils import get_all_concrete_models\nfrom nautobot.dcim.api.serializers import (\n DeviceSerializer,\n LocationSerializer,\n RackSerializer,\n)\nfrom nautobot.extras import choices, models\nfrom nautobot.extras.choices import (\n CustomFieldFilterLogicChoices,\n CustomFieldTypeChoices,\n JobExecutionType,\n JobResultStatusChoices,\n ObjectChangeActionChoices,\n)\nfrom nautobot.extras.api.mixins import (\n TaggedModelSerializerMixin,\n)\nfrom nautobot.extras.datasources import get_datasource_content_choices\nfrom nautobot.extras.models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n ExternalIntegration,\n FileProxy,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobButton,\n JobHook,\n JobLogEntry,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n Role,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, RoleModelsQuery, TaggableClassesQuery\n\nfrom .fields import MultipleChoiceJSONField\n\n#\n# Mixins and Base Classes\n#\n\nlogger = logging.getLogger(__name__)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = ComputedField\n fields = \"__all__\"\n\n\n#\n# Config contexts\n#\n\n\nclass ConfigContextSerializer(ValidatedModelSerializer, TaggedModelSerializerMixin, NotesSerializerMixin):\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"config_context_owners\").get_query()),\n required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n # Conditional enablement of dynamic groups filtering\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if not settings.CONFIG_CONTEXT_DYNAMIC_GROUPS_ENABLED:\n # In the case of a nested serializer, we won't have a `dynamic_groups` field at all.\n self.fields.pop(\"dynamic_groups\", None)\n\n class Meta:\n model = ConfigContext\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ConfigContextOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"config_context_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n 
def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# Config context Schemas\n#\n\n\nclass ConfigContextSchemaSerializer(NautobotModelSerializer):\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"config_context_owners\").get_query()),\n required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ConfigContextSchema\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ConfigContextSchemaOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"config_context_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# ContentTypes\n#\n\n\nclass ContentTypeSerializer(BaseModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"extras-api:contenttype-detail\")\n display = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentType\n fields = \"__all__\"\n\n @extend_schema_field(serializers.CharField)\n def get_display(self, obj):\n return obj.app_labeled_name\n\n\n#\n# Custom fields\n#\n\n\nclass CustomFieldSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()),\n many=True,\n )\n type = ChoiceField(choices=CustomFieldTypeChoices)\n filter_logic = ChoiceField(choices=CustomFieldFilterLogicChoices, required=False)\n label = serializers.CharField(max_length=50, required=True)\n\n class Meta:\n model = CustomField\n fields = \"__all__\"\n\n\nclass CustomFieldChoiceSerializer(ValidatedModelSerializer):\n class Meta:\n model = CustomFieldChoice\n fields = \"__all__\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = CustomLink\n fields = \"__all__\"\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupMembershipSerializer(ValidatedModelSerializer):\n class Meta:\n model = DynamicGroupMembership\n fields = \"__all__\"\n\n\nclass DynamicGroupSerializer(NautobotModelSerializer):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"dynamic_groups\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = DynamicGroup\n fields = \"__all__\"\n extra_kwargs = {\n \"children\": {\"source\": \"dynamic_group_memberships\", \"read_only\": True},\n \"filter\": {\"read_only\": False},\n }\n\n\n#\n# Export templates\n#\n\n\n# TODO: export-templates don't support custom-fields, is this omission intentional?\nclass ExportTemplateSerializer(RelationshipModelSerializerMixin, ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()),\n )\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_template_owners\").get_query()),\n 
required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ExportTemplate\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ExportTemplateOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"export_template_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# External integrations\n#\n\n\nclass ExternalIntegrationSerializer(NautobotModelSerializer):\n class Meta:\n model = ExternalIntegration\n fields = \"__all__\"\n\n\n#\n# File proxies\n#\n\n\nclass FileProxySerializer(BaseModelSerializer):\n class Meta:\n model = FileProxy\n exclude = [\"file\"]\n\n\n#\n# Git repositories\n#\n\n\nclass GitRepositorySerializer(NautobotModelSerializer):\n \"\"\"Git repositories defined as a data source.\"\"\"\n\n provided_contents = MultipleChoiceJSONField(\n choices=lambda: get_datasource_content_choices(\"extras.gitrepository\"),\n allow_blank=True,\n required=False,\n )\n\n class Meta:\n model = GitRepository\n fields = \"__all__\"\n\n\n#\n# GraphQL Queries\n#\n\n\nclass GraphQLQuerySerializer(ValidatedModelSerializer, NotesSerializerMixin):\n variables = serializers.DictField(required=False, allow_null=True, default={})\n\n class Meta:\n model = GraphQLQuery\n fields = \"__all__\"\n\n\nclass GraphQLQueryInputSerializer(serializers.Serializer):\n variables = serializers.DictField(allow_null=True, default={})\n\n\nclass GraphQLQueryOutputSerializer(serializers.Serializer):\n data = serializers.DictField(default={})\n\n\n#\n# Image attachments\n#\n\n\nclass ImageAttachmentSerializer(ValidatedModelSerializer):\n content_type = ContentTypeField(queryset=ContentType.objects.all())\n\n class Meta:\n model = ImageAttachment\n fields = \"__all__\"\n\n def validate(self, data):\n # Validate that the parent object exists\n try:\n data[\"content_type\"].get_object_for_this_type(id=data[\"object_id\"])\n except ObjectDoesNotExist:\n raise serializers.ValidationError(f\"Invalid parent object: {data['content_type']} ID {data['object_id']}\")\n\n # Enforce model validation\n super().validate(data)\n\n return data\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ImageAttachmentParent\",\n resource_type_field_name=\"object_type\",\n serializers=[\n DeviceSerializer,\n LocationSerializer,\n RackSerializer,\n ],\n )\n )\n def get_parent(self, obj):\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.parent, \"parent\")\n\n\n#\n# Jobs\n#\n\n\nclass JobSerializer(NautobotModelSerializer, TaggedModelSerializerMixin):\n class Meta:\n model = Job\n fields = \"__all__\"\n\n def validate(self, data):\n # note no validation for on creation of jobs because we do not support user creation of Job records via API\n if self.instance:\n has_sensitive_variables = data.get(\"has_sensitive_variables\", self.instance.has_sensitive_variables)\n approval_required = data.get(\"approval_required\", self.instance.approval_required)\n\n if approval_required and has_sensitive_variables:\n error_message = \"A job with sensitive variables cannot also be marked as requiring approval\"\n errors = {}\n\n if \"approval_required\" in data:\n 
errors[\"approval_required\"] = [error_message]\n if \"has_sensitive_variables\" in data:\n errors[\"has_sensitive_variables\"] = [error_message]\n\n raise serializers.ValidationError(errors)\n\n return super().validate(data)\n\n\nclass JobVariableSerializer(serializers.Serializer):\n \"\"\"Serializer used for responses from the JobModelViewSet.variables() detail endpoint.\"\"\"\n\n name = serializers.CharField(read_only=True)\n type = serializers.CharField(read_only=True)\n label = serializers.CharField(read_only=True, required=False)\n help_text = serializers.CharField(read_only=True, required=False)\n default = serializers.JSONField(read_only=True, required=False)\n required = serializers.BooleanField(read_only=True, required=False)\n min_length = serializers.IntegerField(read_only=True, required=False)\n max_length = serializers.IntegerField(read_only=True, required=False)\n min_value = serializers.IntegerField(read_only=True, required=False)\n max_value = serializers.IntegerField(read_only=True, required=False)\n choices = serializers.JSONField(read_only=True, required=False)\n model = serializers.CharField(read_only=True, required=False)\n\n\n#\n# Scheduled Jobs\n#\n\n\nclass ScheduledJobSerializer(BaseModelSerializer):\n # start_time = serializers.DateTimeField(format=None, required=False)\n\n class Meta:\n model = ScheduledJob\n fields = \"__all__\"\n\n\n#\n# Job Results\n#\n\n\nclass JobResultSerializer(CustomFieldModelSerializerMixin, BaseModelSerializer):\n status = ChoiceField(choices=JobResultStatusChoices, read_only=True)\n\n class Meta:\n model = JobResult\n fields = \"__all__\"\n extra_kwargs = {\n \"files\": {\"read_only\": True},\n }\n\n def get_field_names(self, declared_fields, info):\n \"\"\"Add reverse relation to related FileProxy objects.\"\"\"\n fields = list(super().get_field_names(declared_fields, info))\n self.extend_field_names(fields, \"files\")\n return fields\n\n\nclass JobRunResponseSerializer(serializers.Serializer):\n \"\"\"Serializer representing responses from the JobModelViewSet.run() POST endpoint.\"\"\"\n\n schedule = ScheduledJobSerializer(read_only=True, required=False)\n job_result = JobResultSerializer(read_only=True, required=False)\n\n\n#\n# Job classes (fka Custom Scripts, Reports)\n# 2.0 TODO: remove these if no longer needed\n#\n\n\nclass JobClassSerializer(serializers.Serializer):\n url = serializers.HyperlinkedIdentityField(\n view_name=\"extras-api:job-detail\",\n lookup_field=\"class_path\",\n lookup_url_kwarg=\"class_path\",\n )\n id = serializers.CharField(read_only=True, source=\"class_path\")\n pk = serializers.SerializerMethodField(read_only=True)\n name = serializers.CharField(max_length=255, read_only=True)\n description = serializers.CharField(max_length=255, required=False, read_only=True)\n test_methods = serializers.ListField(child=serializers.CharField(max_length=255))\n vars = serializers.SerializerMethodField(read_only=True)\n\n @extend_schema_field(serializers.DictField)\n def get_vars(self, instance):\n return {k: v.__class__.__name__ for k, v in instance._get_vars().items()}\n\n @extend_schema_field(serializers.UUIDField(allow_null=True))\n def get_pk(self, instance):\n try:\n jobs = Job.objects\n if \"request\" in self.context and self.context[\"request\"].user is not None:\n jobs = jobs.restrict(self.context[\"request\"].user, \"view\")\n job_model = jobs.get_for_class_path(instance.class_path)\n return job_model.pk\n except Job.DoesNotExist:\n return None\n\n\nclass JobClassDetailSerializer(JobClassSerializer):\n 
result = JobResultSerializer(required=False)\n\n\nclass JobHookSerializer(NautobotModelSerializer):\n content_types = ContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(),\n many=True,\n )\n\n class Meta:\n model = JobHook\n fields = \"__all__\"\n\n def validate(self, data):\n validated_data = super().validate(data)\n\n conflicts = JobHook.check_for_conflicts(\n instance=self.instance,\n content_types=data.get(\"content_types\"),\n job=data.get(\"job\"),\n type_create=data.get(\"type_create\"),\n type_update=data.get(\"type_update\"),\n type_delete=data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise serializers.ValidationError(conflicts)\n\n return validated_data\n\n\nclass JobCreationSerializer(BaseModelSerializer):\n \"\"\"\n Nested serializer specifically for use with `JobInputSerializer.schedule`.\n\n We don't use `WritableNestedSerializer` here because this is not used to look up\n an existing `ScheduledJob`, but instead used to specify parameters for creating one.\n \"\"\"\n\n url = serializers.HyperlinkedIdentityField(view_name=\"extras-api:scheduledjob-detail\")\n name = serializers.CharField(max_length=255, required=False)\n start_time = serializers.DateTimeField(format=None, required=False)\n\n class Meta:\n model = ScheduledJob\n fields = [\"url\", \"name\", \"start_time\", \"interval\", \"crontab\"]\n\n def validate(self, data):\n data = super().validate(data)\n\n if data[\"interval\"] in choices.JobExecutionType.SCHEDULE_CHOICES:\n if \"name\" not in data:\n raise serializers.ValidationError({\"name\": \"Please provide a name for the job schedule.\"})\n\n if (\"start_time\" not in data and data[\"interval\"] != choices.JobExecutionType.TYPE_CUSTOM) or (\n \"start_time\" in data and data[\"start_time\"] < models.ScheduledJob.earliest_possible_time()\n ):\n raise serializers.ValidationError(\n {\n \"start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if data[\"interval\"] == choices.JobExecutionType.TYPE_CUSTOM:\n if data.get(\"crontab\") is None:\n raise serializers.ValidationError({\"crontab\": \"Please enter a valid crontab.\"})\n try:\n models.ScheduledJob.get_crontab(data[\"crontab\"])\n except Exception as e:\n raise serializers.ValidationError({\"crontab\": e})\n\n return data\n\n\nclass JobInputSerializer(serializers.Serializer):\n data = serializers.JSONField(required=False, default=dict)\n schedule = JobCreationSerializer(required=False)\n task_queue = serializers.CharField(required=False, allow_blank=True)\n\n\nclass JobMultiPartInputSerializer(serializers.Serializer):\n \"\"\"JobMultiPartInputSerializer is a \"flattened\" version of JobInputSerializer for use with multipart/form-data submissions which only accept key-value pairs\"\"\"\n\n _schedule_name = serializers.CharField(max_length=255, required=False)\n _schedule_start_time = serializers.DateTimeField(format=None, required=False)\n _schedule_interval = ChoiceField(choices=JobExecutionType, required=False)\n _schedule_crontab = serializers.CharField(required=False, allow_blank=True)\n _task_queue = serializers.CharField(required=False, allow_blank=True)\n\n def validate(self, data):\n data = super().validate(data)\n\n if \"_schedule_interval\" in data and data[\"_schedule_interval\"] != JobExecutionType.TYPE_IMMEDIATELY:\n if \"_schedule_name\" not in data:\n raise serializers.ValidationError({\"_schedule_name\": \"Please provide a name for the job schedule.\"})\n\n if (\"_schedule_start_time\" not in data and 
data[\"_schedule_interval\"] != JobExecutionType.TYPE_CUSTOM) or (\n \"_schedule_start_time\" in data and data[\"_schedule_start_time\"] < ScheduledJob.earliest_possible_time()\n ):\n raise serializers.ValidationError(\n {\n \"_schedule_start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if data[\"_schedule_interval\"] == JobExecutionType.TYPE_CUSTOM:\n if data.get(\"_schedule_crontab\") is None:\n raise serializers.ValidationError({\"_schedule_crontab\": \"Please enter a valid crontab.\"})\n try:\n ScheduledJob.get_crontab(data[\"_schedule_crontab\"])\n except Exception as e:\n raise serializers.ValidationError({\"_schedule_crontab\": e})\n\n return data\n\n\nclass JobLogEntrySerializer(BaseModelSerializer):\n class Meta:\n model = JobLogEntry\n fields = \"__all__\"\n\n\n#\n# Job Button\n#\n\n\nclass JobButtonSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(queryset=ContentType.objects.all(), many=True)\n\n class Meta:\n model = JobButton\n fields = \"__all__\"\n\n\n#\n# Notes\n#\n\n\nclass NoteSerializer(BaseModelSerializer):\n assigned_object_type = ContentTypeField(queryset=ContentType.objects.all())\n assigned_object = serializers.SerializerMethodField()\n\n class Meta:\n model = Note\n fields = \"__all__\"\n list_display_fields = [\"note\", \"assigned_object_type\", \"assigned_object_id\", \"user\"]\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"NoteAssignedObject\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(get_all_concrete_models(NotesMixin)),\n allow_null=True,\n )\n )\n def get_assigned_object(self, obj):\n if obj.assigned_object is None:\n return None\n try:\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(\n self, depth, obj, obj.assigned_object, \"assigned_object\"\n )\n except SerializerNotFound:\n return None\n\n\nclass NoteInputSerializer(serializers.Serializer):\n note = serializers.CharField()\n\n\n#\n# Change logging\n#\n\n\nclass ObjectChangeSerializer(BaseModelSerializer):\n action = ChoiceField(choices=ObjectChangeActionChoices, read_only=True)\n changed_object_type = ContentTypeField(read_only=True)\n related_object_type = ContentTypeField(read_only=True)\n changed_object = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ObjectChange\n fields = \"__all__\"\n list_display_fields = [\"changed_object_id\", \"related_object_id\", \"related_object_type\", \"user\"]\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ObjectChangeChangedObject\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(ChangeLoggedModelsQuery().list_subclasses()),\n allow_null=True,\n )\n )\n def get_changed_object(self, obj):\n \"\"\"\n Serialize a nested representation of the changed object.\n \"\"\"\n if obj.changed_object is None:\n return None\n try:\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.changed_object, \"changed_object\")\n except SerializerNotFound:\n return obj.object_repr\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n source_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n destination_type = ContentTypeField(\n 
queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n class Meta:\n model = Relationship\n fields = \"__all__\"\n\n\nclass RelationshipAssociationSerializer(ValidatedModelSerializer):\n source_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n destination_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n class Meta:\n model = RelationshipAssociation\n fields = \"__all__\"\n\n\n#\n# Roles\n#\n\n\nclass RoleSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `Role` objects.\"\"\"\n\n content_types = ContentTypeField(\n queryset=RoleModelsQuery().as_queryset(),\n many=True,\n )\n\n class Meta:\n model = Role\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 00ff00)\"},\n }\n\n\n#\n# Secrets\n#\n\n\nclass SecretSerializer(NautobotModelSerializer, TaggedModelSerializerMixin):\n \"\"\"Serializer for `Secret` objects.\"\"\"\n\n class Meta:\n model = Secret\n fields = \"__all__\"\n\n\nclass SecretsGroupAssociationSerializer(ValidatedModelSerializer):\n \"\"\"Serializer for `SecretsGroupAssociation` objects.\"\"\"\n\n class Meta:\n model = SecretsGroupAssociation\n fields = \"__all__\"\n\n\nclass SecretsGroupSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `SecretsGroup` objects.\"\"\"\n\n class Meta:\n model = SecretsGroup\n fields = \"__all__\"\n # TODO: it would be **awesome** if we could create/update SecretsGroupAssociations\n # alongside creating/updating the base SecretsGroup, but since this is a ManyToManyField with\n # a `through` table, that appears very non-trivial to implement. For now we have this as a\n # read-only field; to create/update SecretsGroupAssociations you must make separate calls to the\n # api/extras/secrets-group-associations/ REST endpoint as appropriate.\n extra_kwargs = {\n \"secrets\": {\"source\": \"secrets_group_associations\", \"read_only\": True},\n }\n\n\n#\n# Custom statuses\n#\n\n\nclass StatusSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `Status` objects.\"\"\"\n\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"statuses\").get_query()),\n many=True,\n )\n\n class Meta:\n model = Status\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 00ff00)\"},\n }\n\n\n#\n# Tags\n#\n\n\nclass TagSerializer(NautobotModelSerializer):\n tagged_items = serializers.IntegerField(read_only=True)\n content_types = ContentTypeField(\n queryset=TaggableClassesQuery().as_queryset(),\n many=True,\n required=True,\n )\n\n class Meta:\n model = Tag\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 
00ff00)\"},\n }\n\n def validate(self, data):\n data = super().validate(data)\n\n # check if tag is assigned to any of the removed content_types\n if self.instance is not None and self.instance.present_in_database and \"content_types\" in data:\n content_types_id = [content_type.id for content_type in data[\"content_types\"]]\n errors = self.instance.validate_content_types_removal(content_types_id)\n\n if errors:\n raise serializers.ValidationError(errors)\n\n return data\n\n\n#\n# Webhook\n#\n\n\nclass WebhookSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"webhooks\").get_query()).order_by(\"app_label\", \"model\"),\n many=True,\n )\n\n class Meta:\n model = Webhook\n fields = \"__all__\"\n\n def validate(self, data):\n validated_data = super().validate(data)\n\n conflicts = Webhook.check_for_conflicts(\n instance=self.instance,\n content_types=data.get(\"content_types\"),\n payload_url=data.get(\"payload_url\"),\n type_create=data.get(\"type_create\"),\n type_update=data.get(\"type_update\"),\n type_delete=data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise serializers.ValidationError(conflicts)\n\n return validated_data\n", "path": "nautobot/extras/api/serializers.py" } ]
[ { "content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom nautobot.core.api import (\n BaseModelSerializer,\n ChoiceField,\n ContentTypeField,\n CustomFieldModelSerializerMixin,\n NautobotModelSerializer,\n NotesSerializerMixin,\n RelationshipModelSerializerMixin,\n ValidatedModelSerializer,\n)\nfrom nautobot.core.api.exceptions import SerializerNotFound\nfrom nautobot.core.api.serializers import PolymorphicProxySerializer\nfrom nautobot.core.api.utils import (\n get_nested_serializer_depth,\n nested_serializers_for_models,\n return_nested_serializer_data_based_on_depth,\n)\nfrom nautobot.core.models.utils import get_all_concrete_models\nfrom nautobot.dcim.api.serializers import (\n DeviceSerializer,\n LocationSerializer,\n RackSerializer,\n)\nfrom nautobot.extras import choices, models\nfrom nautobot.extras.choices import (\n CustomFieldFilterLogicChoices,\n CustomFieldTypeChoices,\n JobExecutionType,\n JobResultStatusChoices,\n ObjectChangeActionChoices,\n)\nfrom nautobot.extras.api.mixins import (\n TaggedModelSerializerMixin,\n)\nfrom nautobot.extras.datasources import get_datasource_content_choices\nfrom nautobot.extras.models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n ExternalIntegration,\n FileProxy,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobButton,\n JobHook,\n JobLogEntry,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n Role,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, RoleModelsQuery, TaggableClassesQuery\n\nfrom .fields import MultipleChoiceJSONField\n\n#\n# Mixins and Base Classes\n#\n\nlogger = logging.getLogger(__name__)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = ComputedField\n fields = \"__all__\"\n\n\n#\n# Config contexts\n#\n\n\nclass ConfigContextSerializer(ValidatedModelSerializer, TaggedModelSerializerMixin, NotesSerializerMixin):\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"config_context_owners\").get_query()),\n required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n # Conditional enablement of dynamic groups filtering\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if not settings.CONFIG_CONTEXT_DYNAMIC_GROUPS_ENABLED:\n # In the case of a nested serializer, we won't have a `dynamic_groups` field at all.\n self.fields.pop(\"dynamic_groups\", None)\n\n class Meta:\n model = ConfigContext\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ConfigContextOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"config_context_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n 
def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# Config context Schemas\n#\n\n\nclass ConfigContextSchemaSerializer(NautobotModelSerializer):\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"config_context_owners\").get_query()),\n required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ConfigContextSchema\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ConfigContextSchemaOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"config_context_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# ContentTypes\n#\n\n\nclass ContentTypeSerializer(BaseModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name=\"extras-api:contenttype-detail\")\n display = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentType\n fields = \"__all__\"\n\n @extend_schema_field(serializers.CharField)\n def get_display(self, obj):\n return obj.app_labeled_name\n\n\n#\n# Custom fields\n#\n\n\nclass CustomFieldSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()),\n many=True,\n )\n type = ChoiceField(choices=CustomFieldTypeChoices)\n filter_logic = ChoiceField(choices=CustomFieldFilterLogicChoices, required=False)\n label = serializers.CharField(max_length=50, required=True)\n\n class Meta:\n model = CustomField\n fields = \"__all__\"\n\n\nclass CustomFieldChoiceSerializer(ValidatedModelSerializer):\n class Meta:\n model = CustomFieldChoice\n fields = \"__all__\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = CustomLink\n fields = \"__all__\"\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupMembershipSerializer(ValidatedModelSerializer):\n class Meta:\n model = DynamicGroupMembership\n fields = \"__all__\"\n\n\nclass DynamicGroupSerializer(NautobotModelSerializer):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"dynamic_groups\").get_query()).order_by(\"app_label\", \"model\"),\n )\n\n class Meta:\n model = DynamicGroup\n fields = \"__all__\"\n extra_kwargs = {\n \"children\": {\"source\": \"dynamic_group_memberships\", \"read_only\": True},\n \"filter\": {\"read_only\": False},\n }\n\n\n#\n# Export templates\n#\n\n\n# TODO: export-templates don't support custom-fields, is this omission intentional?\nclass ExportTemplateSerializer(RelationshipModelSerializerMixin, ValidatedModelSerializer, NotesSerializerMixin):\n content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()),\n )\n owner_content_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_template_owners\").get_query()),\n 
required=False,\n allow_null=True,\n default=None,\n )\n owner = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ExportTemplate\n fields = \"__all__\"\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ExportTemplateOwner\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(FeatureQuery(\"export_template_owners\").list_subclasses()),\n allow_null=True,\n )\n )\n def get_owner(self, obj):\n if obj.owner is None:\n return None\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.owner, \"owner\")\n\n\n#\n# External integrations\n#\n\n\nclass ExternalIntegrationSerializer(NautobotModelSerializer):\n class Meta:\n model = ExternalIntegration\n fields = \"__all__\"\n\n\n#\n# File proxies\n#\n\n\nclass FileProxySerializer(BaseModelSerializer):\n class Meta:\n model = FileProxy\n exclude = [\"file\"]\n\n\n#\n# Git repositories\n#\n\n\nclass GitRepositorySerializer(NautobotModelSerializer):\n \"\"\"Git repositories defined as a data source.\"\"\"\n\n provided_contents = MultipleChoiceJSONField(\n choices=lambda: get_datasource_content_choices(\"extras.gitrepository\"),\n allow_blank=True,\n required=False,\n )\n\n class Meta:\n model = GitRepository\n fields = \"__all__\"\n\n\n#\n# GraphQL Queries\n#\n\n\nclass GraphQLQuerySerializer(ValidatedModelSerializer, NotesSerializerMixin):\n variables = serializers.DictField(read_only=True)\n\n class Meta:\n model = GraphQLQuery\n fields = \"__all__\"\n\n\nclass GraphQLQueryInputSerializer(serializers.Serializer):\n variables = serializers.DictField(allow_null=True, default={})\n\n\nclass GraphQLQueryOutputSerializer(serializers.Serializer):\n data = serializers.DictField(default={})\n\n\n#\n# Image attachments\n#\n\n\nclass ImageAttachmentSerializer(ValidatedModelSerializer):\n content_type = ContentTypeField(queryset=ContentType.objects.all())\n\n class Meta:\n model = ImageAttachment\n fields = \"__all__\"\n\n def validate(self, data):\n # Validate that the parent object exists\n try:\n data[\"content_type\"].get_object_for_this_type(id=data[\"object_id\"])\n except ObjectDoesNotExist:\n raise serializers.ValidationError(f\"Invalid parent object: {data['content_type']} ID {data['object_id']}\")\n\n # Enforce model validation\n super().validate(data)\n\n return data\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ImageAttachmentParent\",\n resource_type_field_name=\"object_type\",\n serializers=[\n DeviceSerializer,\n LocationSerializer,\n RackSerializer,\n ],\n )\n )\n def get_parent(self, obj):\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.parent, \"parent\")\n\n\n#\n# Jobs\n#\n\n\nclass JobSerializer(NautobotModelSerializer, TaggedModelSerializerMixin):\n class Meta:\n model = Job\n fields = \"__all__\"\n\n def validate(self, data):\n # note no validation for on creation of jobs because we do not support user creation of Job records via API\n if self.instance:\n has_sensitive_variables = data.get(\"has_sensitive_variables\", self.instance.has_sensitive_variables)\n approval_required = data.get(\"approval_required\", self.instance.approval_required)\n\n if approval_required and has_sensitive_variables:\n error_message = \"A job with sensitive variables cannot also be marked as requiring approval\"\n errors = {}\n\n if \"approval_required\" in data:\n errors[\"approval_required\"] = 
[error_message]\n if \"has_sensitive_variables\" in data:\n errors[\"has_sensitive_variables\"] = [error_message]\n\n raise serializers.ValidationError(errors)\n\n return super().validate(data)\n\n\nclass JobVariableSerializer(serializers.Serializer):\n \"\"\"Serializer used for responses from the JobModelViewSet.variables() detail endpoint.\"\"\"\n\n name = serializers.CharField(read_only=True)\n type = serializers.CharField(read_only=True)\n label = serializers.CharField(read_only=True, required=False)\n help_text = serializers.CharField(read_only=True, required=False)\n default = serializers.JSONField(read_only=True, required=False)\n required = serializers.BooleanField(read_only=True, required=False)\n min_length = serializers.IntegerField(read_only=True, required=False)\n max_length = serializers.IntegerField(read_only=True, required=False)\n min_value = serializers.IntegerField(read_only=True, required=False)\n max_value = serializers.IntegerField(read_only=True, required=False)\n choices = serializers.JSONField(read_only=True, required=False)\n model = serializers.CharField(read_only=True, required=False)\n\n\n#\n# Scheduled Jobs\n#\n\n\nclass ScheduledJobSerializer(BaseModelSerializer):\n # start_time = serializers.DateTimeField(format=None, required=False)\n\n class Meta:\n model = ScheduledJob\n fields = \"__all__\"\n\n\n#\n# Job Results\n#\n\n\nclass JobResultSerializer(CustomFieldModelSerializerMixin, BaseModelSerializer):\n status = ChoiceField(choices=JobResultStatusChoices, read_only=True)\n\n class Meta:\n model = JobResult\n fields = \"__all__\"\n extra_kwargs = {\n \"files\": {\"read_only\": True},\n }\n\n def get_field_names(self, declared_fields, info):\n \"\"\"Add reverse relation to related FileProxy objects.\"\"\"\n fields = list(super().get_field_names(declared_fields, info))\n self.extend_field_names(fields, \"files\")\n return fields\n\n\nclass JobRunResponseSerializer(serializers.Serializer):\n \"\"\"Serializer representing responses from the JobModelViewSet.run() POST endpoint.\"\"\"\n\n schedule = ScheduledJobSerializer(read_only=True, required=False)\n job_result = JobResultSerializer(read_only=True, required=False)\n\n\n#\n# Job classes (fka Custom Scripts, Reports)\n# 2.0 TODO: remove these if no longer needed\n#\n\n\nclass JobClassSerializer(serializers.Serializer):\n url = serializers.HyperlinkedIdentityField(\n view_name=\"extras-api:job-detail\",\n lookup_field=\"class_path\",\n lookup_url_kwarg=\"class_path\",\n )\n id = serializers.CharField(read_only=True, source=\"class_path\")\n pk = serializers.SerializerMethodField(read_only=True)\n name = serializers.CharField(max_length=255, read_only=True)\n description = serializers.CharField(max_length=255, required=False, read_only=True)\n test_methods = serializers.ListField(child=serializers.CharField(max_length=255))\n vars = serializers.SerializerMethodField(read_only=True)\n\n @extend_schema_field(serializers.DictField)\n def get_vars(self, instance):\n return {k: v.__class__.__name__ for k, v in instance._get_vars().items()}\n\n @extend_schema_field(serializers.UUIDField(allow_null=True))\n def get_pk(self, instance):\n try:\n jobs = Job.objects\n if \"request\" in self.context and self.context[\"request\"].user is not None:\n jobs = jobs.restrict(self.context[\"request\"].user, \"view\")\n job_model = jobs.get_for_class_path(instance.class_path)\n return job_model.pk\n except Job.DoesNotExist:\n return None\n\n\nclass JobClassDetailSerializer(JobClassSerializer):\n result = 
JobResultSerializer(required=False)\n\n\nclass JobHookSerializer(NautobotModelSerializer):\n content_types = ContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(),\n many=True,\n )\n\n class Meta:\n model = JobHook\n fields = \"__all__\"\n\n def validate(self, data):\n validated_data = super().validate(data)\n\n conflicts = JobHook.check_for_conflicts(\n instance=self.instance,\n content_types=data.get(\"content_types\"),\n job=data.get(\"job\"),\n type_create=data.get(\"type_create\"),\n type_update=data.get(\"type_update\"),\n type_delete=data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise serializers.ValidationError(conflicts)\n\n return validated_data\n\n\nclass JobCreationSerializer(BaseModelSerializer):\n \"\"\"\n Nested serializer specifically for use with `JobInputSerializer.schedule`.\n\n We don't use `WritableNestedSerializer` here because this is not used to look up\n an existing `ScheduledJob`, but instead used to specify parameters for creating one.\n \"\"\"\n\n url = serializers.HyperlinkedIdentityField(view_name=\"extras-api:scheduledjob-detail\")\n name = serializers.CharField(max_length=255, required=False)\n start_time = serializers.DateTimeField(format=None, required=False)\n\n class Meta:\n model = ScheduledJob\n fields = [\"url\", \"name\", \"start_time\", \"interval\", \"crontab\"]\n\n def validate(self, data):\n data = super().validate(data)\n\n if data[\"interval\"] in choices.JobExecutionType.SCHEDULE_CHOICES:\n if \"name\" not in data:\n raise serializers.ValidationError({\"name\": \"Please provide a name for the job schedule.\"})\n\n if (\"start_time\" not in data and data[\"interval\"] != choices.JobExecutionType.TYPE_CUSTOM) or (\n \"start_time\" in data and data[\"start_time\"] < models.ScheduledJob.earliest_possible_time()\n ):\n raise serializers.ValidationError(\n {\n \"start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if data[\"interval\"] == choices.JobExecutionType.TYPE_CUSTOM:\n if data.get(\"crontab\") is None:\n raise serializers.ValidationError({\"crontab\": \"Please enter a valid crontab.\"})\n try:\n models.ScheduledJob.get_crontab(data[\"crontab\"])\n except Exception as e:\n raise serializers.ValidationError({\"crontab\": e})\n\n return data\n\n\nclass JobInputSerializer(serializers.Serializer):\n data = serializers.JSONField(required=False, default=dict)\n schedule = JobCreationSerializer(required=False)\n task_queue = serializers.CharField(required=False, allow_blank=True)\n\n\nclass JobMultiPartInputSerializer(serializers.Serializer):\n \"\"\"JobMultiPartInputSerializer is a \"flattened\" version of JobInputSerializer for use with multipart/form-data submissions which only accept key-value pairs\"\"\"\n\n _schedule_name = serializers.CharField(max_length=255, required=False)\n _schedule_start_time = serializers.DateTimeField(format=None, required=False)\n _schedule_interval = ChoiceField(choices=JobExecutionType, required=False)\n _schedule_crontab = serializers.CharField(required=False, allow_blank=True)\n _task_queue = serializers.CharField(required=False, allow_blank=True)\n\n def validate(self, data):\n data = super().validate(data)\n\n if \"_schedule_interval\" in data and data[\"_schedule_interval\"] != JobExecutionType.TYPE_IMMEDIATELY:\n if \"_schedule_name\" not in data:\n raise serializers.ValidationError({\"_schedule_name\": \"Please provide a name for the job schedule.\"})\n\n if (\"_schedule_start_time\" not in data and 
data[\"_schedule_interval\"] != JobExecutionType.TYPE_CUSTOM) or (\n \"_schedule_start_time\" in data and data[\"_schedule_start_time\"] < ScheduledJob.earliest_possible_time()\n ):\n raise serializers.ValidationError(\n {\n \"_schedule_start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if data[\"_schedule_interval\"] == JobExecutionType.TYPE_CUSTOM:\n if data.get(\"_schedule_crontab\") is None:\n raise serializers.ValidationError({\"_schedule_crontab\": \"Please enter a valid crontab.\"})\n try:\n ScheduledJob.get_crontab(data[\"_schedule_crontab\"])\n except Exception as e:\n raise serializers.ValidationError({\"_schedule_crontab\": e})\n\n return data\n\n\nclass JobLogEntrySerializer(BaseModelSerializer):\n class Meta:\n model = JobLogEntry\n fields = \"__all__\"\n\n\n#\n# Job Button\n#\n\n\nclass JobButtonSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(queryset=ContentType.objects.all(), many=True)\n\n class Meta:\n model = JobButton\n fields = \"__all__\"\n\n\n#\n# Notes\n#\n\n\nclass NoteSerializer(BaseModelSerializer):\n assigned_object_type = ContentTypeField(queryset=ContentType.objects.all())\n assigned_object = serializers.SerializerMethodField()\n\n class Meta:\n model = Note\n fields = \"__all__\"\n list_display_fields = [\"note\", \"assigned_object_type\", \"assigned_object_id\", \"user\"]\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"NoteAssignedObject\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(get_all_concrete_models(NotesMixin)),\n allow_null=True,\n )\n )\n def get_assigned_object(self, obj):\n if obj.assigned_object is None:\n return None\n try:\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(\n self, depth, obj, obj.assigned_object, \"assigned_object\"\n )\n except SerializerNotFound:\n return None\n\n\nclass NoteInputSerializer(serializers.Serializer):\n note = serializers.CharField()\n\n\n#\n# Change logging\n#\n\n\nclass ObjectChangeSerializer(BaseModelSerializer):\n action = ChoiceField(choices=ObjectChangeActionChoices, read_only=True)\n changed_object_type = ContentTypeField(read_only=True)\n related_object_type = ContentTypeField(read_only=True)\n changed_object = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = ObjectChange\n fields = \"__all__\"\n list_display_fields = [\"changed_object_id\", \"related_object_id\", \"related_object_type\", \"user\"]\n\n @extend_schema_field(\n PolymorphicProxySerializer(\n component_name=\"ObjectChangeChangedObject\",\n resource_type_field_name=\"object_type\",\n serializers=lambda: nested_serializers_for_models(ChangeLoggedModelsQuery().list_subclasses()),\n allow_null=True,\n )\n )\n def get_changed_object(self, obj):\n \"\"\"\n Serialize a nested representation of the changed object.\n \"\"\"\n if obj.changed_object is None:\n return None\n try:\n depth = get_nested_serializer_depth(self)\n return return_nested_serializer_data_based_on_depth(self, depth, obj, obj.changed_object, \"changed_object\")\n except SerializerNotFound:\n return obj.object_repr\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n source_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n destination_type = ContentTypeField(\n 
queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n class Meta:\n model = Relationship\n fields = \"__all__\"\n\n\nclass RelationshipAssociationSerializer(ValidatedModelSerializer):\n source_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n destination_type = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()),\n )\n\n class Meta:\n model = RelationshipAssociation\n fields = \"__all__\"\n\n\n#\n# Roles\n#\n\n\nclass RoleSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `Role` objects.\"\"\"\n\n content_types = ContentTypeField(\n queryset=RoleModelsQuery().as_queryset(),\n many=True,\n )\n\n class Meta:\n model = Role\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 00ff00)\"},\n }\n\n\n#\n# Secrets\n#\n\n\nclass SecretSerializer(NautobotModelSerializer, TaggedModelSerializerMixin):\n \"\"\"Serializer for `Secret` objects.\"\"\"\n\n class Meta:\n model = Secret\n fields = \"__all__\"\n\n\nclass SecretsGroupAssociationSerializer(ValidatedModelSerializer):\n \"\"\"Serializer for `SecretsGroupAssociation` objects.\"\"\"\n\n class Meta:\n model = SecretsGroupAssociation\n fields = \"__all__\"\n\n\nclass SecretsGroupSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `SecretsGroup` objects.\"\"\"\n\n class Meta:\n model = SecretsGroup\n fields = \"__all__\"\n # TODO: it would be **awesome** if we could create/update SecretsGroupAssociations\n # alongside creating/updating the base SecretsGroup, but since this is a ManyToManyField with\n # a `through` table, that appears very non-trivial to implement. For now we have this as a\n # read-only field; to create/update SecretsGroupAssociations you must make separate calls to the\n # api/extras/secrets-group-associations/ REST endpoint as appropriate.\n extra_kwargs = {\n \"secrets\": {\"source\": \"secrets_group_associations\", \"read_only\": True},\n }\n\n\n#\n# Custom statuses\n#\n\n\nclass StatusSerializer(NautobotModelSerializer):\n \"\"\"Serializer for `Status` objects.\"\"\"\n\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"statuses\").get_query()),\n many=True,\n )\n\n class Meta:\n model = Status\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 00ff00)\"},\n }\n\n\n#\n# Tags\n#\n\n\nclass TagSerializer(NautobotModelSerializer):\n tagged_items = serializers.IntegerField(read_only=True)\n content_types = ContentTypeField(\n queryset=TaggableClassesQuery().as_queryset(),\n many=True,\n required=True,\n )\n\n class Meta:\n model = Tag\n fields = \"__all__\"\n extra_kwargs = {\n \"color\": {\"help_text\": \"RGB color in hexadecimal (e.g. 
00ff00)\"},\n }\n\n def validate(self, data):\n data = super().validate(data)\n\n # check if tag is assigned to any of the removed content_types\n if self.instance is not None and self.instance.present_in_database and \"content_types\" in data:\n content_types_id = [content_type.id for content_type in data[\"content_types\"]]\n errors = self.instance.validate_content_types_removal(content_types_id)\n\n if errors:\n raise serializers.ValidationError(errors)\n\n return data\n\n\n#\n# Webhook\n#\n\n\nclass WebhookSerializer(ValidatedModelSerializer, NotesSerializerMixin):\n content_types = ContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"webhooks\").get_query()).order_by(\"app_label\", \"model\"),\n many=True,\n )\n\n class Meta:\n model = Webhook\n fields = \"__all__\"\n\n def validate(self, data):\n validated_data = super().validate(data)\n\n conflicts = Webhook.check_for_conflicts(\n instance=self.instance,\n content_types=data.get(\"content_types\"),\n payload_url=data.get(\"payload_url\"),\n type_create=data.get(\"type_create\"),\n type_update=data.get(\"type_update\"),\n type_delete=data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise serializers.ValidationError(conflicts)\n\n return validated_data\n", "path": "nautobot/extras/api/serializers.py" } ]
diff --git a/changes/4606.fixed b/changes/4606.fixed new file mode 100644 index 00000000000..c60342ca12c --- /dev/null +++ b/changes/4606.fixed @@ -0,0 +1,4 @@ +Fixed an error when attempting to "Save Changes" to an existing GraphQL saved query via the GraphiQL UI. +Fixed incorrect positioning of the "Save Changes" button in the "Queries" menu in the GraphiQL UI. +Fixed incorrect specification of the "variables" field in the GraphQL saved query REST API. +Fixed a display glitch in the detail view for GraphQL saved queries. diff --git a/nautobot/core/templates/graphene/graphiql.html b/nautobot/core/templates/graphene/graphiql.html index 24043895570..5403a76f4ae 100644 --- a/nautobot/core/templates/graphene/graphiql.html +++ b/nautobot/core/templates/graphene/graphiql.html @@ -53,6 +53,10 @@ margin: 0; width: 100%; } + .toolbar .dropdown-menu > li > a { + clear: left; + margin-right: 120px; /* leave room for the "Save Changes" button if present */ + } </style> <!-- As Nautobot may be run without internet access, we source these files locally rather than from an online CDN --> <link rel="stylesheet" @@ -163,7 +167,6 @@ } function save() { - graphql_data = getHashParams(); var url = new URL(window.location.href); var id = url.searchParams.get("id"); var result_window = $(".result-window").find(".CodeMirror-code"); @@ -179,10 +182,13 @@ return }; + var graphql_data = JSON.stringify({"query": getHashParams().query}); + $.ajax({ url: `{% url 'extras-api:graphqlquery-list' %}${id}/`, method: "PATCH", headers: {"X-CSRFTOKEN": "{{ csrf_token }}"}, + contentType: "application/json", dataType: "json", data: graphql_data, success: function(data) { diff --git a/nautobot/extras/api/serializers.py b/nautobot/extras/api/serializers.py index 574482f598f..2f2c484c47d 100644 --- a/nautobot/extras/api/serializers.py +++ b/nautobot/extras/api/serializers.py @@ -342,7 +342,7 @@ class Meta: class GraphQLQuerySerializer(ValidatedModelSerializer, NotesSerializerMixin): - variables = serializers.DictField(required=False, allow_null=True, default={}) + variables = serializers.DictField(read_only=True) class Meta: model = GraphQLQuery diff --git a/nautobot/extras/templates/extras/graphqlquery.html b/nautobot/extras/templates/extras/graphqlquery.html index ce50510688f..0e194a00b06 100644 --- a/nautobot/extras/templates/extras/graphqlquery.html +++ b/nautobot/extras/templates/extras/graphqlquery.html @@ -37,9 +37,11 @@ </tr> </table> {% endif %} - <pre id="query_output"> + <div class="panel-footer"> + <pre id="query_output"> - </pre> + </pre> + </div> </div> {% endblock content_right_page %}
zigpy__zha-device-handlers-1205
Support for LIDL Livarno home staande led lamp zigbee

**Feature request.** This LED lamp is discovered in HA as a generic light (no Quirk).

- ON/OFF works okay
- DIM/Level works okay
- RGB color capability is reported but is not supported by this lamp; it is CCT only.
- Color temperature does not work correctly. When I change the color temperature, the LED does change, but not correctly: at the minimum setting it is cold white (with only a little warmth), and at the maximum setting it becomes fully cold, while I expect it to be warm (yellow, orange).

LAMP website: https://www.lidl.nl/p/livarno-home-staande-led-lamp-zigbee-smart-home/p100335194

**Describe the solution you'd like**

1. I would like the RGB detection removed, so it does not offer to change the color.
2. I would like the color temperature to work as it should.

For solution 1, I have added the following model to the signature in the file **/zhaquirks/lidl/cct.py** (see the completed sketch after this issue text): `signature = { MODELS_INFO: [ ("_TZ3000_8uaoilu9", "TS0502A")`. That solves the RGB problem. For solution 2, I have no idea where to even start.

**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**

```
{ "node_descriptor": "NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)", "endpoints": { "1": { "profile_id": 260, "device_type": "0x010c", "in_clusters": [ "0x0000", "0x0003", "0x0004", "0x0005", "0x0006", "0x0008", "0x0300", "0x1000" ], "out_clusters": [ "0x000a", "0x0019" ] }, "242": { "profile_id": 41440, "device_type": "0x0061", "in_clusters": [], "out_clusters": [ "0x0021" ] } }, "manufacturer": "_TZ3000_8uaoilu9", "model": "TS0502A", "class": "zhaquirks.lidl.cct.CCTLight" }
```

**Additional context**
If you need any other info / logging, just let me know.
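The signature snippet quoted in the issue above is truncated. As a clarifying sketch, consistent with the after_files and pr_diff entries recorded for this row, the change amounts to appending the new manufacturer/model pair to `MODELS_INFO` inside the existing `CCTLight.signature`; everything else in `zhaquirks/lidl/cct.py` stays the same:

```python
# Sketch of the MODELS_INFO addition only; the ENDPOINTS portion of the signature
# and the rest of the CCTLight quirk (which hides the unsupported RGB color wheel)
# are unchanged.
from zhaquirks.const import MODELS_INFO

signature = {
    MODELS_INFO: [
        ("_TZ3000_49qchf10", "TS0502A"),
        ("_TZ3000_oborybow", "TS0502A"),
        ("_TZ3000_9evm3otq", "TS0502A"),
        ("_TZ3000_rylaozuc", "TS0502A"),
        ("_TZ3000_el5kt5im", "TS0502A"),
        ("_TZ3000_oh7jddmx", "TS0502A"),
        ("_TZ3000_8uaoilu9", "TS0502A"),  # new entry for the lamp reported above
    ],
    # ... ENDPOINTS definition as in the file content below ...
}
```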
[ { "content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n (\"_TZ3000_oh7jddmx\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py" } ]
[ { "content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n (\"_TZ3000_oh7jddmx\", \"TS0502A\"),\n (\"_TZ3000_8uaoilu9\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py" } ]
diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py
index b45bbed83a..de1d9769b6 100644
--- a/zhaquirks/lidl/cct.py
+++ b/zhaquirks/lidl/cct.py
@@ -44,6 +44,7 @@ class CCTLight(CustomDevice):
             ("_TZ3000_rylaozuc", "TS0502A"),
             ("_TZ3000_el5kt5im", "TS0502A"),
             ("_TZ3000_oh7jddmx", "TS0502A"),
+            ("_TZ3000_8uaoilu9", "TS0502A"),
         ],
         ENDPOINTS: {
             1: {
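For context, the entire change in this record is the extra (manufacturer, model) pair added to `MODELS_INFO`. A self-contained sketch of the matching idea follows; it is an illustration only, not zigpy's actual quirk-matching code, and the helper name `covered` is made up for this sketch:

```python
# Illustration only: the quirk applies to devices whose reported
# (manufacturer, model) pair appears in MODELS_INFO.
MODELS_INFO = [
    ("_TZ3000_49qchf10", "TS0502A"),
    ("_TZ3000_oborybow", "TS0502A"),
    ("_TZ3000_9evm3otq", "TS0502A"),
    ("_TZ3000_rylaozuc", "TS0502A"),
    ("_TZ3000_el5kt5im", "TS0502A"),
    ("_TZ3000_oh7jddmx", "TS0502A"),
    ("_TZ3000_8uaoilu9", "TS0502A"),  # pair added by the diff above
]


def covered(manufacturer: str, model: str) -> bool:
    """Return True if the pair is listed in MODELS_INFO."""
    return (manufacturer, model) in MODELS_INFO


assert covered("_TZ3000_8uaoilu9", "TS0502A")
```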
googleapis__google-cloud-python-2379
'_AsyncQuery.cancel' fails to update from returned resource
E.g.:

``` python
Traceback (most recent call last):
  File ...
    job.cancel()
  File ".../google/cloud/bigquery/job.py", line 378, in cancel
    self._set_properties(api_response)
  File ".../google/cloud/bigquery/job.py", line 262, in _set_properties
    self._scrub_local_properties(cleaned)
  File ".../google/cloud/bigquery/job.py", line 1050, in _scrub_local_properties
    configuration = cleaned['configuration']['query']
KeyError: 'configuration'
```

The [docs for `job.cancel`](https://cloud.google.com/bigquery/docs/reference/v2/jobs/cancel#response) show that the job resource is in a `job` subkey of the response.
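The before/after files for this record (below) show the one-line fix: `cancel` now passes `api_response['job']` to `_set_properties` instead of the raw response. A minimal sketch of why the unwrap is needed; the sample response body here is an assumption modeled on the linked jobs.cancel docs, not data taken from this record:

```python
# Sketch only: jobs.cancel wraps the job resource under a 'job' key,
# unlike jobs.get / jobs.insert, which return the resource directly.
api_response = {
    "kind": "bigquery#jobCancelResponse",  # assumed field, per the v2 docs
    "job": {
        "jobReference": {"projectId": "my-project", "jobId": "my-job"},
        "configuration": {"query": {"query": "SELECT 1"}},
        "status": {"state": "DONE"},
    },
}

# Before the fix, QueryJob._scrub_local_properties ran against the raw
# response and raised KeyError: 'configuration':
#     configuration = api_response['configuration']['query']

# After the fix, the nested resource is unwrapped first:
job_resource = api_response["job"]
configuration = job_resource["configuration"]["query"]  # resolves cleanly
print(configuration)
```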
[ { "content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Jobs.\"\"\"\n\nimport six\n\nfrom google.cloud.exceptions import NotFound\nfrom google.cloud._helpers import _datetime_from_microseconds\nfrom google.cloud.bigquery.dataset import Dataset\nfrom google.cloud.bigquery.schema import SchemaField\nfrom google.cloud.bigquery.table import Table\nfrom google.cloud.bigquery.table import _build_schema_resource\nfrom google.cloud.bigquery.table import _parse_schema_resource\nfrom google.cloud.bigquery._helpers import UDFResourcesProperty\nfrom google.cloud.bigquery._helpers import _EnumProperty\nfrom google.cloud.bigquery._helpers import _TypedProperty\nfrom google.cloud.bigquery._helpers import _build_udf_resources\n\n\nclass Compression(_EnumProperty):\n \"\"\"Pseudo-enum for ``compression`` properties.\"\"\"\n GZIP = 'GZIP'\n NONE = 'NONE'\n ALLOWED = (GZIP, NONE)\n\n\nclass CreateDisposition(_EnumProperty):\n \"\"\"Pseudo-enum for ``create_disposition`` properties.\"\"\"\n CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'\n CREATE_NEVER = 'CREATE_NEVER'\n ALLOWED = (CREATE_IF_NEEDED, CREATE_NEVER)\n\n\nclass DestinationFormat(_EnumProperty):\n \"\"\"Pseudo-enum for ``destination_format`` properties.\"\"\"\n CSV = 'CSV'\n NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'\n AVRO = 'AVRO'\n ALLOWED = (CSV, NEWLINE_DELIMITED_JSON, AVRO)\n\n\nclass Encoding(_EnumProperty):\n \"\"\"Pseudo-enum for ``encoding`` properties.\"\"\"\n UTF_8 = 'UTF-8'\n ISO_8559_1 = 'ISO-8559-1'\n ALLOWED = (UTF_8, ISO_8559_1)\n\n\nclass QueryPriority(_EnumProperty):\n \"\"\"Pseudo-enum for ``QueryJob.priority`` property.\"\"\"\n INTERACTIVE = 'INTERACTIVE'\n BATCH = 'BATCH'\n ALLOWED = (INTERACTIVE, BATCH)\n\n\nclass SourceFormat(_EnumProperty):\n \"\"\"Pseudo-enum for ``source_format`` properties.\"\"\"\n CSV = 'CSV'\n DATASTORE_BACKUP = 'DATASTORE_BACKUP'\n NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'\n ALLOWED = (CSV, DATASTORE_BACKUP, NEWLINE_DELIMITED_JSON)\n\n\nclass WriteDisposition(_EnumProperty):\n \"\"\"Pseudo-enum for ``write_disposition`` properties.\"\"\"\n WRITE_APPEND = 'WRITE_APPEND'\n WRITE_TRUNCATE = 'WRITE_TRUNCATE'\n WRITE_EMPTY = 'WRITE_EMPTY'\n ALLOWED = (WRITE_APPEND, WRITE_TRUNCATE, WRITE_EMPTY)\n\n\nclass _BaseJob(object):\n \"\"\"Base class for jobs.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n def __init__(self, client):\n self._client = client\n self._properties = {}\n\n @property\n def project(self):\n \"\"\"Project bound to the job.\n\n :rtype: string\n :returns: the project (derived from the client).\n \"\"\"\n return self._client.project\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :rtype: :class:`google.cloud.bigquery.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n\nclass _AsyncJob(_BaseJob):\n \"\"\"Base class for asynchronous jobs.\n\n :type name: string\n :param name: the name of the job\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n def __init__(self, name, client):\n super(_AsyncJob, self).__init__(client)\n self.name = name\n\n @property\n def job_type(self):\n \"\"\"Type of job\n\n :rtype: string\n :returns: one of 'load', 'copy', 'extract', 'query'\n \"\"\"\n return self._JOB_TYPE\n\n @property\n def path(self):\n \"\"\"URL path for the job's APIs.\n\n :rtype: string\n :returns: the path based on project and job name.\n \"\"\"\n return '/projects/%s/jobs/%s' % (self.project, self.name)\n\n @property\n def etag(self):\n \"\"\"ETag for the job resource.\n\n :rtype: string, or ``NoneType``\n :returns: the ETag (None until set from the server).\n \"\"\"\n return self._properties.get('etag')\n\n @property\n def self_link(self):\n \"\"\"URL for the job resource.\n\n :rtype: string, or ``NoneType``\n :returns: the URL (None until set from the server).\n \"\"\"\n return self._properties.get('selfLink')\n\n @property\n def user_email(self):\n \"\"\"E-mail address of user who submitted the job.\n\n :rtype: string, or ``NoneType``\n :returns: the URL (None until set from the server).\n \"\"\"\n return self._properties.get('user_email')\n\n @property\n def created(self):\n \"\"\"Datetime at which the job was created.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the creation time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('creationTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def started(self):\n \"\"\"Datetime at which the job was started.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the start time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('startTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def ended(self):\n \"\"\"Datetime at which the job finished.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the end time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('endTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def error_result(self):\n \"\"\"Error information about the job as a whole.\n\n :rtype: mapping, or ``NoneType``\n :returns: the error information (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('errorResult')\n\n @property\n def errors(self):\n \"\"\"Information about individual errors generated by the job.\n\n :rtype: list of mappings, or ``NoneType``\n :returns: the error information (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('errors')\n\n @property\n 
def state(self):\n \"\"\"Status of the job.\n\n :rtype: string, or ``NoneType``\n :returns: the state (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('state')\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\"\"\"\n pass\n\n def _set_properties(self, api_response):\n \"\"\"Update properties from resource in body of ``api_response``\n\n :type api_response: httplib2.Response\n :param api_response: response returned from an API call\n \"\"\"\n cleaned = api_response.copy()\n self._scrub_local_properties(cleaned)\n\n statistics = cleaned.get('statistics', {})\n if 'creationTime' in statistics:\n statistics['creationTime'] = float(statistics['creationTime'])\n if 'startTime' in statistics:\n statistics['startTime'] = float(statistics['startTime'])\n if 'endTime' in statistics:\n statistics['endTime'] = float(statistics['endTime'])\n\n self._properties.clear()\n self._properties.update(cleaned)\n\n @classmethod\n def _get_resource_config(cls, resource):\n \"\"\"Helper for :meth:`from_api_repr`\n\n :type resource: dict\n :param resource: resource for the job\n\n :rtype: dict\n :returns: tuple (string, dict), where the first element is the\n job name and the second contains job-specific configuration.\n :raises: :class:`KeyError` if the resource has no identifier, or\n is missing the appropriate configuration.\n \"\"\"\n if ('jobReference' not in resource or\n 'jobId' not in resource['jobReference']):\n raise KeyError('Resource lacks required identity information: '\n '[\"jobReference\"][\"jobId\"]')\n name = resource['jobReference']['jobId']\n if ('configuration' not in resource or\n cls._JOB_TYPE not in resource['configuration']):\n raise KeyError('Resource lacks required configuration: '\n '[\"configuration\"][\"%s\"]' % cls._JOB_TYPE)\n config = resource['configuration'][cls._JOB_TYPE]\n return name, config\n\n def begin(self, client=None):\n \"\"\"API call: begin the job via a POST request\n\n See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :raises: :exc:`ValueError` if the job has already begin.\n \"\"\"\n if self.state is not None:\n raise ValueError(\"Job already begun.\")\n\n client = self._require_client(client)\n path = '/projects/%s/jobs' % (self.project,)\n api_response = client.connection.api_request(\n method='POST', path=path, data=self._build_resource())\n self._set_properties(api_response)\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the job via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/get\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :rtype: bool\n :returns: Boolean indicating existence of the job.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.connection.api_request(method='GET', path=self.path,\n query_params={'fields': 'id'})\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: refresh job properties via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/get\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n api_response = client.connection.api_request(\n method='GET', path=self.path)\n self._set_properties(api_response)\n\n def cancel(self, client=None):\n \"\"\"API call: cancel job via a POST request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/cancel\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n api_response = client.connection.api_request(\n method='POST', path='%s/cancel' % (self.path,))\n self._set_properties(api_response)\n\n\nclass _LoadConfiguration(object):\n \"\"\"User-settable configuration options for load jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _allow_jagged_rows = None\n _allow_quoted_newlines = None\n _create_disposition = None\n _encoding = None\n _field_delimiter = None\n _ignore_unknown_values = None\n _max_bad_records = None\n _quote_character = None\n _skip_leading_rows = None\n _source_format = None\n _write_disposition = None\n\n\nclass LoadTableFromStorageJob(_AsyncJob):\n \"\"\"Asynchronous job for loading data into a table from CloudStorage.\n\n :type name: string\n :param name: the name of the job\n\n :type destination: :class:`google.cloud.bigquery.table.Table`\n :param destination: Table into which data is to be loaded.\n\n :type source_uris: sequence of string\n :param source_uris: URIs of one or more data files to be loaded, in\n format ``gs://<bucket_name>/<object_name_or_glob>``.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n :type schema: list of :class:`google.cloud.bigquery.table.SchemaField`\n :param schema: The job's schema\n \"\"\"\n\n _schema = None\n _JOB_TYPE = 'load'\n\n def __init__(self, name, destination, source_uris, client, schema=()):\n super(LoadTableFromStorageJob, self).__init__(name, client)\n self.destination = destination\n self.source_uris = source_uris\n # Let the @property do validation.\n self.schema = schema\n self._configuration = _LoadConfiguration()\n\n @property\n def schema(self):\n \"\"\"Table's schema.\n\n :rtype: list of :class:`SchemaField`\n :returns: fields describing the schema\n \"\"\"\n return list(self._schema)\n\n @schema.setter\n def schema(self, value):\n \"\"\"Update table's schema\n\n :type value: list of :class:`SchemaField`\n :param value: fields describing the schema\n\n :raises: TypeError if 'value' is not a sequence, or ValueError if\n any item in the sequence is not a SchemaField\n \"\"\"\n if not all(isinstance(field, SchemaField) for field in value):\n raise 
ValueError('Schema items must be fields')\n self._schema = tuple(value)\n\n @property\n def input_file_bytes(self):\n \"\"\"Count of bytes loaded from source files.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['inputFileBytes'])\n\n @property\n def input_files(self):\n \"\"\"Count of source files.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['inputFiles'])\n\n @property\n def output_bytes(self):\n \"\"\"Count of bytes saved to destination table.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['outputBytes'])\n\n @property\n def output_rows(self):\n \"\"\"Count of rows saved to destination table.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['outputRows'])\n\n allow_jagged_rows = _TypedProperty('allow_jagged_rows', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowJaggedRows\n \"\"\"\n\n allow_quoted_newlines = _TypedProperty('allow_quoted_newlines', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowQuotedNewlines\n \"\"\"\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition\n \"\"\"\n\n encoding = Encoding('encoding')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding\n \"\"\"\n\n field_delimiter = _TypedProperty('field_delimiter', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.fieldDelimiter\n \"\"\"\n\n ignore_unknown_values = _TypedProperty('ignore_unknown_values', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.ignoreUnknownValues\n \"\"\"\n\n max_bad_records = _TypedProperty('max_bad_records', six.integer_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.maxBadRecords\n \"\"\"\n\n quote_character = _TypedProperty('quote_character', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.quote\n \"\"\"\n\n skip_leading_rows = _TypedProperty('skip_leading_rows', six.integer_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.skipLeadingRows\n \"\"\"\n\n source_format = SourceFormat('source_format')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.sourceFormat\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.allow_jagged_rows is not None:\n configuration['allowJaggedRows'] = 
self.allow_jagged_rows\n if self.allow_quoted_newlines is not None:\n configuration['allowQuotedNewlines'] = self.allow_quoted_newlines\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.encoding is not None:\n configuration['encoding'] = self.encoding\n if self.field_delimiter is not None:\n configuration['fieldDelimiter'] = self.field_delimiter\n if self.ignore_unknown_values is not None:\n configuration['ignoreUnknownValues'] = self.ignore_unknown_values\n if self.max_bad_records is not None:\n configuration['maxBadRecords'] = self.max_bad_records\n if self.quote_character is not None:\n configuration['quote'] = self.quote_character\n if self.skip_leading_rows is not None:\n configuration['skipLeadingRows'] = self.skip_leading_rows\n if self.source_format is not None:\n configuration['sourceFormat'] = self.source_format\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceUris': self.source_uris,\n 'destinationTable': {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n },\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n if len(self.schema) > 0:\n configuration['schema'] = {\n 'fields': _build_schema_resource(self.schema)}\n\n return resource\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\"\"\"\n schema = cleaned.pop('schema', {'fields': ()})\n self.schema = _parse_schema_resource(schema)\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.LoadTableFromStorageJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n dest_config = config['destinationTable']\n dataset = Dataset(dest_config['datasetId'], client)\n destination = Table(dest_config['tableId'], dataset)\n source_urls = config.get('sourceUris', ())\n job = cls(name, destination, source_urls, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _CopyConfiguration(object):\n \"\"\"User-settable configuration options for copy jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _create_disposition = None\n _write_disposition = None\n\n\nclass CopyJob(_AsyncJob):\n \"\"\"Asynchronous job: copy data into a table from other tables.\n\n :type name: string\n :param name: the name of the job\n\n :type destination: :class:`google.cloud.bigquery.table.Table`\n :param destination: Table into which data is to be loaded.\n\n :type sources: list of :class:`google.cloud.bigquery.table.Table`\n :param sources: Table into which data is to be loaded.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n\n _JOB_TYPE = 'copy'\n\n def __init__(self, name, destination, sources, client):\n super(CopyJob, self).__init__(name, client)\n self.destination = destination\n self.sources = sources\n self._configuration = _CopyConfiguration()\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.createDisposition\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.writeDisposition\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n source_refs = [{\n 'projectId': table.project,\n 'datasetId': table.dataset_name,\n 'tableId': table.name,\n } for table in self.sources]\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceTables': source_refs,\n 'destinationTable': {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n },\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.CopyJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n dest_config = config['destinationTable']\n dataset = Dataset(dest_config['datasetId'], client)\n destination = Table(dest_config['tableId'], dataset)\n sources = []\n for source_config in config['sourceTables']:\n dataset = Dataset(source_config['datasetId'], client)\n sources.append(Table(source_config['tableId'], dataset))\n job = cls(name, destination, sources, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _ExtractConfiguration(object):\n \"\"\"User-settable configuration options for extract jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _compression = None\n _destination_format = None\n _field_delimiter = None\n _print_header = None\n\n\nclass ExtractTableToStorageJob(_AsyncJob):\n \"\"\"Asynchronous job: extract data from a table into Cloud Storage.\n\n :type name: string\n :param name: the name of the job\n\n :type source: :class:`google.cloud.bigquery.table.Table`\n :param source: Table into which data is to be loaded.\n\n :type destination_uris: list of string\n :param destination_uris: URIs describing Cloud Storage blobs into which\n extracted data will be written, in format\n ``gs://<bucket_name>/<object_name_or_glob>``.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n _JOB_TYPE = 'extract'\n\n def __init__(self, name, source, destination_uris, client):\n super(ExtractTableToStorageJob, self).__init__(name, client)\n self.source = source\n self.destination_uris = destination_uris\n self._configuration = _ExtractConfiguration()\n\n compression = Compression('compression')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.compression\n \"\"\"\n\n destination_format = DestinationFormat('destination_format')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.destinationFormat\n \"\"\"\n\n field_delimiter = _TypedProperty('field_delimiter', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.fieldDelimiter\n \"\"\"\n\n print_header = _TypedProperty('print_header', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.printHeader\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.compression is not None:\n configuration['compression'] = self.compression\n if self.destination_format is not None:\n configuration['destinationFormat'] = self.destination_format\n if self.field_delimiter is not None:\n configuration['fieldDelimiter'] = self.field_delimiter\n if self.print_header is not None:\n configuration['printHeader'] = self.print_header\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n source_ref = {\n 'projectId': self.source.project,\n 'datasetId': 
self.source.dataset_name,\n 'tableId': self.source.name,\n }\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceTable': source_ref,\n 'destinationUris': self.destination_uris,\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.ExtractTableToStorageJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n source_config = config['sourceTable']\n dataset = Dataset(source_config['datasetId'], client)\n source = Table(source_config['tableId'], dataset)\n destination_uris = config['destinationUris']\n job = cls(name, source, destination_uris, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _AsyncQueryConfiguration(object):\n \"\"\"User-settable configuration options for asynchronous query jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _allow_large_results = None\n _create_disposition = None\n _default_dataset = None\n _destination = None\n _flatten_results = None\n _priority = None\n _use_query_cache = None\n _use_legacy_sql = None\n _write_disposition = None\n _maximum_billing_tier = None\n _maximum_bytes_billed = None\n\n\nclass QueryJob(_AsyncJob):\n \"\"\"Asynchronous job: query tables.\n\n :type name: string\n :param name: the name of the job\n\n :type query: string\n :param query: SQL query string\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n :type udf_resources: tuple\n :param udf_resources: An iterable of\n :class:`google.cloud.bigquery._helpers.UDFResource`\n (empty by default)\n \"\"\"\n _JOB_TYPE = 'query'\n _UDF_KEY = 'userDefinedFunctionResources'\n\n def __init__(self, name, query, client, udf_resources=()):\n super(QueryJob, self).__init__(name, client)\n self.query = query\n self.udf_resources = udf_resources\n self._configuration = _AsyncQueryConfiguration()\n\n allow_large_results = _TypedProperty('allow_large_results', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.allowLargeResults\n \"\"\"\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.createDisposition\n \"\"\"\n\n default_dataset = _TypedProperty('default_dataset', Dataset)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset\n \"\"\"\n\n destination = _TypedProperty('destination', Table)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.destinationTable\n \"\"\"\n\n flatten_results = _TypedProperty('flatten_results', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.flattenResults\n \"\"\"\n\n 
priority = QueryPriority('priority')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.priority\n \"\"\"\n\n udf_resources = UDFResourcesProperty()\n\n use_query_cache = _TypedProperty('use_query_cache', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.useQueryCache\n \"\"\"\n\n use_legacy_sql = _TypedProperty('use_legacy_sql', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/\\\n reference/v2/jobs#configuration.query.useLegacySql\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.writeDisposition\n \"\"\"\n\n maximum_billing_tier = _TypedProperty('maximum_billing_tier', int)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.maximumBillingTier\n \"\"\"\n\n maximum_bytes_billed = _TypedProperty('maximum_bytes_billed', int)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.maximumBytesBilled\n \"\"\"\n\n def _destination_table_resource(self):\n \"\"\"Create a JSON resource for the destination table.\n\n Helper for :meth:`_populate_config_resource` and\n :meth:`_scrub_local_properties`\n \"\"\"\n if self.destination is not None:\n return {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n }\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.allow_large_results is not None:\n configuration['allowLargeResults'] = self.allow_large_results\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.default_dataset is not None:\n configuration['defaultDataset'] = {\n 'projectId': self.default_dataset.project,\n 'datasetId': self.default_dataset.name,\n }\n if self.destination is not None:\n table_res = self._destination_table_resource()\n configuration['destinationTable'] = table_res\n if self.flatten_results is not None:\n configuration['flattenResults'] = self.flatten_results\n if self.priority is not None:\n configuration['priority'] = self.priority\n if self.use_query_cache is not None:\n configuration['useQueryCache'] = self.use_query_cache\n if self.use_legacy_sql is not None:\n configuration['useLegacySql'] = self.use_legacy_sql\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n if self.maximum_billing_tier is not None:\n configuration['maximumBillingTier'] = self.maximum_billing_tier\n if self.maximum_bytes_billed is not None:\n configuration['maximumBytesBilled'] = self.maximum_bytes_billed\n if len(self._udf_resources) > 0:\n configuration[self._UDF_KEY] = _build_udf_resources(\n self._udf_resources)\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'query': self.query,\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n \"\"\"\n configuration = cleaned['configuration']['query']\n dest_remote = configuration.get('destinationTable')\n\n if dest_remote is None:\n if self.destination is not None:\n del self.destination\n else:\n dest_local = self._destination_table_resource()\n if dest_remote != dest_local:\n dataset = self._client.dataset(dest_remote['datasetId'])\n self.destination = dataset.table(dest_remote['tableId'])\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.RunAsyncQueryJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n query = config['query']\n job = cls(name, query, client=client)\n job._set_properties(resource)\n return job\n\n def results(self):\n \"\"\"Construct a QueryResults instance, bound to this job.\n\n :rtype: :class:`~google.cloud.bigquery.query.QueryResults`\n :returns: results instance\n \"\"\"\n from google.cloud.bigquery.query import QueryResults\n return QueryResults.from_query_job(self)\n", "path": "google/cloud/bigquery/job.py" } ]
[ { "content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Jobs.\"\"\"\n\nimport six\n\nfrom google.cloud.exceptions import NotFound\nfrom google.cloud._helpers import _datetime_from_microseconds\nfrom google.cloud.bigquery.dataset import Dataset\nfrom google.cloud.bigquery.schema import SchemaField\nfrom google.cloud.bigquery.table import Table\nfrom google.cloud.bigquery.table import _build_schema_resource\nfrom google.cloud.bigquery.table import _parse_schema_resource\nfrom google.cloud.bigquery._helpers import UDFResourcesProperty\nfrom google.cloud.bigquery._helpers import _EnumProperty\nfrom google.cloud.bigquery._helpers import _TypedProperty\nfrom google.cloud.bigquery._helpers import _build_udf_resources\n\n\nclass Compression(_EnumProperty):\n \"\"\"Pseudo-enum for ``compression`` properties.\"\"\"\n GZIP = 'GZIP'\n NONE = 'NONE'\n ALLOWED = (GZIP, NONE)\n\n\nclass CreateDisposition(_EnumProperty):\n \"\"\"Pseudo-enum for ``create_disposition`` properties.\"\"\"\n CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'\n CREATE_NEVER = 'CREATE_NEVER'\n ALLOWED = (CREATE_IF_NEEDED, CREATE_NEVER)\n\n\nclass DestinationFormat(_EnumProperty):\n \"\"\"Pseudo-enum for ``destination_format`` properties.\"\"\"\n CSV = 'CSV'\n NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'\n AVRO = 'AVRO'\n ALLOWED = (CSV, NEWLINE_DELIMITED_JSON, AVRO)\n\n\nclass Encoding(_EnumProperty):\n \"\"\"Pseudo-enum for ``encoding`` properties.\"\"\"\n UTF_8 = 'UTF-8'\n ISO_8559_1 = 'ISO-8559-1'\n ALLOWED = (UTF_8, ISO_8559_1)\n\n\nclass QueryPriority(_EnumProperty):\n \"\"\"Pseudo-enum for ``QueryJob.priority`` property.\"\"\"\n INTERACTIVE = 'INTERACTIVE'\n BATCH = 'BATCH'\n ALLOWED = (INTERACTIVE, BATCH)\n\n\nclass SourceFormat(_EnumProperty):\n \"\"\"Pseudo-enum for ``source_format`` properties.\"\"\"\n CSV = 'CSV'\n DATASTORE_BACKUP = 'DATASTORE_BACKUP'\n NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'\n ALLOWED = (CSV, DATASTORE_BACKUP, NEWLINE_DELIMITED_JSON)\n\n\nclass WriteDisposition(_EnumProperty):\n \"\"\"Pseudo-enum for ``write_disposition`` properties.\"\"\"\n WRITE_APPEND = 'WRITE_APPEND'\n WRITE_TRUNCATE = 'WRITE_TRUNCATE'\n WRITE_EMPTY = 'WRITE_EMPTY'\n ALLOWED = (WRITE_APPEND, WRITE_TRUNCATE, WRITE_EMPTY)\n\n\nclass _BaseJob(object):\n \"\"\"Base class for jobs.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n def __init__(self, client):\n self._client = client\n self._properties = {}\n\n @property\n def project(self):\n \"\"\"Project bound to the job.\n\n :rtype: string\n :returns: the project (derived from the client).\n \"\"\"\n return self._client.project\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :rtype: :class:`google.cloud.bigquery.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n\nclass _AsyncJob(_BaseJob):\n \"\"\"Base class for asynchronous jobs.\n\n :type name: string\n :param name: the name of the job\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n def __init__(self, name, client):\n super(_AsyncJob, self).__init__(client)\n self.name = name\n\n @property\n def job_type(self):\n \"\"\"Type of job\n\n :rtype: string\n :returns: one of 'load', 'copy', 'extract', 'query'\n \"\"\"\n return self._JOB_TYPE\n\n @property\n def path(self):\n \"\"\"URL path for the job's APIs.\n\n :rtype: string\n :returns: the path based on project and job name.\n \"\"\"\n return '/projects/%s/jobs/%s' % (self.project, self.name)\n\n @property\n def etag(self):\n \"\"\"ETag for the job resource.\n\n :rtype: string, or ``NoneType``\n :returns: the ETag (None until set from the server).\n \"\"\"\n return self._properties.get('etag')\n\n @property\n def self_link(self):\n \"\"\"URL for the job resource.\n\n :rtype: string, or ``NoneType``\n :returns: the URL (None until set from the server).\n \"\"\"\n return self._properties.get('selfLink')\n\n @property\n def user_email(self):\n \"\"\"E-mail address of user who submitted the job.\n\n :rtype: string, or ``NoneType``\n :returns: the URL (None until set from the server).\n \"\"\"\n return self._properties.get('user_email')\n\n @property\n def created(self):\n \"\"\"Datetime at which the job was created.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the creation time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('creationTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def started(self):\n \"\"\"Datetime at which the job was started.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the start time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('startTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def ended(self):\n \"\"\"Datetime at which the job finished.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the end time (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n millis = statistics.get('endTime')\n if millis is not None:\n return _datetime_from_microseconds(millis * 1000.0)\n\n @property\n def error_result(self):\n \"\"\"Error information about the job as a whole.\n\n :rtype: mapping, or ``NoneType``\n :returns: the error information (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('errorResult')\n\n @property\n def errors(self):\n \"\"\"Information about individual errors generated by the job.\n\n :rtype: list of mappings, or ``NoneType``\n :returns: the error information (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('errors')\n\n @property\n 
def state(self):\n \"\"\"Status of the job.\n\n :rtype: string, or ``NoneType``\n :returns: the state (None until set from the server).\n \"\"\"\n status = self._properties.get('status')\n if status is not None:\n return status.get('state')\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\"\"\"\n pass\n\n def _set_properties(self, api_response):\n \"\"\"Update properties from resource in body of ``api_response``\n\n :type api_response: httplib2.Response\n :param api_response: response returned from an API call\n \"\"\"\n cleaned = api_response.copy()\n self._scrub_local_properties(cleaned)\n\n statistics = cleaned.get('statistics', {})\n if 'creationTime' in statistics:\n statistics['creationTime'] = float(statistics['creationTime'])\n if 'startTime' in statistics:\n statistics['startTime'] = float(statistics['startTime'])\n if 'endTime' in statistics:\n statistics['endTime'] = float(statistics['endTime'])\n\n self._properties.clear()\n self._properties.update(cleaned)\n\n @classmethod\n def _get_resource_config(cls, resource):\n \"\"\"Helper for :meth:`from_api_repr`\n\n :type resource: dict\n :param resource: resource for the job\n\n :rtype: dict\n :returns: tuple (string, dict), where the first element is the\n job name and the second contains job-specific configuration.\n :raises: :class:`KeyError` if the resource has no identifier, or\n is missing the appropriate configuration.\n \"\"\"\n if ('jobReference' not in resource or\n 'jobId' not in resource['jobReference']):\n raise KeyError('Resource lacks required identity information: '\n '[\"jobReference\"][\"jobId\"]')\n name = resource['jobReference']['jobId']\n if ('configuration' not in resource or\n cls._JOB_TYPE not in resource['configuration']):\n raise KeyError('Resource lacks required configuration: '\n '[\"configuration\"][\"%s\"]' % cls._JOB_TYPE)\n config = resource['configuration'][cls._JOB_TYPE]\n return name, config\n\n def begin(self, client=None):\n \"\"\"API call: begin the job via a POST request\n\n See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :raises: :exc:`ValueError` if the job has already begin.\n \"\"\"\n if self.state is not None:\n raise ValueError(\"Job already begun.\")\n\n client = self._require_client(client)\n path = '/projects/%s/jobs' % (self.project,)\n api_response = client.connection.api_request(\n method='POST', path=path, data=self._build_resource())\n self._set_properties(api_response)\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the job via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/get\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :rtype: bool\n :returns: Boolean indicating existence of the job.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.connection.api_request(method='GET', path=self.path,\n query_params={'fields': 'id'})\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: refresh job properties via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/get\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n api_response = client.connection.api_request(\n method='GET', path=self.path)\n self._set_properties(api_response)\n\n def cancel(self, client=None):\n \"\"\"API call: cancel job via a POST request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs/cancel\n\n :type client: :class:`~google.cloud.bigquery.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n api_response = client.connection.api_request(\n method='POST', path='%s/cancel' % (self.path,))\n self._set_properties(api_response['job'])\n\n\nclass _LoadConfiguration(object):\n \"\"\"User-settable configuration options for load jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _allow_jagged_rows = None\n _allow_quoted_newlines = None\n _create_disposition = None\n _encoding = None\n _field_delimiter = None\n _ignore_unknown_values = None\n _max_bad_records = None\n _quote_character = None\n _skip_leading_rows = None\n _source_format = None\n _write_disposition = None\n\n\nclass LoadTableFromStorageJob(_AsyncJob):\n \"\"\"Asynchronous job for loading data into a table from CloudStorage.\n\n :type name: string\n :param name: the name of the job\n\n :type destination: :class:`google.cloud.bigquery.table.Table`\n :param destination: Table into which data is to be loaded.\n\n :type source_uris: sequence of string\n :param source_uris: URIs of one or more data files to be loaded, in\n format ``gs://<bucket_name>/<object_name_or_glob>``.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n :type schema: list of :class:`google.cloud.bigquery.table.SchemaField`\n :param schema: The job's schema\n \"\"\"\n\n _schema = None\n _JOB_TYPE = 'load'\n\n def __init__(self, name, destination, source_uris, client, schema=()):\n super(LoadTableFromStorageJob, self).__init__(name, client)\n self.destination = destination\n self.source_uris = source_uris\n # Let the @property do validation.\n self.schema = schema\n self._configuration = _LoadConfiguration()\n\n @property\n def schema(self):\n \"\"\"Table's schema.\n\n :rtype: list of :class:`SchemaField`\n :returns: fields describing the schema\n \"\"\"\n return list(self._schema)\n\n @schema.setter\n def schema(self, value):\n \"\"\"Update table's schema\n\n :type value: list of :class:`SchemaField`\n :param value: fields describing the schema\n\n :raises: TypeError if 'value' is not a sequence, or ValueError if\n any item in the sequence is not a SchemaField\n \"\"\"\n if not all(isinstance(field, SchemaField) for field in value):\n raise 
ValueError('Schema items must be fields')\n self._schema = tuple(value)\n\n @property\n def input_file_bytes(self):\n \"\"\"Count of bytes loaded from source files.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['inputFileBytes'])\n\n @property\n def input_files(self):\n \"\"\"Count of source files.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['inputFiles'])\n\n @property\n def output_bytes(self):\n \"\"\"Count of bytes saved to destination table.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['outputBytes'])\n\n @property\n def output_rows(self):\n \"\"\"Count of rows saved to destination table.\n\n :rtype: integer, or ``NoneType``\n :returns: the count (None until set from the server).\n \"\"\"\n statistics = self._properties.get('statistics')\n if statistics is not None:\n return int(statistics['load']['outputRows'])\n\n allow_jagged_rows = _TypedProperty('allow_jagged_rows', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowJaggedRows\n \"\"\"\n\n allow_quoted_newlines = _TypedProperty('allow_quoted_newlines', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowQuotedNewlines\n \"\"\"\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition\n \"\"\"\n\n encoding = Encoding('encoding')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding\n \"\"\"\n\n field_delimiter = _TypedProperty('field_delimiter', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.fieldDelimiter\n \"\"\"\n\n ignore_unknown_values = _TypedProperty('ignore_unknown_values', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.ignoreUnknownValues\n \"\"\"\n\n max_bad_records = _TypedProperty('max_bad_records', six.integer_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.maxBadRecords\n \"\"\"\n\n quote_character = _TypedProperty('quote_character', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.quote\n \"\"\"\n\n skip_leading_rows = _TypedProperty('skip_leading_rows', six.integer_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.skipLeadingRows\n \"\"\"\n\n source_format = SourceFormat('source_format')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.sourceFormat\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.allow_jagged_rows is not None:\n configuration['allowJaggedRows'] = 
self.allow_jagged_rows\n if self.allow_quoted_newlines is not None:\n configuration['allowQuotedNewlines'] = self.allow_quoted_newlines\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.encoding is not None:\n configuration['encoding'] = self.encoding\n if self.field_delimiter is not None:\n configuration['fieldDelimiter'] = self.field_delimiter\n if self.ignore_unknown_values is not None:\n configuration['ignoreUnknownValues'] = self.ignore_unknown_values\n if self.max_bad_records is not None:\n configuration['maxBadRecords'] = self.max_bad_records\n if self.quote_character is not None:\n configuration['quote'] = self.quote_character\n if self.skip_leading_rows is not None:\n configuration['skipLeadingRows'] = self.skip_leading_rows\n if self.source_format is not None:\n configuration['sourceFormat'] = self.source_format\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceUris': self.source_uris,\n 'destinationTable': {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n },\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n if len(self.schema) > 0:\n configuration['schema'] = {\n 'fields': _build_schema_resource(self.schema)}\n\n return resource\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\"\"\"\n schema = cleaned.pop('schema', {'fields': ()})\n self.schema = _parse_schema_resource(schema)\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.LoadTableFromStorageJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n dest_config = config['destinationTable']\n dataset = Dataset(dest_config['datasetId'], client)\n destination = Table(dest_config['tableId'], dataset)\n source_urls = config.get('sourceUris', ())\n job = cls(name, destination, source_urls, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _CopyConfiguration(object):\n \"\"\"User-settable configuration options for copy jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _create_disposition = None\n _write_disposition = None\n\n\nclass CopyJob(_AsyncJob):\n \"\"\"Asynchronous job: copy data into a table from other tables.\n\n :type name: string\n :param name: the name of the job\n\n :type destination: :class:`google.cloud.bigquery.table.Table`\n :param destination: Table into which data is to be loaded.\n\n :type sources: list of :class:`google.cloud.bigquery.table.Table`\n :param sources: Table into which data is to be loaded.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n\n _JOB_TYPE = 'copy'\n\n def __init__(self, name, destination, sources, client):\n super(CopyJob, self).__init__(name, client)\n self.destination = destination\n self.sources = sources\n self._configuration = _CopyConfiguration()\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.createDisposition\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.writeDisposition\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n source_refs = [{\n 'projectId': table.project,\n 'datasetId': table.dataset_name,\n 'tableId': table.name,\n } for table in self.sources]\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceTables': source_refs,\n 'destinationTable': {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n },\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.CopyJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n dest_config = config['destinationTable']\n dataset = Dataset(dest_config['datasetId'], client)\n destination = Table(dest_config['tableId'], dataset)\n sources = []\n for source_config in config['sourceTables']:\n dataset = Dataset(source_config['datasetId'], client)\n sources.append(Table(source_config['tableId'], dataset))\n job = cls(name, destination, sources, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _ExtractConfiguration(object):\n \"\"\"User-settable configuration options for extract jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _compression = None\n _destination_format = None\n _field_delimiter = None\n _print_header = None\n\n\nclass ExtractTableToStorageJob(_AsyncJob):\n \"\"\"Asynchronous job: extract data from a table into Cloud Storage.\n\n :type name: string\n :param name: the name of the job\n\n :type source: :class:`google.cloud.bigquery.table.Table`\n :param source: Table into which data is to be loaded.\n\n :type destination_uris: list of string\n :param destination_uris: URIs describing Cloud Storage blobs into which\n extracted data will be written, in format\n ``gs://<bucket_name>/<object_name_or_glob>``.\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n \"\"\"\n _JOB_TYPE = 'extract'\n\n def __init__(self, name, source, destination_uris, client):\n super(ExtractTableToStorageJob, self).__init__(name, client)\n self.source = source\n self.destination_uris = destination_uris\n self._configuration = _ExtractConfiguration()\n\n compression = Compression('compression')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.compression\n \"\"\"\n\n destination_format = DestinationFormat('destination_format')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.destinationFormat\n \"\"\"\n\n field_delimiter = _TypedProperty('field_delimiter', six.string_types)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.fieldDelimiter\n \"\"\"\n\n print_header = _TypedProperty('print_header', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.printHeader\n \"\"\"\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.compression is not None:\n configuration['compression'] = self.compression\n if self.destination_format is not None:\n configuration['destinationFormat'] = self.destination_format\n if self.field_delimiter is not None:\n configuration['fieldDelimiter'] = self.field_delimiter\n if self.print_header is not None:\n configuration['printHeader'] = self.print_header\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n source_ref = {\n 'projectId': self.source.project,\n 'datasetId': 
self.source.dataset_name,\n 'tableId': self.source.name,\n }\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'sourceTable': source_ref,\n 'destinationUris': self.destination_uris,\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n .. note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.ExtractTableToStorageJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n source_config = config['sourceTable']\n dataset = Dataset(source_config['datasetId'], client)\n source = Table(source_config['tableId'], dataset)\n destination_uris = config['destinationUris']\n job = cls(name, source, destination_uris, client=client)\n job._set_properties(resource)\n return job\n\n\nclass _AsyncQueryConfiguration(object):\n \"\"\"User-settable configuration options for asynchronous query jobs.\n\n Values which are ``None`` -> server defaults.\n \"\"\"\n _allow_large_results = None\n _create_disposition = None\n _default_dataset = None\n _destination = None\n _flatten_results = None\n _priority = None\n _use_query_cache = None\n _use_legacy_sql = None\n _write_disposition = None\n _maximum_billing_tier = None\n _maximum_bytes_billed = None\n\n\nclass QueryJob(_AsyncJob):\n \"\"\"Asynchronous job: query tables.\n\n :type name: string\n :param name: the name of the job\n\n :type query: string\n :param query: SQL query string\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n :type udf_resources: tuple\n :param udf_resources: An iterable of\n :class:`google.cloud.bigquery._helpers.UDFResource`\n (empty by default)\n \"\"\"\n _JOB_TYPE = 'query'\n _UDF_KEY = 'userDefinedFunctionResources'\n\n def __init__(self, name, query, client, udf_resources=()):\n super(QueryJob, self).__init__(name, client)\n self.query = query\n self.udf_resources = udf_resources\n self._configuration = _AsyncQueryConfiguration()\n\n allow_large_results = _TypedProperty('allow_large_results', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.allowLargeResults\n \"\"\"\n\n create_disposition = CreateDisposition('create_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.createDisposition\n \"\"\"\n\n default_dataset = _TypedProperty('default_dataset', Dataset)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset\n \"\"\"\n\n destination = _TypedProperty('destination', Table)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.destinationTable\n \"\"\"\n\n flatten_results = _TypedProperty('flatten_results', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.flattenResults\n \"\"\"\n\n 
priority = QueryPriority('priority')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.priority\n \"\"\"\n\n udf_resources = UDFResourcesProperty()\n\n use_query_cache = _TypedProperty('use_query_cache', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.useQueryCache\n \"\"\"\n\n use_legacy_sql = _TypedProperty('use_legacy_sql', bool)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/\\\n reference/v2/jobs#configuration.query.useLegacySql\n \"\"\"\n\n write_disposition = WriteDisposition('write_disposition')\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.writeDisposition\n \"\"\"\n\n maximum_billing_tier = _TypedProperty('maximum_billing_tier', int)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.maximumBillingTier\n \"\"\"\n\n maximum_bytes_billed = _TypedProperty('maximum_bytes_billed', int)\n \"\"\"See:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.maximumBytesBilled\n \"\"\"\n\n def _destination_table_resource(self):\n \"\"\"Create a JSON resource for the destination table.\n\n Helper for :meth:`_populate_config_resource` and\n :meth:`_scrub_local_properties`\n \"\"\"\n if self.destination is not None:\n return {\n 'projectId': self.destination.project,\n 'datasetId': self.destination.dataset_name,\n 'tableId': self.destination.name,\n }\n\n def _populate_config_resource(self, configuration):\n \"\"\"Helper for _build_resource: copy config properties to resource\"\"\"\n if self.allow_large_results is not None:\n configuration['allowLargeResults'] = self.allow_large_results\n if self.create_disposition is not None:\n configuration['createDisposition'] = self.create_disposition\n if self.default_dataset is not None:\n configuration['defaultDataset'] = {\n 'projectId': self.default_dataset.project,\n 'datasetId': self.default_dataset.name,\n }\n if self.destination is not None:\n table_res = self._destination_table_resource()\n configuration['destinationTable'] = table_res\n if self.flatten_results is not None:\n configuration['flattenResults'] = self.flatten_results\n if self.priority is not None:\n configuration['priority'] = self.priority\n if self.use_query_cache is not None:\n configuration['useQueryCache'] = self.use_query_cache\n if self.use_legacy_sql is not None:\n configuration['useLegacySql'] = self.use_legacy_sql\n if self.write_disposition is not None:\n configuration['writeDisposition'] = self.write_disposition\n if self.maximum_billing_tier is not None:\n configuration['maximumBillingTier'] = self.maximum_billing_tier\n if self.maximum_bytes_billed is not None:\n configuration['maximumBytesBilled'] = self.maximum_bytes_billed\n if len(self._udf_resources) > 0:\n configuration[self._UDF_KEY] = _build_udf_resources(\n self._udf_resources)\n\n def _build_resource(self):\n \"\"\"Generate a resource for :meth:`begin`.\"\"\"\n\n resource = {\n 'jobReference': {\n 'projectId': self.project,\n 'jobId': self.name,\n },\n 'configuration': {\n self._JOB_TYPE: {\n 'query': self.query,\n },\n },\n }\n configuration = resource['configuration'][self._JOB_TYPE]\n self._populate_config_resource(configuration)\n\n return resource\n\n def _scrub_local_properties(self, cleaned):\n \"\"\"Helper: handle subclass properties in cleaned.\n\n .. 
note:\n\n This method assumes that the project found in the resource matches\n the client's project.\n \"\"\"\n configuration = cleaned['configuration']['query']\n dest_remote = configuration.get('destinationTable')\n\n if dest_remote is None:\n if self.destination is not None:\n del self.destination\n else:\n dest_local = self._destination_table_resource()\n if dest_remote != dest_local:\n dataset = self._client.dataset(dest_remote['datasetId'])\n self.destination = dataset.table(dest_remote['tableId'])\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a job given its API representation\n\n :type resource: dict\n :param resource: dataset job representation returned from the API\n\n :type client: :class:`google.cloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`google.cloud.bigquery.job.RunAsyncQueryJob`\n :returns: Job parsed from ``resource``.\n \"\"\"\n name, config = cls._get_resource_config(resource)\n query = config['query']\n job = cls(name, query, client=client)\n job._set_properties(resource)\n return job\n\n def results(self):\n \"\"\"Construct a QueryResults instance, bound to this job.\n\n :rtype: :class:`~google.cloud.bigquery.query.QueryResults`\n :returns: results instance\n \"\"\"\n from google.cloud.bigquery.query import QueryResults\n return QueryResults.from_query_job(self)\n", "path": "google/cloud/bigquery/job.py" } ]
diff --git a/google/cloud/bigquery/job.py b/google/cloud/bigquery/job.py index a4eb745b4942..5a88f8316fdb 100644 --- a/google/cloud/bigquery/job.py +++ b/google/cloud/bigquery/job.py @@ -375,7 +375,7 @@ def cancel(self, client=None): api_response = client.connection.api_request( method='POST', path='%s/cancel' % (self.path,)) - self._set_properties(api_response) + self._set_properties(api_response['job']) class _LoadConfiguration(object): diff --git a/system_tests/bigquery.py b/system_tests/bigquery.py index c417e0538b54..1c566b4e4e0d 100644 --- a/system_tests/bigquery.py +++ b/system_tests/bigquery.py @@ -435,3 +435,37 @@ def _job_done(instance): by_age = operator.itemgetter(1) self.assertEqual(sorted(rows, key=by_age), sorted(ROWS, key=by_age)) + + def test_job_cancel(self): + DATASET_NAME = _make_dataset_name('job_cancel') + JOB_NAME = 'fetch_' + DATASET_NAME + TABLE_NAME = 'test_table' + QUERY = 'SELECT * FROM %s.%s' % (DATASET_NAME, TABLE_NAME) + + dataset = Config.CLIENT.dataset(DATASET_NAME) + + retry_403(dataset.create)() + self.to_delete.append(dataset) + + full_name = bigquery.SchemaField('full_name', 'STRING', + mode='REQUIRED') + age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED') + table = dataset.table(TABLE_NAME, schema=[full_name, age]) + table.create() + self.to_delete.insert(0, table) + + job = Config.CLIENT.run_async_query(JOB_NAME, QUERY) + job.begin() + job.cancel() + + def _job_done(instance): + return instance.state in ('DONE', 'done') + + retry = RetryInstanceState(_job_done, max_tries=8) + retry(job.reload)() + + # The `cancel` API doesn't leave any reliable traces on + # the status of the job resource, so we can't really assert for + # them here. The best we can do is not that the API call didn't + # raise an error, and that the job completed (in the `retry()` + # above). diff --git a/unit_tests/bigquery/test_job.py b/unit_tests/bigquery/test_job.py index f4add8d8f7be..2d63f7b5bada 100644 --- a/unit_tests/bigquery/test_job.py +++ b/unit_tests/bigquery/test_job.py @@ -601,8 +601,9 @@ def test_reload_w_alternate_client(self): def test_cancel_w_bound_client(self): PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) + RESOURCE = self._makeResource(ended=True) + RESPONSE = {'job': RESOURCE} + conn = _Connection(RESPONSE) client = _Client(project=self.PROJECT, connection=conn) table = _Table() job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) @@ -617,10 +618,11 @@ def test_cancel_w_bound_client(self): def test_cancel_w_alternate_client(self): PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() + RESOURCE = self._makeResource(ended=True) + RESPONSE = {'job': RESOURCE} conn1 = _Connection() client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) + conn2 = _Connection(RESPONSE) client2 = _Client(project=self.PROJECT, connection=conn2) table = _Table() job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1)
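The diff above records the fix for the preceding BigQuery record: the `jobs.cancel` endpoint returns the job resource wrapped under a `'job'` key, so `_set_properties` has to be fed `api_response['job']` rather than the raw response. A minimal sketch of that unwrapping, using a stubbed connection in place of the real client (all names below are illustrative, not part of the library):

```python
class _StubConnection:
    """Stands in for client.connection; returns a canned jobs.cancel response."""

    def api_request(self, method, path):
        # The cancel endpoint wraps the job resource under a 'job' key.
        return {'job': {'status': {'state': 'DONE'}}}


def cancel_job(connection, job_path, set_properties):
    """Mimics the fixed cancel(): unwrap 'job' before applying properties."""
    api_response = connection.api_request(
        method='POST', path='%s/cancel' % (job_path,))
    set_properties(api_response['job'])  # the fix: not api_response itself


captured = {}
cancel_job(_StubConnection(), '/projects/p/jobs/j', captured.update)
print(captured['status']['state'])  # DONE
```

This mirrors the `RESPONSE = {'job': RESOURCE}` shape used in the updated unit tests in the diff.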
borgbackup__borg-5109
`pathconf(..., _PC_NAME_MAX) == 0` in borg mount When I use `pathconf(..., _PC_NAME_MAX)` to allocate space for `struct dirent` as described in [`man 3 readdir`](https://linux.die.net/man/3/readdir), I get back 0. [`man 3 pathconf`](https://linux.die.net/man/3/pathconf) says it should return -1 on error. Original bindfs bug: https://github.com/mpartel/bindfs/issues/54 by @mengelmann
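To make the report concrete, here is a minimal reproduction sketch of the behaviour described: querying `_PC_NAME_MAX` on a `borg mount` mountpoint and getting `0` back instead of a positive limit or an error. The mountpoint path is an illustrative assumption, not taken from the report:

```python
import os

# Hypothetical path where a repository/archive has been mounted via `borg mount`.
MOUNTPOINT = '/tmp/borg-mnt'

# Per `man 3 pathconf`, _PC_NAME_MAX should report the maximum filename length
# (commonly 255) or fail with -1/errno; the report says a borg mount yields 0.
try:
    name_max = os.pathconf(MOUNTPOINT, 'PC_NAME_MAX')
    print('PC_NAME_MAX =', name_max)  # observed in the report: 0
except OSError as exc:
    print('pathconf failed:', exc)
```

On a FUSE filesystem this limit is typically taken from the `f_namemax` field of the `statvfs` reply; the `statfs()` method in the `src/borg/fuse.py` listing below fills in block and file counts but never sets `f_namemax`, which is consistent with the reported `0`.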
[ { "content": "import errno\nimport io\nimport os\nimport stat\nimport struct\nimport sys\nimport tempfile\nimport time\nfrom collections import defaultdict\nfrom signal import SIGINT\nfrom distutils.version import LooseVersion\n\nimport llfuse\n\nfrom .logger import create_logger\nlogger = create_logger()\n\nfrom .crypto.low_level import blake2b_128\nfrom .archiver import Archiver\nfrom .archive import Archive\nfrom .hashindex import FuseVersionsIndex\nfrom .helpers import daemonize, hardlinkable, signal_handler, format_file_size\nfrom .helpers import msgpack\nfrom .item import Item\nfrom .lrucache import LRUCache\nfrom .remote import RemoteRepository\n\n# Does this version of llfuse support ns precision?\nhave_fuse_xtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')\n\n# Does this version of llfuse support birthtime?\nhave_fuse_birthtime = hasattr(llfuse.EntryAttributes, 'st_birthtime') # never?\nhave_fuse_birthtime_ns = hasattr(llfuse.EntryAttributes, 'st_birthtime_ns') # since llfuse 1.3\n\nfuse_version = LooseVersion(getattr(llfuse, '__version__', '0.1'))\nif fuse_version >= '0.42':\n def fuse_main():\n return llfuse.main(workers=1)\nelse:\n def fuse_main():\n llfuse.main(single=True)\n return None\n\n# size of some LRUCaches (1 element per simultaneously open file)\n# note: _inode_cache might have rather large elements - Item.chunks can be large!\n# also, simultaneously reading too many files should be avoided anyway.\n# thus, do not set FILES to high values.\nFILES = 4\n\n\nclass ItemCache:\n \"\"\"\n This is the \"meat\" of the file system's metadata storage.\n\n This class generates inode numbers that efficiently index items in archives,\n and retrieves items from these inode numbers.\n \"\"\"\n\n # 2 MiB are approximately ~230000 items (depends on the average number of items per metadata chunk).\n #\n # Since growing a bytearray has to copy it, growing it will converge to O(n^2), however,\n # this is not yet relevant due to the swiftness of copying memory. If it becomes an issue,\n # use an anonymous mmap and just resize that (or, if on 64 bit, make it so big you never need\n # to resize it in the first place; that's free).\n GROW_META_BY = 2 * 1024 * 1024\n\n indirect_entry_struct = struct.Struct('=cII')\n assert indirect_entry_struct.size == 9\n\n def __init__(self, decrypted_repository):\n self.decrypted_repository = decrypted_repository\n # self.meta, the \"meta-array\" is a densely packed array of metadata about where items can be found.\n # It is indexed by the inode number minus self.offset. (This is in a way eerily similar to how the first\n # unices did this).\n # The meta-array contains chunk IDs and item entries (described in iter_archive_items).\n # The chunk IDs are referenced by item entries through relative offsets,\n # which are bounded by the metadata chunk size.\n self.meta = bytearray()\n # The current write offset in self.meta\n self.write_offset = 0\n\n # Offset added to meta-indices, resulting in inodes,\n # or subtracted from inodes, resulting in meta-indices.\n # XXX: Merge FuseOperations.items and ItemCache to avoid\n # this implicit limitation / hack (on the number of synthetic inodes, degenerate\n # cases can inflate their number far beyond the number of archives).\n self.offset = 1000000\n\n # A temporary file that contains direct items, i.e. 
items directly cached in this layer.\n # These are items that span more than one chunk and thus cannot be efficiently cached\n # by the object cache (self.decrypted_repository), which would require variable-length structures;\n # possible but not worth the effort, see iter_archive_items.\n self.fd = tempfile.TemporaryFile(prefix='borg-tmp')\n\n # A small LRU cache for chunks requested by ItemCache.get() from the object cache,\n # this significantly speeds up directory traversal and similar operations which\n # tend to re-read the same chunks over and over.\n # The capacity is kept low because increasing it does not provide any significant advantage,\n # but makes LRUCache's square behaviour noticeable and consumes more memory.\n self.chunks = LRUCache(capacity=10, dispose=lambda _: None)\n\n # Instrumentation\n # Count of indirect items, i.e. data is cached in the object cache, not directly in this cache\n self.indirect_items = 0\n # Count of direct items, i.e. data is in self.fd\n self.direct_items = 0\n\n def get(self, inode):\n offset = inode - self.offset\n if offset < 0:\n raise ValueError('ItemCache.get() called with an invalid inode number')\n if self.meta[offset] == ord(b'I'):\n _, chunk_id_relative_offset, chunk_offset = self.indirect_entry_struct.unpack_from(self.meta, offset)\n chunk_id_offset = offset - chunk_id_relative_offset\n # bytearray slices are bytearrays as well, explicitly convert to bytes()\n chunk_id = bytes(self.meta[chunk_id_offset:chunk_id_offset + 32])\n chunk = self.chunks.get(chunk_id)\n if not chunk:\n csize, chunk = next(self.decrypted_repository.get_many([chunk_id]))\n self.chunks[chunk_id] = chunk\n data = memoryview(chunk)[chunk_offset:]\n unpacker = msgpack.Unpacker()\n unpacker.feed(data)\n return Item(internal_dict=next(unpacker))\n elif self.meta[offset] == ord(b'S'):\n fd_offset = int.from_bytes(self.meta[offset + 1:offset + 9], 'little')\n self.fd.seek(fd_offset, io.SEEK_SET)\n return Item(internal_dict=next(msgpack.Unpacker(self.fd, read_size=1024)))\n else:\n raise ValueError('Invalid entry type in self.meta')\n\n def iter_archive_items(self, archive_item_ids, filter=None, consider_part_files=False):\n unpacker = msgpack.Unpacker()\n\n # Current offset in the metadata stream, which consists of all metadata chunks glued together\n stream_offset = 0\n # Offset of the current chunk in the metadata stream\n chunk_begin = 0\n # Length of the chunk preciding the current chunk\n last_chunk_length = 0\n msgpacked_bytes = b''\n\n write_offset = self.write_offset\n meta = self.meta\n pack_indirect_into = self.indirect_entry_struct.pack_into\n\n def write_bytes(append_msgpacked_bytes):\n # XXX: Future versions of msgpack include an Unpacker.tell() method that provides this for free.\n nonlocal msgpacked_bytes\n nonlocal stream_offset\n msgpacked_bytes += append_msgpacked_bytes\n stream_offset += len(append_msgpacked_bytes)\n\n for key, (csize, data) in zip(archive_item_ids, self.decrypted_repository.get_many(archive_item_ids)):\n # Store the chunk ID in the meta-array\n if write_offset + 32 >= len(meta):\n self.meta = meta = meta + bytes(self.GROW_META_BY)\n meta[write_offset:write_offset + 32] = key\n current_id_offset = write_offset\n write_offset += 32\n\n # The chunk boundaries cannot be tracked through write_bytes, because the unpack state machine\n # *can* and *will* consume partial items, so calls to write_bytes are unrelated to chunk boundaries.\n chunk_begin += last_chunk_length\n last_chunk_length = len(data)\n\n unpacker.feed(data)\n while True:\n 
try:\n item = unpacker.unpack(write_bytes)\n except msgpack.OutOfData:\n # Need more data, feed the next chunk\n break\n\n item = Item(internal_dict=item)\n if filter and not filter(item) or not consider_part_files and 'part' in item:\n msgpacked_bytes = b''\n continue\n\n current_item = msgpacked_bytes\n current_item_length = len(current_item)\n current_spans_chunks = stream_offset - current_item_length < chunk_begin\n msgpacked_bytes = b''\n\n if write_offset + 9 >= len(meta):\n self.meta = meta = meta + bytes(self.GROW_META_BY)\n\n # item entries in the meta-array come in two different flavours, both nine bytes long.\n # (1) for items that span chunks:\n #\n # 'S' + 8 byte offset into the self.fd file, where the msgpacked item starts.\n #\n # (2) for items that are completely contained in one chunk, which usually is the great majority\n # (about 700:1 for system backups)\n #\n # 'I' + 4 byte offset where the chunk ID is + 4 byte offset in the chunk\n # where the msgpacked items starts\n #\n # The chunk ID offset is the number of bytes _back_ from the start of the entry, i.e.:\n #\n # |Chunk ID| .... |S1234abcd|\n # ^------ offset ----------^\n\n if current_spans_chunks:\n pos = self.fd.seek(0, io.SEEK_END)\n self.fd.write(current_item)\n meta[write_offset:write_offset + 9] = b'S' + pos.to_bytes(8, 'little')\n self.direct_items += 1\n else:\n item_offset = stream_offset - current_item_length - chunk_begin\n pack_indirect_into(meta, write_offset, b'I', write_offset - current_id_offset, item_offset)\n self.indirect_items += 1\n inode = write_offset + self.offset\n write_offset += 9\n\n yield inode, item\n\n self.write_offset = write_offset\n\n\nclass FuseOperations(llfuse.Operations):\n \"\"\"Export archive as a FUSE filesystem\n \"\"\"\n # mount options\n allow_damaged_files = False\n versions = False\n uid_forced = None\n gid_forced = None\n umask = 0\n\n def __init__(self, key, repository, manifest, args, decrypted_repository):\n super().__init__()\n self.repository_uncached = repository\n self.decrypted_repository = decrypted_repository\n self.args = args\n self.manifest = manifest\n self.key = key\n # Maps inode numbers to Item instances. This is used for synthetic inodes,\n # i.e. file-system objects that are made up by FuseOperations and are not contained\n # in the archives. For example archive directories or intermediate directories\n # not contained in archives.\n self.items = {}\n # cache up to <FILES> Items\n self._inode_cache = LRUCache(capacity=FILES, dispose=lambda _: None)\n # _inode_count is the current count of synthetic inodes, i.e. those in self.items\n self._inode_count = 0\n # Maps inode numbers to the inode number of the parent\n self.parent = {}\n # Maps inode numbers to a dictionary mapping byte directory entry names to their inode numbers,\n # i.e. this contains all dirents of everything that is mounted. 
(It becomes really big).\n self.contents = defaultdict(dict)\n self.default_uid = os.getuid()\n self.default_gid = os.getgid()\n self.default_dir = None\n self.pending_archives = {}\n self.cache = ItemCache(decrypted_repository)\n data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))\n logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)\n self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)\n self._last_pos = LRUCache(capacity=FILES, dispose=lambda _: None)\n\n def _create_filesystem(self):\n self._create_dir(parent=1) # first call, create root dir (inode == 1)\n if self.args.location.archive:\n self.process_archive(self.args.location.archive)\n else:\n self.versions_index = FuseVersionsIndex()\n for archive in self.manifest.archives.list_considering(self.args):\n if self.versions:\n # process archives immediately\n self.process_archive(archive.name)\n else:\n # lazily load archives, create archive placeholder inode\n archive_inode = self._create_dir(parent=1, mtime=int(archive.ts.timestamp() * 1e9))\n self.contents[1][os.fsencode(archive.name)] = archive_inode\n self.pending_archives[archive_inode] = archive.name\n\n def sig_info_handler(self, sig_no, stack):\n logger.debug('fuse: %d synth inodes, %d edges (%s)',\n self._inode_count, len(self.parent),\n # getsizeof is the size of the dict itself; key and value are two small-ish integers,\n # which are shared due to code structure (this has been verified).\n format_file_size(sys.getsizeof(self.parent) + len(self.parent) * sys.getsizeof(self._inode_count)))\n logger.debug('fuse: %d pending archives', len(self.pending_archives))\n logger.debug('fuse: ItemCache %d entries (%d direct, %d indirect), meta-array size %s, direct items size %s',\n self.cache.direct_items + self.cache.indirect_items, self.cache.direct_items, self.cache.indirect_items,\n format_file_size(sys.getsizeof(self.cache.meta)),\n format_file_size(os.stat(self.cache.fd.fileno()).st_size))\n logger.debug('fuse: data cache: %d/%d entries, %s', len(self.data_cache.items()), self.data_cache._capacity,\n format_file_size(sum(len(chunk) for key, chunk in self.data_cache.items())))\n self.decrypted_repository.log_instrumentation()\n\n def mount(self, mountpoint, mount_options, foreground=False):\n \"\"\"Mount filesystem on *mountpoint* with *mount_options*.\"\"\"\n\n def pop_option(options, key, present, not_present, wanted_type, int_base=0):\n assert isinstance(options, list) # we mutate this\n for idx, option in enumerate(options):\n if option == key:\n options.pop(idx)\n return present\n if option.startswith(key + '='):\n options.pop(idx)\n value = option.split('=', 1)[1]\n if wanted_type is bool:\n v = value.lower()\n if v in ('y', 'yes', 'true', '1'):\n return True\n if v in ('n', 'no', 'false', '0'):\n return False\n raise ValueError('unsupported value in option: %s' % option)\n if wanted_type is int:\n try:\n return int(value, base=int_base)\n except ValueError:\n raise ValueError('unsupported value in option: %s' % option) from None\n try:\n return wanted_type(value)\n except ValueError:\n raise ValueError('unsupported value in option: %s' % option) from None\n else:\n return not_present\n\n # default_permissions enables permission checking by the kernel. 
Without\n # this, any umask (or uid/gid) would not have an effect and this could\n # cause security issues if used with allow_other mount option.\n # When not using allow_other or allow_root, access is limited to the\n # mounting user anyway.\n options = ['fsname=borgfs', 'ro', 'default_permissions']\n if mount_options:\n options.extend(mount_options.split(','))\n ignore_permissions = pop_option(options, 'ignore_permissions', True, False, bool)\n if ignore_permissions:\n # in case users have a use-case that requires NOT giving \"default_permissions\",\n # this is enabled by the custom \"ignore_permissions\" mount option which just\n # removes \"default_permissions\" again:\n pop_option(options, 'default_permissions', True, False, bool)\n self.allow_damaged_files = pop_option(options, 'allow_damaged_files', True, False, bool)\n self.versions = pop_option(options, 'versions', True, False, bool)\n self.uid_forced = pop_option(options, 'uid', None, None, int)\n self.gid_forced = pop_option(options, 'gid', None, None, int)\n self.umask = pop_option(options, 'umask', 0, 0, int, int_base=8) # umask is octal, e.g. 222 or 0222\n dir_uid = self.uid_forced if self.uid_forced is not None else self.default_uid\n dir_gid = self.gid_forced if self.gid_forced is not None else self.default_gid\n dir_mode = 0o40755 & ~self.umask\n self.default_dir = Item(mode=dir_mode, mtime=int(time.time() * 1e9), uid=dir_uid, gid=dir_gid)\n self._create_filesystem()\n llfuse.init(self, mountpoint, options)\n if not foreground:\n old_id, new_id = daemonize()\n if not isinstance(self.repository_uncached, RemoteRepository):\n # local repo and the locking process' PID just changed, migrate it:\n self.repository_uncached.migrate_lock(old_id, new_id)\n\n # If the file system crashes, we do not want to umount because in that\n # case the mountpoint suddenly appears to become empty. This can have\n # nasty consequences, imagine the user has e.g. 
an active rsync mirror\n # job - seeing the mountpoint empty, rsync would delete everything in the\n # mirror.\n umount = False\n try:\n with signal_handler('SIGUSR1', self.sig_info_handler), \\\n signal_handler('SIGINFO', self.sig_info_handler):\n signal = fuse_main()\n # no crash and no signal (or it's ^C and we're in the foreground) -> umount request\n umount = (signal is None or (signal == SIGINT and foreground))\n finally:\n llfuse.close(umount)\n\n def _create_dir(self, parent, mtime=None):\n \"\"\"Create directory\n \"\"\"\n ino = self.allocate_inode()\n if mtime is not None:\n self.items[ino] = Item(**self.default_dir.as_dict())\n self.items[ino].mtime = mtime\n else:\n self.items[ino] = self.default_dir\n self.parent[ino] = parent\n return ino\n\n def process_archive(self, archive_name, prefix=[]):\n \"\"\"Build FUSE inode hierarchy from archive metadata\n \"\"\"\n self.file_versions = {} # for versions mode: original path -> version\n t0 = time.perf_counter()\n archive = Archive(self.repository_uncached, self.key, self.manifest, archive_name,\n consider_part_files=self.args.consider_part_files)\n strip_components = self.args.strip_components\n matcher = Archiver.build_matcher(self.args.patterns, self.args.paths)\n partial_extract = not matcher.empty() or strip_components\n hardlink_masters = {} if partial_extract else None\n\n def peek_and_store_hardlink_masters(item, matched):\n if (partial_extract and not matched and hardlinkable(item.mode) and\n item.get('hardlink_master', True) and 'source' not in item):\n hardlink_masters[item.get('path')] = (item.get('chunks'), None)\n\n filter = Archiver.build_filter(matcher, peek_and_store_hardlink_masters, strip_components)\n for item_inode, item in self.cache.iter_archive_items(archive.metadata.items, filter=filter,\n consider_part_files=self.args.consider_part_files):\n if strip_components:\n item.path = os.sep.join(item.path.split(os.sep)[strip_components:])\n path = os.fsencode(item.path)\n is_dir = stat.S_ISDIR(item.mode)\n if is_dir:\n try:\n # This can happen if an archive was created with a command line like\n # $ borg create ... 
dir1/file dir1\n # In this case the code below will have created a default_dir inode for dir1 already.\n inode = self._find_inode(path, prefix)\n except KeyError:\n pass\n else:\n self.items[inode] = item\n continue\n segments = prefix + path.split(b'/')\n parent = 1\n for segment in segments[:-1]:\n parent = self.process_inner(segment, parent)\n self.process_leaf(segments[-1], item, parent, prefix, is_dir, item_inode,\n hardlink_masters, strip_components)\n duration = time.perf_counter() - t0\n logger.debug('fuse: process_archive completed in %.1f s for archive %s', duration, archive.name)\n\n def process_leaf(self, name, item, parent, prefix, is_dir, item_inode, hardlink_masters, stripped_components):\n path = item.path\n del item.path # save some space\n hardlink_masters = hardlink_masters or {}\n\n def file_version(item, path):\n if 'chunks' in item:\n file_id = blake2b_128(path)\n current_version, previous_id = self.versions_index.get(file_id, (0, None))\n\n chunk_ids = [chunk_id for chunk_id, _, _ in item.chunks]\n contents_id = blake2b_128(b''.join(chunk_ids))\n\n if contents_id != previous_id:\n current_version += 1\n self.versions_index[file_id] = current_version, contents_id\n\n return current_version\n\n def make_versioned_name(name, version, add_dir=False):\n if add_dir:\n # add intermediate directory with same name as filename\n path_fname = name.rsplit(b'/', 1)\n name += b'/' + path_fname[-1]\n # keep original extension at end to avoid confusing tools\n name, ext = os.path.splitext(name)\n version_enc = os.fsencode('.%05d' % version)\n return name + version_enc + ext\n\n if 'source' in item and hardlinkable(item.mode):\n source = os.sep.join(item.source.split(os.sep)[stripped_components:])\n chunks, link_target = hardlink_masters.get(item.source, (None, source))\n if link_target:\n # Hard link was extracted previously, just link\n link_target = os.fsencode(link_target)\n if self.versions:\n # adjust link target name with version\n version = self.file_versions[link_target]\n link_target = make_versioned_name(link_target, version, add_dir=True)\n try:\n inode = self._find_inode(link_target, prefix)\n except KeyError:\n logger.warning('Skipping broken hard link: %s -> %s', path, source)\n return\n item = self.get_item(inode)\n item.nlink = item.get('nlink', 1) + 1\n self.items[inode] = item\n elif chunks is not None:\n # assign chunks to this item, since the item which had the chunks was not extracted\n item.chunks = chunks\n inode = item_inode\n self.items[inode] = item\n if hardlink_masters:\n # Update master entry with extracted item path, so that following hardlinks don't extract twice.\n hardlink_masters[item.source] = (None, path)\n else:\n inode = item_inode\n\n if self.versions and not is_dir:\n parent = self.process_inner(name, parent)\n enc_path = os.fsencode(path)\n version = file_version(item, enc_path)\n if version is not None:\n # regular file, with contents - maybe a hardlink master\n name = make_versioned_name(name, version)\n self.file_versions[enc_path] = version\n\n self.parent[inode] = parent\n if name:\n self.contents[parent][name] = inode\n\n def process_inner(self, name, parent_inode):\n dir = self.contents[parent_inode]\n if name in dir:\n inode = dir[name]\n else:\n inode = self._create_dir(parent_inode)\n if name:\n dir[name] = inode\n return inode\n\n def allocate_inode(self):\n self._inode_count += 1\n return self._inode_count\n\n def statfs(self, ctx=None):\n stat_ = llfuse.StatvfsData()\n stat_.f_bsize = 512\n stat_.f_frsize = 512\n stat_.f_blocks = 
0\n stat_.f_bfree = 0\n stat_.f_bavail = 0\n stat_.f_files = 0\n stat_.f_ffree = 0\n stat_.f_favail = 0\n return stat_\n\n def get_item(self, inode):\n item = self._inode_cache.get(inode)\n if item is not None:\n return item\n try:\n # this is a cheap get-from-dictionary operation, no need to cache the result.\n return self.items[inode]\n except KeyError:\n # while self.cache does some internal caching, it has still quite some overhead, so we cache the result.\n item = self.cache.get(inode)\n self._inode_cache[inode] = item\n return item\n\n def _find_inode(self, path, prefix=[]):\n segments = prefix + path.split(b'/')\n inode = 1\n for segment in segments:\n inode = self.contents[inode][segment]\n return inode\n\n def getattr(self, inode, ctx=None):\n item = self.get_item(inode)\n entry = llfuse.EntryAttributes()\n entry.st_ino = inode\n entry.generation = 0\n entry.entry_timeout = 300\n entry.attr_timeout = 300\n entry.st_mode = item.mode & ~self.umask\n entry.st_nlink = item.get('nlink', 1)\n entry.st_uid = self.uid_forced if self.uid_forced is not None else item.uid if item.uid >= 0 else self.default_uid\n entry.st_gid = self.gid_forced if self.gid_forced is not None else item.gid if item.gid >= 0 else self.default_gid\n entry.st_rdev = item.get('rdev', 0)\n entry.st_size = item.get_size()\n entry.st_blksize = 512\n entry.st_blocks = (entry.st_size + entry.st_blksize - 1) // entry.st_blksize\n # note: older archives only have mtime (not atime nor ctime)\n mtime_ns = item.mtime\n if have_fuse_xtime_ns:\n entry.st_mtime_ns = mtime_ns\n entry.st_atime_ns = item.get('atime', mtime_ns)\n entry.st_ctime_ns = item.get('ctime', mtime_ns)\n if have_fuse_birthtime_ns:\n entry.st_birthtime_ns = item.get('birthtime', mtime_ns)\n else:\n entry.st_mtime = mtime_ns / 1e9\n entry.st_atime = item.get('atime', mtime_ns) / 1e9\n entry.st_ctime = item.get('ctime', mtime_ns) / 1e9\n if have_fuse_birthtime:\n entry.st_birthtime = item.get('birthtime', mtime_ns) / 1e9\n return entry\n\n def listxattr(self, inode, ctx=None):\n item = self.get_item(inode)\n return item.get('xattrs', {}).keys()\n\n def getxattr(self, inode, name, ctx=None):\n item = self.get_item(inode)\n try:\n return item.get('xattrs', {})[name] or b''\n except KeyError:\n raise llfuse.FUSEError(llfuse.ENOATTR) from None\n\n def _load_pending_archive(self, inode):\n # Check if this is an archive we need to load\n archive_name = self.pending_archives.pop(inode, None)\n if archive_name:\n self.process_archive(archive_name, [os.fsencode(archive_name)])\n\n def lookup(self, parent_inode, name, ctx=None):\n self._load_pending_archive(parent_inode)\n if name == b'.':\n inode = parent_inode\n elif name == b'..':\n inode = self.parent[parent_inode]\n else:\n inode = self.contents[parent_inode].get(name)\n if not inode:\n raise llfuse.FUSEError(errno.ENOENT)\n return self.getattr(inode)\n\n def open(self, inode, flags, ctx=None):\n if not self.allow_damaged_files:\n item = self.get_item(inode)\n if 'chunks_healthy' in item:\n # Processed archive items don't carry the path anymore; for converting the inode\n # to the path we'd either have to store the inverse of the current structure,\n # or search the entire archive. So we just don't print it. It's easy to correlate anyway.\n logger.warning('File has damaged (all-zero) chunks. Try running borg check --repair. 
'\n 'Mount with allow_damaged_files to read damaged files.')\n raise llfuse.FUSEError(errno.EIO)\n return inode\n\n def opendir(self, inode, ctx=None):\n self._load_pending_archive(inode)\n return inode\n\n def read(self, fh, offset, size):\n parts = []\n item = self.get_item(fh)\n\n # optimize for linear reads:\n # we cache the chunk number and the in-file offset of the chunk in _last_pos[fh]\n chunk_no, chunk_offset = self._last_pos.get(fh, (0, 0))\n if chunk_offset > offset:\n # this is not a linear read, so we lost track and need to start from beginning again...\n chunk_no, chunk_offset = (0, 0)\n\n offset -= chunk_offset\n chunks = item.chunks\n # note: using index iteration to avoid frequently copying big (sub)lists by slicing\n for idx in range(chunk_no, len(chunks)):\n id, s, csize = chunks[idx]\n if s < offset:\n offset -= s\n chunk_offset += s\n chunk_no += 1\n continue\n n = min(size, s - offset)\n if id in self.data_cache:\n data = self.data_cache[id]\n if offset + n == len(data):\n # evict fully read chunk from cache\n del self.data_cache[id]\n else:\n data = self.key.decrypt(id, self.repository_uncached.get(id))\n if offset + n < len(data):\n # chunk was only partially read, cache it\n self.data_cache[id] = data\n parts.append(data[offset:offset + n])\n offset = 0\n size -= n\n if not size:\n if fh in self._last_pos:\n self._last_pos.upd(fh, (chunk_no, chunk_offset))\n else:\n self._last_pos[fh] = (chunk_no, chunk_offset)\n break\n return b''.join(parts)\n\n def readdir(self, fh, off):\n entries = [(b'.', fh), (b'..', self.parent[fh])]\n entries.extend(self.contents[fh].items())\n for i, (name, inode) in enumerate(entries[off:], off):\n yield name, self.getattr(inode), i + 1\n\n def readlink(self, inode, ctx=None):\n item = self.get_item(inode)\n return os.fsencode(item.source)\n", "path": "src/borg/fuse.py" } ]
[ { "content": "import errno\nimport io\nimport os\nimport stat\nimport struct\nimport sys\nimport tempfile\nimport time\nfrom collections import defaultdict\nfrom signal import SIGINT\nfrom distutils.version import LooseVersion\n\nimport llfuse\n\nfrom .logger import create_logger\nlogger = create_logger()\n\nfrom .crypto.low_level import blake2b_128\nfrom .archiver import Archiver\nfrom .archive import Archive\nfrom .hashindex import FuseVersionsIndex\nfrom .helpers import daemonize, hardlinkable, signal_handler, format_file_size\nfrom .helpers import msgpack\nfrom .item import Item\nfrom .lrucache import LRUCache\nfrom .remote import RemoteRepository\n\n# Does this version of llfuse support ns precision?\nhave_fuse_xtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')\n\n# Does this version of llfuse support birthtime?\nhave_fuse_birthtime = hasattr(llfuse.EntryAttributes, 'st_birthtime') # never?\nhave_fuse_birthtime_ns = hasattr(llfuse.EntryAttributes, 'st_birthtime_ns') # since llfuse 1.3\n\nfuse_version = LooseVersion(getattr(llfuse, '__version__', '0.1'))\nif fuse_version >= '0.42':\n def fuse_main():\n return llfuse.main(workers=1)\nelse:\n def fuse_main():\n llfuse.main(single=True)\n return None\n\n# size of some LRUCaches (1 element per simultaneously open file)\n# note: _inode_cache might have rather large elements - Item.chunks can be large!\n# also, simultaneously reading too many files should be avoided anyway.\n# thus, do not set FILES to high values.\nFILES = 4\n\n\nclass ItemCache:\n \"\"\"\n This is the \"meat\" of the file system's metadata storage.\n\n This class generates inode numbers that efficiently index items in archives,\n and retrieves items from these inode numbers.\n \"\"\"\n\n # 2 MiB are approximately ~230000 items (depends on the average number of items per metadata chunk).\n #\n # Since growing a bytearray has to copy it, growing it will converge to O(n^2), however,\n # this is not yet relevant due to the swiftness of copying memory. If it becomes an issue,\n # use an anonymous mmap and just resize that (or, if on 64 bit, make it so big you never need\n # to resize it in the first place; that's free).\n GROW_META_BY = 2 * 1024 * 1024\n\n indirect_entry_struct = struct.Struct('=cII')\n assert indirect_entry_struct.size == 9\n\n def __init__(self, decrypted_repository):\n self.decrypted_repository = decrypted_repository\n # self.meta, the \"meta-array\" is a densely packed array of metadata about where items can be found.\n # It is indexed by the inode number minus self.offset. (This is in a way eerily similar to how the first\n # unices did this).\n # The meta-array contains chunk IDs and item entries (described in iter_archive_items).\n # The chunk IDs are referenced by item entries through relative offsets,\n # which are bounded by the metadata chunk size.\n self.meta = bytearray()\n # The current write offset in self.meta\n self.write_offset = 0\n\n # Offset added to meta-indices, resulting in inodes,\n # or subtracted from inodes, resulting in meta-indices.\n # XXX: Merge FuseOperations.items and ItemCache to avoid\n # this implicit limitation / hack (on the number of synthetic inodes, degenerate\n # cases can inflate their number far beyond the number of archives).\n self.offset = 1000000\n\n # A temporary file that contains direct items, i.e. 
items directly cached in this layer.\n # These are items that span more than one chunk and thus cannot be efficiently cached\n # by the object cache (self.decrypted_repository), which would require variable-length structures;\n # possible but not worth the effort, see iter_archive_items.\n self.fd = tempfile.TemporaryFile(prefix='borg-tmp')\n\n # A small LRU cache for chunks requested by ItemCache.get() from the object cache,\n # this significantly speeds up directory traversal and similar operations which\n # tend to re-read the same chunks over and over.\n # The capacity is kept low because increasing it does not provide any significant advantage,\n # but makes LRUCache's square behaviour noticeable and consumes more memory.\n self.chunks = LRUCache(capacity=10, dispose=lambda _: None)\n\n # Instrumentation\n # Count of indirect items, i.e. data is cached in the object cache, not directly in this cache\n self.indirect_items = 0\n # Count of direct items, i.e. data is in self.fd\n self.direct_items = 0\n\n def get(self, inode):\n offset = inode - self.offset\n if offset < 0:\n raise ValueError('ItemCache.get() called with an invalid inode number')\n if self.meta[offset] == ord(b'I'):\n _, chunk_id_relative_offset, chunk_offset = self.indirect_entry_struct.unpack_from(self.meta, offset)\n chunk_id_offset = offset - chunk_id_relative_offset\n # bytearray slices are bytearrays as well, explicitly convert to bytes()\n chunk_id = bytes(self.meta[chunk_id_offset:chunk_id_offset + 32])\n chunk = self.chunks.get(chunk_id)\n if not chunk:\n csize, chunk = next(self.decrypted_repository.get_many([chunk_id]))\n self.chunks[chunk_id] = chunk\n data = memoryview(chunk)[chunk_offset:]\n unpacker = msgpack.Unpacker()\n unpacker.feed(data)\n return Item(internal_dict=next(unpacker))\n elif self.meta[offset] == ord(b'S'):\n fd_offset = int.from_bytes(self.meta[offset + 1:offset + 9], 'little')\n self.fd.seek(fd_offset, io.SEEK_SET)\n return Item(internal_dict=next(msgpack.Unpacker(self.fd, read_size=1024)))\n else:\n raise ValueError('Invalid entry type in self.meta')\n\n def iter_archive_items(self, archive_item_ids, filter=None, consider_part_files=False):\n unpacker = msgpack.Unpacker()\n\n # Current offset in the metadata stream, which consists of all metadata chunks glued together\n stream_offset = 0\n # Offset of the current chunk in the metadata stream\n chunk_begin = 0\n # Length of the chunk preciding the current chunk\n last_chunk_length = 0\n msgpacked_bytes = b''\n\n write_offset = self.write_offset\n meta = self.meta\n pack_indirect_into = self.indirect_entry_struct.pack_into\n\n def write_bytes(append_msgpacked_bytes):\n # XXX: Future versions of msgpack include an Unpacker.tell() method that provides this for free.\n nonlocal msgpacked_bytes\n nonlocal stream_offset\n msgpacked_bytes += append_msgpacked_bytes\n stream_offset += len(append_msgpacked_bytes)\n\n for key, (csize, data) in zip(archive_item_ids, self.decrypted_repository.get_many(archive_item_ids)):\n # Store the chunk ID in the meta-array\n if write_offset + 32 >= len(meta):\n self.meta = meta = meta + bytes(self.GROW_META_BY)\n meta[write_offset:write_offset + 32] = key\n current_id_offset = write_offset\n write_offset += 32\n\n # The chunk boundaries cannot be tracked through write_bytes, because the unpack state machine\n # *can* and *will* consume partial items, so calls to write_bytes are unrelated to chunk boundaries.\n chunk_begin += last_chunk_length\n last_chunk_length = len(data)\n\n unpacker.feed(data)\n while True:\n 
try:\n item = unpacker.unpack(write_bytes)\n except msgpack.OutOfData:\n # Need more data, feed the next chunk\n break\n\n item = Item(internal_dict=item)\n if filter and not filter(item) or not consider_part_files and 'part' in item:\n msgpacked_bytes = b''\n continue\n\n current_item = msgpacked_bytes\n current_item_length = len(current_item)\n current_spans_chunks = stream_offset - current_item_length < chunk_begin\n msgpacked_bytes = b''\n\n if write_offset + 9 >= len(meta):\n self.meta = meta = meta + bytes(self.GROW_META_BY)\n\n # item entries in the meta-array come in two different flavours, both nine bytes long.\n # (1) for items that span chunks:\n #\n # 'S' + 8 byte offset into the self.fd file, where the msgpacked item starts.\n #\n # (2) for items that are completely contained in one chunk, which usually is the great majority\n # (about 700:1 for system backups)\n #\n # 'I' + 4 byte offset where the chunk ID is + 4 byte offset in the chunk\n # where the msgpacked items starts\n #\n # The chunk ID offset is the number of bytes _back_ from the start of the entry, i.e.:\n #\n # |Chunk ID| .... |S1234abcd|\n # ^------ offset ----------^\n\n if current_spans_chunks:\n pos = self.fd.seek(0, io.SEEK_END)\n self.fd.write(current_item)\n meta[write_offset:write_offset + 9] = b'S' + pos.to_bytes(8, 'little')\n self.direct_items += 1\n else:\n item_offset = stream_offset - current_item_length - chunk_begin\n pack_indirect_into(meta, write_offset, b'I', write_offset - current_id_offset, item_offset)\n self.indirect_items += 1\n inode = write_offset + self.offset\n write_offset += 9\n\n yield inode, item\n\n self.write_offset = write_offset\n\n\nclass FuseOperations(llfuse.Operations):\n \"\"\"Export archive as a FUSE filesystem\n \"\"\"\n # mount options\n allow_damaged_files = False\n versions = False\n uid_forced = None\n gid_forced = None\n umask = 0\n\n def __init__(self, key, repository, manifest, args, decrypted_repository):\n super().__init__()\n self.repository_uncached = repository\n self.decrypted_repository = decrypted_repository\n self.args = args\n self.manifest = manifest\n self.key = key\n # Maps inode numbers to Item instances. This is used for synthetic inodes,\n # i.e. file-system objects that are made up by FuseOperations and are not contained\n # in the archives. For example archive directories or intermediate directories\n # not contained in archives.\n self.items = {}\n # cache up to <FILES> Items\n self._inode_cache = LRUCache(capacity=FILES, dispose=lambda _: None)\n # _inode_count is the current count of synthetic inodes, i.e. those in self.items\n self._inode_count = 0\n # Maps inode numbers to the inode number of the parent\n self.parent = {}\n # Maps inode numbers to a dictionary mapping byte directory entry names to their inode numbers,\n # i.e. this contains all dirents of everything that is mounted. 
(It becomes really big).\n self.contents = defaultdict(dict)\n self.default_uid = os.getuid()\n self.default_gid = os.getgid()\n self.default_dir = None\n self.pending_archives = {}\n self.cache = ItemCache(decrypted_repository)\n data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))\n logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)\n self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)\n self._last_pos = LRUCache(capacity=FILES, dispose=lambda _: None)\n\n def _create_filesystem(self):\n self._create_dir(parent=1) # first call, create root dir (inode == 1)\n if self.args.location.archive:\n self.process_archive(self.args.location.archive)\n else:\n self.versions_index = FuseVersionsIndex()\n for archive in self.manifest.archives.list_considering(self.args):\n if self.versions:\n # process archives immediately\n self.process_archive(archive.name)\n else:\n # lazily load archives, create archive placeholder inode\n archive_inode = self._create_dir(parent=1, mtime=int(archive.ts.timestamp() * 1e9))\n self.contents[1][os.fsencode(archive.name)] = archive_inode\n self.pending_archives[archive_inode] = archive.name\n\n def sig_info_handler(self, sig_no, stack):\n logger.debug('fuse: %d synth inodes, %d edges (%s)',\n self._inode_count, len(self.parent),\n # getsizeof is the size of the dict itself; key and value are two small-ish integers,\n # which are shared due to code structure (this has been verified).\n format_file_size(sys.getsizeof(self.parent) + len(self.parent) * sys.getsizeof(self._inode_count)))\n logger.debug('fuse: %d pending archives', len(self.pending_archives))\n logger.debug('fuse: ItemCache %d entries (%d direct, %d indirect), meta-array size %s, direct items size %s',\n self.cache.direct_items + self.cache.indirect_items, self.cache.direct_items, self.cache.indirect_items,\n format_file_size(sys.getsizeof(self.cache.meta)),\n format_file_size(os.stat(self.cache.fd.fileno()).st_size))\n logger.debug('fuse: data cache: %d/%d entries, %s', len(self.data_cache.items()), self.data_cache._capacity,\n format_file_size(sum(len(chunk) for key, chunk in self.data_cache.items())))\n self.decrypted_repository.log_instrumentation()\n\n def mount(self, mountpoint, mount_options, foreground=False):\n \"\"\"Mount filesystem on *mountpoint* with *mount_options*.\"\"\"\n\n def pop_option(options, key, present, not_present, wanted_type, int_base=0):\n assert isinstance(options, list) # we mutate this\n for idx, option in enumerate(options):\n if option == key:\n options.pop(idx)\n return present\n if option.startswith(key + '='):\n options.pop(idx)\n value = option.split('=', 1)[1]\n if wanted_type is bool:\n v = value.lower()\n if v in ('y', 'yes', 'true', '1'):\n return True\n if v in ('n', 'no', 'false', '0'):\n return False\n raise ValueError('unsupported value in option: %s' % option)\n if wanted_type is int:\n try:\n return int(value, base=int_base)\n except ValueError:\n raise ValueError('unsupported value in option: %s' % option) from None\n try:\n return wanted_type(value)\n except ValueError:\n raise ValueError('unsupported value in option: %s' % option) from None\n else:\n return not_present\n\n # default_permissions enables permission checking by the kernel. 
Without\n # this, any umask (or uid/gid) would not have an effect and this could\n # cause security issues if used with allow_other mount option.\n # When not using allow_other or allow_root, access is limited to the\n # mounting user anyway.\n options = ['fsname=borgfs', 'ro', 'default_permissions']\n if mount_options:\n options.extend(mount_options.split(','))\n ignore_permissions = pop_option(options, 'ignore_permissions', True, False, bool)\n if ignore_permissions:\n # in case users have a use-case that requires NOT giving \"default_permissions\",\n # this is enabled by the custom \"ignore_permissions\" mount option which just\n # removes \"default_permissions\" again:\n pop_option(options, 'default_permissions', True, False, bool)\n self.allow_damaged_files = pop_option(options, 'allow_damaged_files', True, False, bool)\n self.versions = pop_option(options, 'versions', True, False, bool)\n self.uid_forced = pop_option(options, 'uid', None, None, int)\n self.gid_forced = pop_option(options, 'gid', None, None, int)\n self.umask = pop_option(options, 'umask', 0, 0, int, int_base=8) # umask is octal, e.g. 222 or 0222\n dir_uid = self.uid_forced if self.uid_forced is not None else self.default_uid\n dir_gid = self.gid_forced if self.gid_forced is not None else self.default_gid\n dir_mode = 0o40755 & ~self.umask\n self.default_dir = Item(mode=dir_mode, mtime=int(time.time() * 1e9), uid=dir_uid, gid=dir_gid)\n self._create_filesystem()\n llfuse.init(self, mountpoint, options)\n if not foreground:\n old_id, new_id = daemonize()\n if not isinstance(self.repository_uncached, RemoteRepository):\n # local repo and the locking process' PID just changed, migrate it:\n self.repository_uncached.migrate_lock(old_id, new_id)\n\n # If the file system crashes, we do not want to umount because in that\n # case the mountpoint suddenly appears to become empty. This can have\n # nasty consequences, imagine the user has e.g. 
an active rsync mirror\n # job - seeing the mountpoint empty, rsync would delete everything in the\n # mirror.\n umount = False\n try:\n with signal_handler('SIGUSR1', self.sig_info_handler), \\\n signal_handler('SIGINFO', self.sig_info_handler):\n signal = fuse_main()\n # no crash and no signal (or it's ^C and we're in the foreground) -> umount request\n umount = (signal is None or (signal == SIGINT and foreground))\n finally:\n llfuse.close(umount)\n\n def _create_dir(self, parent, mtime=None):\n \"\"\"Create directory\n \"\"\"\n ino = self.allocate_inode()\n if mtime is not None:\n self.items[ino] = Item(**self.default_dir.as_dict())\n self.items[ino].mtime = mtime\n else:\n self.items[ino] = self.default_dir\n self.parent[ino] = parent\n return ino\n\n def process_archive(self, archive_name, prefix=[]):\n \"\"\"Build FUSE inode hierarchy from archive metadata\n \"\"\"\n self.file_versions = {} # for versions mode: original path -> version\n t0 = time.perf_counter()\n archive = Archive(self.repository_uncached, self.key, self.manifest, archive_name,\n consider_part_files=self.args.consider_part_files)\n strip_components = self.args.strip_components\n matcher = Archiver.build_matcher(self.args.patterns, self.args.paths)\n partial_extract = not matcher.empty() or strip_components\n hardlink_masters = {} if partial_extract else None\n\n def peek_and_store_hardlink_masters(item, matched):\n if (partial_extract and not matched and hardlinkable(item.mode) and\n item.get('hardlink_master', True) and 'source' not in item):\n hardlink_masters[item.get('path')] = (item.get('chunks'), None)\n\n filter = Archiver.build_filter(matcher, peek_and_store_hardlink_masters, strip_components)\n for item_inode, item in self.cache.iter_archive_items(archive.metadata.items, filter=filter,\n consider_part_files=self.args.consider_part_files):\n if strip_components:\n item.path = os.sep.join(item.path.split(os.sep)[strip_components:])\n path = os.fsencode(item.path)\n is_dir = stat.S_ISDIR(item.mode)\n if is_dir:\n try:\n # This can happen if an archive was created with a command line like\n # $ borg create ... 
dir1/file dir1\n # In this case the code below will have created a default_dir inode for dir1 already.\n inode = self._find_inode(path, prefix)\n except KeyError:\n pass\n else:\n self.items[inode] = item\n continue\n segments = prefix + path.split(b'/')\n parent = 1\n for segment in segments[:-1]:\n parent = self.process_inner(segment, parent)\n self.process_leaf(segments[-1], item, parent, prefix, is_dir, item_inode,\n hardlink_masters, strip_components)\n duration = time.perf_counter() - t0\n logger.debug('fuse: process_archive completed in %.1f s for archive %s', duration, archive.name)\n\n def process_leaf(self, name, item, parent, prefix, is_dir, item_inode, hardlink_masters, stripped_components):\n path = item.path\n del item.path # save some space\n hardlink_masters = hardlink_masters or {}\n\n def file_version(item, path):\n if 'chunks' in item:\n file_id = blake2b_128(path)\n current_version, previous_id = self.versions_index.get(file_id, (0, None))\n\n chunk_ids = [chunk_id for chunk_id, _, _ in item.chunks]\n contents_id = blake2b_128(b''.join(chunk_ids))\n\n if contents_id != previous_id:\n current_version += 1\n self.versions_index[file_id] = current_version, contents_id\n\n return current_version\n\n def make_versioned_name(name, version, add_dir=False):\n if add_dir:\n # add intermediate directory with same name as filename\n path_fname = name.rsplit(b'/', 1)\n name += b'/' + path_fname[-1]\n # keep original extension at end to avoid confusing tools\n name, ext = os.path.splitext(name)\n version_enc = os.fsencode('.%05d' % version)\n return name + version_enc + ext\n\n if 'source' in item and hardlinkable(item.mode):\n source = os.sep.join(item.source.split(os.sep)[stripped_components:])\n chunks, link_target = hardlink_masters.get(item.source, (None, source))\n if link_target:\n # Hard link was extracted previously, just link\n link_target = os.fsencode(link_target)\n if self.versions:\n # adjust link target name with version\n version = self.file_versions[link_target]\n link_target = make_versioned_name(link_target, version, add_dir=True)\n try:\n inode = self._find_inode(link_target, prefix)\n except KeyError:\n logger.warning('Skipping broken hard link: %s -> %s', path, source)\n return\n item = self.get_item(inode)\n item.nlink = item.get('nlink', 1) + 1\n self.items[inode] = item\n elif chunks is not None:\n # assign chunks to this item, since the item which had the chunks was not extracted\n item.chunks = chunks\n inode = item_inode\n self.items[inode] = item\n if hardlink_masters:\n # Update master entry with extracted item path, so that following hardlinks don't extract twice.\n hardlink_masters[item.source] = (None, path)\n else:\n inode = item_inode\n\n if self.versions and not is_dir:\n parent = self.process_inner(name, parent)\n enc_path = os.fsencode(path)\n version = file_version(item, enc_path)\n if version is not None:\n # regular file, with contents - maybe a hardlink master\n name = make_versioned_name(name, version)\n self.file_versions[enc_path] = version\n\n self.parent[inode] = parent\n if name:\n self.contents[parent][name] = inode\n\n def process_inner(self, name, parent_inode):\n dir = self.contents[parent_inode]\n if name in dir:\n inode = dir[name]\n else:\n inode = self._create_dir(parent_inode)\n if name:\n dir[name] = inode\n return inode\n\n def allocate_inode(self):\n self._inode_count += 1\n return self._inode_count\n\n def statfs(self, ctx=None):\n stat_ = llfuse.StatvfsData()\n stat_.f_bsize = 512\n stat_.f_frsize = 512\n stat_.f_blocks = 
0\n stat_.f_bfree = 0\n stat_.f_bavail = 0\n stat_.f_files = 0\n stat_.f_ffree = 0\n stat_.f_favail = 0\n if hasattr(stat_, 'f_namemax'): # since llfuse 1.3.0\n stat_.f_namemax = 255 # == NAME_MAX (depends on archive source OS / FS)\n return stat_\n\n def get_item(self, inode):\n item = self._inode_cache.get(inode)\n if item is not None:\n return item\n try:\n # this is a cheap get-from-dictionary operation, no need to cache the result.\n return self.items[inode]\n except KeyError:\n # while self.cache does some internal caching, it has still quite some overhead, so we cache the result.\n item = self.cache.get(inode)\n self._inode_cache[inode] = item\n return item\n\n def _find_inode(self, path, prefix=[]):\n segments = prefix + path.split(b'/')\n inode = 1\n for segment in segments:\n inode = self.contents[inode][segment]\n return inode\n\n def getattr(self, inode, ctx=None):\n item = self.get_item(inode)\n entry = llfuse.EntryAttributes()\n entry.st_ino = inode\n entry.generation = 0\n entry.entry_timeout = 300\n entry.attr_timeout = 300\n entry.st_mode = item.mode & ~self.umask\n entry.st_nlink = item.get('nlink', 1)\n entry.st_uid = self.uid_forced if self.uid_forced is not None else item.uid if item.uid >= 0 else self.default_uid\n entry.st_gid = self.gid_forced if self.gid_forced is not None else item.gid if item.gid >= 0 else self.default_gid\n entry.st_rdev = item.get('rdev', 0)\n entry.st_size = item.get_size()\n entry.st_blksize = 512\n entry.st_blocks = (entry.st_size + entry.st_blksize - 1) // entry.st_blksize\n # note: older archives only have mtime (not atime nor ctime)\n mtime_ns = item.mtime\n if have_fuse_xtime_ns:\n entry.st_mtime_ns = mtime_ns\n entry.st_atime_ns = item.get('atime', mtime_ns)\n entry.st_ctime_ns = item.get('ctime', mtime_ns)\n if have_fuse_birthtime_ns:\n entry.st_birthtime_ns = item.get('birthtime', mtime_ns)\n else:\n entry.st_mtime = mtime_ns / 1e9\n entry.st_atime = item.get('atime', mtime_ns) / 1e9\n entry.st_ctime = item.get('ctime', mtime_ns) / 1e9\n if have_fuse_birthtime:\n entry.st_birthtime = item.get('birthtime', mtime_ns) / 1e9\n return entry\n\n def listxattr(self, inode, ctx=None):\n item = self.get_item(inode)\n return item.get('xattrs', {}).keys()\n\n def getxattr(self, inode, name, ctx=None):\n item = self.get_item(inode)\n try:\n return item.get('xattrs', {})[name] or b''\n except KeyError:\n raise llfuse.FUSEError(llfuse.ENOATTR) from None\n\n def _load_pending_archive(self, inode):\n # Check if this is an archive we need to load\n archive_name = self.pending_archives.pop(inode, None)\n if archive_name:\n self.process_archive(archive_name, [os.fsencode(archive_name)])\n\n def lookup(self, parent_inode, name, ctx=None):\n self._load_pending_archive(parent_inode)\n if name == b'.':\n inode = parent_inode\n elif name == b'..':\n inode = self.parent[parent_inode]\n else:\n inode = self.contents[parent_inode].get(name)\n if not inode:\n raise llfuse.FUSEError(errno.ENOENT)\n return self.getattr(inode)\n\n def open(self, inode, flags, ctx=None):\n if not self.allow_damaged_files:\n item = self.get_item(inode)\n if 'chunks_healthy' in item:\n # Processed archive items don't carry the path anymore; for converting the inode\n # to the path we'd either have to store the inverse of the current structure,\n # or search the entire archive. So we just don't print it. It's easy to correlate anyway.\n logger.warning('File has damaged (all-zero) chunks. Try running borg check --repair. 
'\n 'Mount with allow_damaged_files to read damaged files.')\n raise llfuse.FUSEError(errno.EIO)\n return inode\n\n def opendir(self, inode, ctx=None):\n self._load_pending_archive(inode)\n return inode\n\n def read(self, fh, offset, size):\n parts = []\n item = self.get_item(fh)\n\n # optimize for linear reads:\n # we cache the chunk number and the in-file offset of the chunk in _last_pos[fh]\n chunk_no, chunk_offset = self._last_pos.get(fh, (0, 0))\n if chunk_offset > offset:\n # this is not a linear read, so we lost track and need to start from beginning again...\n chunk_no, chunk_offset = (0, 0)\n\n offset -= chunk_offset\n chunks = item.chunks\n # note: using index iteration to avoid frequently copying big (sub)lists by slicing\n for idx in range(chunk_no, len(chunks)):\n id, s, csize = chunks[idx]\n if s < offset:\n offset -= s\n chunk_offset += s\n chunk_no += 1\n continue\n n = min(size, s - offset)\n if id in self.data_cache:\n data = self.data_cache[id]\n if offset + n == len(data):\n # evict fully read chunk from cache\n del self.data_cache[id]\n else:\n data = self.key.decrypt(id, self.repository_uncached.get(id))\n if offset + n < len(data):\n # chunk was only partially read, cache it\n self.data_cache[id] = data\n parts.append(data[offset:offset + n])\n offset = 0\n size -= n\n if not size:\n if fh in self._last_pos:\n self._last_pos.upd(fh, (chunk_no, chunk_offset))\n else:\n self._last_pos[fh] = (chunk_no, chunk_offset)\n break\n return b''.join(parts)\n\n def readdir(self, fh, off):\n entries = [(b'.', fh), (b'..', self.parent[fh])]\n entries.extend(self.contents[fh].items())\n for i, (name, inode) in enumerate(entries[off:], off):\n yield name, self.getattr(inode), i + 1\n\n def readlink(self, inode, ctx=None):\n item = self.get_item(inode)\n return os.fsencode(item.source)\n", "path": "src/borg/fuse.py" } ]
diff --git a/src/borg/fuse.py b/src/borg/fuse.py index f0933ddbe6..429790e43d 100644 --- a/src/borg/fuse.py +++ b/src/borg/fuse.py @@ -518,6 +518,8 @@ def statfs(self, ctx=None): stat_.f_files = 0 stat_.f_ffree = 0 stat_.f_favail = 0 + if hasattr(stat_, 'f_namemax'): # since llfuse 1.3.0 + stat_.f_namemax = 255 # == NAME_MAX (depends on archive source OS / FS) return stat_ def get_item(self, inode):
ietf-tools__datatracker-4145
v1 api crashes on some content when serializing to xml

See the very old ticket at https://github.com/django-tastypie/django-tastypie/issues/1107. submission.first_two_pages can contain formfeeds. These break tastypie's xml serialization. Json serialization succeeds. The quick fix is to stop exposing first_two_pages through the API.
[ { "content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n# Autogenerated by the mkresources management command 2014-11-13 23:53\n\n\nfrom ietf.api import ModelResource\nfrom tastypie.fields import ToOneField, ToManyField\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie.cache import SimpleCache\n\nfrom ietf import api\nfrom ietf.submit.models import ( Preapproval, SubmissionCheck, Submission,\n SubmissionEmailEvent, SubmissionEvent, SubmissionExtResource )\nfrom ietf.person.resources import PersonResource\n\n\nclass PreapprovalResource(ModelResource):\n by = ToOneField(PersonResource, 'by')\n class Meta:\n cache = SimpleCache()\n queryset = Preapproval.objects.all()\n serializer = api.Serializer()\n #resource_name = 'preapproval'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"name\": ALL,\n \"time\": ALL,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(PreapprovalResource())\n\nfrom ietf.group.resources import GroupResource\nfrom ietf.name.resources import DraftSubmissionStateNameResource\nfrom ietf.doc.resources import DocumentResource\nclass SubmissionResource(ModelResource):\n state = ToOneField(DraftSubmissionStateNameResource, 'state')\n group = ToOneField(GroupResource, 'group', null=True)\n draft = ToOneField(DocumentResource, 'draft', null=True)\n checks = ToManyField('ietf.submit.resources.SubmissionCheckResource', 'checks', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = Submission.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submission'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"remote_ip\": ALL,\n \"access_key\": ALL,\n \"auth_key\": ALL,\n \"name\": ALL,\n \"title\": ALL,\n \"abstract\": ALL,\n \"rev\": ALL,\n \"pages\": ALL,\n \"authors\": ALL,\n \"note\": ALL,\n \"replaces\": ALL,\n \"first_two_pages\": ALL,\n \"file_types\": ALL,\n \"file_size\": ALL,\n \"document_date\": ALL,\n \"submission_date\": ALL,\n \"submitter\": ALL,\n \"xml_version\": ALL,\n \"state\": ALL_WITH_RELATIONS,\n \"group\": ALL_WITH_RELATIONS,\n \"draft\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionResource())\n\nfrom ietf.person.resources import PersonResource\nclass SubmissionEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionEvent.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissionevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEventResource())\n\nclass SubmissionCheckResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionCheck.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissioncheck'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"checker\": ALL,\n \"passed\": ALL,\n \"message\": ALL,\n \"errors\": ALL,\n \"warnings\": ALL,\n \"items\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionCheckResource())\n\n\n\nfrom ietf.person.resources import PersonResource\nfrom ietf.message.resources import MessageResource\nclass SubmissionEmailEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n 
submissionevent_ptr = ToOneField(SubmissionEventResource, 'submissionevent_ptr')\n message = ToOneField(MessageResource, 'message', null=True)\n in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)\n class Meta:\n queryset = SubmissionEmailEvent.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n #resource_name = 'submissionemailevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"msgtype\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n \"submissionevent_ptr\": ALL_WITH_RELATIONS,\n \"message\": ALL_WITH_RELATIONS,\n \"in_reply_to\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEmailEventResource())\n\n\n\nfrom ietf.name.resources import ExtResourceNameResource\nclass SubmissionExtResourceResource(ModelResource):\n name = ToOneField(ExtResourceNameResource, 'name')\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n queryset = SubmissionExtResource.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n resource_name = 'submissionextresource'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"display_name\": ALL,\n \"value\": ALL,\n \"name\": ALL_WITH_RELATIONS,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionExtResourceResource())\n", "path": "ietf/submit/resources.py" } ]
[ { "content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n# Autogenerated by the mkresources management command 2014-11-13 23:53\n\n\nfrom ietf.api import ModelResource\nfrom tastypie.fields import ToOneField, ToManyField\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie.cache import SimpleCache\n\nfrom ietf import api\nfrom ietf.submit.models import ( Preapproval, SubmissionCheck, Submission,\n SubmissionEmailEvent, SubmissionEvent, SubmissionExtResource )\nfrom ietf.person.resources import PersonResource\n\n\nclass PreapprovalResource(ModelResource):\n by = ToOneField(PersonResource, 'by')\n class Meta:\n cache = SimpleCache()\n queryset = Preapproval.objects.all()\n serializer = api.Serializer()\n #resource_name = 'preapproval'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"name\": ALL,\n \"time\": ALL,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(PreapprovalResource())\n\nfrom ietf.group.resources import GroupResource\nfrom ietf.name.resources import DraftSubmissionStateNameResource\nfrom ietf.doc.resources import DocumentResource\nclass SubmissionResource(ModelResource):\n state = ToOneField(DraftSubmissionStateNameResource, 'state')\n group = ToOneField(GroupResource, 'group', null=True)\n draft = ToOneField(DocumentResource, 'draft', null=True)\n checks = ToManyField('ietf.submit.resources.SubmissionCheckResource', 'checks', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = Submission.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submission'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"remote_ip\": ALL,\n \"access_key\": ALL,\n \"auth_key\": ALL,\n \"name\": ALL,\n \"title\": ALL,\n \"abstract\": ALL,\n \"rev\": ALL,\n \"pages\": ALL,\n \"authors\": ALL,\n \"note\": ALL,\n \"replaces\": ALL,\n \"first_two_pages\": ALL,\n \"file_types\": ALL,\n \"file_size\": ALL,\n \"document_date\": ALL,\n \"submission_date\": ALL,\n \"submitter\": ALL,\n \"xml_version\": ALL,\n \"state\": ALL_WITH_RELATIONS,\n \"group\": ALL_WITH_RELATIONS,\n \"draft\": ALL_WITH_RELATIONS,\n }\n excludes = ('first_two_pages',)\napi.submit.register(SubmissionResource())\n\nfrom ietf.person.resources import PersonResource\nclass SubmissionEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionEvent.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissionevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEventResource())\n\nclass SubmissionCheckResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionCheck.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissioncheck'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"checker\": ALL,\n \"passed\": ALL,\n \"message\": ALL,\n \"errors\": ALL,\n \"warnings\": ALL,\n \"items\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionCheckResource())\n\n\n\nfrom ietf.person.resources import PersonResource\nfrom ietf.message.resources import MessageResource\nclass SubmissionEmailEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = 
ToOneField(PersonResource, 'by', null=True)\n submissionevent_ptr = ToOneField(SubmissionEventResource, 'submissionevent_ptr')\n message = ToOneField(MessageResource, 'message', null=True)\n in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)\n class Meta:\n queryset = SubmissionEmailEvent.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n #resource_name = 'submissionemailevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"msgtype\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n \"submissionevent_ptr\": ALL_WITH_RELATIONS,\n \"message\": ALL_WITH_RELATIONS,\n \"in_reply_to\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEmailEventResource())\n\n\n\nfrom ietf.name.resources import ExtResourceNameResource\nclass SubmissionExtResourceResource(ModelResource):\n name = ToOneField(ExtResourceNameResource, 'name')\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n queryset = SubmissionExtResource.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n resource_name = 'submissionextresource'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"display_name\": ALL,\n \"value\": ALL,\n \"name\": ALL_WITH_RELATIONS,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionExtResourceResource())\n", "path": "ietf/submit/resources.py" } ]
diff --git a/ietf/submit/resources.py b/ietf/submit/resources.py index bf4959f240..98de27aa62 100644 --- a/ietf/submit/resources.py +++ b/ietf/submit/resources.py @@ -68,6 +68,7 @@ class Meta: "group": ALL_WITH_RELATIONS, "draft": ALL_WITH_RELATIONS, } + excludes = ('first_two_pages',) api.submit.register(SubmissionResource()) from ietf.person.resources import PersonResource
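The crash above is ultimately an XML 1.0 character restriction rather than anything datatracker-specific: formfeed (U+000C) is not a legal XML character, which is why JSON serialization succeeds while XML serialization fails, and why excluding first_two_pages via the `excludes` line in the diff is enough. The following stdlib-only sketch illustrates the failure mode; it does not use tastypie or the datatracker code, and the element name is purely illustrative.

```
import xml.etree.ElementTree as ET

# A field value with an embedded formfeed, as described for first_two_pages.
elem = ET.Element("first_two_pages")          # element name is illustrative only
elem.text = "page one\x0cpage two"            # U+000C between the two pages

payload = ET.tostring(elem)                   # the stdlib writer emits the raw character

try:
    ET.fromstring(payload)                    # parsing it back fails: not valid XML 1.0
except ET.ParseError as err:
    print("XML round trip failed:", err)      # e.g. "not well-formed (invalid token)"
```

Serializers backed by lxml typically reject such text even earlier, at write time, with a ValueError about XML-compatible strings; excluding the field keeps it out of every serialized representation, so neither serializer ever sees the formfeeds.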
biolab__orange3-text-239
Preprocess Text: empty Tagger (Stanford) results in segfault

Corpus (bookexcerpts) - Preprocess Text - turn on POS Tagger - select Tagger (window opens), don't select any tagger, just use Cancel. The procedure results in a segfault.

Perhaps disable Tagger if Model is empty, or otherwise prevent segfaulting.
[ { "content": "import os\n\nfrom AnyQt.QtWidgets import (QComboBox, QWidget, QHBoxLayout,\n QSizePolicy, QLineEdit, QDoubleSpinBox,\n QSpinBox, QTextEdit, QDateEdit, QGroupBox,\n QPushButton, QStyle, QFileDialog, QLabel,\n QGridLayout, QCheckBox, QStackedLayout)\nfrom AnyQt.QtGui import QColor\nfrom AnyQt.QtCore import QDate, pyqtSignal, Qt, QSize\n\nfrom Orange.widgets.gui import OWComponent, hBox\nfrom Orange.widgets import settings\n\n\nclass ListEdit(QTextEdit):\n PLACEHOLDER_COLOR = QColor(128, 128, 128)\n USER_TEXT_COLOR = QColor(0, 0, 0)\n\n def __init__(self, master=None, attr=None, placeholder_text=None,\n fixed_height=None, *args):\n super().__init__(*args)\n self.master = master\n self.attr = attr\n self.placeholder_text = placeholder_text\n\n if self.master and self.attr:\n self.setText('\\n'.join(getattr(self.master, self.attr, [])))\n\n self.set_placeholder()\n self.textChanged.connect(self.synchronize)\n\n if fixed_height:\n self.setFixedHeight(fixed_height)\n\n def set_placeholder(self):\n \"\"\" Set placeholder if there is no user input. \"\"\"\n if self.toPlainText() == '':\n self.setFontItalic(True)\n self.setTextColor(self.PLACEHOLDER_COLOR)\n self.setText(self.placeholder_text)\n\n def toPlainText(self):\n \"\"\" Return only text input from user. \"\"\"\n text = super().toPlainText()\n if self.placeholder_text is not None and text == self.placeholder_text:\n text = ''\n return text\n\n def focusInEvent(self, event):\n super().focusInEvent(event)\n if self.toPlainText() == '':\n self.clear()\n self.setFontItalic(False)\n self.setTextColor(self.USER_TEXT_COLOR)\n\n def focusOutEvent(self, event):\n self.set_placeholder()\n QTextEdit.focusOutEvent(self, event)\n\n def synchronize(self):\n if self.master and self.attr:\n setattr(self.master, self.attr, self.value())\n\n def value(self):\n return self.text.split('\\n') if self.text else []\n\n @property\n def text(self):\n return self.toPlainText().strip()\n\n\nclass QueryBox(QComboBox):\n def __init__(self, widget, master, history, callback, min_width=150):\n super().__init__()\n self.master = master\n self.history = history\n self.callback = callback\n\n self.setMinimumWidth(min_width)\n self.setEditable(True)\n self.activated[int].connect(self.synchronize) # triggered for enter and drop-down\n widget.layout().addWidget(self)\n self.refresh()\n\n def synchronize(self, n=None, silent=False):\n if n is not None and n < len(self.history): # selecting from drop-down\n name = self.history[n]\n del self.history[n]\n self.history.insert(0, name)\n else: # enter pressed\n query = self.currentText()\n if query != '':\n if query in self.history:\n self.history.remove(query)\n self.history.insert(0, self.currentText())\n\n self.refresh()\n\n if callable(self.callback) and not silent:\n self.callback()\n\n def refresh(self):\n self.clear()\n for query in self.history:\n self.addItem(query)\n\n\nclass CheckListLayout(QGroupBox):\n def __init__(self, title, master, attr, items, cols=1, callback=None):\n super().__init__(title=title)\n self.master = master\n self.attr = attr\n self.items = items\n self.callback = callback\n\n self.current_values = getattr(self.master, self.attr)\n\n layout = QGridLayout()\n self.setLayout(layout)\n\n nrows = len(items) // cols + bool(len(items) % cols)\n\n self.boxes = []\n for i, value in enumerate(self.items):\n box = QCheckBox(value)\n box.setChecked(value in self.current_values)\n box.stateChanged.connect(self.synchronize)\n self.boxes.append(box)\n layout.addWidget(box, i % nrows, i // nrows)\n\n 
def synchronize(self):\n values = []\n for item, check_box in zip(self.items, self.boxes):\n if check_box.isChecked():\n values.append(item)\n\n setattr(self.master, self.attr, values)\n\n if self.callback:\n self.callback()\n\n\nclass ComboBox(QComboBox):\n def __init__(self, master, attr, items):\n super().__init__()\n self.attr = attr\n self.master = master\n\n if not isinstance(items[0], tuple):\n self.items = [(str(item), item) for item in items]\n else:\n self.items = items\n\n for i, (key, value) in enumerate(self.items):\n self.addItem(key)\n if value == getattr(master, attr, None):\n self.setCurrentIndex(i)\n\n self.currentIndexChanged[int].connect(self.synchronize)\n\n def synchronize(self, i):\n setattr(self.master, self.attr, self.items[i][1])\n\n\nclass DatePicker(QDateEdit):\n QT_DATE_FORMAT = 'yyyy-MM-dd'\n PY_DATE_FORMAT = '%Y-%m-%d'\n\n def __init__(self, widget, master, attribute, label, margin=(0, 0, 0, 0),\n display_format=QT_DATE_FORMAT, min_date=None, max_date=None, calendar_popup=True):\n super().__init__()\n self.master = master\n self.attribute = attribute\n\n hb = hBox(widget)\n hb.layout().setContentsMargins(*margin)\n hb.layout().addWidget(QLabel(label))\n hb.layout().addWidget(self)\n\n self.setCalendarPopup(calendar_popup)\n self.setDisplayFormat(display_format)\n self.setDate(self.to_qdate(getattr(master, attribute)))\n if min_date:\n self.setMinimumDate(self.to_qdate(min_date))\n if max_date:\n self.setMaximumDate(self.to_qdate(max_date))\n self.dateChanged.connect(self.synchronize)\n\n @classmethod\n def to_qdate(cls, date):\n return QDate.fromString(date.strftime(cls.PY_DATE_FORMAT),\n cls.QT_DATE_FORMAT)\n\n def synchronize(self):\n setattr(self.master, self.attribute, self.date().toPyDate())\n\n\nclass DatePickerInterval(QWidget):\n def __init__(self, widget, master, attribute_from, attribute_to, min_date=None, max_date=None,\n label_from='From:', label_to='To:', margin=(0, 0, 0, 0)):\n super().__init__()\n self.setParent(widget)\n\n hb = hBox(widget)\n self.picker_from = DatePicker(hb, master, attribute_from, label_from,\n min_date=min_date, max_date=max_date, margin=margin)\n self.picker_to = DatePicker(hb, master, attribute_to, label_to,\n min_date=min_date, max_date=max_date, margin=margin)\n self.picker_from.dateChanged.connect(self.synchronize)\n self.picker_to.dateChanged.connect(self.synchronize)\n self.synchronize()\n\n def synchronize(self):\n self.picker_from.setMaximumDate(self.picker_to.date())\n self.picker_to.setMinimumDate(self.picker_from.date())\n\n\nclass FileWidget(QWidget):\n on_open = pyqtSignal(str)\n\n def __init__(self, dialog_title='', dialog_format='',\n start_dir=os.path.expanduser('~/'),\n icon_size=(12, 20), minimal_width=200,\n browse_label='Browse', on_open=None,\n reload_button=True, reload_label='Reload',\n recent_files=None, directory_aliases=None,\n allow_empty=True, empty_file_label='(none)'):\n \"\"\" Creates a widget with a button for file loading and\n an optional combo box for recent files and reload buttons.\n\n Args:\n dialog_title (str): The title of the dialog.\n dialog_format (str): Formats for the dialog.\n start_dir (str): A directory to start from.\n icon_size (int, int): The size of buttons' icons.\n on_open (callable): A callback function that accepts filepath as the only argument.\n reload_button (bool): Whether to show reload button.\n reload_label (str): The text displayed on the reload button.\n recent_files (List[str]): List of recent files.\n directory_aliases (dict): An {alias: dir} dictionary for 
fast directories' access.\n allow_empty (bool): Whether empty path is allowed.\n \"\"\"\n super().__init__()\n self.dialog_title = dialog_title\n self.dialog_format = dialog_format\n self.start_dir = start_dir\n\n self.recent_files = recent_files\n self.directory_aliases = directory_aliases or {}\n self.check_existence()\n\n self.on_open.connect(on_open)\n self.allow_empty = allow_empty\n self.empty_file_label = empty_file_label\n\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if recent_files is not None:\n self.file_combo = QComboBox()\n self.file_combo.setMinimumWidth(minimal_width)\n self.file_combo.activated[int].connect(self.select)\n self.update_combo()\n layout.addWidget(self.file_combo)\n\n self.browse_button = QPushButton(browse_label)\n self.browse_button.setFocusPolicy(Qt.NoFocus)\n self.browse_button.clicked.connect(self.browse)\n self.browse_button.setIcon(self.style()\n .standardIcon(QStyle.SP_DirOpenIcon))\n self.browse_button.setIconSize(QSize(*icon_size))\n self.browse_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n layout.addWidget(self.browse_button)\n\n if reload_button:\n self.reload_button = QPushButton(reload_label)\n self.reload_button.setFocusPolicy(Qt.NoFocus)\n self.reload_button.clicked.connect(self.reload)\n self.reload_button.setIcon(self.style()\n .standardIcon(QStyle.SP_BrowserReload))\n self.reload_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.reload_button.setIconSize(QSize(*icon_size))\n layout.addWidget(self.reload_button)\n\n def browse(self, start_dir=None):\n start_dir = start_dir or self.start_dir\n path, _ = QFileDialog().getOpenFileName(self, self.dialog_title,\n start_dir, self.dialog_format)\n\n if path and self.recent_files is not None:\n if path in self.recent_files:\n self.recent_files.remove(path)\n self.recent_files.insert(0, path)\n self.update_combo()\n\n self.open_file(path)\n\n def select(self, n):\n name = self.file_combo.currentText()\n if n < len(self.recent_files):\n name = self.recent_files[n]\n del self.recent_files[n]\n self.recent_files.insert(0, name)\n self.open_file(self.recent_files[0])\n self.update_combo()\n elif name == self.empty_file_label:\n self.open_file(self.empty_file_label)\n elif name in self.directory_aliases:\n self.browse(self.directory_aliases[name])\n\n def update_combo(self):\n if self.recent_files is not None:\n self.file_combo.clear()\n for file in self.recent_files:\n self.file_combo.addItem(os.path.split(file)[1])\n\n if self.allow_empty or not self.recent_files:\n self.file_combo.addItem(self.empty_file_label)\n\n for alias in self.directory_aliases.keys():\n self.file_combo.addItem(alias)\n\n def reload(self):\n if self.recent_files:\n self.select(0)\n\n def check_existence(self):\n if self.recent_files:\n to_remove = [\n file for file in self.recent_files if not os.path.exists(file)\n ]\n for file in to_remove:\n self.recent_files.remove(file)\n\n def open_file(self, path):\n try:\n self.on_open.emit(path if path != self.empty_file_label else '')\n except (OSError, IOError):\n self.loading_error_signal.emit('Could not open \"{}\".'\n .format(path))\n\n\nclass ValidatedLineEdit(QLineEdit):\n invalid_input_signal = pyqtSignal(str)\n\n def __init__(self, master, attr, validator, *args):\n super().__init__(*args)\n self.master = master\n self.attr = attr\n self.validator = validator\n\n self.setText(getattr(master, attr))\n self.on_change()\n self.textChanged.connect(self.on_change)\n\n def on_change(self):\n if self.validator(self.text()):\n 
self.setStyleSheet(\"QLineEdit { border : 1px solid gray;}\")\n self.synchronize()\n else:\n self.setStyleSheet(\"QLineEdit { border : 2px solid red;}\")\n self.invalid_input_signal.emit(\"Invalid '{}' value.\".format(self.attr))\n\n def synchronize(self):\n setattr(self.master, self.attr, self.text())\n\n\nclass AbsoluteRelativeSpinBox(QWidget):\n editingFinished = pyqtSignal()\n valueChanged = pyqtSignal()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args)\n layout = QStackedLayout(self)\n\n self.double_spin = QDoubleSpinBox()\n self.double_spin.valueChanged.connect(self.double_value_changed)\n self.double_spin.editingFinished.connect(self.double_editing_finished)\n layout.addWidget(self.double_spin)\n\n self.int_spin = QSpinBox()\n self.int_spin.setMaximum(10 ** 4)\n self.int_spin.valueChanged.connect(self.int_value_changed)\n self.int_spin.editingFinished.connect(self.int_editing_finished)\n layout.addWidget(self.int_spin)\n\n self.setValue(kwargs.get('value', 0.))\n\n def double_value_changed(self):\n if self.double_spin.value() > 1:\n self.layout().setCurrentIndex(1)\n self.int_spin.setValue(self.double_spin.value())\n\n self.valueChanged.emit()\n\n def double_editing_finished(self):\n if self.double_spin.value() <= 1.:\n self.editingFinished.emit()\n\n def int_value_changed(self):\n if self.int_spin.value() == 0:\n self.layout().setCurrentIndex(0)\n self.double_spin.setValue(1. - self.double_spin.singleStep())\n # There is no need to emit valueChanged signal.\n\n def int_editing_finished(self):\n if self.int_spin.value() > 0:\n self.editingFinished.emit()\n\n def value(self):\n return self.int_spin.value() or self.double_spin.value()\n\n def setValue(self, value):\n if isinstance(value, int):\n self.layout().setCurrentIndex(1)\n self.int_spin.setValue(value)\n else:\n self.layout().setCurrentIndex(0)\n self.double_spin.setValue(value)\n\n def setSingleStep(self, step):\n if isinstance(step, float):\n self.double_spin.setSingleStep(step)\n else:\n self.int_spin.setSingleStep(step)\n\n\nclass RangeWidget(QWidget):\n valueChanged = pyqtSignal()\n editingFinished = pyqtSignal()\n\n def __init__(self, widget, master, attribute, minimum=0., maximum=1., step=.05,\n min_label=None, max_label=None, allow_absolute=False, dtype=float,\n callback=None, *args):\n super().__init__(*args)\n if widget:\n widget.layout().addWidget(self)\n self.allow_absolute_values = allow_absolute\n self.master = master\n self.attribute = attribute\n self.min = minimum\n self.max = maximum\n self.step = step\n\n self.min_label = min_label\n self.max_label = max_label\n a, b = self.master_value()\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if self.allow_absolute_values:\n SpinBox = AbsoluteRelativeSpinBox\n else:\n if dtype == float:\n SpinBox = QDoubleSpinBox\n else:\n SpinBox = QSpinBox\n\n if self.min_label:\n layout.addWidget(QLabel(self.min_label))\n\n self.min_spin = SpinBox(value=a)\n self.min_spin.setSingleStep(self.step)\n layout.addWidget(self.min_spin)\n\n if self.max_label:\n layout.addWidget(QLabel(self.max_label))\n\n self.max_spin = SpinBox(value=b)\n self.max_spin.setSingleStep(self.step)\n layout.addWidget(self.max_spin)\n\n self.set_range()\n self.min_spin.valueChanged.connect(self.synchronize)\n self.min_spin.editingFinished.connect(self.editingFinished)\n self.max_spin.valueChanged.connect(self.synchronize)\n self.max_spin.editingFinished.connect(self.editingFinished)\n if callback:\n self.valueChanged.connect(callback)\n\n def synchronize(self):\n a, 
b = self.value()\n if isinstance(self.attribute, str):\n setattr(self.master, self.attribute, (a, b))\n else:\n setattr(self.master, self.attribute[0], a)\n setattr(self.master, self.attribute[1], b)\n self.set_range()\n self.valueChanged.emit()\n\n def master_value(self):\n if isinstance(self.attribute, str):\n return getattr(self.master, self.attribute)\n return (getattr(self.master, self.attribute[0]),\n getattr(self.master, self.attribute[1]))\n\n def value(self):\n return self.min_spin.value(), self.max_spin.value()\n\n def set_range(self):\n if not self.allow_absolute_values:\n a, b = self.value()\n self.min_spin.setRange(self.min, b)\n self.max_spin.setRange(a, self.max)\n\n\nclass ResourceLoader(QWidget, OWComponent):\n valueChanged = pyqtSignal(str, str)\n\n recent_files = settings.Setting([])\n resource_path = settings.Setting('')\n\n def __init__(self, widget, model_format, provider_format,\n model_button_label='Model', provider_button_label='Provider'):\n QWidget.__init__(self)\n OWComponent.__init__(self, widget)\n\n self.model_path = None\n layout = QHBoxLayout(self, spacing=0)\n layout.setContentsMargins(0, 0, 0, 0)\n\n self.model_widget = FileWidget(recent_files=self.recent_files, dialog_title='Load model',\n dialog_format=model_format, start_dir=None,\n on_open=self.load_model, allow_empty=False,\n reload_button=False, browse_label=model_button_label)\n self.model_path = self.recent_files[0] if self.recent_files else None\n\n layout.addWidget(self.model_widget)\n\n self.provider_widget = FileWidget(recent_files=None, dialog_title='Load provider',\n dialog_format=provider_format, start_dir=None,\n on_open=self.load_provider, allow_empty=False,\n reload_button=False, browse_label=provider_button_label)\n layout.addWidget(self.provider_widget)\n\n def load_model(self, path_to_file):\n self.model_path = path_to_file\n self.valueChanged.emit(self.model_path, self.resource_path)\n\n def load_provider(self, path_to_file):\n self.resource_path = path_to_file\n self.valueChanged.emit(self.model_path, self.resource_path)\n\n", "path": "orangecontrib/text/widgets/utils/widgets.py" } ]
[ { "content": "import os\n\nfrom AnyQt.QtWidgets import (QComboBox, QWidget, QHBoxLayout,\n QSizePolicy, QLineEdit, QDoubleSpinBox,\n QSpinBox, QTextEdit, QDateEdit, QGroupBox,\n QPushButton, QStyle, QFileDialog, QLabel,\n QGridLayout, QCheckBox, QStackedLayout)\nfrom AnyQt.QtGui import QColor\nfrom AnyQt.QtCore import QDate, pyqtSignal, Qt, QSize\n\nfrom Orange.widgets.gui import OWComponent, hBox\nfrom Orange.widgets import settings\n\n\nclass ListEdit(QTextEdit):\n PLACEHOLDER_COLOR = QColor(128, 128, 128)\n USER_TEXT_COLOR = QColor(0, 0, 0)\n\n def __init__(self, master=None, attr=None, placeholder_text=None,\n fixed_height=None, *args):\n super().__init__(*args)\n self.master = master\n self.attr = attr\n self.placeholder_text = placeholder_text\n\n if self.master and self.attr:\n self.setText('\\n'.join(getattr(self.master, self.attr, [])))\n\n self.set_placeholder()\n self.textChanged.connect(self.synchronize)\n\n if fixed_height:\n self.setFixedHeight(fixed_height)\n\n def set_placeholder(self):\n \"\"\" Set placeholder if there is no user input. \"\"\"\n if self.toPlainText() == '':\n self.setFontItalic(True)\n self.setTextColor(self.PLACEHOLDER_COLOR)\n self.setText(self.placeholder_text)\n\n def toPlainText(self):\n \"\"\" Return only text input from user. \"\"\"\n text = super().toPlainText()\n if self.placeholder_text is not None and text == self.placeholder_text:\n text = ''\n return text\n\n def focusInEvent(self, event):\n super().focusInEvent(event)\n if self.toPlainText() == '':\n self.clear()\n self.setFontItalic(False)\n self.setTextColor(self.USER_TEXT_COLOR)\n\n def focusOutEvent(self, event):\n self.set_placeholder()\n QTextEdit.focusOutEvent(self, event)\n\n def synchronize(self):\n if self.master and self.attr:\n setattr(self.master, self.attr, self.value())\n\n def value(self):\n return self.text.split('\\n') if self.text else []\n\n @property\n def text(self):\n return self.toPlainText().strip()\n\n\nclass QueryBox(QComboBox):\n def __init__(self, widget, master, history, callback, min_width=150):\n super().__init__()\n self.master = master\n self.history = history\n self.callback = callback\n\n self.setMinimumWidth(min_width)\n self.setEditable(True)\n self.activated[int].connect(self.synchronize) # triggered for enter and drop-down\n widget.layout().addWidget(self)\n self.refresh()\n\n def synchronize(self, n=None, silent=False):\n if n is not None and n < len(self.history): # selecting from drop-down\n name = self.history[n]\n del self.history[n]\n self.history.insert(0, name)\n else: # enter pressed\n query = self.currentText()\n if query != '':\n if query in self.history:\n self.history.remove(query)\n self.history.insert(0, self.currentText())\n\n self.refresh()\n\n if callable(self.callback) and not silent:\n self.callback()\n\n def refresh(self):\n self.clear()\n for query in self.history:\n self.addItem(query)\n\n\nclass CheckListLayout(QGroupBox):\n def __init__(self, title, master, attr, items, cols=1, callback=None):\n super().__init__(title=title)\n self.master = master\n self.attr = attr\n self.items = items\n self.callback = callback\n\n self.current_values = getattr(self.master, self.attr)\n\n layout = QGridLayout()\n self.setLayout(layout)\n\n nrows = len(items) // cols + bool(len(items) % cols)\n\n self.boxes = []\n for i, value in enumerate(self.items):\n box = QCheckBox(value)\n box.setChecked(value in self.current_values)\n box.stateChanged.connect(self.synchronize)\n self.boxes.append(box)\n layout.addWidget(box, i % nrows, i // nrows)\n\n 
def synchronize(self):\n values = []\n for item, check_box in zip(self.items, self.boxes):\n if check_box.isChecked():\n values.append(item)\n\n setattr(self.master, self.attr, values)\n\n if self.callback:\n self.callback()\n\n\nclass ComboBox(QComboBox):\n def __init__(self, master, attr, items):\n super().__init__()\n self.attr = attr\n self.master = master\n\n if not isinstance(items[0], tuple):\n self.items = [(str(item), item) for item in items]\n else:\n self.items = items\n\n for i, (key, value) in enumerate(self.items):\n self.addItem(key)\n if value == getattr(master, attr, None):\n self.setCurrentIndex(i)\n\n self.currentIndexChanged[int].connect(self.synchronize)\n\n def synchronize(self, i):\n setattr(self.master, self.attr, self.items[i][1])\n\n\nclass DatePicker(QDateEdit):\n QT_DATE_FORMAT = 'yyyy-MM-dd'\n PY_DATE_FORMAT = '%Y-%m-%d'\n\n def __init__(self, widget, master, attribute, label, margin=(0, 0, 0, 0),\n display_format=QT_DATE_FORMAT, min_date=None, max_date=None, calendar_popup=True):\n super().__init__()\n self.master = master\n self.attribute = attribute\n\n hb = hBox(widget)\n hb.layout().setContentsMargins(*margin)\n hb.layout().addWidget(QLabel(label))\n hb.layout().addWidget(self)\n\n self.setCalendarPopup(calendar_popup)\n self.setDisplayFormat(display_format)\n self.setDate(self.to_qdate(getattr(master, attribute)))\n if min_date:\n self.setMinimumDate(self.to_qdate(min_date))\n if max_date:\n self.setMaximumDate(self.to_qdate(max_date))\n self.dateChanged.connect(self.synchronize)\n\n @classmethod\n def to_qdate(cls, date):\n return QDate.fromString(date.strftime(cls.PY_DATE_FORMAT),\n cls.QT_DATE_FORMAT)\n\n def synchronize(self):\n setattr(self.master, self.attribute, self.date().toPyDate())\n\n\nclass DatePickerInterval(QWidget):\n def __init__(self, widget, master, attribute_from, attribute_to, min_date=None, max_date=None,\n label_from='From:', label_to='To:', margin=(0, 0, 0, 0)):\n super().__init__()\n self.setParent(widget)\n\n hb = hBox(widget)\n self.picker_from = DatePicker(hb, master, attribute_from, label_from,\n min_date=min_date, max_date=max_date, margin=margin)\n self.picker_to = DatePicker(hb, master, attribute_to, label_to,\n min_date=min_date, max_date=max_date, margin=margin)\n self.picker_from.dateChanged.connect(self.synchronize)\n self.picker_to.dateChanged.connect(self.synchronize)\n self.synchronize()\n\n def synchronize(self):\n self.picker_from.setMaximumDate(self.picker_to.date())\n self.picker_to.setMinimumDate(self.picker_from.date())\n\n\nclass FileWidget(QWidget):\n on_open = pyqtSignal(str)\n\n def __init__(self, dialog_title='', dialog_format='',\n start_dir=os.path.expanduser('~/'),\n icon_size=(12, 20), minimal_width=200,\n browse_label='Browse', on_open=None,\n reload_button=True, reload_label='Reload',\n recent_files=None, directory_aliases=None,\n allow_empty=True, empty_file_label='(none)'):\n \"\"\" Creates a widget with a button for file loading and\n an optional combo box for recent files and reload buttons.\n\n Args:\n dialog_title (str): The title of the dialog.\n dialog_format (str): Formats for the dialog.\n start_dir (str): A directory to start from.\n icon_size (int, int): The size of buttons' icons.\n on_open (callable): A callback function that accepts filepath as the only argument.\n reload_button (bool): Whether to show reload button.\n reload_label (str): The text displayed on the reload button.\n recent_files (List[str]): List of recent files.\n directory_aliases (dict): An {alias: dir} dictionary for 
fast directories' access.\n allow_empty (bool): Whether empty path is allowed.\n \"\"\"\n super().__init__()\n self.dialog_title = dialog_title\n self.dialog_format = dialog_format\n self.start_dir = start_dir\n\n self.recent_files = recent_files\n self.directory_aliases = directory_aliases or {}\n self.check_existence()\n\n self.on_open.connect(on_open)\n self.allow_empty = allow_empty\n self.empty_file_label = empty_file_label\n\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if recent_files is not None:\n self.file_combo = QComboBox()\n self.file_combo.setMinimumWidth(minimal_width)\n self.file_combo.activated[int].connect(self.select)\n self.update_combo()\n layout.addWidget(self.file_combo)\n\n self.browse_button = QPushButton(browse_label)\n self.browse_button.setFocusPolicy(Qt.NoFocus)\n self.browse_button.clicked.connect(self.browse)\n self.browse_button.setIcon(self.style()\n .standardIcon(QStyle.SP_DirOpenIcon))\n self.browse_button.setIconSize(QSize(*icon_size))\n self.browse_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n layout.addWidget(self.browse_button)\n\n if reload_button:\n self.reload_button = QPushButton(reload_label)\n self.reload_button.setFocusPolicy(Qt.NoFocus)\n self.reload_button.clicked.connect(self.reload)\n self.reload_button.setIcon(self.style()\n .standardIcon(QStyle.SP_BrowserReload))\n self.reload_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.reload_button.setIconSize(QSize(*icon_size))\n layout.addWidget(self.reload_button)\n\n def browse(self, start_dir=None):\n start_dir = start_dir or self.start_dir\n path, _ = QFileDialog().getOpenFileName(self, self.dialog_title,\n start_dir, self.dialog_format)\n\n if path and self.recent_files is not None:\n if path in self.recent_files:\n self.recent_files.remove(path)\n self.recent_files.insert(0, path)\n self.update_combo()\n\n if path:\n self.open_file(path)\n\n def select(self, n):\n name = self.file_combo.currentText()\n if n < len(self.recent_files):\n name = self.recent_files[n]\n del self.recent_files[n]\n self.recent_files.insert(0, name)\n self.open_file(self.recent_files[0])\n self.update_combo()\n elif name == self.empty_file_label:\n self.open_file(self.empty_file_label)\n elif name in self.directory_aliases:\n self.browse(self.directory_aliases[name])\n\n def update_combo(self):\n if self.recent_files is not None:\n self.file_combo.clear()\n for file in self.recent_files:\n self.file_combo.addItem(os.path.split(file)[1])\n\n if self.allow_empty or not self.recent_files:\n self.file_combo.addItem(self.empty_file_label)\n\n for alias in self.directory_aliases.keys():\n self.file_combo.addItem(alias)\n\n def reload(self):\n if self.recent_files:\n self.select(0)\n\n def check_existence(self):\n if self.recent_files:\n to_remove = [\n file for file in self.recent_files if not os.path.exists(file)\n ]\n for file in to_remove:\n self.recent_files.remove(file)\n\n def open_file(self, path):\n try:\n self.on_open.emit(path if path != self.empty_file_label else '')\n except (OSError, IOError):\n self.loading_error_signal.emit('Could not open \"{}\".'\n .format(path))\n\n\nclass ValidatedLineEdit(QLineEdit):\n invalid_input_signal = pyqtSignal(str)\n\n def __init__(self, master, attr, validator, *args):\n super().__init__(*args)\n self.master = master\n self.attr = attr\n self.validator = validator\n\n self.setText(getattr(master, attr))\n self.on_change()\n self.textChanged.connect(self.on_change)\n\n def on_change(self):\n if 
self.validator(self.text()):\n self.setStyleSheet(\"QLineEdit { border : 1px solid gray;}\")\n self.synchronize()\n else:\n self.setStyleSheet(\"QLineEdit { border : 2px solid red;}\")\n self.invalid_input_signal.emit(\"Invalid '{}' value.\".format(self.attr))\n\n def synchronize(self):\n setattr(self.master, self.attr, self.text())\n\n\nclass AbsoluteRelativeSpinBox(QWidget):\n editingFinished = pyqtSignal()\n valueChanged = pyqtSignal()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args)\n layout = QStackedLayout(self)\n\n self.double_spin = QDoubleSpinBox()\n self.double_spin.valueChanged.connect(self.double_value_changed)\n self.double_spin.editingFinished.connect(self.double_editing_finished)\n layout.addWidget(self.double_spin)\n\n self.int_spin = QSpinBox()\n self.int_spin.setMaximum(10 ** 4)\n self.int_spin.valueChanged.connect(self.int_value_changed)\n self.int_spin.editingFinished.connect(self.int_editing_finished)\n layout.addWidget(self.int_spin)\n\n self.setValue(kwargs.get('value', 0.))\n\n def double_value_changed(self):\n if self.double_spin.value() > 1:\n self.layout().setCurrentIndex(1)\n self.int_spin.setValue(self.double_spin.value())\n\n self.valueChanged.emit()\n\n def double_editing_finished(self):\n if self.double_spin.value() <= 1.:\n self.editingFinished.emit()\n\n def int_value_changed(self):\n if self.int_spin.value() == 0:\n self.layout().setCurrentIndex(0)\n self.double_spin.setValue(1. - self.double_spin.singleStep())\n # There is no need to emit valueChanged signal.\n\n def int_editing_finished(self):\n if self.int_spin.value() > 0:\n self.editingFinished.emit()\n\n def value(self):\n return self.int_spin.value() or self.double_spin.value()\n\n def setValue(self, value):\n if isinstance(value, int):\n self.layout().setCurrentIndex(1)\n self.int_spin.setValue(value)\n else:\n self.layout().setCurrentIndex(0)\n self.double_spin.setValue(value)\n\n def setSingleStep(self, step):\n if isinstance(step, float):\n self.double_spin.setSingleStep(step)\n else:\n self.int_spin.setSingleStep(step)\n\n\nclass RangeWidget(QWidget):\n valueChanged = pyqtSignal()\n editingFinished = pyqtSignal()\n\n def __init__(self, widget, master, attribute, minimum=0., maximum=1., step=.05,\n min_label=None, max_label=None, allow_absolute=False, dtype=float,\n callback=None, *args):\n super().__init__(*args)\n if widget:\n widget.layout().addWidget(self)\n self.allow_absolute_values = allow_absolute\n self.master = master\n self.attribute = attribute\n self.min = minimum\n self.max = maximum\n self.step = step\n\n self.min_label = min_label\n self.max_label = max_label\n a, b = self.master_value()\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if self.allow_absolute_values:\n SpinBox = AbsoluteRelativeSpinBox\n else:\n if dtype == float:\n SpinBox = QDoubleSpinBox\n else:\n SpinBox = QSpinBox\n\n if self.min_label:\n layout.addWidget(QLabel(self.min_label))\n\n self.min_spin = SpinBox(value=a)\n self.min_spin.setSingleStep(self.step)\n layout.addWidget(self.min_spin)\n\n if self.max_label:\n layout.addWidget(QLabel(self.max_label))\n\n self.max_spin = SpinBox(value=b)\n self.max_spin.setSingleStep(self.step)\n layout.addWidget(self.max_spin)\n\n self.set_range()\n self.min_spin.valueChanged.connect(self.synchronize)\n self.min_spin.editingFinished.connect(self.editingFinished)\n self.max_spin.valueChanged.connect(self.synchronize)\n self.max_spin.editingFinished.connect(self.editingFinished)\n if callback:\n 
self.valueChanged.connect(callback)\n\n def synchronize(self):\n a, b = self.value()\n if isinstance(self.attribute, str):\n setattr(self.master, self.attribute, (a, b))\n else:\n setattr(self.master, self.attribute[0], a)\n setattr(self.master, self.attribute[1], b)\n self.set_range()\n self.valueChanged.emit()\n\n def master_value(self):\n if isinstance(self.attribute, str):\n return getattr(self.master, self.attribute)\n return (getattr(self.master, self.attribute[0]),\n getattr(self.master, self.attribute[1]))\n\n def value(self):\n return self.min_spin.value(), self.max_spin.value()\n\n def set_range(self):\n if not self.allow_absolute_values:\n a, b = self.value()\n self.min_spin.setRange(self.min, b)\n self.max_spin.setRange(a, self.max)\n\n\nclass ResourceLoader(QWidget, OWComponent):\n valueChanged = pyqtSignal(str, str)\n\n recent_files = settings.Setting([])\n resource_path = settings.Setting('')\n\n def __init__(self, widget, model_format, provider_format,\n model_button_label='Model', provider_button_label='Provider'):\n QWidget.__init__(self)\n OWComponent.__init__(self, widget)\n\n self.model_path = None\n layout = QHBoxLayout(self, spacing=0)\n layout.setContentsMargins(0, 0, 0, 0)\n\n self.model_widget = FileWidget(recent_files=self.recent_files, dialog_title='Load model',\n dialog_format=model_format, start_dir=None,\n on_open=self.load_model, allow_empty=False,\n reload_button=False, browse_label=model_button_label)\n self.model_path = self.recent_files[0] if self.recent_files else None\n\n layout.addWidget(self.model_widget)\n\n self.provider_widget = FileWidget(recent_files=None, dialog_title='Load provider',\n dialog_format=provider_format, start_dir=None,\n on_open=self.load_provider, allow_empty=False,\n reload_button=False, browse_label=provider_button_label)\n layout.addWidget(self.provider_widget)\n\n def load_model(self, path_to_file):\n self.model_path = path_to_file\n self.valueChanged.emit(self.model_path, self.resource_path)\n\n def load_provider(self, path_to_file):\n self.resource_path = path_to_file\n self.valueChanged.emit(self.model_path, self.resource_path)\n\n", "path": "orangecontrib/text/widgets/utils/widgets.py" } ]
diff --git a/orangecontrib/text/widgets/utils/widgets.py b/orangecontrib/text/widgets/utils/widgets.py index 6c27435a2..4528c9421 100644 --- a/orangecontrib/text/widgets/utils/widgets.py +++ b/orangecontrib/text/widgets/utils/widgets.py @@ -293,7 +293,8 @@ def browse(self, start_dir=None): self.recent_files.insert(0, path) self.update_combo() - self.open_file(path) + if path: + self.open_file(path) def select(self, n): name = self.file_combo.currentText()
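The segfault above starts with a cancelled file dialog: `QFileDialog.getOpenFileName` returns an empty path on cancel, the pre-fix `FileWidget.browse` passed that straight to `open_file`, and the empty string then travelled through `on_open`/`load_model` as if it were a tagger model file (the crash itself presumably happens in the native Stanford tagger bindings). The diff's `if path:` guard breaks that chain. Below is a self-contained sketch of the same guard, not Orange's widget code; it assumes AnyQt is installed, and the function and filter names are invented for the example.

```
import sys

from AnyQt.QtWidgets import QApplication, QFileDialog


def browse_for_model(parent=None, title="Load model",
                     name_filter="Stanford tagger models (*.tagger *.pickle)"):
    """Return the chosen path, or None if the dialog was cancelled."""
    path, _ = QFileDialog.getOpenFileName(parent, title, "", name_filter)
    return path or None                      # cancelled dialog -> '' -> None


if __name__ == "__main__":
    app = QApplication(sys.argv)
    model_path = browse_for_model()
    if model_path is None:
        print("Dialog cancelled; nothing is handed to the tagger loader.")
    else:
        print("Would load tagger from", model_path)
```

The `path or None` check is the same idiom as the upstream `if path:` fix: a cancelled dialog leaves whatever was previously loaded untouched.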
zigpy__zha-device-handlers-891
[Device Support Request] Lidl _TZ3000_oh7jddmx TS0502A

**Is your feature request related to a problem? Please describe.**
Very similar to #808
I have a LIDL ceiling light panel, which only supports CCT but is incorrectly reported to HA.

**Describe the solution you'd like**
Only exposing the color temperature.

**Device signature**
```
{
    "node_descriptor": "NodeDescriptor(byte1=1, byte2=64, mac_capability_flags=142, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=0, *allocate_address=True, *complex_descriptor_available=False, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False, *is_valid=True, *logical_type=<LogicalType.Router: 1>, *user_descriptor_available=False)",
    "endpoints": {
        "1": {
            "profile_id": 260,
            "device_type": "0x010c",
            "in_clusters": [
                "0x0000",
                "0x0003",
                "0x0004",
                "0x0005",
                "0x0006",
                "0x0008",
                "0x0300",
                "0x1000"
            ],
            "out_clusters": [
                "0x000a",
                "0x0019"
            ]
        },
        "242": {
            "profile_id": 41440,
            "device_type": "0x0061",
            "in_clusters": [],
            "out_clusters": [
                "0x0021"
            ]
        }
    },
    "manufacturer": "_TZ3000_oh7jddmx",
    "model": "TS0502A",
    "class": "zigpy.device.Device"
}
```

**Additional context**
I'm assuming adding the signature into https://github.com/zigpy/zha-device-handlers/blob/b180e4f7ab4a096688f4d4ad9b47ac1b3efa9fe2/zhaquirks/lidl/cct.py#L40-L46 will fix this.
~~I'll test it and open a PR if I find time for it.~~
Update: Successfully tested. PR opened.
[ { "content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py" } ]
[ { "content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n (\"_TZ3000_oh7jddmx\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py" } ]
diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py index 633323a952..b45bbed83a 100644 --- a/zhaquirks/lidl/cct.py +++ b/zhaquirks/lidl/cct.py @@ -43,6 +43,7 @@ class CCTLight(CustomDevice): ("_TZ3000_9evm3otq", "TS0502A"), ("_TZ3000_rylaozuc", "TS0502A"), ("_TZ3000_el5kt5im", "TS0502A"), + ("_TZ3000_oh7jddmx", "TS0502A"), ], ENDPOINTS: { 1: {
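As background on how the quirk achieves the requested behaviour: the custom Color cluster in the files above pins attribute 0x400A (`color_capabilities`) to 16, which in the ZCL Color Control cluster corresponds to the "color temperature supported" bit. A small plain-Python illustration (no zigpy needed; the constant names are ours, not from the library):

```python
# color_capabilities (attribute 0x400A) is a bit field in the ZCL Color Control
# cluster; bit 4 (value 16) signals "colour temperature supported", while bit 0
# signals hue/saturation. Pinning the attribute to 16 therefore advertises a
# CT-only light and hides the RGB colour wheel in Home Assistant.
COLOR_TEMP_BIT = 0b1_0000   # 16, the value used by the quirk
HUE_SAT_BIT = 0b0_0001

pinned_value = 16
print(bool(pinned_value & COLOR_TEMP_BIT))  # True  -> colour temperature exposed
print(bool(pinned_value & HUE_SAT_BIT))     # False -> no hue/saturation control
```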
Lightning-AI__torchmetrics-2430
Can't access metrics in a MetricCollection via keys returned in MetricCollection.keys

## 🐛 Bug

print(list(self.val_metrics_macro.keys()))
# prints "['val/MulticlassAccuracy/macro', 'val/MulticlassF1Score/macro', 'val/MulticlassPrecision/macro', 'val/MulticlassRecall/macro']"

print(f'val/accuracy/macro: {self.val_metrics_macro["val/MulticlassAccuracy/macro"].compute():.4}')
# throws error: KeyError: 'val/MulticlassAccuracy/macro'

### Expected behavior

If a key shows up in `.keys` it should be a valid key in `__getitem__`.

### Environment

- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): 1.3.1
- Python & PyTorch Version (e.g., 1.0): torch 2.2.0, python 3.10.12
- Any other relevant information such as OS (e.g., Linux): MacOS
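The reporter's snippet refers to an attribute defined elsewhere in their module, so here is a self-contained sketch of the same mismatch (the metric choice and prefix/postfix values are illustrative, not taken from the report):

```python
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy, MulticlassF1Score

metrics = MetricCollection(
    [MulticlassAccuracy(num_classes=3), MulticlassF1Score(num_classes=3)],
    prefix="val/",
    postfix="/macro",
)

print(list(metrics.keys()))
# ['val/MulticlassAccuracy/macro', 'val/MulticlassF1Score/macro']

metrics["MulticlassAccuracy"]            # works: __getitem__ looks up the base module name
metrics["val/MulticlassAccuracy/macro"]  # KeyError in 1.3.1: prefix/postfix are not stripped
```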
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# this is just a bypass for this module name collision with built-in one\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Any, Dict, Hashable, Iterable, Iterator, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleDict\nfrom typing_extensions import Literal\n\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import _flatten_dict, allclose\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_single_or_multi_val\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MetricCollection.plot\", \"MetricCollection.plot_all\"]\n\n\nclass MetricCollection(ModuleDict):\n \"\"\"MetricCollection class can be used to chain metrics that have the same call pattern into one single class.\n\n Args:\n metrics: One of the following\n\n * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name\n as key for output dict. Therefore, two metrics of the same class cannot be chained this way.\n\n * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric\n class name as key for the output dict.\n\n * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.\n Use this format if you want to chain together multiple of the same metric with different parameters.\n Note that the keys in the output dict will be sorted alphabetically.\n\n prefix: a string to append in front of the keys of the output dict\n\n postfix: a string to append after the keys of the output dict\n\n compute_groups:\n By default the MetricCollection will try to reduce the computations needed for the metrics in the collection\n by checking if they belong to the same **compute group**. All metrics in a compute group share the same\n metric state and are therefore only different in their compute step e.g. accuracy, precision and recall\n can all be computed from the true positives/negatives and false positives/negatives. By default,\n this argument is ``True`` which enables this feature. Set this argument to `False` for disabling\n this behaviour. Can also be set to a list of lists of metrics for setting the compute groups yourself.\n\n .. note::\n The compute groups feature can significantly speedup the calculation of metrics under the right conditions.\n First, the feature is only available when calling the ``update`` method and not when calling ``forward`` method\n due to the internal logic of ``forward`` preventing this. Secondly, since we compute groups share metric\n states by reference, calling ``.items()``, ``.values()`` etc. 
on the metric collection will break this\n reference and a copy of states are instead returned in this case (reference will be reestablished on the next\n call to ``update``).\n\n .. note::\n Metric collections can be nested at initialization (see last example) but the output of the collection will\n still be a single flatten dictionary combining the prefix and postfix arguments from the nested collection.\n\n Raises:\n ValueError:\n If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.\n ValueError:\n If two elements in ``metrics`` have the same ``name``.\n ValueError:\n If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.\n ValueError:\n If ``metrics`` is ``dict`` and additional_metrics are passed in.\n ValueError:\n If ``prefix`` is set and it is not a string.\n ValueError:\n If ``postfix`` is set and it is not a string.\n\n Example::\n In the most basic case, the metrics can be passed in as a list or tuple. The keys of the output dict will be\n the same as the class name of the metric:\n\n >>> from torch import tensor\n >>> from pprint import pprint\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall\n >>> target = tensor([0, 2, 0, 2, 0, 1, 0, 2])\n >>> preds = tensor([2, 1, 2, 0, 1, 2, 2, 2])\n >>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MulticlassRecall(num_classes=3, average='macro')])\n >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'MulticlassAccuracy': tensor(0.1250),\n 'MulticlassPrecision': tensor(0.0667),\n 'MulticlassRecall': tensor(0.1111)}\n\n Example::\n Alternatively, metrics can be passed in as arguments. The keys of the output dict will be the same as the\n class name of the metric:\n\n >>> metrics = MetricCollection(MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MulticlassRecall(num_classes=3, average='macro'))\n >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'MulticlassAccuracy': tensor(0.1250),\n 'MulticlassPrecision': tensor(0.0667),\n 'MulticlassRecall': tensor(0.1111)}\n\n Example::\n If multiple of the same metric class (with different parameters) should be chained together, metrics can be\n passed in as a dict and the output dict will have the same keys as the input dict:\n\n >>> metrics = MetricCollection({'micro_recall': MulticlassRecall(num_classes=3, average='micro'),\n ... 'macro_recall': MulticlassRecall(num_classes=3, average='macro')})\n >>> same_metric = metrics.clone()\n >>> pprint(metrics(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> pprint(same_metric(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n\n Example::\n Metric collections can also be nested up to a single time. The output of the collection will still be a single\n dict with the prefix and postfix arguments from the nested collection:\n\n >>> metrics = MetricCollection([\n ... MetricCollection([\n ... MulticlassAccuracy(num_classes=3, average='macro'),\n ... MulticlassPrecision(num_classes=3, average='macro')\n ... ], postfix='_macro'),\n ... MetricCollection([\n ... MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='micro')\n ... ], postfix='_micro'),\n ... 
], prefix='valmetrics/')\n >>> pprint(metrics(preds, target)) # doctest: +NORMALIZE_WHITESPACE\n {'valmetrics/MulticlassAccuracy_macro': tensor(0.1111),\n 'valmetrics/MulticlassAccuracy_micro': tensor(0.1250),\n 'valmetrics/MulticlassPrecision_macro': tensor(0.0667),\n 'valmetrics/MulticlassPrecision_micro': tensor(0.1250)}\n\n Example::\n The `compute_groups` argument allow you to specify which metrics should share metric state. By default, this\n will automatically be derived but can also be set manually.\n\n >>> metrics = MetricCollection(\n ... MulticlassRecall(num_classes=3, average='macro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MeanSquaredError(),\n ... compute_groups=[['MulticlassRecall', 'MulticlassPrecision'], ['MeanSquaredError']]\n ... )\n >>> metrics.update(preds, target)\n >>> pprint(metrics.compute())\n {'MeanSquaredError': tensor(2.3750), 'MulticlassPrecision': tensor(0.0667), 'MulticlassRecall': tensor(0.1111)}\n >>> pprint(metrics.compute_groups)\n {0: ['MulticlassRecall', 'MulticlassPrecision'], 1: ['MeanSquaredError']}\n\n \"\"\"\n\n _modules: Dict[str, Metric] # type: ignore[assignment]\n _groups: Dict[int, List[str]]\n\n def __init__(\n self,\n metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],\n *additional_metrics: Metric,\n prefix: Optional[str] = None,\n postfix: Optional[str] = None,\n compute_groups: Union[bool, List[List[str]]] = True,\n ) -> None:\n super().__init__()\n\n self.prefix = self._check_arg(prefix, \"prefix\")\n self.postfix = self._check_arg(postfix, \"postfix\")\n self._enable_compute_groups = compute_groups\n self._groups_checked: bool = False\n self._state_is_copy: bool = False\n\n self.add_metrics(metrics, *additional_metrics)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n \"\"\"Call forward for each metric sequentially.\n\n Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n\n \"\"\"\n return self._compute_and_reduce(\"forward\", *args, **kwargs)\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Call update for each metric sequentially.\n\n Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n\n \"\"\"\n # Use compute groups if already initialized and checked\n if self._groups_checked:\n for cg in self._groups.values():\n # only update the first member\n m0 = getattr(self, cg[0])\n m0.update(*args, **m0._filter_kwargs(**kwargs))\n if self._state_is_copy:\n # If we have deep copied state in between updates, reestablish link\n self._compute_groups_create_state_ref()\n self._state_is_copy = False\n else: # the first update always do per metric to form compute groups\n for m in self.values(copy_state=False):\n m_kwargs = m._filter_kwargs(**kwargs)\n m.update(*args, **m_kwargs)\n\n if self._enable_compute_groups:\n self._merge_compute_groups()\n # create reference between states\n self._compute_groups_create_state_ref()\n self._groups_checked = True\n\n def _merge_compute_groups(self) -> None:\n \"\"\"Iterate over the collection of metrics, checking if the state of each metric matches another.\n\n If so, their compute groups will be merged into one. 
The complexity of the method is approximately\n ``O(number_of_metrics_in_collection ** 2)``, as all metrics need to be compared to all other metrics.\n\n \"\"\"\n num_groups = len(self._groups)\n while True:\n for cg_idx1, cg_members1 in deepcopy(self._groups).items():\n for cg_idx2, cg_members2 in deepcopy(self._groups).items():\n if cg_idx1 == cg_idx2:\n continue\n\n metric1 = getattr(self, cg_members1[0])\n metric2 = getattr(self, cg_members2[0])\n\n if self._equal_metric_states(metric1, metric2):\n self._groups[cg_idx1].extend(self._groups.pop(cg_idx2))\n break\n\n # Start over if we merged groups\n if len(self._groups) != num_groups:\n break\n\n # Stop when we iterate over everything and do not merge any groups\n if len(self._groups) == num_groups:\n break\n num_groups = len(self._groups)\n\n # Re-index groups\n temp = deepcopy(self._groups)\n self._groups = {}\n for idx, values in enumerate(temp.values()):\n self._groups[idx] = values\n\n @staticmethod\n def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool:\n \"\"\"Check if the metric state of two metrics are the same.\"\"\"\n # empty state\n if len(metric1._defaults) == 0 or len(metric2._defaults) == 0:\n return False\n\n if metric1._defaults.keys() != metric2._defaults.keys():\n return False\n\n for key in metric1._defaults:\n state1 = getattr(metric1, key)\n state2 = getattr(metric2, key)\n\n if type(state1) != type(state2):\n return False\n\n if isinstance(state1, Tensor) and isinstance(state2, Tensor):\n return state1.shape == state2.shape and allclose(state1, state2)\n\n if isinstance(state1, list) and isinstance(state2, list):\n return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2))\n\n return True\n\n def _compute_groups_create_state_ref(self, copy: bool = False) -> None:\n \"\"\"Create reference between metrics in the same compute group.\n\n Args:\n copy: If `True` the metric state will between members will be copied instead\n of just passed by reference\n\n \"\"\"\n if not self._state_is_copy:\n for cg in self._groups.values():\n m0 = getattr(self, cg[0])\n for i in range(1, len(cg)):\n mi = getattr(self, cg[i])\n for state in m0._defaults:\n m0_state = getattr(m0, state)\n # Determine if we just should set a reference or a full copy\n setattr(mi, state, deepcopy(m0_state) if copy else m0_state)\n mi._update_count = deepcopy(m0._update_count) if copy else m0._update_count\n mi._computed = deepcopy(m0._computed) if copy else m0._computed\n self._state_is_copy = copy\n\n def compute(self) -> Dict[str, Any]:\n \"\"\"Compute the result for each metric in the collection.\"\"\"\n return self._compute_and_reduce(\"compute\")\n\n def _compute_and_reduce(\n self, method_name: Literal[\"compute\", \"forward\"], *args: Any, **kwargs: Any\n ) -> Dict[str, Any]:\n \"\"\"Compute result from collection and reduce into a single dictionary.\n\n Args:\n method_name: The method to call on each metric in the collection.\n Should be either `compute` or `forward`.\n args: Positional arguments to pass to each metric (if method_name is `forward`)\n kwargs: Keyword arguments to pass to each metric (if method_name is `forward`)\n\n Raises:\n ValueError:\n If method_name is not `compute` or `forward`.\n\n \"\"\"\n result = {}\n for k, m in self.items(keep_base=True, copy_state=False):\n if method_name == \"compute\":\n res = m.compute()\n elif method_name == \"forward\":\n res = m(*args, **m._filter_kwargs(**kwargs))\n else:\n raise ValueError(\"method_name should be either 'compute' or 'forward', but 
got {method_name}\")\n result[k] = res\n\n _, duplicates = _flatten_dict(result)\n\n flattened_results = {}\n for k, m in self.items(keep_base=True, copy_state=False):\n res = result[k]\n if isinstance(res, dict):\n for key, v in res.items():\n # if duplicates of keys we need to add unique prefix to each key\n if duplicates:\n stripped_k = k.replace(getattr(m, \"prefix\", \"\"), \"\")\n stripped_k = stripped_k.replace(getattr(m, \"postfix\", \"\"), \"\")\n key = f\"{stripped_k}_{key}\"\n if getattr(m, \"_from_collection\", None) and m.prefix is not None:\n key = f\"{m.prefix}{key}\"\n if getattr(m, \"_from_collection\", None) and m.postfix is not None:\n key = f\"{key}{m.postfix}\"\n flattened_results[key] = v\n else:\n flattened_results[k] = res\n return {self._set_name(k): v for k, v in flattened_results.items()}\n\n def reset(self) -> None:\n \"\"\"Call reset for each metric sequentially.\"\"\"\n for m in self.values(copy_state=False):\n m.reset()\n if self._enable_compute_groups and self._groups_checked:\n # reset state reference\n self._compute_groups_create_state_ref()\n\n def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> \"MetricCollection\":\n \"\"\"Make a copy of the metric collection.\n\n Args:\n prefix: a string to append in front of the metric keys\n postfix: a string to append after the keys of the output dict.\n\n \"\"\"\n mc = deepcopy(self)\n if prefix:\n mc.prefix = self._check_arg(prefix, \"prefix\")\n if postfix:\n mc.postfix = self._check_arg(postfix, \"postfix\")\n return mc\n\n def persistent(self, mode: bool = True) -> None:\n \"\"\"Change if metric states should be saved to its state_dict after initialization.\"\"\"\n for m in self.values(copy_state=False):\n m.persistent(mode)\n\n def add_metrics(\n self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric\n ) -> None:\n \"\"\"Add new metrics to Metric Collection.\"\"\"\n if isinstance(metrics, Metric):\n # set compatible with original type expectations\n metrics = [metrics]\n if isinstance(metrics, Sequence):\n # prepare for optional additions\n metrics = list(metrics)\n remain: list = []\n for m in additional_metrics:\n sel = metrics if isinstance(m, Metric) else remain\n sel.append(m)\n\n if remain:\n rank_zero_warn(\n f\"You have passes extra arguments {remain} which are not `Metric` so they will be ignored.\"\n )\n elif additional_metrics:\n raise ValueError(\n f\"You have passes extra arguments {additional_metrics} which are not compatible\"\n f\" with first passed dictionary {metrics} so they will be ignored.\"\n )\n\n if isinstance(metrics, dict):\n # Check all values are metrics\n # Make sure that metrics are added in deterministic order\n for name in sorted(metrics.keys()):\n metric = metrics[name]\n if not isinstance(metric, (Metric, MetricCollection)):\n raise ValueError(\n f\"Value {metric} belonging to key {name} is not an instance of\"\n \" `torchmetrics.Metric` or `torchmetrics.MetricCollection`\"\n )\n if isinstance(metric, Metric):\n self[name] = metric\n else:\n for k, v in metric.items(keep_base=False):\n v.postfix = metric.postfix\n v.prefix = metric.prefix\n v._from_collection = True\n self[f\"{name}_{k}\"] = v\n elif isinstance(metrics, Sequence):\n for metric in metrics:\n if not isinstance(metric, (Metric, MetricCollection)):\n raise ValueError(\n f\"Input {metric} to `MetricCollection` is not a instance of\"\n \" `torchmetrics.Metric` or `torchmetrics.MetricCollection`\"\n )\n if isinstance(metric, Metric):\n name = 
metric.__class__.__name__\n if name in self:\n raise ValueError(f\"Encountered two metrics both named {name}\")\n self[name] = metric\n else:\n for k, v in metric.items(keep_base=False):\n v.postfix = metric.postfix\n v.prefix = metric.prefix\n v._from_collection = True\n self[k] = v\n else:\n raise ValueError(\n \"Unknown input to MetricCollection. Expected, `Metric`, `MetricCollection` or `dict`/`sequence` of the\"\n f\" previous, but got {metrics}\"\n )\n\n self._groups_checked = False\n if self._enable_compute_groups:\n self._init_compute_groups()\n else:\n self._groups = {}\n\n def _init_compute_groups(self) -> None:\n \"\"\"Initialize compute groups.\n\n If user provided a list, we check that all metrics in the list are also in the collection. If set to `True` we\n simply initialize each metric in the collection as its own group\n\n \"\"\"\n if isinstance(self._enable_compute_groups, list):\n self._groups = dict(enumerate(self._enable_compute_groups))\n for v in self._groups.values():\n for metric in v:\n if metric not in self:\n raise ValueError(\n f\"Input {metric} in `compute_groups` argument does not match a metric in the collection.\"\n f\" Please make sure that {self._enable_compute_groups} matches {self.keys(keep_base=True)}\"\n )\n self._groups_checked = True\n else:\n # Initialize all metrics as their own compute group\n self._groups = {i: [str(k)] for i, k in enumerate(self.keys(keep_base=True))}\n\n @property\n def compute_groups(self) -> Dict[int, List[str]]:\n \"\"\"Return a dict with the current compute groups in the collection.\"\"\"\n return self._groups\n\n def _set_name(self, base: str) -> str:\n \"\"\"Adjust name of metric with both prefix and postfix.\"\"\"\n name = base if self.prefix is None else self.prefix + base\n return name if self.postfix is None else name + self.postfix\n\n def _to_renamed_ordered_dict(self) -> OrderedDict:\n od = OrderedDict()\n for k, v in self._modules.items():\n od[self._set_name(k)] = v\n return od\n\n def __iter__(self) -> Iterator[Hashable]:\n \"\"\"Return an iterator over the keys of the MetricDict.\"\"\"\n return iter(self.keys())\n\n # TODO: redefine this as native python dict\n def keys(self, keep_base: bool = False) -> Iterable[Hashable]:\n r\"\"\"Return an iterable of the ModuleDict key.\n\n Args:\n keep_base: Whether to add prefix/postfix on the items collection.\n\n \"\"\"\n if keep_base:\n return self._modules.keys()\n return self._to_renamed_ordered_dict().keys()\n\n def items(self, keep_base: bool = False, copy_state: bool = True) -> Iterable[Tuple[str, Metric]]:\n r\"\"\"Return an iterable of the ModuleDict key/value pairs.\n\n Args:\n keep_base: Whether to add prefix/postfix on the collection.\n copy_state:\n If metric states should be copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n if keep_base:\n return self._modules.items()\n return self._to_renamed_ordered_dict().items()\n\n def values(self, copy_state: bool = True) -> Iterable[Metric]:\n \"\"\"Return an iterable of the ModuleDict values.\n\n Args:\n copy_state:\n If metric states should be copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n return self._modules.values()\n\n def __getitem__(self, key: str, copy_state: bool = True) -> Metric:\n \"\"\"Retrieve a single metric from the collection.\n\n Args:\n key: name of metric to retrieve\n copy_state:\n If metric states should be 
copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n return self._modules[key]\n\n @staticmethod\n def _check_arg(arg: Optional[str], name: str) -> Optional[str]:\n if arg is None or isinstance(arg, str):\n return arg\n raise ValueError(f\"Expected input `{name}` to be a string, but got {type(arg)}\")\n\n def __repr__(self) -> str:\n \"\"\"Return the representation of the metric collection including all metrics in the collection.\"\"\"\n repr_str = super().__repr__()[:-2]\n if self.prefix:\n repr_str += f\",\\n prefix={self.prefix}{',' if self.postfix else ''}\"\n if self.postfix:\n repr_str += f\"{',' if not self.prefix else ''}\\n postfix={self.postfix}\"\n return repr_str + \"\\n)\"\n\n def set_dtype(self, dst_type: Union[str, torch.dtype]) -> \"MetricCollection\":\n \"\"\"Transfer all metric state to specific dtype. Special version of standard `type` method.\n\n Arguments:\n dst_type: the desired type as ``torch.dtype`` or string.\n\n \"\"\"\n for m in self.values(copy_state=False):\n m.set_dtype(dst_type)\n return self\n\n def plot(\n self,\n val: Optional[Union[Dict, Sequence[Dict]]] = None,\n ax: Optional[Union[_AX_TYPE, Sequence[_AX_TYPE]]] = None,\n together: bool = False,\n ) -> Sequence[_PLOT_OUT_TYPE]:\n \"\"\"Plot a single or multiple values from the metric.\n\n The plot method has two modes of operation. If argument `together` is set to `False` (default), the `.plot`\n method of each metric will be called individually and the result will be list of figures. If `together` is set\n to `True`, the values of all metrics will instead be plotted in the same figure.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: Either a single instance of matplotlib axis object or an sequence of matplotlib axis objects. If\n provided, will add the plots to the provided axis objects. If not provided, will create a new. If\n argument `together` is set to `True`, a single object is expected. If `together` is set to `False`,\n the number of axis objects needs to be the same length as the number of metrics in the collection.\n together: If `True`, will plot all metrics in the same axis. If `False`, will plot each metric in a separate\n\n Returns:\n Either install tuple of Figure and Axes object or an sequence of tuples with Figure and Axes object for each\n metric in the collection.\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n ValueError:\n If `together` is not an bool\n ValueError:\n If `ax` is not an instance of matplotlib axis object or a sequence of matplotlib axis objects\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall\n >>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])\n >>> metrics.update(torch.rand(10), torch.randint(2, (10,)))\n >>> fig_ax_ = metrics.plot()\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall\n >>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metrics(torch.rand(10), torch.randint(2, (10,))))\n >>> fig_, ax_ = metrics.plot(values, together=True)\n\n \"\"\"\n if not isinstance(together, bool):\n raise ValueError(f\"Expected argument `together` to be a boolean, but got {type(together)}\")\n if ax is not None:\n if together and not isinstance(ax, _AX_TYPE):\n raise ValueError(\n f\"Expected argument `ax` to be a matplotlib axis object, but got {type(ax)} when `together=True`\"\n )\n if not together and not (\n isinstance(ax, Sequence) and all(isinstance(a, _AX_TYPE) for a in ax) and len(ax) == len(self)\n ):\n raise ValueError(\n f\"Expected argument `ax` to be a sequence of matplotlib axis objects with the same length as the \"\n f\"number of metrics in the collection, but got {type(ax)} with len {len(ax)} when `together=False`\"\n )\n val = val or self.compute()\n if together:\n return plot_single_or_multi_val(val, ax=ax)\n fig_axs = []\n for i, (k, m) in enumerate(self.items(keep_base=False, copy_state=False)):\n if isinstance(val, dict):\n f, a = m.plot(val[k], ax=ax[i] if ax is not None else ax)\n elif isinstance(val, Sequence):\n f, a = m.plot([v[k] for v in val], ax=ax[i] if ax is not None else ax)\n fig_axs.append((f, a))\n return fig_axs\n", "path": "src/torchmetrics/collections.py" } ]
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# this is just a bypass for this module name collision with built-in one\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Any, Dict, Hashable, Iterable, Iterator, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleDict\nfrom typing_extensions import Literal\n\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import _flatten_dict, allclose\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE, plot_single_or_multi_val\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"MetricCollection.plot\", \"MetricCollection.plot_all\"]\n\n\nclass MetricCollection(ModuleDict):\n \"\"\"MetricCollection class can be used to chain metrics that have the same call pattern into one single class.\n\n Args:\n metrics: One of the following\n\n * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name\n as key for output dict. Therefore, two metrics of the same class cannot be chained this way.\n\n * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric\n class name as key for the output dict.\n\n * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.\n Use this format if you want to chain together multiple of the same metric with different parameters.\n Note that the keys in the output dict will be sorted alphabetically.\n\n prefix: a string to append in front of the keys of the output dict\n\n postfix: a string to append after the keys of the output dict\n\n compute_groups:\n By default the MetricCollection will try to reduce the computations needed for the metrics in the collection\n by checking if they belong to the same **compute group**. All metrics in a compute group share the same\n metric state and are therefore only different in their compute step e.g. accuracy, precision and recall\n can all be computed from the true positives/negatives and false positives/negatives. By default,\n this argument is ``True`` which enables this feature. Set this argument to `False` for disabling\n this behaviour. Can also be set to a list of lists of metrics for setting the compute groups yourself.\n\n .. note::\n The compute groups feature can significantly speedup the calculation of metrics under the right conditions.\n First, the feature is only available when calling the ``update`` method and not when calling ``forward`` method\n due to the internal logic of ``forward`` preventing this. Secondly, since we compute groups share metric\n states by reference, calling ``.items()``, ``.values()`` etc. 
on the metric collection will break this\n reference and a copy of states are instead returned in this case (reference will be reestablished on the next\n call to ``update``).\n\n .. note::\n Metric collections can be nested at initialization (see last example) but the output of the collection will\n still be a single flatten dictionary combining the prefix and postfix arguments from the nested collection.\n\n Raises:\n ValueError:\n If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.\n ValueError:\n If two elements in ``metrics`` have the same ``name``.\n ValueError:\n If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.\n ValueError:\n If ``metrics`` is ``dict`` and additional_metrics are passed in.\n ValueError:\n If ``prefix`` is set and it is not a string.\n ValueError:\n If ``postfix`` is set and it is not a string.\n\n Example::\n In the most basic case, the metrics can be passed in as a list or tuple. The keys of the output dict will be\n the same as the class name of the metric:\n\n >>> from torch import tensor\n >>> from pprint import pprint\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.regression import MeanSquaredError\n >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall\n >>> target = tensor([0, 2, 0, 2, 0, 1, 0, 2])\n >>> preds = tensor([2, 1, 2, 0, 1, 2, 2, 2])\n >>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MulticlassRecall(num_classes=3, average='macro')])\n >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'MulticlassAccuracy': tensor(0.1250),\n 'MulticlassPrecision': tensor(0.0667),\n 'MulticlassRecall': tensor(0.1111)}\n\n Example::\n Alternatively, metrics can be passed in as arguments. The keys of the output dict will be the same as the\n class name of the metric:\n\n >>> metrics = MetricCollection(MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MulticlassRecall(num_classes=3, average='macro'))\n >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'MulticlassAccuracy': tensor(0.1250),\n 'MulticlassPrecision': tensor(0.0667),\n 'MulticlassRecall': tensor(0.1111)}\n\n Example::\n If multiple of the same metric class (with different parameters) should be chained together, metrics can be\n passed in as a dict and the output dict will have the same keys as the input dict:\n\n >>> metrics = MetricCollection({'micro_recall': MulticlassRecall(num_classes=3, average='micro'),\n ... 'macro_recall': MulticlassRecall(num_classes=3, average='macro')})\n >>> same_metric = metrics.clone()\n >>> pprint(metrics(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> pprint(same_metric(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n\n Example::\n Metric collections can also be nested up to a single time. The output of the collection will still be a single\n dict with the prefix and postfix arguments from the nested collection:\n\n >>> metrics = MetricCollection([\n ... MetricCollection([\n ... MulticlassAccuracy(num_classes=3, average='macro'),\n ... MulticlassPrecision(num_classes=3, average='macro')\n ... ], postfix='_macro'),\n ... MetricCollection([\n ... MulticlassAccuracy(num_classes=3, average='micro'),\n ... MulticlassPrecision(num_classes=3, average='micro')\n ... ], postfix='_micro'),\n ... 
], prefix='valmetrics/')\n >>> pprint(metrics(preds, target)) # doctest: +NORMALIZE_WHITESPACE\n {'valmetrics/MulticlassAccuracy_macro': tensor(0.1111),\n 'valmetrics/MulticlassAccuracy_micro': tensor(0.1250),\n 'valmetrics/MulticlassPrecision_macro': tensor(0.0667),\n 'valmetrics/MulticlassPrecision_micro': tensor(0.1250)}\n\n Example::\n The `compute_groups` argument allow you to specify which metrics should share metric state. By default, this\n will automatically be derived but can also be set manually.\n\n >>> metrics = MetricCollection(\n ... MulticlassRecall(num_classes=3, average='macro'),\n ... MulticlassPrecision(num_classes=3, average='macro'),\n ... MeanSquaredError(),\n ... compute_groups=[['MulticlassRecall', 'MulticlassPrecision'], ['MeanSquaredError']]\n ... )\n >>> metrics.update(preds, target)\n >>> pprint(metrics.compute())\n {'MeanSquaredError': tensor(2.3750), 'MulticlassPrecision': tensor(0.0667), 'MulticlassRecall': tensor(0.1111)}\n >>> pprint(metrics.compute_groups)\n {0: ['MulticlassRecall', 'MulticlassPrecision'], 1: ['MeanSquaredError']}\n\n \"\"\"\n\n _modules: Dict[str, Metric] # type: ignore[assignment]\n _groups: Dict[int, List[str]]\n\n def __init__(\n self,\n metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],\n *additional_metrics: Metric,\n prefix: Optional[str] = None,\n postfix: Optional[str] = None,\n compute_groups: Union[bool, List[List[str]]] = True,\n ) -> None:\n super().__init__()\n\n self.prefix = self._check_arg(prefix, \"prefix\")\n self.postfix = self._check_arg(postfix, \"postfix\")\n self._enable_compute_groups = compute_groups\n self._groups_checked: bool = False\n self._state_is_copy: bool = False\n\n self.add_metrics(metrics, *additional_metrics)\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n \"\"\"Call forward for each metric sequentially.\n\n Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n\n \"\"\"\n return self._compute_and_reduce(\"forward\", *args, **kwargs)\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Call update for each metric sequentially.\n\n Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n\n \"\"\"\n # Use compute groups if already initialized and checked\n if self._groups_checked:\n for cg in self._groups.values():\n # only update the first member\n m0 = getattr(self, cg[0])\n m0.update(*args, **m0._filter_kwargs(**kwargs))\n if self._state_is_copy:\n # If we have deep copied state in between updates, reestablish link\n self._compute_groups_create_state_ref()\n self._state_is_copy = False\n else: # the first update always do per metric to form compute groups\n for m in self.values(copy_state=False):\n m_kwargs = m._filter_kwargs(**kwargs)\n m.update(*args, **m_kwargs)\n\n if self._enable_compute_groups:\n self._merge_compute_groups()\n # create reference between states\n self._compute_groups_create_state_ref()\n self._groups_checked = True\n\n def _merge_compute_groups(self) -> None:\n \"\"\"Iterate over the collection of metrics, checking if the state of each metric matches another.\n\n If so, their compute groups will be merged into one. 
The complexity of the method is approximately\n ``O(number_of_metrics_in_collection ** 2)``, as all metrics need to be compared to all other metrics.\n\n \"\"\"\n num_groups = len(self._groups)\n while True:\n for cg_idx1, cg_members1 in deepcopy(self._groups).items():\n for cg_idx2, cg_members2 in deepcopy(self._groups).items():\n if cg_idx1 == cg_idx2:\n continue\n\n metric1 = getattr(self, cg_members1[0])\n metric2 = getattr(self, cg_members2[0])\n\n if self._equal_metric_states(metric1, metric2):\n self._groups[cg_idx1].extend(self._groups.pop(cg_idx2))\n break\n\n # Start over if we merged groups\n if len(self._groups) != num_groups:\n break\n\n # Stop when we iterate over everything and do not merge any groups\n if len(self._groups) == num_groups:\n break\n num_groups = len(self._groups)\n\n # Re-index groups\n temp = deepcopy(self._groups)\n self._groups = {}\n for idx, values in enumerate(temp.values()):\n self._groups[idx] = values\n\n @staticmethod\n def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool:\n \"\"\"Check if the metric state of two metrics are the same.\"\"\"\n # empty state\n if len(metric1._defaults) == 0 or len(metric2._defaults) == 0:\n return False\n\n if metric1._defaults.keys() != metric2._defaults.keys():\n return False\n\n for key in metric1._defaults:\n state1 = getattr(metric1, key)\n state2 = getattr(metric2, key)\n\n if type(state1) != type(state2):\n return False\n\n if isinstance(state1, Tensor) and isinstance(state2, Tensor):\n return state1.shape == state2.shape and allclose(state1, state2)\n\n if isinstance(state1, list) and isinstance(state2, list):\n return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2))\n\n return True\n\n def _compute_groups_create_state_ref(self, copy: bool = False) -> None:\n \"\"\"Create reference between metrics in the same compute group.\n\n Args:\n copy: If `True` the metric state will between members will be copied instead\n of just passed by reference\n\n \"\"\"\n if not self._state_is_copy:\n for cg in self._groups.values():\n m0 = getattr(self, cg[0])\n for i in range(1, len(cg)):\n mi = getattr(self, cg[i])\n for state in m0._defaults:\n m0_state = getattr(m0, state)\n # Determine if we just should set a reference or a full copy\n setattr(mi, state, deepcopy(m0_state) if copy else m0_state)\n mi._update_count = deepcopy(m0._update_count) if copy else m0._update_count\n mi._computed = deepcopy(m0._computed) if copy else m0._computed\n self._state_is_copy = copy\n\n def compute(self) -> Dict[str, Any]:\n \"\"\"Compute the result for each metric in the collection.\"\"\"\n return self._compute_and_reduce(\"compute\")\n\n def _compute_and_reduce(\n self, method_name: Literal[\"compute\", \"forward\"], *args: Any, **kwargs: Any\n ) -> Dict[str, Any]:\n \"\"\"Compute result from collection and reduce into a single dictionary.\n\n Args:\n method_name: The method to call on each metric in the collection.\n Should be either `compute` or `forward`.\n args: Positional arguments to pass to each metric (if method_name is `forward`)\n kwargs: Keyword arguments to pass to each metric (if method_name is `forward`)\n\n Raises:\n ValueError:\n If method_name is not `compute` or `forward`.\n\n \"\"\"\n result = {}\n for k, m in self.items(keep_base=True, copy_state=False):\n if method_name == \"compute\":\n res = m.compute()\n elif method_name == \"forward\":\n res = m(*args, **m._filter_kwargs(**kwargs))\n else:\n raise ValueError(\"method_name should be either 'compute' or 'forward', but 
got {method_name}\")\n result[k] = res\n\n _, duplicates = _flatten_dict(result)\n\n flattened_results = {}\n for k, m in self.items(keep_base=True, copy_state=False):\n res = result[k]\n if isinstance(res, dict):\n for key, v in res.items():\n # if duplicates of keys we need to add unique prefix to each key\n if duplicates:\n stripped_k = k.replace(getattr(m, \"prefix\", \"\"), \"\")\n stripped_k = stripped_k.replace(getattr(m, \"postfix\", \"\"), \"\")\n key = f\"{stripped_k}_{key}\"\n if getattr(m, \"_from_collection\", None) and m.prefix is not None:\n key = f\"{m.prefix}{key}\"\n if getattr(m, \"_from_collection\", None) and m.postfix is not None:\n key = f\"{key}{m.postfix}\"\n flattened_results[key] = v\n else:\n flattened_results[k] = res\n return {self._set_name(k): v for k, v in flattened_results.items()}\n\n def reset(self) -> None:\n \"\"\"Call reset for each metric sequentially.\"\"\"\n for m in self.values(copy_state=False):\n m.reset()\n if self._enable_compute_groups and self._groups_checked:\n # reset state reference\n self._compute_groups_create_state_ref()\n\n def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> \"MetricCollection\":\n \"\"\"Make a copy of the metric collection.\n\n Args:\n prefix: a string to append in front of the metric keys\n postfix: a string to append after the keys of the output dict.\n\n \"\"\"\n mc = deepcopy(self)\n if prefix:\n mc.prefix = self._check_arg(prefix, \"prefix\")\n if postfix:\n mc.postfix = self._check_arg(postfix, \"postfix\")\n return mc\n\n def persistent(self, mode: bool = True) -> None:\n \"\"\"Change if metric states should be saved to its state_dict after initialization.\"\"\"\n for m in self.values(copy_state=False):\n m.persistent(mode)\n\n def add_metrics(\n self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric\n ) -> None:\n \"\"\"Add new metrics to Metric Collection.\"\"\"\n if isinstance(metrics, Metric):\n # set compatible with original type expectations\n metrics = [metrics]\n if isinstance(metrics, Sequence):\n # prepare for optional additions\n metrics = list(metrics)\n remain: list = []\n for m in additional_metrics:\n sel = metrics if isinstance(m, Metric) else remain\n sel.append(m)\n\n if remain:\n rank_zero_warn(\n f\"You have passes extra arguments {remain} which are not `Metric` so they will be ignored.\"\n )\n elif additional_metrics:\n raise ValueError(\n f\"You have passes extra arguments {additional_metrics} which are not compatible\"\n f\" with first passed dictionary {metrics} so they will be ignored.\"\n )\n\n if isinstance(metrics, dict):\n # Check all values are metrics\n # Make sure that metrics are added in deterministic order\n for name in sorted(metrics.keys()):\n metric = metrics[name]\n if not isinstance(metric, (Metric, MetricCollection)):\n raise ValueError(\n f\"Value {metric} belonging to key {name} is not an instance of\"\n \" `torchmetrics.Metric` or `torchmetrics.MetricCollection`\"\n )\n if isinstance(metric, Metric):\n self[name] = metric\n else:\n for k, v in metric.items(keep_base=False):\n v.postfix = metric.postfix\n v.prefix = metric.prefix\n v._from_collection = True\n self[f\"{name}_{k}\"] = v\n elif isinstance(metrics, Sequence):\n for metric in metrics:\n if not isinstance(metric, (Metric, MetricCollection)):\n raise ValueError(\n f\"Input {metric} to `MetricCollection` is not a instance of\"\n \" `torchmetrics.Metric` or `torchmetrics.MetricCollection`\"\n )\n if isinstance(metric, Metric):\n name = 
metric.__class__.__name__\n if name in self:\n raise ValueError(f\"Encountered two metrics both named {name}\")\n self[name] = metric\n else:\n for k, v in metric.items(keep_base=False):\n v.postfix = metric.postfix\n v.prefix = metric.prefix\n v._from_collection = True\n self[k] = v\n else:\n raise ValueError(\n \"Unknown input to MetricCollection. Expected, `Metric`, `MetricCollection` or `dict`/`sequence` of the\"\n f\" previous, but got {metrics}\"\n )\n\n self._groups_checked = False\n if self._enable_compute_groups:\n self._init_compute_groups()\n else:\n self._groups = {}\n\n def _init_compute_groups(self) -> None:\n \"\"\"Initialize compute groups.\n\n If user provided a list, we check that all metrics in the list are also in the collection. If set to `True` we\n simply initialize each metric in the collection as its own group\n\n \"\"\"\n if isinstance(self._enable_compute_groups, list):\n self._groups = dict(enumerate(self._enable_compute_groups))\n for v in self._groups.values():\n for metric in v:\n if metric not in self:\n raise ValueError(\n f\"Input {metric} in `compute_groups` argument does not match a metric in the collection.\"\n f\" Please make sure that {self._enable_compute_groups} matches {self.keys(keep_base=True)}\"\n )\n self._groups_checked = True\n else:\n # Initialize all metrics as their own compute group\n self._groups = {i: [str(k)] for i, k in enumerate(self.keys(keep_base=True))}\n\n @property\n def compute_groups(self) -> Dict[int, List[str]]:\n \"\"\"Return a dict with the current compute groups in the collection.\"\"\"\n return self._groups\n\n def _set_name(self, base: str) -> str:\n \"\"\"Adjust name of metric with both prefix and postfix.\"\"\"\n name = base if self.prefix is None else self.prefix + base\n return name if self.postfix is None else name + self.postfix\n\n def _to_renamed_ordered_dict(self) -> OrderedDict:\n od = OrderedDict()\n for k, v in self._modules.items():\n od[self._set_name(k)] = v\n return od\n\n def __iter__(self) -> Iterator[Hashable]:\n \"\"\"Return an iterator over the keys of the MetricDict.\"\"\"\n return iter(self.keys())\n\n # TODO: redefine this as native python dict\n def keys(self, keep_base: bool = False) -> Iterable[Hashable]:\n r\"\"\"Return an iterable of the ModuleDict key.\n\n Args:\n keep_base: Whether to add prefix/postfix on the items collection.\n\n \"\"\"\n if keep_base:\n return self._modules.keys()\n return self._to_renamed_ordered_dict().keys()\n\n def items(self, keep_base: bool = False, copy_state: bool = True) -> Iterable[Tuple[str, Metric]]:\n r\"\"\"Return an iterable of the ModuleDict key/value pairs.\n\n Args:\n keep_base: Whether to add prefix/postfix on the collection.\n copy_state:\n If metric states should be copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n if keep_base:\n return self._modules.items()\n return self._to_renamed_ordered_dict().items()\n\n def values(self, copy_state: bool = True) -> Iterable[Metric]:\n \"\"\"Return an iterable of the ModuleDict values.\n\n Args:\n copy_state:\n If metric states should be copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n return self._modules.values()\n\n def __getitem__(self, key: str, copy_state: bool = True) -> Metric:\n \"\"\"Retrieve a single metric from the collection.\n\n Args:\n key: name of metric to retrieve\n copy_state:\n If metric states should be 
copied between metrics in the same compute group or just passed by reference\n\n \"\"\"\n self._compute_groups_create_state_ref(copy_state)\n if self.prefix:\n key = key.removeprefix(self.prefix)\n if self.postfix:\n key = key.removesuffix(self.postfix)\n return self._modules[key]\n\n @staticmethod\n def _check_arg(arg: Optional[str], name: str) -> Optional[str]:\n if arg is None or isinstance(arg, str):\n return arg\n raise ValueError(f\"Expected input `{name}` to be a string, but got {type(arg)}\")\n\n def __repr__(self) -> str:\n \"\"\"Return the representation of the metric collection including all metrics in the collection.\"\"\"\n repr_str = super().__repr__()[:-2]\n if self.prefix:\n repr_str += f\",\\n prefix={self.prefix}{',' if self.postfix else ''}\"\n if self.postfix:\n repr_str += f\"{',' if not self.prefix else ''}\\n postfix={self.postfix}\"\n return repr_str + \"\\n)\"\n\n def set_dtype(self, dst_type: Union[str, torch.dtype]) -> \"MetricCollection\":\n \"\"\"Transfer all metric state to specific dtype. Special version of standard `type` method.\n\n Arguments:\n dst_type: the desired type as ``torch.dtype`` or string.\n\n \"\"\"\n for m in self.values(copy_state=False):\n m.set_dtype(dst_type)\n return self\n\n def plot(\n self,\n val: Optional[Union[Dict, Sequence[Dict]]] = None,\n ax: Optional[Union[_AX_TYPE, Sequence[_AX_TYPE]]] = None,\n together: bool = False,\n ) -> Sequence[_PLOT_OUT_TYPE]:\n \"\"\"Plot a single or multiple values from the metric.\n\n The plot method has two modes of operation. If argument `together` is set to `False` (default), the `.plot`\n method of each metric will be called individually and the result will be list of figures. If `together` is set\n to `True`, the values of all metrics will instead be plotted in the same figure.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: Either a single instance of matplotlib axis object or an sequence of matplotlib axis objects. If\n provided, will add the plots to the provided axis objects. If not provided, will create a new. If\n argument `together` is set to `True`, a single object is expected. If `together` is set to `False`,\n the number of axis objects needs to be the same length as the number of metrics in the collection.\n together: If `True`, will plot all metrics in the same axis. If `False`, will plot each metric in a separate\n\n Returns:\n Either install tuple of Figure and Axes object or an sequence of tuples with Figure and Axes object for each\n metric in the collection.\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n ValueError:\n If `together` is not an bool\n ValueError:\n If `ax` is not an instance of matplotlib axis object or a sequence of matplotlib axis objects\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall\n >>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])\n >>> metrics.update(torch.rand(10), torch.randint(2, (10,)))\n >>> fig_ax_ = metrics.plot()\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.classification import BinaryAccuracy, BinaryPrecision, BinaryRecall\n >>> metrics = MetricCollection([BinaryAccuracy(), BinaryPrecision(), BinaryRecall()])\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metrics(torch.rand(10), torch.randint(2, (10,))))\n >>> fig_, ax_ = metrics.plot(values, together=True)\n\n \"\"\"\n if not isinstance(together, bool):\n raise ValueError(f\"Expected argument `together` to be a boolean, but got {type(together)}\")\n if ax is not None:\n if together and not isinstance(ax, _AX_TYPE):\n raise ValueError(\n f\"Expected argument `ax` to be a matplotlib axis object, but got {type(ax)} when `together=True`\"\n )\n if not together and not (\n isinstance(ax, Sequence) and all(isinstance(a, _AX_TYPE) for a in ax) and len(ax) == len(self)\n ):\n raise ValueError(\n f\"Expected argument `ax` to be a sequence of matplotlib axis objects with the same length as the \"\n f\"number of metrics in the collection, but got {type(ax)} with len {len(ax)} when `together=False`\"\n )\n val = val or self.compute()\n if together:\n return plot_single_or_multi_val(val, ax=ax)\n fig_axs = []\n for i, (k, m) in enumerate(self.items(keep_base=False, copy_state=False)):\n if isinstance(val, dict):\n f, a = m.plot(val[k], ax=ax[i] if ax is not None else ax)\n elif isinstance(val, Sequence):\n f, a = m.plot([v[k] for v in val], ax=ax[i] if ax is not None else ax)\n fig_axs.append((f, a))\n return fig_axs\n", "path": "src/torchmetrics/collections.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 537d5cf61f7..dd66310e14a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed +- Fix getitem for metric collection when prefix/postfix is set ([#2430](https://github.com/Lightning-AI/torchmetrics/pull/2430)) + + - Fixed axis names with Precision-Recall curve ([#2462](https://github.com/Lightning-AI/torchmetrics/pull/2462)) diff --git a/src/torchmetrics/collections.py b/src/torchmetrics/collections.py index d6ad1287c58..e4b0dbafd2a 100644 --- a/src/torchmetrics/collections.py +++ b/src/torchmetrics/collections.py @@ -547,6 +547,10 @@ def __getitem__(self, key: str, copy_state: bool = True) -> Metric: """ self._compute_groups_create_state_ref(copy_state) + if self.prefix: + key = key.removeprefix(self.prefix) + if self.postfix: + key = key.removesuffix(self.postfix) return self._modules[key] @staticmethod diff --git a/tests/unittests/bases/test_collections.py b/tests/unittests/bases/test_collections.py index 0e124125509..a677c92ddb1 100644 --- a/tests/unittests/bases/test_collections.py +++ b/tests/unittests/bases/test_collections.py @@ -33,6 +33,7 @@ MultilabelAveragePrecision, ) from torchmetrics.utilities.checks import _allclose_recursive +from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_2_0 from unittests._helpers import seed_all from unittests._helpers.testers import DummyMetricDiff, DummyMetricMultiOutputDict, DummyMetricSum @@ -150,6 +151,7 @@ def test_metric_collection_args_kwargs(tmpdir): assert metric_collection["DummyMetricDiff"].x == -20 [email protected](not _TORCH_GREATER_EQUAL_2_0, reason="Test requires torch 2.0 or higher") @pytest.mark.parametrize( ("prefix", "postfix"), [ @@ -204,6 +206,10 @@ def test_metric_collection_prefix_postfix_args(prefix, postfix): for name in names: assert f"new_prefix_{name}_new_postfix" in out, "postfix argument not working as intended with clone method" + keys = list(new_metric_collection.keys()) + for k in keys: + assert new_metric_collection[k] # check that the keys are valid even with prefix and postfix + def test_metric_collection_repr(): """Test MetricCollection."""
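The patch above normalizes the lookup key before indexing the underlying `ModuleDict`. `str.removeprefix` and `str.removesuffix` (available since Python 3.9) return the string unchanged when the affix is absent, so both the decorated and the base spelling resolve to the same module name; a quick worked example:

```python
prefix, postfix = "val/", "/macro"

decorated = "val/MulticlassAccuracy/macro"
print(decorated.removeprefix(prefix).removesuffix(postfix))  # 'MulticlassAccuracy'

base = "MulticlassAccuracy"
print(base.removeprefix(prefix).removesuffix(postfix))       # unchanged: 'MulticlassAccuracy'
```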
ckan__ckan-7125
Is ckan.datasets_per_page not being used?
**CKAN version**
CKAN 2.9.5

**Describe the bug**
I set the _ckan.datasets_per_page_ configuration option to 10 and it works fine on these routes:
- /dataset/id
- /group
- /organization

But it is not working on these routes:
- /group/id
- /organization/id

where it always shows 20 results per page.

Reading through the source code, I noticed that the _read_ function called in those routes receives a _limit_ parameter with a default value of 20, but it is not updated in the function body with something like
```python
the_limit = config.get('ckan.datasets_per_page', limit)
```
I added this change in my local installation and it worked.

Is this by design, or is it a bug? I just want to be sure that this is a bug before opening a PR.

https://github.com/ckan/ckan/blob/dc49cece33e447858d0008bc71b9a4ab724602ee/ckan/views/group.py#L408-L411

Thanks.
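As a rough illustration of the fallback the reporter suggests (this is not the actual CKAN patch): let the configured page size override the view's hard-coded default of 20. In the sketch below, `config` is a plain dict standing in for `ckan.common.config`, and `read_page_size` is a hypothetical helper, not a CKAN function.

```python
# Minimal sketch of the suggested behaviour, under the assumptions above.
config = {"ckan.datasets_per_page": 10}


def read_page_size(limit: int = 20) -> int:
    # Use the configured value when present, otherwise keep the view's default.
    return config.get("ckan.datasets_per_page", limit)


assert read_page_size() == 10   # option set -> 10 results per page
config.clear()
assert read_page_size() == 20   # option unset -> fall back to the default of 20
```

The updated `ckan/views/group.py` below takes the same approach inside the group/organization `read` view, so the default of 20 only applies when the option is not configured.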
[ { "content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom typing import Any, Optional, Union, cast\nfrom typing_extensions import Literal\n\nfrom urllib.parse import urlencode\n\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\nimport ckan.lib.navl.dictization_functions as dict_fns\nimport ckan.logic as logic\nimport ckan.lib.search as search\nimport ckan.model as model\nimport ckan.authz as authz\nimport ckan.lib.plugins as lib_plugins\nimport ckan.plugins as plugins\nfrom ckan.common import g, config, request, current_user, _\nfrom ckan.views.home import CACHE_PARAMETERS\nfrom ckan.views.dataset import _get_search_details\n\nfrom flask import Blueprint\nfrom flask.views import MethodView\nfrom flask.wrappers import Response\nfrom ckan.types import Action, Context, DataDict, Schema\n\n\nNotFound = logic.NotFound\nNotAuthorized = logic.NotAuthorized\nValidationError = logic.ValidationError\ncheck_access = logic.check_access\nget_action = logic.get_action\ntuplize_dict = logic.tuplize_dict\nclean_dict = logic.clean_dict\nparse_params = logic.parse_params\n\nlog = logging.getLogger(__name__)\n\nlookup_group_plugin = lib_plugins.lookup_group_plugin\nlookup_group_controller = lib_plugins.lookup_group_controller\n\nis_org = False\n\n\ndef _get_group_template(template_type: str,\n group_type: Optional[str] = None) -> str:\n group_plugin = lookup_group_plugin(group_type)\n method = getattr(group_plugin, template_type)\n try:\n return method(group_type)\n except TypeError as err:\n if u'takes 1' not in str(err) and u'takes exactly 1' not in str(err):\n raise\n return method()\n\n\ndef _db_to_form_schema(group_type: Optional[str] = None) -> Schema:\n u'''This is an interface to manipulate data from the database\n into a format suitable for the form (optional)'''\n return lookup_group_plugin(group_type).db_to_form_schema()\n\n\ndef _setup_template_variables(context: Context,\n data_dict: DataDict,\n group_type: Optional[str] = None) -> None:\n if u'type' not in data_dict:\n data_dict[u'type'] = group_type\n return lookup_group_plugin(group_type).\\\n setup_template_variables(context, data_dict)\n\n\ndef _replace_group_org(string: str) -> str:\n u''' substitute organization for group if this is an org'''\n if is_org:\n return re.sub(u'^group', u'organization', string)\n return string\n\n\ndef _action(action_name: str) -> Action:\n u''' select the correct group/org action '''\n return get_action(_replace_group_org(action_name))\n\n\ndef _check_access(action_name: str, *args: Any, **kw: Any) -> Literal[True]:\n u''' select the correct group/org check_access '''\n return check_access(_replace_group_org(action_name), *args, **kw)\n\n\ndef _force_reindex(grp: dict[str, Any]) -> None:\n u''' When the group name has changed, we need to force a reindex\n of the datasets within the group, otherwise they will stop\n appearing on the read page for the group (as they're connected via\n the group name)'''\n group = model.Group.get(grp['name'])\n assert group\n for dataset in group.packages():\n search.rebuild(dataset.name)\n\n\ndef _guess_group_type(expecting_name: bool = False) -> str:\n u\"\"\"\n Guess the type of group from the URL.\n * The default url '/group/xyz' returns None\n * group_type is unicode\n * this handles the case where there is a prefix on the URL\n (such as /data/organization)\n \"\"\"\n parts: list[str] = request.path.split(u'/')\n parts = [x for x in parts if x]\n\n idx = 0\n if expecting_name:\n 
idx = -1\n\n gt = parts[idx]\n\n return gt\n\n\ndef set_org(is_organization: bool) -> None:\n global is_org\n is_org = is_organization\n\n\ndef index(group_type: str, is_organization: bool) -> str:\n extra_vars: dict[str, Any] = {}\n set_org(is_organization)\n page = h.get_page_number(request.args) or 1\n items_per_page = config.get_value('ckan.datasets_per_page')\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'for_view': True,\n u'with_private': False\n })\n\n try:\n assert _check_access(u'site_read', context)\n assert _check_access(u'group_list', context)\n except NotAuthorized:\n base.abort(403, _(u'Not authorized to see this page'))\n\n q = request.args.get(u'q', u'')\n sort_by = request.args.get(u'sort')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n g.sort_by_selected = sort_by\n\n extra_vars[\"q\"] = q\n extra_vars[\"sort_by_selected\"] = sort_by\n\n # pass user info to context as needed to view private datasets of\n # orgs correctly\n if current_user.is_authenticated:\n context['user_id'] = current_user.id # type: ignore\n context['user_is_admin'] = current_user.sysadmin # type: ignore\n\n try:\n data_dict_global_results: dict[str, Any] = {\n u'all_fields': False,\n u'q': q,\n u'sort': sort_by,\n u'type': group_type or u'group',\n }\n global_results = _action(u'group_list')(context,\n data_dict_global_results)\n except ValidationError as e:\n if e.error_dict and e.error_dict.get(u'message'):\n msg: Any = e.error_dict['message']\n else:\n msg = str(e)\n h.flash_error(msg)\n extra_vars[\"page\"] = h.Page([], 0)\n extra_vars[\"group_type\"] = group_type\n return base.render(\n _get_group_template(u'index_template', group_type), extra_vars)\n\n data_dict_page_results: dict[str, Any] = {\n u'all_fields': True,\n u'q': q,\n u'sort': sort_by,\n u'type': group_type or u'group',\n u'limit': items_per_page,\n u'offset': items_per_page * (page - 1),\n u'include_extras': True\n }\n page_results = _action(u'group_list')(context, data_dict_page_results)\n\n extra_vars[\"page\"] = h.Page(\n collection=global_results,\n page=page,\n url=h.pager_url,\n items_per_page=items_per_page, )\n\n extra_vars[\"page\"].items = page_results\n extra_vars[\"group_type\"] = group_type\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.page = extra_vars[\"page\"]\n return base.render(\n _get_group_template(u'index_template', group_type), extra_vars)\n\n\ndef _read(id: Optional[str], limit: int, group_type: str) -> dict[str, Any]:\n u''' This is common code used by both read and bulk_process'''\n extra_vars: dict[str, Any] = {}\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True,\n u'extras_as_string': True\n })\n\n q = request.args.get(u'q', u'')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n\n # Search within group\n if g.group_dict.get(u'is_organization'):\n fq = u' owner_org:\"%s\"' % g.group_dict.get(u'id')\n else:\n fq = u' groups:\"%s\"' % g.group_dict.get(u'name')\n\n extra_vars[\"q\"] = q\n\n g.description_formatted = \\\n h.render_markdown(g.group_dict.get(u'description'))\n\n context['return_query'] = True\n\n 
page = h.get_page_number(request.args)\n\n # most search operations should reset the page counter:\n params_nopage = [(k, v) for k, v in request.args.items(multi=True)\n if k != u'page']\n sort_by = request.args.get(u'sort', None)\n\n def search_url(params: Any) -> str:\n action = u'bulk_process' if getattr(\n g, u'action', u'') == u'bulk_process' else u'read'\n url = h.url_for(u'.'.join([group_type, action]), id=id)\n params = [(k, v.encode(u'utf-8')\n if isinstance(v, str) else str(v))\n for k, v in params]\n return url + u'?' + urlencode(params)\n\n def remove_field(\n key: str, value: Optional[str] = None,\n replace: Optional[str] = None):\n controller = lookup_group_controller(group_type)\n return h.remove_url_param(\n key,\n value=value,\n replace=replace,\n controller=controller,\n action=u'read',\n extras=dict(id=g.group_dict.get(u'name')))\n\n extra_vars[\"remove_field\"] = remove_field\n\n def pager_url(q: Any = None, page: Optional[int] = None):\n params: list[tuple[str, Any]] = list(params_nopage)\n params.append((u'page', page))\n return search_url(params)\n\n details = _get_search_details()\n extra_vars[u'fields'] = details[u'fields']\n extra_vars[u'fields_grouped'] = details[u'fields_grouped']\n fq += details[u'fq']\n search_extras = details[u'search_extras']\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.fields = extra_vars[u'fields']\n g.fields_grouped = extra_vars[u'fields_grouped']\n\n facets: \"OrderedDict[str, str]\" = OrderedDict()\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n default_facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license_id': _(u'Licenses')\n }\n\n for facet in h.facets():\n if facet in default_facet_titles:\n facets[facet] = default_facet_titles[facet]\n else:\n facets[facet] = facet\n\n # Facet titles\n facets = _update_facet_titles(facets, group_type)\n\n extra_vars[\"facet_titles\"] = facets\n\n data_dict: dict[str, Any] = {\n u'q': q,\n u'fq': fq,\n u'include_private': True,\n u'facet.field': list(facets.keys()),\n u'rows': limit,\n u'sort': sort_by,\n u'start': (page - 1) * limit,\n u'extras': search_extras\n }\n\n context_ = cast(\n Context, dict((k, v) for (k, v) in context.items() if k != u'schema')\n )\n try:\n query = get_action(u'package_search')(context_, data_dict)\n except search.SearchError as se:\n log.error(u'Group search error: %r', se.args)\n extra_vars[\"query_error\"] = True\n extra_vars[\"page\"] = h.Page(collection=[])\n else:\n extra_vars[\"page\"] = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict['package_count'] = query['count']\n\n extra_vars[\"search_facets\"] = query['search_facets']\n extra_vars[\"search_facets_limits\"] = g.search_facets_limits = {}\n default_limit: int = config.get_value(u'search.facets.default')\n for facet in extra_vars[\"search_facets\"].keys():\n limit = int(request.args.get(u'_%s_limit' % facet, default_limit))\n g.search_facets_limits[facet] = limit\n 
extra_vars[\"page\"].items = query['results']\n\n extra_vars[\"sort_by_selected\"] = sort_by\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.facet_titles = facets\n g.page = extra_vars[\"page\"]\n\n extra_vars[\"group_type\"] = group_type\n _setup_template_variables(context, {u'id': id}, group_type=group_type)\n return extra_vars\n\n\ndef _update_facet_titles(\n facets: 'OrderedDict[str, str]',\n group_type: str) -> 'OrderedDict[str, str]':\n for plugin in plugins.PluginImplementations(plugins.IFacets):\n facets = (\n plugin.group_facets(facets, group_type, None)\n if group_type == \"group\"\n else plugin.organization_facets(facets, group_type, None)\n )\n return facets\n\n\ndef _get_group_dict(id: str, group_type: str) -> dict[str, Any]:\n u''' returns the result of group_show action or aborts if there is a\n problem '''\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'for_view': True\n })\n try:\n return _action(u'group_show')(context, {\n u'id': id,\n u'include_datasets': False\n })\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n\n\ndef read(group_type: str,\n is_organization: bool,\n id: Optional[str] = None,\n limit: int = 20) -> Union[str, Response]:\n extra_vars = {}\n set_org(is_organization)\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True\n })\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n\n # unicode format (decoded from utf8)\n q = request.args.get(u'q', u'')\n\n extra_vars[\"q\"] = q\n\n try:\n # Do not query for the group datasets when dictizing, as they will\n # be ignored and get requested on the controller anyway\n data_dict['include_datasets'] = False\n\n # Do not query group members as they aren't used in the view\n data_dict['include_users'] = False\n\n group_dict = _action(u'group_show')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n\n # if the user specified a group id, redirect to the group name\n if data_dict['id'] == group_dict['id'] and \\\n data_dict['id'] != group_dict['name']:\n\n url_with_name = h.url_for(u'{}.read'.format(group_type),\n id=group_dict['name'])\n\n return h.redirect_to(\n h.add_url_param(alternative_url=url_with_name))\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n g.group_dict = group_dict\n\n extra_vars = _read(id, limit, group_type)\n\n extra_vars[\"group_type\"] = group_type\n extra_vars[\"group_dict\"] = group_dict\n\n return base.render(\n _get_group_template(u'read_template', cast(str, g.group_dict['type'])),\n extra_vars)\n\n\ndef about(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n group_dict = _get_group_dict(id, group_type)\n group_type = group_dict['type']\n _setup_template_variables(context, {u'id': id}, group_type=group_type)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.group_type = group_type\n\n extra_vars: dict[str, Any] = {u\"group_dict\": 
group_dict,\n u\"group_type\": group_type}\n\n return base.render(\n _get_group_template(u'about_template', group_type), extra_vars)\n\n\ndef members(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n\n try:\n data_dict: dict[str, Any] = {u'id': id}\n assert check_access(u'group_edit_permissions', context, data_dict)\n members = get_action(u'member_list')(context, {\n u'id': id,\n u'object_type': u'user'\n })\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except NotAuthorized:\n base.abort(403,\n _(u'User %r not authorized to edit members of %s') %\n (current_user.name, id))\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.members = members\n g.group_dict = group_dict\n\n extra_vars: dict[str, Any] = {\n u\"members\": members,\n u\"group_dict\": group_dict,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/members.html'), extra_vars)\n\n\ndef member_delete(id: str, group_type: str,\n is_organization: bool) -> Union[Response, str]:\n extra_vars = {}\n set_org(is_organization)\n if u'cancel' in request.args:\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n try:\n assert _check_access(u'group_member_delete', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s members') % u'')\n\n try:\n user_id = request.args.get(u'user')\n if request.method == u'POST':\n _action(u'group_member_delete')(context, {\n u'id': id,\n u'user_id': user_id\n })\n h.flash_notice(_(u'Group member has been deleted.'))\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n user_dict = _action(u'user_show')(context, {u'id': user_id})\n\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s members') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n extra_vars: dict[str, Any] = {\n u\"user_id\": user_id,\n u\"user_dict\": user_dict,\n u\"group_id\": id,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/confirm_delete_member.html'),\n extra_vars)\n\n\ndef follow(id: str, group_type: str, is_organization: bool) -> Response:\n u'''Start following this group.'''\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n data_dict = {u'id': id}\n try:\n get_action(u'follow_group')(context, data_dict)\n group_dict = get_action(u'group_show')(context, data_dict)\n h.flash_success(\n _(u\"You are now following {0}\").format(group_dict['title']))\n\n id = group_dict['name']\n except ValidationError as e:\n error_message = (e.message or e.error_summary or e.error_dict)\n h.flash_error(error_message)\n except NotAuthorized as e:\n h.flash_error(e.message)\n return h.redirect_to(u'group.read', id=id)\n\n\ndef unfollow(id: str, group_type: str, is_organization: bool) -> Response:\n u'''Stop following this group.'''\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n data_dict = {u'id': id}\n 
try:\n get_action(u'unfollow_group')(context, data_dict)\n group_dict = get_action(u'group_show')(context, data_dict)\n h.flash_success(\n _(u\"You are no longer following {0}\").format(group_dict['title']))\n id = group_dict['name']\n except ValidationError as e:\n error_message = (e.message or e.error_summary or e.error_dict)\n h.flash_error(error_message)\n except NotFound as e:\n error_message = e.message or ''\n base.abort(404, _(error_message))\n except NotAuthorized as e:\n error_message = e.message or ''\n base.abort(403, _(error_message))\n return h.redirect_to(u'group.read', id=id)\n\n\ndef followers(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n group_dict = _get_group_dict(id, group_type)\n try:\n followers = \\\n get_action(u'group_follower_list')(context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to view followers %s') % u'')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.followers = followers\n\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u\"group_type\": group_type,\n u\"followers\": followers\n }\n return base.render(u'group/followers.html', extra_vars)\n\n\ndef admins(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n group_dict = _get_group_dict(id, group_type)\n admins = authz.get_group_or_org_admin_ids(id)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.admins = admins\n\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u'group_type': group_type,\n u\"admins\": admins\n }\n\n return base.render(\n _get_group_template(u'admins_template', group_dict['type']),\n extra_vars)\n\n\nclass BulkProcessView(MethodView):\n u''' Bulk process view'''\n\n def _prepare(self, group_type: str, id: str) -> Context:\n\n # check we are org admin\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True,\n u'extras_as_string': True\n })\n\n try:\n check_access(u'bulk_update_public', context, {u'org_id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to access'))\n\n return context\n\n def get(self, id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = self._prepare(group_type, id)\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n data_dict['include_datasets'] = False\n try:\n group_dict = _action(u'group_show')(context, data_dict)\n group = context['group']\n except NotFound:\n base.abort(404, _(u'Group not found'))\n\n if not group_dict['is_organization']:\n # FIXME: better error\n raise Exception(u'Must be an organization')\n\n # If no action then just show the datasets\n limit = 500\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n extra_vars = _read(id, limit, group_type)\n extra_vars['packages'] = g.page.items\n extra_vars['group_dict'] = group_dict\n extra_vars['group'] = group\n\n return base.render(\n 
_get_group_template(u'bulk_process_template', group_type),\n extra_vars)\n\n def post(\n self, id: str, group_type: str,\n is_organization: bool) -> Response:\n set_org(is_organization)\n context = self._prepare(group_type, id)\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n user = current_user.name\n try:\n # Do not query for the group datasets when dictizing, as they will\n # be ignored and get requested on the controller anyway\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n except NotFound:\n group_label = h.humanize_entity_type(\n u'organization' if is_organization else u'group',\n group_type,\n u'default label') or _(\n u'Organization' if is_organization else u'Group')\n base.abort(404, _(u'{} not found'.format(group_label)))\n except NotAuthorized:\n base.abort(403,\n _(u'User %r not authorized to edit %s') % (user, id))\n\n if not group_dict['is_organization']:\n # FIXME: better error\n raise Exception(u'Must be an organization')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n\n # use different form names so that ie7 can be detected\n form_names = set([\n u\"bulk_action.public\",\n u\"bulk_action.delete\",\n u\"bulk_action.private\"\n ])\n actions_in_form: set[str] = set(request.form.keys())\n actions = form_names.intersection(actions_in_form)\n # ie7 puts all buttons in form params but puts submitted one twice\n\n form_dict: dict[str, str] = request.form.to_dict()\n for key, value in form_dict.items():\n if value in [u'private', u'public']:\n action = key.split(u'.')[-1]\n break\n else:\n # normal good browser form submission\n action = actions.pop().split(u'.')[-1]\n\n # process the action first find the datasets to perform the action on.\n # they are prefixed by dataset_ in the form data\n datasets = []\n for param in request.form:\n if param.startswith(u'dataset_'):\n datasets.append(param[8:])\n\n action_functions = {\n u'private': u'bulk_update_private',\n u'public': u'bulk_update_public',\n u'delete': u'bulk_update_delete',\n }\n\n data_dict = {u'datasets': datasets, u'org_id': group_dict['id']}\n\n try:\n get_action(action_functions[action])(context, data_dict)\n except NotAuthorized:\n base.abort(403, _(u'Not authorized to perform bulk update'))\n return h.redirect_to(u'{}.bulk_process'.format(group_type), id=id)\n\n\nclass CreateGroupView(MethodView):\n u'''Create group view '''\n\n def _prepare(self, data: Optional[dict[str, Any]] = None) -> Context:\n if data and u'type' in data:\n group_type = data['type']\n else:\n group_type = _guess_group_type()\n if data:\n data['type'] = group_type\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'save': u'save' in request.args,\n u'parent': request.args.get(u'parent', None),\n u'group_type': group_type\n })\n\n try:\n assert _check_access(u'group_create', context)\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to create a group'))\n\n return context\n\n def post(self, group_type: str,\n is_organization: bool) -> Union[Response, str]:\n set_org(is_organization)\n context = self._prepare()\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict.update(clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.files)))\n ))\n except dict_fns.DataError:\n base.abort(400, _(u'Integrity Error'))\n user = 
current_user.name\n data_dict['type'] = group_type or u'group'\n data_dict['users'] = [{u'name': user, u'capacity': u'admin'}]\n try:\n group = _action(u'group_create')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n errors = e.error_dict\n error_summary = e.error_summary\n return self.get(group_type, is_organization,\n data_dict, errors, error_summary)\n\n return h.redirect_to(\n cast(str, group['type']) + u'.read', id=group['name'])\n\n def get(self,\n group_type: str,\n is_organization: bool,\n data: Optional[dict[str, Any]] = None,\n errors: Optional[dict[str, Any]] = None,\n error_summary: Optional[dict[str, Any]] = None) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = self._prepare()\n data = data or clean_dict(\n dict_fns.unflatten(\n tuplize_dict(\n parse_params(request.args, ignore_keys=CACHE_PARAMETERS)\n )\n )\n )\n\n if not data.get(u'image_url', u'').startswith(u'http'):\n data.pop(u'image_url', None)\n errors = errors or {}\n error_summary = error_summary or {}\n extra_vars: dict[str, Any] = {\n u'data': data,\n u'errors': errors,\n u'error_summary': error_summary,\n u'action': u'new',\n u'group_type': group_type\n }\n _setup_template_variables(\n context, data, group_type=group_type)\n form = base.render(\n _get_group_template(u'group_form', group_type), extra_vars)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.form = form\n\n extra_vars[\"form\"] = form\n return base.render(\n _get_group_template(u'new_template', group_type), extra_vars)\n\n\nclass EditGroupView(MethodView):\n u''' Edit group view'''\n\n def _prepare(self, id: Optional[str]) -> Context:\n data_dict: dict[str, Any] = {u'id': id, u'include_datasets': False}\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'save': u'save' in request.args,\n u'for_edit': True,\n u'parent': request.args.get(u'parent', None),\n u'id': id\n })\n\n try:\n _action(u'group_show')(context, data_dict)\n _check_access(u'group_update', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to create a group'))\n except NotFound:\n base.abort(404, _(u'Group not found'))\n\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Union[Response, str]:\n set_org(is_organization)\n context = self._prepare(id)\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict.update(clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.files)))\n ))\n except dict_fns.DataError:\n base.abort(400, _(u'Integrity Error'))\n data_dict['id'] = context['id']\n context['allow_partial_update'] = True\n try:\n group = _action(u'group_update')(context, data_dict)\n if id != group['name']:\n _force_reindex(group)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n errors = e.error_dict\n error_summary = e.error_summary\n assert id\n return self.get(id, group_type, is_organization,\n data_dict, errors, error_summary)\n return h.redirect_to(\n cast(str, group[u'type']) + u'.read', id=group[u'name'])\n\n def get(self,\n id: str,\n group_type: str,\n is_organization: bool,\n data: Optional[dict[str, Any]] = None,\n errors: Optional[dict[str, Any]] = None,\n error_summary: Optional[dict[str, Any]] = None) -> str:\n 
extra_vars = {}\n set_org(is_organization)\n context = self._prepare(id)\n data_dict: dict[str, Any] = {u'id': id, u'include_datasets': False}\n try:\n group_dict = _action(u'group_show')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n data = data or group_dict\n assert data is not None\n errors = errors or {}\n extra_vars: dict[str, Any] = {\n u'data': data,\n u\"group_dict\": group_dict,\n u'errors': errors,\n u'error_summary': error_summary,\n u'action': u'edit',\n u'group_type': group_type\n }\n\n _setup_template_variables(context, data, group_type=group_type)\n form = base.render(\n _get_group_template(u'group_form', group_type), extra_vars)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.grouptitle = group_dict.get(u'title')\n g.groupname = group_dict.get(u'name')\n g.data = data\n g.group_dict = group_dict\n\n extra_vars[\"form\"] = form\n return base.render(\n _get_group_template(u'edit_template', group_type), extra_vars)\n\n\nclass DeleteGroupView(MethodView):\n u'''Delete group view '''\n\n def _prepare(self, id: Optional[str] = None) -> Context:\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n })\n try:\n assert _check_access(u'group_delete', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s') % u'')\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Response:\n set_org(is_organization)\n context = self._prepare(id)\n try:\n _action(u'group_delete')(context, {u'id': id})\n group_label = h.humanize_entity_type(\n u'group',\n group_type,\n u'has been deleted') or _(u'Group')\n h.flash_notice(\n _(u'%s has been deleted.') % _(group_label))\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n base.abort(403, _(e.error_dict['message']))\n\n return h.redirect_to(u'{}.index'.format(group_type))\n\n def get(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Union[str, Response]:\n set_org(is_organization)\n context = self._prepare(id)\n group_dict = _action(u'group_show')(context, {u'id': id})\n if u'cancel' in request.args:\n return h.redirect_to(u'{}.edit'.format(group_type), id=id)\n\n # TODO: Remove\n g.group_dict = group_dict\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/confirm_delete.html'),\n extra_vars)\n\n\nclass MembersGroupView(MethodView):\n u'''New members group view'''\n\n def _prepare(self, id: Optional[str] = None) -> Context:\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n })\n try:\n assert _check_access(u'group_member_create', context, {u'id': id})\n except NotAuthorized:\n base.abort(403,\n _(u'Unauthorized to create group %s members') % u'')\n\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Response:\n set_org(is_organization)\n context = self._prepare(id)\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict['id'] = id\n\n email = data_dict.get(u'email')\n\n if email:\n user_data_dict: dict[str, Any] = {\n u'email': email,\n 
u'group_id': data_dict['id'],\n u'role': data_dict['role']\n }\n del data_dict['email']\n\n try:\n user_dict = _action(u'user_invite')(context, user_data_dict)\n except ValidationError as e:\n for error in e.error_summary.values():\n h.flash_error(error)\n return h.redirect_to(\n u'{}.member_new'.format(group_type), id=id)\n\n data_dict['username'] = user_dict['name']\n\n try:\n group_dict = _action(u'group_member_create')(context, data_dict)\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to add member to group %s') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n for error in e.error_summary.values():\n h.flash_error(error)\n return h.redirect_to(u'{}.member_new'.format(group_type), id=id)\n\n # TODO: Remove\n g.group_dict = group_dict\n\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n\n def get(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> str:\n extra_vars: dict[str, Any] = {}\n set_org(is_organization)\n context = self._prepare(id)\n user = request.args.get(u'user')\n data_dict: dict[str, Any] = {u'id': id}\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n roles = _action(u'member_roles_list')(context, {\n u'group_type': group_type\n })\n user_dict = {}\n if user:\n user_dict = get_action(u'user_show')(context, {u'id': user})\n user_role =\\\n authz.users_role_for_group_or_org(id, user) or u'member'\n # TODO: Remove\n g.user_dict = user_dict\n extra_vars[\"user_dict\"] = user_dict\n else:\n user_role = u'member'\n\n # TODO: Remove\n g.group_dict = group_dict\n g.roles = roles\n g.user_role = user_role\n\n extra_vars.update({\n u\"group_dict\": group_dict,\n u\"roles\": roles,\n u\"user_role\": user_role,\n u\"group_type\": group_type,\n u\"user_dict\": user_dict\n })\n return base.render(_replace_group_org(u'group/member_new.html'),\n extra_vars)\n\n\ngroup = Blueprint(u'group', __name__, url_prefix=u'/group',\n url_defaults={u'group_type': u'group',\n u'is_organization': False})\norganization = Blueprint(u'organization', __name__,\n url_prefix=u'/organization',\n url_defaults={u'group_type': u'organization',\n u'is_organization': True})\n\n\ndef register_group_plugin_rules(blueprint: Blueprint) -> None:\n actions = [\n u'member_delete', u'followers', u'follow',\n u'unfollow', u'admins',\n ]\n blueprint.add_url_rule(u'/', view_func=index, strict_slashes=False)\n blueprint.add_url_rule(\n u'/new',\n methods=[u'GET', u'POST'],\n view_func=CreateGroupView.as_view(str(u'new')))\n blueprint.add_url_rule(u'/<id>', methods=[u'GET'], view_func=read)\n blueprint.add_url_rule(\n u'/edit/<id>', view_func=EditGroupView.as_view(str(u'edit')))\n blueprint.add_url_rule(u'/about/<id>', methods=[u'GET'], view_func=about)\n blueprint.add_url_rule(\n u'/members/<id>', methods=[u'GET', u'POST'], view_func=members)\n blueprint.add_url_rule(\n u'/member_new/<id>',\n view_func=MembersGroupView.as_view(str(u'member_new')))\n blueprint.add_url_rule(\n u'/bulk_process/<id>',\n view_func=BulkProcessView.as_view(str(u'bulk_process')))\n blueprint.add_url_rule(\n u'/delete/<id>',\n methods=[u'GET', u'POST'],\n view_func=DeleteGroupView.as_view(str(u'delete')))\n for action in actions:\n blueprint.add_url_rule(\n u'/{0}/<id>'.format(action),\n methods=[u'GET', u'POST'],\n view_func=globals()[action])\n\n\nregister_group_plugin_rules(group)\nregister_group_plugin_rules(organization)\n", "path": "ckan/views/group.py" } ]
[ { "content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom typing import Any, Optional, Union, cast\nfrom typing_extensions import Literal\n\nfrom urllib.parse import urlencode\n\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\nimport ckan.lib.navl.dictization_functions as dict_fns\nimport ckan.logic as logic\nimport ckan.lib.search as search\nimport ckan.model as model\nimport ckan.authz as authz\nimport ckan.lib.plugins as lib_plugins\nimport ckan.plugins as plugins\nfrom ckan.common import g, config, request, current_user, _\nfrom ckan.views.home import CACHE_PARAMETERS\nfrom ckan.views.dataset import _get_search_details\n\nfrom flask import Blueprint\nfrom flask.views import MethodView\nfrom flask.wrappers import Response\nfrom ckan.types import Action, Context, DataDict, Schema\n\n\nNotFound = logic.NotFound\nNotAuthorized = logic.NotAuthorized\nValidationError = logic.ValidationError\ncheck_access = logic.check_access\nget_action = logic.get_action\ntuplize_dict = logic.tuplize_dict\nclean_dict = logic.clean_dict\nparse_params = logic.parse_params\n\nlog = logging.getLogger(__name__)\n\nlookup_group_plugin = lib_plugins.lookup_group_plugin\nlookup_group_controller = lib_plugins.lookup_group_controller\n\nis_org = False\n\n\ndef _get_group_template(template_type: str,\n group_type: Optional[str] = None) -> str:\n group_plugin = lookup_group_plugin(group_type)\n method = getattr(group_plugin, template_type)\n try:\n return method(group_type)\n except TypeError as err:\n if u'takes 1' not in str(err) and u'takes exactly 1' not in str(err):\n raise\n return method()\n\n\ndef _db_to_form_schema(group_type: Optional[str] = None) -> Schema:\n u'''This is an interface to manipulate data from the database\n into a format suitable for the form (optional)'''\n return lookup_group_plugin(group_type).db_to_form_schema()\n\n\ndef _setup_template_variables(context: Context,\n data_dict: DataDict,\n group_type: Optional[str] = None) -> None:\n if u'type' not in data_dict:\n data_dict[u'type'] = group_type\n return lookup_group_plugin(group_type).\\\n setup_template_variables(context, data_dict)\n\n\ndef _replace_group_org(string: str) -> str:\n u''' substitute organization for group if this is an org'''\n if is_org:\n return re.sub(u'^group', u'organization', string)\n return string\n\n\ndef _action(action_name: str) -> Action:\n u''' select the correct group/org action '''\n return get_action(_replace_group_org(action_name))\n\n\ndef _check_access(action_name: str, *args: Any, **kw: Any) -> Literal[True]:\n u''' select the correct group/org check_access '''\n return check_access(_replace_group_org(action_name), *args, **kw)\n\n\ndef _force_reindex(grp: dict[str, Any]) -> None:\n u''' When the group name has changed, we need to force a reindex\n of the datasets within the group, otherwise they will stop\n appearing on the read page for the group (as they're connected via\n the group name)'''\n group = model.Group.get(grp['name'])\n assert group\n for dataset in group.packages():\n search.rebuild(dataset.name)\n\n\ndef _guess_group_type(expecting_name: bool = False) -> str:\n u\"\"\"\n Guess the type of group from the URL.\n * The default url '/group/xyz' returns None\n * group_type is unicode\n * this handles the case where there is a prefix on the URL\n (such as /data/organization)\n \"\"\"\n parts: list[str] = request.path.split(u'/')\n parts = [x for x in parts if x]\n\n idx = 0\n if expecting_name:\n 
idx = -1\n\n gt = parts[idx]\n\n return gt\n\n\ndef set_org(is_organization: bool) -> None:\n global is_org\n is_org = is_organization\n\n\ndef index(group_type: str, is_organization: bool) -> str:\n extra_vars: dict[str, Any] = {}\n set_org(is_organization)\n page = h.get_page_number(request.args) or 1\n items_per_page = config.get_value('ckan.datasets_per_page')\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'for_view': True,\n u'with_private': False\n })\n\n try:\n assert _check_access(u'site_read', context)\n assert _check_access(u'group_list', context)\n except NotAuthorized:\n base.abort(403, _(u'Not authorized to see this page'))\n\n q = request.args.get(u'q', u'')\n sort_by = request.args.get(u'sort')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n g.sort_by_selected = sort_by\n\n extra_vars[\"q\"] = q\n extra_vars[\"sort_by_selected\"] = sort_by\n\n # pass user info to context as needed to view private datasets of\n # orgs correctly\n if current_user.is_authenticated:\n context['user_id'] = current_user.id # type: ignore\n context['user_is_admin'] = current_user.sysadmin # type: ignore\n\n try:\n data_dict_global_results: dict[str, Any] = {\n u'all_fields': False,\n u'q': q,\n u'sort': sort_by,\n u'type': group_type or u'group',\n }\n global_results = _action(u'group_list')(context,\n data_dict_global_results)\n except ValidationError as e:\n if e.error_dict and e.error_dict.get(u'message'):\n msg: Any = e.error_dict['message']\n else:\n msg = str(e)\n h.flash_error(msg)\n extra_vars[\"page\"] = h.Page([], 0)\n extra_vars[\"group_type\"] = group_type\n return base.render(\n _get_group_template(u'index_template', group_type), extra_vars)\n\n data_dict_page_results: dict[str, Any] = {\n u'all_fields': True,\n u'q': q,\n u'sort': sort_by,\n u'type': group_type or u'group',\n u'limit': items_per_page,\n u'offset': items_per_page * (page - 1),\n u'include_extras': True\n }\n page_results = _action(u'group_list')(context, data_dict_page_results)\n\n extra_vars[\"page\"] = h.Page(\n collection=global_results,\n page=page,\n url=h.pager_url,\n items_per_page=items_per_page, )\n\n extra_vars[\"page\"].items = page_results\n extra_vars[\"group_type\"] = group_type\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.page = extra_vars[\"page\"]\n return base.render(\n _get_group_template(u'index_template', group_type), extra_vars)\n\n\ndef _read(id: Optional[str], limit: int, group_type: str) -> dict[str, Any]:\n u''' This is common code used by both read and bulk_process'''\n extra_vars: dict[str, Any] = {}\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True,\n u'extras_as_string': True\n })\n\n q = request.args.get(u'q', u'')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n\n # Search within group\n if g.group_dict.get(u'is_organization'):\n fq = u' owner_org:\"%s\"' % g.group_dict.get(u'id')\n else:\n fq = u' groups:\"%s\"' % g.group_dict.get(u'name')\n\n extra_vars[\"q\"] = q\n\n g.description_formatted = \\\n h.render_markdown(g.group_dict.get(u'description'))\n\n context['return_query'] = True\n\n 
page = h.get_page_number(request.args)\n\n # most search operations should reset the page counter:\n params_nopage = [(k, v) for k, v in request.args.items(multi=True)\n if k != u'page']\n sort_by = request.args.get(u'sort', None)\n\n def search_url(params: Any) -> str:\n action = u'bulk_process' if getattr(\n g, u'action', u'') == u'bulk_process' else u'read'\n url = h.url_for(u'.'.join([group_type, action]), id=id)\n params = [(k, v.encode(u'utf-8')\n if isinstance(v, str) else str(v))\n for k, v in params]\n return url + u'?' + urlencode(params)\n\n def remove_field(\n key: str, value: Optional[str] = None,\n replace: Optional[str] = None):\n controller = lookup_group_controller(group_type)\n return h.remove_url_param(\n key,\n value=value,\n replace=replace,\n controller=controller,\n action=u'read',\n extras=dict(id=g.group_dict.get(u'name')))\n\n extra_vars[\"remove_field\"] = remove_field\n\n def pager_url(q: Any = None, page: Optional[int] = None):\n params: list[tuple[str, Any]] = list(params_nopage)\n params.append((u'page', page))\n return search_url(params)\n\n details = _get_search_details()\n extra_vars[u'fields'] = details[u'fields']\n extra_vars[u'fields_grouped'] = details[u'fields_grouped']\n fq += details[u'fq']\n search_extras = details[u'search_extras']\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.fields = extra_vars[u'fields']\n g.fields_grouped = extra_vars[u'fields_grouped']\n\n facets: \"OrderedDict[str, str]\" = OrderedDict()\n\n org_label = h.humanize_entity_type(\n u'organization',\n h.default_group_type(u'organization'),\n u'facet label') or _(u'Organizations')\n\n group_label = h.humanize_entity_type(\n u'group',\n h.default_group_type(u'group'),\n u'facet label') or _(u'Groups')\n\n default_facet_titles = {\n u'organization': org_label,\n u'groups': group_label,\n u'tags': _(u'Tags'),\n u'res_format': _(u'Formats'),\n u'license_id': _(u'Licenses')\n }\n\n for facet in h.facets():\n if facet in default_facet_titles:\n facets[facet] = default_facet_titles[facet]\n else:\n facets[facet] = facet\n\n # Facet titles\n facets = _update_facet_titles(facets, group_type)\n\n extra_vars[\"facet_titles\"] = facets\n\n data_dict: dict[str, Any] = {\n u'q': q,\n u'fq': fq,\n u'include_private': True,\n u'facet.field': list(facets.keys()),\n u'rows': limit,\n u'sort': sort_by,\n u'start': (page - 1) * limit,\n u'extras': search_extras\n }\n\n context_ = cast(\n Context, dict((k, v) for (k, v) in context.items() if k != u'schema')\n )\n try:\n query = get_action(u'package_search')(context_, data_dict)\n except search.SearchError as se:\n log.error(u'Group search error: %r', se.args)\n extra_vars[\"query_error\"] = True\n extra_vars[\"page\"] = h.Page(collection=[])\n else:\n extra_vars[\"page\"] = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict['package_count'] = query['count']\n\n extra_vars[\"search_facets\"] = query['search_facets']\n extra_vars[\"search_facets_limits\"] = g.search_facets_limits = {}\n default_limit: int = config.get_value(u'search.facets.default')\n for facet in extra_vars[\"search_facets\"].keys():\n limit = int(request.args.get(u'_%s_limit' % facet, default_limit))\n g.search_facets_limits[facet] = limit\n 
extra_vars[\"page\"].items = query['results']\n\n extra_vars[\"sort_by_selected\"] = sort_by\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.facet_titles = facets\n g.page = extra_vars[\"page\"]\n\n extra_vars[\"group_type\"] = group_type\n _setup_template_variables(context, {u'id': id}, group_type=group_type)\n return extra_vars\n\n\ndef _update_facet_titles(\n facets: 'OrderedDict[str, str]',\n group_type: str) -> 'OrderedDict[str, str]':\n for plugin in plugins.PluginImplementations(plugins.IFacets):\n facets = (\n plugin.group_facets(facets, group_type, None)\n if group_type == \"group\"\n else plugin.organization_facets(facets, group_type, None)\n )\n return facets\n\n\ndef _get_group_dict(id: str, group_type: str) -> dict[str, Any]:\n u''' returns the result of group_show action or aborts if there is a\n problem '''\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'for_view': True\n })\n try:\n return _action(u'group_show')(context, {\n u'id': id,\n u'include_datasets': False\n })\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n\n\ndef read(group_type: str,\n is_organization: bool,\n id: Optional[str] = None,\n limit: int = 20) -> Union[str, Response]:\n extra_vars = {}\n set_org(is_organization)\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True\n })\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n\n # unicode format (decoded from utf8)\n q = request.args.get(u'q', u'')\n\n extra_vars[\"q\"] = q\n\n limit = config.get(u'ckan.datasets_per_page', limit)\n\n try:\n # Do not query for the group datasets when dictizing, as they will\n # be ignored and get requested on the controller anyway\n data_dict['include_datasets'] = False\n\n # Do not query group members as they aren't used in the view\n data_dict['include_users'] = False\n\n group_dict = _action(u'group_show')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n\n # if the user specified a group id, redirect to the group name\n if data_dict['id'] == group_dict['id'] and \\\n data_dict['id'] != group_dict['name']:\n\n url_with_name = h.url_for(u'{}.read'.format(group_type),\n id=group_dict['name'])\n\n return h.redirect_to(\n h.add_url_param(alternative_url=url_with_name))\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.q = q\n g.group_dict = group_dict\n\n extra_vars = _read(id, limit, group_type)\n\n extra_vars[\"group_type\"] = group_type\n extra_vars[\"group_dict\"] = group_dict\n\n return base.render(\n _get_group_template(u'read_template', cast(str, g.group_dict['type'])),\n extra_vars)\n\n\ndef about(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n group_dict = _get_group_dict(id, group_type)\n group_type = group_dict['type']\n _setup_template_variables(context, {u'id': id}, group_type=group_type)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.group_type = 
group_type\n\n extra_vars: dict[str, Any] = {u\"group_dict\": group_dict,\n u\"group_type\": group_type}\n\n return base.render(\n _get_group_template(u'about_template', group_type), extra_vars)\n\n\ndef members(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n\n try:\n data_dict: dict[str, Any] = {u'id': id}\n assert check_access(u'group_edit_permissions', context, data_dict)\n members = get_action(u'member_list')(context, {\n u'id': id,\n u'object_type': u'user'\n })\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except NotAuthorized:\n base.abort(403,\n _(u'User %r not authorized to edit members of %s') %\n (current_user.name, id))\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.members = members\n g.group_dict = group_dict\n\n extra_vars: dict[str, Any] = {\n u\"members\": members,\n u\"group_dict\": group_dict,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/members.html'), extra_vars)\n\n\ndef member_delete(id: str, group_type: str,\n is_organization: bool) -> Union[Response, str]:\n extra_vars = {}\n set_org(is_organization)\n if u'cancel' in request.args:\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n try:\n assert _check_access(u'group_member_delete', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s members') % u'')\n\n try:\n user_id = request.args.get(u'user')\n if request.method == u'POST':\n _action(u'group_member_delete')(context, {\n u'id': id,\n u'user_id': user_id\n })\n h.flash_notice(_(u'Group member has been deleted.'))\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n user_dict = _action(u'user_show')(context, {u'id': user_id})\n\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s members') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n extra_vars: dict[str, Any] = {\n u\"user_id\": user_id,\n u\"user_dict\": user_dict,\n u\"group_id\": id,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/confirm_delete_member.html'),\n extra_vars)\n\n\ndef follow(id: str, group_type: str, is_organization: bool) -> Response:\n u'''Start following this group.'''\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n data_dict = {u'id': id}\n try:\n get_action(u'follow_group')(context, data_dict)\n group_dict = get_action(u'group_show')(context, data_dict)\n h.flash_success(\n _(u\"You are now following {0}\").format(group_dict['title']))\n\n id = group_dict['name']\n except ValidationError as e:\n error_message = (e.message or e.error_summary or e.error_dict)\n h.flash_error(error_message)\n except NotAuthorized as e:\n h.flash_error(e.message)\n return h.redirect_to(u'group.read', id=id)\n\n\ndef unfollow(id: str, group_type: str, is_organization: bool) -> Response:\n u'''Stop following this group.'''\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n 
u'user': current_user.name\n }\n )\n data_dict = {u'id': id}\n try:\n get_action(u'unfollow_group')(context, data_dict)\n group_dict = get_action(u'group_show')(context, data_dict)\n h.flash_success(\n _(u\"You are no longer following {0}\").format(group_dict['title']))\n id = group_dict['name']\n except ValidationError as e:\n error_message = (e.message or e.error_summary or e.error_dict)\n h.flash_error(error_message)\n except NotFound as e:\n error_message = e.message or ''\n base.abort(404, _(error_message))\n except NotAuthorized as e:\n error_message = e.message or ''\n base.abort(403, _(error_message))\n return h.redirect_to(u'group.read', id=id)\n\n\ndef followers(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = cast(\n Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n }\n )\n group_dict = _get_group_dict(id, group_type)\n try:\n followers = \\\n get_action(u'group_follower_list')(context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to view followers %s') % u'')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.followers = followers\n\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u\"group_type\": group_type,\n u\"followers\": followers\n }\n return base.render(u'group/followers.html', extra_vars)\n\n\ndef admins(id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n group_dict = _get_group_dict(id, group_type)\n admins = authz.get_group_or_org_admin_ids(id)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n g.admins = admins\n\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u'group_type': group_type,\n u\"admins\": admins\n }\n\n return base.render(\n _get_group_template(u'admins_template', group_dict['type']),\n extra_vars)\n\n\nclass BulkProcessView(MethodView):\n u''' Bulk process view'''\n\n def _prepare(self, group_type: str, id: str) -> Context:\n\n # check we are org admin\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'schema': _db_to_form_schema(group_type=group_type),\n u'for_view': True,\n u'extras_as_string': True\n })\n\n try:\n check_access(u'bulk_update_public', context, {u'org_id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to access'))\n\n return context\n\n def get(self, id: str, group_type: str, is_organization: bool) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = self._prepare(group_type, id)\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n data_dict['include_datasets'] = False\n try:\n group_dict = _action(u'group_show')(context, data_dict)\n group = context['group']\n except NotFound:\n base.abort(404, _(u'Group not found'))\n\n if not group_dict['is_organization']:\n # FIXME: better error\n raise Exception(u'Must be an organization')\n\n # If no action then just show the datasets\n limit = 500\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n extra_vars = _read(id, limit, group_type)\n extra_vars['packages'] = g.page.items\n extra_vars['group_dict'] = group_dict\n 
extra_vars['group'] = group\n\n return base.render(\n _get_group_template(u'bulk_process_template', group_type),\n extra_vars)\n\n def post(\n self, id: str, group_type: str,\n is_organization: bool) -> Response:\n set_org(is_organization)\n context = self._prepare(group_type, id)\n data_dict: dict[str, Any] = {u'id': id, u'type': group_type}\n user = current_user.name\n try:\n # Do not query for the group datasets when dictizing, as they will\n # be ignored and get requested on the controller anyway\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n except NotFound:\n group_label = h.humanize_entity_type(\n u'organization' if is_organization else u'group',\n group_type,\n u'default label') or _(\n u'Organization' if is_organization else u'Group')\n base.abort(404, _(u'{} not found'.format(group_label)))\n except NotAuthorized:\n base.abort(403,\n _(u'User %r not authorized to edit %s') % (user, id))\n\n if not group_dict['is_organization']:\n # FIXME: better error\n raise Exception(u'Must be an organization')\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.group_dict = group_dict\n\n # use different form names so that ie7 can be detected\n form_names = set([\n u\"bulk_action.public\",\n u\"bulk_action.delete\",\n u\"bulk_action.private\"\n ])\n actions_in_form: set[str] = set(request.form.keys())\n actions = form_names.intersection(actions_in_form)\n # ie7 puts all buttons in form params but puts submitted one twice\n\n form_dict: dict[str, str] = request.form.to_dict()\n for key, value in form_dict.items():\n if value in [u'private', u'public']:\n action = key.split(u'.')[-1]\n break\n else:\n # normal good browser form submission\n action = actions.pop().split(u'.')[-1]\n\n # process the action first find the datasets to perform the action on.\n # they are prefixed by dataset_ in the form data\n datasets = []\n for param in request.form:\n if param.startswith(u'dataset_'):\n datasets.append(param[8:])\n\n action_functions = {\n u'private': u'bulk_update_private',\n u'public': u'bulk_update_public',\n u'delete': u'bulk_update_delete',\n }\n\n data_dict = {u'datasets': datasets, u'org_id': group_dict['id']}\n\n try:\n get_action(action_functions[action])(context, data_dict)\n except NotAuthorized:\n base.abort(403, _(u'Not authorized to perform bulk update'))\n return h.redirect_to(u'{}.bulk_process'.format(group_type), id=id)\n\n\nclass CreateGroupView(MethodView):\n u'''Create group view '''\n\n def _prepare(self, data: Optional[dict[str, Any]] = None) -> Context:\n if data and u'type' in data:\n group_type = data['type']\n else:\n group_type = _guess_group_type()\n if data:\n data['type'] = group_type\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'save': u'save' in request.args,\n u'parent': request.args.get(u'parent', None),\n u'group_type': group_type\n })\n\n try:\n assert _check_access(u'group_create', context)\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to create a group'))\n\n return context\n\n def post(self, group_type: str,\n is_organization: bool) -> Union[Response, str]:\n set_org(is_organization)\n context = self._prepare()\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict.update(clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.files)))\n ))\n except dict_fns.DataError:\n 
base.abort(400, _(u'Integrity Error'))\n user = current_user.name\n data_dict['type'] = group_type or u'group'\n data_dict['users'] = [{u'name': user, u'capacity': u'admin'}]\n try:\n group = _action(u'group_create')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n errors = e.error_dict\n error_summary = e.error_summary\n return self.get(group_type, is_organization,\n data_dict, errors, error_summary)\n\n return h.redirect_to(\n cast(str, group['type']) + u'.read', id=group['name'])\n\n def get(self,\n group_type: str,\n is_organization: bool,\n data: Optional[dict[str, Any]] = None,\n errors: Optional[dict[str, Any]] = None,\n error_summary: Optional[dict[str, Any]] = None) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = self._prepare()\n data = data or clean_dict(\n dict_fns.unflatten(\n tuplize_dict(\n parse_params(request.args, ignore_keys=CACHE_PARAMETERS)\n )\n )\n )\n\n if not data.get(u'image_url', u'').startswith(u'http'):\n data.pop(u'image_url', None)\n errors = errors or {}\n error_summary = error_summary or {}\n extra_vars: dict[str, Any] = {\n u'data': data,\n u'errors': errors,\n u'error_summary': error_summary,\n u'action': u'new',\n u'group_type': group_type\n }\n _setup_template_variables(\n context, data, group_type=group_type)\n form = base.render(\n _get_group_template(u'group_form', group_type), extra_vars)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.form = form\n\n extra_vars[\"form\"] = form\n return base.render(\n _get_group_template(u'new_template', group_type), extra_vars)\n\n\nclass EditGroupView(MethodView):\n u''' Edit group view'''\n\n def _prepare(self, id: Optional[str]) -> Context:\n data_dict: dict[str, Any] = {u'id': id, u'include_datasets': False}\n\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n u'save': u'save' in request.args,\n u'for_edit': True,\n u'parent': request.args.get(u'parent', None),\n u'id': id\n })\n\n try:\n _action(u'group_show')(context, data_dict)\n _check_access(u'group_update', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to create a group'))\n except NotFound:\n base.abort(404, _(u'Group not found'))\n\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Union[Response, str]:\n set_org(is_organization)\n context = self._prepare(id)\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict.update(clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.files)))\n ))\n except dict_fns.DataError:\n base.abort(400, _(u'Integrity Error'))\n data_dict['id'] = context['id']\n context['allow_partial_update'] = True\n try:\n group = _action(u'group_update')(context, data_dict)\n if id != group['name']:\n _force_reindex(group)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n errors = e.error_dict\n error_summary = e.error_summary\n assert id\n return self.get(id, group_type, is_organization,\n data_dict, errors, error_summary)\n return h.redirect_to(\n cast(str, group[u'type']) + u'.read', id=group[u'name'])\n\n def get(self,\n id: str,\n group_type: str,\n is_organization: bool,\n data: Optional[dict[str, Any]] = None,\n errors: Optional[dict[str, Any]] = None,\n 
error_summary: Optional[dict[str, Any]] = None) -> str:\n extra_vars = {}\n set_org(is_organization)\n context = self._prepare(id)\n data_dict: dict[str, Any] = {u'id': id, u'include_datasets': False}\n try:\n group_dict = _action(u'group_show')(context, data_dict)\n except (NotFound, NotAuthorized):\n base.abort(404, _(u'Group not found'))\n data = data or group_dict\n assert data is not None\n errors = errors or {}\n extra_vars: dict[str, Any] = {\n u'data': data,\n u\"group_dict\": group_dict,\n u'errors': errors,\n u'error_summary': error_summary,\n u'action': u'edit',\n u'group_type': group_type\n }\n\n _setup_template_variables(context, data, group_type=group_type)\n form = base.render(\n _get_group_template(u'group_form', group_type), extra_vars)\n\n # TODO: Remove\n # ckan 2.9: Adding variables that were removed from c object for\n # compatibility with templates in existing extensions\n g.grouptitle = group_dict.get(u'title')\n g.groupname = group_dict.get(u'name')\n g.data = data\n g.group_dict = group_dict\n\n extra_vars[\"form\"] = form\n return base.render(\n _get_group_template(u'edit_template', group_type), extra_vars)\n\n\nclass DeleteGroupView(MethodView):\n u'''Delete group view '''\n\n def _prepare(self, id: Optional[str] = None) -> Context:\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name,\n })\n try:\n assert _check_access(u'group_delete', context, {u'id': id})\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s') % u'')\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Response:\n set_org(is_organization)\n context = self._prepare(id)\n try:\n _action(u'group_delete')(context, {u'id': id})\n group_label = h.humanize_entity_type(\n u'group',\n group_type,\n u'has been deleted') or _(u'Group')\n h.flash_notice(\n _(u'%s has been deleted.') % _(group_label))\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to delete group %s') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n base.abort(403, _(e.error_dict['message']))\n\n return h.redirect_to(u'{}.index'.format(group_type))\n\n def get(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Union[str, Response]:\n set_org(is_organization)\n context = self._prepare(id)\n group_dict = _action(u'group_show')(context, {u'id': id})\n if u'cancel' in request.args:\n return h.redirect_to(u'{}.edit'.format(group_type), id=id)\n\n # TODO: Remove\n g.group_dict = group_dict\n extra_vars: dict[str, Any] = {\n u\"group_dict\": group_dict,\n u\"group_type\": group_type\n }\n return base.render(_replace_group_org(u'group/confirm_delete.html'),\n extra_vars)\n\n\nclass MembersGroupView(MethodView):\n u'''New members group view'''\n\n def _prepare(self, id: Optional[str] = None) -> Context:\n context = cast(Context, {\n u'model': model,\n u'session': model.Session,\n u'user': current_user.name\n })\n try:\n assert _check_access(u'group_member_create', context, {u'id': id})\n except NotAuthorized:\n base.abort(403,\n _(u'Unauthorized to create group %s members') % u'')\n\n return context\n\n def post(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> Response:\n set_org(is_organization)\n context = self._prepare(id)\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form))))\n data_dict['id'] = id\n\n email = data_dict.get(u'email')\n\n if email:\n 
user_data_dict: dict[str, Any] = {\n u'email': email,\n u'group_id': data_dict['id'],\n u'role': data_dict['role']\n }\n del data_dict['email']\n\n try:\n user_dict = _action(u'user_invite')(context, user_data_dict)\n except ValidationError as e:\n for error in e.error_summary.values():\n h.flash_error(error)\n return h.redirect_to(\n u'{}.member_new'.format(group_type), id=id)\n\n data_dict['username'] = user_dict['name']\n\n try:\n group_dict = _action(u'group_member_create')(context, data_dict)\n except NotAuthorized:\n base.abort(403, _(u'Unauthorized to add member to group %s') % u'')\n except NotFound:\n base.abort(404, _(u'Group not found'))\n except ValidationError as e:\n for error in e.error_summary.values():\n h.flash_error(error)\n return h.redirect_to(u'{}.member_new'.format(group_type), id=id)\n\n # TODO: Remove\n g.group_dict = group_dict\n\n return h.redirect_to(u'{}.members'.format(group_type), id=id)\n\n def get(self,\n group_type: str,\n is_organization: bool,\n id: Optional[str] = None) -> str:\n extra_vars: dict[str, Any] = {}\n set_org(is_organization)\n context = self._prepare(id)\n user = request.args.get(u'user')\n data_dict: dict[str, Any] = {u'id': id}\n data_dict['include_datasets'] = False\n group_dict = _action(u'group_show')(context, data_dict)\n roles = _action(u'member_roles_list')(context, {\n u'group_type': group_type\n })\n user_dict = {}\n if user:\n user_dict = get_action(u'user_show')(context, {u'id': user})\n user_role =\\\n authz.users_role_for_group_or_org(id, user) or u'member'\n # TODO: Remove\n g.user_dict = user_dict\n extra_vars[\"user_dict\"] = user_dict\n else:\n user_role = u'member'\n\n # TODO: Remove\n g.group_dict = group_dict\n g.roles = roles\n g.user_role = user_role\n\n extra_vars.update({\n u\"group_dict\": group_dict,\n u\"roles\": roles,\n u\"user_role\": user_role,\n u\"group_type\": group_type,\n u\"user_dict\": user_dict\n })\n return base.render(_replace_group_org(u'group/member_new.html'),\n extra_vars)\n\n\ngroup = Blueprint(u'group', __name__, url_prefix=u'/group',\n url_defaults={u'group_type': u'group',\n u'is_organization': False})\norganization = Blueprint(u'organization', __name__,\n url_prefix=u'/organization',\n url_defaults={u'group_type': u'organization',\n u'is_organization': True})\n\n\ndef register_group_plugin_rules(blueprint: Blueprint) -> None:\n actions = [\n u'member_delete', u'followers', u'follow',\n u'unfollow', u'admins',\n ]\n blueprint.add_url_rule(u'/', view_func=index, strict_slashes=False)\n blueprint.add_url_rule(\n u'/new',\n methods=[u'GET', u'POST'],\n view_func=CreateGroupView.as_view(str(u'new')))\n blueprint.add_url_rule(u'/<id>', methods=[u'GET'], view_func=read)\n blueprint.add_url_rule(\n u'/edit/<id>', view_func=EditGroupView.as_view(str(u'edit')))\n blueprint.add_url_rule(u'/about/<id>', methods=[u'GET'], view_func=about)\n blueprint.add_url_rule(\n u'/members/<id>', methods=[u'GET', u'POST'], view_func=members)\n blueprint.add_url_rule(\n u'/member_new/<id>',\n view_func=MembersGroupView.as_view(str(u'member_new')))\n blueprint.add_url_rule(\n u'/bulk_process/<id>',\n view_func=BulkProcessView.as_view(str(u'bulk_process')))\n blueprint.add_url_rule(\n u'/delete/<id>',\n methods=[u'GET', u'POST'],\n view_func=DeleteGroupView.as_view(str(u'delete')))\n for action in actions:\n blueprint.add_url_rule(\n u'/{0}/<id>'.format(action),\n methods=[u'GET', u'POST'],\n view_func=globals()[action])\n\n\nregister_group_plugin_rules(group)\nregister_group_plugin_rules(organization)\n", "path": 
"ckan/views/group.py" } ]
diff --git a/ckan/views/group.py b/ckan/views/group.py
index 39154d95035..235e337f790 100644
--- a/ckan/views/group.py
+++ b/ckan/views/group.py
@@ -425,6 +425,8 @@ def read(group_type: str,
 
     extra_vars["q"] = q
 
+    limit = config.get(u'ckan.datasets_per_page', limit)
+
     try:
         # Do not query for the group datasets when dictizing, as they will
         # be ignored and get requested on the controller anyway
Cog-Creators__Red-DiscordBot-3294
[V3/develop] "owner" config field is ignored
# Other bugs

#### What were you trying to do?

Start the bot with owner permissions granted to me.

#### What were you expecting to happen?

I have owner permissions.

#### What actually happened?

I don't have owner permissions, even though an owner is set in config.

#### How can we reproduce this issue?

1. Set up a bot whose token doesn't belong directly to you (i.e. another user, or a team)
2. Set an owner through config (via `redbot --edit`)
3. Run the bot: the config value is ignored and the owner is taken from the application's data
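The precedence the reporter expects can be sketched independently of Red's internals: use an explicitly supplied owner id if there is one, otherwise fall back to the value stored in config rather than silently ignoring it. The `StoredConfig` class and `resolve_owner_id` helper below are hypothetical names used only for illustration, not Red's actual API; this is a minimal sketch under those assumptions, not the project's implementation.

```python
import asyncio
from typing import Optional


class StoredConfig:
    """Illustrative stand-in for the bot's persisted config (hypothetical class)."""

    def __init__(self, owner_id: Optional[int]) -> None:
        self._owner_id = owner_id

    async def owner(self) -> Optional[int]:
        # Mimics an async config lookup; returns the owner id set via `redbot --edit`.
        return self._owner_id


async def resolve_owner_id(cli_owner: Optional[int], config: StoredConfig) -> Optional[int]:
    """Hypothetical helper: prefer an explicitly passed owner, else fall back to config."""
    if cli_owner is not None:
        return cli_owner
    # Expected behaviour per the report: the stored owner should be used
    # instead of being silently ignored.
    return await config.owner()


if __name__ == "__main__":
    # No --owner flag given, but an owner id is stored in config:
    print(asyncio.run(resolve_owner_id(None, StoredConfig(123456789012345678))))
```

Only if both of these sources are empty would the owner id have to come from the application info fetched from Discord, which is the behaviour the reporter observed taking precedence.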
[ { "content": "import asyncio\nimport inspect\nimport logging\nimport os\nimport platform\nimport shutil\nimport sys\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom enum import IntEnum\nfrom importlib.machinery import ModuleSpec\nfrom pathlib import Path\nfrom typing import Optional, Union, List, Dict, NoReturn\nfrom types import MappingProxyType\n\nimport discord\nfrom discord.ext.commands import when_mentioned_or\n\nfrom . import Config, i18n, commands, errors, drivers, modlog, bank\nfrom .cog_manager import CogManager, CogManagerUI\nfrom .core_commands import license_info_command, Core\nfrom .data_manager import cog_data_path\nfrom .dev_commands import Dev\nfrom .events import init_events\nfrom .global_checks import init_global_checks\n\nfrom .rpc import RPCMixin\nfrom .utils import common_filters\n\nCUSTOM_GROUPS = \"CUSTOM_GROUPS\"\nSHARED_API_TOKENS = \"SHARED_API_TOKENS\"\n\nlog = logging.getLogger(\"redbot\")\n\n__all__ = [\"RedBase\", \"Red\", \"ExitCodes\"]\n\nNotMessage = namedtuple(\"NotMessage\", \"guild\")\n\n\ndef _is_submodule(parent, child):\n return parent == child or child.startswith(parent + \".\")\n\n\n# barely spurious warning caused by our intentional shadowing\nclass RedBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin): # pylint: disable=no-member\n \"\"\"Mixin for the main bot class.\n\n This exists because `Red` inherits from `discord.AutoShardedClient`, which\n is something other bot classes may not want to have as a parent class.\n \"\"\"\n\n def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):\n self._shutdown_mode = ExitCodes.CRITICAL\n self._cli_flags = cli_flags\n self._config = Config.get_core_conf(force_registration=False)\n self._co_owners = cli_flags.co_owner\n self.rpc_enabled = cli_flags.rpc\n self.rpc_port = cli_flags.rpc_port\n self._last_exception = None\n self._config.register_global(\n token=None,\n prefix=[],\n packages=[],\n owner=None,\n whitelist=[],\n blacklist=[],\n locale=\"en-US\",\n embeds=True,\n color=15158332,\n fuzzy=False,\n custom_info=None,\n help__page_char_limit=1000,\n help__max_pages_in_guild=2,\n help__use_menus=False,\n help__show_hidden=False,\n help__verify_checks=True,\n help__verify_exists=False,\n help__tagline=\"\",\n invite_public=False,\n invite_perm=0,\n disabled_commands=[],\n disabled_command_msg=\"That command is disabled.\",\n extra_owner_destinations=[],\n owner_opt_out_list=[],\n last_system_info__python_version=[3, 7],\n last_system_info__machine=None,\n last_system_info__system=None,\n schema_version=0,\n )\n\n self._config.register_guild(\n prefix=[],\n whitelist=[],\n blacklist=[],\n admin_role=[],\n mod_role=[],\n embeds=None,\n use_bot_color=False,\n fuzzy=False,\n disabled_commands=[],\n autoimmune_ids=[],\n )\n\n self._config.register_user(embeds=None)\n\n self._config.init_custom(CUSTOM_GROUPS, 2)\n self._config.register_custom(CUSTOM_GROUPS)\n\n self._config.init_custom(SHARED_API_TOKENS, 2)\n self._config.register_custom(SHARED_API_TOKENS)\n\n async def prefix_manager(bot, message):\n if not cli_flags.prefix:\n global_prefix = await bot._config.prefix()\n else:\n global_prefix = cli_flags.prefix\n if message.guild is None:\n return global_prefix\n server_prefix = await bot._config.guild(message.guild).prefix()\n if cli_flags.mentionable:\n return (\n when_mentioned_or(*server_prefix)(bot, message)\n if server_prefix\n else when_mentioned_or(*global_prefix)(bot, message)\n )\n else:\n return server_prefix if server_prefix else 
global_prefix\n\n if \"command_prefix\" not in kwargs:\n kwargs[\"command_prefix\"] = prefix_manager\n\n if cli_flags.owner and \"owner_id\" not in kwargs:\n kwargs[\"owner_id\"] = cli_flags.owner\n\n if \"command_not_found\" not in kwargs:\n kwargs[\"command_not_found\"] = \"Command {} not found.\\n{}\"\n\n self._uptime = None\n self._checked_time_accuracy = None\n self._color = discord.Embed.Empty # This is needed or color ends up 0x000000\n\n self._main_dir = bot_dir\n self._cog_mgr = CogManager()\n super().__init__(*args, help_command=None, **kwargs)\n # Do not manually use the help formatter attribute here, see `send_help_for`,\n # for a documented API. The internals of this object are still subject to change.\n self._help_formatter = commands.help.RedHelpFormatter()\n self.add_command(commands.help.red_help)\n\n self._permissions_hooks: List[commands.CheckPredicate] = []\n self._red_ready = asyncio.Event()\n\n @property\n def cog_mgr(self) -> NoReturn:\n raise AttributeError(\"Please don't mess with the cog manager internals.\")\n\n @property\n def uptime(self) -> datetime:\n \"\"\" Allow access to the value, but we don't want cog creators setting it \"\"\"\n return self._uptime\n\n @uptime.setter\n def uptime(self, value) -> NoReturn:\n raise RuntimeError(\n \"Hey, we're cool with sharing info about the uptime, but don't try and assign to it please.\"\n )\n\n @property\n def db(self) -> NoReturn:\n raise AttributeError(\n \"We really don't want you touching the bot config directly. \"\n \"If you need something in here, take a look at the exposed methods \"\n \"and use the one which corresponds to your needs or \"\n \"open an issue if you need an additional method for your use case.\"\n )\n\n @property\n def counter(self) -> NoReturn:\n raise AttributeError(\n \"Please make your own counter object by importing ``Counter`` from ``collections``.\"\n )\n\n @property\n def color(self) -> NoReturn:\n raise AttributeError(\"Please fetch the embed color with `get_embed_color`\")\n\n @property\n def colour(self) -> NoReturn:\n raise AttributeError(\"Please fetch the embed colour with `get_embed_colour`\")\n\n async def allowed_by_whitelist_blacklist(\n self,\n who: Optional[Union[discord.Member, discord.User]] = None,\n *,\n who_id: Optional[int] = None,\n guild_id: Optional[int] = None,\n role_ids: Optional[List[int]] = None,\n ) -> bool:\n \"\"\"\n This checks if a user or member is allowed to run things,\n as considered by Red's whitelist and blacklist.\n \n If given a user object, this function will check the global lists\n \n If given a member, this will additionally check guild lists\n \n If omiting a user or member, you must provide a value for ``who_id``\n \n You may also provide a value for ``guild_id`` in this case\n \n If providing a member by guild and member ids,\n you should supply ``role_ids`` as well\n\n Parameters\n ----------\n who : Optional[Union[discord.Member, discord.User]]\n The user or member object to check\n \n Other Parameters\n ----------------\n who_id : Optional[int]\n The id of the user or member to check\n If not providing a value for ``who``, this is a required parameter.\n guild_id : Optional[int]\n When used in conjunction with a provided value for ``who_id``, checks\n the lists for the corresponding guild as well.\n role_ids : Optional[List[int]]\n When used with both ``who_id`` and ``guild_id``, checks the role ids provided.\n This is required for accurate checking of members in a guild if providing ids.\n\n Raises\n ------\n TypeError\n Did not provide 
``who`` or ``who_id``\n \n Returns\n -------\n bool\n `True` if user is allowed to run things, `False` otherwise\n \"\"\"\n # Contributor Note:\n # All config calls are delayed until needed in this section\n # All changes should be made keeping in mind that this is also used as a global check\n\n guild = None\n mocked = False # used for an accurate delayed role id expansion later.\n if not who:\n if not who_id:\n raise TypeError(\"Must provide a value for either `who` or `who_id`\")\n mocked = True\n who = discord.Object(id=who_id)\n if guild_id:\n guild = discord.Object(id=guild_id)\n else:\n guild = getattr(who, \"guild\", None)\n\n if await self.is_owner(who):\n return True\n\n global_whitelist = await self._config.whitelist()\n if global_whitelist:\n if who.id not in global_whitelist:\n return False\n else:\n # blacklist is only used when whitelist doesn't exist.\n global_blacklist = await self._config.blacklist()\n if who.id in global_blacklist:\n return False\n\n if guild:\n if guild.owner_id == who.id:\n return True\n\n # The delayed expansion of ids to check saves time in the DM case.\n # Converting to a set reduces the total lookup time in section\n if mocked:\n ids = {i for i in (who.id, *(role_ids or [])) if i != guild.id}\n else:\n # DEP-WARN\n # This uses member._roles (getattr is for the user case)\n # If this is removed upstream (undocumented)\n # there is a silent failure potential, and role blacklist/whitelists will break.\n ids = {i for i in (who.id, *(getattr(who, \"_roles\", []))) if i != guild.id}\n\n guild_whitelist = await self._config.guild(guild).whitelist()\n if guild_whitelist:\n if ids.isdisjoint(guild_whitelist):\n return False\n else:\n guild_blacklist = await self._config.guild(guild).blacklist()\n if not ids.isdisjoint(guild_blacklist):\n return False\n\n return True\n\n async def get_valid_prefixes(self, guild: Optional[discord.Guild] = None) -> List[str]:\n \"\"\"\n This gets the valid prefixes for a guild.\n\n If not provided a guild (or passed None) it will give the DM prefixes.\n\n This is just a fancy wrapper around ``get_prefix``\n\n Parameters\n ----------\n guild : Optional[discord.Guild]\n The guild you want prefixes for. Omit (or pass None) for the DM prefixes\n\n Returns\n -------\n List[str]\n If a guild was specified, the valid prefixes in that guild.\n If a guild was not specified, the valid prefixes for DMs\n \"\"\"\n return await self.get_prefix(NotMessage(guild))\n\n async def get_embed_color(self, location: discord.abc.Messageable) -> discord.Color:\n \"\"\"\n Get the embed color for a location. 
This takes into account all related settings.\n\n Parameters\n ----------\n location : `discord.abc.Messageable`\n Location to check embed color for.\n\n Returns\n -------\n discord.Color\n Embed color for the provided location.\n \"\"\"\n\n guild = getattr(location, \"guild\", None)\n\n if (\n guild\n and await self._config.guild(guild).use_bot_color()\n and not isinstance(location, discord.Member)\n ):\n return guild.me.color\n\n return self._color\n\n get_embed_colour = get_embed_color\n\n # start config migrations\n async def _maybe_update_config(self):\n \"\"\"\n This should be run prior to loading cogs or connecting to discord.\n \"\"\"\n schema_version = await self._config.schema_version()\n\n if schema_version == 0:\n await self._schema_0_to_1()\n schema_version += 1\n await self._config.schema_version.set(schema_version)\n if schema_version == 1:\n await self._schema_1_to_2()\n schema_version += 1\n await self._config.schema_version.set(schema_version)\n\n async def _schema_1_to_2(self):\n \"\"\"\n This contains the migration of shared API tokens to a custom config scope\n \"\"\"\n\n log.info(\"Moving shared API tokens to a custom group\")\n all_shared_api_tokens = await self._config.get_raw(\"api_tokens\", default={})\n for service_name, token_mapping in all_shared_api_tokens.items():\n service_partial = self._config.custom(SHARED_API_TOKENS, service_name)\n async with service_partial.all() as basically_bulk_update:\n basically_bulk_update.update(token_mapping)\n\n await self._config.clear_raw(\"api_tokens\")\n\n async def _schema_0_to_1(self):\n \"\"\"\n This contains the migration to allow multiple mod and multiple admin roles.\n \"\"\"\n\n log.info(\"Begin updating guild configs to support multiple mod/admin roles\")\n all_guild_data = await self._config.all_guilds()\n for guild_id, guild_data in all_guild_data.items():\n guild_obj = discord.Object(id=guild_id)\n mod_roles, admin_roles = [], []\n maybe_mod_role_id = guild_data[\"mod_role\"]\n maybe_admin_role_id = guild_data[\"admin_role\"]\n\n if maybe_mod_role_id:\n mod_roles.append(maybe_mod_role_id)\n await self._config.guild(guild_obj).mod_role.set(mod_roles)\n if maybe_admin_role_id:\n admin_roles.append(maybe_admin_role_id)\n await self._config.guild(guild_obj).admin_role.set(admin_roles)\n log.info(\"Done updating guild configs to support multiple mod/admin roles\")\n\n # end Config migrations\n\n async def pre_flight(self, cli_flags):\n \"\"\"\n This should only be run once, prior to connecting to discord.\n \"\"\"\n await self._maybe_update_config()\n\n init_global_checks(self)\n init_events(self, cli_flags)\n\n i18n_locale = await self._config.locale()\n i18n.set_locale(i18n_locale)\n\n self.add_cog(Core(self))\n self.add_cog(CogManagerUI())\n self.add_command(license_info_command)\n if cli_flags.dev:\n self.add_cog(Dev())\n\n await modlog._init(self)\n bank._init()\n\n packages = []\n\n last_system_info = await self._config.last_system_info()\n\n async def notify_owners(content: str) -> None:\n destinations = await self.get_owner_notification_destinations()\n for destination in destinations:\n prefixes = await self.get_valid_prefixes(getattr(destination, \"guild\", None))\n prefix = prefixes[0]\n try:\n await destination.send(content.format(prefix=prefix))\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({destination.id}){destination}\"\n )\n\n ver_info = list(sys.version_info[:2])\n python_version_changed = False\n LIB_PATH = cog_data_path(raw_name=\"Downloader\") / 
\"lib\"\n if ver_info != last_system_info[\"python_version\"]:\n await self._config.last_system_info.python_version.set(ver_info)\n if any(LIB_PATH.iterdir()):\n shutil.rmtree(str(LIB_PATH))\n LIB_PATH.mkdir()\n self.loop.create_task(\n notify_owners(\n \"We detected a change in minor Python version\"\n \" and cleared packages in lib folder.\\n\"\n \"The instance was started with no cogs, please load Downloader\"\n \" and use `{prefix}cog reinstallreqs` to regenerate lib folder.\"\n \" After that, restart the bot to get\"\n \" all of your previously loaded cogs loaded again.\"\n )\n )\n python_version_changed = True\n else:\n if cli_flags.no_cogs is False:\n packages.extend(await self._config.packages())\n\n if cli_flags.load_cogs:\n packages.extend(cli_flags.load_cogs)\n\n system_changed = False\n machine = platform.machine()\n system = platform.system()\n if last_system_info[\"machine\"] is None:\n await self._config.last_system_info.machine.set(machine)\n elif last_system_info[\"machine\"] != machine:\n await self._config.last_system_info.machine.set(machine)\n system_changed = True\n\n if last_system_info[\"system\"] is None:\n await self._config.last_system_info.system.set(system)\n elif last_system_info[\"system\"] != system:\n await self._config.last_system_info.system.set(system)\n system_changed = True\n\n if system_changed and not python_version_changed:\n self.loop.create_task(\n notify_owners(\n \"We detected a possible change in machine's operating system\"\n \" or architecture. You might need to regenerate your lib folder\"\n \" if 3rd-party cogs stop working properly.\\n\"\n \"To regenerate lib folder, load Downloader and use `{prefix}cog reinstallreqs`.\"\n )\n )\n\n if packages:\n # Load permissions first, for security reasons\n try:\n packages.remove(\"permissions\")\n except ValueError:\n pass\n else:\n packages.insert(0, \"permissions\")\n\n to_remove = []\n print(\"Loading packages...\")\n for package in packages:\n try:\n spec = await self._cog_mgr.find_cog(package)\n await asyncio.wait_for(self.load_extension(spec), 30)\n except asyncio.TimeoutError:\n log.exception(\"Failed to load package %s (timeout)\", package)\n to_remove.append(package)\n except Exception as e:\n log.exception(\"Failed to load package {}\".format(package), exc_info=e)\n await self.remove_loaded_package(package)\n to_remove.append(package)\n for package in to_remove:\n packages.remove(package)\n if packages:\n print(\"Loaded packages: \" + \", \".join(packages))\n\n if self.rpc_enabled:\n await self.rpc.initialize(self.rpc_port)\n\n async def start(self, *args, **kwargs):\n cli_flags = kwargs.pop(\"cli_flags\")\n await self.pre_flight(cli_flags=cli_flags)\n return await super().start(*args, **kwargs)\n\n async def send_help_for(\n self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]\n ):\n \"\"\"\n Invokes Red's helpformatter for a given context and object.\n \"\"\"\n return await self._help_formatter.send_help(ctx, help_for)\n\n async def embed_requested(self, channel, user, command=None) -> bool:\n \"\"\"\n Determine if an embed is requested for a response.\n\n Parameters\n ----------\n channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`\n The channel to check embed settings for.\n user : `discord.abc.User`\n The user to check embed settings for.\n command\n (Optional) the command ran.\n\n Returns\n -------\n bool\n :code:`True` if an embed is requested\n \"\"\"\n if isinstance(channel, discord.abc.PrivateChannel) or (\n command and command 
== self.get_command(\"help\")\n ):\n user_setting = await self._config.user(user).embeds()\n if user_setting is not None:\n return user_setting\n else:\n guild_setting = await self._config.guild(channel.guild).embeds()\n if guild_setting is not None:\n return guild_setting\n global_setting = await self._config.embeds()\n return global_setting\n\n async def is_owner(self, user) -> bool:\n if user.id in self._co_owners:\n return True\n return await super().is_owner(user)\n\n async def is_admin(self, member: discord.Member) -> bool:\n \"\"\"Checks if a member is an admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self._config.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # Dep-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def is_mod(self, member: discord.Member) -> bool:\n \"\"\"Checks if a member is a mod or admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self._config.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n for snowflake in await self._config.guild(member.guild).mod_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def get_admin_roles(self, guild: discord.Guild) -> List[discord.Role]:\n \"\"\"\n Gets the admin roles for a guild.\n \"\"\"\n ret: List[discord.Role] = []\n for snowflake in await self._config.guild(guild).admin_role():\n r = guild.get_role(snowflake)\n if r:\n ret.append(r)\n return ret\n\n async def get_mod_roles(self, guild: discord.Guild) -> List[discord.Role]:\n \"\"\"\n Gets the mod roles for a guild.\n \"\"\"\n ret: List[discord.Role] = []\n for snowflake in await self._config.guild(guild).mod_role():\n r = guild.get_role(snowflake)\n if r:\n ret.append(r)\n return ret\n\n async def get_admin_role_ids(self, guild_id: int) -> List[int]:\n \"\"\"\n Gets the admin role ids for a guild id.\n \"\"\"\n return await self._config.guild(discord.Object(id=guild_id)).admin_role()\n\n async def get_mod_role_ids(self, guild_id: int) -> List[int]:\n \"\"\"\n Gets the mod role ids for a guild id.\n \"\"\"\n return await self._config.guild(discord.Object(id=guild_id)).mod_role()\n\n async def get_shared_api_tokens(self, service_name: str) -> Dict[str, str]:\n \"\"\"\n Gets the shared API tokens for a service\n\n Parameters\n ----------\n service_name: str\n The service to get tokens for.\n\n Returns\n -------\n Dict[str, str]\n A Mapping of token names to tokens.\n This mapping exists because some services have multiple tokens.\n \"\"\"\n return await self._config.custom(SHARED_API_TOKENS, service_name).all()\n\n async def set_shared_api_tokens(self, service_name: str, **tokens: str):\n \"\"\"\n Sets shared API tokens for a service\n\n In most cases, this should not be used. 
Users should instead be using the\n ``set api`` command\n\n This will not clear existing values not specified.\n\n Parameters\n ----------\n service_name: str\n The service to set tokens for\n **tokens\n token_name -> token\n\n Examples\n --------\n Setting the api_key for youtube from a value in a variable ``my_key``\n\n >>> await ctx.bot.set_shared_api_tokens(\"youtube\", api_key=my_key)\n \"\"\"\n\n async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:\n group.update(tokens)\n self.dispatch(\"red_api_tokens_update\", service_name, MappingProxyType(group))\n\n async def remove_shared_api_tokens(self, service_name: str, *token_names: str):\n \"\"\"\n Removes shared API tokens\n\n Parameters\n ----------\n service_name: str\n The service to remove tokens for\n *token_names: str\n The name of each token to be removed\n\n Examples\n --------\n Removing the api_key for youtube\n\n >>> await ctx.bot.remove_shared_api_tokens(\"youtube\", \"api_key\")\n \"\"\"\n async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:\n for name in token_names:\n group.pop(name, None)\n\n async def get_context(self, message, *, cls=commands.Context):\n return await super().get_context(message, cls=cls)\n\n async def process_commands(self, message: discord.Message):\n \"\"\"\n Same as base method, but dispatches an additional event for cogs\n which want to handle normal messages differently to command\n messages, without the overhead of additional get_context calls\n per cog.\n \"\"\"\n if not message.author.bot:\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n else:\n ctx = None\n\n if ctx is None or ctx.valid is False:\n self.dispatch(\"message_without_command\", message)\n\n @staticmethod\n def list_packages():\n \"\"\"Lists packages present in the cogs the folder\"\"\"\n return os.listdir(\"cogs\")\n\n async def save_packages_status(self, packages):\n await self._config.packages.set(packages)\n\n async def add_loaded_package(self, pkg_name: str):\n async with self._config.packages() as curr_pkgs:\n if pkg_name not in curr_pkgs:\n curr_pkgs.append(pkg_name)\n\n async def remove_loaded_package(self, pkg_name: str):\n async with self._config.packages() as curr_pkgs:\n while pkg_name in curr_pkgs:\n curr_pkgs.remove(pkg_name)\n\n async def load_extension(self, spec: ModuleSpec):\n # NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`\n name = spec.name.split(\".\")[-1]\n if name in self.extensions:\n raise errors.PackageAlreadyLoaded(spec)\n\n lib = spec.loader.load_module()\n if not hasattr(lib, \"setup\"):\n del lib\n raise discord.ClientException(f\"extension {name} does not have a setup function\")\n\n try:\n if asyncio.iscoroutinefunction(lib.setup):\n await lib.setup(self)\n else:\n lib.setup(self)\n except Exception as e:\n self._remove_module_references(lib.__name__)\n self._call_module_finalizers(lib, name)\n raise\n else:\n self._BotBase__extensions[name] = lib\n\n def remove_cog(self, cogname: str):\n cog = self.get_cog(cogname)\n if cog is None:\n return\n\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.remove_permissions_hook(hook)\n\n super().remove_cog(cogname)\n\n cog.requires.reset()\n\n for meth in self.rpc_handlers.pop(cogname.upper(), ()):\n self.unregister_rpc_handler(meth)\n\n async def is_automod_immune(\n self, to_check: Union[discord.Message, commands.Context, discord.abc.User, 
discord.Role]\n ) -> bool:\n \"\"\"\n Checks if the user, message, context, or role should be considered immune from automated\n moderation actions.\n\n This will return ``False`` in direct messages.\n\n Parameters\n ----------\n to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`\n Something to check if it would be immune\n\n Returns\n -------\n bool\n ``True`` if immune\n\n \"\"\"\n guild = getattr(to_check, \"guild\", None)\n if not guild:\n return False\n\n if isinstance(to_check, discord.Role):\n ids_to_check = [to_check.id]\n else:\n author = getattr(to_check, \"author\", to_check)\n try:\n ids_to_check = [r.id for r in author.roles]\n except AttributeError:\n # webhook messages are a user not member,\n # cheaper than isinstance\n if author.bot and author.discriminator == \"0000\":\n return True # webhooks require significant permissions to enable.\n else:\n ids_to_check.append(author.id)\n\n immune_ids = await self._config.guild(guild).autoimmune_ids()\n\n return any(i in immune_ids for i in ids_to_check)\n\n @staticmethod\n async def send_filtered(\n destination: discord.abc.Messageable,\n filter_mass_mentions=True,\n filter_invite_links=True,\n filter_all_links=False,\n **kwargs,\n ):\n \"\"\"\n This is a convienience wrapper around\n\n discord.abc.Messageable.send\n\n It takes the destination you'd like to send to, which filters to apply\n (defaults on mass mentions, and invite links) and any other parameters\n normally accepted by destination.send\n\n This should realistically only be used for responding using user provided\n input. (unfortunately, including usernames)\n Manually crafted messages which dont take any user input have no need of this\n \n Returns\n -------\n discord.Message\n The message that was sent.\n \"\"\"\n\n content = kwargs.pop(\"content\", None)\n\n if content:\n if filter_mass_mentions:\n content = common_filters.filter_mass_mentions(content)\n if filter_invite_links:\n content = common_filters.filter_invites(content)\n if filter_all_links:\n content = common_filters.filter_urls(content)\n\n return await destination.send(content=content, **kwargs)\n\n def add_cog(self, cog: commands.Cog):\n if not isinstance(cog, commands.Cog):\n raise RuntimeError(\n f\"The {cog.__class__.__name__} cog in the {cog.__module__} package does \"\n f\"not inherit from the commands.Cog base class. 
The cog author must update \"\n f\"the cog to adhere to this requirement.\"\n )\n if cog.__cog_name__ in self.cogs:\n raise RuntimeError(f\"There is already a cog named {cog.__cog_name__} loaded.\")\n if not hasattr(cog, \"requires\"):\n commands.Cog.__init__(cog)\n\n added_hooks = []\n\n try:\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.add_permissions_hook(hook)\n added_hooks.append(hook)\n\n super().add_cog(cog)\n self.dispatch(\"cog_add\", cog)\n if \"permissions\" not in self.extensions:\n cog.requires.ready_event.set()\n except Exception:\n for hook in added_hooks:\n try:\n self.remove_permissions_hook(hook)\n except Exception:\n # This shouldn't be possible\n log.exception(\n \"A hook got extremely screwed up, \"\n \"and could not be removed properly during another error in cog load.\"\n )\n del cog\n raise\n\n def add_command(self, command: commands.Command) -> None:\n if not isinstance(command, commands.Command):\n raise RuntimeError(\"Commands must be instances of `redbot.core.commands.Command`\")\n\n super().add_command(command)\n\n permissions_not_loaded = \"permissions\" not in self.extensions\n self.dispatch(\"command_add\", command)\n if permissions_not_loaded:\n command.requires.ready_event.set()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n self.dispatch(\"command_add\", subcommand)\n if permissions_not_loaded:\n subcommand.requires.ready_event.set()\n\n def remove_command(self, name: str) -> None:\n command = super().remove_command(name)\n if not command:\n return\n command.requires.reset()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n subcommand.requires.reset()\n\n def clear_permission_rules(self, guild_id: Optional[int], **kwargs) -> None:\n \"\"\"Clear all permission overrides in a scope.\n\n Parameters\n ----------\n guild_id : Optional[int]\n The guild ID to wipe permission overrides for. 
If\n ``None``, this will clear all global rules and leave all\n guild rules untouched.\n\n **kwargs\n Keyword arguments to be passed to each required call of\n ``commands.Requires.clear_all_rules``\n\n \"\"\"\n for cog in self.cogs.values():\n cog.requires.clear_all_rules(guild_id, **kwargs)\n for command in self.walk_commands():\n command.requires.clear_all_rules(guild_id, **kwargs)\n\n def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Add a permissions hook.\n\n Permissions hooks are check predicates which are called before\n calling `Requires.verify`, and they can optionally return an\n override: ``True`` to allow, ``False`` to deny, and ``None`` to\n default to normal behaviour.\n\n Parameters\n ----------\n hook\n A command check predicate which returns ``True``, ``False``\n or ``None``.\n\n \"\"\"\n self._permissions_hooks.append(hook)\n\n def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Remove a permissions hook.\n\n Parameters are the same as those in `add_permissions_hook`.\n\n Raises\n ------\n ValueError\n If the permissions hook has not been added.\n\n \"\"\"\n self._permissions_hooks.remove(hook)\n\n async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:\n \"\"\"Run permissions hooks.\n\n Parameters\n ----------\n ctx : commands.Context\n The context for the command being invoked.\n\n Returns\n -------\n Optional[bool]\n ``False`` if any hooks returned ``False``, ``True`` if any\n hooks return ``True`` and none returned ``False``, ``None``\n otherwise.\n\n \"\"\"\n hook_results = []\n for hook in self._permissions_hooks:\n result = await discord.utils.maybe_coroutine(hook, ctx)\n if result is not None:\n hook_results.append(result)\n if hook_results:\n if all(hook_results):\n ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK\n return True\n else:\n ctx.permission_state = commands.PermState.DENIED_BY_HOOK\n return False\n\n async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:\n \"\"\"\n Gets the users and channels to send to\n \"\"\"\n await self.wait_until_red_ready()\n destinations = []\n opt_outs = await self._config.owner_opt_out_list()\n for user_id in (self.owner_id, *self._co_owners):\n if user_id not in opt_outs:\n user = self.get_user(user_id)\n if user:\n destinations.append(user)\n else:\n log.warning(\n \"Owner with ID %s is missing in user cache,\"\n \" ignoring owner notification destination.\",\n user_id,\n )\n\n channel_ids = await self._config.extra_owner_destinations()\n for channel_id in channel_ids:\n channel = self.get_channel(channel_id)\n if channel:\n destinations.append(channel)\n else:\n log.warning(\n \"Channel with ID %s is not available,\"\n \" ignoring owner notification destination.\",\n channel_id,\n )\n\n return destinations\n\n async def send_to_owners(self, content=None, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This takes the same arguments as discord.abc.Messageable.send\n\n This logs failing sends\n \"\"\"\n destinations = await self.get_owner_notification_destinations()\n\n async def wrapped_send(location, content=None, **kwargs):\n try:\n await location.send(content, **kwargs)\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({location.id}){location}\"\n )\n\n sends = [wrapped_send(d, content, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n async def wait_until_red_ready(self):\n 
\"\"\"Wait until our post connection startup is done.\"\"\"\n await self._red_ready.wait()\n\n\nclass Red(RedBase, discord.AutoShardedClient):\n \"\"\"\n You're welcome Caleb.\n \"\"\"\n\n async def logout(self):\n \"\"\"Logs out of Discord and closes all connections.\"\"\"\n await super().logout()\n await drivers.get_driver_class().teardown()\n try:\n await self.rpc.close()\n except AttributeError:\n pass\n\n async def shutdown(self, *, restart: bool = False):\n \"\"\"Gracefully quit Red.\n\n The program will exit with code :code:`0` by default.\n\n Parameters\n ----------\n restart : bool\n If :code:`True`, the program will exit with code :code:`26`. If the\n launcher sees this, it will attempt to restart the bot.\n\n \"\"\"\n if not restart:\n self._shutdown_mode = ExitCodes.SHUTDOWN\n else:\n self._shutdown_mode = ExitCodes.RESTART\n\n await self.logout()\n sys.exit(self._shutdown_mode)\n\n\nclass ExitCodes(IntEnum):\n # This needs to be an int enum to be used\n # with sys.exit\n CRITICAL = 1\n SHUTDOWN = 0\n RESTART = 26\n", "path": "redbot/core/bot.py" } ]
[ { "content": "import asyncio\nimport inspect\nimport logging\nimport os\nimport platform\nimport shutil\nimport sys\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom enum import IntEnum\nfrom importlib.machinery import ModuleSpec\nfrom pathlib import Path\nfrom typing import Optional, Union, List, Dict, NoReturn\nfrom types import MappingProxyType\n\nimport discord\nfrom discord.ext.commands import when_mentioned_or\n\nfrom . import Config, i18n, commands, errors, drivers, modlog, bank\nfrom .cog_manager import CogManager, CogManagerUI\nfrom .core_commands import license_info_command, Core\nfrom .data_manager import cog_data_path\nfrom .dev_commands import Dev\nfrom .events import init_events\nfrom .global_checks import init_global_checks\n\nfrom .rpc import RPCMixin\nfrom .utils import common_filters\n\nCUSTOM_GROUPS = \"CUSTOM_GROUPS\"\nSHARED_API_TOKENS = \"SHARED_API_TOKENS\"\n\nlog = logging.getLogger(\"redbot\")\n\n__all__ = [\"RedBase\", \"Red\", \"ExitCodes\"]\n\nNotMessage = namedtuple(\"NotMessage\", \"guild\")\n\n\ndef _is_submodule(parent, child):\n return parent == child or child.startswith(parent + \".\")\n\n\n# barely spurious warning caused by our intentional shadowing\nclass RedBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin): # pylint: disable=no-member\n \"\"\"Mixin for the main bot class.\n\n This exists because `Red` inherits from `discord.AutoShardedClient`, which\n is something other bot classes may not want to have as a parent class.\n \"\"\"\n\n def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):\n self._shutdown_mode = ExitCodes.CRITICAL\n self._cli_flags = cli_flags\n self._config = Config.get_core_conf(force_registration=False)\n self._co_owners = cli_flags.co_owner\n self.rpc_enabled = cli_flags.rpc\n self.rpc_port = cli_flags.rpc_port\n self._last_exception = None\n self._config.register_global(\n token=None,\n prefix=[],\n packages=[],\n owner=None,\n whitelist=[],\n blacklist=[],\n locale=\"en-US\",\n embeds=True,\n color=15158332,\n fuzzy=False,\n custom_info=None,\n help__page_char_limit=1000,\n help__max_pages_in_guild=2,\n help__use_menus=False,\n help__show_hidden=False,\n help__verify_checks=True,\n help__verify_exists=False,\n help__tagline=\"\",\n invite_public=False,\n invite_perm=0,\n disabled_commands=[],\n disabled_command_msg=\"That command is disabled.\",\n extra_owner_destinations=[],\n owner_opt_out_list=[],\n last_system_info__python_version=[3, 7],\n last_system_info__machine=None,\n last_system_info__system=None,\n schema_version=0,\n )\n\n self._config.register_guild(\n prefix=[],\n whitelist=[],\n blacklist=[],\n admin_role=[],\n mod_role=[],\n embeds=None,\n use_bot_color=False,\n fuzzy=False,\n disabled_commands=[],\n autoimmune_ids=[],\n )\n\n self._config.register_user(embeds=None)\n\n self._config.init_custom(CUSTOM_GROUPS, 2)\n self._config.register_custom(CUSTOM_GROUPS)\n\n self._config.init_custom(SHARED_API_TOKENS, 2)\n self._config.register_custom(SHARED_API_TOKENS)\n\n async def prefix_manager(bot, message):\n if not cli_flags.prefix:\n global_prefix = await bot._config.prefix()\n else:\n global_prefix = cli_flags.prefix\n if message.guild is None:\n return global_prefix\n server_prefix = await bot._config.guild(message.guild).prefix()\n if cli_flags.mentionable:\n return (\n when_mentioned_or(*server_prefix)(bot, message)\n if server_prefix\n else when_mentioned_or(*global_prefix)(bot, message)\n )\n else:\n return server_prefix if server_prefix else 
global_prefix\n\n if \"command_prefix\" not in kwargs:\n kwargs[\"command_prefix\"] = prefix_manager\n\n if cli_flags.owner and \"owner_id\" not in kwargs:\n kwargs[\"owner_id\"] = cli_flags.owner\n\n if \"command_not_found\" not in kwargs:\n kwargs[\"command_not_found\"] = \"Command {} not found.\\n{}\"\n\n self._uptime = None\n self._checked_time_accuracy = None\n self._color = discord.Embed.Empty # This is needed or color ends up 0x000000\n\n self._main_dir = bot_dir\n self._cog_mgr = CogManager()\n super().__init__(*args, help_command=None, **kwargs)\n # Do not manually use the help formatter attribute here, see `send_help_for`,\n # for a documented API. The internals of this object are still subject to change.\n self._help_formatter = commands.help.RedHelpFormatter()\n self.add_command(commands.help.red_help)\n\n self._permissions_hooks: List[commands.CheckPredicate] = []\n self._red_ready = asyncio.Event()\n\n @property\n def cog_mgr(self) -> NoReturn:\n raise AttributeError(\"Please don't mess with the cog manager internals.\")\n\n @property\n def uptime(self) -> datetime:\n \"\"\" Allow access to the value, but we don't want cog creators setting it \"\"\"\n return self._uptime\n\n @uptime.setter\n def uptime(self, value) -> NoReturn:\n raise RuntimeError(\n \"Hey, we're cool with sharing info about the uptime, but don't try and assign to it please.\"\n )\n\n @property\n def db(self) -> NoReturn:\n raise AttributeError(\n \"We really don't want you touching the bot config directly. \"\n \"If you need something in here, take a look at the exposed methods \"\n \"and use the one which corresponds to your needs or \"\n \"open an issue if you need an additional method for your use case.\"\n )\n\n @property\n def counter(self) -> NoReturn:\n raise AttributeError(\n \"Please make your own counter object by importing ``Counter`` from ``collections``.\"\n )\n\n @property\n def color(self) -> NoReturn:\n raise AttributeError(\"Please fetch the embed color with `get_embed_color`\")\n\n @property\n def colour(self) -> NoReturn:\n raise AttributeError(\"Please fetch the embed colour with `get_embed_colour`\")\n\n async def allowed_by_whitelist_blacklist(\n self,\n who: Optional[Union[discord.Member, discord.User]] = None,\n *,\n who_id: Optional[int] = None,\n guild_id: Optional[int] = None,\n role_ids: Optional[List[int]] = None,\n ) -> bool:\n \"\"\"\n This checks if a user or member is allowed to run things,\n as considered by Red's whitelist and blacklist.\n \n If given a user object, this function will check the global lists\n \n If given a member, this will additionally check guild lists\n \n If omiting a user or member, you must provide a value for ``who_id``\n \n You may also provide a value for ``guild_id`` in this case\n \n If providing a member by guild and member ids,\n you should supply ``role_ids`` as well\n\n Parameters\n ----------\n who : Optional[Union[discord.Member, discord.User]]\n The user or member object to check\n \n Other Parameters\n ----------------\n who_id : Optional[int]\n The id of the user or member to check\n If not providing a value for ``who``, this is a required parameter.\n guild_id : Optional[int]\n When used in conjunction with a provided value for ``who_id``, checks\n the lists for the corresponding guild as well.\n role_ids : Optional[List[int]]\n When used with both ``who_id`` and ``guild_id``, checks the role ids provided.\n This is required for accurate checking of members in a guild if providing ids.\n\n Raises\n ------\n TypeError\n Did not provide 
``who`` or ``who_id``\n \n Returns\n -------\n bool\n `True` if user is allowed to run things, `False` otherwise\n \"\"\"\n # Contributor Note:\n # All config calls are delayed until needed in this section\n # All changes should be made keeping in mind that this is also used as a global check\n\n guild = None\n mocked = False # used for an accurate delayed role id expansion later.\n if not who:\n if not who_id:\n raise TypeError(\"Must provide a value for either `who` or `who_id`\")\n mocked = True\n who = discord.Object(id=who_id)\n if guild_id:\n guild = discord.Object(id=guild_id)\n else:\n guild = getattr(who, \"guild\", None)\n\n if await self.is_owner(who):\n return True\n\n global_whitelist = await self._config.whitelist()\n if global_whitelist:\n if who.id not in global_whitelist:\n return False\n else:\n # blacklist is only used when whitelist doesn't exist.\n global_blacklist = await self._config.blacklist()\n if who.id in global_blacklist:\n return False\n\n if guild:\n if guild.owner_id == who.id:\n return True\n\n # The delayed expansion of ids to check saves time in the DM case.\n # Converting to a set reduces the total lookup time in section\n if mocked:\n ids = {i for i in (who.id, *(role_ids or [])) if i != guild.id}\n else:\n # DEP-WARN\n # This uses member._roles (getattr is for the user case)\n # If this is removed upstream (undocumented)\n # there is a silent failure potential, and role blacklist/whitelists will break.\n ids = {i for i in (who.id, *(getattr(who, \"_roles\", []))) if i != guild.id}\n\n guild_whitelist = await self._config.guild(guild).whitelist()\n if guild_whitelist:\n if ids.isdisjoint(guild_whitelist):\n return False\n else:\n guild_blacklist = await self._config.guild(guild).blacklist()\n if not ids.isdisjoint(guild_blacklist):\n return False\n\n return True\n\n async def get_valid_prefixes(self, guild: Optional[discord.Guild] = None) -> List[str]:\n \"\"\"\n This gets the valid prefixes for a guild.\n\n If not provided a guild (or passed None) it will give the DM prefixes.\n\n This is just a fancy wrapper around ``get_prefix``\n\n Parameters\n ----------\n guild : Optional[discord.Guild]\n The guild you want prefixes for. Omit (or pass None) for the DM prefixes\n\n Returns\n -------\n List[str]\n If a guild was specified, the valid prefixes in that guild.\n If a guild was not specified, the valid prefixes for DMs\n \"\"\"\n return await self.get_prefix(NotMessage(guild))\n\n async def get_embed_color(self, location: discord.abc.Messageable) -> discord.Color:\n \"\"\"\n Get the embed color for a location. 
This takes into account all related settings.\n\n Parameters\n ----------\n location : `discord.abc.Messageable`\n Location to check embed color for.\n\n Returns\n -------\n discord.Color\n Embed color for the provided location.\n \"\"\"\n\n guild = getattr(location, \"guild\", None)\n\n if (\n guild\n and await self._config.guild(guild).use_bot_color()\n and not isinstance(location, discord.Member)\n ):\n return guild.me.color\n\n return self._color\n\n get_embed_colour = get_embed_color\n\n # start config migrations\n async def _maybe_update_config(self):\n \"\"\"\n This should be run prior to loading cogs or connecting to discord.\n \"\"\"\n schema_version = await self._config.schema_version()\n\n if schema_version == 0:\n await self._schema_0_to_1()\n schema_version += 1\n await self._config.schema_version.set(schema_version)\n if schema_version == 1:\n await self._schema_1_to_2()\n schema_version += 1\n await self._config.schema_version.set(schema_version)\n\n async def _schema_1_to_2(self):\n \"\"\"\n This contains the migration of shared API tokens to a custom config scope\n \"\"\"\n\n log.info(\"Moving shared API tokens to a custom group\")\n all_shared_api_tokens = await self._config.get_raw(\"api_tokens\", default={})\n for service_name, token_mapping in all_shared_api_tokens.items():\n service_partial = self._config.custom(SHARED_API_TOKENS, service_name)\n async with service_partial.all() as basically_bulk_update:\n basically_bulk_update.update(token_mapping)\n\n await self._config.clear_raw(\"api_tokens\")\n\n async def _schema_0_to_1(self):\n \"\"\"\n This contains the migration to allow multiple mod and multiple admin roles.\n \"\"\"\n\n log.info(\"Begin updating guild configs to support multiple mod/admin roles\")\n all_guild_data = await self._config.all_guilds()\n for guild_id, guild_data in all_guild_data.items():\n guild_obj = discord.Object(id=guild_id)\n mod_roles, admin_roles = [], []\n maybe_mod_role_id = guild_data[\"mod_role\"]\n maybe_admin_role_id = guild_data[\"admin_role\"]\n\n if maybe_mod_role_id:\n mod_roles.append(maybe_mod_role_id)\n await self._config.guild(guild_obj).mod_role.set(mod_roles)\n if maybe_admin_role_id:\n admin_roles.append(maybe_admin_role_id)\n await self._config.guild(guild_obj).admin_role.set(admin_roles)\n log.info(\"Done updating guild configs to support multiple mod/admin roles\")\n\n # end Config migrations\n\n async def pre_flight(self, cli_flags):\n \"\"\"\n This should only be run once, prior to connecting to discord.\n \"\"\"\n await self._maybe_update_config()\n\n init_global_checks(self)\n init_events(self, cli_flags)\n\n if self.owner_id is None:\n self.owner_id = await self._config.owner()\n\n i18n_locale = await self._config.locale()\n i18n.set_locale(i18n_locale)\n\n self.add_cog(Core(self))\n self.add_cog(CogManagerUI())\n self.add_command(license_info_command)\n if cli_flags.dev:\n self.add_cog(Dev())\n\n await modlog._init(self)\n bank._init()\n\n packages = []\n\n last_system_info = await self._config.last_system_info()\n\n async def notify_owners(content: str) -> None:\n destinations = await self.get_owner_notification_destinations()\n for destination in destinations:\n prefixes = await self.get_valid_prefixes(getattr(destination, \"guild\", None))\n prefix = prefixes[0]\n try:\n await destination.send(content.format(prefix=prefix))\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({destination.id}){destination}\"\n )\n\n ver_info = list(sys.version_info[:2])\n 
python_version_changed = False\n LIB_PATH = cog_data_path(raw_name=\"Downloader\") / \"lib\"\n if ver_info != last_system_info[\"python_version\"]:\n await self._config.last_system_info.python_version.set(ver_info)\n if any(LIB_PATH.iterdir()):\n shutil.rmtree(str(LIB_PATH))\n LIB_PATH.mkdir()\n self.loop.create_task(\n notify_owners(\n \"We detected a change in minor Python version\"\n \" and cleared packages in lib folder.\\n\"\n \"The instance was started with no cogs, please load Downloader\"\n \" and use `{prefix}cog reinstallreqs` to regenerate lib folder.\"\n \" After that, restart the bot to get\"\n \" all of your previously loaded cogs loaded again.\"\n )\n )\n python_version_changed = True\n else:\n if cli_flags.no_cogs is False:\n packages.extend(await self._config.packages())\n\n if cli_flags.load_cogs:\n packages.extend(cli_flags.load_cogs)\n\n system_changed = False\n machine = platform.machine()\n system = platform.system()\n if last_system_info[\"machine\"] is None:\n await self._config.last_system_info.machine.set(machine)\n elif last_system_info[\"machine\"] != machine:\n await self._config.last_system_info.machine.set(machine)\n system_changed = True\n\n if last_system_info[\"system\"] is None:\n await self._config.last_system_info.system.set(system)\n elif last_system_info[\"system\"] != system:\n await self._config.last_system_info.system.set(system)\n system_changed = True\n\n if system_changed and not python_version_changed:\n self.loop.create_task(\n notify_owners(\n \"We detected a possible change in machine's operating system\"\n \" or architecture. You might need to regenerate your lib folder\"\n \" if 3rd-party cogs stop working properly.\\n\"\n \"To regenerate lib folder, load Downloader and use `{prefix}cog reinstallreqs`.\"\n )\n )\n\n if packages:\n # Load permissions first, for security reasons\n try:\n packages.remove(\"permissions\")\n except ValueError:\n pass\n else:\n packages.insert(0, \"permissions\")\n\n to_remove = []\n print(\"Loading packages...\")\n for package in packages:\n try:\n spec = await self._cog_mgr.find_cog(package)\n await asyncio.wait_for(self.load_extension(spec), 30)\n except asyncio.TimeoutError:\n log.exception(\"Failed to load package %s (timeout)\", package)\n to_remove.append(package)\n except Exception as e:\n log.exception(\"Failed to load package {}\".format(package), exc_info=e)\n await self.remove_loaded_package(package)\n to_remove.append(package)\n for package in to_remove:\n packages.remove(package)\n if packages:\n print(\"Loaded packages: \" + \", \".join(packages))\n\n if self.rpc_enabled:\n await self.rpc.initialize(self.rpc_port)\n\n async def start(self, *args, **kwargs):\n cli_flags = kwargs.pop(\"cli_flags\")\n await self.pre_flight(cli_flags=cli_flags)\n return await super().start(*args, **kwargs)\n\n async def send_help_for(\n self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]\n ):\n \"\"\"\n Invokes Red's helpformatter for a given context and object.\n \"\"\"\n return await self._help_formatter.send_help(ctx, help_for)\n\n async def embed_requested(self, channel, user, command=None) -> bool:\n \"\"\"\n Determine if an embed is requested for a response.\n\n Parameters\n ----------\n channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`\n The channel to check embed settings for.\n user : `discord.abc.User`\n The user to check embed settings for.\n command\n (Optional) the command ran.\n\n Returns\n -------\n bool\n :code:`True` if an embed is requested\n 
\"\"\"\n if isinstance(channel, discord.abc.PrivateChannel) or (\n command and command == self.get_command(\"help\")\n ):\n user_setting = await self._config.user(user).embeds()\n if user_setting is not None:\n return user_setting\n else:\n guild_setting = await self._config.guild(channel.guild).embeds()\n if guild_setting is not None:\n return guild_setting\n global_setting = await self._config.embeds()\n return global_setting\n\n async def is_owner(self, user) -> bool:\n if user.id in self._co_owners:\n return True\n return await super().is_owner(user)\n\n async def is_admin(self, member: discord.Member) -> bool:\n \"\"\"Checks if a member is an admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self._config.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # Dep-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def is_mod(self, member: discord.Member) -> bool:\n \"\"\"Checks if a member is a mod or admin of their guild.\"\"\"\n try:\n member_snowflakes = member._roles # DEP-WARN\n for snowflake in await self._config.guild(member.guild).admin_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n for snowflake in await self._config.guild(member.guild).mod_role():\n if member_snowflakes.has(snowflake): # DEP-WARN\n return True\n except AttributeError: # someone passed a webhook to this\n pass\n return False\n\n async def get_admin_roles(self, guild: discord.Guild) -> List[discord.Role]:\n \"\"\"\n Gets the admin roles for a guild.\n \"\"\"\n ret: List[discord.Role] = []\n for snowflake in await self._config.guild(guild).admin_role():\n r = guild.get_role(snowflake)\n if r:\n ret.append(r)\n return ret\n\n async def get_mod_roles(self, guild: discord.Guild) -> List[discord.Role]:\n \"\"\"\n Gets the mod roles for a guild.\n \"\"\"\n ret: List[discord.Role] = []\n for snowflake in await self._config.guild(guild).mod_role():\n r = guild.get_role(snowflake)\n if r:\n ret.append(r)\n return ret\n\n async def get_admin_role_ids(self, guild_id: int) -> List[int]:\n \"\"\"\n Gets the admin role ids for a guild id.\n \"\"\"\n return await self._config.guild(discord.Object(id=guild_id)).admin_role()\n\n async def get_mod_role_ids(self, guild_id: int) -> List[int]:\n \"\"\"\n Gets the mod role ids for a guild id.\n \"\"\"\n return await self._config.guild(discord.Object(id=guild_id)).mod_role()\n\n async def get_shared_api_tokens(self, service_name: str) -> Dict[str, str]:\n \"\"\"\n Gets the shared API tokens for a service\n\n Parameters\n ----------\n service_name: str\n The service to get tokens for.\n\n Returns\n -------\n Dict[str, str]\n A Mapping of token names to tokens.\n This mapping exists because some services have multiple tokens.\n \"\"\"\n return await self._config.custom(SHARED_API_TOKENS, service_name).all()\n\n async def set_shared_api_tokens(self, service_name: str, **tokens: str):\n \"\"\"\n Sets shared API tokens for a service\n\n In most cases, this should not be used. 
Users should instead be using the\n ``set api`` command\n\n This will not clear existing values not specified.\n\n Parameters\n ----------\n service_name: str\n The service to set tokens for\n **tokens\n token_name -> token\n\n Examples\n --------\n Setting the api_key for youtube from a value in a variable ``my_key``\n\n >>> await ctx.bot.set_shared_api_tokens(\"youtube\", api_key=my_key)\n \"\"\"\n\n async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:\n group.update(tokens)\n self.dispatch(\"red_api_tokens_update\", service_name, MappingProxyType(group))\n\n async def remove_shared_api_tokens(self, service_name: str, *token_names: str):\n \"\"\"\n Removes shared API tokens\n\n Parameters\n ----------\n service_name: str\n The service to remove tokens for\n *token_names: str\n The name of each token to be removed\n\n Examples\n --------\n Removing the api_key for youtube\n\n >>> await ctx.bot.remove_shared_api_tokens(\"youtube\", \"api_key\")\n \"\"\"\n async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:\n for name in token_names:\n group.pop(name, None)\n\n async def get_context(self, message, *, cls=commands.Context):\n return await super().get_context(message, cls=cls)\n\n async def process_commands(self, message: discord.Message):\n \"\"\"\n Same as base method, but dispatches an additional event for cogs\n which want to handle normal messages differently to command\n messages, without the overhead of additional get_context calls\n per cog.\n \"\"\"\n if not message.author.bot:\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n else:\n ctx = None\n\n if ctx is None or ctx.valid is False:\n self.dispatch(\"message_without_command\", message)\n\n @staticmethod\n def list_packages():\n \"\"\"Lists packages present in the cogs the folder\"\"\"\n return os.listdir(\"cogs\")\n\n async def save_packages_status(self, packages):\n await self._config.packages.set(packages)\n\n async def add_loaded_package(self, pkg_name: str):\n async with self._config.packages() as curr_pkgs:\n if pkg_name not in curr_pkgs:\n curr_pkgs.append(pkg_name)\n\n async def remove_loaded_package(self, pkg_name: str):\n async with self._config.packages() as curr_pkgs:\n while pkg_name in curr_pkgs:\n curr_pkgs.remove(pkg_name)\n\n async def load_extension(self, spec: ModuleSpec):\n # NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`\n name = spec.name.split(\".\")[-1]\n if name in self.extensions:\n raise errors.PackageAlreadyLoaded(spec)\n\n lib = spec.loader.load_module()\n if not hasattr(lib, \"setup\"):\n del lib\n raise discord.ClientException(f\"extension {name} does not have a setup function\")\n\n try:\n if asyncio.iscoroutinefunction(lib.setup):\n await lib.setup(self)\n else:\n lib.setup(self)\n except Exception as e:\n self._remove_module_references(lib.__name__)\n self._call_module_finalizers(lib, name)\n raise\n else:\n self._BotBase__extensions[name] = lib\n\n def remove_cog(self, cogname: str):\n cog = self.get_cog(cogname)\n if cog is None:\n return\n\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.remove_permissions_hook(hook)\n\n super().remove_cog(cogname)\n\n cog.requires.reset()\n\n for meth in self.rpc_handlers.pop(cogname.upper(), ()):\n self.unregister_rpc_handler(meth)\n\n async def is_automod_immune(\n self, to_check: Union[discord.Message, commands.Context, discord.abc.User, 
discord.Role]\n ) -> bool:\n \"\"\"\n Checks if the user, message, context, or role should be considered immune from automated\n moderation actions.\n\n This will return ``False`` in direct messages.\n\n Parameters\n ----------\n to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`\n Something to check if it would be immune\n\n Returns\n -------\n bool\n ``True`` if immune\n\n \"\"\"\n guild = getattr(to_check, \"guild\", None)\n if not guild:\n return False\n\n if isinstance(to_check, discord.Role):\n ids_to_check = [to_check.id]\n else:\n author = getattr(to_check, \"author\", to_check)\n try:\n ids_to_check = [r.id for r in author.roles]\n except AttributeError:\n # webhook messages are a user not member,\n # cheaper than isinstance\n if author.bot and author.discriminator == \"0000\":\n return True # webhooks require significant permissions to enable.\n else:\n ids_to_check.append(author.id)\n\n immune_ids = await self._config.guild(guild).autoimmune_ids()\n\n return any(i in immune_ids for i in ids_to_check)\n\n @staticmethod\n async def send_filtered(\n destination: discord.abc.Messageable,\n filter_mass_mentions=True,\n filter_invite_links=True,\n filter_all_links=False,\n **kwargs,\n ):\n \"\"\"\n This is a convienience wrapper around\n\n discord.abc.Messageable.send\n\n It takes the destination you'd like to send to, which filters to apply\n (defaults on mass mentions, and invite links) and any other parameters\n normally accepted by destination.send\n\n This should realistically only be used for responding using user provided\n input. (unfortunately, including usernames)\n Manually crafted messages which dont take any user input have no need of this\n \n Returns\n -------\n discord.Message\n The message that was sent.\n \"\"\"\n\n content = kwargs.pop(\"content\", None)\n\n if content:\n if filter_mass_mentions:\n content = common_filters.filter_mass_mentions(content)\n if filter_invite_links:\n content = common_filters.filter_invites(content)\n if filter_all_links:\n content = common_filters.filter_urls(content)\n\n return await destination.send(content=content, **kwargs)\n\n def add_cog(self, cog: commands.Cog):\n if not isinstance(cog, commands.Cog):\n raise RuntimeError(\n f\"The {cog.__class__.__name__} cog in the {cog.__module__} package does \"\n f\"not inherit from the commands.Cog base class. 
The cog author must update \"\n f\"the cog to adhere to this requirement.\"\n )\n if cog.__cog_name__ in self.cogs:\n raise RuntimeError(f\"There is already a cog named {cog.__cog_name__} loaded.\")\n if not hasattr(cog, \"requires\"):\n commands.Cog.__init__(cog)\n\n added_hooks = []\n\n try:\n for cls in inspect.getmro(cog.__class__):\n try:\n hook = getattr(cog, f\"_{cls.__name__}__permissions_hook\")\n except AttributeError:\n pass\n else:\n self.add_permissions_hook(hook)\n added_hooks.append(hook)\n\n super().add_cog(cog)\n self.dispatch(\"cog_add\", cog)\n if \"permissions\" not in self.extensions:\n cog.requires.ready_event.set()\n except Exception:\n for hook in added_hooks:\n try:\n self.remove_permissions_hook(hook)\n except Exception:\n # This shouldn't be possible\n log.exception(\n \"A hook got extremely screwed up, \"\n \"and could not be removed properly during another error in cog load.\"\n )\n del cog\n raise\n\n def add_command(self, command: commands.Command) -> None:\n if not isinstance(command, commands.Command):\n raise RuntimeError(\"Commands must be instances of `redbot.core.commands.Command`\")\n\n super().add_command(command)\n\n permissions_not_loaded = \"permissions\" not in self.extensions\n self.dispatch(\"command_add\", command)\n if permissions_not_loaded:\n command.requires.ready_event.set()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n self.dispatch(\"command_add\", subcommand)\n if permissions_not_loaded:\n subcommand.requires.ready_event.set()\n\n def remove_command(self, name: str) -> None:\n command = super().remove_command(name)\n if not command:\n return\n command.requires.reset()\n if isinstance(command, commands.Group):\n for subcommand in set(command.walk_commands()):\n subcommand.requires.reset()\n\n def clear_permission_rules(self, guild_id: Optional[int], **kwargs) -> None:\n \"\"\"Clear all permission overrides in a scope.\n\n Parameters\n ----------\n guild_id : Optional[int]\n The guild ID to wipe permission overrides for. 
If\n ``None``, this will clear all global rules and leave all\n guild rules untouched.\n\n **kwargs\n Keyword arguments to be passed to each required call of\n ``commands.Requires.clear_all_rules``\n\n \"\"\"\n for cog in self.cogs.values():\n cog.requires.clear_all_rules(guild_id, **kwargs)\n for command in self.walk_commands():\n command.requires.clear_all_rules(guild_id, **kwargs)\n\n def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Add a permissions hook.\n\n Permissions hooks are check predicates which are called before\n calling `Requires.verify`, and they can optionally return an\n override: ``True`` to allow, ``False`` to deny, and ``None`` to\n default to normal behaviour.\n\n Parameters\n ----------\n hook\n A command check predicate which returns ``True``, ``False``\n or ``None``.\n\n \"\"\"\n self._permissions_hooks.append(hook)\n\n def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n \"\"\"Remove a permissions hook.\n\n Parameters are the same as those in `add_permissions_hook`.\n\n Raises\n ------\n ValueError\n If the permissions hook has not been added.\n\n \"\"\"\n self._permissions_hooks.remove(hook)\n\n async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:\n \"\"\"Run permissions hooks.\n\n Parameters\n ----------\n ctx : commands.Context\n The context for the command being invoked.\n\n Returns\n -------\n Optional[bool]\n ``False`` if any hooks returned ``False``, ``True`` if any\n hooks return ``True`` and none returned ``False``, ``None``\n otherwise.\n\n \"\"\"\n hook_results = []\n for hook in self._permissions_hooks:\n result = await discord.utils.maybe_coroutine(hook, ctx)\n if result is not None:\n hook_results.append(result)\n if hook_results:\n if all(hook_results):\n ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK\n return True\n else:\n ctx.permission_state = commands.PermState.DENIED_BY_HOOK\n return False\n\n async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:\n \"\"\"\n Gets the users and channels to send to\n \"\"\"\n await self.wait_until_red_ready()\n destinations = []\n opt_outs = await self._config.owner_opt_out_list()\n for user_id in (self.owner_id, *self._co_owners):\n if user_id not in opt_outs:\n user = self.get_user(user_id)\n if user:\n destinations.append(user)\n else:\n log.warning(\n \"Owner with ID %s is missing in user cache,\"\n \" ignoring owner notification destination.\",\n user_id,\n )\n\n channel_ids = await self._config.extra_owner_destinations()\n for channel_id in channel_ids:\n channel = self.get_channel(channel_id)\n if channel:\n destinations.append(channel)\n else:\n log.warning(\n \"Channel with ID %s is not available,\"\n \" ignoring owner notification destination.\",\n channel_id,\n )\n\n return destinations\n\n async def send_to_owners(self, content=None, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This takes the same arguments as discord.abc.Messageable.send\n\n This logs failing sends\n \"\"\"\n destinations = await self.get_owner_notification_destinations()\n\n async def wrapped_send(location, content=None, **kwargs):\n try:\n await location.send(content, **kwargs)\n except Exception as _exc:\n log.exception(\n f\"I could not send an owner notification to ({location.id}){location}\"\n )\n\n sends = [wrapped_send(d, content, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n async def wait_until_red_ready(self):\n 
\"\"\"Wait until our post connection startup is done.\"\"\"\n await self._red_ready.wait()\n\n\nclass Red(RedBase, discord.AutoShardedClient):\n \"\"\"\n You're welcome Caleb.\n \"\"\"\n\n async def logout(self):\n \"\"\"Logs out of Discord and closes all connections.\"\"\"\n await super().logout()\n await drivers.get_driver_class().teardown()\n try:\n await self.rpc.close()\n except AttributeError:\n pass\n\n async def shutdown(self, *, restart: bool = False):\n \"\"\"Gracefully quit Red.\n\n The program will exit with code :code:`0` by default.\n\n Parameters\n ----------\n restart : bool\n If :code:`True`, the program will exit with code :code:`26`. If the\n launcher sees this, it will attempt to restart the bot.\n\n \"\"\"\n if not restart:\n self._shutdown_mode = ExitCodes.SHUTDOWN\n else:\n self._shutdown_mode = ExitCodes.RESTART\n\n await self.logout()\n sys.exit(self._shutdown_mode)\n\n\nclass ExitCodes(IntEnum):\n # This needs to be an int enum to be used\n # with sys.exit\n CRITICAL = 1\n SHUTDOWN = 0\n RESTART = 26\n", "path": "redbot/core/bot.py" } ]
diff --git a/changelog.d/3293.misc.rst b/changelog.d/3293.misc.rst new file mode 100644 index 00000000000..c51b07a7b54 --- /dev/null +++ b/changelog.d/3293.misc.rst @@ -0,0 +1 @@ +Properly set owner from config during bot's pre-flight. diff --git a/redbot/core/bot.py b/redbot/core/bot.py index 4ea8a90559a..4264bd940fc 100644 --- a/redbot/core/bot.py +++ b/redbot/core/bot.py @@ -404,6 +404,9 @@ async def pre_flight(self, cli_flags): init_global_checks(self) init_events(self, cli_flags) + if self.owner_id is None: + self.owner_id = await self._config.owner() + i18n_locale = await self._config.locale() i18n.set_locale(i18n_locale)
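The diff just above is the whole fix for this record: during `pre_flight`, fall back to the owner id stored in the bot's config when no `--owner` CLI flag ever populated `owner_id`. A minimal, self-contained sketch of that fallback pattern is below; the `FakeConfig` class and the flag object are stand-ins for illustration, not Red's real classes.

```python
import asyncio
from types import SimpleNamespace


class FakeConfig:
    """Stand-in for Red's Config; only models the stored owner id."""

    def __init__(self, stored_owner_id):
        self._stored_owner_id = stored_owner_id

    async def owner(self):
        # Red's real config accessor is awaitable, so this one is too.
        return self._stored_owner_id


class Bot:
    def __init__(self, cli_flags, config):
        # As in the constructor: the --owner CLI flag wins when present.
        self.owner_id = cli_flags.owner or None
        self._config = config

    async def pre_flight(self):
        # The added lines from the diff: resolve the owner from config
        # only if the CLI flag never set it.
        if self.owner_id is None:
            self.owner_id = await self._config.owner()


async def main():
    bot = Bot(SimpleNamespace(owner=None), FakeConfig(123456789))
    await bot.pre_flight()
    print(bot.owner_id)  # 123456789, pulled from config


asyncio.run(main())
```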
OCHA-DAP__hdx-ckan-1887
Please allow markdown in the organization description field. Right now markdown is not allowed in that field. I believe that this is preventing me from adding paragraphs and other particular styles to the text in question. ![screen shot 2014-10-17 at 12 27 34 pm](https://cloud.githubusercontent.com/assets/953118/4682223/b9992900-561a-11e4-9cec-7f450c6909dc.png)
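The fix recorded below (see the `organization/read.html` hunk in the diff) switches the template from printing the raw description to passing it through CKAN's `h.render_markdown` helper. As a rough, self-contained illustration of the difference, the sketch below uses the third-party `markdown` package as a stand-in for CKAN's helper, with made-up description text: markdown source only turns into paragraphs and styled text once it is rendered to HTML.

```python
# Illustration only: CKAN templates call h.render_markdown(); the standalone
# `markdown` package (pip install markdown) stands in for it here.
import markdown

description = "Our datasets.\n\nSecond paragraph with *emphasis* and a [link](https://example.com)."

# Behaviour the issue complains about: the text is emitted as-is, so the
# blank line and the markdown syntax show up literally and no paragraphs form.
print(description)

# Requested behaviour: render to HTML so paragraphs, emphasis and links work.
print(markdown.markdown(description))
# -> <p>Our datasets.</p>
#    <p>Second paragraph with <em>emphasis</em> and
#       a <a href="https://example.com">link</a>.</p>
```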
[ { "content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nimport ckanext.hdx_crisis.dao.data_access as data_access\nimport ckanext.hdx_crisis.formatters.top_line_items_formatter as formatters\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n crisis_data_access = data_access.EbolaCrisisDataAccess()\n crisis_data_access.fetch_data(context)\n c.top_line_items = crisis_data_access.get_top_line_items()\n\n formatter = formatters.TopLineItemsFormatter(c.top_line_items)\n formatter.format_results()\n\n search_term = u'ebola'\n\n self._generate_dataset_results(context, search_term)\n\n self._generate_other_links(search_term)\n\n return render('crisis/crisis.html')\n\n def _generate_dataset_results(self, context, search_term):\n limit = 25\n c.q = search_term\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n url = h.url_for('show_crisis', page=page) + '#datasets-section'\n return url\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n def _generate_other_links(self, search_term):\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': search_term, 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py" } ]
[ { "content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nimport ckanext.hdx_crisis.dao.data_access as data_access\nimport ckanext.hdx_crisis.formatters.top_line_items_formatter as formatters\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n crisis_data_access = data_access.EbolaCrisisDataAccess()\n crisis_data_access.fetch_data(context)\n c.top_line_items = crisis_data_access.get_top_line_items()\n\n formatter = formatters.TopLineItemsFormatter(c.top_line_items)\n formatter.format_results()\n\n search_term = u'ebola'\n\n self._generate_dataset_results(context, search_term)\n\n self._generate_other_links(search_term)\n\n return render('crisis/crisis-ebola.html')\n\n def _generate_dataset_results(self, context, search_term):\n limit = 25\n c.q = search_term\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n url = h.url_for('show_crisis', page=page) + '#datasets-section'\n return url\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n def _generate_other_links(self, search_term):\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': search_term, 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py" } ]
diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py index d41a13b03e..70261e9f43 100644 --- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py +++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py @@ -46,7 +46,7 @@ def show(self): self._generate_other_links(search_term) - return render('crisis/crisis.html') + return render('crisis/crisis-ebola.html') def _generate_dataset_results(self, context, search_term): limit = 25 diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-base/crisis-base.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-base/crisis-base.css new file mode 100644 index 0000000000..7f46ae6138 --- /dev/null +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-base/crisis-base.css @@ -0,0 +1,23 @@ +#crisis-map { + height: 350px; +} + +.crisis-map-title { + height:50px; + background-color: rgba(255, 255, 255, 0.4); + position: absolute; + top: 300px; + width: 100%; +} + +.crisisTitle{ + color: #000000; + margin-top: 80px; + font-family: 'Source Sans Pro', sans-serif; + font-weight: bold; + font-size: 28px; + letter-spacing: 0.01em; + line-height: 50px; + text-transform: capitalize; +} + diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-base/crisis-base.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-base/crisis-base.js new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.css new file mode 100644 index 0000000000..88d2454c10 --- /dev/null +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.css @@ -0,0 +1,55 @@ +.item-info { + border-top: 1px solid #cccccc; + border-bottom: 1px solid #cccccc; + padding: 20px 0; + margin-top: -1px; + color: #333333; +} + +.item-info-title{ + font-family: 'Gotham-Bold', sans-serif; + font-weight: 400; + font-size: 14px; + letter-spacing: 0.01em; + /*margin-bottom: 10px;*/ + color: #333333; +} +.item-info .item-info-extra { + /*margin-top: 10px;*/ +} +.item-info .item-info-number { + font-family: 'Gotham-Light', sans-serif; + font-size: 47px; + line-height: 1; + letter-spacing: 0.01em; + /*margin-bottom: 10px;*/ +} + +.item-info .item-info-number span.small { + font-family: 'Gotham-Light', sans-serif; + font-size: 25px; + line-height: 1; + letter-spacing: 0.01em; + margin-bottom: 10px; + margin-left: -15px; +} + +.item-info-source-title-text{ + font-family: 'Source Sans Pro', sans-serif; + font-size: 14px; + color: #333333; + letter-spacing: 0.01em; +} + +.item-info .item-info-source-title{ + font-family: 'Source Sans Pro', sans-serif; + font-size: 12px; + letter-spacing: 0.01em; +} +.item-info .item-info-source-date { + font-family: 'Source Sans Pro', sans-serif; + font-size: 12px; + letter-spacing: 0.01em; + color: #888888; +} + diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js new file mode 100644 index 0000000000..8d2cdb3a5a --- /dev/null +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-colombia/crisis-colombia.js @@ -0,0 +1,14 @@ +$(document).ready(function() { + map = L.map('crisis-map', { attributionControl: false }); + map.scrollWheelZoom.disable(); + L.tileLayer($('#crisis-map-url-div').text(), { + 
attribution: ' © <a href="http://www.openstreetmap.org/copyright" target="_blank">OpenStreetMap</a> contributors', + maxZoom: 10 + }).addTo(map); + + L.control.attribution({position: 'topright'}).addTo(map); + map.setView([5, -70], 5); + +// drawDistricts(map); +// c3Sparklines(); +}); diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.css similarity index 84% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.css rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.css index cca656e421..617c28b6e9 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.css @@ -1,15 +1,3 @@ -#ebola-map { - height: 350px; -} - -.ebola-map-title { - height:50px; - background-color: rgba(255, 255, 255, 0.4); - position: absolute; - top: 300px; - width: 100%; -} - .item-info { border-top: 1px solid #cccccc; border-bottom: 1px solid #cccccc; @@ -65,16 +53,6 @@ color: #888888; } -.crisisTitle{ - color: #000000; - margin-top: 80px; - font-family: 'Source Sans Pro', sans-serif; - font-weight: bold; - font-size: 28px; - letter-spacing: 0.01em; - line-height: 50px; - text-transform: capitalize; -} /* Dataset search results on crisis page */ .crisis-list-header.list-header { /*background-color: inherit; */ diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js similarity index 99% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js index cf2b659ecb..49edba734d 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/crisis-page.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/crisis-ebola.js @@ -1,5 +1,5 @@ $(document).ready(function() { - map = L.map('ebola-map', { attributionControl: false }); + map = L.map('crisis-map', { attributionControl: false }); map.scrollWheelZoom.disable(); L.tileLayer($('#crisis-map-url-div').text(), { attribution: ' © <a href="http://www.openstreetmap.org/copyright" target="_blank">OpenStreetMap</a> contributors', diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/data.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js similarity index 100% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/data.js rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/data.js diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/ebola_crisis_page_graph.css b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/ebola_crisis_page_graph.css similarity index 100% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/ebola_crisis_page_graph.css rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/ebola_crisis_page_graph.css diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/ebola_crisis_page_graph.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/ebola_crisis_page_graph.js similarity index 100% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/ebola_crisis_page_graph.js rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/ebola_crisis_page_graph.js diff --git 
a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/medical_centres.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js similarity index 100% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/medical_centres.js rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/medical_centres.js diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/regions.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/regions.js similarity index 100% rename from ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis_page/regions.js rename to ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/crisis-ebola/regions.js diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/indicator_graph.js b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/indicator_graph.js index 2e5c6ef9b0..35724f9e2e 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/indicator_graph.js +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/indicator_graph.js @@ -212,11 +212,12 @@ ckan.module('hdx-indicator-graph', function ($, _) { if (json.success){ this.data = json.result.results; - //#1595 - fix not active until datateam will clean the data -// var unitName = this.data[0].unitName; -// if (unitName){ -// this.c3_chart.axis.labels({y: unitName}); -// } + var unitName = this.data[0].unitName; + if (unitName){ + if (unitName.length > 40) + unitName = unitName.slice(0, 37) + '...'; + this.c3_chart.axis.labels({y: unitName}); + } //Call all callbacks that new data was loaded for (var i = 0; i < this.dataCallbacks.length; i++){ this.dataCallbacks[i](); diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/resource.config b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/resource.config index bfa6702573..eb5eedc903 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/resource.config +++ b/ckanext-hdx_theme/ckanext/hdx_theme/fanstatic/resource.config @@ -16,8 +16,8 @@ css/search.css = 99 highlight.js=99 css/indicator.css = 99 css/filter.css = 99 -crisis_page/crisis-page.css = 99 -crisis_page/ebola_crisis_page_graph.css = 99 +crisis-ebola/crisis-ebola.css = 99 +crisis-ebola/ebola_crisis_page_graph.css = 99 [depends] @@ -39,15 +39,26 @@ c3 = vendor/c3/c3.css -crisis-page = - crisis_page/crisis-page.css - crisis_page/ebola_crisis_page_graph.css - crisis_page/crisis-page.js - crisis_page/ebola_crisis_page_graph.js +crisis-base = + crisis-base/crisis-base.css + crisis-base/crisis-base.js - crisis_page/data.js - crisis_page/medical_centres.js - crisis_page/regions.js +crisis-colombia = + crisis-colombia/crisis-colombia.css + crisis-colombia/crisis-colombia.js + crisis-ebola/ebola_crisis_page_graph.css + crisis-ebola/ebola_crisis_page_graph.js + + +crisis-ebola = + crisis-ebola/crisis-ebola.css + crisis-ebola/ebola_crisis_page_graph.css + crisis-ebola/crisis-ebola.js + crisis-ebola/ebola_crisis_page_graph.js + + crisis-ebola/data.js + crisis-ebola/medical_centres.js + crisis-ebola/regions.js google-analytics = google-analytics.js diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css b/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css index 6db42587f4..298155c9b7 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css +++ b/ckanext-hdx_theme/ckanext/hdx_theme/public/hdx_theme.css @@ -849,6 +849,7 @@ span.clear { clear: left; display: block; } .org-control textarea{ max-height:70px; + margin-bottom: 0; } diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html 
b/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html index c84f9716ee..dfbdf4ad5f 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/country/country.html @@ -1 +1,77 @@ -{% extends "crisis/crisis.html" %} \ No newline at end of file +{% extends "crisis-base.html" %} + +{% block subtitle %}{{ _("Colombia crisis page") }}{% endblock %} +{% block crisis_title %} {{ _("Colombia") }} {% endblock %} +{% block breadcrumb_content %} + <li class="active">{{ h.nav_link(_('Colombia'), controller='ckanext.hdx_crisis.controllers.country_controller:CountryController', + action='show', highlight_actions = 'new index') }}</li> +{% endblock %} + +{% block crisis_data %} + <div class="row mTop25"> + <div class="col-xs-4"> + <div class="row"> + {% block top_line_figures %} + {% for item in c.top_line_items %} + <div class="col-xs-12"> + <div class="item-info"> + <div class="row"> + <div class="col-xs-7"> + <div class="row"> + <div class="col-xs-12 item-info-title"> + {{ item.title }} + </div> + <div class="col-xs-12 item-info-extra"> + <div class="row"> + <div class="col-xs-12"> + <span class="item-info-source-title-text">{{ item.source }}</span> - + {% if item.source_link %} + <span class="item-info-source-title"><a href="{{ item.source_link }}" target="new">Data</a></span> - + {% endif %} + {% if item.explore %} + <span class="item-info-source-title"><a href="{{ item.explore }}" target="new">Explore</a></span> - + {% endif %} + <span class="item-info-source-date">{{ item.latest_date }}</span> + </div> + </div> + </div> + </div> + </div> + <div class="col-xs-5"> + <div class="row"> + <div class="col-xs-12 item-info-number text-right"> + <span title="{{ item.notes }}">{{ item.formatted_value }}</span> + {% if item.units == 'million' %} + <span class="small">{{ _('million') }} </span> + {% endif %} + {% if item.units == 'ratio' %} + <span class="small">%</span> + {% endif %} + </div> + </div> + </div> + </div> + </div> + </div> + {% endfor %} + {% endblock %} + </div> + </div> + <div class="col-xs-8"> + <div class="row"> + <div class="col-xs-12"> + <div id="ebola_graph"></div> + </div> + <div class="col-xs-12"> + <div id="ebola_graph"></div> + </div> + </div> + </div> + + </div> +{% endblock %} + +{% block scripts2 %} + {{ super() }} + {% resource 'hdx_theme/crisis-colombia' %} +{% endblock %} \ No newline at end of file diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis-ebola.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis-ebola.html new file mode 100644 index 0000000000..491761802f --- /dev/null +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis-ebola.html @@ -0,0 +1,77 @@ +{% extends "crisis-base.html" %} +{% import 'macros/form.html' as form %} + +{% block subtitle %}{{ _("Ebola crisis page") }}{% endblock %} +{% block crisis_title %} {{ _("West Africa: Ebola outbreak") }} {% endblock %} +{% block breadcrumb_content %} + <li class="active">{{ h.nav_link(_('Ebola'), controller='ckanext.hdx_crisis.controllers.crisis_controller:CrisisController', + action='show', highlight_actions = 'new index') }}</li> +{% endblock %} + + +{% block crisis_data %} + <div class="row mTop25"> + {% block top_line_figures %} + {% for item in c.top_line_items %} + <div class="col-xs-4"> + <div class="item-info"> + <div class="row"> + <div class="col-xs-12 item-info-title"> + {{ item.title }} + </div> + <div class="col-xs-12 item-info-number"> + <span title="{{ item.notes }}">{{ 
item.formatted_value }}</span> + {% if item.units == 'million' %} + <span class="small">{{ _('million') }} </span> + {% endif %} + {% if item.units == 'ratio' %} + <span class="small">%</span> + {% endif %} + + </div> + {% if item.sparklines_json %} + <div class="col-xs-12"> + <div style="display: none;" class="sparkline">{{ item.sparklines_json }}</div> + </div> + {% endif %} + <div class="col-xs-12 item-info-extra"> + <div class="row"> + <div class="col-xs-12"> + <span class="item-info-source-title-text">{{ item.source }}</span> + </div> + </div> + <div class="row"> + <div class="col-xs-12"> + {% if item.source_link %} + <span class="item-info-source-title"><a href="{{ item.source_link }}" target="new">Data</a></span> - + {% endif %} + {% if item.explore %} + <span class="item-info-source-title"><a href="{{ item.explore }}" target="new">Explore</a></span> - + {% endif %} + <span class="item-info-source-date">{{ item.latest_date }}</span> + </div> + </div> + </div> + </div> + </div> + </div> + {% endfor %} + {% endblock %} + </div> + + <div class="row mTop25"> + <div class="col-xs-12 item-info-title"> + Week by Week Spread of Ebola in West Africa + </div> + <div class="col-xs-12"> + <div id="ebola_graph"></div> + </div> + </div> +{% endblock %} + + +{% block scripts2 %} + {{ super() }} + {% resource 'hdx_theme/crisis-ebola' %} + +{% endblock %} \ No newline at end of file diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html index 719df17af9..98b7d3db2a 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/read.html @@ -48,8 +48,8 @@ <h1 class="itemTitle"> {% block package_notes %} {% if c.group_dict.description %} <div class="notes embedded-content mBottom15"> - {{ c.group_dict.description }} - {% set org_url = h.hdx_get_extras_element(c.group_dict.extras) %} + {{ h.render_markdown(c.group_dict.description) }} + {% set org_url = h.hdx_get_extras_element(c.group_dict.extras) %} {% if org_url != '' %} <span> &nbsp;&nbsp;&nbsp;{{ _('More info') }} diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html index 4186a52285..eba781f168 100644 --- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html +++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/organization/snippets/organization_form.html @@ -28,7 +28,7 @@ </div> </div> <div class="org-control-container"> - {{ form.textarea('description', label=_('Description of Organisation'), id='field-description', value=data.description, error=errors.description, classes=['org-control', 'mandatory','field-with-info']) }} + {{ form.markdown('description', id='field-description', label=_('Description of Organisation'), value=data.description, error=errors.description, classes=['org-control', 'mandatory','field-with-info']) }} <div class="org-control-info-large info-field"> <div class="org-info-label-large"> {{_('Brief overview of what organisation is for.')}}</div>
ARM-DOE__ACT-673
Feedstock failing due to pandas datetime ### Description CI is failing because the datetime unit is not being set for the csv reader. ### What I Did See the failing PR here: https://github.com/conda-forge/act-atmos-feedstock/pull/63
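For context, the one-line fix in this record changes `astype('datetime64')` to `astype('datetime64[ns]')` in `act/io/csvfiles.py`. The sketch below is a hedged illustration of the underlying pandas behaviour; the exact exception type and message depend on the pandas version, and the sample timestamps are made up.

```python
import pandas as pd

df = pd.DataFrame({"date_time": ["2019-11-25 00:00:00", "2019-11-25 00:01:00"]})

# On pandas 2.x the unit-less dtype string is rejected; on pandas 1.x it
# still worked, which is why the failure only surfaced on the feedstock CI.
try:
    df["date_time"].astype("datetime64")
except (TypeError, ValueError) as exc:
    print(f"unit-less datetime64 rejected: {exc}")

# Spelling out the unit works on both old and new pandas:
converted = df["date_time"].astype("datetime64[ns]")
print(converted.dtype)  # datetime64[ns]
```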
[ { "content": "\"\"\"\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\nimport pathlib\n\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n ignore_index : bool\n Keyword for pandas concat function. If True, do not use the index\n values along the concatenation axis. The resulting axis will be labeled\n 0, …, n - 1. This is useful if you are concatenating datasets where the\n concatenation axis does not have meaningful indexing information. Note\n the index values on the other axes are still respected in the join.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n ds : xarray.Dataset\n ACT Xarray dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n\n ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n\n # Convert to string if filename is a pathlib or not a list\n if isinstance(filename, (pathlib.PurePath, str)):\n filename = [str(filename)]\n\n if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):\n filename = [str(ii) for ii in filename]\n\n # Read data using pandas read_csv one file at a time and append to\n # list. Then concatinate the list into one pandas dataframe.\n li = []\n for fl in filename:\n df = pd.read_csv(\n fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs\n )\n li.append(df)\n\n if len(li) == 1:\n df = li[0]\n else:\n df = pd.concat(li, axis=0, ignore_index=ignore_index)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n df.date_time = df.date_time.astype('datetime64')\n df.time = df.date_time\n df = df.set_index('time')\n\n # Convert to xarray DataSet\n ds = df.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(ds)\n if is_arm_file_flag == 0:\n\n ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]\n ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return ds\n", "path": "act/io/csvfiles.py" } ]
[ { "content": "\"\"\"\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\nimport pathlib\n\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n ignore_index : bool\n Keyword for pandas concat function. If True, do not use the index\n values along the concatenation axis. The resulting axis will be labeled\n 0, …, n - 1. This is useful if you are concatenating datasets where the\n concatenation axis does not have meaningful indexing information. Note\n the index values on the other axes are still respected in the join.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n ds : xarray.Dataset\n ACT Xarray dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n\n ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n\n # Convert to string if filename is a pathlib or not a list\n if isinstance(filename, (pathlib.PurePath, str)):\n filename = [str(filename)]\n\n if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):\n filename = [str(ii) for ii in filename]\n\n # Read data using pandas read_csv one file at a time and append to\n # list. Then concatinate the list into one pandas dataframe.\n li = []\n for fl in filename:\n df = pd.read_csv(\n fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs\n )\n li.append(df)\n\n if len(li) == 1:\n df = li[0]\n else:\n df = pd.concat(li, axis=0, ignore_index=ignore_index)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n df.date_time = df.date_time.astype('datetime64[ns]')\n df.time = df.date_time\n df = df.set_index('time')\n\n # Convert to xarray DataSet\n ds = df.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(ds)\n if is_arm_file_flag == 0:\n\n ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]\n ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return ds\n", "path": "act/io/csvfiles.py" } ]
diff --git a/act/io/csvfiles.py b/act/io/csvfiles.py index f0d2fc36a4..32be0668b0 100644 --- a/act/io/csvfiles.py +++ b/act/io/csvfiles.py @@ -75,7 +75,7 @@ def read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0 # Set Coordinates if there's a variable date_time if 'date_time' in df: - df.date_time = df.date_time.astype('datetime64') + df.date_time = df.date_time.astype('datetime64[ns]') df.time = df.date_time df = df.set_index('time')
rasterio__rasterio-883
Single int indexes param in sample method. According to the docs, the `indexes` param in the `sample` method can be a "list of ints or a single int". However, passing a single int raises this exception: `IndexError: too many indices for array`.
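The fix in this record normalizes a bare int to a one-element list before calling `dataset.read`. The standalone sketch below (pure NumPy, no rasterio required, with a toy array standing in for the window that `sample_gen` reads) shows why a 2-D single-band result breaks the `[:, 0, 0]` lookup and how the normalization avoids it.

```python
import numpy as np

# Shape (bands, rows, cols): a toy stand-in for the window read by sample_gen.
data = np.arange(12).reshape(3, 2, 2)

# A list of band indexes keeps the band axis, so the [:, 0, 0] lookup works.
print(data[[1]][:, 0, 0])            # -> [4]

# A bare int drops the band axis, leaving a 2-D array, and the same lookup
# fails with the error from the report.
try:
    print(data[1][:, 0, 0])
except IndexError as exc:
    print(exc)                       # too many indices for array ...

# The fix applied in rasterio/sample.py: promote an int to a list up front.
indexes = 1
if isinstance(indexes, int):
    indexes = [indexes]
print(data[indexes][:, 0, 0])        # -> [4]
```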
[ { "content": "# Workaround for issue #378. A pure Python generator.\n\ndef sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n data = read(indexes, window=window, masked=False, boundless=True)\n yield data[:,0,0]\n", "path": "rasterio/sample.py" } ]
[ { "content": "# Workaround for issue #378. A pure Python generator.\n\ndef sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n\n if isinstance(indexes, int):\n indexes = [indexes]\n\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n data = read(indexes, window=window, masked=False, boundless=True)\n yield data[:,0,0]\n", "path": "rasterio/sample.py" } ]
diff --git a/rasterio/sample.py b/rasterio/sample.py index aad74ad10..bc718c765 100644 --- a/rasterio/sample.py +++ b/rasterio/sample.py @@ -3,6 +3,10 @@ def sample_gen(dataset, xy, indexes=None): index = dataset.index read = dataset.read + + if isinstance(indexes, int): + indexes = [indexes] + for x, y in xy: r, c = index(x, y) window = ((r, r+1), (c, c+1)) diff --git a/tests/test_sampling.py b/tests/test_sampling.py index 6d73737f4..470119744 100644 --- a/tests/test_sampling.py +++ b/tests/test_sampling.py @@ -19,6 +19,12 @@ def test_sampling_indexes(): assert list(data) == [25] +def test_sampling_single_index(): + with rasterio.open('tests/data/RGB.byte.tif') as src: + data = next(src.sample([(220650.0, 2719200.0)], indexes=2)) + assert list(data) == [25] + + def test_sampling_type(): """See https://github.com/mapbox/rasterio/issues/378.""" with rasterio.open('tests/data/RGB.byte.tif') as src:
edgedb__edgedb-2849
ISE: `unimplemented auth method: NoneType` when inserting new user on clean instance <!-- Please search existing issues to avoid creating duplicates. --> - EdgeDB Version: 1-beta3-451baf4 - OS Version: ubuntu 18.04 Steps to Reproduce: 1. initialize new project with new `EdgeDB` instance in docker with `edgedb project init` 2. login into EdgeDB shell using `edgedb` command and create new user with password: ``` CREATE SUPERUSER ROLE edgedb_scram { SET password := 'edgedb_scram_password' }; CONFIGURE INSTANCE INSERT Auth { user := 'edgedb_scram', method := (INSERT SCRAM), priority := 2 }; ``` 3. exit current shell session and try running `edgedb` command again to login into shell ``` $ edgedb edgedb error: Error authenticating: ERROR: InternalServerError: unimplemented auth method: NoneType Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md Server traceback: Traceback (most recent call last): File "edb/server/protocol/binary.pyx", line 1689, in edb.server.protocol.binary.EdgeConnection.main File "edb/server/protocol/binary.pyx", line 445, in auth edb.errors.InternalServerError: unimplemented auth method: NoneType ``` As I understand it, this is because the default user (`edgedb`) created by the EdgeDB bootstrap process is not being added to `cfg::Auth`. This can be verified by selecting the existing `cfg::Auth` objects before inserting a new one: ``` SELECT cfg::Auth { user, method, priority }; ``` The output will be an empty set (`{}`). But in any case, user should not receive such error from the EdgeDB server in case of authentication failure. I would rather expect something like `AuthenticationError: no authentication method configured for the "edgedb" role` or something similar. It's possible I missed something about roles configuration, but that part of EdgeDB is poorly documented right now, IMO.
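Below is a rough Python sketch of why the failure surfaces as `NoneType` rather than as a clean authentication error. It is not EdgeDB's actual code (the real check lives in `binary.pyx`, which is not quoted here); it only mirrors the fact, visible in `_populate_sys_auth` in the server source below, that the configured `cfg::Auth` entries are kept sorted by priority and the first matching one is used, so a role with no matching entry yields `None` and the later dispatch on the method type reports `NoneType`. All names and the `'*'` catch-all are illustrative assumptions.

```python
from dataclasses import dataclass
from typing import Optional, Tuple


@dataclass
class Auth:
    priority: int
    user: Tuple[str, ...]   # role names this entry applies to
    method: str             # e.g. "SCRAM" or "Trust" (simplified)


def lookup_auth(entries, rolename) -> Optional[Auth]:
    # Mirrors the sorted-by-priority, first-match-wins selection.
    for entry in sorted(entries, key=lambda a: a.priority):
        if rolename in entry.user or "*" in entry.user:
            return entry
    return None  # no auth method configured for this role


entries = [Auth(priority=2, user=("edgedb_scram",), method="SCRAM")]

print(lookup_auth(entries, "edgedb_scram").method)  # SCRAM
print(lookup_auth(entries, "edgedb"))               # None -> dispatching on the
                                                    # method type reports "NoneType"
```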
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport binascii\nimport json\nimport logging\nimport os\nimport pickle\nimport socket\nimport ssl\nimport stat\nimport sys\nimport uuid\n\nimport immutables\n\nfrom edb import errors\n\nfrom edb.common import devmode\nfrom edb.common import taskgroup\nfrom edb.common import windowedsum\n\nfrom edb.schema import reflection as s_refl\nfrom edb.schema import roles as s_role\nfrom edb.schema import schema as s_schema\n\nfrom edb.edgeql import parser as ql_parser\n\nfrom edb.server import args as srvargs\nfrom edb.server import cache\nfrom edb.server import config\nfrom edb.server import connpool\nfrom edb.server import compiler_pool\nfrom edb.server import defines\nfrom edb.server import protocol\nfrom edb.server.protocol import binary # type: ignore\nfrom edb.server import pgcon\nfrom edb.server.pgcon import errors as pgcon_errors\n\nfrom . import dbview\n\n\nADMIN_PLACEHOLDER = \"<edgedb:admin>\"\nlogger = logging.getLogger('edb.server')\nlog_metrics = logging.getLogger('edb.server.metrics')\n\n\nclass RoleDescriptor(TypedDict):\n superuser: bool\n name: str\n password: str\n\n\nclass StartupError(Exception):\n pass\n\n\nclass Server:\n\n _sys_pgcon: Optional[pgcon.PGConnection]\n\n _roles: Mapping[str, RoleDescriptor]\n _instance_data: Mapping[str, str]\n _sys_queries: Mapping[str, str]\n _local_intro_query: bytes\n _global_intro_query: bytes\n\n _std_schema: s_schema.Schema\n _refl_schema: s_schema.Schema\n _schema_class_layout: s_refl.SchemaTypeLayout\n\n _sys_pgcon_waiter: asyncio.Lock\n _servers: Mapping[str, asyncio.AbstractServer]\n\n _task_group: Optional[taskgroup.TaskGroup]\n _binary_conns: Set[binary.EdgeConnection]\n\n def __init__(\n self,\n *,\n cluster,\n runstate_dir,\n internal_runstate_dir,\n max_backend_connections,\n compiler_pool_size,\n nethosts,\n netport,\n allow_insecure_binary_clients: bool = False,\n allow_insecure_http_clients: bool = False,\n auto_shutdown_after: float = -1,\n echo_runtime_info: bool = False,\n status_sink: Optional[Callable[[str], None]] = None,\n startup_script: Optional[srvargs.StartupScript] = None,\n ):\n\n self._loop = asyncio.get_running_loop()\n\n # Used to tag PG notifications to later disambiguate them.\n self._server_id = str(uuid.uuid4())\n\n self._serving = False\n self._initing = False\n self._accept_new_tasks = False\n\n self._cluster = cluster\n self._pg_addr = self._get_pgaddr()\n inst_params = cluster.get_runtime_params().instance_params\n self._tenant_id = inst_params.tenant_id\n\n # 1 connection is reserved for the system DB\n pool_capacity = max_backend_connections - 1\n self._pg_pool = connpool.Pool(\n connect=self._pg_connect,\n disconnect=self._pg_disconnect,\n max_capacity=pool_capacity,\n )\n self._pg_unavailable_msg = None\n\n # DB state will 
be initialized in init().\n self._dbindex = None\n\n self._runstate_dir = runstate_dir\n self._internal_runstate_dir = internal_runstate_dir\n self._max_backend_connections = max_backend_connections\n self._compiler_pool = None\n self._compiler_pool_size = compiler_pool_size\n\n self._listen_hosts = nethosts\n self._listen_port = netport\n\n self._sys_auth: Tuple[Any, ...] = tuple()\n\n # Shutdown the server after the last management\n # connection has disconnected\n # and there have been no new connections for n seconds\n self._auto_shutdown_after = auto_shutdown_after\n self._auto_shutdown_handler = None\n\n self._echo_runtime_info = echo_runtime_info\n self._status_sink = status_sink\n\n self._startup_script = startup_script\n\n # Never use `self.__sys_pgcon` directly; get it via\n # `await self._acquire_sys_pgcon()`.\n self.__sys_pgcon = None\n\n self._roles = immutables.Map()\n self._instance_data = immutables.Map()\n self._sys_queries = immutables.Map()\n\n self._devmode = devmode.is_in_dev_mode()\n\n self._binary_proto_id_counter = 0\n self._binary_conns = set()\n self._accepting_connections = False\n\n self._servers = {}\n\n self._http_query_cache = cache.StatementsCache(\n maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)\n\n self._http_last_minute_requests = windowedsum.WindowedSum()\n self._http_request_logger = None\n\n self._task_group = None\n self._stop_evt = asyncio.Event()\n self._tls_cert_file = None\n self._sslctx = None\n\n self._allow_insecure_binary_clients = allow_insecure_binary_clients\n self._allow_insecure_http_clients = allow_insecure_http_clients\n\n async def _request_stats_logger(self):\n last_seen = -1\n while True:\n current = int(self._http_last_minute_requests)\n if current != last_seen:\n log_metrics.info(\n \"HTTP requests in last minute: %d\",\n current,\n )\n last_seen = current\n\n await asyncio.sleep(30)\n\n def get_listen_hosts(self):\n return self._listen_hosts\n\n def get_listen_port(self):\n return self._listen_port\n\n def get_loop(self):\n return self._loop\n\n def in_dev_mode(self):\n return self._devmode\n\n def get_pg_dbname(self, dbname: str) -> str:\n return self._cluster.get_db_name(dbname)\n\n def on_binary_client_connected(self) -> str:\n self._binary_proto_id_counter += 1\n\n if self._auto_shutdown_handler:\n self._auto_shutdown_handler.cancel()\n self._auto_shutdown_handler = None\n\n return str(self._binary_proto_id_counter)\n\n def on_binary_client_authed(self, conn):\n self._binary_conns.add(conn)\n self._report_connections(event='opened')\n\n def on_binary_client_disconnected(self, conn):\n self._binary_conns.discard(conn)\n self._report_connections(event=\"closed\")\n\n if not self._binary_conns and self._auto_shutdown_after >= 0:\n\n def shutdown():\n self._accepting_connections = False\n self._stop_evt.set()\n\n self._auto_shutdown_handler = self._loop.call_later(\n self._auto_shutdown_after, shutdown)\n\n def _report_connections(self, *, event: str) -> None:\n log_metrics.info(\n \"%s a connection; open_count=%d\",\n event,\n len(self._binary_conns),\n )\n\n async def _pg_connect(self, dbname):\n pg_dbname = self.get_pg_dbname(dbname)\n return await pgcon.connect(\n self._get_pgaddr(), pg_dbname, self._tenant_id)\n\n async def _pg_disconnect(self, conn):\n conn.terminate()\n\n async def init(self):\n self._initing = True\n try:\n self.__sys_pgcon = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)\n self._sys_pgcon_waiter = asyncio.Lock()\n self._sys_pgcon_ready_evt = asyncio.Event()\n self._sys_pgcon_reconnect_evt = 
asyncio.Event()\n\n await self._load_instance_data()\n\n global_schema = await self.introspect_global_schema()\n sys_config = await self.load_sys_config()\n\n self._dbindex = dbview.DatabaseIndex(\n self,\n std_schema=self._std_schema,\n global_schema=global_schema,\n sys_config=sys_config,\n )\n\n self._fetch_roles()\n await self._introspect_dbs()\n\n # Now, once all DBs have been introspected, start listening on\n # any notifications about schema/roles/etc changes.\n await self.__sys_pgcon.listen_for_sysevent()\n self.__sys_pgcon.set_server(self)\n self._sys_pgcon_ready_evt.set()\n\n self._populate_sys_auth()\n\n if not self._listen_hosts:\n self._listen_hosts = (\n config.lookup('listen_addresses', sys_config)\n or ('localhost',)\n )\n\n if self._listen_port is None:\n self._listen_port = (\n config.lookup('listen_port', sys_config)\n or defines.EDGEDB_PORT\n )\n\n self._http_request_logger = asyncio.create_task(\n self._request_stats_logger()\n )\n\n finally:\n self._initing = False\n\n async def _create_compiler_pool(self):\n self._compiler_pool = await compiler_pool.create_compiler_pool(\n pool_size=self._compiler_pool_size,\n dbindex=self._dbindex,\n runstate_dir=self._internal_runstate_dir,\n backend_runtime_params=self.get_backend_runtime_params(),\n std_schema=self._std_schema,\n refl_schema=self._refl_schema,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def _destroy_compiler_pool(self):\n if self._compiler_pool is not None:\n await self._compiler_pool.stop()\n self._compiler_pool = None\n\n def _populate_sys_auth(self):\n cfg = self._dbindex.get_sys_config()\n auth = config.lookup('auth', cfg) or ()\n self._sys_auth = tuple(sorted(auth, key=lambda a: a.priority))\n\n def _get_pgaddr(self):\n return self._cluster.get_connection_spec()\n\n def get_compiler_pool(self):\n return self._compiler_pool\n\n def get_db(self, *, dbname: str):\n assert self._dbindex is not None\n return self._dbindex.get_db(dbname)\n\n def maybe_get_db(self, *, dbname: str):\n assert self._dbindex is not None\n return self._dbindex.maybe_get_db(dbname)\n\n def new_dbview(self, *, dbname, user, query_cache):\n return self._dbindex.new_view(\n dbname, user=user, query_cache=query_cache)\n\n def remove_dbview(self, dbview):\n return self._dbindex.remove_view(dbview)\n\n def get_global_schema(self):\n return self._dbindex.get_global_schema()\n\n def get_compilation_system_config(self):\n return self._dbindex.get_compilation_system_config()\n\n async def acquire_pgcon(self, dbname):\n if self._pg_unavailable_msg is not None:\n raise errors.BackendUnavailableError(\n 'Postgres is not available: ' + self._pg_unavailable_msg\n )\n\n for _ in range(self._pg_pool.max_capacity + 1):\n conn = await self._pg_pool.acquire(dbname)\n if conn.is_healthy():\n return conn\n else:\n logger.warning('Acquired an unhealthy pgcon; discard now.')\n self._pg_pool.release(dbname, conn, discard=True)\n else:\n # This is unlikely to happen, but we defer to the caller to retry\n # when it does happen\n raise errors.BackendUnavailableError(\n 'No healthy backend connection available at the moment, '\n 'please try again.'\n )\n\n def release_pgcon(self, dbname, conn, *, discard=False):\n if not conn.is_healthy():\n logger.warning('Released an unhealthy pgcon; discard now.')\n discard = True\n self._pg_pool.release(dbname, conn, discard=discard)\n\n async def load_sys_config(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n query = self.get_sys_query('sysconfig')\n sys_config_json = await 
syscon.parse_execute_json(\n query,\n b'__backend_sysconfig',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n finally:\n self._release_sys_pgcon()\n\n return config.from_json(config.get_settings(), sys_config_json)\n\n async def introspect_global_schema(self, conn=None):\n if conn is not None:\n json_data = await conn.parse_execute_json(\n self._global_intro_query, b'__global_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n else:\n syscon = await self._acquire_sys_pgcon()\n try:\n json_data = await syscon.parse_execute_json(\n self._global_intro_query, b'__global_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n finally:\n self._release_sys_pgcon()\n\n return s_refl.parse_into(\n base_schema=self._std_schema,\n schema=s_schema.FlatSchema(),\n data=json_data,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def _reintrospect_global_schema(self):\n if not self._initing and not self._serving:\n logger.warning(\n \"global-schema-changes event received during shutdown; \"\n \"ignoring.\"\n )\n return\n new_global_schema = await self.introspect_global_schema()\n self._dbindex.update_global_schema(new_global_schema)\n self._fetch_roles()\n\n async def introspect_user_schema(self, conn):\n json_data = await conn.parse_execute_json(\n self._local_intro_query, b'__local_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n\n base_schema = s_schema.ChainedSchema(\n self._std_schema,\n s_schema.FlatSchema(),\n self.get_global_schema(),\n )\n\n return s_refl.parse_into(\n base_schema=base_schema,\n schema=s_schema.FlatSchema(),\n data=json_data,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def introspect_db(\n self, dbname, *, refresh=False, skip_dropped=False\n ):\n try:\n conn = await self.acquire_pgcon(dbname)\n except pgcon_errors.BackendError as e:\n if skip_dropped and e.code_is(\n pgcon_errors.ERROR_INVALID_CATALOG_NAME\n ):\n # database does not exist\n logger.warning(\n \"Detected concurrently-dropped database %s; skipping.\",\n dbname,\n )\n return\n else:\n raise\n\n try:\n user_schema = await self.introspect_user_schema(conn)\n\n reflection_cache_json = await conn.parse_execute_json(\n b'''\n SELECT json_agg(o.c)\n FROM (\n SELECT\n json_build_object(\n 'eql_hash', t.eql_hash,\n 'argnames', array_to_json(t.argnames)\n ) AS c\n FROM\n ROWS FROM(edgedb._get_cached_reflection())\n AS t(eql_hash text, argnames text[])\n ) AS o;\n ''',\n b'__reflection_cache',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n\n reflection_cache = immutables.Map({\n r['eql_hash']: tuple(r['argnames'])\n for r in json.loads(reflection_cache_json)\n })\n\n backend_ids_json = await conn.parse_execute_json(\n b'''\n SELECT\n json_object_agg(\n \"id\"::text,\n \"backend_id\"\n )::text\n FROM\n edgedb.\"_SchemaType\"\n ''',\n b'__backend_ids_fetch',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n backend_ids = json.loads(backend_ids_json)\n\n db_config = await self.introspect_db_config(conn)\n\n self._dbindex.register_db(\n dbname,\n user_schema=user_schema,\n db_config=db_config,\n reflection_cache=reflection_cache,\n backend_ids=backend_ids,\n refresh=refresh,\n )\n finally:\n self.release_pgcon(dbname, conn)\n\n async def introspect_db_config(self, conn):\n query = self.get_sys_query('dbconfig')\n result = await conn.parse_execute_json(\n query,\n b'__backend_dbconfig',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n return config.from_json(config.get_settings(), result)\n\n async def _introspect_dbs(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n 
dbs_query = self.get_sys_query('listdbs')\n json_data = await syscon.parse_execute_json(\n dbs_query, b'__listdbs',\n dbver=0, use_prep_stmt=True, args=(),\n )\n dbnames = json.loads(json_data)\n finally:\n self._release_sys_pgcon()\n\n async with taskgroup.TaskGroup(name='introspect DBs') as g:\n for dbname in dbnames:\n g.create_task(self.introspect_db(dbname, skip_dropped=True))\n\n def _fetch_roles(self):\n global_schema = self._dbindex.get_global_schema()\n\n roles = {}\n for role in global_schema.get_objects(type=s_role.Role):\n role_name = str(role.get_name(global_schema))\n roles[role_name] = {\n 'name': role_name,\n 'superuser': role.get_superuser(global_schema),\n 'password': role.get_password(global_schema),\n }\n\n self._roles = immutables.Map(roles)\n\n async def _load_instance_data(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n result = await syscon.simple_query(b'''\\\n SELECT json FROM edgedbinstdata.instdata\n WHERE key = 'instancedata';\n ''', ignore_data=False)\n self._instance_data = immutables.Map(\n json.loads(result[0][0].decode('utf-8')))\n\n result = await syscon.simple_query(b'''\\\n SELECT json FROM edgedbinstdata.instdata\n WHERE key = 'sysqueries';\n ''', ignore_data=False)\n queries = json.loads(result[0][0].decode('utf-8'))\n self._sys_queries = immutables.Map(\n {k: q.encode() for k, q in queries.items()})\n\n result = await syscon.simple_query(b'''\\\n SELECT text FROM edgedbinstdata.instdata\n WHERE key = 'local_intro_query';\n ''', ignore_data=False)\n self._local_intro_query = result[0][0]\n\n result = await syscon.simple_query(b'''\\\n SELECT text FROM edgedbinstdata.instdata\n WHERE key = 'global_intro_query';\n ''', ignore_data=False)\n self._global_intro_query = result[0][0]\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'stdschema';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._std_schema = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load std schema pickle') from e\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'reflschema';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._refl_schema = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load refl schema pickle') from e\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'classlayout';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._schema_class_layout = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load schema class layout pickle') from e\n finally:\n self._release_sys_pgcon()\n\n def get_roles(self):\n return self._roles\n\n async def _restart_servers_new_addr(self, nethosts, netport):\n if not netport:\n raise RuntimeError('cannot restart without network port specified')\n nethosts = _fix_wildcard_host(nethosts)\n servers_to_stop = []\n servers = {}\n if self._listen_port == netport:\n hosts_to_start = [\n host for host in nethosts if host not in self._servers\n ]\n for host, srv in self._servers.items():\n if host == ADMIN_PLACEHOLDER or host in nethosts:\n servers[host] = srv\n else:\n servers_to_stop.append(srv)\n admin = False\n else:\n hosts_to_start = nethosts\n servers_to_stop = self._servers.values()\n admin = True\n\n new_servers, *_ = await self._start_servers(\n hosts_to_start, netport, admin\n )\n 
servers.update(new_servers)\n self._servers = servers\n self._listen_hosts = nethosts\n self._listen_port = netport\n\n addrs = []\n unix_addr = None\n port = None\n for srv in servers_to_stop:\n for s in srv.sockets:\n addr = s.getsockname()\n if isinstance(addr, tuple):\n addrs.append(addr)\n if port is None:\n port = addr[1]\n elif port != addr[1]:\n port = 0\n else:\n unix_addr = addr\n if len(addrs) > 1:\n if port:\n addr_str = f\"{{{', '.join(addr[0] for addr in addrs)}}}:{port}\"\n else:\n addr_str = f\"{{{', '.join('%s:%d' % addr for addr in addrs)}}}\"\n elif addrs:\n addr_str = \"%s:%d\" % addrs[0]\n else:\n addr_str = None\n if addr_str:\n logger.info('Stopping to serve on %s', addr_str)\n if unix_addr:\n logger.info('Stopping to serve admin on %s', unix_addr)\n\n await self._stop_servers(servers_to_stop)\n\n async def _on_before_drop_db(\n self,\n dbname: str,\n current_dbname: str\n ) -> None:\n if current_dbname == dbname:\n raise errors.ExecutionError(\n f'cannot drop the currently open database {dbname!r}')\n\n await self._ensure_database_not_connected(dbname)\n\n async def _on_before_create_db_from_template(\n self,\n dbname: str,\n current_dbname: str\n ):\n if current_dbname == dbname:\n raise errors.ExecutionError(\n f'cannot create database using currently open database '\n f'{dbname!r} as a template database')\n\n await self._ensure_database_not_connected(dbname)\n\n async def _ensure_database_not_connected(self, dbname: str):\n assert self._dbindex is not None\n\n if self._dbindex.count_connections(dbname):\n # If there are open EdgeDB connections to the `dbname` DB\n # just raise the error Postgres would have raised itself.\n raise errors.ExecutionError(\n f'database {dbname!r} is being accessed by other users')\n else:\n # If, however, there are no open EdgeDB connections, prune\n # all non-active postgres connection to the `dbname` DB.\n await self._pg_pool.prune_inactive_connections(dbname)\n\n def _on_after_drop_db(self, dbname: str):\n assert self._dbindex is not None\n self._dbindex.unregister_db(dbname)\n\n async def _on_system_config_add(self, setting_name, value):\n # CONFIGURE INSTANCE INSERT ConfigObject;\n pass\n\n async def _on_system_config_rem(self, setting_name, value):\n # CONFIGURE INSTANCE RESET ConfigObject;\n pass\n\n async def _on_system_config_set(self, setting_name, value):\n # CONFIGURE INSTANCE SET setting_name := value;\n if setting_name == 'listen_addresses':\n await self._restart_servers_new_addr(value, self._listen_port)\n\n elif setting_name == 'listen_port':\n await self._restart_servers_new_addr(self._listen_hosts, value)\n\n async def _on_system_config_reset(self, setting_name):\n # CONFIGURE INSTANCE RESET setting_name;\n if setting_name == 'listen_addresses':\n await self._restart_servers_new_addr(\n ('localhost',), self._listen_port)\n\n elif setting_name == 'listen_port':\n await self._restart_servers_new_addr(\n self._listen_hosts, defines.EDGEDB_PORT)\n\n async def _after_system_config_add(self, setting_name, value):\n # CONFIGURE INSTANCE INSERT ConfigObject;\n if setting_name == 'auth':\n self._populate_sys_auth()\n\n async def _after_system_config_rem(self, setting_name, value):\n # CONFIGURE INSTANCE RESET ConfigObject;\n if setting_name == 'auth':\n self._populate_sys_auth()\n\n async def _after_system_config_set(self, setting_name, value):\n # CONFIGURE INSTANCE SET setting_name := value;\n pass\n\n async def _after_system_config_reset(self, setting_name):\n # CONFIGURE INSTANCE RESET setting_name;\n pass\n\n async def 
_acquire_sys_pgcon(self):\n if not self._initing and not self._serving:\n raise RuntimeError(\"EdgeDB server is not serving.\")\n\n await self._sys_pgcon_waiter.acquire()\n\n if not self._initing and not self._serving:\n self._sys_pgcon_waiter.release()\n raise RuntimeError(\"EdgeDB server is not serving.\")\n\n if self.__sys_pgcon is None or not self.__sys_pgcon.is_healthy():\n conn, self.__sys_pgcon = self.__sys_pgcon, None\n if conn is not None:\n self._sys_pgcon_ready_evt.clear()\n conn.abort()\n # We depend on the reconnect on connection_lost() of __sys_pgcon\n await self._sys_pgcon_ready_evt.wait()\n if self.__sys_pgcon is None:\n self._sys_pgcon_waiter.release()\n raise RuntimeError(\"Cannot acquire pgcon to the system DB.\")\n\n return self.__sys_pgcon\n\n def _release_sys_pgcon(self):\n self._sys_pgcon_waiter.release()\n\n async def _cancel_pgcon_operation(self, pgcon) -> bool:\n syscon = await self._acquire_sys_pgcon()\n try:\n if pgcon.idle:\n # pgcon could have received the query results while we\n # were acquiring a system connection to cancel it.\n return False\n\n if pgcon.is_cancelling():\n # Somehow the connection is already being cancelled and\n # we don't want to have to cancellations go in parallel.\n return False\n\n pgcon.start_pg_cancellation()\n try:\n # Returns True if the `pid` exists and it was able to send it a\n # SIGINT. Will throw an exception if the priveleges aren't\n # sufficient.\n result = await syscon.simple_query(\n f'SELECT pg_cancel_backend({pgcon.backend_pid});'.encode(),\n ignore_data=False\n )\n finally:\n pgcon.finish_pg_cancellation()\n\n return result[0][0] == b't'\n finally:\n self._release_sys_pgcon()\n\n async def _cancel_and_discard_pgcon(self, pgcon, dbname) -> None:\n try:\n if self._serving:\n await self._cancel_pgcon_operation(pgcon)\n finally:\n self.release_pgcon(dbname, pgcon, discard=True)\n\n async def _signal_sysevent(self, event, **kwargs):\n if not self._initing and not self._serving:\n # This is very likely if we are doing\n # \"run_startup_script_and_exit()\", but is also possible if the\n # server was shut down with this coroutine as a background task\n # in flight.\n return\n\n pgcon = await self._acquire_sys_pgcon()\n try:\n await pgcon.signal_sysevent(event, **kwargs)\n finally:\n self._release_sys_pgcon()\n\n def _on_remote_ddl(self, dbname):\n # Triggered by a postgres notification event 'schema-changes'\n # on the __edgedb_sysevent__ channel\n self._loop.create_task(\n self.introspect_db(dbname, refresh=True)\n )\n\n def _on_remote_database_config_change(self, dbname):\n # Triggered by a postgres notification event 'database-config-changes'\n # on the __edgedb_sysevent__ channel\n pass\n\n def _on_remote_system_config_change(self):\n # Triggered by a postgres notification event 'ystem-config-changes'\n # on the __edgedb_sysevent__ channel\n pass\n\n def _on_global_schema_change(self):\n self._loop.create_task(self._reintrospect_global_schema())\n\n def _on_sys_pgcon_connection_lost(self, exc):\n if not self._serving:\n # The server is shutting down, release all events so that\n # the waiters if any could continue and exit\n self._sys_pgcon_ready_evt.set()\n self._sys_pgcon_reconnect_evt.set()\n return\n\n logger.error(\n \"Connection to the system database is \" +\n (\"closed.\" if exc is None else f\"broken! 
Reason: {exc}\")\n )\n self.set_pg_unavailable_msg(\n \"Connection is lost, please check server log for the reason.\"\n )\n self.__sys_pgcon = None\n self._sys_pgcon_ready_evt.clear()\n self._loop.create_task(self._reconnect_sys_pgcon())\n\n async def _reconnect_sys_pgcon(self):\n try:\n conn = None\n while self._serving:\n try:\n conn = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)\n break\n except ConnectionError:\n # Keep retrying as far as:\n # 1. The EdgeDB server is still serving,\n # 2. We still cannot connect to the Postgres cluster, or\n pass\n except pgcon_errors.BackendError as e:\n # 3. The Postgres cluster is still starting up\n if not e.code_is(pgcon_errors.ERROR_CANNOT_CONNECT_NOW):\n raise\n\n if self._serving:\n try:\n # Retry after INTERVAL seconds, unless the event is set\n # and we can retry immediately after the event.\n await asyncio.wait_for(\n self._sys_pgcon_reconnect_evt.wait(),\n defines.SYSTEM_DB_RECONNECT_INTERVAL,\n )\n # But the event can only skip one INTERVAL.\n self._sys_pgcon_reconnect_evt.clear()\n except asyncio.TimeoutError:\n pass\n\n if not self._serving:\n if conn is not None:\n conn.abort()\n return\n\n logger.info(\"Successfully reconnected to the system database.\")\n self.__sys_pgcon = conn\n self.__sys_pgcon.set_server(self)\n # This await is meant to be after set_server() because we need the\n # pgcon to be able to trigger another reconnect if its connection\n # is lost during this await.\n await self.__sys_pgcon.listen_for_sysevent()\n self.set_pg_unavailable_msg(None)\n finally:\n self._sys_pgcon_ready_evt.set()\n\n async def run_startup_script_and_exit(self):\n \"\"\"Run the script specified in *startup_script* and exit immediately\"\"\"\n if self._startup_script is None:\n raise AssertionError('startup script is not defined')\n await self._create_compiler_pool()\n try:\n ql_parser.preload()\n await binary.EdgeConnection.run_script(\n server=self,\n database=self._startup_script.database,\n user=self._startup_script.user,\n script=self._startup_script.text,\n )\n finally:\n await self._destroy_compiler_pool()\n\n async def _start_server(\n self, host: str, port: int\n ) -> asyncio.AbstractServer:\n nethost = None\n if host == \"localhost\":\n nethost = await _resolve_localhost()\n\n proto_factory = lambda: protocol.HttpProtocol(\n self, self._sslctx,\n allow_insecure_binary_clients=self._allow_insecure_binary_clients,\n allow_insecure_http_clients=self._allow_insecure_http_clients,\n )\n\n return await self._loop.create_server(\n proto_factory, host=nethost or host, port=port)\n\n async def _start_admin_server(self, port: int) -> asyncio.AbstractServer:\n admin_unix_sock_path = os.path.join(\n self._runstate_dir, f'.s.EDGEDB.admin.{port}')\n admin_unix_srv = await self._loop.create_unix_server(\n lambda: binary.EdgeConnection(self, external_auth=True),\n admin_unix_sock_path\n )\n os.chmod(admin_unix_sock_path, stat.S_IRUSR | stat.S_IWUSR)\n logger.info('Serving admin on %s', admin_unix_sock_path)\n return admin_unix_srv\n\n async def _start_servers(self, hosts, port, admin=True):\n servers = {}\n try:\n async with taskgroup.TaskGroup() as g:\n for host in hosts:\n servers[host] = g.create_task(\n self._start_server(host, port)\n )\n except Exception:\n await self._stop_servers([\n fut.result() for fut in servers.values()\n if fut.done() and fut.exception() is None\n ])\n raise\n servers = {host: fut.result() for host, fut in servers.items()}\n\n addrs = []\n for tcp_srv in servers.values():\n for s in tcp_srv.sockets:\n 
addrs.append(s.getsockname())\n\n if len(addrs) > 1:\n if port:\n addr_str = f\"{{{', '.join(addr[0] for addr in addrs)}}}:{port}\"\n else:\n addr_str = f\"{{{', '.join('%s:%d' % addr for addr in addrs)}}}\"\n elif addrs:\n addr_str = \"%s:%d\" % addrs[0]\n port = addrs[0][1]\n else:\n addr_str = None\n\n if addr_str:\n logger.info('Serving on %s', addr_str)\n\n if admin and port:\n try:\n admin_unix_srv = await self._start_admin_server(port)\n except Exception:\n await self._stop_servers(servers.values())\n raise\n servers[ADMIN_PLACEHOLDER] = admin_unix_srv\n\n return servers, port, addrs\n\n def init_tls(self, tls_cert_file, tls_key_file):\n assert self._sslctx is None\n tls_password_needed = False\n\n def _tls_private_key_password():\n nonlocal tls_password_needed\n tls_password_needed = True\n return os.environ.get('EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD', '')\n\n sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n try:\n sslctx.load_cert_chain(\n tls_cert_file,\n tls_key_file,\n password=_tls_private_key_password,\n )\n except ssl.SSLError as e:\n if e.library == \"SSL\" and e.errno == 9: # ERR_LIB_PEM\n if tls_password_needed:\n if _tls_private_key_password():\n raise StartupError(\n \"Cannot load TLS certificates - it's likely that \"\n \"the private key password is wrong.\"\n ) from e\n else:\n raise StartupError(\n \"Cannot load TLS certificates - the private key \"\n \"file is likely protected by a password. Specify \"\n \"the password using environment variable: \"\n \"EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD\"\n ) from e\n elif tls_key_file is None:\n raise StartupError(\n \"Cannot load TLS certificates - have you specified \"\n \"the private key file using the `--tls-key-file` \"\n \"command-line argument?\"\n ) from e\n else:\n raise StartupError(\n \"Cannot load TLS certificates - please double check \"\n \"if the specified certificate files are valid.\"\n )\n elif e.library == \"X509\" and e.errno == 116:\n # X509 Error 116: X509_R_KEY_VALUES_MISMATCH\n raise StartupError(\n \"Cannot load TLS certificates - the private key doesn't \"\n \"match the certificate.\"\n )\n\n raise StartupError(f\"Cannot load TLS certificates - {e}\") from e\n\n sslctx.set_alpn_protocols(['edgedb-binary', 'http/1.1'])\n self._sslctx = sslctx\n self._tls_cert_file = str(tls_cert_file)\n\n async def _stop_servers(self, servers):\n async with taskgroup.TaskGroup() as g:\n for srv in servers:\n srv.close()\n g.create_task(srv.wait_closed())\n\n async def start(self):\n self._stop_evt.clear()\n assert self._task_group is None\n self._task_group = taskgroup.TaskGroup()\n await self._task_group.__aenter__()\n self._accept_new_tasks = True\n\n await self._create_compiler_pool()\n\n # Make sure that EdgeQL parser is preloaded; edgecon might use\n # it to restore config values.\n ql_parser.preload()\n\n if self._startup_script:\n await binary.EdgeConnection.run_script(\n server=self,\n database=self._startup_script.database,\n user=self._startup_script.user,\n script=self._startup_script.text,\n )\n\n self._servers, actual_port, listen_addrs = await self._start_servers(\n _fix_wildcard_host(self._listen_hosts), self._listen_port\n )\n if self._listen_port == 0:\n self._listen_port = actual_port\n\n self._accepting_connections = True\n self._serving = True\n\n if self._echo_runtime_info:\n ri = {\n \"port\": self._listen_port,\n \"runstate_dir\": str(self._runstate_dir),\n \"tls_cert_file\": self._tls_cert_file,\n }\n print(f'\\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\\n', flush=True)\n\n if self._status_sink is 
not None:\n status = {\n \"listen_addrs\": listen_addrs,\n \"port\": self._listen_port,\n \"socket_dir\": str(self._runstate_dir),\n \"main_pid\": os.getpid(),\n \"tenant_id\": self._tenant_id,\n \"tls_cert_file\": self._tls_cert_file,\n }\n self._status_sink(f'READY={json.dumps(status)}')\n\n async def stop(self):\n try:\n self._serving = False\n self._accept_new_tasks = False\n\n if self._http_request_logger is not None:\n self._http_request_logger.cancel()\n\n await self._stop_servers(self._servers.values())\n self._servers = {}\n\n for conn in self._binary_conns:\n conn.stop()\n self._binary_conns = set()\n\n if self._task_group is not None:\n tg = self._task_group\n self._task_group = None\n await tg.__aexit__(*sys.exc_info())\n\n await self._destroy_compiler_pool()\n\n finally:\n if self.__sys_pgcon is not None:\n self.__sys_pgcon.terminate()\n self.__sys_pgcon = None\n self._sys_pgcon_waiter = None\n\n def create_task(self, coro):\n if self._accept_new_tasks:\n return self._task_group.create_task(coro)\n\n async def serve_forever(self):\n await self._stop_evt.wait()\n\n async def get_auth_method(self, user):\n authlist = self._sys_auth\n\n if not authlist:\n default_method = 'SCRAM'\n return config.get_settings().get_type_by_name(default_method)()\n else:\n for auth in authlist:\n match = (\n (user in auth.user or '*' in auth.user)\n )\n\n if match:\n return auth.method\n\n def get_sys_query(self, key):\n return self._sys_queries[key]\n\n def get_instance_data(self, key):\n return self._instance_data[key]\n\n def get_backend_runtime_params(self) -> Any:\n return self._cluster.get_runtime_params()\n\n def set_pg_unavailable_msg(self, msg):\n if msg is None or self._pg_unavailable_msg is None:\n self._pg_unavailable_msg = msg\n\n\nasync def _resolve_localhost() -> List[str]:\n # On many systems 'localhost' resolves to _both_ IPv4 and IPv6\n # addresses, even if the system is not capable of handling\n # IPv6 connections. Due to the common nature of this issue\n # we explicitly disable the AF_INET6 component of 'localhost'.\n\n loop = asyncio.get_running_loop()\n localhost = await loop.getaddrinfo(\n 'localhost',\n 0,\n family=socket.AF_UNSPEC,\n type=socket.SOCK_STREAM,\n flags=socket.AI_PASSIVE,\n proto=0,\n )\n\n infos = [a for a in localhost if a[0] == socket.AF_INET]\n\n if not infos:\n # \"localhost\" did not resolve to an IPv4 address,\n # let create_server handle the situation.\n return [\"localhost\"]\n\n # Replace 'localhost' with explicitly resolved AF_INET addresses.\n hosts = []\n for info in reversed(infos):\n addr, *_ = info[4]\n hosts.append(addr)\n\n return hosts\n\n\ndef _fix_wildcard_host(hosts: Sequence[str]) -> Sequence[str]:\n # Even though it is sometimes not a conflict to bind on the same port of\n # both the wildcard host 0.0.0.0 and some specific host at the same time,\n # we're still discarding other hosts if 0.0.0.0 is present because it\n # should behave the same and we could avoid potential conflicts.\n\n if '0.0.0.0' in hosts:\n if len(hosts) > 1:\n logger.warning(\n \"0.0.0.0 found in listen_addresses; \"\n \"discarding the other hosts.\"\n )\n hosts = ['0.0.0.0']\n return hosts\n", "path": "edb/server/server.py" } ]
[ { "content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport asyncio\nimport binascii\nimport json\nimport logging\nimport os\nimport pickle\nimport socket\nimport ssl\nimport stat\nimport sys\nimport uuid\n\nimport immutables\n\nfrom edb import errors\n\nfrom edb.common import devmode\nfrom edb.common import taskgroup\nfrom edb.common import windowedsum\n\nfrom edb.schema import reflection as s_refl\nfrom edb.schema import roles as s_role\nfrom edb.schema import schema as s_schema\n\nfrom edb.edgeql import parser as ql_parser\n\nfrom edb.server import args as srvargs\nfrom edb.server import cache\nfrom edb.server import config\nfrom edb.server import connpool\nfrom edb.server import compiler_pool\nfrom edb.server import defines\nfrom edb.server import protocol\nfrom edb.server.protocol import binary # type: ignore\nfrom edb.server import pgcon\nfrom edb.server.pgcon import errors as pgcon_errors\n\nfrom . import dbview\n\n\nADMIN_PLACEHOLDER = \"<edgedb:admin>\"\nlogger = logging.getLogger('edb.server')\nlog_metrics = logging.getLogger('edb.server.metrics')\n\n\nclass RoleDescriptor(TypedDict):\n superuser: bool\n name: str\n password: str\n\n\nclass StartupError(Exception):\n pass\n\n\nclass Server:\n\n _sys_pgcon: Optional[pgcon.PGConnection]\n\n _roles: Mapping[str, RoleDescriptor]\n _instance_data: Mapping[str, str]\n _sys_queries: Mapping[str, str]\n _local_intro_query: bytes\n _global_intro_query: bytes\n\n _std_schema: s_schema.Schema\n _refl_schema: s_schema.Schema\n _schema_class_layout: s_refl.SchemaTypeLayout\n\n _sys_pgcon_waiter: asyncio.Lock\n _servers: Mapping[str, asyncio.AbstractServer]\n\n _task_group: Optional[taskgroup.TaskGroup]\n _binary_conns: Set[binary.EdgeConnection]\n\n def __init__(\n self,\n *,\n cluster,\n runstate_dir,\n internal_runstate_dir,\n max_backend_connections,\n compiler_pool_size,\n nethosts,\n netport,\n allow_insecure_binary_clients: bool = False,\n allow_insecure_http_clients: bool = False,\n auto_shutdown_after: float = -1,\n echo_runtime_info: bool = False,\n status_sink: Optional[Callable[[str], None]] = None,\n startup_script: Optional[srvargs.StartupScript] = None,\n ):\n\n self._loop = asyncio.get_running_loop()\n\n # Used to tag PG notifications to later disambiguate them.\n self._server_id = str(uuid.uuid4())\n\n self._serving = False\n self._initing = False\n self._accept_new_tasks = False\n\n self._cluster = cluster\n self._pg_addr = self._get_pgaddr()\n inst_params = cluster.get_runtime_params().instance_params\n self._tenant_id = inst_params.tenant_id\n\n # 1 connection is reserved for the system DB\n pool_capacity = max_backend_connections - 1\n self._pg_pool = connpool.Pool(\n connect=self._pg_connect,\n disconnect=self._pg_disconnect,\n max_capacity=pool_capacity,\n )\n self._pg_unavailable_msg = None\n\n # DB state will 
be initialized in init().\n self._dbindex = None\n\n self._runstate_dir = runstate_dir\n self._internal_runstate_dir = internal_runstate_dir\n self._max_backend_connections = max_backend_connections\n self._compiler_pool = None\n self._compiler_pool_size = compiler_pool_size\n\n self._listen_hosts = nethosts\n self._listen_port = netport\n\n self._sys_auth: Tuple[Any, ...] = tuple()\n\n # Shutdown the server after the last management\n # connection has disconnected\n # and there have been no new connections for n seconds\n self._auto_shutdown_after = auto_shutdown_after\n self._auto_shutdown_handler = None\n\n self._echo_runtime_info = echo_runtime_info\n self._status_sink = status_sink\n\n self._startup_script = startup_script\n\n # Never use `self.__sys_pgcon` directly; get it via\n # `await self._acquire_sys_pgcon()`.\n self.__sys_pgcon = None\n\n self._roles = immutables.Map()\n self._instance_data = immutables.Map()\n self._sys_queries = immutables.Map()\n\n self._devmode = devmode.is_in_dev_mode()\n\n self._binary_proto_id_counter = 0\n self._binary_conns = set()\n self._accepting_connections = False\n\n self._servers = {}\n\n self._http_query_cache = cache.StatementsCache(\n maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)\n\n self._http_last_minute_requests = windowedsum.WindowedSum()\n self._http_request_logger = None\n\n self._task_group = None\n self._stop_evt = asyncio.Event()\n self._tls_cert_file = None\n self._sslctx = None\n\n self._allow_insecure_binary_clients = allow_insecure_binary_clients\n self._allow_insecure_http_clients = allow_insecure_http_clients\n\n async def _request_stats_logger(self):\n last_seen = -1\n while True:\n current = int(self._http_last_minute_requests)\n if current != last_seen:\n log_metrics.info(\n \"HTTP requests in last minute: %d\",\n current,\n )\n last_seen = current\n\n await asyncio.sleep(30)\n\n def get_listen_hosts(self):\n return self._listen_hosts\n\n def get_listen_port(self):\n return self._listen_port\n\n def get_loop(self):\n return self._loop\n\n def in_dev_mode(self):\n return self._devmode\n\n def get_pg_dbname(self, dbname: str) -> str:\n return self._cluster.get_db_name(dbname)\n\n def on_binary_client_connected(self) -> str:\n self._binary_proto_id_counter += 1\n\n if self._auto_shutdown_handler:\n self._auto_shutdown_handler.cancel()\n self._auto_shutdown_handler = None\n\n return str(self._binary_proto_id_counter)\n\n def on_binary_client_authed(self, conn):\n self._binary_conns.add(conn)\n self._report_connections(event='opened')\n\n def on_binary_client_disconnected(self, conn):\n self._binary_conns.discard(conn)\n self._report_connections(event=\"closed\")\n\n if not self._binary_conns and self._auto_shutdown_after >= 0:\n\n def shutdown():\n self._accepting_connections = False\n self._stop_evt.set()\n\n self._auto_shutdown_handler = self._loop.call_later(\n self._auto_shutdown_after, shutdown)\n\n def _report_connections(self, *, event: str) -> None:\n log_metrics.info(\n \"%s a connection; open_count=%d\",\n event,\n len(self._binary_conns),\n )\n\n async def _pg_connect(self, dbname):\n pg_dbname = self.get_pg_dbname(dbname)\n return await pgcon.connect(\n self._get_pgaddr(), pg_dbname, self._tenant_id)\n\n async def _pg_disconnect(self, conn):\n conn.terminate()\n\n async def init(self):\n self._initing = True\n try:\n self.__sys_pgcon = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)\n self._sys_pgcon_waiter = asyncio.Lock()\n self._sys_pgcon_ready_evt = asyncio.Event()\n self._sys_pgcon_reconnect_evt = 
asyncio.Event()\n\n await self._load_instance_data()\n\n global_schema = await self.introspect_global_schema()\n sys_config = await self.load_sys_config()\n\n self._dbindex = dbview.DatabaseIndex(\n self,\n std_schema=self._std_schema,\n global_schema=global_schema,\n sys_config=sys_config,\n )\n\n self._fetch_roles()\n await self._introspect_dbs()\n\n # Now, once all DBs have been introspected, start listening on\n # any notifications about schema/roles/etc changes.\n await self.__sys_pgcon.listen_for_sysevent()\n self.__sys_pgcon.set_server(self)\n self._sys_pgcon_ready_evt.set()\n\n self._populate_sys_auth()\n\n if not self._listen_hosts:\n self._listen_hosts = (\n config.lookup('listen_addresses', sys_config)\n or ('localhost',)\n )\n\n if self._listen_port is None:\n self._listen_port = (\n config.lookup('listen_port', sys_config)\n or defines.EDGEDB_PORT\n )\n\n self._http_request_logger = asyncio.create_task(\n self._request_stats_logger()\n )\n\n finally:\n self._initing = False\n\n async def _create_compiler_pool(self):\n self._compiler_pool = await compiler_pool.create_compiler_pool(\n pool_size=self._compiler_pool_size,\n dbindex=self._dbindex,\n runstate_dir=self._internal_runstate_dir,\n backend_runtime_params=self.get_backend_runtime_params(),\n std_schema=self._std_schema,\n refl_schema=self._refl_schema,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def _destroy_compiler_pool(self):\n if self._compiler_pool is not None:\n await self._compiler_pool.stop()\n self._compiler_pool = None\n\n def _populate_sys_auth(self):\n cfg = self._dbindex.get_sys_config()\n auth = config.lookup('auth', cfg) or ()\n self._sys_auth = tuple(sorted(auth, key=lambda a: a.priority))\n\n def _get_pgaddr(self):\n return self._cluster.get_connection_spec()\n\n def get_compiler_pool(self):\n return self._compiler_pool\n\n def get_db(self, *, dbname: str):\n assert self._dbindex is not None\n return self._dbindex.get_db(dbname)\n\n def maybe_get_db(self, *, dbname: str):\n assert self._dbindex is not None\n return self._dbindex.maybe_get_db(dbname)\n\n def new_dbview(self, *, dbname, user, query_cache):\n return self._dbindex.new_view(\n dbname, user=user, query_cache=query_cache)\n\n def remove_dbview(self, dbview):\n return self._dbindex.remove_view(dbview)\n\n def get_global_schema(self):\n return self._dbindex.get_global_schema()\n\n def get_compilation_system_config(self):\n return self._dbindex.get_compilation_system_config()\n\n async def acquire_pgcon(self, dbname):\n if self._pg_unavailable_msg is not None:\n raise errors.BackendUnavailableError(\n 'Postgres is not available: ' + self._pg_unavailable_msg\n )\n\n for _ in range(self._pg_pool.max_capacity + 1):\n conn = await self._pg_pool.acquire(dbname)\n if conn.is_healthy():\n return conn\n else:\n logger.warning('Acquired an unhealthy pgcon; discard now.')\n self._pg_pool.release(dbname, conn, discard=True)\n else:\n # This is unlikely to happen, but we defer to the caller to retry\n # when it does happen\n raise errors.BackendUnavailableError(\n 'No healthy backend connection available at the moment, '\n 'please try again.'\n )\n\n def release_pgcon(self, dbname, conn, *, discard=False):\n if not conn.is_healthy():\n logger.warning('Released an unhealthy pgcon; discard now.')\n discard = True\n self._pg_pool.release(dbname, conn, discard=discard)\n\n async def load_sys_config(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n query = self.get_sys_query('sysconfig')\n sys_config_json = await 
syscon.parse_execute_json(\n query,\n b'__backend_sysconfig',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n finally:\n self._release_sys_pgcon()\n\n return config.from_json(config.get_settings(), sys_config_json)\n\n async def introspect_global_schema(self, conn=None):\n if conn is not None:\n json_data = await conn.parse_execute_json(\n self._global_intro_query, b'__global_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n else:\n syscon = await self._acquire_sys_pgcon()\n try:\n json_data = await syscon.parse_execute_json(\n self._global_intro_query, b'__global_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n finally:\n self._release_sys_pgcon()\n\n return s_refl.parse_into(\n base_schema=self._std_schema,\n schema=s_schema.FlatSchema(),\n data=json_data,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def _reintrospect_global_schema(self):\n if not self._initing and not self._serving:\n logger.warning(\n \"global-schema-changes event received during shutdown; \"\n \"ignoring.\"\n )\n return\n new_global_schema = await self.introspect_global_schema()\n self._dbindex.update_global_schema(new_global_schema)\n self._fetch_roles()\n\n async def introspect_user_schema(self, conn):\n json_data = await conn.parse_execute_json(\n self._local_intro_query, b'__local_intro_db',\n dbver=0, use_prep_stmt=True, args=(),\n )\n\n base_schema = s_schema.ChainedSchema(\n self._std_schema,\n s_schema.FlatSchema(),\n self.get_global_schema(),\n )\n\n return s_refl.parse_into(\n base_schema=base_schema,\n schema=s_schema.FlatSchema(),\n data=json_data,\n schema_class_layout=self._schema_class_layout,\n )\n\n async def introspect_db(\n self, dbname, *, refresh=False, skip_dropped=False\n ):\n try:\n conn = await self.acquire_pgcon(dbname)\n except pgcon_errors.BackendError as e:\n if skip_dropped and e.code_is(\n pgcon_errors.ERROR_INVALID_CATALOG_NAME\n ):\n # database does not exist\n logger.warning(\n \"Detected concurrently-dropped database %s; skipping.\",\n dbname,\n )\n return\n else:\n raise\n\n try:\n user_schema = await self.introspect_user_schema(conn)\n\n reflection_cache_json = await conn.parse_execute_json(\n b'''\n SELECT json_agg(o.c)\n FROM (\n SELECT\n json_build_object(\n 'eql_hash', t.eql_hash,\n 'argnames', array_to_json(t.argnames)\n ) AS c\n FROM\n ROWS FROM(edgedb._get_cached_reflection())\n AS t(eql_hash text, argnames text[])\n ) AS o;\n ''',\n b'__reflection_cache',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n\n reflection_cache = immutables.Map({\n r['eql_hash']: tuple(r['argnames'])\n for r in json.loads(reflection_cache_json)\n })\n\n backend_ids_json = await conn.parse_execute_json(\n b'''\n SELECT\n json_object_agg(\n \"id\"::text,\n \"backend_id\"\n )::text\n FROM\n edgedb.\"_SchemaType\"\n ''',\n b'__backend_ids_fetch',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n backend_ids = json.loads(backend_ids_json)\n\n db_config = await self.introspect_db_config(conn)\n\n self._dbindex.register_db(\n dbname,\n user_schema=user_schema,\n db_config=db_config,\n reflection_cache=reflection_cache,\n backend_ids=backend_ids,\n refresh=refresh,\n )\n finally:\n self.release_pgcon(dbname, conn)\n\n async def introspect_db_config(self, conn):\n query = self.get_sys_query('dbconfig')\n result = await conn.parse_execute_json(\n query,\n b'__backend_dbconfig',\n dbver=0,\n use_prep_stmt=True,\n args=(),\n )\n return config.from_json(config.get_settings(), result)\n\n async def _introspect_dbs(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n 
dbs_query = self.get_sys_query('listdbs')\n json_data = await syscon.parse_execute_json(\n dbs_query, b'__listdbs',\n dbver=0, use_prep_stmt=True, args=(),\n )\n dbnames = json.loads(json_data)\n finally:\n self._release_sys_pgcon()\n\n async with taskgroup.TaskGroup(name='introspect DBs') as g:\n for dbname in dbnames:\n g.create_task(self.introspect_db(dbname, skip_dropped=True))\n\n def _fetch_roles(self):\n global_schema = self._dbindex.get_global_schema()\n\n roles = {}\n for role in global_schema.get_objects(type=s_role.Role):\n role_name = str(role.get_name(global_schema))\n roles[role_name] = {\n 'name': role_name,\n 'superuser': role.get_superuser(global_schema),\n 'password': role.get_password(global_schema),\n }\n\n self._roles = immutables.Map(roles)\n\n async def _load_instance_data(self):\n syscon = await self._acquire_sys_pgcon()\n try:\n result = await syscon.simple_query(b'''\\\n SELECT json FROM edgedbinstdata.instdata\n WHERE key = 'instancedata';\n ''', ignore_data=False)\n self._instance_data = immutables.Map(\n json.loads(result[0][0].decode('utf-8')))\n\n result = await syscon.simple_query(b'''\\\n SELECT json FROM edgedbinstdata.instdata\n WHERE key = 'sysqueries';\n ''', ignore_data=False)\n queries = json.loads(result[0][0].decode('utf-8'))\n self._sys_queries = immutables.Map(\n {k: q.encode() for k, q in queries.items()})\n\n result = await syscon.simple_query(b'''\\\n SELECT text FROM edgedbinstdata.instdata\n WHERE key = 'local_intro_query';\n ''', ignore_data=False)\n self._local_intro_query = result[0][0]\n\n result = await syscon.simple_query(b'''\\\n SELECT text FROM edgedbinstdata.instdata\n WHERE key = 'global_intro_query';\n ''', ignore_data=False)\n self._global_intro_query = result[0][0]\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'stdschema';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._std_schema = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load std schema pickle') from e\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'reflschema';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._refl_schema = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load refl schema pickle') from e\n\n result = await syscon.simple_query(b'''\\\n SELECT bin FROM edgedbinstdata.instdata\n WHERE key = 'classlayout';\n ''', ignore_data=False)\n try:\n data = binascii.a2b_hex(result[0][0][2:])\n self._schema_class_layout = pickle.loads(data)\n except Exception as e:\n raise RuntimeError(\n 'could not load schema class layout pickle') from e\n finally:\n self._release_sys_pgcon()\n\n def get_roles(self):\n return self._roles\n\n async def _restart_servers_new_addr(self, nethosts, netport):\n if not netport:\n raise RuntimeError('cannot restart without network port specified')\n nethosts = _fix_wildcard_host(nethosts)\n servers_to_stop = []\n servers = {}\n if self._listen_port == netport:\n hosts_to_start = [\n host for host in nethosts if host not in self._servers\n ]\n for host, srv in self._servers.items():\n if host == ADMIN_PLACEHOLDER or host in nethosts:\n servers[host] = srv\n else:\n servers_to_stop.append(srv)\n admin = False\n else:\n hosts_to_start = nethosts\n servers_to_stop = self._servers.values()\n admin = True\n\n new_servers, *_ = await self._start_servers(\n hosts_to_start, netport, admin\n )\n 
servers.update(new_servers)\n self._servers = servers\n self._listen_hosts = nethosts\n self._listen_port = netport\n\n addrs = []\n unix_addr = None\n port = None\n for srv in servers_to_stop:\n for s in srv.sockets:\n addr = s.getsockname()\n if isinstance(addr, tuple):\n addrs.append(addr)\n if port is None:\n port = addr[1]\n elif port != addr[1]:\n port = 0\n else:\n unix_addr = addr\n if len(addrs) > 1:\n if port:\n addr_str = f\"{{{', '.join(addr[0] for addr in addrs)}}}:{port}\"\n else:\n addr_str = f\"{{{', '.join('%s:%d' % addr for addr in addrs)}}}\"\n elif addrs:\n addr_str = \"%s:%d\" % addrs[0]\n else:\n addr_str = None\n if addr_str:\n logger.info('Stopping to serve on %s', addr_str)\n if unix_addr:\n logger.info('Stopping to serve admin on %s', unix_addr)\n\n await self._stop_servers(servers_to_stop)\n\n async def _on_before_drop_db(\n self,\n dbname: str,\n current_dbname: str\n ) -> None:\n if current_dbname == dbname:\n raise errors.ExecutionError(\n f'cannot drop the currently open database {dbname!r}')\n\n await self._ensure_database_not_connected(dbname)\n\n async def _on_before_create_db_from_template(\n self,\n dbname: str,\n current_dbname: str\n ):\n if current_dbname == dbname:\n raise errors.ExecutionError(\n f'cannot create database using currently open database '\n f'{dbname!r} as a template database')\n\n await self._ensure_database_not_connected(dbname)\n\n async def _ensure_database_not_connected(self, dbname: str):\n assert self._dbindex is not None\n\n if self._dbindex.count_connections(dbname):\n # If there are open EdgeDB connections to the `dbname` DB\n # just raise the error Postgres would have raised itself.\n raise errors.ExecutionError(\n f'database {dbname!r} is being accessed by other users')\n else:\n # If, however, there are no open EdgeDB connections, prune\n # all non-active postgres connection to the `dbname` DB.\n await self._pg_pool.prune_inactive_connections(dbname)\n\n def _on_after_drop_db(self, dbname: str):\n assert self._dbindex is not None\n self._dbindex.unregister_db(dbname)\n\n async def _on_system_config_add(self, setting_name, value):\n # CONFIGURE INSTANCE INSERT ConfigObject;\n pass\n\n async def _on_system_config_rem(self, setting_name, value):\n # CONFIGURE INSTANCE RESET ConfigObject;\n pass\n\n async def _on_system_config_set(self, setting_name, value):\n # CONFIGURE INSTANCE SET setting_name := value;\n if setting_name == 'listen_addresses':\n await self._restart_servers_new_addr(value, self._listen_port)\n\n elif setting_name == 'listen_port':\n await self._restart_servers_new_addr(self._listen_hosts, value)\n\n async def _on_system_config_reset(self, setting_name):\n # CONFIGURE INSTANCE RESET setting_name;\n if setting_name == 'listen_addresses':\n await self._restart_servers_new_addr(\n ('localhost',), self._listen_port)\n\n elif setting_name == 'listen_port':\n await self._restart_servers_new_addr(\n self._listen_hosts, defines.EDGEDB_PORT)\n\n async def _after_system_config_add(self, setting_name, value):\n # CONFIGURE INSTANCE INSERT ConfigObject;\n if setting_name == 'auth':\n self._populate_sys_auth()\n\n async def _after_system_config_rem(self, setting_name, value):\n # CONFIGURE INSTANCE RESET ConfigObject;\n if setting_name == 'auth':\n self._populate_sys_auth()\n\n async def _after_system_config_set(self, setting_name, value):\n # CONFIGURE INSTANCE SET setting_name := value;\n pass\n\n async def _after_system_config_reset(self, setting_name):\n # CONFIGURE INSTANCE RESET setting_name;\n pass\n\n async def 
_acquire_sys_pgcon(self):\n if not self._initing and not self._serving:\n raise RuntimeError(\"EdgeDB server is not serving.\")\n\n await self._sys_pgcon_waiter.acquire()\n\n if not self._initing and not self._serving:\n self._sys_pgcon_waiter.release()\n raise RuntimeError(\"EdgeDB server is not serving.\")\n\n if self.__sys_pgcon is None or not self.__sys_pgcon.is_healthy():\n conn, self.__sys_pgcon = self.__sys_pgcon, None\n if conn is not None:\n self._sys_pgcon_ready_evt.clear()\n conn.abort()\n # We depend on the reconnect on connection_lost() of __sys_pgcon\n await self._sys_pgcon_ready_evt.wait()\n if self.__sys_pgcon is None:\n self._sys_pgcon_waiter.release()\n raise RuntimeError(\"Cannot acquire pgcon to the system DB.\")\n\n return self.__sys_pgcon\n\n def _release_sys_pgcon(self):\n self._sys_pgcon_waiter.release()\n\n async def _cancel_pgcon_operation(self, pgcon) -> bool:\n syscon = await self._acquire_sys_pgcon()\n try:\n if pgcon.idle:\n # pgcon could have received the query results while we\n # were acquiring a system connection to cancel it.\n return False\n\n if pgcon.is_cancelling():\n # Somehow the connection is already being cancelled and\n # we don't want to have to cancellations go in parallel.\n return False\n\n pgcon.start_pg_cancellation()\n try:\n # Returns True if the `pid` exists and it was able to send it a\n # SIGINT. Will throw an exception if the priveleges aren't\n # sufficient.\n result = await syscon.simple_query(\n f'SELECT pg_cancel_backend({pgcon.backend_pid});'.encode(),\n ignore_data=False\n )\n finally:\n pgcon.finish_pg_cancellation()\n\n return result[0][0] == b't'\n finally:\n self._release_sys_pgcon()\n\n async def _cancel_and_discard_pgcon(self, pgcon, dbname) -> None:\n try:\n if self._serving:\n await self._cancel_pgcon_operation(pgcon)\n finally:\n self.release_pgcon(dbname, pgcon, discard=True)\n\n async def _signal_sysevent(self, event, **kwargs):\n if not self._initing and not self._serving:\n # This is very likely if we are doing\n # \"run_startup_script_and_exit()\", but is also possible if the\n # server was shut down with this coroutine as a background task\n # in flight.\n return\n\n pgcon = await self._acquire_sys_pgcon()\n try:\n await pgcon.signal_sysevent(event, **kwargs)\n finally:\n self._release_sys_pgcon()\n\n def _on_remote_ddl(self, dbname):\n # Triggered by a postgres notification event 'schema-changes'\n # on the __edgedb_sysevent__ channel\n self._loop.create_task(\n self.introspect_db(dbname, refresh=True)\n )\n\n def _on_remote_database_config_change(self, dbname):\n # Triggered by a postgres notification event 'database-config-changes'\n # on the __edgedb_sysevent__ channel\n pass\n\n def _on_remote_system_config_change(self):\n # Triggered by a postgres notification event 'ystem-config-changes'\n # on the __edgedb_sysevent__ channel\n pass\n\n def _on_global_schema_change(self):\n self._loop.create_task(self._reintrospect_global_schema())\n\n def _on_sys_pgcon_connection_lost(self, exc):\n if not self._serving:\n # The server is shutting down, release all events so that\n # the waiters if any could continue and exit\n self._sys_pgcon_ready_evt.set()\n self._sys_pgcon_reconnect_evt.set()\n return\n\n logger.error(\n \"Connection to the system database is \" +\n (\"closed.\" if exc is None else f\"broken! 
Reason: {exc}\")\n )\n self.set_pg_unavailable_msg(\n \"Connection is lost, please check server log for the reason.\"\n )\n self.__sys_pgcon = None\n self._sys_pgcon_ready_evt.clear()\n self._loop.create_task(self._reconnect_sys_pgcon())\n\n async def _reconnect_sys_pgcon(self):\n try:\n conn = None\n while self._serving:\n try:\n conn = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)\n break\n except ConnectionError:\n # Keep retrying as far as:\n # 1. The EdgeDB server is still serving,\n # 2. We still cannot connect to the Postgres cluster, or\n pass\n except pgcon_errors.BackendError as e:\n # 3. The Postgres cluster is still starting up\n if not e.code_is(pgcon_errors.ERROR_CANNOT_CONNECT_NOW):\n raise\n\n if self._serving:\n try:\n # Retry after INTERVAL seconds, unless the event is set\n # and we can retry immediately after the event.\n await asyncio.wait_for(\n self._sys_pgcon_reconnect_evt.wait(),\n defines.SYSTEM_DB_RECONNECT_INTERVAL,\n )\n # But the event can only skip one INTERVAL.\n self._sys_pgcon_reconnect_evt.clear()\n except asyncio.TimeoutError:\n pass\n\n if not self._serving:\n if conn is not None:\n conn.abort()\n return\n\n logger.info(\"Successfully reconnected to the system database.\")\n self.__sys_pgcon = conn\n self.__sys_pgcon.set_server(self)\n # This await is meant to be after set_server() because we need the\n # pgcon to be able to trigger another reconnect if its connection\n # is lost during this await.\n await self.__sys_pgcon.listen_for_sysevent()\n self.set_pg_unavailable_msg(None)\n finally:\n self._sys_pgcon_ready_evt.set()\n\n async def run_startup_script_and_exit(self):\n \"\"\"Run the script specified in *startup_script* and exit immediately\"\"\"\n if self._startup_script is None:\n raise AssertionError('startup script is not defined')\n await self._create_compiler_pool()\n try:\n ql_parser.preload()\n await binary.EdgeConnection.run_script(\n server=self,\n database=self._startup_script.database,\n user=self._startup_script.user,\n script=self._startup_script.text,\n )\n finally:\n await self._destroy_compiler_pool()\n\n async def _start_server(\n self, host: str, port: int\n ) -> asyncio.AbstractServer:\n nethost = None\n if host == \"localhost\":\n nethost = await _resolve_localhost()\n\n proto_factory = lambda: protocol.HttpProtocol(\n self, self._sslctx,\n allow_insecure_binary_clients=self._allow_insecure_binary_clients,\n allow_insecure_http_clients=self._allow_insecure_http_clients,\n )\n\n return await self._loop.create_server(\n proto_factory, host=nethost or host, port=port)\n\n async def _start_admin_server(self, port: int) -> asyncio.AbstractServer:\n admin_unix_sock_path = os.path.join(\n self._runstate_dir, f'.s.EDGEDB.admin.{port}')\n admin_unix_srv = await self._loop.create_unix_server(\n lambda: binary.EdgeConnection(self, external_auth=True),\n admin_unix_sock_path\n )\n os.chmod(admin_unix_sock_path, stat.S_IRUSR | stat.S_IWUSR)\n logger.info('Serving admin on %s', admin_unix_sock_path)\n return admin_unix_srv\n\n async def _start_servers(self, hosts, port, admin=True):\n servers = {}\n try:\n async with taskgroup.TaskGroup() as g:\n for host in hosts:\n servers[host] = g.create_task(\n self._start_server(host, port)\n )\n except Exception:\n await self._stop_servers([\n fut.result() for fut in servers.values()\n if fut.done() and fut.exception() is None\n ])\n raise\n servers = {host: fut.result() for host, fut in servers.items()}\n\n addrs = []\n for tcp_srv in servers.values():\n for s in tcp_srv.sockets:\n 
addrs.append(s.getsockname())\n\n if len(addrs) > 1:\n if port:\n addr_str = f\"{{{', '.join(addr[0] for addr in addrs)}}}:{port}\"\n else:\n addr_str = f\"{{{', '.join('%s:%d' % addr for addr in addrs)}}}\"\n elif addrs:\n addr_str = \"%s:%d\" % addrs[0]\n port = addrs[0][1]\n else:\n addr_str = None\n\n if addr_str:\n logger.info('Serving on %s', addr_str)\n\n if admin and port:\n try:\n admin_unix_srv = await self._start_admin_server(port)\n except Exception:\n await self._stop_servers(servers.values())\n raise\n servers[ADMIN_PLACEHOLDER] = admin_unix_srv\n\n return servers, port, addrs\n\n def init_tls(self, tls_cert_file, tls_key_file):\n assert self._sslctx is None\n tls_password_needed = False\n\n def _tls_private_key_password():\n nonlocal tls_password_needed\n tls_password_needed = True\n return os.environ.get('EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD', '')\n\n sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n try:\n sslctx.load_cert_chain(\n tls_cert_file,\n tls_key_file,\n password=_tls_private_key_password,\n )\n except ssl.SSLError as e:\n if e.library == \"SSL\" and e.errno == 9: # ERR_LIB_PEM\n if tls_password_needed:\n if _tls_private_key_password():\n raise StartupError(\n \"Cannot load TLS certificates - it's likely that \"\n \"the private key password is wrong.\"\n ) from e\n else:\n raise StartupError(\n \"Cannot load TLS certificates - the private key \"\n \"file is likely protected by a password. Specify \"\n \"the password using environment variable: \"\n \"EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD\"\n ) from e\n elif tls_key_file is None:\n raise StartupError(\n \"Cannot load TLS certificates - have you specified \"\n \"the private key file using the `--tls-key-file` \"\n \"command-line argument?\"\n ) from e\n else:\n raise StartupError(\n \"Cannot load TLS certificates - please double check \"\n \"if the specified certificate files are valid.\"\n )\n elif e.library == \"X509\" and e.errno == 116:\n # X509 Error 116: X509_R_KEY_VALUES_MISMATCH\n raise StartupError(\n \"Cannot load TLS certificates - the private key doesn't \"\n \"match the certificate.\"\n )\n\n raise StartupError(f\"Cannot load TLS certificates - {e}\") from e\n\n sslctx.set_alpn_protocols(['edgedb-binary', 'http/1.1'])\n self._sslctx = sslctx\n self._tls_cert_file = str(tls_cert_file)\n\n async def _stop_servers(self, servers):\n async with taskgroup.TaskGroup() as g:\n for srv in servers:\n srv.close()\n g.create_task(srv.wait_closed())\n\n async def start(self):\n self._stop_evt.clear()\n assert self._task_group is None\n self._task_group = taskgroup.TaskGroup()\n await self._task_group.__aenter__()\n self._accept_new_tasks = True\n\n await self._create_compiler_pool()\n\n # Make sure that EdgeQL parser is preloaded; edgecon might use\n # it to restore config values.\n ql_parser.preload()\n\n if self._startup_script:\n await binary.EdgeConnection.run_script(\n server=self,\n database=self._startup_script.database,\n user=self._startup_script.user,\n script=self._startup_script.text,\n )\n\n self._servers, actual_port, listen_addrs = await self._start_servers(\n _fix_wildcard_host(self._listen_hosts), self._listen_port\n )\n if self._listen_port == 0:\n self._listen_port = actual_port\n\n self._accepting_connections = True\n self._serving = True\n\n if self._echo_runtime_info:\n ri = {\n \"port\": self._listen_port,\n \"runstate_dir\": str(self._runstate_dir),\n \"tls_cert_file\": self._tls_cert_file,\n }\n print(f'\\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\\n', flush=True)\n\n if self._status_sink is 
not None:\n status = {\n \"listen_addrs\": listen_addrs,\n \"port\": self._listen_port,\n \"socket_dir\": str(self._runstate_dir),\n \"main_pid\": os.getpid(),\n \"tenant_id\": self._tenant_id,\n \"tls_cert_file\": self._tls_cert_file,\n }\n self._status_sink(f'READY={json.dumps(status)}')\n\n async def stop(self):\n try:\n self._serving = False\n self._accept_new_tasks = False\n\n if self._http_request_logger is not None:\n self._http_request_logger.cancel()\n\n await self._stop_servers(self._servers.values())\n self._servers = {}\n\n for conn in self._binary_conns:\n conn.stop()\n self._binary_conns = set()\n\n if self._task_group is not None:\n tg = self._task_group\n self._task_group = None\n await tg.__aexit__(*sys.exc_info())\n\n await self._destroy_compiler_pool()\n\n finally:\n if self.__sys_pgcon is not None:\n self.__sys_pgcon.terminate()\n self.__sys_pgcon = None\n self._sys_pgcon_waiter = None\n\n def create_task(self, coro):\n if self._accept_new_tasks:\n return self._task_group.create_task(coro)\n\n async def serve_forever(self):\n await self._stop_evt.wait()\n\n async def get_auth_method(self, user):\n authlist = self._sys_auth\n\n if not authlist:\n default_method = 'SCRAM'\n return config.get_settings().get_type_by_name(default_method)()\n else:\n for auth in authlist:\n match = (\n (user in auth.user or '*' in auth.user)\n )\n\n if match:\n return auth.method\n\n raise errors.AuthenticationError(\n f\"no authentication method configured for {user!r} role\"\n )\n\n def get_sys_query(self, key):\n return self._sys_queries[key]\n\n def get_instance_data(self, key):\n return self._instance_data[key]\n\n def get_backend_runtime_params(self) -> Any:\n return self._cluster.get_runtime_params()\n\n def set_pg_unavailable_msg(self, msg):\n if msg is None or self._pg_unavailable_msg is None:\n self._pg_unavailable_msg = msg\n\n\nasync def _resolve_localhost() -> List[str]:\n # On many systems 'localhost' resolves to _both_ IPv4 and IPv6\n # addresses, even if the system is not capable of handling\n # IPv6 connections. Due to the common nature of this issue\n # we explicitly disable the AF_INET6 component of 'localhost'.\n\n loop = asyncio.get_running_loop()\n localhost = await loop.getaddrinfo(\n 'localhost',\n 0,\n family=socket.AF_UNSPEC,\n type=socket.SOCK_STREAM,\n flags=socket.AI_PASSIVE,\n proto=0,\n )\n\n infos = [a for a in localhost if a[0] == socket.AF_INET]\n\n if not infos:\n # \"localhost\" did not resolve to an IPv4 address,\n # let create_server handle the situation.\n return [\"localhost\"]\n\n # Replace 'localhost' with explicitly resolved AF_INET addresses.\n hosts = []\n for info in reversed(infos):\n addr, *_ = info[4]\n hosts.append(addr)\n\n return hosts\n\n\ndef _fix_wildcard_host(hosts: Sequence[str]) -> Sequence[str]:\n # Even though it is sometimes not a conflict to bind on the same port of\n # both the wildcard host 0.0.0.0 and some specific host at the same time,\n # we're still discarding other hosts if 0.0.0.0 is present because it\n # should behave the same and we could avoid potential conflicts.\n\n if '0.0.0.0' in hosts:\n if len(hosts) > 1:\n logger.warning(\n \"0.0.0.0 found in listen_addresses; \"\n \"discarding the other hosts.\"\n )\n hosts = ['0.0.0.0']\n return hosts\n", "path": "edb/server/server.py" } ]
diff --git a/edb/server/server.py b/edb/server/server.py index ace23ca2d02..c848c757f97 100644 --- a/edb/server/server.py +++ b/edb/server/server.py @@ -1191,6 +1191,10 @@ async def get_auth_method(self, user): if match: return auth.method + raise errors.AuthenticationError( + f"no authentication method configured for {user!r} role" + ) + def get_sys_query(self, key): return self._sys_queries[key] diff --git a/tests/test_server_auth.py b/tests/test_server_auth.py index 7db62127245..3ba4f49b2f7 100644 --- a/tests/test_server_auth.py +++ b/tests/test_server_auth.py @@ -158,6 +158,57 @@ async def test_server_auth_01(self): finally: await self.con.query("DROP ROLE bar") + async def test_server_auth_02(self): + try: + await self.con.query(''' + CREATE SUPERUSER ROLE foo { + SET password := 'foo-pass'; + } + ''') + + await self.con.query(''' + CREATE SUPERUSER ROLE bar { + SET password := 'bar-pass'; + } + ''') + + await self.con.query(''' + CONFIGURE INSTANCE INSERT Auth { + comment := 'test-02', + priority := 0, + method := (INSERT SCRAM), + user := 'foo', + } + ''') + + # good password with configured Auth + conn = await self.connect( + user='foo', + password='foo-pass', + ) + await conn.aclose() + + # good password but Auth is not configured + with self.assertRaisesRegex( + edgedb.AuthenticationError, + "no authentication method configured for 'bar' role"): + await self.connect( + user='bar', + password='bar-pass' + ) + finally: + await self.con.query(''' + CONFIGURE INSTANCE RESET Auth FILTER .comment = 'test-02' + ''') + + await self.con.query(''' + DROP ROLE foo; + ''') + + await self.con.query(''' + DROP ROLE bar; + ''') + async def test_long_role_name(self): with self.assertRaisesRegex( edgedb.SchemaDefinitionError,
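The diff above closes a silent fallthrough in `get_auth_method`: when `Auth` entries are configured but none of them matches the connecting user, the method previously fell off the end and returned `None`. A minimal, self-contained sketch of the corrected pattern; the dict-based `authlist` and the stand-in exception class below are illustrative, not EdgeDB's actual API:

```python
class AuthenticationError(Exception):
    """Stand-in for edb.errors.AuthenticationError."""


def get_auth_method(user, authlist, default_method="SCRAM"):
    # No Auth objects configured at all: fall back to the default method.
    if not authlist:
        return default_method
    # Auth objects exist: only an explicit (or wildcard) match yields a
    # method; anything else is rejected instead of silently returning None.
    for auth in authlist:
        if user in auth["user"] or "*" in auth["user"]:
            return auth["method"]
    raise AuthenticationError(
        f"no authentication method configured for {user!r} role"
    )


authlist = [{"user": ["foo"], "method": "SCRAM"}]
assert get_auth_method("foo", authlist) == "SCRAM"
try:
    get_auth_method("bar", authlist)
except AuthenticationError as exc:
    print(exc)  # no authentication method configured for 'bar' role
```

The added `test_server_auth_02` exercises exactly these two branches: a role covered by a configured `Auth` entry can connect, while a role that is not covered now gets the explicit `AuthenticationError`.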
goauthentik__authentik-5630
SMS Authenticator Setup Stage with a generic provider does not work without a mapping

**Describe the bug**
Setting up the stage and flow works, but trying to set up an MFA device results in an error after entering the phone number.

**To Reproduce**
Create an SMS Authenticator Setup Stage with a generic provider and without a mapping.

**Expected behavior**
An SMS should be sent.

**Version and Deployment (please complete the following information):**

- current main

[ { "content": "\"\"\"SMS Authenticator models\"\"\"\nfrom hashlib import sha256\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django_otp.models import SideChannelDevice\nfrom requests.exceptions import RequestException\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\nfrom twilio.base.exceptions import TwilioRestException\nfrom twilio.rest import Client\n\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.events.models import Event, EventAction, NotificationWebhookMapping\nfrom authentik.events.utils import sanitize_item\nfrom authentik.flows.models import ConfigurableStage, FriendlyNamedStage, Stage\nfrom authentik.lib.models import SerializerModel\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.http import get_http_session\n\nLOGGER = get_logger()\n\n\nclass SMSProviders(models.TextChoices):\n \"\"\"Supported SMS Providers\"\"\"\n\n TWILIO = \"twilio\"\n GENERIC = \"generic\"\n\n\nclass SMSAuthTypes(models.TextChoices):\n \"\"\"Supported SMS Auth Types\"\"\"\n\n BASIC = \"basic\"\n BEARER = \"bearer\"\n\n\nclass AuthenticatorSMSStage(ConfigurableStage, FriendlyNamedStage, Stage):\n \"\"\"Use SMS-based TOTP instead of authenticator-based.\"\"\"\n\n provider = models.TextField(choices=SMSProviders.choices)\n\n from_number = models.TextField()\n\n account_sid = models.TextField()\n auth = models.TextField()\n auth_password = models.TextField(default=\"\", blank=True)\n auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)\n\n verify_only = models.BooleanField(\n default=False,\n help_text=_(\n \"When enabled, the Phone number is only used during enrollment to verify the \"\n \"users authenticity. 
Only a hash of the phone number is saved to ensure it is \"\n \"not re-used in the future.\"\n ),\n )\n\n mapping = models.ForeignKey(\n NotificationWebhookMapping,\n null=True,\n default=None,\n on_delete=models.SET_NULL,\n help_text=_(\"Optionally modify the payload being sent to custom providers.\"),\n )\n\n def send(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send message via selected provider\"\"\"\n if self.provider == SMSProviders.TWILIO:\n return self.send_twilio(token, device)\n if self.provider == SMSProviders.GENERIC:\n return self.send_generic(token, device)\n raise ValueError(f\"invalid provider {self.provider}\")\n\n def get_message(self, token: str) -> str:\n \"\"\"Get SMS message\"\"\"\n return _(\"Use this code to authenticate in authentik: %(token)s\" % {\"token\": token})\n\n def send_twilio(self, token: str, device: \"SMSDevice\"):\n \"\"\"send sms via twilio provider\"\"\"\n client = Client(self.account_sid, self.auth)\n\n try:\n message = client.messages.create(\n to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n LOGGER.warning(\"Error sending token by Twilio SMS\", exc=exc, msg=exc.msg)\n raise ValidationError(exc.msg)\n\n def send_generic(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send SMS via outside API\"\"\"\n payload = {\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n \"Message\": self.get_message(token),\n }\n\n if self.mapping:\n payload = sanitize_item(\n self.mapping.evaluate(\n user=device.user,\n request=None,\n device=device,\n token=token,\n stage=self,\n )\n )\n\n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n else:\n raise ValueError(f\"Invalid Auth type '{self.auth_type}'\")\n\n LOGGER.debug(\"Sent SMS\", to=device.phone_number)\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Error sending token by generic SMS\",\n exc=exc,\n status=response.status_code,\n body=response.text[:100],\n )\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=\"Error sending SMS\",\n exc=exception_to_string(exc),\n status_code=response.status_code,\n body=response.text,\n ).set_user(device.user).save()\n if response.status_code >= 400:\n raise ValidationError(response.text)\n raise\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer\n\n return AuthenticatorSMSStageSerializer\n\n @property\n def type(self) -> type[View]:\n from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView\n\n return AuthenticatorSMSStageView\n\n @property\n def component(self) -> str:\n return \"ak-stage-authenticator-sms-form\"\n\n def ui_user_settings(self) -> Optional[UserSettingSerializer]:\n return UserSettingSerializer(\n data={\n \"title\": self.friendly_name or str(self._meta.verbose_name),\n \"component\": \"ak-user-settings-authenticator-sms\",\n }\n )\n\n def __str__(self) -> str:\n return f\"SMS Authenticator Setup Stage {self.name}\"\n\n class Meta:\n verbose_name = _(\"SMS Authenticator Setup Stage\")\n verbose_name_plural = _(\"SMS Authenticator Setup 
Stages\")\n\n\ndef hash_phone_number(phone_number: str) -> str:\n \"\"\"Hash phone number with prefix\"\"\"\n return \"hash:\" + sha256(phone_number.encode()).hexdigest()\n\n\nclass SMSDevice(SerializerModel, SideChannelDevice):\n \"\"\"SMS Device\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n\n # Connect to the stage to when validating access we know the API Credentials\n stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)\n\n phone_number = models.TextField()\n\n last_t = models.DateTimeField(auto_now=True)\n\n def set_hashed_number(self):\n \"\"\"Set phone_number to hashed number\"\"\"\n self.phone_number = hash_phone_number(self.phone_number)\n\n @property\n def is_hashed(self) -> bool:\n \"\"\"Check if the phone number is hashed\"\"\"\n return self.phone_number.startswith(\"hash:\")\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import SMSDeviceSerializer\n\n return SMSDeviceSerializer\n\n def verify_token(self, token):\n valid = super().verify_token(token)\n if valid:\n self.save()\n return valid\n\n def __str__(self):\n return str(self.name) or str(self.user)\n\n class Meta:\n verbose_name = _(\"SMS Device\")\n verbose_name_plural = _(\"SMS Devices\")\n unique_together = ((\"stage\", \"phone_number\"),)\n", "path": "authentik/stages/authenticator_sms/models.py" } ]
[ { "content": "\"\"\"SMS Authenticator models\"\"\"\nfrom hashlib import sha256\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django_otp.models import SideChannelDevice\nfrom requests.exceptions import RequestException\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\nfrom twilio.base.exceptions import TwilioRestException\nfrom twilio.rest import Client\n\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.events.models import Event, EventAction, NotificationWebhookMapping\nfrom authentik.events.utils import sanitize_item\nfrom authentik.flows.models import ConfigurableStage, FriendlyNamedStage, Stage\nfrom authentik.lib.models import SerializerModel\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.http import get_http_session\n\nLOGGER = get_logger()\n\n\nclass SMSProviders(models.TextChoices):\n \"\"\"Supported SMS Providers\"\"\"\n\n TWILIO = \"twilio\"\n GENERIC = \"generic\"\n\n\nclass SMSAuthTypes(models.TextChoices):\n \"\"\"Supported SMS Auth Types\"\"\"\n\n BASIC = \"basic\"\n BEARER = \"bearer\"\n\n\nclass AuthenticatorSMSStage(ConfigurableStage, FriendlyNamedStage, Stage):\n \"\"\"Use SMS-based TOTP instead of authenticator-based.\"\"\"\n\n provider = models.TextField(choices=SMSProviders.choices)\n\n from_number = models.TextField()\n\n account_sid = models.TextField()\n auth = models.TextField()\n auth_password = models.TextField(default=\"\", blank=True)\n auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)\n\n verify_only = models.BooleanField(\n default=False,\n help_text=_(\n \"When enabled, the Phone number is only used during enrollment to verify the \"\n \"users authenticity. 
Only a hash of the phone number is saved to ensure it is \"\n \"not re-used in the future.\"\n ),\n )\n\n mapping = models.ForeignKey(\n NotificationWebhookMapping,\n null=True,\n default=None,\n on_delete=models.SET_NULL,\n help_text=_(\"Optionally modify the payload being sent to custom providers.\"),\n )\n\n def send(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send message via selected provider\"\"\"\n if self.provider == SMSProviders.TWILIO:\n return self.send_twilio(token, device)\n if self.provider == SMSProviders.GENERIC:\n return self.send_generic(token, device)\n raise ValueError(f\"invalid provider {self.provider}\")\n\n def get_message(self, token: str) -> str:\n \"\"\"Get SMS message\"\"\"\n return _(\"Use this code to authenticate in authentik: %(token)s\" % {\"token\": token})\n\n def send_twilio(self, token: str, device: \"SMSDevice\"):\n \"\"\"send sms via twilio provider\"\"\"\n client = Client(self.account_sid, self.auth)\n\n try:\n message = client.messages.create(\n to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n LOGGER.warning(\"Error sending token by Twilio SMS\", exc=exc, msg=exc.msg)\n raise ValidationError(exc.msg)\n\n def send_generic(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send SMS via outside API\"\"\"\n payload = {\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n \"Message\": str(self.get_message(token)),\n }\n\n if self.mapping:\n payload = sanitize_item(\n self.mapping.evaluate(\n user=device.user,\n request=None,\n device=device,\n token=token,\n stage=self,\n )\n )\n\n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n else:\n raise ValueError(f\"Invalid Auth type '{self.auth_type}'\")\n\n LOGGER.debug(\"Sent SMS\", to=device.phone_number)\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Error sending token by generic SMS\",\n exc=exc,\n status=response.status_code,\n body=response.text[:100],\n )\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=\"Error sending SMS\",\n exc=exception_to_string(exc),\n status_code=response.status_code,\n body=response.text,\n ).set_user(device.user).save()\n if response.status_code >= 400:\n raise ValidationError(response.text)\n raise\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer\n\n return AuthenticatorSMSStageSerializer\n\n @property\n def type(self) -> type[View]:\n from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView\n\n return AuthenticatorSMSStageView\n\n @property\n def component(self) -> str:\n return \"ak-stage-authenticator-sms-form\"\n\n def ui_user_settings(self) -> Optional[UserSettingSerializer]:\n return UserSettingSerializer(\n data={\n \"title\": self.friendly_name or str(self._meta.verbose_name),\n \"component\": \"ak-user-settings-authenticator-sms\",\n }\n )\n\n def __str__(self) -> str:\n return f\"SMS Authenticator Setup Stage {self.name}\"\n\n class Meta:\n verbose_name = _(\"SMS Authenticator Setup Stage\")\n verbose_name_plural = _(\"SMS Authenticator 
Setup Stages\")\n\n\ndef hash_phone_number(phone_number: str) -> str:\n \"\"\"Hash phone number with prefix\"\"\"\n return \"hash:\" + sha256(phone_number.encode()).hexdigest()\n\n\nclass SMSDevice(SerializerModel, SideChannelDevice):\n \"\"\"SMS Device\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n\n # Connect to the stage to when validating access we know the API Credentials\n stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)\n\n phone_number = models.TextField()\n\n last_t = models.DateTimeField(auto_now=True)\n\n def set_hashed_number(self):\n \"\"\"Set phone_number to hashed number\"\"\"\n self.phone_number = hash_phone_number(self.phone_number)\n\n @property\n def is_hashed(self) -> bool:\n \"\"\"Check if the phone number is hashed\"\"\"\n return self.phone_number.startswith(\"hash:\")\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import SMSDeviceSerializer\n\n return SMSDeviceSerializer\n\n def verify_token(self, token):\n valid = super().verify_token(token)\n if valid:\n self.save()\n return valid\n\n def __str__(self):\n return str(self.name) or str(self.user)\n\n class Meta:\n verbose_name = _(\"SMS Device\")\n verbose_name_plural = _(\"SMS Devices\")\n unique_together = ((\"stage\", \"phone_number\"),)\n", "path": "authentik/stages/authenticator_sms/models.py" } ]
diff --git a/authentik/stages/authenticator_sms/models.py b/authentik/stages/authenticator_sms/models.py index 9f2e117b4bf1..65e842d556f0 100644 --- a/authentik/stages/authenticator_sms/models.py +++ b/authentik/stages/authenticator_sms/models.py @@ -99,7 +99,7 @@ def send_generic(self, token: str, device: "SMSDevice"): "From": self.from_number, "To": device.phone_number, "Body": token, - "Message": self.get_message(token), + "Message": str(self.get_message(token)), } if self.mapping:
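The one-line fix above converts the SMS text to a plain string before it goes into the default `send_generic` payload. `get_message` returns a lazy translation proxy, and the most likely failure without a mapping is that this proxy is not JSON serializable when the payload is posted with `json=payload`. A self-contained sketch of that failure mode, using a toy lazy wrapper rather than Django's real proxy:

```python
import json


class LazyText:
    """Toy stand-in for Django's lazy translation proxy."""

    def __init__(self, text: str) -> None:
        self._text = text

    def __str__(self) -> str:
        return self._text


message = LazyText("Use this code to authenticate in authentik: 123456")

try:
    json.dumps({"Message": message})
except TypeError as exc:
    # e.g. "Object of type LazyText is not JSON serializable"
    print(f"unpatched payload fails: {exc}")

# Wrapping the lazy value in str(), as the fix does, makes it serializable.
print(json.dumps({"Message": str(message)}))
```

When a mapping is configured, the whole payload is replaced by the result of `sanitize_item(self.mapping.evaluate(...))`, which matches the report that the stage only fails when no mapping is set.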
pypa__pipenv-5909
documentation is a mix of markdown and restructuredtext

### Issue description

The documentation is a mix of Markdown and reStructuredText, but the [contributing guide says the documentation is reStructuredText](https://pipenv.pypa.io/en/latest/dev/contributing/#documentation-contributions). It looks like commit 761a03d did a conversion to Markdown that was never completed. I would like to know which direction is being pursued (Markdown + reStructuredText, or full Markdown), and if the direction is full Markdown, I would like to try to help.

### Expected result

* either the documentation states that it is both Markdown and reStructuredText,
* or one of the two formats (reStructuredText, I guess) is cleaned out.

### Actual result

A mix of both.

### Steps to replicate

N/A
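One quick way to see the mix described here is to tally the documentation sources by suffix. A hypothetical helper sketch; the `docs/` path is an assumption about the repository layout:

```python
from collections import Counter
from pathlib import Path

# Count .md vs .rst sources to gauge how far the Markdown conversion got.
counts = Counter(
    path.suffix
    for path in Path("docs").rglob("*")
    if path.suffix in {".md", ".rst"}
)
print(dict(counts))
```

The `docs/conf.py` change further below simply registers both suffixes with Sphinx so that MyST-parsed Markdown and reStructuredText pages can coexist while the conversion is completed.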
[ { "content": "#\n# pipenv documentation build configuration file, created by\n# sphinx-quickstart on Mon Jan 30 13:28:36 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n\n# Path hackery to get current version number.\nhere = os.path.abspath(os.path.dirname(__file__))\n\nabout = {}\nwith open(os.path.join(here, \"..\", \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\n# Hackery to get the CLI docs to generate\nimport click\n\nimport pipenv.vendor.click\n\nclick.Command = pipenv.vendor.click.Command\nclick.Group = pipenv.vendor.click.Group\nclick.BaseCommand = pipenv.vendor.click.BaseCommand\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"myst_parser\",\n \"sphinx_click\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\nmyst_enable_extensions = [\n \"amsmath\",\n \"colon_fence\",\n \"deflist\",\n \"dollarmath\",\n \"fieldlist\",\n \"html_admonition\",\n \"html_image\",\n \"linkify\",\n \"replacements\",\n \"smartquotes\",\n \"strikethrough\",\n \"substitution\",\n \"tasklist\",\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"pipenv\"\ncopyright = '2020. A project founded by Kenneth Reitz and maintained by <a href=\"https://www.pypa.io/en/latest/\">Python Packaging Authority (PyPA).</a>'\nauthor = \"Python Packaging Authority\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = about[\"__version__\"]\n# The full version, including alpha/beta/rc tags.\nrelease = about[\"__version__\"]\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"show_powered_by\": False,\n \"github_user\": \"pypa\",\n \"github_repo\": \"pipenv\",\n \"github_banner\": False,\n \"show_related\": False,\n}\n\nhtml_sidebars = {\n \"index\": [\"sidebarlogo.html\", \"sourcelink.html\", \"searchbox.html\", \"hacks.html\"],\n \"**\": [\n \"sidebarlogo.html\",\n \"localtoc.html\",\n \"relations.html\",\n \"sourcelink.html\",\n \"searchbox.html\",\n \"hacks.html\",\n ],\n}\n\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pipenvdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"pipenv.tex\", \"pipenv Documentation\", \"Kenneth Reitz\", \"manual\"),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"quickstart\", \"pipenv\", \"\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pipenv\",\n \"pipenv Documentation\",\n author,\n \"pipenv\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py" } ]
[ { "content": "#\n# pipenv documentation build configuration file, created by\n# sphinx-quickstart on Mon Jan 30 13:28:36 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n\n# Path hackery to get current version number.\nhere = os.path.abspath(os.path.dirname(__file__))\n\nabout = {}\nwith open(os.path.join(here, \"..\", \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\n# Hackery to get the CLI docs to generate\nimport click\n\nimport pipenv.vendor.click\n\nclick.Command = pipenv.vendor.click.Command\nclick.Group = pipenv.vendor.click.Group\nclick.BaseCommand = pipenv.vendor.click.BaseCommand\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"myst_parser\",\n \"sphinx_click\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\nmyst_enable_extensions = [\n \"amsmath\",\n \"colon_fence\",\n \"deflist\",\n \"dollarmath\",\n \"fieldlist\",\n \"html_admonition\",\n \"html_image\",\n \"linkify\",\n \"replacements\",\n \"smartquotes\",\n \"strikethrough\",\n \"substitution\",\n \"tasklist\",\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = [\".rst\", \".md\"]\n# source_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"pipenv\"\ncopyright = '2020. A project founded by Kenneth Reitz and maintained by <a href=\"https://www.pypa.io/en/latest/\">Python Packaging Authority (PyPA).</a>'\nauthor = \"Python Packaging Authority\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = about[\"__version__\"]\n# The full version, including alpha/beta/rc tags.\nrelease = about[\"__version__\"]\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"show_powered_by\": False,\n \"github_user\": \"pypa\",\n \"github_repo\": \"pipenv\",\n \"github_banner\": False,\n \"show_related\": False,\n}\n\nhtml_sidebars = {\n \"index\": [\"sidebarlogo.html\", \"sourcelink.html\", \"searchbox.html\", \"hacks.html\"],\n \"**\": [\n \"sidebarlogo.html\",\n \"localtoc.html\",\n \"relations.html\",\n \"sourcelink.html\",\n \"searchbox.html\",\n \"hacks.html\",\n ],\n}\n\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pipenvdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"pipenv.tex\", \"pipenv Documentation\", \"Kenneth Reitz\", \"manual\"),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"quickstart\", \"pipenv\", \"\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pipenv\",\n \"pipenv Documentation\",\n author,\n \"pipenv\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..e836f43dd6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1875 @@ +# 2023.9.1 (2023-09-01) + +# Pipenv 2023.9.1 (2023-09-01) + +## Features & Improvements + +- Top level Pipfile sys_platform markers should be transitive; adds top level platform_machine entries that are also transitive. Marker entries continue to operate the same as before. [#5892](https://github.com/pypa/pipenv/issues/5892) + +## Bug Fixes + +- Apply patch for install_search_all_sources = True functionality. [#5895](https://github.com/pypa/pipenv/issues/5895) +- Relative paths improvements for editable installs. [#5896](https://github.com/pypa/pipenv/issues/5896) +- Set log level in resolver to WARN when verbose is not passed. [#5897](https://github.com/pypa/pipenv/issues/5897) +- Handle more variations in private index html to improve hash collection. [#5898](https://github.com/pypa/pipenv/issues/5898) + +# 2023.8.28 (2023-08-28) + +## Bug Fixes + +- Revert change that caused the credentials in source url issue. [#5878](https://github.com/pypa/pipenv/issues/5878) +- Do not treat named requirements as file installs just becacuse a match path exists; better handling of editable keyword for local file installs. + Handle additional edge cases in the setup.py ast parser logic for trying to determine local install package name. [#5885](https://github.com/pypa/pipenv/issues/5885) + +# 2023.8.26 (2023-08-26) + +## Bug Fixes + +- Additional property caching to avoid duplication of sources in the resolver. [#5863](https://github.com/pypa/pipenv/issues/5863) +- Fix recent regressions with local/editable file installs. [#5870](https://github.com/pypa/pipenv/issues/5870) +- Fixes the vcs subdirectory fragments regression; fixes sys_platform markers regression. [#5871](https://github.com/pypa/pipenv/issues/5871) +- Fix regression that caused printing non-printable ascii characters when help was called. [#5872](https://github.com/pypa/pipenv/issues/5872) + +# 2023.8.25 (2023-08-25) + +## Bug Fixes + +- Fix regression of hash collection when downloading package from private indexes when the hash is not found in the index href url fragment. [#5866](https://github.com/pypa/pipenv/issues/5866) + +# 2023.8.23 (2023-08-22) + +## Bug Fixes + +- More gracefully handle @ symbols in vcs URLs to address recent regression with vcs URLs. [#5849](https://github.com/pypa/pipenv/issues/5849) + +# 2023.8.22 (2023-08-22) + +## Bug Fixes + +- Fix regression with `ssh://` vcs URLs introduced in `2023.8.21` whereby ssh vcs URLs are expected to have at least one `@` symbol. [#5846](https://github.com/pypa/pipenv/issues/5846) + +# 2023.8.21 (2023-08-21) + +## Bug Fixes + +- Add back some relevant caching to increase performance after the major refactor released with `2023.8.19` [#5841](https://github.com/pypa/pipenv/issues/5841) +- Fix some edge cases around vcs dependencies without a ref, and older Pipfile/lockfile formats. [#5843](https://github.com/pypa/pipenv/issues/5843) + +## Vendored Libraries + +- Remove unused command line interface for vendored packages. [#5840](https://github.com/pypa/pipenv/issues/5840) + +# 2023.8.20 (2023-08-20) + +## Bug Fixes + +- Fix the expected output of the `version` command. [#5838](https://github.com/pypa/pipenv/issues/5838) + +# 2023.8.19 (2023-08-19) + +## Features & Improvements + +- The `--categories` option now works with requirements.txt file. 
[#5722](https://github.com/pypa/pipenv/issues/5722) + +## Bug Fixes + +- Drop requirementslib for managing pip lines and InstallRequirements, bring remaining requirementslib functionality into pipenv. + Fixes numerous reports about extras installs with vcs and file installs; format pip lines correctly to not generate deprecation warnings. [#5793](https://github.com/pypa/pipenv/issues/5793) + +## Vendored Libraries + +- Update pip 23.2 -> 23.2.1 [#5822](https://github.com/pypa/pipenv/issues/5822) + +## Improved Documentation + +- Added documentation on how to move or rename a project directory [#5129](https://github.com/pypa/pipenv/issues/5129) + +## Removals and Deprecations + +- The `--skip-lock` flag which was deprecated, has now been removed to unblock modernizing the pipenv resolver code. [#5805](https://github.com/pypa/pipenv/issues/5805) + +# 2023.7.23 (2023-07-23) + +## Features & Improvements + +- Upgrades `pip==23.2` which includes everything from the pip changelog. Drops the "install_compatatability_finder" pip internals patch. [#5808](https://github.com/pypa/pipenv/issues/5808) + +## Bug Fixes + +- Fix issue parsing some Pipfiles with separate packages.\<pkg> sections (tomlkit OutOfOrderTableProxy) [#5794](https://github.com/pypa/pipenv/issues/5794) +- Fix all ruff linter warnings [#5807](https://github.com/pypa/pipenv/issues/5807) +- Restore running Resolver in sub-process using the project python by default; maintains ability to run directly by setting `PIPENV_RESOLVER_PARENT_PYTHON` environment variable to 1 (useful for internal debugging). [#5809](https://github.com/pypa/pipenv/issues/5809) +- Fix error when a Windows path begins with a '' with `pythonfinder==2.0.5`. [#5812](https://github.com/pypa/pipenv/issues/5812) + +## Vendored Libraries + +- Remove usage of click.secho in some modules. [#5804](https://github.com/pypa/pipenv/issues/5804) + +2023.7.11 (2023-07-11) + +## Bug Fixes + +- Invoke the resolver in the same process as pipenv rather than utilizing subprocess. [#5787](https://github.com/pypa/pipenv/issues/5787) +- Fix regression markers being included as None/null in requirements command. [#5788](https://github.com/pypa/pipenv/issues/5788) + +# 2023.7.9 (2023-07-09) + +## Bug Fixes + +- Drop the --keep-outdated flag and --selective-upgrade flags that have been deprecated in favor of update/upgrade commands. [#5730](https://github.com/pypa/pipenv/issues/5730) +- Fix regressions in the `requirements` command related to standard index extras and handling of local file requirements. [#5784](https://github.com/pypa/pipenv/issues/5784) + +# 2023.7.4 (2023-07-04) + +## Bug Fixes + +- Fixes regression on Pipfile requirements syntax. Ensure default operator is provided to requirement lib to avoid crash. [#5765](https://github.com/pypa/pipenv/issues/5765) +- Ensure hashes included in a generated requirements file are after any markers. [#5777](https://github.com/pypa/pipenv/issues/5777) + +# 2023.7.3 (2023-07-02) + +## Bug Fixes + +- Fix regression with `--system` flag usage. [#5773](https://github.com/pypa/pipenv/issues/5773) + +# 2023.7.1 (2023-07-01) + +## Bug Fixes + +- Patch `_get_requests_session` method to consider `PIP_CLIENT_CERT` value when present. [#5746](https://github.com/pypa/pipenv/issues/5746) +- Fix regression in `requirements` command that was causing package installs after upgrade to `requirementslib==3.0.0`. [#5755](https://github.com/pypa/pipenv/issues/5755) +- Fix `error: invalid command 'egg_info'` edge case with requirementslib 3.0.0. 
It exposed pipenv resolver sometimes was using a different python than expected. [#5760](https://github.com/pypa/pipenv/issues/5760) +- Fix issue in requirementslib 3.0.0 where dependencies defined in pyproject.toml were not being included in the lock file. [#5766](https://github.com/pypa/pipenv/issues/5766) + +## Removals and Deprecations + +- Bump dparse to 0.6.3 [#5750](https://github.com/pypa/pipenv/issues/5750) + +# 2023.6.26 (2023-06-26) + +## Improved Documentation + +- Add missing environment variable descriptions back to documentation [#missing_env_var_desc](https://github.com/pypa/pipenv/issues/missing_env_var_desc) + +# 2023.6.18 (2023-06-18) + +## Bug Fixes + +- Fixes resolver to only consider the default index for packages when a secondary index is not specified. This brings the code into alignment with stated assumptions about index restricted packages behavior of `pipenv`. [#5737](https://github.com/pypa/pipenv/issues/5737) + +## Removals and Deprecations + +- Deprecation of `--skip-lock` flag as it bypasses the security benefits of pipenv. Plus it lacks proper deterministic support of installation from multiple package indexes. [#5737](https://github.com/pypa/pipenv/issues/5737) + +# 2023.6.12 (2023-06-11) + +## Bug Fixes + +- Remove the `sys.path` modifications and as a result fixes keyring support. [#5719](https://github.com/pypa/pipenv/issues/5719) + +# 2023.6.11 (2023-06-11) + +## Vendored Libraries + +- Upgrades to `pipdeptree==2.8.0` which fixes edge cases of the `pipenv graph` command. [#5720](https://github.com/pypa/pipenv/issues/5720) + +# 2023.6.2 (2023-06-02) + +## Features & Improvements + +- Resolver performance: package sources following PEP 503 will leverage package hashes from the URL fragment, without downloading the package. [#5701](https://github.com/pypa/pipenv/issues/5701) + +## Bug Fixes + +- Improve regex for python versions to handle hidden paths; handle relative paths to python better as well. [#4588](https://github.com/pypa/pipenv/issues/4588) +- Update `pythonfinder==2.0.4` with fix for "RecursionError: maximum recursion depth exceeded". [#5709](https://github.com/pypa/pipenv/issues/5709) + +## Vendored Libraries + +- Drop old vendored toml library. Use stdlib tomllib or tomli instead. [#5678](https://github.com/pypa/pipenv/issues/5678) +- Drop vendored library cerberus. This isn't actually used by pipenv. [#5699](https://github.com/pypa/pipenv/issues/5699) + +# 2023.5.19 (2023-05-19) + +## Bug Fixes + +- Consider `--index` argument in `update` and `upgrade` commands. [#5692](https://github.com/pypa/pipenv/issues/5692) + +## Vendored Libraries + +- Upgrade `pythonfinder==2.0.0` which also brings in `pydantic==1.10.7`. [#5677](https://github.com/pypa/pipenv/issues/5677) + +# 2023.4.29 (2023-04-29) + +## Vendored Libraries + +- Vendor in `pip==23.1.2` latest. [#5671](https://github.com/pypa/pipenv/issues/5671) +- Vendor in `requirementslib==2.3.0` which drops usage of `vistir`. [#5672](https://github.com/pypa/pipenv/issues/5672) + +# 2023.4.20 (2023-04-20) + +## Features & Improvements + +- Checks environment variable `PIP_TRUSTED_HOSTS` when evaluating an + index specified at the command line when adding to `Pipfile`. 
+ + For example, this command line + + ``` + PIP_TRUSTED_HOSTS=internal.mycompany.com pipenv install pypkg --index=https://internal.mycompany.com/pypi/simple + ``` + + will add the following to the `Pipfile`: + + ``` + [[source]] + url = 'https://internal.mycompany.com/pypi/simple' + verify_ssl = false + name = 'Internalmycompany' + + [packages] + pypkg = {version="*", index="Internalmycompany"} + ``` + + This allows users with private indexes to add them to `Pipfile` + initially from command line with correct permissions using environment + variable `PIP_TRUSTED_HOSTS`. [#5572](https://github.com/pypa/pipenv/issues/5572) + +- Vendor in the updates, upgrades and fixes provided by `pip==23.1`. [#5655](https://github.com/pypa/pipenv/issues/5655) + +- Replace flake8 and isort with [ruff](https://beta.ruff.rs). [#ruff](https://github.com/pypa/pipenv/issues/ruff) + +## Bug Fixes + +- Fix regression with `--skip-lock` option with `install` command. [#5653](https://github.com/pypa/pipenv/issues/5653) + +## Vendored Libraries + +- Vendor in latest `python-dotenv==1.0.0` [#5656](https://github.com/pypa/pipenv/issues/5656) +- Vendor in latest available dependencies: `attrs==23.1.0` `click-didyoumean==0.3.0` `click==8.1.3` `markupsafe==2.1.2` `pipdeptree==2.7.0` `shellingham==1.5.0.post1` `tomlkit==0.11.7` [#5657](https://github.com/pypa/pipenv/issues/5657) +- Vendor in latest `requirementslib==2.2.5` which includes updates for pip 23.1 [#5659](https://github.com/pypa/pipenv/issues/5659) + +## Improved Documentation + +- Made documentation clear about tilde-equals operator for package versions. [#5594](https://github.com/pypa/pipenv/issues/5594) + +# 2023.3.20 (2023-03-19) + +No significant changes. + +# 2023.3.18 (2023-03-19) + +## Bug Fixes + +- Fix import error in virtualenv utility for creating new environments caused by `2023.3.18` release. [#5636](https://github.com/pypa/pipenv/issues/5636) + +# 2023.3.18 (2023-03-18) + +## Features & Improvements + +- Provide a more powerful solution than `--keep-outdated` and `--selective-upgrade` which are deprecated for removal. + Introducing the `pipenv upgrade` command which takes the same package specifiers as `pipenv install` and + updates the `Pipfile` and `Pipfile.lock` with a valid lock resolution that only effects the specified packages and their dependencies. + Additionally, the `pipenv update` command has been updated to use the `pipenv upgrade` routine when packages are provided, which will install sync the new lock file as well. [#5617](https://github.com/pypa/pipenv/issues/5617) + +## Vendored Libraries + +- Bump vistir to 0.8.0, requirementslib to 2.2.4. [#5635](https://github.com/pypa/pipenv/issues/5635) + +# 2023.2.18 (2023-02-18) + +## Features & Improvements + +- `pipenv` now reads the system `pip.conf` or `pip.ini` file in order to determine pre-defined indexes to use for package resolution and installation. [#5297](https://github.com/pypa/pipenv/issues/5297) +- Behavior change for `pipenv check` now checks the default packages group of the lockfile. + Specifying `--categories` to override which categories to check against. + Pass `--use-installed` to get the prior behavior of checking the packages actually installed into the environment. [#5600](https://github.com/pypa/pipenv/issues/5600) + +## Bug Fixes + +- Fix regression with detection of `CI` env variable being set to something other than a truthy value. 
[#5554](https://github.com/pypa/pipenv/issues/5554) +- Fix `--categories` argument inconsistency between requirements command and install/sync by allowing comma separated values or spaces. [#5570](https://github.com/pypa/pipenv/issues/5570) +- Use Nushell overlays when running `pipenv shell`. [#5603](https://github.com/pypa/pipenv/issues/5603) + +## Vendored Libraries + +- Vendor in the `pip==23.0` release. [#5586](https://github.com/pypa/pipenv/issues/5586) +- Vendor in `pip==23.0.1` minor pt release. Updates `pythonfinder==1.3.2`. [#5614](https://github.com/pypa/pipenv/issues/5614) + +## Improved Documentation + +- Make some improvements to the contributing guide. [#5611](https://github.com/pypa/pipenv/issues/5611) + +# 2023.2.4 (2023-02-04) + +## Bug Fixes + +- Fix overwriting of output in verbose mode [#5530](https://github.com/pypa/pipenv/issues/5530) +- Fix for resolution error when direct url includes an extras. [#5536](https://github.com/pypa/pipenv/issues/5536) + +## Removals and Deprecations + +- Remove pytest-pypi package since it's not used anymore [#5556](https://github.com/pypa/pipenv/issues/5556) +- Remove deprecated --three flag from the CLI. [#5576](https://github.com/pypa/pipenv/issues/5576) + +# 2022.12.19 (2022-12-19) + +## Bug Fixes + +- Fix for `requirementslib` hanging during install of remote wheels files. [#5546](https://github.com/pypa/pipenv/issues/5546) + +# 2022.12.17 (2022-12-17) + +## Bug Fixes + +- virtualenv creation no longer uses `--creator=venv` by default; introduced two environment variables: + `PIPENV_VIRTUALENV_CREATOR` -- May be specified to instruct virtualenv which `--creator=` to use. + `PIPENV_VIRTUALENV_COPIES` -- When specified as truthy, instructs virtualenv to not use symlinks. [#5477](https://github.com/pypa/pipenv/issues/5477) +- Fix regression where `path` is not propagated to the `Pipfile.lock`. [#5479](https://github.com/pypa/pipenv/issues/5479) +- Solve issue where null markers were getting added to lock file when extras were provided. [#5486](https://github.com/pypa/pipenv/issues/5486) +- Fix: `update --outdated` raises NonExistentKey with outdated dev packages [#5540](https://github.com/pypa/pipenv/issues/5540) + +## Vendored Libraries + +- Vendor in `pip==22.3.1` which is currently the latest version of `pip`. [#5520](https://github.com/pypa/pipenv/issues/5520) +- - Bump version of requirementslib to 2.2.1 + - Bump version of vistir to 0.7.5 + - Bump version of colorama to 0.4.6 [#5522](https://github.com/pypa/pipenv/issues/5522) +- Bump plette version to 0.4.4 [#5539](https://github.com/pypa/pipenv/issues/5539) + +# 2022.11.30 (2022-11-30) + +## Bug Fixes + +- Fix regression: pipenv does not sync indexes to lockfile. [#5508](https://github.com/pypa/pipenv/issues/5508) + +# 2022.11.25 (2022-11-24) + +## Bug Fixes + +- Solving issue where `pipenv check` command has been broken in the published wheel distribution. [#5493](https://github.com/pypa/pipenv/issues/5493) + +# 2022.11.24 (2022-11-24) + +## Bug Fixes + +- Stop building universal wheels since Python 2 is no longer supported. [#5496](https://github.com/pypa/pipenv/issues/5496) + +# 2022.11.23 (2022-11-23) + +## Features & Improvements + +- Find nushell activate scripts. [#5470](https://github.com/pypa/pipenv/issues/5470) + +## Vendored Libraries + +- - Drop unused code from cerberus + - Drop unused module wheel [#5467](https://github.com/pypa/pipenv/issues/5467) +- - Replace yaspin spinner with rich spinner. 
+ - Bump vistir version to 0.7.4 [#5468](https://github.com/pypa/pipenv/issues/5468) +- Bump version of requirementslib to 2.2.0 + Drop yaspin which is no longer used. + Bump vistir to version 0.7.4 + Remove parse. + Remove termcolor. + Remove idna. [#5481](https://github.com/pypa/pipenv/issues/5481) + +# 2022.11.11 (2022-11-11) + +## Bug Fixes + +- Fix regression of lock generation that caused the keep-outdated behavior to be default. [#5456](https://github.com/pypa/pipenv/issues/5456) + +# 2022.11.5 (2022-11-05) + +## Bug Fixes + +- Rollback the change in version of `colorama` due to regressions in core functionality. [#5459](https://github.com/pypa/pipenv/issues/5459) + +# 2022.11.4 (2022-11-04) + +## Features & Improvements + +- Allow pipenv settings to be explicitly disabled more easily by assigning to the environment variable a falsy value. [#5451](https://github.com/pypa/pipenv/issues/5451) + +## Bug Fixes + +- Provide an install iteration per index when `install_search_all_sources` is `false` (default behavior). + This fixes regression where install phase was using unexpected index after updating `pip==22.3` [#5444](https://github.com/pypa/pipenv/issues/5444) + +## Vendored Libraries + +- Drop tomli, which is not used anymore. + Bump attrs version see #5449. + Drop distlib, colorama and platformdirs - use the ones from pip.\_vendor. [#5450](https://github.com/pypa/pipenv/issues/5450) + +# 2022.10.25 (2022-10-25) + +## Features & Improvements + +- Add support to export requirements file for a specified set of categories. [#5431](https://github.com/pypa/pipenv/issues/5431) + +## Vendored Libraries + +- Remove appdirs.py in favor of platformdirs. [#5420](https://github.com/pypa/pipenv/issues/5420) + +## Removals and Deprecations + +- Remove usage of vistir.cmdparse in favor of pipenv.cmdparse [#5419](https://github.com/pypa/pipenv/issues/5419) + +# 2022.10.12 (2022-10-12) + +## Improved Documentation + +- Update pipenv docs for with example for callabale package functions in Pipfile scripts [#5396](https://github.com/pypa/pipenv/issues/5396) + +# 2022.10.11 (2022-10-11) + +## Bug Fixes + +- Revert decision to change the default isolation level because it caused problems with existing workflows; solution is to recommend users that have issues requiring pre-requisites to pass --extra-pip-args="--no-build-isolation" in their install or sync commands. [#5399](https://github.com/pypa/pipenv/issues/5399) + +# 2022.10.10 (2022-10-10) + +## Features & Improvements + +- Add ability for callable scripts in Pipfile under \[scripts\]. Callables can now be added like: `<pathed.module>:<func>` and can also take arguments. For example: `func = {call = "package.module:func('arg1', 'arg2')"}` then this can be activated in the shell with `pipenv run func` [#5294](https://github.com/pypa/pipenv/issues/5294) + +## Bug Fixes + +- Fixes regression from `2022.10.9` where `Pipfile` with `pipenv` section began generating new hash, + and also fix regression where lock phase did not update the hash value. [#5394](https://github.com/pypa/pipenv/issues/5394) + +# 2022.10.9 (2022-10-09) + +## Behavior Changes + +- New pipfiles show python_full_version under \[requires\] if specified. Previously creating a new pipenv project would only specify in the Pipfile the major and minor version, i.e. "python_version = 3.7". Now if you create a new project with a fully named python version it will record both in the Pipfile. 
So: "python_version = 3.7" and "python_full_version = 3.7.2" [#5345](https://github.com/pypa/pipenv/issues/5345) + +## Relates to dev process changes + +- Silence majority of pytest.mark warnings by registering custom marks. Can view a list of custom marks by running `pipenv run pytest --markers` + +# 2022.10.4 (2022-10-04) + +## Bug Fixes + +- Use `--creator=venv` when creating virtual environments to avoid issue with sysconfig `posix_prefix` on some systems. [#5075](https://github.com/pypa/pipenv/issues/5075) +- Prefer to use the lockfile sources if available during the install phase. [#5380](https://github.com/pypa/pipenv/issues/5380) + +## Vendored Libraries + +- Drop vendored six - we no longer depend on this library, as we migrated from pipfile to plette. [#5187](https://github.com/pypa/pipenv/issues/5187) + +# 2022.9.24 (2022-09-24) + +## Bug Fixes + +- Update `requirementslib==2.0.3` to always evaluate the requirement markers fresh (without lru_cache) to fix marker determinism issue. [#4660](https://github.com/pypa/pipenv/issues/4660) + +# 2022.9.21 (2022-09-21) + +## Bug Fixes + +- Fix regression to `install --skip-lock` with update to `plette`. [#5368](https://github.com/pypa/pipenv/issues/5368) + +# 2022.9.20 (2022-09-20) + +## Behavior Changes + +- Remove usage of pipfile module in favour of Plette. + pipfile is not actively maintained anymore. Plette is actively maintained, + and has stricter checking of the Pipfile and Pipfile.lock. As a result, + a Pipfile with unnamed package indices will fail to lock. If a Pipfile + was hand crafted and the source is anonymous, an error will be thrown. + The solution is simple: add a name to your index, e.g., replace: + + ``` + [[source]] + url = "https://pypi.acme.com/simple" + verify_ssl = true + ``` + + With: + + ``` + [[source]] + url = "https://pypi.acme.com/simple" + verify_ssl = true + name = acmes_private_index `#5339 <https://github.com/pypa/pipenv/issues/5339>`_ + ``` + +## Bug Fixes + +- Modernize `pipenv` path patch with `importlib.util` to eliminate import of `pkg_resources` [#5349](https://github.com/pypa/pipenv/issues/5349) + +## Vendored Libraries + +- Remove iso8601 from vendored packages since it was not used. [#5346](https://github.com/pypa/pipenv/issues/5346) + +# 2022.9.8 (2022-09-08) + +## Features & Improvements + +- It is now possible to supply additional arguments to `pip` install by supplying `--extra-pip-args="<arg1> <arg2>"` + See the updated documentation `Supplying additional arguments to pip` for more details. [#5283](https://github.com/pypa/pipenv/issues/5283) + +## Bug Fixes + +- Make editable detection better because not everyone specifies editable entry in the Pipfile for local editable installs. [#4784](https://github.com/pypa/pipenv/issues/4784) +- Add error handling for when the installed package setup.py does not contain valid markers. [#5329](https://github.com/pypa/pipenv/issues/5329) +- Load the dot env earlier so that `PIPENV_CUSTOM_VENV_NAME` is more useful across projects. [#5334](https://github.com/pypa/pipenv/issues/5334) + +## Vendored Libraries + +- Bump version of shellingham to support nushell. [#5336](https://github.com/pypa/pipenv/issues/5336) +- Bump plette to version v0.3.0 [#5337](https://github.com/pypa/pipenv/issues/5337) +- Bump version of pipdeptree [#5343](https://github.com/pypa/pipenv/issues/5343) + +## Removals and Deprecations + +- Add deprecation warning to the --three flag. Pipenv now uses python3 by default. 
[#5328](https://github.com/pypa/pipenv/issues/5328) + +## Relates to dev process changes + +- Convert the test runner to use `pypiserver` as a standalone process for all tests that reference internal `pypi` artifacts. + General refactoring of some test cases to create more variety in packages selected--preferring lighter weight packages--in existing test cases. + +# 2022.9.4 (2022-09-04) + +## Bug Fixes + +- Fix the issue from `2022.9.2` where tarball URL packages were being skipped on batch_install. [#5306](https://github.com/pypa/pipenv/issues/5306) + +# 2022.9.2 (2022-09-02) + +## Bug Fixes + +- Fix issue where unnamed constraints were provided but which are not allowed by `pip` resolver. [#5273](https://github.com/pypa/pipenv/issues/5273) + +# 2022.8.31 (2022-08-31) + +## Features & Improvements + +- Performance optimization to `batch_install` results in a faster and less CPU intensive `pipenv sync` or `pipenv install` experience. [#5301](https://github.com/pypa/pipenv/issues/5301) + +## Bug Fixes + +- `pipenv` now uses a `NamedTemporaryFile` for resolver constraints and drops internal env var `PIPENV_PACKAGES`. [#4925](https://github.com/pypa/pipenv/issues/4925) + +## Removals and Deprecations + +- Remove no longer used method `which_pip`. [#5314](https://github.com/pypa/pipenv/issues/5314) +- Drop progress bar file due to recent performance optimization to combine `batch_install` requirements in at most two invocations of `pip install`. + To see progress of install pass `--verbose` flag and `pip` progress will be output in realtime. [#5315](https://github.com/pypa/pipenv/issues/5315) + +# 2022.8.30 (2022-08-30) + +## Bug Fixes + +- Fix an issue when using `pipenv install --system` on systems that have the `python` executable pointing to Python 2 and a Python 3 executable being `python3`. [#5296](https://github.com/pypa/pipenv/issues/5296) +- Sorting `constraints` before resolving, which fixes `pipenv lock` generating nondeterministic environment markers. [#5299](https://github.com/pypa/pipenv/issues/5299) +- Fix #5273, use our own method for checking if a package is a valid constraint. [#5309](https://github.com/pypa/pipenv/issues/5309) + +## Vendored Libraries + +- Vendor in `requirementslib==2.0.1` which fixes issue with local install not marked editable, and vendor in `vistir==0.6.1` which drops python2 support. + Drops `orderedmultidict` from vendoring. [#5308](https://github.com/pypa/pipenv/issues/5308) + +# 2022.8.24 (2022-08-24) + +## Bug Fixes + +- Remove eager and unnecessary importing of `setuptools` and `pkg_resources` to avoid conflict upgrading `setuptools`. + Roll back `sysconfig` patch of `pip` because it was problematic for some `--system` commands. [#5228](https://github.com/pypa/pipenv/issues/5228) + +## Vendored Libraries + +- Vendor in `requirementslib==2.0.0` and drop `pip-shims` entirely. [#5228](https://github.com/pypa/pipenv/issues/5228) +- Vendor in `pythonfinder==1.3.1` [#5292](https://github.com/pypa/pipenv/issues/5292) + +# 2022.8.19 (2022-08-19) + +## Bug Fixes + +- Fix issue where resolver is provided with `install_requires` constraints from `setup.py` that depend on editable dependencies and could not resolve them. [#5271](https://github.com/pypa/pipenv/issues/5271) +- Fix for `pipenv lock` failing for packages with extras as of `2022.8.13`. [#5274](https://github.com/pypa/pipenv/issues/5274) +- Revert the exclusion of `BAD_PACKAGES` from `batch_install` in order for `pipenv` to install specific versions of `setuptools`. 
+ To prevent issue upgrading `setuptools` this patches `_USE_SYSCONFIG_DEFAULT` to use `sysconfig` for `3.7` and above whereas `pip` default behavior was `3.10` and above. [#5275](https://github.com/pypa/pipenv/issues/5275) + +# 2022.8.17 (2022-08-17) + +## Bug Fixes + +- Fix "The Python interpreter can't be found" error when running `pipenv install --system` with a python3 but no python. [#5261](https://github.com/pypa/pipenv/issues/5261) +- Revise pip import patch to include only `pipenv` from site-packages and removed `--ignore-installed` argument from pip install in order to fix regressions with `--use-site-packages`. [#5265](https://github.com/pypa/pipenv/issues/5265) + +# 2022.8.15 (2022-08-15) + +## Bug Fixes + +- `pip_install` method was using a different way of finding the python executable than other `pipenv` commands, which caused an issue with skipping package installation if it was already installed in site-packages. [#5254](https://github.com/pypa/pipenv/issues/5254) + +# 2022.8.14 (2022-08-14) + +## Bug Fixes + +- Removed `packaging` library from `BAD_PACKAGES` constant to allow it to be installed, which fixes regression from `pipenv==2022.8.13`. [#5247](https://github.com/pypa/pipenv/issues/5247) + +# 2022.8.13 (2022-08-13) + +## Bug Fixes + +- If environment variable `CI` or `TF_BUILD` is set but does not evaluate to `False` it is now treated as `True`. [#5128](https://github.com/pypa/pipenv/issues/5128) +- Fix auto-complete crashing on 'install' and 'uninstall' keywords [#5214](https://github.com/pypa/pipenv/issues/5214) +- Address remaining `pipenv` commands that were still referencing the user or system installed `pip` to use the vendored `pip` internal to `pipenv`. [#5229](https://github.com/pypa/pipenv/issues/5229) +- Use `packages` as constraints when locking `dev-packages` in Pipfile. + Use `packages` as constraints when installing new `dev-packages`. [#5234](https://github.com/pypa/pipenv/issues/5234) + +## Vendored Libraries + +- Vendor in minor `pip` update `22.2.2` [#5230](https://github.com/pypa/pipenv/issues/5230) + +## Improved Documentation + +- Add documentation for environment variables the configure pipenv. [#5235](https://github.com/pypa/pipenv/issues/5235) + +## Removals and Deprecations + +- The deprecated way of generating requirements `install -r` or `lock -r` has been removed in favor of the `pipenv requirements` command. [#5200](https://github.com/pypa/pipenv/issues/5200) + +# 2022.8.5 (2022-08-05) + +## Features & Improvements + +- support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs. [#4974](https://github.com/pypa/pipenv/issues/4974) + +## Bug Fixes + +- Remove usages of `pip_shims` from the non vendored `pipenv` code, but retain initialization for `requirementslib` still has usages. [#5204](https://github.com/pypa/pipenv/issues/5204) +- Fix case sensitivity of color name `red` in exception when getting hashes from pypi in `_get_hashes_from_pypi`. [#5206](https://github.com/pypa/pipenv/issues/5206) +- Write output from `subprocess_run` directly to `stdout` instead of creating temporary file. + Remove deprecated `distutils.sysconfig`, use `sysconfig`. [#5210](https://github.com/pypa/pipenv/issues/5210) + +## Vendored Libraries + +- - Rename patched `notpip` to `pip` in order to be clear that its a patched version of pip. + - Remove the part of \_post_pip_import.patch that overrode the standalone pip to be the user installed pip, now we fully rely on our vendored and patched `pip`, even for all types of installs. 
+ - Vendor in the next newest version of `pip==22.2` + - Modify patch for `pipdeptree` to not use `pip-shims` [#5188](https://github.com/pypa/pipenv/issues/5188) + - Remove vendored `urllib3` in favor of using it from vendored version in `pip._vendor` [#5215](https://github.com/pypa/pipenv/issues/5215) + +## Removals and Deprecations + +- Remove tests that have been for a while been marked skipped and are no longer relevant. [#5165](https://github.com/pypa/pipenv/issues/5165) + +# 2022.7.24 (2022-07-24) + +## Bug Fixes + +- Re-enabled three installs tests again on the Windows CI as recent refactor work has fixed them. [#5064](https://github.com/pypa/pipenv/issues/5064) +- Support ANSI `NO_COLOR` environment variable and deprecate `PIPENV_COLORBLIND` variable, which will be removed after this release. [#5158](https://github.com/pypa/pipenv/issues/5158) +- Fixed edge case where a non-editable file, url or vcs would overwrite the value `no_deps` for all other requirements in the loop causing a retry condition. [#5164](https://github.com/pypa/pipenv/issues/5164) +- Vendor in latest `requirementslib` for fix to lock when using editable VCS module with specific `@` git reference. [#5179](https://github.com/pypa/pipenv/issues/5179) + +## Vendored Libraries + +- Remove crayons and replace with click.secho and click.styles per <https://github.com/pypa/pipenv/issues/3741> [#3741](https://github.com/pypa/pipenv/issues/3741) +- Vendor in latest version of `pip==22.1.2` which upgrades `pipenv` from `pip==22.0.4`. + Vendor in latest version of `requirementslib==1.6.7` which includes a fix for tracebacks on encountering Annotated variables. + Vendor in latest version of `pip-shims==0.7.3` such that imports could be rewritten to utilize `packaging` from vendor'd `pip`. + Drop the `packaging` requirement from the `vendor` directory in `pipenv`. [#5147](https://github.com/pypa/pipenv/issues/5147) +- Remove unused vendored dependency `normailze-charset`. [#5161](https://github.com/pypa/pipenv/issues/5161) +- Remove obsolete package `funcsigs`. [#5168](https://github.com/pypa/pipenv/issues/5168) +- Bump vendored dependency `pyparsing==3.0.9`. [#5170](https://github.com/pypa/pipenv/issues/5170) + +# 2022.7.4 (2022-07-04) + +## Behavior Changes + +- Adjust `pipenv requirements` to add markers and add an `--exclude-markers` option to allow the exclusion of markers. [#5092](https://github.com/pypa/pipenv/issues/5092) + +## Bug Fixes + +- Stopped expanding environment variables when using `pipenv requirements` [#5134](https://github.com/pypa/pipenv/issues/5134) + +## Vendored Libraries + +- Depend on `requests` and `certifi` from vendored `pip` and remove them as explicit vendor dependencies. [#5000](https://github.com/pypa/pipenv/issues/5000) +- Vendor in the latest version of `requirementslib==1.6.5` which includes bug fixes for beta python versions, projects with an at sign (@) in the path, and a `setuptools` deprecation warning. [#5132](https://github.com/pypa/pipenv/issues/5132) + +## Relates to dev process changes + +- Switch from using type comments to type annotations. + +# 2022.5.3.dev0 (2022-06-07) + +## Bug Fixes + +- Adjust pipenv to work with the newly added `venv` install scheme in Python. + First check if `venv` is among the available install schemes, and use it if it is. Otherwise fall back to the `nt` or `posix_prefix` install schemes as before. This should produce no change for environments where the install schemes were not redefined. 
[#5096](https://github.com/pypa/pipenv/issues/5096) + +# 2022.5.2 (2022-05-02) + +## Bug Fixes + +- Fixes issue of `pipenv lock -r` command printing to stdout instead of stderr. [#5091](https://github.com/pypa/pipenv/issues/5091) + +# 2022.4.30 (2022-04-30) + +## Bug Fixes + +- Fixes issue of `requirements` command problem by modifying to print `-e` and path of the editable package. [#5070](https://github.com/pypa/pipenv/issues/5070) +- Revert specifier of `setuptools` requirement in `setup.py` back to what it was in order to fix `FileNotFoundError: [Errno 2]` issue report. [#5075](https://github.com/pypa/pipenv/issues/5075) +- Fixes issue of requirements command where git requirements cause the command to fail, solved by using existing convert_deps_to_pip function. [#5076](https://github.com/pypa/pipenv/issues/5076) + +## Vendored Libraries + +- Vendor in `requirementslib==1.6.4` to Fix `SetuptoolsDeprecationWarning` `setuptools.config.read_configuration` became deprecated. [#5081](https://github.com/pypa/pipenv/issues/5081) + +## Removals and Deprecations + +- Remove more usage of misc functions of vistir. Many of this function are available in the STL or in another dependency of pipenv. [#5078](https://github.com/pypa/pipenv/issues/5078) + +# 2022.4.21 (2022-04-21) + +## Removals and Deprecations + +- Updated setup.py to remove support for python 3.6 from built `pipenv` packages' Metadata. [#5065](https://github.com/pypa/pipenv/issues/5065) + +# 2022.4.20 (2022-04-20) + +## Features & Improvements + +- Added new Pipenv option `install_search_all_sources` that allows installation of packages from an + existing `Pipfile.lock` to search all defined indexes for the constrained package version and hash signatures. [#5041](https://github.com/pypa/pipenv/issues/5041) + +## Bug Fixes + +- allow the user to disable the `no_input` flag, so the use of e.g Google Artifact Registry is possible. [#4706](https://github.com/pypa/pipenv/issues/4706) +- Fixes case where packages could fail to install and the exit code was successful. [#5031](https://github.com/pypa/pipenv/issues/5031) + +## Vendored Libraries + +- Updated vendor version of `pip` from `21.2.2` to `22.0.4` which fixes a number of bugs including + several reports of pipenv locking for an infinite amount of time when using certain package constraints. + This also drops support for python 3.6 as it is EOL and support was removed in pip 22.x [#4995](https://github.com/pypa/pipenv/issues/4995) + +## Removals and Deprecations + +- Removed the vendor dependency `more-itertools` as it was originally added for `zipp`, which since stopped using it. [#5044](https://github.com/pypa/pipenv/issues/5044) +- Removed all usages of `pipenv.vendor.vistir.compat.fs_str`, since this function was used for PY2-PY3 compatibility and is no longer needed. [#5062](https://github.com/pypa/pipenv/issues/5062) + +## Relates to dev process changes + +- Added pytest-cov and basic configuration to the project for generating html testing coverage reports. +- Make all CI jobs run only after the lint stage. Also added a makefile target for vendoring the packages. + +# 2022.4.8 (2022-04-08) + +## Features & Improvements + +- Implements a `pipenv requirements` command which generates a requirements.txt compatible output without locking. [#4959](https://github.com/pypa/pipenv/issues/4959) +- Internal to pipenv, the utils.py was split into a utils module with unused code removed. 
[#4992](https://github.com/pypa/pipenv/issues/4992) + +## Bug Fixes + +- Pipenv will now ignore `.venv` in the project when `PIPENV_VENV_IN_PROJECT` variable is False. + Unset variable maintains the existing behavior of preferring to use the project's `.venv` should it exist. [#2763](https://github.com/pypa/pipenv/issues/2763) +- Fix an edge case of hash collection in index restricted packages whereby the hashes for some packages would + be missing from the `Pipfile.lock` following package index restrictions added in `pipenv==2022.3.23`. [#5023](https://github.com/pypa/pipenv/issues/5023) + +## Improved Documentation + +- Pipenv CLI documentation generation has been fixed. It had broken when `click` was vendored into the project in + `2021.11.9` because by default `sphinx-click` could no longer determine the CLI inherited from click. [#4778](https://github.com/pypa/pipenv/issues/4778) +- Improve documentation around extra indexes and index restricted packages. [#5022](https://github.com/pypa/pipenv/issues/5022) + +## Removals and Deprecations + +- Removes the optional `install` argument `--extra-index-url` as it was not compatible with index restricted packages. + Using the `--index` argument is the correct way to specify a package should be pulled from the non-default index. [#5022](https://github.com/pypa/pipenv/issues/5022) + +## Relates to dev process changes + +- Added code linting using pre-commit-hooks, black, flake8, isort, pygrep-hooks, news-fragments and check-manifest. + Very similar to pip's configuration; adds a towncrier news type `process` for changes to development processes. + +# 2022.3.28 (2022-03-27) + +## Bug Fixes + +- Environment variables were not being loaded when the `--quiet` flag was set [#5010](https://github.com/pypa/pipenv/issues/5010) +- It would appear that `requirementslib` was not fully specifying the subdirectory to `build_pep517`, + and when a new version of `setuptools` was released, the test `test_lock_nested_vcs_direct_url` + broke, indicating the Pipfile.lock no longer contained the extra dependencies that should have been resolved. + This regression affected `pipenv>=2021.11.9` but has been fixed by a patch to `requirementslib`. [#5019](https://github.com/pypa/pipenv/issues/5019) + +## Vendored Libraries + +- Vendor in pip==21.2.4 (from 21.2.2) in order to bring in requested bug fix for python3.6. Note: support for 3.6 will be dropped in a subsequent release. [#5008](https://github.com/pypa/pipenv/issues/5008) + +# 2022.3.24 (2022-03-23) + +## Features & Improvements + +- It is now possible to silence the `Loading .env environment variables` message on `pipenv run` + with the `--quiet` flag or the `PIPENV_QUIET` environment variable. [#4027](https://github.com/pypa/pipenv/issues/4027) + +## Bug Fixes + +- Fixes issue with new index safety restriction, whereby an unnamed extra sources index + caused an error to be thrown during install. [#5002](https://github.com/pypa/pipenv/issues/5002) + +- The text `Loading .env environment variables...` has been switched back to stderr so as not to + break requirements.txt generation. Also it only prints now when a `.env` file is actually present. [#5003](https://github.com/pypa/pipenv/issues/5003) + +# 2022.3.23 (2022-03-22) + +## Features & Improvements + +- Use environment variable `PIPENV_SKIP_LOCK` to control the behaviour of lock skipping. 
[#4797](https://github.com/pypa/pipenv/issues/4797) +- New CLI command `verify`, checks the Pipfile.lock is up-to-date [#4893](https://github.com/pypa/pipenv/issues/4893) + +## Behavior Changes + +- Pattern expansion for arguments was disabled on Windows. [#4935](https://github.com/pypa/pipenv/issues/4935) + +## Bug Fixes + +- Python versions on Windows can now be installed automatically through pyenv-win [#4525](https://github.com/pypa/pipenv/issues/4525) +- Patched our vendored Pip to fix: Pipenv Lock (Or Install) Does Not Respect Index Specified For A Package. [#4637](https://github.com/pypa/pipenv/issues/4637) +- If `PIP_TARGET` is set to environment variables, Refer specified directory for calculate delta, instead default directory [#4775](https://github.com/pypa/pipenv/issues/4775) +- Remove remaining mention of python2 and --two flag from codebase. [#4938](https://github.com/pypa/pipenv/issues/4938) +- Use `CI` environment value, over mere existence of name [#4944](https://github.com/pypa/pipenv/issues/4944) +- Environment variables from dot env files are now properly expanded when included in scripts. [#4975](https://github.com/pypa/pipenv/issues/4975) + +## Vendored Libraries + +- Updated vendor version of `pythonfinder` from `1.2.9` to `1.2.10` which fixes a bug with WSL + (Windows Subsystem for Linux) when a path can not be read and Permission Denied error is encountered. [#4976](https://github.com/pypa/pipenv/issues/4976) + +## Removals and Deprecations + +- Removes long broken argument `--code` from `install` and `--unused` from `check`. + Check command no longer takes in arguments to ignore. + Removed the vendored dependencies: `pipreqs` and `yarg` [#4998](https://github.com/pypa/pipenv/issues/4998) + +# 2022.1.8 (2022-01-08) + +## Bug Fixes + +- Remove the extra parentheses around the venv prompt. [#4877](https://github.com/pypa/pipenv/issues/4877) +- Fix a bug of installation fails when extra index url is given. [#4881](https://github.com/pypa/pipenv/issues/4881) +- Fix regression where lockfiles would only include the hashes for releases for the platform generating the lockfile [#4885](https://github.com/pypa/pipenv/issues/4885) +- Fix the index parsing to reject illegal requirements.txt. [#4899](https://github.com/pypa/pipenv/issues/4899) + +# 2021.11.23 (2021-11-23) + +## Bug Fixes + +- Update `charset-normalizer` from `2.0.3` to `2.0.7`, this fixes an import error on Python 3.6. [#4865](https://github.com/pypa/pipenv/issues/4865) +- Fix a bug of deleting a virtualenv that is not managed by Pipenv. [#4867](https://github.com/pypa/pipenv/issues/4867) +- Fix a bug that source is not added to `Pipfile` when index url is given with `pipenv install`. [#4873](https://github.com/pypa/pipenv/issues/4873) + +# 2021.11.15 (2021-11-15) + +## Bug Fixes + +- Return an empty dict when `PIPENV_DONT_LOAD_ENV` is set. [#4851](https://github.com/pypa/pipenv/issues/4851) +- Don't use `sys.executable` when inside an activated venv. [#4852](https://github.com/pypa/pipenv/issues/4852) + +## Vendored Libraries + +- Drop the vendored `jinja2` dependency as it is not needed any more. [#4858](https://github.com/pypa/pipenv/issues/4858) +- Update `click` from `8.0.1` to `8.0.3`, to fix a problem with bash completion. [#4860](https://github.com/pypa/pipenv/issues/4860) +- Drop unused vendor `chardet`. [#4862](https://github.com/pypa/pipenv/issues/4862) + +## Improved Documentation + +- Fix the documentation to reflect the fact that special characters must be percent-encoded in the URL. 
[#4856](https://github.com/pypa/pipenv/issues/4856) + +# 2021.11.9 (2021-11-09) + +## Features & Improvements + +- Replace `click-completion` with `click`'s own completion implementation. [#4786](https://github.com/pypa/pipenv/issues/4786) + +## Bug Fixes + +- Fix a bug that `pipenv run` doesn't set environment variables correctly. [#4831](https://github.com/pypa/pipenv/issues/4831) +- Fix a bug that certifi can't be loaded within `notpip`'s vendor library. This makes several objects of `pip` fail to be imported. [#4833](https://github.com/pypa/pipenv/issues/4833) +- Fix a bug that `3.10.0` can be found be python finder. [#4837](https://github.com/pypa/pipenv/issues/4837) + +## Vendored Libraries + +- Update `pythonfinder` from `1.2.8` to `1.2.9`. [#4837](https://github.com/pypa/pipenv/issues/4837) + +# 2021.11.5.post0 (2021-11-05) + +## Bug Fixes + +- Fix a regression that `pipenv shell` fails to start a subshell. [#4828](https://github.com/pypa/pipenv/issues/4828) +- Fix a regression that `pip_shims` object isn't imported correctly. [#4829](https://github.com/pypa/pipenv/issues/4829) + +# 2021.11.5 (2021-11-05) + +## Features & Improvements + +- Avoid sharing states but create project objects on demand. So that most integration test cases are able to switch to a in-process execution method. [#4757](https://github.com/pypa/pipenv/issues/4757) +- Shell-quote `pip` commands when logging. [#4760](https://github.com/pypa/pipenv/issues/4760) + +## Bug Fixes + +- Ignore empty .venv in rood dir and create project name base virtual environment [#4790](https://github.com/pypa/pipenv/issues/4790) + +## Vendored Libraries + +- Update vendored dependencies + \- `attrs` from `20.3.0` to `21.2.0` + \- `cerberus` from `1.3.2` to `1.3.4` + \- `certifi` from `2020.11.8` to `2021.5.30` + \- `chardet` from `3.0.4` to `4.0.0` + \- `click` from `7.1.2` to `8.0.1` + \- `distlib` from `0.3.1` to `0.3.2` + \- `idna` from `2.10` to `3.2` + \- `importlib-metadata` from `2.0.0` to `4.6.1` + \- `importlib-resources` from `3.3.0` to `5.2.0` + \- `jinja2` from `2.11.2` to `3.0.1` + \- `markupsafe` from `1.1.1` to `2.0.1` + \- `more-itertools` from `5.0.0` to `8.8.0` + \- `packaging` from `20.8` to `21.0` + \- `pep517` from `0.9.1` to `0.11.0` + \- `pipdeptree` from `1.0.0` to `2.0.0` + \- `ptyprocess` from `0.6.0` to `0.7.0` + \- `python-dateutil` from `2.8.1` to `2.8.2` + \- `python-dotenv` from `0.15.0` to `0.19.0` + \- `pythonfinder` from `1.2.5` to `1.2.8` + \- `requests` from `2.25.0` to `2.26.0` + \- `shellingham` from `1.3.2` to `1.4.0` + \- `six` from `1.15.0` to `1.16.0` + \- `tomlkit` from `0.7.0` to `0.7.2` + \- `urllib3` from `1.26.1` to `1.26.6` + \- `zipp` from `1.2.0` to `3.5.0` + + Add new vendored dependencies + \- `charset-normalizer 2.0.3` + \- `termcolor 1.1.0` + \- `tomli 1.1.0` + \- `wheel 0.36.2` [#4747](https://github.com/pypa/pipenv/issues/4747) + +- Drop the dependencies for Python 2.7 compatibility purpose. [#4751](https://github.com/pypa/pipenv/issues/4751) + +- Switch the dependency resolver from `pip-tools` to `pip`. + + Update vendor libraries: + \- Update `requirementslib` from `1.5.16` to `1.6.1` + \- Update `pip-shims` from `0.5.6` to `0.6.0` + \- New vendor `platformdirs 2.4.0` [#4759](https://github.com/pypa/pipenv/issues/4759) + +## Improved Documentation + +- remove prefixes on install commands for easy copy/pasting [#4792](https://github.com/pypa/pipenv/issues/4792) +- Officially drop support for Python 2.7 and Python 3.5. 
[#4261](https://github.com/pypa/pipenv/issues/4261) + +# 2021.5.29 (2021-05-29) + +## Bug Fixes + +- Fix a bug where passing --skip-lock when PIPFILE has no \[SOURCE\] section throws the error: "tomlkit.exceptions.NonExistentKey: 'Key "source" does not exist.'" [#4141](https://github.com/pypa/pipenv/issues/4141) +- Fix bug where environment wouldn't activate in paths containing & and \$ symbols [#4538](https://github.com/pypa/pipenv/issues/4538) +- Fix a bug that `importlib-metadata` from the project's dependencies conflicts with that from `pipenv`'s. [#4549](https://github.com/pypa/pipenv/issues/4549) +- Fix a bug where `pep508checker.py` did not expect double-digit Python minor versions (e.g. "3.10"). [#4602](https://github.com/pypa/pipenv/issues/4602) +- Fix bug where environment wouldn't activate in paths containing () and \[\] symbols [#4615](https://github.com/pypa/pipenv/issues/4615) +- Fix bug preventing use of pipenv lock --pre [#4642](https://github.com/pypa/pipenv/issues/4642) + +## Vendored Libraries + +- Update `packaging` from `20.4` to `20.8`. [#4591](https://github.com/pypa/pipenv/issues/4591) + +# 2020.11.15 (2020-11-15) + +## Features & Improvements + +- Support expanding environment variables in requirement URLs. [#3516](https://github.com/pypa/pipenv/issues/3516) +- Show warning message when a dependency is skipped in locking due to the mismatch of its markers. [#4346](https://github.com/pypa/pipenv/issues/4346) + +## Bug Fixes + +- Fix a bug that executable scripts with leading backslash can't be executed via `pipenv run`. [#4368](https://github.com/pypa/pipenv/issues/4368) +- Fix a bug that VCS dependencies always satisfy even if the ref has changed. [#4387](https://github.com/pypa/pipenv/issues/4387) +- Restrict the acceptable hash type to SHA256 only. [#4517](https://github.com/pypa/pipenv/issues/4517) +- Fix the output of `pipenv scripts` under Windows platform. [#4523](https://github.com/pypa/pipenv/issues/4523) +- Fix a bug that the resolver takes wrong section to validate constraints. [#4527](https://github.com/pypa/pipenv/issues/4527) + +## Vendored Libraries + +- Update vendored dependencies: + : - `colorama` from `0.4.3` to `0.4.4` + - `python-dotenv` from `0.10.3` to `0.15.0` + - `first` from `2.0.1` to `2.0.2` + - `iso8601` from `0.1.12` to `0.1.13` + - `parse` from `1.15.0` to `1.18.0` + - `pipdeptree` from `0.13.2` to `1.0.0` + - `requests` from `2.23.0` to `2.25.0` + - `idna` from `2.9` to `2.10` + - `urllib3` from `1.25.9` to `1.26.1` + - `certifi` from `2020.4.5.1` to `2020.11.8` + - `requirementslib` from `1.5.15` to `1.5.16` + - `attrs` from `19.3.0` to `20.3.0` + - `distlib` from `0.3.0` to `0.3.1` + - `packaging` from `20.3` to `20.4` + - `six` from `1.14.0` to `1.15.0` + - `semver` from `2.9.0` to `2.13.0` + - `toml` from `0.10.1` to `0.10.2` + - `cached-property` from `1.5.1` to `1.5.2` + - `yaspin` from `0.14.3` to `1.2.0` + - `resolvelib` from `0.3.0` to `0.5.2` + - `pep517` from `0.8.2` to `0.9.1` + - `zipp` from `0.6.0` to `1.2.0` + - `importlib-metadata` from `1.6.0` to `2.0.0` + - `importlib-resources` from `1.5.0` to `3.3.0` [#4533](https://github.com/pypa/pipenv/issues/4533) + +## Improved Documentation + +- Fix suggested pyenv setup to avoid using shimmed interpreter [#4534](https://github.com/pypa/pipenv/issues/4534) + +# 2020.11.4 (2020-11-04) + +## Features & Improvements + +- Add a new command `pipenv scripts` to display shortcuts from Pipfile. 
[#3686](https://github.com/pypa/pipenv/issues/3686) +- Retrieve package file hash from URL to accelerate the locking process. [#3827](https://github.com/pypa/pipenv/issues/3827) +- Add the missing `--system` option to `pipenv sync`. [#4441](https://github.com/pypa/pipenv/issues/4441) +- Add a new option pair `--header/--no-header` to `pipenv lock` command, + which adds a header to the generated requirements.txt [#4443](https://github.com/pypa/pipenv/issues/4443) + +## Bug Fixes + +- Fix a bug that percent encoded characters will be unquoted incorrectly in the file URL. [#4089](https://github.com/pypa/pipenv/issues/4089) +- Fix a bug where setting PIPENV_PYTHON to file path breaks environment name [#4225](https://github.com/pypa/pipenv/issues/4225) +- Fix a bug that paths are not normalized before comparison. [#4330](https://github.com/pypa/pipenv/issues/4330) +- Handle Python major and minor versions correctly in Pipfile creation. [#4379](https://github.com/pypa/pipenv/issues/4379) +- Fix a bug that non-wheel file requirements can be resolved successfully. [#4386](https://github.com/pypa/pipenv/issues/4386) +- Fix a bug that `pexept.exceptions.TIMEOUT` is not caught correctly because of the wrong import path. [#4424](https://github.com/pypa/pipenv/issues/4424) +- Fix a bug that compound TOML table is not parsed correctly. [#4433](https://github.com/pypa/pipenv/issues/4433) +- Fix a bug that invalid Python paths from Windows registry break `pipenv install`. [#4436](https://github.com/pypa/pipenv/issues/4436) +- Fix a bug that function calls in `setup.py` can't be parsed rightly. [#4446](https://github.com/pypa/pipenv/issues/4446) +- Fix a bug that dist-info inside `venv` directory will be mistaken as the editable package's metadata. [#4480](https://github.com/pypa/pipenv/issues/4480) +- Make the order of hashes in resolution result stable. [#4513](https://github.com/pypa/pipenv/issues/4513) + +## Vendored Libraries + +- Update `tomlkit` from `0.5.11` to `0.7.0`. [#4433](https://github.com/pypa/pipenv/issues/4433) +- Update `requirementslib` from `1.5.13` to `1.5.14`. [#4480](https://github.com/pypa/pipenv/issues/4480) + +## Improved Documentation + +- Discourage homebrew installation in installation guides. [#4013](https://github.com/pypa/pipenv/issues/4013) + +# 2020.8.13 (2020-08-13) + +## Bug Fixes + +- Fixed behaviour of `pipenv uninstall --all-dev`. + From now on it does not uninstall regular packages. [#3722](https://github.com/pypa/pipenv/issues/3722) +- Fix a bug that incorrect Python path will be used when `--system` flag is on. [#4315](https://github.com/pypa/pipenv/issues/4315) +- Fix falsely flagging a Homebrew installed Python as a virtual environment [#4316](https://github.com/pypa/pipenv/issues/4316) +- Fix a bug that `pipenv uninstall` throws an exception that does not exist. [#4321](https://github.com/pypa/pipenv/issues/4321) +- Fix a bug that Pipenv can't locate the correct file of special directives in `setup.cfg` of an editable package. [#4335](https://github.com/pypa/pipenv/issues/4335) +- Fix a bug that `setup.py` can't be parsed correctly when the assignment is type-annotated. [#4342](https://github.com/pypa/pipenv/issues/4342) +- Fix a bug that `pipenv graph` throws an exception that PipenvCmdError(cmd_string, c.out, c.err, return_code). [#4388](https://github.com/pypa/pipenv/issues/4388) +- Do not copy the whole directory tree of local file package. 
[#4403](https://github.com/pypa/pipenv/issues/4403) +- Correctly detect whether Pipenv in run under an activated virtualenv. [#4412](https://github.com/pypa/pipenv/issues/4412) + +## Vendored Libraries + +- Update `requirementslib` to `1.5.12`. [#4385](https://github.com/pypa/pipenv/issues/4385) +- - Update `requirements` to `1.5.13`. + - Update `pip-shims` to `0.5.3`. [#4421](https://github.com/pypa/pipenv/issues/4421) + +# 2020.6.2 (2020-06-02) + +## Features & Improvements + +- Pipenv will now detect existing `venv` and `virtualenv` based virtual environments more robustly. [#4276](https://github.com/pypa/pipenv/issues/4276) + +## Bug Fixes + +- `+` signs in URL authentication fragments will no longer be incorrectly replaced with space ( \`\` \`\` ) characters. [#4271](https://github.com/pypa/pipenv/issues/4271) +- Fixed a regression which caused Pipenv to fail when running under `/`. [#4273](https://github.com/pypa/pipenv/issues/4273) +- `setup.py` files with `version` variables read from `os.environ` are now able to be parsed successfully. [#4274](https://github.com/pypa/pipenv/issues/4274) +- Fixed a bug which caused Pipenv to fail to install packages in a virtual environment if those packages were already present in the system global environment. [#4276](https://github.com/pypa/pipenv/issues/4276) +- Fix a bug that caused non-specific versions to be pinned in `Pipfile.lock`. [#4278](https://github.com/pypa/pipenv/issues/4278) +- Corrected a missing exception import and invalid function call invocations in `pipenv.cli.command`. [#4286](https://github.com/pypa/pipenv/issues/4286) +- Fixed an issue with resolving packages with names defined by function calls in `setup.py`. [#4292](https://github.com/pypa/pipenv/issues/4292) +- Fixed a regression with installing the current directory, or `.`, inside a `venv` based virtual environment. [#4295](https://github.com/pypa/pipenv/issues/4295) +- Fixed a bug with the discovery of python paths on Windows which could prevent installation of environments during `pipenv install`. [#4296](https://github.com/pypa/pipenv/issues/4296) +- Fixed an issue in the `requirementslib` AST parser which prevented parsing of `setup.py` files for dependency metadata. [#4298](https://github.com/pypa/pipenv/issues/4298) +- Fix a bug where Pipenv doesn't realize the session is interactive [#4305](https://github.com/pypa/pipenv/issues/4305) + +## Vendored Libraries + +- Updated requirementslib to version `1.5.11`. [#4292](https://github.com/pypa/pipenv/issues/4292) +- Updated vendored dependencies: + : - **pythonfinder**: `1.2.2` => `1.2.4` + - **requirementslib**: `1.5.9` => `1.5.10` [#4302](https://github.com/pypa/pipenv/issues/4302) + +# 2020.5.28 (2020-05-28) + +## Features & Improvements + +- `pipenv install` and `pipenv sync` will no longer attempt to install satisfied dependencies during installation. [#3057](https://github.com/pypa/pipenv/issues/3057), + [#3506](https://github.com/pypa/pipenv/issues/3506) + +- Added support for resolution of direct-url dependencies in `setup.py` files to respect `PEP-508` style URL dependencies. [#3148](https://github.com/pypa/pipenv/issues/3148) + +- Added full support for resolution of all dependency types including direct URLs, zip archives, tarballs, etc. + + - Improved error handling and formatting. + - Introduced improved cross platform stream wrappers for better `stdout` and `stderr` consistency. 
[#3298](https://github.com/pypa/pipenv/issues/3298) + +- For consistency with other commands and the `--dev` option + description, `pipenv lock --requirements --dev` now emits + both default and development dependencies. + The new `--dev-only` option requests the previous + behaviour (e.g. to generate a `dev-requirements.txt` file). [#3316](https://github.com/pypa/pipenv/issues/3316) + +- Pipenv will now successfully recursively lock VCS sub-dependencies. [#3328](https://github.com/pypa/pipenv/issues/3328) + +- Added support for `--verbose` output to `pipenv run`. [#3348](https://github.com/pypa/pipenv/issues/3348) + +- Pipenv will now discover and resolve the intrinsic dependencies of **all** VCS dependencies, whether they are editable or not, to prevent resolution conflicts. [#3368](https://github.com/pypa/pipenv/issues/3368) + +- Added a new environment variable, `PIPENV_RESOLVE_VCS`, to toggle dependency resolution off for non-editable VCS, file, and URL based dependencies. [#3577](https://github.com/pypa/pipenv/issues/3577) + +- Added the ability for Windows users to enable emojis by setting `PIPENV_HIDE_EMOJIS=0`. [#3595](https://github.com/pypa/pipenv/issues/3595) + +- Allow overriding PIPENV_INSTALL_TIMEOUT environment variable (in seconds). [#3652](https://github.com/pypa/pipenv/issues/3652) + +- Allow overriding PIP_EXISTS_ACTION environment variable (value is passed to pip install). + Possible values here: <https://pip.pypa.io/en/stable/reference/pip/#exists-action-option> + Useful when you need to `PIP_EXISTS_ACTION=i` (ignore existing packages) - great for CI environments, where you need really fast setup. [#3738](https://github.com/pypa/pipenv/issues/3738) + +- Pipenv will no longer forcibly override `PIP_NO_DEPS` on all vcs and file dependencies as resolution happens on these in a pre-lock step. [#3763](https://github.com/pypa/pipenv/issues/3763) + +- Improved verbose logging output during `pipenv lock` will now stream output to the console while maintaining a spinner. [#3810](https://github.com/pypa/pipenv/issues/3810) + +- Added support for automatic python installs via `asdf` and associated `PIPENV_DONT_USE_ASDF` environment variable. [#4018](https://github.com/pypa/pipenv/issues/4018) + +- Pyenv/asdf can now be used whether or not they are available on PATH. Setting PYENV_ROOT/ASDF_DIR in a Pipenv's .env allows Pipenv to install an interpreter without any shell customizations, so long as pyenv/asdf is installed. [#4245](https://github.com/pypa/pipenv/issues/4245) + +- Added `--key` command line parameter for including personal PyUp.io API tokens when running `pipenv check`. [#4257](https://github.com/pypa/pipenv/issues/4257) + +## Behavior Changes + +- Make conservative checks of known exceptions when subprocess returns output, so user won't see the whole traceback - just the error. [#2553](https://github.com/pypa/pipenv/issues/2553) +- Do not touch Pipfile early and rely on it so that one can do `pipenv sync` without a Pipfile. [#3386](https://github.com/pypa/pipenv/issues/3386) +- Re-enable `--help` option for `pipenv run` command. [#3844](https://github.com/pypa/pipenv/issues/3844) +- Make sure `pipenv lock -r --pypi-mirror {MIRROR_URL}` will respect the pypi-mirror in requirements output. [#4199](https://github.com/pypa/pipenv/issues/4199) + +## Bug Fixes + +- Raise `PipenvUsageError` when \[\[source\]\] does not contain url field. 
[#2373](https://github.com/pypa/pipenv/issues/2373) + +- Fixed a bug which caused editable package resolution to sometimes fail with an unhelpful setuptools-related error message. [#2722](https://github.com/pypa/pipenv/issues/2722) + +- Fixed an issue which caused errors due to reliance on the system utilities `which` and `where` which may not always exist on some systems. + \- Fixed a bug which caused periodic failures in python discovery when executables named `python` were not present on the target `$PATH`. [#2783](https://github.com/pypa/pipenv/issues/2783) + +- Dependency resolution now writes hashes for local and remote files to the lockfile. [#3053](https://github.com/pypa/pipenv/issues/3053) + +- Fixed a bug which prevented `pipenv graph` from correctly showing all dependencies when running from within `pipenv shell`. [#3071](https://github.com/pypa/pipenv/issues/3071) + +- Fixed resolution of direct-url dependencies in `setup.py` files to respect `PEP-508` style URL dependencies. [#3148](https://github.com/pypa/pipenv/issues/3148) + +- Fixed a bug which caused failures in warning reporting when running pipenv inside a virtualenv under some circumstances. + + - Fixed a bug with package discovery when running `pipenv clean`. [#3298](https://github.com/pypa/pipenv/issues/3298) + +- Quote command arguments with carets (`^`) on Windows to work around unintended shell escapes. [#3307](https://github.com/pypa/pipenv/issues/3307) + +- Handle alternate names for UTF-8 encoding. [#3313](https://github.com/pypa/pipenv/issues/3313) + +- Abort pipenv before adding the non-exist package to Pipfile. [#3318](https://github.com/pypa/pipenv/issues/3318) + +- Don't normalize the package name user passes in. [#3324](https://github.com/pypa/pipenv/issues/3324) + +- Fix a bug where custom virtualenv can not be activated with pipenv shell [#3339](https://github.com/pypa/pipenv/issues/3339) + +- Fix a bug that `--site-packages` flag is not recognized. [#3351](https://github.com/pypa/pipenv/issues/3351) + +- Fix a bug where pipenv --clear is not working [#3353](https://github.com/pypa/pipenv/issues/3353) + +- Fix unhashable type error during `$ pipenv install --selective-upgrade` [#3384](https://github.com/pypa/pipenv/issues/3384) + +- Dependencies with direct `PEP508` compliant VCS URLs specified in their `install_requires` will now be successfully locked during the resolution process. [#3396](https://github.com/pypa/pipenv/issues/3396) + +- Fixed a keyerror which could occur when locking VCS dependencies in some cases. [#3404](https://github.com/pypa/pipenv/issues/3404) + +- Fixed a bug that `ValidationError` is thrown when some fields are missing in source section. [#3427](https://github.com/pypa/pipenv/issues/3427) + +- Updated the index names in lock file when source name in Pipfile is changed. [#3449](https://github.com/pypa/pipenv/issues/3449) + +- Fixed an issue which caused `pipenv install --help` to show duplicate entries for `--pre`. [#3479](https://github.com/pypa/pipenv/issues/3479) + +- Fix bug causing `[SSL: CERTIFICATE_VERIFY_FAILED]` when Pipfile `[[source]]` has verify_ssl=false and url with custom port. [#3502](https://github.com/pypa/pipenv/issues/3502) + +- Fix `sync --sequential` ignoring `pip install` errors and logs. [#3537](https://github.com/pypa/pipenv/issues/3537) + +- Fix the issue that lock file can't be created when `PIPENV_PIPFILE` is not under working directory. 
[#3584](https://github.com/pypa/pipenv/issues/3584) + +- Pipenv will no longer inadvertently set `editable=True` on all vcs dependencies. [#3647](https://github.com/pypa/pipenv/issues/3647) + +- The `--keep-outdated` argument to `pipenv install` and `pipenv lock` will now drop specifier constraints when encountering editable dependencies. + \- In addition, `--keep-outdated` will retain specifiers that would otherwise be dropped from any entries that have not been updated. [#3656](https://github.com/pypa/pipenv/issues/3656) + +- Fixed a bug which sometimes caused pipenv to fail to respect the `--site-packages` flag when passed with `pipenv install`. [#3718](https://github.com/pypa/pipenv/issues/3718) + +- Normalize the package names to lowercase when comparing used and in-Pipfile packages. [#3745](https://github.com/pypa/pipenv/issues/3745) + +- `pipenv update --outdated` will now correctly handle comparisons between pre/post-releases and normal releases. [#3766](https://github.com/pypa/pipenv/issues/3766) + +- Fixed a `KeyError` which could occur when pinning outdated VCS dependencies via `pipenv lock --keep-outdated`. [#3768](https://github.com/pypa/pipenv/issues/3768) + +- Resolved an issue which caused resolution to fail when encountering poorly formatted `python_version` markers in `setup.py` and `setup.cfg` files. [#3786](https://github.com/pypa/pipenv/issues/3786) + +- Fix a bug that installation errors are displayed as a list. [#3794](https://github.com/pypa/pipenv/issues/3794) + +- Update `pythonfinder` to fix a problem that `python.exe` will be mistakenly chosen for + virtualenv creation under WSL. [#3807](https://github.com/pypa/pipenv/issues/3807) + +- Fixed several bugs which could prevent editable VCS dependencies from being installed into target environments, even when reporting successful installation. [#3809](https://github.com/pypa/pipenv/issues/3809) + +- `pipenv check --system` should find the correct Python interpreter when `python` does not exist on the system. [#3819](https://github.com/pypa/pipenv/issues/3819) + +- Resolve the symlinks when the path is absolute. [#3842](https://github.com/pypa/pipenv/issues/3842) + +- Pass `--pre` and `--clear` options to `pipenv update --outdated`. [#3879](https://github.com/pypa/pipenv/issues/3879) + +- Fixed a bug which prevented resolution of direct URL dependencies which have PEP508 style direct url VCS sub-dependencies with subdirectories. [#3976](https://github.com/pypa/pipenv/issues/3976) + +- Honor PIPENV_SPINNER environment variable [#4045](https://github.com/pypa/pipenv/issues/4045) + +- Fixed an issue with `pipenv check` failing due to an invalid API key from `pyup.io`. [#4188](https://github.com/pypa/pipenv/issues/4188) + +- Fixed a bug which caused versions from VCS dependencies to be included in `Pipfile.lock` inadvertently. [#4217](https://github.com/pypa/pipenv/issues/4217) + +- Fixed a bug which caused pipenv to search non-existent virtual environments for `pip` when installing using `--system`. [#4220](https://github.com/pypa/pipenv/issues/4220) + +- `Requires-Python` values specifying constraint versions of python starting from `1.x` will now be parsed successfully. [#4226](https://github.com/pypa/pipenv/issues/4226) + +- Fix a bug of `pipenv update --outdated` that can't print output correctly. [#4229](https://github.com/pypa/pipenv/issues/4229) + +- Fixed a bug which caused pipenv to prefer source distributions over wheels from `PyPI` during the dependency resolution phase. 
+ Fixed an issue which prevented proper build isolation using `pep517` based builders during dependency resolution. [#4231](https://github.com/pypa/pipenv/issues/4231) + +- Don't fallback to system Python when no matching Python version is found. [#4232](https://github.com/pypa/pipenv/issues/4232) + +## Vendored Libraries + +- Updated vendored dependencies: + + > - **attrs**: `18.2.0` => `19.1.0` + > - **certifi**: `2018.10.15` => `2019.3.9` + > - **cached_property**: `1.4.3` => `1.5.1` + > - **cerberus**: `1.2.0` => `1.3.1` + > - **click-completion**: `0.5.0` => `0.5.1` + > - **colorama**: `0.3.9` => `0.4.1` + > - **distlib**: `0.2.8` => `0.2.9` + > - **idna**: `2.7` => `2.8` + > - **jinja2**: `2.10.0` => `2.10.1` + > - **markupsafe**: `1.0` => `1.1.1` + > - **orderedmultidict**: `(new)` => `1.0` + > - **packaging**: `18.0` => `19.0` + > - **parse**: `1.9.0` => `1.12.0` + > - **pathlib2**: `2.3.2` => `2.3.3` + > - **pep517**: `(new)` => `0.5.0` + > - **pexpect**: `4.6.0` => `4.7.0` + > - **pipdeptree**: `0.13.0` => `0.13.2` + > - **pyparsing**: `2.2.2` => `2.3.1` + > - **python-dotenv**: `0.9.1` => `0.10.2` + > - **pythonfinder**: `1.1.10` => `1.2.1` + > - **pytoml**: `(new)` => `0.1.20` + > - **requests**: `2.20.1` => `2.21.0` + > - **requirementslib**: `1.3.3` => `1.5.0` + > - **scandir**: `1.9.0` => `1.10.0` + > - **shellingham**: `1.2.7` => `1.3.1` + > - **six**: `1.11.0` => `1.12.0` + > - **tomlkit**: `0.5.2` => `0.5.3` + > - **urllib3**: `1.24` => `1.25.2` + > - **vistir**: `0.3.0` => `0.4.1` + > - **yaspin**: `0.14.0` => `0.14.3` + + - Removed vendored dependency **cursor**. [#3298](https://github.com/pypa/pipenv/issues/3298) + +- Updated `pip_shims` to support `--outdated` with new pip versions. [#3766](https://github.com/pypa/pipenv/issues/3766) + +- Update vendored dependencies and invocations + + - Update vendored and patched dependencies + - Update patches on `piptools`, `pip`, `pip-shims`, `tomlkit` + - Fix invocations of dependencies + - Fix custom `InstallCommand` instantiation + - Update `PackageFinder` usage + - Fix `Bool` stringify attempts from `tomlkit` + + Updated vendored dependencies: + : - **attrs**: `` `18.2.0 `` => `` `19.1.0 `` + - **certifi**: `` `2018.10.15 `` => `` `2019.3.9 `` + - **cached_property**: `` `1.4.3 `` => `` `1.5.1 `` + - **cerberus**: `` `1.2.0 `` => `` `1.3.1 `` + - **click**: `` `7.0.0 `` => `` `7.1.1 `` + - **click-completion**: `` `0.5.0 `` => `` `0.5.1 `` + - **colorama**: `` `0.3.9 `` => `` `0.4.3 `` + - **contextlib2**: `` `(new) `` => `` `0.6.0.post1 `` + - **distlib**: `` `0.2.8 `` => `` `0.2.9 `` + - **funcsigs**: `` `(new) `` => `` `1.0.2 `` + - **importlib_metadata** `` `1.3.0 `` => `` `1.5.1 `` + - **importlib-resources**: `` `(new) `` => `` `1.4.0 `` + - **idna**: `` `2.7 `` => `` `2.9 `` + - **jinja2**: `` `2.10.0 `` => `` `2.11.1 `` + - **markupsafe**: `` `1.0 `` => `` `1.1.1 `` + - **more-itertools**: `` `(new) `` => `` `5.0.0 `` + - **orderedmultidict**: `` `(new) `` => `` `1.0 `` + - **packaging**: `` `18.0 `` => `` `19.0 `` + - **parse**: `` `1.9.0 `` => `` `1.15.0 `` + - **pathlib2**: `` `2.3.2 `` => `` `2.3.3 `` + - **pep517**: `` `(new) `` => `` `0.5.0 `` + - **pexpect**: `` `4.6.0 `` => `` `4.8.0 `` + - **pip-shims**: `` `0.2.0 `` => `` `0.5.1 `` + - **pipdeptree**: `` `0.13.0 `` => `` `0.13.2 `` + - **pyparsing**: `` `2.2.2 `` => `` `2.4.6 `` + - **python-dotenv**: `` `0.9.1 `` => `` `0.10.2 `` + - **pythonfinder**: `` `1.1.10 `` => `` `1.2.2 `` + - **pytoml**: `` `(new) `` => `` `0.1.20 `` + - **requests**: `` `2.20.1 
`` => `` `2.23.0 `` + - **requirementslib**: `` `1.3.3 `` => `` `1.5.4 `` + - **scandir**: `` `1.9.0 `` => `` `1.10.0 `` + - **shellingham**: `` `1.2.7 `` => `` `1.3.2 `` + - **six**: `` `1.11.0 `` => `` `1.14.0 `` + - **tomlkit**: `` `0.5.2 `` => `` `0.5.11 `` + - **urllib3**: `` `1.24 `` => `` `1.25.8 `` + - **vistir**: `` `0.3.0 `` => `` `0.5.0 `` + - **yaspin**: `` `0.14.0 `` => `` `0.14.3 `` + - **zipp**: `` `0.6.0 `` + + - Removed vendored dependency **cursor**. [#4169](https://github.com/pypa/pipenv/issues/4169) + +- Add and update vendored dependencies to accommodate `safety` vendoring: + \- **safety** `(none)` => `1.8.7` + \- **dparse** `(none)` => `0.5.0` + \- **pyyaml** `(none)` => `5.3.1` + \- **urllib3** `1.25.8` => `1.25.9` + \- **certifi** `2019.11.28` => `2020.4.5.1` + \- **pyparsing** `2.4.6` => `2.4.7` + \- **resolvelib** `0.2.2` => `0.3.0` + \- **importlib-metadata** `1.5.1` => `1.6.0` + \- **pip-shims** `0.5.1` => `0.5.2` + \- **requirementslib** `1.5.5` => `1.5.6` [#4188](https://github.com/pypa/pipenv/issues/4188) + +- Updated vendored `pip` => `20.0.2` and `pip-tools` => `5.0.0`. [#4215](https://github.com/pypa/pipenv/issues/4215) + +- Updated vendored dependencies to latest versions for security and bug fixes: + + - **requirementslib** `1.5.8` => `1.5.9` + - **vistir** `0.5.0` => `0.5.1` + - **jinja2** `2.11.1` => `2.11.2` + - **click** `7.1.1` => `7.1.2` + - **dateutil** `(none)` => `2.8.1` + - **backports.functools_lru_cache** `1.5.0` => `1.6.1` + - **enum34** `1.1.6` => `1.1.10` + - **toml** `0.10.0` => `0.10.1` + - **importlib_resources** `1.4.0` => `1.5.0` [#4226](https://github.com/pypa/pipenv/issues/4226) + +- Changed attrs import path in vendored dependencies to always import from `pipenv.vendor`. [#4267](https://github.com/pypa/pipenv/issues/4267) + +## Improved Documentation + +- Added documentation about variable expansion in `Pipfile` entries. [#2317](https://github.com/pypa/pipenv/issues/2317) +- Consolidate all contributing docs in the rst file [#3120](https://github.com/pypa/pipenv/issues/3120) +- Update the out-dated manual page. [#3246](https://github.com/pypa/pipenv/issues/3246) +- Move CLI docs to its own page. [#3346](https://github.com/pypa/pipenv/issues/3346) +- Replace (non-existent) video on docs index.rst with equivalent gif. [#3499](https://github.com/pypa/pipenv/issues/3499) +- Clarify wording in Basic Usage example on using double quotes to escape shell redirection [#3522](https://github.com/pypa/pipenv/issues/3522) +- Ensure docs show navigation on small-screen devices [#3527](https://github.com/pypa/pipenv/issues/3527) +- Added a link to the TOML Spec under General Recommendations & Version Control to clarify how Pipfiles should be written. [#3629](https://github.com/pypa/pipenv/issues/3629) +- Updated the documentation with the new `pytest` entrypoint. [#3759](https://github.com/pypa/pipenv/issues/3759) +- Fix link to GIF in README.md demonstrating Pipenv's usage, and add descriptive alt text. [#3911](https://github.com/pypa/pipenv/issues/3911) +- Added a line describing potential issues in fancy extension. [#3912](https://github.com/pypa/pipenv/issues/3912) +- Documental description of how Pipfile works and association with Pipenv. [#3913](https://github.com/pypa/pipenv/issues/3913) +- Clarify the proper value of `python_version` and `python_full_version`. [#3914](https://github.com/pypa/pipenv/issues/3914) +- Write description for --deploy extension and few extensions differences. 
[#3915](https://github.com/pypa/pipenv/issues/3915) +- More documentation for `.env` files [#4100](https://github.com/pypa/pipenv/issues/4100) +- Updated documentation to point to working links. [#4137](https://github.com/pypa/pipenv/issues/4137) +- Replace docs.pipenv.org with pipenv.pypa.io [#4167](https://github.com/pypa/pipenv/issues/4167) +- Added functionality to check spelling in documentation and cleaned up existing typographical issues. [#4209](https://github.com/pypa/pipenv/issues/4209) + +# 2018.11.26 (2018-11-26) + +## Bug Fixes + +- Environment variables are expanded correctly before running scripts on POSIX. [#3178](https://github.com/pypa/pipenv/issues/3178) +- Pipenv will no longer disable user-mode installation when the `--system` flag is passed in. [#3222](https://github.com/pypa/pipenv/issues/3222) +- Fixed an issue with attempting to render unicode output in non-unicode locales. [#3223](https://github.com/pypa/pipenv/issues/3223) +- Fixed a bug which could cause failures to occur when parsing python entries from global pyenv version files. [#3224](https://github.com/pypa/pipenv/issues/3224) +- Fixed an issue which prevented the parsing of named extras sections from certain `setup.py` files. [#3230](https://github.com/pypa/pipenv/issues/3230) +- Correctly detect the virtualenv location inside an activated virtualenv. [#3231](https://github.com/pypa/pipenv/issues/3231) +- Fixed a bug which caused spinner frames to be written to standard output during locking operations which could cause redirection pipes to fail. [#3239](https://github.com/pypa/pipenv/issues/3239) +- Fixed a bug that editable packages can't be uninstalled correctly. [#3240](https://github.com/pypa/pipenv/issues/3240) +- Corrected an issue with installation timeouts which caused dependency resolution to fail for longer duration resolution steps. [#3244](https://github.com/pypa/pipenv/issues/3244) +- Adding normal pep 508 compatible markers is now fully functional when using VCS dependencies. [#3249](https://github.com/pypa/pipenv/issues/3249) +- Updated `requirementslib` and `pythonfinder` for multiple bug fixes. [#3254](https://github.com/pypa/pipenv/issues/3254) +- Pipenv will now ignore hashes when installing with `--skip-lock`. [#3255](https://github.com/pypa/pipenv/issues/3255) +- Fixed an issue where pipenv could crash when multiple pipenv processes attempted to create the same directory. [#3257](https://github.com/pypa/pipenv/issues/3257) +- Fixed an issue which sometimes prevented successful creation of a project Pipfile. [#3260](https://github.com/pypa/pipenv/issues/3260) +- `pipenv install` will now unset the `PYTHONHOME` environment variable when not combined with `--system`. [#3261](https://github.com/pypa/pipenv/issues/3261) +- Pipenv will ensure that warnings do not interfere with the resolution process by suppressing warnings' usage of standard output and writing to standard error instead. [#3273](https://github.com/pypa/pipenv/issues/3273) +- Fixed an issue which prevented variables from the environment, such as `PIPENV_DEV` or `PIPENV_SYSTEM`, from being parsed and implemented correctly. [#3278](https://github.com/pypa/pipenv/issues/3278) +- Clear pythonfinder cache after Python install. [#3287](https://github.com/pypa/pipenv/issues/3287) +- Fixed a race condition in hash resolution for dependencies for certain dependencies with missing cache entries or fresh Pipenv installs. 
[#3289](https://github.com/pypa/pipenv/issues/3289) +- Pipenv will now respect top-level pins over VCS dependency locks. [#3296](https://github.com/pypa/pipenv/issues/3296) + +## Vendored Libraries + +- Update vendored dependencies to resolve resolution output parsing and python finding: + : - `pythonfinder 1.1.9 -> 1.1.10` + - `requirementslib 1.3.1 -> 1.3.3` + - `vistir 0.2.3 -> 0.2.5` [#3280](https://github.com/pypa/pipenv/issues/3280) + +# 2018.11.14 (2018-11-14) + +## Features & Improvements + +- Improved exceptions and error handling on failures. [#1977](https://github.com/pypa/pipenv/issues/1977) +- Added persistent settings for all CLI flags via `PIPENV_{FLAG_NAME}` environment variables by enabling `auto_envvar_prefix=PIPENV` in click (implements PEEP-0002). [#2200](https://github.com/pypa/pipenv/issues/2200) +- Added improved messaging about available but skipped updates due to dependency conflicts when running `pipenv update --outdated`. [#2411](https://github.com/pypa/pipenv/issues/2411) +- Added environment variable `PIPENV_PYUP_API_KEY` to add ability + to override the bundled PyUP.io API key. [#2825](https://github.com/pypa/pipenv/issues/2825) +- Added additional output to `pipenv update --outdated` to indicate that the operation succeeded and all packages were already up to date. [#2828](https://github.com/pypa/pipenv/issues/2828) +- Updated `crayons` patch to enable colors on native powershell but swap native blue for magenta. [#3020](https://github.com/pypa/pipenv/issues/3020) +- Added support for `--bare` to `pipenv clean`, and fixed `pipenv sync --bare` to actually reduce output. [#3041](https://github.com/pypa/pipenv/issues/3041) +- Added windows-compatible spinner via upgraded `vistir` dependency. [#3089](https://github.com/pypa/pipenv/issues/3089) +- - Added support for python installations managed by `asdf`. [#3096](https://github.com/pypa/pipenv/issues/3096) +- Improved runtime performance of no-op commands such as `pipenv --venv` by around 2/3. [#3158](https://github.com/pypa/pipenv/issues/3158) +- Do not show error but success for running `pipenv uninstall --all` in a fresh virtual environment. [#3170](https://github.com/pypa/pipenv/issues/3170) +- Improved asynchronous installation and error handling via queued subprocess parallelization. [#3217](https://github.com/pypa/pipenv/issues/3217) + +## Bug Fixes + +- Remote non-PyPI artifacts and local wheels and artifacts will now include their own hashes rather than including hashes from `PyPI`. [#2394](https://github.com/pypa/pipenv/issues/2394) +- Non-ascii characters will now be handled correctly when parsed by pipenv's `ToML` parsers. [#2737](https://github.com/pypa/pipenv/issues/2737) +- Updated `pipenv uninstall` to respect the `--skip-lock` argument. [#2848](https://github.com/pypa/pipenv/issues/2848) +- Fixed a bug which caused uninstallation to sometimes fail to successfully remove packages from `Pipfiles` with comments on preceding or following lines. [#2885](https://github.com/pypa/pipenv/issues/2885), + [#3099](https://github.com/pypa/pipenv/issues/3099) +- Pipenv will no longer fail when encountering python versions on Windows that have been uninstalled. [#2983](https://github.com/pypa/pipenv/issues/2983) +- Fixed unnecessary extras are added when translating markers [#3026](https://github.com/pypa/pipenv/issues/3026) +- Fixed a virtualenv creation issue which could cause new virtualenvs to inadvertently attempt to read and write to global site packages. 
[#3047](https://github.com/pypa/pipenv/issues/3047) +- Fixed an issue with virtualenv path derivation which could cause errors, particularly for users on WSL bash. [#3055](https://github.com/pypa/pipenv/issues/3055) +- Fixed a bug which caused `Unexpected EOF` errors to be thrown when `pip` was waiting for input from users who had put login credentials in environment variables. [#3088](https://github.com/pypa/pipenv/issues/3088) +- Fixed a bug in `requirementslib` which prevented successful installation from mercurial repositories. [#3090](https://github.com/pypa/pipenv/issues/3090) +- Fixed random resource warnings when using pyenv or any other subprocess calls. [#3094](https://github.com/pypa/pipenv/issues/3094) +- - Fixed a bug which sometimes prevented cloning and parsing `mercurial` requirements. [#3096](https://github.com/pypa/pipenv/issues/3096) +- Fixed an issue in `delegator.py` related to subprocess calls when using `PopenSpawn` to stream output, which sometimes threw unexpected `EOF` errors. [#3102](https://github.com/pypa/pipenv/issues/3102), + [#3114](https://github.com/pypa/pipenv/issues/3114), + [#3117](https://github.com/pypa/pipenv/issues/3117) +- Fix the path casing issue that makes `pipenv clean` fail on Windows [#3104](https://github.com/pypa/pipenv/issues/3104) +- Pipenv will avoid leaving build artifacts in the current working directory. [#3106](https://github.com/pypa/pipenv/issues/3106) +- Fixed issues with broken subprocess calls leaking resource handles and causing random and sporadic failures. [#3109](https://github.com/pypa/pipenv/issues/3109) +- Fixed an issue which caused `pipenv clean` to sometimes clean packages from the base `site-packages` folder or fail entirely. [#3113](https://github.com/pypa/pipenv/issues/3113) +- Updated `pythonfinder` to correct an issue with unnesting of nested paths when searching for python versions. [#3121](https://github.com/pypa/pipenv/issues/3121) +- Added additional logic for ignoring and replacing non-ascii characters when formatting console output on non-UTF-8 systems. [#3131](https://github.com/pypa/pipenv/issues/3131) +- Fix virtual environment discovery when `PIPENV_VENV_IN_PROJECT` is set, but the in-project `.venv` is a file. [#3134](https://github.com/pypa/pipenv/issues/3134) +- Hashes for remote and local non-PyPI artifacts will now be included in `Pipfile.lock` during resolution. [#3145](https://github.com/pypa/pipenv/issues/3145) +- Fix project path hashing logic in purpose to prevent collisions of virtual environments. [#3151](https://github.com/pypa/pipenv/issues/3151) +- Fix package installation when the virtual environment path contains parentheses. [#3158](https://github.com/pypa/pipenv/issues/3158) +- Azure Pipelines YAML files are updated to use the latest syntax and product name. [#3164](https://github.com/pypa/pipenv/issues/3164) +- Fixed new spinner success message to write only one success message during resolution. [#3183](https://github.com/pypa/pipenv/issues/3183) +- Pipenv will now correctly respect the `--pre` option when used with `pipenv install`. [#3185](https://github.com/pypa/pipenv/issues/3185) +- Fix a bug where exception is raised when run pipenv graph in a project without created virtualenv [#3201](https://github.com/pypa/pipenv/issues/3201) +- When sources are missing names, names will now be derived from the supplied URL. 
[#3216](https://github.com/pypa/pipenv/issues/3216) + +## Vendored Libraries + +- Updated `pythonfinder` to correct an issue with unnesting of nested paths when searching for python versions. [#3061](https://github.com/pypa/pipenv/issues/3061), + [#3121](https://github.com/pypa/pipenv/issues/3121) +- Updated vendored dependencies: + : - `certifi 2018.08.24 => 2018.10.15` + - `urllib3 1.23 => 1.24` + - `requests 2.19.1 => 2.20.0` + - `shellingham 1.2.6 => 1.2.7` + - `tomlkit 0.4.4 => 0.4.6` + - `vistir 0.1.6 => 0.1.8` + - `pythonfinder 0.1.2 => 0.1.3` + - `requirementslib 1.1.9 => 1.1.10` + - `backports.functools_lru_cache 1.5.0 (new)` + - `cursor 1.2.0 (new)` [#3089](https://github.com/pypa/pipenv/issues/3089) +- Updated vendored dependencies: + : - `requests 2.19.1 => 2.20.1` + - `tomlkit 0.4.46 => 0.5.2` + - `vistir 0.1.6 => 0.2.4` + - `pythonfinder 1.1.2 => 1.1.8` + - `requirementslib 1.1.10 => 1.3.0` [#3096](https://github.com/pypa/pipenv/issues/3096) +- Switch to `tomlkit` for parsing and writing. Drop `prettytoml` and `contoml` from vendors. [#3191](https://github.com/pypa/pipenv/issues/3191) +- Updated `requirementslib` to aid in resolution of local and remote archives. [#3196](https://github.com/pypa/pipenv/issues/3196) + +## Improved Documentation + +- Expanded development and testing documentation for contributors to get started. [#3074](https://github.com/pypa/pipenv/issues/3074) + +# 2018.10.13 (2018-10-13) + +## Bug Fixes + +- Fixed a bug in `pipenv clean` which caused global packages to sometimes be inadvertently targeted for cleanup. [#2849](https://github.com/pypa/pipenv/issues/2849) +- Fix broken backport imports for vendored vistir. [#2950](https://github.com/pypa/pipenv/issues/2950), + [#2955](https://github.com/pypa/pipenv/issues/2955), + [#2961](https://github.com/pypa/pipenv/issues/2961) +- Fixed a bug with importing local vendored dependencies when running `pipenv graph`. [#2952](https://github.com/pypa/pipenv/issues/2952) +- Fixed a bug which caused executable discovery to fail when running inside a virtualenv. [#2957](https://github.com/pypa/pipenv/issues/2957) +- Fix parsing of outline tables. [#2971](https://github.com/pypa/pipenv/issues/2971) +- Fixed a bug which caused `verify_ssl` to fail to drop through to `pip install` correctly as `trusted-host`. [#2979](https://github.com/pypa/pipenv/issues/2979) +- Fixed a bug which caused canonicalized package names to fail to resolve against PyPI. [#2989](https://github.com/pypa/pipenv/issues/2989) +- Enhanced CI detection to detect Azure Devops builds. [#2993](https://github.com/pypa/pipenv/issues/2993) +- Fixed a bug which prevented installing pinned versions which used redirection symbols from the command line. [#2998](https://github.com/pypa/pipenv/issues/2998) +- Fixed a bug which prevented installing the local directory in non-editable mode. [#3005](https://github.com/pypa/pipenv/issues/3005) + +## Vendored Libraries + +- Updated `requirementslib` to version `1.1.9`. [#2989](https://github.com/pypa/pipenv/issues/2989) +- Upgraded `pythonfinder => 1.1.1` and `vistir => 0.1.7`. [#3007](https://github.com/pypa/pipenv/issues/3007) + +# 2018.10.9 (2018-10-09) + +## Features & Improvements + +- Added environment variables `PIPENV_VERBOSE` and `PIPENV_QUIET` to control + output verbosity without needing to pass options. [#2527](https://github.com/pypa/pipenv/issues/2527) + +- Updated test-PyPI add-on to better support json-API access (forward compatibility). + Improved testing process for new contributors. 
[#2568](https://github.com/pypa/pipenv/issues/2568) + +- Greatly enhanced python discovery functionality: + + - Added pep514 (windows launcher/finder) support for python discovery. + - Introduced architecture discovery for python installations which support different architectures. [#2582](https://github.com/pypa/pipenv/issues/2582) + +- Added support for `pipenv shell` on msys and cygwin/mingw/git bash for Windows. [#2641](https://github.com/pypa/pipenv/issues/2641) + +- Enhanced resolution of editable and VCS dependencies. [#2643](https://github.com/pypa/pipenv/issues/2643) + +- Deduplicate and refactor CLI to use stateful arguments and object passing. See [this issue](https://github.com/pallets/click/issues/108) for reference. [#2814](https://github.com/pypa/pipenv/issues/2814) + +## Behavior Changes + +- Virtual environment activation for `run` is revised to improve interpolation + with other Python discovery tools. [#2503](https://github.com/pypa/pipenv/issues/2503) +- Improve terminal coloring to display better in Powershell. [#2511](https://github.com/pypa/pipenv/issues/2511) +- Invoke `virtualenv` directly for virtual environment creation, instead of depending on `pew`. [#2518](https://github.com/pypa/pipenv/issues/2518) +- `pipenv --help` will now include short help descriptions. [#2542](https://github.com/pypa/pipenv/issues/2542) +- Add `COMSPEC` to fallback option (along with `SHELL` and `PYENV_SHELL`) + if shell detection fails, improving robustness on Windows. [#2651](https://github.com/pypa/pipenv/issues/2651) +- Fallback to shell mode if `run` fails with Windows error 193 to handle non-executable commands. This should improve usability on Windows, where some users run non-executable files without specifying a command, relying on Windows file association to choose the current command. [#2718](https://github.com/pypa/pipenv/issues/2718) + +## Bug Fixes + +- Fixed a bug which prevented installation of editable requirements using `ssh://` style URLs [#1393](https://github.com/pypa/pipenv/issues/1393) +- VCS Refs for locked local editable dependencies will now update appropriately to the latest hash when running `pipenv update`. [#1690](https://github.com/pypa/pipenv/issues/1690) +- `.tar.gz` and `.zip` artifacts will now have dependencies installed even when they are missing from the Lockfile. [#2173](https://github.com/pypa/pipenv/issues/2173) +- The command line parser will now handle multiple `-e/--editable` dependencies properly via click's option parser to help mitigate future parsing issues. [#2279](https://github.com/pypa/pipenv/issues/2279) +- Fixed the ability of pipenv to parse `dependency_links` from `setup.py` when `PIP_PROCESS_DEPENDENCY_LINKS` is enabled. [#2434](https://github.com/pypa/pipenv/issues/2434) +- Fixed a bug which could cause `-i/--index` arguments to sometimes be incorrectly picked up in packages. This is now handled in the command line parser. [#2494](https://github.com/pypa/pipenv/issues/2494) +- Fixed non-deterministic resolution issues related to changes to the internal package finder in `pip 10`. 
[#2499](https://github.com/pypa/pipenv/issues/2499), + [#2529](https://github.com/pypa/pipenv/issues/2529), + [#2589](https://github.com/pypa/pipenv/issues/2589), + [#2666](https://github.com/pypa/pipenv/issues/2666), + [#2767](https://github.com/pypa/pipenv/issues/2767), + [#2785](https://github.com/pypa/pipenv/issues/2785), + [#2795](https://github.com/pypa/pipenv/issues/2795), + [#2801](https://github.com/pypa/pipenv/issues/2801), + [#2824](https://github.com/pypa/pipenv/issues/2824), + [#2862](https://github.com/pypa/pipenv/issues/2862), + [#2879](https://github.com/pypa/pipenv/issues/2879), + [#2894](https://github.com/pypa/pipenv/issues/2894), + [#2933](https://github.com/pypa/pipenv/issues/2933) +- Fix subshell invocation on Windows for Python 2. [#2515](https://github.com/pypa/pipenv/issues/2515) +- Fixed a bug which sometimes caused pipenv to throw a `TypeError` or to run into encoding issues when writing a Lockfile on python 2. [#2561](https://github.com/pypa/pipenv/issues/2561) +- Improve quoting logic for `pipenv run` so it works better with Windows + built-in commands. [#2563](https://github.com/pypa/pipenv/issues/2563) +- Fixed a bug related to parsing VCS requirements with both extras and subdirectory fragments. + Corrected an issue in the `requirementslib` parser which led to some markers being discarded rather than evaluated. [#2564](https://github.com/pypa/pipenv/issues/2564) +- Fixed multiple issues with finding the correct system python locations. [#2582](https://github.com/pypa/pipenv/issues/2582) +- Catch JSON decoding error to prevent exception when the lock file is of + invalid format. [#2607](https://github.com/pypa/pipenv/issues/2607) +- Fixed a rare bug which could sometimes cause errors when installing packages with custom sources. [#2610](https://github.com/pypa/pipenv/issues/2610) +- Update requirementslib to fix a bug which could raise an `UnboundLocalError` when parsing malformed VCS URIs. [#2617](https://github.com/pypa/pipenv/issues/2617) +- Fixed an issue which prevented passing multiple `--ignore` parameters to `pipenv check`. [#2632](https://github.com/pypa/pipenv/issues/2632) +- Fixed a bug which caused attempted hashing of `ssh://` style URIs which could cause failures during installation of private ssh repositories. + \- Corrected path conversion issues which caused certain editable VCS paths to be converted to `ssh://` URIs improperly. [#2639](https://github.com/pypa/pipenv/issues/2639) +- Fixed a bug which caused paths to be formatted incorrectly when using `pipenv shell` in bash for windows. [#2641](https://github.com/pypa/pipenv/issues/2641) +- Dependency links to private repositories defined via `ssh://` schemes will now install correctly and skip hashing as long as `PIP_PROCESS_DEPENDENCY_LINKS=1`. [#2643](https://github.com/pypa/pipenv/issues/2643) +- Fixed a bug which sometimes caused pipenv to parse the `trusted_host` argument to pip incorrectly when parsing source URLs which specify `verify_ssl = false`. [#2656](https://github.com/pypa/pipenv/issues/2656) +- Prevent crashing when a virtual environment in `WORKON_HOME` is faulty. [#2676](https://github.com/pypa/pipenv/issues/2676) +- Fixed virtualenv creation failure when a .venv file is present in the project root. [#2680](https://github.com/pypa/pipenv/issues/2680) +- Fixed a bug which could cause the `-e/--editable` argument on a dependency to be accidentally parsed as a dependency itself. 
[#2714](https://github.com/pypa/pipenv/issues/2714) +- Correctly pass `verbose` and `debug` flags to the resolver subprocess so it generates appropriate output. This also resolves a bug introduced by the fix to #2527. [#2732](https://github.com/pypa/pipenv/issues/2732) +- All markers are now included in `pipenv lock --requirements` output. [#2748](https://github.com/pypa/pipenv/issues/2748) +- Fixed a bug in marker resolution which could cause duplicate and non-deterministic markers. [#2760](https://github.com/pypa/pipenv/issues/2760) +- Fixed a bug in the dependency resolver which caused regular issues when handling `setup.py` based dependency resolution. [#2766](https://github.com/pypa/pipenv/issues/2766) +- Updated vendored dependencies: + : - `pip-tools` (updated and patched to latest w/ `pip 18.0` compatibility) + - `pip 10.0.1 => 18.0` + - `click 6.7 => 7.0` + - `toml 0.9.4 => 0.10.0` + - `pyparsing 2.2.0 => 2.2.2` + - `delegator 0.1.0 => 0.1.1` + - `attrs 18.1.0 => 18.2.0` + - `distlib 0.2.7 => 0.2.8` + - `packaging 17.1.0 => 18.0` + - `passa 0.2.0 => 0.3.1` + - `pip_shims 0.1.2 => 0.3.1` + - `plette 0.1.1 => 0.2.2` + - `pythonfinder 1.0.2 => 1.1.0` + - `pytoml 0.1.18 => 0.1.19` + - `requirementslib 1.1.16 => 1.1.17` + - `shellingham 1.2.4 => 1.2.6` + - `tomlkit 0.4.2 => 0.4.4` + - `vistir 0.1.4 => 0.1.6` + [#2802](https://github.com/pypa/pipenv/issues/2802), + [#2867](https://github.com/pypa/pipenv/issues/2867), + [#2880](https://github.com/pypa/pipenv/issues/2880) +- Fixed a bug where `pipenv` crashes when the `WORKON_HOME` directory does not exist. [#2877](https://github.com/pypa/pipenv/issues/2877) +- Fixed pip is not loaded from pipenv's patched one but the system one [#2912](https://github.com/pypa/pipenv/issues/2912) +- Fixed various bugs related to `pip 18.1` release which prevented locking, installation, and syncing, and dumping to a `requirements.txt` file. [#2924](https://github.com/pypa/pipenv/issues/2924) + +## Vendored Libraries + +- Pew is no longer vendored. Entry point `pewtwo`, packages `pipenv.pew` and + `pipenv.patched.pew` are removed. [#2521](https://github.com/pypa/pipenv/issues/2521) +- Update `pythonfinder` to major release `1.0.0` for integration. [#2582](https://github.com/pypa/pipenv/issues/2582) +- Update requirementslib to fix a bug which could raise an `UnboundLocalError` when parsing malformed VCS URIs. [#2617](https://github.com/pypa/pipenv/issues/2617) +- - Vendored new libraries `vistir` and `pip-shims`, `tomlkit`, `modutil`, and `plette`. 
+ - Update vendored libraries: + \- `scandir` to `1.9.0` + \- `click-completion` to `0.4.1` + \- `semver` to `2.8.1` + \- `shellingham` to `1.2.4` + \- `pytoml` to `0.1.18` + \- `certifi` to `2018.8.24` + \- `ptyprocess` to `0.6.0` + \- `requirementslib` to `1.1.5` + \- `pythonfinder` to `1.0.2` + \- `pipdeptree` to `0.13.0` + \- `python-dotenv` to `0.9.1` [#2639](https://github.com/pypa/pipenv/issues/2639) +- Updated vendored dependencies: + : - `pip-tools` (updated and patched to latest w/ `pip 18.0` compatibility) + - `pip 10.0.1 => 18.0` + - `click 6.7 => 7.0` + - `toml 0.9.4 => 0.10.0` + - `pyparsing 2.2.0 => 2.2.2` + - `delegator 0.1.0 => 0.1.1` + - `attrs 18.1.0 => 18.2.0` + - `distlib 0.2.7 => 0.2.8` + - `packaging 17.1.0 => 18.0` + - `passa 0.2.0 => 0.3.1` + - `pip_shims 0.1.2 => 0.3.1` + - `plette 0.1.1 => 0.2.2` + - `pythonfinder 1.0.2 => 1.1.0` + - `pytoml 0.1.18 => 0.1.19` + - `requirementslib 1.1.16 => 1.1.17` + - `shellingham 1.2.4 => 1.2.6` + - `tomlkit 0.4.2 => 0.4.4` + - `vistir 0.1.4 => 0.1.6` + [#2902](https://github.com/pypa/pipenv/issues/2902), + [#2935](https://github.com/pypa/pipenv/issues/2935) + +## Improved Documentation + +- Simplified the test configuration process. [#2568](https://github.com/pypa/pipenv/issues/2568) +- Updated documentation to use working fortune cookie add-on. [#2644](https://github.com/pypa/pipenv/issues/2644) +- Added additional information about troubleshooting `pipenv shell` by using the the `$PIPENV_SHELL` environment variable. [#2671](https://github.com/pypa/pipenv/issues/2671) +- Added a link to `PEP-440` version specifiers in the documentation for additional detail. [#2674](https://github.com/pypa/pipenv/issues/2674) +- Added simple example to README.md for installing from git. [#2685](https://github.com/pypa/pipenv/issues/2685) +- Stopped recommending `--system` for Docker contexts. [#2762](https://github.com/pypa/pipenv/issues/2762) +- Fixed the example url for doing "pipenv install -e + some-repository-url#egg=something", it was missing the "egg=" in the fragment + identifier. [#2792](https://github.com/pypa/pipenv/issues/2792) +- Fixed link to the "be cordial" essay in the contribution documentation. [#2793](https://github.com/pypa/pipenv/issues/2793) +- Clarify `pipenv install` documentation [#2844](https://github.com/pypa/pipenv/issues/2844) +- Replace reference to uservoice with PEEP-000 [#2909](https://github.com/pypa/pipenv/issues/2909) + +# 2018.7.1 (2018-07-01) + +## Features & Improvements + +- All calls to `pipenv shell` are now implemented from the ground up using [shellingham](https://github.com/sarugaku/shellingham), a custom library which was purpose built to handle edge cases and shell detection. [#2371](https://github.com/pypa/pipenv/issues/2371) +- Added support for python 3.7 via a few small compatibility / bug fixes. [#2427](https://github.com/pypa/pipenv/issues/2427), + [#2434](https://github.com/pypa/pipenv/issues/2434), + [#2436](https://github.com/pypa/pipenv/issues/2436) +- Added new flag `pipenv --support` to replace the diagnostic command `python -m pipenv.help`. [#2477](https://github.com/pypa/pipenv/issues/2477), + [#2478](https://github.com/pypa/pipenv/issues/2478) +- Improved import times and CLI run times with minor tweaks. [#2485](https://github.com/pypa/pipenv/issues/2485) + +## Bug Fixes + +- Fixed an ongoing bug which sometimes resolved incompatible versions into the project Lockfile. 
[#1901](https://github.com/pypa/pipenv/issues/1901) +- Fixed a bug which caused errors when creating virtualenvs which contained leading dash characters. [#2415](https://github.com/pypa/pipenv/issues/2415) +- Fixed a logic error which caused `--deploy --system` to overwrite editable vcs packages in the Pipfile before installing, which caused any installation to fail by default. [#2417](https://github.com/pypa/pipenv/issues/2417) +- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. [#2419](https://github.com/pypa/pipenv/issues/2419) +- Installed new vendored jinja2 templates for `click-completion` which were causing template errors for users with completion enabled. [#2422](https://github.com/pypa/pipenv/issues/2422) +- Added support for python 3.7 via a few small compatibility / bug fixes. [#2427](https://github.com/pypa/pipenv/issues/2427) +- Fixed an issue reading package names from `setup.py` files in projects which imported utilities such as `versioneer`. [#2433](https://github.com/pypa/pipenv/issues/2433) +- Pipenv will now ensure that its internal package names registry files are written with unicode strings. [#2450](https://github.com/pypa/pipenv/issues/2450) +- Fixed a bug causing requirements input as relative paths to be output as absolute paths or URIs. + Fixed a bug affecting normalization of `git+git@host` URLs. [#2453](https://github.com/pypa/pipenv/issues/2453) +- Pipenv will now always use `pathlib2` for `Path` based filesystem interactions by default on `python<3.5`. [#2454](https://github.com/pypa/pipenv/issues/2454) +- Fixed a bug which prevented passing proxy PyPI indexes set with `--pypi-mirror` from being passed to pip during virtualenv creation, which could cause the creation to freeze in some cases. [#2462](https://github.com/pypa/pipenv/issues/2462) +- Using the `python -m pipenv.help` command will now use proper encoding for the host filesystem to avoid encoding issues. [#2466](https://github.com/pypa/pipenv/issues/2466) +- The new `jinja2` templates for `click_completion` will now be included in pipenv source distributions. [#2479](https://github.com/pypa/pipenv/issues/2479) +- Resolved a long-standing issue with re-using previously generated `InstallRequirement` objects for resolution which could cause `PKG-INFO` file information to be deleted, raising a `TypeError`. [#2480](https://github.com/pypa/pipenv/issues/2480) +- Resolved an issue parsing usernames from private PyPI URIs in `Pipfiles` by updating `requirementslib`. [#2484](https://github.com/pypa/pipenv/issues/2484) + +## Vendored Libraries + +- All calls to `pipenv shell` are now implemented from the ground up using [shellingham](https://github.com/sarugaku/shellingham), a custom library which was purpose built to handle edge cases and shell detection. [#2371](https://github.com/pypa/pipenv/issues/2371) +- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. [#2419](https://github.com/pypa/pipenv/issues/2419) +- Installed new vendored jinja2 templates for `click-completion` which were causing template errors for users with completion enabled. [#2422](https://github.com/pypa/pipenv/issues/2422) +- Add patch to `prettytoml` to support Python 3.7. [#2426](https://github.com/pypa/pipenv/issues/2426) +- Patched `prettytoml.AbstractTable._enumerate_items` to handle `StopIteration` errors in preparation of release of python 3.7. 
[#2427](https://github.com/pypa/pipenv/issues/2427) +- Fixed an issue reading package names from `setup.py` files in projects which imported utilities such as `versioneer`. [#2433](https://github.com/pypa/pipenv/issues/2433) +- Updated `requirementslib` to version `1.0.9` [#2453](https://github.com/pypa/pipenv/issues/2453) +- Unraveled a lot of old, unnecessary patches to `pip-tools` which were causing non-deterministic resolution errors. [#2480](https://github.com/pypa/pipenv/issues/2480) +- Resolved an issue parsing usernames from private PyPI URIs in `Pipfiles` by updating `requirementslib`. [#2484](https://github.com/pypa/pipenv/issues/2484) + +## Improved Documentation + +- Added instructions for installing using Fedora's official repositories. [#2404](https://github.com/pypa/pipenv/issues/2404) + +# 2018.6.25 (2018-06-25) + +## Features & Improvements + +- Pipenv-created virtualenvs will now be associated with a `.project` folder + (features can be implemented on top of this later or users may choose to use + `pipenv-pipes` to take full advantage of this.) [#1861](https://github.com/pypa/pipenv/issues/1861) +- Virtualenv names will now appear in prompts for most Windows users. [#2167](https://github.com/pypa/pipenv/issues/2167) +- Added support for cmder shell paths with spaces. [#2168](https://github.com/pypa/pipenv/issues/2168) +- Added nested JSON output to the `pipenv graph` command. [#2199](https://github.com/pypa/pipenv/issues/2199) +- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated + patched piptools version. [#2255](https://github.com/pypa/pipenv/issues/2255) +- PyPI mirror URLs can now be set to override instances of PyPI URLs by passing + the `--pypi-mirror` argument from the command line or setting the + `PIPENV_PYPI_MIRROR` environment variable. [#2281](https://github.com/pypa/pipenv/issues/2281) +- Virtualenv activation lines will now avoid being written to some shell + history files. [#2287](https://github.com/pypa/pipenv/issues/2287) +- Pipenv will now only search for `requirements.txt` files when creating new + projects, and during that time only if the user doesn't specify packages to + pass in. [#2309](https://github.com/pypa/pipenv/issues/2309) +- Added support for mounted drives via UNC paths. [#2331](https://github.com/pypa/pipenv/issues/2331) +- Added support for Windows Subsystem for Linux bash shell detection. [#2363](https://github.com/pypa/pipenv/issues/2363) +- Pipenv will now generate hashes much more quickly by resolving them in a + single pass during locking. [#2384](https://github.com/pypa/pipenv/issues/2384) +- `pipenv run` will now avoid spawning additional `COMSPEC` instances to + run commands in when possible. [#2385](https://github.com/pypa/pipenv/issues/2385) +- Massive internal improvements to requirements parsing codebase, resolver, and + error messaging. [#2388](https://github.com/pypa/pipenv/issues/2388) +- `pipenv check` now may take multiple of the additional argument + `--ignore` which takes a parameter `cve_id` for the purpose of ignoring + specific CVEs. [#2408](https://github.com/pypa/pipenv/issues/2408) + +## Behavior Changes + +- Pipenv will now parse & capitalize `platform_python_implementation` markers + .. warning:: This could cause an issue if you have an out of date `Pipfile` + which lower-cases the comparison value (e.g. `cpython` instead of + `CPython`). 
[#2123](https://github.com/pypa/pipenv/issues/2123) +- Pipenv will now only search for `requirements.txt` files when creating new + projects, and during that time only if the user doesn't specify packages to + pass in. [#2309](https://github.com/pypa/pipenv/issues/2309) + +## Bug Fixes + +- Massive internal improvements to requirements parsing codebase, resolver, and + error messaging. [#1962](https://github.com/pypa/pipenv/issues/1962), + [#2186](https://github.com/pypa/pipenv/issues/2186), + [#2263](https://github.com/pypa/pipenv/issues/2263), + [#2312](https://github.com/pypa/pipenv/issues/2312) +- Pipenv will now parse & capitalize `platform_python_implementation` + markers. [#2123](https://github.com/pypa/pipenv/issues/2123) +- Fixed a bug with parsing and grouping old-style `setup.py` extras during + resolution [#2142](https://github.com/pypa/pipenv/issues/2142) +- Fixed a bug causing pipenv graph to throw unhelpful exceptions when running + against empty or non-existent environments. [#2161](https://github.com/pypa/pipenv/issues/2161) +- Fixed a bug which caused `--system` to incorrectly abort when users were in + a virtualenv. [#2181](https://github.com/pypa/pipenv/issues/2181) +- Removed vendored `cacert.pem` which could cause issues for some users with + custom certificate settings. [#2193](https://github.com/pypa/pipenv/issues/2193) +- Fixed a regression which led to direct invocations of `virtualenv`, rather + than calling it by module. [#2198](https://github.com/pypa/pipenv/issues/2198) +- Locking will now pin the correct VCS ref during `pipenv update` runs. + Running `pipenv update` with a new vcs ref specified in the `Pipfile` + will now properly obtain, resolve, and install the specified dependency at + the specified ref. [#2209](https://github.com/pypa/pipenv/issues/2209) +- `pipenv clean` will now correctly ignore comments from `pip freeze` when + cleaning the environment. [#2262](https://github.com/pypa/pipenv/issues/2262) +- Resolution bugs causing packages for incompatible python versions to be + locked have been fixed. [#2267](https://github.com/pypa/pipenv/issues/2267) +- Fixed a bug causing pipenv graph to fail to display sometimes. [#2268](https://github.com/pypa/pipenv/issues/2268) +- Updated `requirementslib` to fix a bug in Pipfile parsing affecting + relative path conversions. [#2269](https://github.com/pypa/pipenv/issues/2269) +- Windows executable discovery now leverages `os.pathext`. [#2298](https://github.com/pypa/pipenv/issues/2298) +- Fixed a bug which caused `--deploy --system` to inadvertently create a + virtualenv before failing. [#2301](https://github.com/pypa/pipenv/issues/2301) +- Fixed an issue which led to a failure to unquote special characters in file + and wheel paths. [#2302](https://github.com/pypa/pipenv/issues/2302) +- VCS dependencies are now manually obtained only if they do not match the + requested ref. [#2304](https://github.com/pypa/pipenv/issues/2304) +- Added error handling functionality to properly cope with single-digit + `Requires-Python` metadata with no specifiers. [#2377](https://github.com/pypa/pipenv/issues/2377) +- `pipenv update` will now always run the resolver and lock before ensuring + dependencies are in sync with project Lockfile. [#2379](https://github.com/pypa/pipenv/issues/2379) +- Resolved a bug in our patched resolvers which could cause nondeterministic + resolution failures in certain conditions. 
Running `pipenv install` with no + arguments in a project with only a `Pipfile` will now correctly lock first + for dependency resolution before installing. [#2384](https://github.com/pypa/pipenv/issues/2384) +- Patched `python-dotenv` to ensure that environment variables always get + encoded to the filesystem encoding. [#2386](https://github.com/pypa/pipenv/issues/2386) + +## Improved Documentation + +- Update documentation wording to clarify Pipenv's overall role in the packaging ecosystem. [#2194](https://github.com/pypa/pipenv/issues/2194) +- Added contribution documentation and guidelines. [#2205](https://github.com/pypa/pipenv/issues/2205) +- Added instructions for supervisord compatibility. [#2215](https://github.com/pypa/pipenv/issues/2215) +- Fixed broken links to development philosophy and contribution documentation. [#2248](https://github.com/pypa/pipenv/issues/2248) + +## Vendored Libraries + +- Removed vendored `cacert.pem` which could cause issues for some users with + custom certificate settings. [#2193](https://github.com/pypa/pipenv/issues/2193) + +- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated + patched piptools version. [#2255](https://github.com/pypa/pipenv/issues/2255) + +- Updated `requirementslib` to fix a bug in Pipfile parsing affecting + relative path conversions. [#2269](https://github.com/pypa/pipenv/issues/2269) + +- Added custom shell detection library `shellingham`, a port of our changes + to `pew`. [#2363](https://github.com/pypa/pipenv/issues/2363) + +- Patched `python-dotenv` to ensure that environment variables always get + encoded to the filesystem encoding. [#2386](https://github.com/pypa/pipenv/issues/2386) + +- Updated vendored libraries. The following vendored libraries were updated: + + - distlib from version `0.2.6` to `0.2.7`. + - jinja2 from version `2.9.5` to `2.10`. + - pathlib2 from version `2.1.0` to `2.3.2`. + - parse from version `2.8.0` to `2.8.4`. + - pexpect from version `2.5.2` to `2.6.0`. + - requests from version `2.18.4` to `2.19.1`. + - idna from version `2.6` to `2.7`. + - certifi from version `2018.1.16` to `2018.4.16`. + - packaging from version `16.8` to `17.1`. + - six from version `1.10.0` to `1.11.0`. + - requirementslib from version `0.2.0` to `1.0.1`. + + In addition, scandir was vendored and patched to avoid importing host system binaries when falling back to pathlib2. [#2368](https://github.com/pypa/pipenv/issues/2368) diff --git a/CHANGELOG.rst b/CHANGELOG.rst deleted file mode 100644 index 613dd4f95c..0000000000 --- a/CHANGELOG.rst +++ /dev/null @@ -1,2375 +0,0 @@ -2023.9.1 (2023-09-01) -===================== -Pipenv 2023.9.1 (2023-09-01) -============================ - - -Features & Improvements ------------------------ - -- Top level Pipfile sys_platform markers should be transitive; adds top level platform_machine entries that are also transitive. Marker entries continue to operate the same as before. `#5892 <https://github.com/pypa/pipenv/issues/5892>`_ - -Bug Fixes ---------- - -- Apply patch for install_search_all_sources = True functionality. `#5895 <https://github.com/pypa/pipenv/issues/5895>`_ -- Relative paths improvements for editable installs. `#5896 <https://github.com/pypa/pipenv/issues/5896>`_ -- Set log level in resolver to WARN when verbose is not passed. `#5897 <https://github.com/pypa/pipenv/issues/5897>`_ -- Handle more variations in private index html to improve hash collection. 
`#5898 <https://github.com/pypa/pipenv/issues/5898>`_ - - -2023.8.28 (2023-08-28) -====================== - -Bug Fixes ---------- - -- Revert change that caused the credentials in source url issue. `#5878 <https://github.com/pypa/pipenv/issues/5878>`_ -- Do not treat named requirements as file installs just becacuse a match path exists; better handling of editable keyword for local file installs. - Handle additional edge cases in the setup.py ast parser logic for trying to determine local install package name. `#5885 <https://github.com/pypa/pipenv/issues/5885>`_ - - -2023.8.26 (2023-08-26) -====================== - -Bug Fixes ---------- - -- Additional property caching to avoid duplication of sources in the resolver. `#5863 <https://github.com/pypa/pipenv/issues/5863>`_ -- Fix recent regressions with local/editable file installs. `#5870 <https://github.com/pypa/pipenv/issues/5870>`_ -- Fixes the vcs subdirectory fragments regression; fixes sys_platform markers regression. `#5871 <https://github.com/pypa/pipenv/issues/5871>`_ -- Fix regression that caused printing non-printable ascii characters when help was called. `#5872 <https://github.com/pypa/pipenv/issues/5872>`_ - - -2023.8.25 (2023-08-25) -====================== - -Bug Fixes ---------- - -- Fix regression of hash collection when downloading package from private indexes when the hash is not found in the index href url fragment. `#5866 <https://github.com/pypa/pipenv/issues/5866>`_ - - -2023.8.23 (2023-08-22) -====================== - -Bug Fixes ---------- - -- More gracefully handle @ symbols in vcs URLs to address recent regression with vcs URLs. `#5849 <https://github.com/pypa/pipenv/issues/5849>`_ - - -2023.8.22 (2023-08-22) -====================== - -Bug Fixes ---------- - -- Fix regression with ``ssh://`` vcs URLs introduced in ``2023.8.21`` whereby ssh vcs URLs are expected to have at least one ``@`` symbol. `#5846 <https://github.com/pypa/pipenv/issues/5846>`_ - - -2023.8.21 (2023-08-21) -====================== - -Bug Fixes ---------- - -- Add back some relevant caching to increase performance after the major refactor released with ``2023.8.19`` `#5841 <https://github.com/pypa/pipenv/issues/5841>`_ -- Fix some edge cases around vcs dependencies without a ref, and older Pipfile/lockfile formats. `#5843 <https://github.com/pypa/pipenv/issues/5843>`_ - -Vendored Libraries ------------------- - -- Remove unused command line interface for vendored packages. `#5840 <https://github.com/pypa/pipenv/issues/5840>`_ - - -2023.8.20 (2023-08-20) -====================== - -Bug Fixes ---------- - -- Fix the expected output of the ``version`` command. `#5838 <https://github.com/pypa/pipenv/issues/5838>`_ - - -2023.8.19 (2023-08-19) -====================== - -Features & Improvements ------------------------ - -- The ``--categories`` option now works with requirements.txt file. `#5722 <https://github.com/pypa/pipenv/issues/5722>`_ - -Bug Fixes ---------- - -- Drop requirementslib for managing pip lines and InstallRequirements, bring remaining requirementslib functionality into pipenv. - Fixes numerous reports about extras installs with vcs and file installs; format pip lines correctly to not generate deprecation warnings. 
`#5793 <https://github.com/pypa/pipenv/issues/5793>`_ - -Vendored Libraries ------------------- - -- Update pip 23.2 -> 23.2.1 `#5822 <https://github.com/pypa/pipenv/issues/5822>`_ - -Improved Documentation ----------------------- - -- Added documentation on how to move or rename a project directory `#5129 <https://github.com/pypa/pipenv/issues/5129>`_ - -Removals and Deprecations -------------------------- - -- The ``--skip-lock`` flag which was deprecated, has now been removed to unblock modernizing the pipenv resolver code. `#5805 <https://github.com/pypa/pipenv/issues/5805>`_ - - -2023.7.23 (2023-07-23) -====================== - -Features & Improvements ------------------------ - -- Upgrades ``pip==23.2`` which includes everything from the pip changelog. Drops the "install_compatatability_finder" pip internals patch. `#5808 <https://github.com/pypa/pipenv/issues/5808>`_ - -Bug Fixes ---------- - -- Fix issue parsing some Pipfiles with separate packages.<pkg> sections (tomlkit OutOfOrderTableProxy) `#5794 <https://github.com/pypa/pipenv/issues/5794>`_ -- Fix all ruff linter warnings `#5807 <https://github.com/pypa/pipenv/issues/5807>`_ -- Restore running Resolver in sub-process using the project python by default; maintains ability to run directly by setting ``PIPENV_RESOLVER_PARENT_PYTHON`` environment variable to 1 (useful for internal debugging). `#5809 <https://github.com/pypa/pipenv/issues/5809>`_ -- Fix error when a Windows path begins with a '\' with ``pythonfinder==2.0.5``. `#5812 <https://github.com/pypa/pipenv/issues/5812>`_ - -Vendored Libraries ------------------- - -- Remove usage of click.secho in some modules. `#5804 <https://github.com/pypa/pipenv/issues/5804>`_ - - -2023.7.11 (2023-07-11) - -Bug Fixes ---------- - -- Invoke the resolver in the same process as pipenv rather than utilizing subprocess. `#5787 <https://github.com/pypa/pipenv/issues/5787>`_ -- Fix regression markers being included as None/null in requirements command. `#5788 <https://github.com/pypa/pipenv/issues/5788>`_ - - -2023.7.9 (2023-07-09) -===================== - -Bug Fixes ---------- - -- Drop the --keep-outdated flag and --selective-upgrade flags that have been deprecated in favor of update/upgrade commands. `#5730 <https://github.com/pypa/pipenv/issues/5730>`_ -- Fix regressions in the ``requirements`` command related to standard index extras and handling of local file requirements. `#5784 <https://github.com/pypa/pipenv/issues/5784>`_ - - -2023.7.4 (2023-07-04) -===================== - -Bug Fixes ---------- - -- Fixes regression on Pipfile requirements syntax. Ensure default operator is provided to requirement lib to avoid crash. `#5765 <https://github.com/pypa/pipenv/issues/5765>`_ -- Ensure hashes included in a generated requirements file are after any markers. `#5777 <https://github.com/pypa/pipenv/issues/5777>`_ - - -2023.7.3 (2023-07-02) -===================== - -Bug Fixes ---------- - -- Fix regression with ``--system`` flag usage. `#5773 <https://github.com/pypa/pipenv/issues/5773>`_ - - -2023.7.1 (2023-07-01) -===================== - -Bug Fixes ---------- - -- Patch ``_get_requests_session`` method to consider ``PIP_CLIENT_CERT`` value when present. `#5746 <https://github.com/pypa/pipenv/issues/5746>`_ -- Fix regression in ``requirements`` command that was causing package installs after upgrade to ``requirementslib==3.0.0``. `#5755 <https://github.com/pypa/pipenv/issues/5755>`_ -- Fix ``error: invalid command 'egg_info'`` edge case with requirementslib 3.0.0. 
It exposed pipenv resolver sometimes was using a different python than expected. `#5760 <https://github.com/pypa/pipenv/issues/5760>`_ -- Fix issue in requirementslib 3.0.0 where dependencies defined in pyproject.toml were not being included in the lock file. `#5766 <https://github.com/pypa/pipenv/issues/5766>`_ - -Removals and Deprecations -------------------------- - -- Bump dparse to 0.6.3 `#5750 <https://github.com/pypa/pipenv/issues/5750>`_ - - -2023.6.26 (2023-06-26) -====================== - -Improved Documentation ----------------------- - -- Add missing environment variable descriptions back to documentation `#missing_env_var_desc <https://github.com/pypa/pipenv/issues/missing_env_var_desc>`_ - - -2023.6.18 (2023-06-18) -====================== - -Bug Fixes ---------- - -- Fixes resolver to only consider the default index for packages when a secondary index is not specified. This brings the code into alignment with stated assumptions about index restricted packages behavior of ``pipenv``. `#5737 <https://github.com/pypa/pipenv/issues/5737>`_ - -Removals and Deprecations -------------------------- - -- Deprecation of ``--skip-lock`` flag as it bypasses the security benefits of pipenv. Plus it lacks proper deterministic support of installation from multiple package indexes. `#5737 <https://github.com/pypa/pipenv/issues/5737>`_ - - -2023.6.12 (2023-06-11) -====================== - -Bug Fixes ---------- - -- Remove the ``sys.path`` modifications and as a result fixes keyring support. `#5719 <https://github.com/pypa/pipenv/issues/5719>`_ - - -2023.6.11 (2023-06-11) -====================== - -Vendored Libraries ------------------- - -- Upgrades to ``pipdeptree==2.8.0`` which fixes edge cases of the ``pipenv graph`` command. `#5720 <https://github.com/pypa/pipenv/issues/5720>`_ - - -2023.6.2 (2023-06-02) -===================== - -Features & Improvements ------------------------ - -- Resolver performance: package sources following PEP 503 will leverage package hashes from the URL fragment, without downloading the package. `#5701 <https://github.com/pypa/pipenv/issues/5701>`_ - -Bug Fixes ---------- - -- Improve regex for python versions to handle hidden paths; handle relative paths to python better as well. `#4588 <https://github.com/pypa/pipenv/issues/4588>`_ -- Update ``pythonfinder==2.0.4`` with fix for "RecursionError: maximum recursion depth exceeded". `#5709 <https://github.com/pypa/pipenv/issues/5709>`_ - -Vendored Libraries ------------------- - -- Drop old vendored toml library. Use stdlib tomllib or tomli instead. `#5678 <https://github.com/pypa/pipenv/issues/5678>`_ -- Drop vendored library cerberus. This isn't actually used by pipenv. `#5699 <https://github.com/pypa/pipenv/issues/5699>`_ - - -2023.5.19 (2023-05-19) -====================== - -Bug Fixes ---------- - -- Consider ``--index`` argument in ``update`` and ``upgrade`` commands. `#5692 <https://github.com/pypa/pipenv/issues/5692>`_ - -Vendored Libraries ------------------- - -- Upgrade ``pythonfinder==2.0.0`` which also brings in ``pydantic==1.10.7``. `#5677 <https://github.com/pypa/pipenv/issues/5677>`_ - - -2023.4.29 (2023-04-29) -====================== - -Vendored Libraries ------------------- - -- Vendor in ``pip==23.1.2`` latest. `#5671 <https://github.com/pypa/pipenv/issues/5671>`_ -- Vendor in ``requirementslib==2.3.0`` which drops usage of ``vistir``. 
`#5672 <https://github.com/pypa/pipenv/issues/5672>`_ - - -2023.4.20 (2023-04-20) -====================== - -Features & Improvements ------------------------ - -- Checks environment variable ``PIP_TRUSTED_HOSTS`` when evaluating an - index specified at the command line when adding to ``Pipfile``. - - For example, this command line - - :: - - PIP_TRUSTED_HOSTS=internal.mycompany.com pipenv install pypkg --index=https://internal.mycompany.com/pypi/simple - - will add the following to the ``Pipfile``: - - :: - - [[source]] - url = 'https://internal.mycompany.com/pypi/simple' - verify_ssl = false - name = 'Internalmycompany' - - [packages] - pypkg = {version="*", index="Internalmycompany"} - - This allows users with private indexes to add them to ``Pipfile`` - initially from command line with correct permissions using environment - variable ``PIP_TRUSTED_HOSTS``. `#5572 <https://github.com/pypa/pipenv/issues/5572>`_ -- Vendor in the updates, upgrades and fixes provided by ``pip==23.1``. `#5655 <https://github.com/pypa/pipenv/issues/5655>`_ -- Replace flake8 and isort with `ruff <https://beta.ruff.rs>`_. `#ruff <https://github.com/pypa/pipenv/issues/ruff>`_ - -Bug Fixes ---------- - -- Fix regression with ``--skip-lock`` option with ``install`` command. `#5653 <https://github.com/pypa/pipenv/issues/5653>`_ - -Vendored Libraries ------------------- - -- Vendor in latest ``python-dotenv==1.0.0`` `#5656 <https://github.com/pypa/pipenv/issues/5656>`_ -- Vendor in latest available dependencies: ``attrs==23.1.0`` ``click-didyoumean==0.3.0`` ``click==8.1.3`` ``markupsafe==2.1.2`` ``pipdeptree==2.7.0`` ``shellingham==1.5.0.post1`` ``tomlkit==0.11.7`` `#5657 <https://github.com/pypa/pipenv/issues/5657>`_ -- Vendor in latest ``requirementslib==2.2.5`` which includes updates for pip 23.1 `#5659 <https://github.com/pypa/pipenv/issues/5659>`_ - -Improved Documentation ----------------------- - -- Made documentation clear about tilde-equals operator for package versions. `#5594 <https://github.com/pypa/pipenv/issues/5594>`_ - - -2023.3.20 (2023-03-19) -====================== - -No significant changes. - - -2023.3.18 (2023-03-19) -====================== - -Bug Fixes ---------- - -- Fix import error in virtualenv utility for creating new environments caused by ``2023.3.18`` release. `#5636 <https://github.com/pypa/pipenv/issues/5636>`_ - - -2023.3.18 (2023-03-18) -====================== - -Features & Improvements ------------------------ - -- Provide a more powerful solution than ``--keep-outdated`` and ``--selective-upgrade`` which are deprecated for removal. - Introducing the ``pipenv upgrade`` command which takes the same package specifiers as ``pipenv install`` and - updates the ``Pipfile`` and ``Pipfile.lock`` with a valid lock resolution that only effects the specified packages and their dependencies. - Additionally, the ``pipenv update`` command has been updated to use the ``pipenv upgrade`` routine when packages are provided, which will install sync the new lock file as well. `#5617 <https://github.com/pypa/pipenv/issues/5617>`_ - -Vendored Libraries ------------------- - -- Bump vistir to 0.8.0, requirementslib to 2.2.4. `#5635 <https://github.com/pypa/pipenv/issues/5635>`_ - - -2023.2.18 (2023-02-18) -============================= - - -Features & Improvements ------------------------ - -- ``pipenv`` now reads the system ``pip.conf`` or ``pip.ini`` file in order to determine pre-defined indexes to use for package resolution and installation. 
`#5297 <https://github.com/pypa/pipenv/issues/5297>`_ -- Behavior change for ``pipenv check`` now checks the default packages group of the lockfile. - Specifying ``--categories`` to override which categories to check against. - Pass ``--use-installed`` to get the prior behavior of checking the packages actually installed into the environment. `#5600 <https://github.com/pypa/pipenv/issues/5600>`_ - -Bug Fixes ---------- - -- Fix regression with detection of ``CI`` env variable being set to something other than a truthy value. `#5554 <https://github.com/pypa/pipenv/issues/5554>`_ -- Fix ``--categories`` argument inconsistency between requirements command and install/sync by allowing comma separated values or spaces. `#5570 <https://github.com/pypa/pipenv/issues/5570>`_ -- Use Nushell overlays when running ``pipenv shell``. `#5603 <https://github.com/pypa/pipenv/issues/5603>`_ - -Vendored Libraries ------------------- - -- Vendor in the ``pip==23.0`` release. `#5586 <https://github.com/pypa/pipenv/issues/5586>`_ -- Vendor in ``pip==23.0.1`` minor pt release. Updates ``pythonfinder==1.3.2``. `#5614 <https://github.com/pypa/pipenv/issues/5614>`_ - -Improved Documentation ----------------------- - -- Make some improvements to the contributing guide. `#5611 <https://github.com/pypa/pipenv/issues/5611>`_ - - -2023.2.4 (2023-02-04) -============================ - - -Bug Fixes ---------- - -- Fix overwriting of output in verbose mode `#5530 <https://github.com/pypa/pipenv/issues/5530>`_ -- Fix for resolution error when direct url includes an extras. `#5536 <https://github.com/pypa/pipenv/issues/5536>`_ - -Removals and Deprecations -------------------------- - -- Remove pytest-pypi package since it's not used anymore `#5556 <https://github.com/pypa/pipenv/issues/5556>`_ -- Remove deprecated --three flag from the CLI. `#5576 <https://github.com/pypa/pipenv/issues/5576>`_ - - -2022.12.19 (2022-12-19) -============================== - - -Bug Fixes ---------- - -- Fix for ``requirementslib`` hanging during install of remote wheels files. `#5546 <https://github.com/pypa/pipenv/issues/5546>`_ - - -2022.12.17 (2022-12-17) -============================== - - -Bug Fixes ---------- - -- virtualenv creation no longer uses ``--creator=venv`` by default; introduced two environment variables: - ``PIPENV_VIRTUALENV_CREATOR`` -- May be specified to instruct virtualenv which ``--creator=`` to use. - ``PIPENV_VIRTUALENV_COPIES`` -- When specified as truthy, instructs virtualenv to not use symlinks. `#5477 <https://github.com/pypa/pipenv/issues/5477>`_ -- Fix regression where ``path`` is not propagated to the ``Pipfile.lock``. `#5479 <https://github.com/pypa/pipenv/issues/5479>`_ -- Solve issue where null markers were getting added to lock file when extras were provided. `#5486 <https://github.com/pypa/pipenv/issues/5486>`_ -- Fix: ``update --outdated`` raises NonExistentKey with outdated dev packages `#5540 <https://github.com/pypa/pipenv/issues/5540>`_ - -Vendored Libraries ------------------- - -- Vendor in ``pip==22.3.1`` which is currently the latest version of ``pip``. 
`#5520 <https://github.com/pypa/pipenv/issues/5520>`_ -- * Bump version of requirementslib to 2.2.1 - * Bump version of vistir to 0.7.5 - * Bump version of colorama to 0.4.6 `#5522 <https://github.com/pypa/pipenv/issues/5522>`_ -- Bump plette version to 0.4.4 `#5539 <https://github.com/pypa/pipenv/issues/5539>`_ - - -2022.11.30 (2022-11-30) -============================== - - -Bug Fixes ---------- - -- Fix regression: pipenv does not sync indexes to lockfile. `#5508 <https://github.com/pypa/pipenv/issues/5508>`_ - - -2022.11.25 (2022-11-24) -============================== - - -Bug Fixes ---------- - -- Solving issue where ``pipenv check`` command has been broken in the published wheel distribution. `#5493 <https://github.com/pypa/pipenv/issues/5493>`_ - - -2022.11.24 (2022-11-24) -============================== - - -Bug Fixes ---------- - -- Stop building universal wheels since Python 2 is no longer supported. `#5496 <https://github.com/pypa/pipenv/issues/5496>`_ - - -2022.11.23 (2022-11-23) -============================== - - -Features & Improvements ------------------------ - -- Find nushell activate scripts. `#5470 <https://github.com/pypa/pipenv/issues/5470>`_ - -Vendored Libraries ------------------- - -- * Drop unused code from cerberus - * Drop unused module wheel `#5467 <https://github.com/pypa/pipenv/issues/5467>`_ -- * Replace yaspin spinner with rich spinner. - * Bump vistir version to 0.7.4 `#5468 <https://github.com/pypa/pipenv/issues/5468>`_ -- Bump version of requirementslib to 2.2.0 - Drop yaspin which is no longer used. - Bump vistir to version 0.7.4 - Remove parse. - Remove termcolor. - Remove idna. `#5481 <https://github.com/pypa/pipenv/issues/5481>`_ - - -2022.11.11 (2022-11-11) -============================== - - -Bug Fixes ---------- - -- Fix regression of lock generation that caused the keep-outdated behavior to be default. `#5456 <https://github.com/pypa/pipenv/issues/5456>`_ - - -2022.11.5 (2022-11-05) -============================= - - -Bug Fixes ---------- - -- Rollback the change in version of ``colorama`` due to regressions in core functionality. `#5459 <https://github.com/pypa/pipenv/issues/5459>`_ - - -2022.11.4 (2022-11-04) -============================= - - -Features & Improvements ------------------------ - -- Allow pipenv settings to be explicitly disabled more easily by assigning to the environment variable a falsy value. `#5451 <https://github.com/pypa/pipenv/issues/5451>`_ - -Bug Fixes ---------- - -- Provide an install iteration per index when ``install_search_all_sources`` is ``false`` (default behavior). - This fixes regression where install phase was using unexpected index after updating ``pip==22.3`` `#5444 <https://github.com/pypa/pipenv/issues/5444>`_ - -Vendored Libraries ------------------- - -- Drop tomli, which is not used anymore. - Bump attrs version see #5449. - Drop distlib, colorama and platformdirs - use the ones from pip._vendor. `#5450 <https://github.com/pypa/pipenv/issues/5450>`_ - - -2022.10.25 (2022-10-25) -============================== - - -Features & Improvements ------------------------ - -- Add support to export requirements file for a specified set of categories. `#5431 <https://github.com/pypa/pipenv/issues/5431>`_ - -Vendored Libraries ------------------- - -- Remove appdirs.py in favor of platformdirs. 
`#5420 <https://github.com/pypa/pipenv/issues/5420>`_ - -Removals and Deprecations -------------------------- - -- Remove usage of vistir.cmdparse in favor of pipenv.cmdparse `#5419 <https://github.com/pypa/pipenv/issues/5419>`_ - - -2022.10.12 (2022-10-12) -============================== - - -Improved Documentation ----------------------- - -- Update pipenv docs with an example of callable package functions in Pipfile scripts `#5396 <https://github.com/pypa/pipenv/issues/5396>`_ - - -2022.10.11 (2022-10-11) -============================== - - -Bug Fixes ---------- - -- Revert decision to change the default isolation level because it caused problems with existing workflows; solution is to recommend users that have issues requiring pre-requisites to pass --extra-pip-args="--no-build-isolation" in their install or sync commands. `#5399 <https://github.com/pypa/pipenv/issues/5399>`_ - - -2022.10.10 (2022-10-10) -============================== - - -Features & Improvements ------------------------ - -- Add ability for callable scripts in Pipfile under [scripts]. Callables can now be added like: ``<pathed.module>:<func>`` and can also take arguments. For example: ``func = {call = "package.module:func('arg1', 'arg2')"}`` then this can be activated in the shell with ``pipenv run func`` `#5294 <https://github.com/pypa/pipenv/issues/5294>`_ - -Bug Fixes ---------- - -- Fixes regression from ``2022.10.9`` where ``Pipfile`` with ``pipenv`` section began generating a new hash, - and also fix a regression where the lock phase did not update the hash value. `#5394 <https://github.com/pypa/pipenv/issues/5394>`_ - - -2022.10.9 (2022-10-09) -============================= - - -Behavior Changes ----------------- - -- New pipfiles show python_full_version under [requires] if specified. Previously creating a new pipenv project would only specify in the Pipfile the major and minor version, i.e. "python_version = 3.7". Now if you create a new project with a fully named python version it will record both in the Pipfile. So: "python_version = 3.7" and "python_full_version = 3.7.2" `#5345 <https://github.com/pypa/pipenv/issues/5345>`_ - -Relates to dev process changes ------------------------------- - -- Silence the majority of pytest.mark warnings by registering custom marks. Can view a list of custom marks by running ``pipenv run pytest --markers`` - - -2022.10.4 (2022-10-04) -============================= - - -Bug Fixes ---------- - -- Use ``--creator=venv`` when creating virtual environments to avoid issue with sysconfig ``posix_prefix`` on some systems. `#5075 <https://github.com/pypa/pipenv/issues/5075>`_ -- Prefer to use the lockfile sources if available during the install phase. `#5380 <https://github.com/pypa/pipenv/issues/5380>`_ - -Vendored Libraries ------------------- - -- Drop vendored six - we no longer depend on this library, as we migrated from pipfile to plette. `#5187 <https://github.com/pypa/pipenv/issues/5187>`_ - - -2022.9.24 (2022-09-24) -============================= - - -Bug Fixes ---------- - -- Update ``requirementslib==2.0.3`` to always evaluate the requirement markers fresh (without lru_cache) to fix a marker determinism issue. `#4660 <https://github.com/pypa/pipenv/issues/4660>`_ - - -2022.9.21 (2022-09-21) -============================= - - -Bug Fixes ---------- - -- Fix regression to ``install --skip-lock`` with update to ``plette``. 
  `#5368 <https://github.com/pypa/pipenv/issues/5368>`_


2022.9.20 (2022-09-20)
=============================


Behavior Changes
----------------

- Remove usage of pipfile module in favour of Plette.
  pipfile is not actively maintained anymore. Plette is actively maintained,
  and has stricter checking of the Pipfile and Pipfile.lock. As a result,
  a Pipfile with unnamed package indices will fail to lock. If a Pipfile
  was hand crafted, and the source is anonymous, an error will be thrown.
  The solution is simple: add a name to your index, e.g., replace::

      [[source]]
      url = "https://pypi.acme.com/simple"
      verify_ssl = true

  With::

      [[source]]
      url = "https://pypi.acme.com/simple"
      verify_ssl = true
      name = acmes_private_index `#5339 <https://github.com/pypa/pipenv/issues/5339>`_

Bug Fixes
---------

- Modernize ``pipenv`` path patch with ``importlib.util`` to eliminate import of ``pkg_resources`` `#5349 <https://github.com/pypa/pipenv/issues/5349>`_

Vendored Libraries
------------------

- Remove iso8601 from vendored packages since it was not used. `#5346 <https://github.com/pypa/pipenv/issues/5346>`_


2022.9.8 (2022-09-08)
============================


Features & Improvements
-----------------------

- It is now possible to supply additional arguments to ``pip`` install by supplying ``--extra-pip-args="<arg1> <arg2>"``
  See the updated documentation ``Supplying additional arguments to pip`` for more details. `#5283 <https://github.com/pypa/pipenv/issues/5283>`_

Bug Fixes
---------

- Make editable detection better because not everyone specifies editable entry in the Pipfile for local editable installs. `#4784 <https://github.com/pypa/pipenv/issues/4784>`_
- Add error handling for when the installed package setup.py does not contain valid markers. `#5329 <https://github.com/pypa/pipenv/issues/5329>`_
- Load the dot env earlier so that ``PIPENV_CUSTOM_VENV_NAME`` is more useful across projects. `#5334 <https://github.com/pypa/pipenv/issues/5334>`_

Vendored Libraries
------------------

- Bump version of shellingham to support nushell. `#5336 <https://github.com/pypa/pipenv/issues/5336>`_
- Bump plette to version v0.3.0 `#5337 <https://github.com/pypa/pipenv/issues/5337>`_
- Bump version of pipdeptree `#5343 <https://github.com/pypa/pipenv/issues/5343>`_

Removals and Deprecations
-------------------------

- Add deprecation warning to the --three flag. Pipenv now uses python3 by default. `#5328 <https://github.com/pypa/pipenv/issues/5328>`_

Relates to dev process changes
------------------------------

- Convert the test runner to use ``pypiserver`` as a standalone process for all tests that reference internal ``pypi`` artifacts.
  General refactoring of some test cases to create more variety in packages selected--preferring lighter weight packages--in existing test cases.


2022.9.4 (2022-09-04)
=====================


Bug Fixes
---------

- Fix the issue from ``2022.9.2`` where tarball URL packages were being skipped on batch_install. `#5306 <https://github.com/pypa/pipenv/issues/5306>`_


2022.9.2 (2022-09-02)
=====================


Bug Fixes
---------

- Fix issue where unnamed constraints were provided but which are not allowed by ``pip`` resolver.
  `#5273 <https://github.com/pypa/pipenv/issues/5273>`_


2022.8.31 (2022-08-31)
======================


Features & Improvements
-----------------------

- Performance optimization to ``batch_install`` results in a faster and less CPU intensive ``pipenv sync`` or ``pipenv install`` experience. `#5301 <https://github.com/pypa/pipenv/issues/5301>`_

Bug Fixes
---------

- ``pipenv`` now uses a ``NamedTemporaryFile`` for resolver constraints and drops internal env var ``PIPENV_PACKAGES``. `#4925 <https://github.com/pypa/pipenv/issues/4925>`_

Removals and Deprecations
-------------------------

- Remove no longer used method ``which_pip``. `#5314 <https://github.com/pypa/pipenv/issues/5314>`_
- Drop progress bar file due to recent performance optimization to combine ``batch_install`` requirements in at most two invocations of ``pip install``.
  To see progress of install pass ``--verbose`` flag and ``pip`` progress will be output in realtime. `#5315 <https://github.com/pypa/pipenv/issues/5315>`_


2022.8.30 (2022-08-30)
======================


Bug Fixes
---------

- Fix an issue when using ``pipenv install --system`` on systems that have the ``python`` executable pointing to Python 2 and a Python 3 executable being ``python3``. `#5296 <https://github.com/pypa/pipenv/issues/5296>`_
- Sorting ``constraints`` before resolving, which fixes ``pipenv lock`` generating nondeterministic environment markers. `#5299 <https://github.com/pypa/pipenv/issues/5299>`_
- Fix #5273, use our own method for checking if a package is a valid constraint. `#5309 <https://github.com/pypa/pipenv/issues/5309>`_

Vendored Libraries
------------------

- Vendor in ``requirementslib==2.0.1`` which fixes issue with local install not marked editable, and vendor in ``vistir==0.6.1`` which drops python2 support.
  Drops ``orderedmultidict`` from vendoring. `#5308 <https://github.com/pypa/pipenv/issues/5308>`_


2022.8.24 (2022-08-24)
======================


Bug Fixes
---------

- Remove eager and unnecessary importing of ``setuptools`` and ``pkg_resources`` to avoid conflict upgrading ``setuptools``.
  Roll back ``sysconfig`` patch of ``pip`` because it was problematic for some ``--system`` commands. `#5228 <https://github.com/pypa/pipenv/issues/5228>`_

Vendored Libraries
------------------

- Vendor in ``requirementslib==2.0.0`` and drop ``pip-shims`` entirely. `#5228 <https://github.com/pypa/pipenv/issues/5228>`_
- Vendor in ``pythonfinder==1.3.1`` `#5292 <https://github.com/pypa/pipenv/issues/5292>`_


2022.8.19 (2022-08-19)
======================


Bug Fixes
---------

- Fix issue where resolver is provided with ``install_requires`` constraints from ``setup.py`` that depend on editable dependencies and could not resolve them. `#5271 <https://github.com/pypa/pipenv/issues/5271>`_
- Fix for ``pipenv lock`` fails for packages with extras as of ``2022.8.13``. `#5274 <https://github.com/pypa/pipenv/issues/5274>`_
- Revert the exclusion of ``BAD_PACKAGES`` from ``batch_install`` in order for ``pipenv`` to install specific versions of ``setuptools``.
  To prevent issue upgrading ``setuptools`` this patches ``_USE_SYSCONFIG_DEFAULT`` to use ``sysconfig`` for ``3.7`` and above whereas ``pip`` default behavior was ``3.10`` and above.
  `#5275 <https://github.com/pypa/pipenv/issues/5275>`_


2022.8.17 (2022-08-17)
======================


Bug Fixes
---------

- Fix "The Python interpreter can't be found" error when running ``pipenv install --system`` with a python3 but no python. `#5261 <https://github.com/pypa/pipenv/issues/5261>`_
- Revise pip import patch to include only ``pipenv`` from site-packages and removed ``--ignore-installed`` argument from pip install in order to fix regressions with ``--use-site-packages``. `#5265 <https://github.com/pypa/pipenv/issues/5265>`_


2022.8.15 (2022-08-15)
======================


Bug Fixes
---------

- ``pip_install`` method was using a different way of finding the python executable than other ``pipenv`` commands, which caused an issue with skipping package installation if it was already installed in site-packages. `#5254 <https://github.com/pypa/pipenv/issues/5254>`_


2022.8.14 (2022-08-14)
======================


Bug Fixes
---------

- Removed ``packaging`` library from ``BAD_PACKAGES`` constant to allow it to be installed, which fixes regression from ``pipenv==2022.8.13``. `#5247 <https://github.com/pypa/pipenv/issues/5247>`_


2022.8.13 (2022-08-13)
======================


Bug Fixes
---------

- If environment variable ``CI`` or ``TF_BUILD`` is set but does not evaluate to ``False`` it is now treated as ``True``. `#5128 <https://github.com/pypa/pipenv/issues/5128>`_
- Fix auto-complete crashing on 'install' and 'uninstall' keywords `#5214 <https://github.com/pypa/pipenv/issues/5214>`_
- Address remaining ``pipenv`` commands that were still referencing the user or system installed ``pip`` to use the vendored ``pip`` internal to ``pipenv``. `#5229 <https://github.com/pypa/pipenv/issues/5229>`_
- Use ``packages`` as constraints when locking ``dev-packages`` in Pipfile.
  Use ``packages`` as constraints when installing new ``dev-packages``. `#5234 <https://github.com/pypa/pipenv/issues/5234>`_

Vendored Libraries
------------------

- Vendor in minor ``pip`` update ``22.2.2`` `#5230 <https://github.com/pypa/pipenv/issues/5230>`_

Improved Documentation
----------------------

- Add documentation for environment variables that configure pipenv. `#5235 <https://github.com/pypa/pipenv/issues/5235>`_

Removals and Deprecations
-------------------------

- The deprecated way of generating requirements ``install -r`` or ``lock -r`` has been removed in favor of the ``pipenv requirements`` command. `#5200 <https://github.com/pypa/pipenv/issues/5200>`_


2022.8.5 (2022-08-05)
=====================


Features & Improvements
-----------------------

- support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs. `#4974 <https://github.com/pypa/pipenv/issues/4974>`_

Bug Fixes
---------

- Remove usages of ``pip_shims`` from the non vendored ``pipenv`` code, but retain initialization for ``requirementslib`` still has usages. `#5204 <https://github.com/pypa/pipenv/issues/5204>`_
- Fix case sensitivity of color name ``red`` in exception when getting hashes from pypi in ``_get_hashes_from_pypi``. `#5206 <https://github.com/pypa/pipenv/issues/5206>`_
- Write output from ``subprocess_run`` directly to ``stdout`` instead of creating temporary file.
  Remove deprecated ``distutils.sysconfig``, use ``sysconfig``. `#5210 <https://github.com/pypa/pipenv/issues/5210>`_
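As an aside, a minimal sketch of the ``distutils.sysconfig`` to ``sysconfig`` move mentioned above (illustrative only, not the code pipenv ships)::

    import sysconfig

    # Deprecated since Python 3.10:
    #   from distutils import sysconfig as du_sysconfig
    #   site_packages = du_sysconfig.get_python_lib()
    site_packages = sysconfig.get_paths()["purelib"]
    print(site_packages)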

Vendored Libraries
------------------

- * Rename patched ``notpip`` to ``pip`` in order to be clear that it's a patched version of pip.
  * Remove the part of _post_pip_import.patch that overrode the standalone pip to be the user installed pip, now we fully rely on our vendored and patched ``pip``, even for all types of installs.
  * Vendor in the next newest version of ``pip==22.2``
  * Modify patch for ``pipdeptree`` to not use ``pip-shims`` `#5188 <https://github.com/pypa/pipenv/issues/5188>`_
  * Remove vendored ``urllib3`` in favor of using it from vendored version in ``pip._vendor`` `#5215 <https://github.com/pypa/pipenv/issues/5215>`_

Removals and Deprecations
-------------------------

- Remove tests that have for a while been marked skipped and are no longer relevant. `#5165 <https://github.com/pypa/pipenv/issues/5165>`_


2022.7.24 (2022-07-24)
======================


Bug Fixes
---------

- Re-enabled three installs tests again on the Windows CI as recent refactor work has fixed them. `#5064 <https://github.com/pypa/pipenv/issues/5064>`_
- Support ANSI ``NO_COLOR`` environment variable and deprecate ``PIPENV_COLORBLIND`` variable, which will be removed after this release. `#5158 <https://github.com/pypa/pipenv/issues/5158>`_
- Fixed edge case where a non-editable file, url or vcs would overwrite the value ``no_deps`` for all other requirements in the loop causing a retry condition. `#5164 <https://github.com/pypa/pipenv/issues/5164>`_
- Vendor in latest ``requirementslib`` for fix to lock when using editable VCS module with specific ``@`` git reference. `#5179 <https://github.com/pypa/pipenv/issues/5179>`_

Vendored Libraries
------------------

- Remove crayons and replace with click.secho and click.styles per https://github.com/pypa/pipenv/issues/3741 `#3741 <https://github.com/pypa/pipenv/issues/3741>`_
- Vendor in latest version of ``pip==22.1.2`` which upgrades ``pipenv`` from ``pip==22.0.4``.
  Vendor in latest version of ``requirementslib==1.6.7`` which includes a fix for tracebacks on encountering Annotated variables.
  Vendor in latest version of ``pip-shims==0.7.3`` such that imports could be rewritten to utilize ``packaging`` from vendor'd ``pip``.
  Drop the ``packaging`` requirement from the ``vendor`` directory in ``pipenv``. `#5147 <https://github.com/pypa/pipenv/issues/5147>`_
- Remove unused vendored dependency ``normailze-charset``. `#5161 <https://github.com/pypa/pipenv/issues/5161>`_
- Remove obsolete package ``funcsigs``. `#5168 <https://github.com/pypa/pipenv/issues/5168>`_
- Bump vendored dependency ``pyparsing==3.0.9``. `#5170 <https://github.com/pypa/pipenv/issues/5170>`_


2022.7.4 (2022-07-04)
=====================


Behavior Changes
----------------

- Adjust ``pipenv requirements`` to add markers and add an ``--exclude-markers`` option to allow the exclusion of markers. `#5092 <https://github.com/pypa/pipenv/issues/5092>`_

Bug Fixes
---------

- Stopped expanding environment variables when using ``pipenv requirements`` `#5134 <https://github.com/pypa/pipenv/issues/5134>`_

Vendored Libraries
------------------

- Depend on ``requests`` and ``certifi`` from vendored ``pip`` and remove them as explicit vendor dependencies.
  `#5000 <https://github.com/pypa/pipenv/issues/5000>`_
- Vendor in the latest version of ``requirementslib==1.6.5`` which includes bug fixes for beta python versions, projects with an at sign (@) in the path, and a ``setuptools`` deprecation warning. `#5132 <https://github.com/pypa/pipenv/issues/5132>`_

Relates to dev process changes
------------------------------

- Switch from using type comments to type annotations.


2022.5.3.dev0 (2022-06-07)
==========================


Bug Fixes
---------

- Adjust pipenv to work with the newly added ``venv`` install scheme in Python.
  First check if ``venv`` is among the available install schemes, and use it if it is. Otherwise fall back to the ``nt`` or ``posix_prefix`` install schemes as before. This should produce no change for environments where the install schemes were not redefined. `#5096 <https://github.com/pypa/pipenv/issues/5096>`_


2022.5.2 (2022-05-02)
=====================


Bug Fixes
---------

- Fixes issue of ``pipenv lock -r`` command printing to stdout instead of stderr. `#5091 <https://github.com/pypa/pipenv/issues/5091>`_


2022.4.30 (2022-04-30)
======================


Bug Fixes
---------

- Fixes issue of ``requirements`` command problem by modifying to print ``-e`` and path of the editable package. `#5070 <https://github.com/pypa/pipenv/issues/5070>`_
- Revert specifier of ``setuptools`` requirement in ``setup.py`` back to what it was in order to fix ``FileNotFoundError: [Errno 2]`` issue report. `#5075 <https://github.com/pypa/pipenv/issues/5075>`_
- Fixes issue of requirements command where git requirements cause the command to fail, solved by using existing convert_deps_to_pip function. `#5076 <https://github.com/pypa/pipenv/issues/5076>`_

Vendored Libraries
------------------

- Vendor in ``requirementslib==1.6.4`` to fix ``SetuptoolsDeprecationWarning`` ``setuptools.config.read_configuration`` became deprecated. `#5081 <https://github.com/pypa/pipenv/issues/5081>`_

Removals and Deprecations
-------------------------

- Remove more usage of misc functions of vistir. Many of these functions are available in the STL or in another dependency of pipenv. `#5078 <https://github.com/pypa/pipenv/issues/5078>`_


2022.4.21 (2022-04-21)
======================


Removals and Deprecations
-------------------------

- Updated setup.py to remove support for python 3.6 from built ``pipenv`` packages' Metadata. `#5065 <https://github.com/pypa/pipenv/issues/5065>`_


2022.4.20 (2022-04-20)
======================


Features & Improvements
-----------------------

- Added new Pipenv option ``install_search_all_sources`` that allows installation of packages from an
  existing ``Pipfile.lock`` to search all defined indexes for the constrained package version and hash signatures. `#5041 <https://github.com/pypa/pipenv/issues/5041>`_

Bug Fixes
---------

- allow the user to disable the ``no_input`` flag, so the use of e.g. Google Artifact Registry is possible. `#4706 <https://github.com/pypa/pipenv/issues/4706>`_
- Fixes case where packages could fail to install and the exit code was successful. `#5031 <https://github.com/pypa/pipenv/issues/5031>`_

Vendored Libraries
------------------

- Updated vendor version of ``pip`` from ``21.2.2`` to ``22.0.4`` which fixes a number of bugs including
  several reports of pipenv locking for an infinite amount of time when using certain package constraints.
  This also drops support for python 3.6 as it is EOL and support was removed in pip 22.x `#4995 <https://github.com/pypa/pipenv/issues/4995>`_

Removals and Deprecations
-------------------------

- Removed the vendor dependency ``more-itertools`` as it was originally added for ``zipp``, which since stopped using it. `#5044 <https://github.com/pypa/pipenv/issues/5044>`_
- Removed all usages of ``pipenv.vendor.vistir.compat.fs_str``, since this function was used for PY2-PY3 compatibility and is no longer needed. `#5062 <https://github.com/pypa/pipenv/issues/5062>`_

Relates to dev process changes
------------------------------

- Added pytest-cov and basic configuration to the project for generating html testing coverage reports.
- Make all CI jobs run only after the lint stage. Also added a makefile target for vendoring the packages.


2022.4.8 (2022-04-08)
=====================


Features & Improvements
-----------------------

- Implements a ``pipenv requirements`` command which generates a requirements.txt compatible output without locking. `#4959 <https://github.com/pypa/pipenv/issues/4959>`_
- Internal to pipenv, the utils.py was split into a utils module with unused code removed. `#4992 <https://github.com/pypa/pipenv/issues/4992>`_

Bug Fixes
---------

- Pipenv will now ignore ``.venv`` in the project when ``PIPENV_VENV_IN_PROJECT`` variable is False.
  Unset variable maintains the existing behavior of preferring to use the project's ``.venv`` should it exist. `#2763 <https://github.com/pypa/pipenv/issues/2763>`_
- Fix an edge case of hash collection in index restricted packages whereby the hashes for some packages would
  be missing from the ``Pipfile.lock`` following package index restrictions added in ``pipenv==2022.3.23``. `#5023 <https://github.com/pypa/pipenv/issues/5023>`_

Improved Documentation
----------------------

- Pipenv CLI documentation generation has been fixed. It broke when ``click`` was vendored into the project in
  ``2021.11.9`` because by default ``sphinx-click`` could no longer determine the CLI inherited from click. `#4778 <https://github.com/pypa/pipenv/issues/4778>`_
- Improve documentation around extra indexes and index restricted packages. `#5022 <https://github.com/pypa/pipenv/issues/5022>`_

Removals and Deprecations
-------------------------

- Removes the optional ``install`` argument ``--extra-index-url`` as it was not compatible with index restricted packages.
  Using the ``--index`` argument is the correct way to specify a package should be pulled from the non-default index. `#5022 <https://github.com/pypa/pipenv/issues/5022>`_

Relates to dev process changes
------------------------------

- Added code linting using pre-commit-hooks, black, flake8, isort, pygrep-hooks, news-fragments and check-manifest.
  Very similar to pip's configuration; adds a towncrier news type ``process`` for changes to Development processes.


2022.3.28 (2022-03-27)
======================


Bug Fixes
---------

- Environment variables were not being loaded when the ``--quiet`` flag was set `#5010 <https://github.com/pypa/pipenv/issues/5010>`_
- It would appear that ``requirementslib`` was not fully specifying the subdirectory to ``build_pep517``,
  and when a new version of ``setuptools`` was released, the test ``test_lock_nested_vcs_direct_url``
  broke indicating the Pipfile.lock no longer contained the extra dependencies that should have been resolved.
  This regression affected ``pipenv>=2021.11.9`` but has been fixed by a patch to ``requirementslib``. `#5019 <https://github.com/pypa/pipenv/issues/5019>`_

Vendored Libraries
------------------

- Vendor in pip==21.2.4 (from 21.2.2) in order to bring in requested bug fix for python3.6. Note: support for 3.6 will be dropped in a subsequent release. `#5008 <https://github.com/pypa/pipenv/issues/5008>`_


2022.3.24 (2022-03-23)
======================


Features & Improvements
-----------------------

- It is now possible to silence the ``Loading .env environment variables`` message on ``pipenv run``
  with the ``--quiet`` flag or the ``PIPENV_QUIET`` environment variable. `#4027 <https://github.com/pypa/pipenv/issues/4027>`_

Bug Fixes
---------

- Fixes issue with new index safety restriction, whereby an unnamed extra sources index
  caused an error to be thrown during install. `#5002 <https://github.com/pypa/pipenv/issues/5002>`_
- The text ``Loading .env environment variables...`` has been switched back to stderr so as not to
  break requirements.txt generation. Also it only prints now when a ``.env`` file is actually present. `#5003 <https://github.com/pypa/pipenv/issues/5003>`_


2022.3.23 (2022-03-22)
======================


Features & Improvements
-----------------------

- Use environment variable ``PIPENV_SKIP_LOCK`` to control the behaviour of lock skipping. `#4797 <https://github.com/pypa/pipenv/issues/4797>`_
- New CLI command ``verify``, checks the Pipfile.lock is up-to-date `#4893 <https://github.com/pypa/pipenv/issues/4893>`_

Behavior Changes
----------------

- Pattern expansion for arguments was disabled on Windows. `#4935 <https://github.com/pypa/pipenv/issues/4935>`_

Bug Fixes
---------

- Python versions on Windows can now be installed automatically through pyenv-win `#4525 <https://github.com/pypa/pipenv/issues/4525>`_
- Patched our vendored Pip to fix: Pipenv Lock (Or Install) Does Not Respect Index Specified For A Package. `#4637 <https://github.com/pypa/pipenv/issues/4637>`_
- If ``PIP_TARGET`` is set as an environment variable, refer to the specified directory to calculate the delta, instead of the default directory `#4775 <https://github.com/pypa/pipenv/issues/4775>`_
- Remove remaining mention of python2 and --two flag from codebase. `#4938 <https://github.com/pypa/pipenv/issues/4938>`_
- Use ``CI`` environment value, over mere existence of name `#4944 <https://github.com/pypa/pipenv/issues/4944>`_
- Environment variables from dot env files are now properly expanded when included in scripts. `#4975 <https://github.com/pypa/pipenv/issues/4975>`_

Vendored Libraries
------------------

- Updated vendor version of ``pythonfinder`` from ``1.2.9`` to ``1.2.10`` which fixes a bug with WSL
  (Windows Subsystem for Linux) when a path can not be read and Permission Denied error is encountered. `#4976 <https://github.com/pypa/pipenv/issues/4976>`_

Removals and Deprecations
-------------------------

- Removes long broken argument ``--code`` from ``install`` and ``--unused`` from ``check``.
  Check command no longer takes in arguments to ignore.
  Removed the vendored dependencies: ``pipreqs`` and ``yarg`` `#4998 <https://github.com/pypa/pipenv/issues/4998>`_


2022.1.8 (2022-01-08)
=====================


Bug Fixes
---------

- Remove the extra parentheses around the venv prompt. `#4877 <https://github.com/pypa/pipenv/issues/4877>`_
- Fix a bug where installation fails when an extra index url is given.
  `#4881 <https://github.com/pypa/pipenv/issues/4881>`_
- Fix regression where lockfiles would only include the hashes for releases for the platform generating the lockfile `#4885 <https://github.com/pypa/pipenv/issues/4885>`_
- Fix the index parsing to reject illegal requirements.txt. `#4899 <https://github.com/pypa/pipenv/issues/4899>`_


2021.11.23 (2021-11-23)
=======================


Bug Fixes
---------

- Update ``charset-normalizer`` from ``2.0.3`` to ``2.0.7``, this fixes an import error on Python 3.6. `#4865 <https://github.com/pypa/pipenv/issues/4865>`_
- Fix a bug of deleting a virtualenv that is not managed by Pipenv. `#4867 <https://github.com/pypa/pipenv/issues/4867>`_
- Fix a bug that source is not added to ``Pipfile`` when index url is given with ``pipenv install``. `#4873 <https://github.com/pypa/pipenv/issues/4873>`_


2021.11.15 (2021-11-15)
=======================


Bug Fixes
---------

- Return an empty dict when ``PIPENV_DONT_LOAD_ENV`` is set. `#4851 <https://github.com/pypa/pipenv/issues/4851>`_
- Don't use ``sys.executable`` when inside an activated venv. `#4852 <https://github.com/pypa/pipenv/issues/4852>`_

Vendored Libraries
------------------

- Drop the vendored ``jinja2`` dependency as it is not needed any more. `#4858 <https://github.com/pypa/pipenv/issues/4858>`_
- Update ``click`` from ``8.0.1`` to ``8.0.3``, to fix a problem with bash completion. `#4860 <https://github.com/pypa/pipenv/issues/4860>`_
- Drop unused vendor ``chardet``. `#4862 <https://github.com/pypa/pipenv/issues/4862>`_

Improved Documentation
----------------------

- Fix the documentation to reflect the fact that special characters must be percent-encoded in the URL. `#4856 <https://github.com/pypa/pipenv/issues/4856>`_


2021.11.9 (2021-11-09)
======================


Features & Improvements
-----------------------

- Replace ``click-completion`` with ``click``'s own completion implementation. `#4786 <https://github.com/pypa/pipenv/issues/4786>`_

Bug Fixes
---------

- Fix a bug that ``pipenv run`` doesn't set environment variables correctly. `#4831 <https://github.com/pypa/pipenv/issues/4831>`_
- Fix a bug that certifi can't be loaded within ``notpip``'s vendor library. This makes several objects of ``pip`` fail to be imported. `#4833 <https://github.com/pypa/pipenv/issues/4833>`_
- Fix a bug that ``3.10.0`` can't be found by the python finder. `#4837 <https://github.com/pypa/pipenv/issues/4837>`_

Vendored Libraries
------------------

- Update ``pythonfinder`` from ``1.2.8`` to ``1.2.9``. `#4837 <https://github.com/pypa/pipenv/issues/4837>`_


2021.11.5.post0 (2021-11-05)
============================


Bug Fixes
---------

- Fix a regression that ``pipenv shell`` fails to start a subshell. `#4828 <https://github.com/pypa/pipenv/issues/4828>`_
- Fix a regression that ``pip_shims`` object isn't imported correctly. `#4829 <https://github.com/pypa/pipenv/issues/4829>`_


2021.11.5 (2021-11-05)
======================


Features & Improvements
-----------------------

- Avoid sharing states but create project objects on demand. So that most integration test cases are able to switch to an in-process execution method. `#4757 <https://github.com/pypa/pipenv/issues/4757>`_
- Shell-quote ``pip`` commands when logging. `#4760 <https://github.com/pypa/pipenv/issues/4760>`_
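A minimal sketch of the kind of shell-quoting used for such log output (illustrative only; the command list below is a made-up example, not pipenv's actual invocation)::

    import shlex

    cmd = ["/usr/bin/python3", "-m", "pip", "install", "requests>=2.0", "--no-deps"]
    # shlex.quote escapes each argument so the logged string can be
    # copy-pasted back into a POSIX shell safely.
    print(" ".join(shlex.quote(arg) for arg in cmd))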

Bug Fixes
---------

- Ignore empty .venv in root dir and create project-name-based virtual environment `#4790 <https://github.com/pypa/pipenv/issues/4790>`_

Vendored Libraries
------------------

- Update vendored dependencies
    - ``attrs`` from ``20.3.0`` to ``21.2.0``
    - ``cerberus`` from ``1.3.2`` to ``1.3.4``
    - ``certifi`` from ``2020.11.8`` to ``2021.5.30``
    - ``chardet`` from ``3.0.4`` to ``4.0.0``
    - ``click`` from ``7.1.2`` to ``8.0.1``
    - ``distlib`` from ``0.3.1`` to ``0.3.2``
    - ``idna`` from ``2.10`` to ``3.2``
    - ``importlib-metadata`` from ``2.0.0`` to ``4.6.1``
    - ``importlib-resources`` from ``3.3.0`` to ``5.2.0``
    - ``jinja2`` from ``2.11.2`` to ``3.0.1``
    - ``markupsafe`` from ``1.1.1`` to ``2.0.1``
    - ``more-itertools`` from ``5.0.0`` to ``8.8.0``
    - ``packaging`` from ``20.8`` to ``21.0``
    - ``pep517`` from ``0.9.1`` to ``0.11.0``
    - ``pipdeptree`` from ``1.0.0`` to ``2.0.0``
    - ``ptyprocess`` from ``0.6.0`` to ``0.7.0``
    - ``python-dateutil`` from ``2.8.1`` to ``2.8.2``
    - ``python-dotenv`` from ``0.15.0`` to ``0.19.0``
    - ``pythonfinder`` from ``1.2.5`` to ``1.2.8``
    - ``requests`` from ``2.25.0`` to ``2.26.0``
    - ``shellingham`` from ``1.3.2`` to ``1.4.0``
    - ``six`` from ``1.15.0`` to ``1.16.0``
    - ``tomlkit`` from ``0.7.0`` to ``0.7.2``
    - ``urllib3`` from ``1.26.1`` to ``1.26.6``
    - ``zipp`` from ``1.2.0`` to ``3.5.0``

  Add new vendored dependencies
    - ``charset-normalizer 2.0.3``
    - ``termcolor 1.1.0``
    - ``tomli 1.1.0``
    - ``wheel 0.36.2`` `#4747 <https://github.com/pypa/pipenv/issues/4747>`_
- Drop the dependencies for Python 2.7 compatibility purpose. `#4751 <https://github.com/pypa/pipenv/issues/4751>`_
- Switch the dependency resolver from ``pip-tools`` to ``pip``.

  Update vendor libraries:
    - Update ``requirementslib`` from ``1.5.16`` to ``1.6.1``
    - Update ``pip-shims`` from ``0.5.6`` to ``0.6.0``
    - New vendor ``platformdirs 2.4.0`` `#4759 <https://github.com/pypa/pipenv/issues/4759>`_

Improved Documentation
----------------------

- remove prefixes on install commands for easy copy/pasting `#4792 <https://github.com/pypa/pipenv/issues/4792>`_
- Officially drop support for Python 2.7 and Python 3.5. `#4261 <https://github.com/pypa/pipenv/issues/4261>`_


2021.5.29 (2021-05-29)
======================

Bug Fixes
---------

- Fix a bug where passing --skip-lock when PIPFILE has no [SOURCE] section throws the error: "tomlkit.exceptions.NonExistentKey: 'Key "source" does not exist.'" `#4141 <https://github.com/pypa/pipenv/issues/4141>`_
- Fix bug where environment wouldn't activate in paths containing & and $ symbols `#4538 <https://github.com/pypa/pipenv/issues/4538>`_
- Fix a bug that ``importlib-metadata`` from the project's dependencies conflicts with that from ``pipenv``'s. `#4549 <https://github.com/pypa/pipenv/issues/4549>`_
- Fix a bug where ``pep508checker.py`` did not expect double-digit Python minor versions (e.g. "3.10"). `#4602 <https://github.com/pypa/pipenv/issues/4602>`_
- Fix bug where environment wouldn't activate in paths containing () and [] symbols `#4615 <https://github.com/pypa/pipenv/issues/4615>`_
- Fix bug preventing use of pipenv lock --pre `#4642 <https://github.com/pypa/pipenv/issues/4642>`_

Vendored Libraries
------------------

- Update ``packaging`` from ``20.4`` to ``20.8``.
  `#4591 <https://github.com/pypa/pipenv/issues/4591>`_


2020.11.15 (2020-11-15)
=======================

Features & Improvements
-----------------------

- Support expanding environment variables in requirement URLs. `#3516 <https://github.com/pypa/pipenv/issues/3516>`_
- Show warning message when a dependency is skipped in locking due to the mismatch of its markers. `#4346 <https://github.com/pypa/pipenv/issues/4346>`_

Bug Fixes
---------

- Fix a bug that executable scripts with leading backslash can't be executed via ``pipenv run``. `#4368 <https://github.com/pypa/pipenv/issues/4368>`_
- Fix a bug that VCS dependencies always satisfy even if the ref has changed. `#4387 <https://github.com/pypa/pipenv/issues/4387>`_
- Restrict the acceptable hash type to SHA256 only. `#4517 <https://github.com/pypa/pipenv/issues/4517>`_
- Fix the output of ``pipenv scripts`` under Windows platform. `#4523 <https://github.com/pypa/pipenv/issues/4523>`_
- Fix a bug that the resolver takes wrong section to validate constraints. `#4527 <https://github.com/pypa/pipenv/issues/4527>`_

Vendored Libraries
------------------

- Update vendored dependencies:
    - ``colorama`` from ``0.4.3`` to ``0.4.4``
    - ``python-dotenv`` from ``0.10.3`` to ``0.15.0``
    - ``first`` from ``2.0.1`` to ``2.0.2``
    - ``iso8601`` from ``0.1.12`` to ``0.1.13``
    - ``parse`` from ``1.15.0`` to ``1.18.0``
    - ``pipdeptree`` from ``0.13.2`` to ``1.0.0``
    - ``requests`` from ``2.23.0`` to ``2.25.0``
    - ``idna`` from ``2.9`` to ``2.10``
    - ``urllib3`` from ``1.25.9`` to ``1.26.1``
    - ``certifi`` from ``2020.4.5.1`` to ``2020.11.8``
    - ``requirementslib`` from ``1.5.15`` to ``1.5.16``
    - ``attrs`` from ``19.3.0`` to ``20.3.0``
    - ``distlib`` from ``0.3.0`` to ``0.3.1``
    - ``packaging`` from ``20.3`` to ``20.4``
    - ``six`` from ``1.14.0`` to ``1.15.0``
    - ``semver`` from ``2.9.0`` to ``2.13.0``
    - ``toml`` from ``0.10.1`` to ``0.10.2``
    - ``cached-property`` from ``1.5.1`` to ``1.5.2``
    - ``yaspin`` from ``0.14.3`` to ``1.2.0``
    - ``resolvelib`` from ``0.3.0`` to ``0.5.2``
    - ``pep517`` from ``0.8.2`` to ``0.9.1``
    - ``zipp`` from ``0.6.0`` to ``1.2.0``
    - ``importlib-metadata`` from ``1.6.0`` to ``2.0.0``
    - ``importlib-resources`` from ``1.5.0`` to ``3.3.0`` `#4533 <https://github.com/pypa/pipenv/issues/4533>`_

Improved Documentation
----------------------

- Fix suggested pyenv setup to avoid using shimmed interpreter `#4534 <https://github.com/pypa/pipenv/issues/4534>`_


2020.11.4 (2020-11-04)
======================

Features & Improvements
-----------------------

- Add a new command ``pipenv scripts`` to display shortcuts from Pipfile. `#3686 <https://github.com/pypa/pipenv/issues/3686>`_
- Retrieve package file hash from URL to accelerate the locking process. `#3827 <https://github.com/pypa/pipenv/issues/3827>`_
- Add the missing ``--system`` option to ``pipenv sync``. `#4441 <https://github.com/pypa/pipenv/issues/4441>`_
- Add a new option pair ``--header/--no-header`` to ``pipenv lock`` command,
  which adds a header to the generated requirements.txt `#4443 <https://github.com/pypa/pipenv/issues/4443>`_

Bug Fixes
---------

- Fix a bug that percent encoded characters will be unquoted incorrectly in the file URL. `#4089 <https://github.com/pypa/pipenv/issues/4089>`_
- Fix a bug where setting PIPENV_PYTHON to file path breaks environment name `#4225 <https://github.com/pypa/pipenv/issues/4225>`_
- Fix a bug that paths are not normalized before comparison.
  `#4330 <https://github.com/pypa/pipenv/issues/4330>`_
- Handle Python major and minor versions correctly in Pipfile creation. `#4379 <https://github.com/pypa/pipenv/issues/4379>`_
- Fix a bug that non-wheel file requirements can be resolved successfully. `#4386 <https://github.com/pypa/pipenv/issues/4386>`_
- Fix a bug that ``pexept.exceptions.TIMEOUT`` is not caught correctly because of the wrong import path. `#4424 <https://github.com/pypa/pipenv/issues/4424>`_
- Fix a bug that compound TOML table is not parsed correctly. `#4433 <https://github.com/pypa/pipenv/issues/4433>`_
- Fix a bug that invalid Python paths from Windows registry break ``pipenv install``. `#4436 <https://github.com/pypa/pipenv/issues/4436>`_
- Fix a bug that function calls in ``setup.py`` can't be parsed correctly. `#4446 <https://github.com/pypa/pipenv/issues/4446>`_
- Fix a bug that dist-info inside ``venv`` directory will be mistaken as the editable package's metadata. `#4480 <https://github.com/pypa/pipenv/issues/4480>`_
- Make the order of hashes in resolution result stable. `#4513 <https://github.com/pypa/pipenv/issues/4513>`_

Vendored Libraries
------------------

- Update ``tomlkit`` from ``0.5.11`` to ``0.7.0``. `#4433 <https://github.com/pypa/pipenv/issues/4433>`_
- Update ``requirementslib`` from ``1.5.13`` to ``1.5.14``. `#4480 <https://github.com/pypa/pipenv/issues/4480>`_

Improved Documentation
----------------------

- Discourage homebrew installation in installation guides. `#4013 <https://github.com/pypa/pipenv/issues/4013>`_


2020.8.13 (2020-08-13)
======================

Bug Fixes
---------

- Fixed behaviour of ``pipenv uninstall --all-dev``.
  From now on it does not uninstall regular packages. `#3722 <https://github.com/pypa/pipenv/issues/3722>`_
- Fix a bug that incorrect Python path will be used when ``--system`` flag is on. `#4315 <https://github.com/pypa/pipenv/issues/4315>`_
- Fix falsely flagging a Homebrew installed Python as a virtual environment `#4316 <https://github.com/pypa/pipenv/issues/4316>`_
- Fix a bug that ``pipenv uninstall`` throws an exception that does not exist. `#4321 <https://github.com/pypa/pipenv/issues/4321>`_
- Fix a bug that Pipenv can't locate the correct file of special directives in ``setup.cfg`` of an editable package. `#4335 <https://github.com/pypa/pipenv/issues/4335>`_
- Fix a bug that ``setup.py`` can't be parsed correctly when the assignment is type-annotated. `#4342 <https://github.com/pypa/pipenv/issues/4342>`_
- Fix a bug that ``pipenv graph`` throws an exception that PipenvCmdError(cmd_string, c.out, c.err, return_code). `#4388 <https://github.com/pypa/pipenv/issues/4388>`_
- Do not copy the whole directory tree of local file package. `#4403 <https://github.com/pypa/pipenv/issues/4403>`_
- Correctly detect whether Pipenv is run under an activated virtualenv. `#4412 <https://github.com/pypa/pipenv/issues/4412>`_

Vendored Libraries
------------------

- Update ``requirementslib`` to ``1.5.12``. `#4385 <https://github.com/pypa/pipenv/issues/4385>`_
- * Update ``requirements`` to ``1.5.13``.
  * Update ``pip-shims`` to ``0.5.3``. `#4421 <https://github.com/pypa/pipenv/issues/4421>`_


2020.6.2 (2020-06-02)
=====================

Features & Improvements
-----------------------

- Pipenv will now detect existing ``venv`` and ``virtualenv`` based virtual environments more robustly.
  `#4276 <https://github.com/pypa/pipenv/issues/4276>`_

Bug Fixes
---------

- ``+`` signs in URL authentication fragments will no longer be incorrectly replaced with space ( `` `` ) characters. `#4271 <https://github.com/pypa/pipenv/issues/4271>`_
- Fixed a regression which caused Pipenv to fail when running under ``/``. `#4273 <https://github.com/pypa/pipenv/issues/4273>`_
- ``setup.py`` files with ``version`` variables read from ``os.environ`` are now able to be parsed successfully. `#4274 <https://github.com/pypa/pipenv/issues/4274>`_
- Fixed a bug which caused Pipenv to fail to install packages in a virtual environment if those packages were already present in the system global environment. `#4276 <https://github.com/pypa/pipenv/issues/4276>`_
- Fix a bug that caused non-specific versions to be pinned in ``Pipfile.lock``. `#4278 <https://github.com/pypa/pipenv/issues/4278>`_
- Corrected a missing exception import and invalid function call invocations in ``pipenv.cli.command``. `#4286 <https://github.com/pypa/pipenv/issues/4286>`_
- Fixed an issue with resolving packages with names defined by function calls in ``setup.py``. `#4292 <https://github.com/pypa/pipenv/issues/4292>`_
- Fixed a regression with installing the current directory, or ``.``, inside a ``venv`` based virtual environment. `#4295 <https://github.com/pypa/pipenv/issues/4295>`_
- Fixed a bug with the discovery of python paths on Windows which could prevent installation of environments during ``pipenv install``. `#4296 <https://github.com/pypa/pipenv/issues/4296>`_
- Fixed an issue in the ``requirementslib`` AST parser which prevented parsing of ``setup.py`` files for dependency metadata. `#4298 <https://github.com/pypa/pipenv/issues/4298>`_
- Fix a bug where Pipenv doesn't realize the session is interactive `#4305 <https://github.com/pypa/pipenv/issues/4305>`_

Vendored Libraries
------------------

- Updated requirementslib to version ``1.5.11``. `#4292 <https://github.com/pypa/pipenv/issues/4292>`_
- Updated vendored dependencies:
    - **pythonfinder**: ``1.2.2`` => ``1.2.4``
    - **requirementslib**: ``1.5.9`` => ``1.5.10`` `#4302 <https://github.com/pypa/pipenv/issues/4302>`_


2020.5.28 (2020-05-28)
======================

Features & Improvements
-----------------------

- ``pipenv install`` and ``pipenv sync`` will no longer attempt to install satisfied dependencies during installation. `#3057 <https://github.com/pypa/pipenv/issues/3057>`_,
  `#3506 <https://github.com/pypa/pipenv/issues/3506>`_
- Added support for resolution of direct-url dependencies in ``setup.py`` files to respect ``PEP-508`` style URL dependencies. `#3148 <https://github.com/pypa/pipenv/issues/3148>`_
- Added full support for resolution of all dependency types including direct URLs, zip archives, tarballs, etc.

  - Improved error handling and formatting.

  - Introduced improved cross platform stream wrappers for better ``stdout`` and ``stderr`` consistency. `#3298 <https://github.com/pypa/pipenv/issues/3298>`_
- For consistency with other commands and the ``--dev`` option
  description, ``pipenv lock --requirements --dev`` now emits
  both default and development dependencies.
  The new ``--dev-only`` option requests the previous
  behaviour (e.g. to generate a ``dev-requirements.txt`` file). `#3316 <https://github.com/pypa/pipenv/issues/3316>`_
- Pipenv will now successfully recursively lock VCS sub-dependencies.
  `#3328 <https://github.com/pypa/pipenv/issues/3328>`_
- Added support for ``--verbose`` output to ``pipenv run``. `#3348 <https://github.com/pypa/pipenv/issues/3348>`_
- Pipenv will now discover and resolve the intrinsic dependencies of **all** VCS dependencies, whether they are editable or not, to prevent resolution conflicts. `#3368 <https://github.com/pypa/pipenv/issues/3368>`_
- Added a new environment variable, ``PIPENV_RESOLVE_VCS``, to toggle dependency resolution off for non-editable VCS, file, and URL based dependencies. `#3577 <https://github.com/pypa/pipenv/issues/3577>`_
- Added the ability for Windows users to enable emojis by setting ``PIPENV_HIDE_EMOJIS=0``. `#3595 <https://github.com/pypa/pipenv/issues/3595>`_
- Allow overriding PIPENV_INSTALL_TIMEOUT environment variable (in seconds). `#3652 <https://github.com/pypa/pipenv/issues/3652>`_
- Allow overriding PIP_EXISTS_ACTION environment variable (value is passed to pip install).
  Possible values here: https://pip.pypa.io/en/stable/reference/pip/#exists-action-option
  Useful when you need to ``PIP_EXISTS_ACTION=i`` (ignore existing packages) - great for CI environments, where you need really fast setup. `#3738 <https://github.com/pypa/pipenv/issues/3738>`_
- Pipenv will no longer forcibly override ``PIP_NO_DEPS`` on all vcs and file dependencies as resolution happens on these in a pre-lock step. `#3763 <https://github.com/pypa/pipenv/issues/3763>`_
- Improved verbose logging output during ``pipenv lock`` will now stream output to the console while maintaining a spinner. `#3810 <https://github.com/pypa/pipenv/issues/3810>`_
- Added support for automatic python installs via ``asdf`` and associated ``PIPENV_DONT_USE_ASDF`` environment variable. `#4018 <https://github.com/pypa/pipenv/issues/4018>`_
- Pyenv/asdf can now be used whether or not they are available on PATH. Setting PYENV_ROOT/ASDF_DIR in a Pipenv's .env allows Pipenv to install an interpreter without any shell customizations, so long as pyenv/asdf is installed. `#4245 <https://github.com/pypa/pipenv/issues/4245>`_
- Added ``--key`` command line parameter for including personal PyUp.io API tokens when running ``pipenv check``. `#4257 <https://github.com/pypa/pipenv/issues/4257>`_

Behavior Changes
----------------

- Make conservative checks of known exceptions when subprocess returns output, so user won't see the whole traceback - just the error. `#2553 <https://github.com/pypa/pipenv/issues/2553>`_
- Do not touch Pipfile early and rely on it so that one can do ``pipenv sync`` without a Pipfile. `#3386 <https://github.com/pypa/pipenv/issues/3386>`_
- Re-enable ``--help`` option for ``pipenv run`` command. `#3844 <https://github.com/pypa/pipenv/issues/3844>`_
- Make sure ``pipenv lock -r --pypi-mirror {MIRROR_URL}`` will respect the pypi-mirror in requirements output. `#4199 <https://github.com/pypa/pipenv/issues/4199>`_

Bug Fixes
---------

- Raise ``PipenvUsageError`` when [[source]] does not contain url field. `#2373 <https://github.com/pypa/pipenv/issues/2373>`_
- Fixed a bug which caused editable package resolution to sometimes fail with an unhelpful setuptools-related error message. `#2722 <https://github.com/pypa/pipenv/issues/2722>`_
- Fixed an issue which caused errors due to reliance on the system utilities ``which`` and ``where`` which may not always exist on some systems.

  Fixed a bug which caused periodic failures in python discovery when executables named ``python`` were not present on the target ``$PATH``.
  `#2783 <https://github.com/pypa/pipenv/issues/2783>`_
- Dependency resolution now writes hashes for local and remote files to the lockfile. `#3053 <https://github.com/pypa/pipenv/issues/3053>`_
- Fixed a bug which prevented ``pipenv graph`` from correctly showing all dependencies when running from within ``pipenv shell``. `#3071 <https://github.com/pypa/pipenv/issues/3071>`_
- Fixed resolution of direct-url dependencies in ``setup.py`` files to respect ``PEP-508`` style URL dependencies. `#3148 <https://github.com/pypa/pipenv/issues/3148>`_
- Fixed a bug which caused failures in warning reporting when running pipenv inside a virtualenv under some circumstances.

  - Fixed a bug with package discovery when running ``pipenv clean``. `#3298 <https://github.com/pypa/pipenv/issues/3298>`_
- Quote command arguments with carets (``^``) on Windows to work around unintended shell escapes. `#3307 <https://github.com/pypa/pipenv/issues/3307>`_
- Handle alternate names for UTF-8 encoding. `#3313 <https://github.com/pypa/pipenv/issues/3313>`_
- Abort pipenv before adding the non-existent package to Pipfile. `#3318 <https://github.com/pypa/pipenv/issues/3318>`_
- Don't normalize the package name user passes in. `#3324 <https://github.com/pypa/pipenv/issues/3324>`_
- Fix a bug where custom virtualenv can not be activated with pipenv shell `#3339 <https://github.com/pypa/pipenv/issues/3339>`_
- Fix a bug that ``--site-packages`` flag is not recognized. `#3351 <https://github.com/pypa/pipenv/issues/3351>`_
- Fix a bug where pipenv --clear is not working `#3353 <https://github.com/pypa/pipenv/issues/3353>`_
- Fix unhashable type error during ``$ pipenv install --selective-upgrade`` `#3384 <https://github.com/pypa/pipenv/issues/3384>`_
- Dependencies with direct ``PEP508`` compliant VCS URLs specified in their ``install_requires`` will now be successfully locked during the resolution process. `#3396 <https://github.com/pypa/pipenv/issues/3396>`_
- Fixed a keyerror which could occur when locking VCS dependencies in some cases. `#3404 <https://github.com/pypa/pipenv/issues/3404>`_
- Fixed a bug that ``ValidationError`` is thrown when some fields are missing in source section. `#3427 <https://github.com/pypa/pipenv/issues/3427>`_
- Updated the index names in lock file when source name in Pipfile is changed. `#3449 <https://github.com/pypa/pipenv/issues/3449>`_
- Fixed an issue which caused ``pipenv install --help`` to show duplicate entries for ``--pre``. `#3479 <https://github.com/pypa/pipenv/issues/3479>`_
- Fix bug causing ``[SSL: CERTIFICATE_VERIFY_FAILED]`` when Pipfile ``[[source]]`` has verify_ssl=false and url with custom port. `#3502 <https://github.com/pypa/pipenv/issues/3502>`_
- Fix ``sync --sequential`` ignoring ``pip install`` errors and logs. `#3537 <https://github.com/pypa/pipenv/issues/3537>`_
- Fix the issue that lock file can't be created when ``PIPENV_PIPFILE`` is not under working directory. `#3584 <https://github.com/pypa/pipenv/issues/3584>`_
- Pipenv will no longer inadvertently set ``editable=True`` on all vcs dependencies. `#3647 <https://github.com/pypa/pipenv/issues/3647>`_
- The ``--keep-outdated`` argument to ``pipenv install`` and ``pipenv lock`` will now drop specifier constraints when encountering editable dependencies.

  In addition, ``--keep-outdated`` will retain specifiers that would otherwise be dropped from any entries that have not been updated.
  `#3656 <https://github.com/pypa/pipenv/issues/3656>`_
- Fixed a bug which sometimes caused pipenv to fail to respect the ``--site-packages`` flag when passed with ``pipenv install``. `#3718 <https://github.com/pypa/pipenv/issues/3718>`_
- Normalize the package names to lowercase when comparing used and in-Pipfile packages. `#3745 <https://github.com/pypa/pipenv/issues/3745>`_
- ``pipenv update --outdated`` will now correctly handle comparisons between pre/post-releases and normal releases. `#3766 <https://github.com/pypa/pipenv/issues/3766>`_
- Fixed a ``KeyError`` which could occur when pinning outdated VCS dependencies via ``pipenv lock --keep-outdated``. `#3768 <https://github.com/pypa/pipenv/issues/3768>`_
- Resolved an issue which caused resolution to fail when encountering poorly formatted ``python_version`` markers in ``setup.py`` and ``setup.cfg`` files. `#3786 <https://github.com/pypa/pipenv/issues/3786>`_
- Fix a bug that installation errors are displayed as a list. `#3794 <https://github.com/pypa/pipenv/issues/3794>`_
- Update ``pythonfinder`` to fix a problem that ``python.exe`` will be mistakenly chosen for
  virtualenv creation under WSL. `#3807 <https://github.com/pypa/pipenv/issues/3807>`_
- Fixed several bugs which could prevent editable VCS dependencies from being installed into target environments, even when reporting successful installation. `#3809 <https://github.com/pypa/pipenv/issues/3809>`_
- ``pipenv check --system`` should find the correct Python interpreter when ``python`` does not exist on the system. `#3819 <https://github.com/pypa/pipenv/issues/3819>`_
- Resolve the symlinks when the path is absolute. `#3842 <https://github.com/pypa/pipenv/issues/3842>`_
- Pass ``--pre`` and ``--clear`` options to ``pipenv update --outdated``. `#3879 <https://github.com/pypa/pipenv/issues/3879>`_
- Fixed a bug which prevented resolution of direct URL dependencies which have PEP508 style direct url VCS sub-dependencies with subdirectories. `#3976 <https://github.com/pypa/pipenv/issues/3976>`_
- Honor PIPENV_SPINNER environment variable `#4045 <https://github.com/pypa/pipenv/issues/4045>`_
- Fixed an issue with ``pipenv check`` failing due to an invalid API key from ``pyup.io``. `#4188 <https://github.com/pypa/pipenv/issues/4188>`_
- Fixed a bug which caused versions from VCS dependencies to be included in ``Pipfile.lock`` inadvertently. `#4217 <https://github.com/pypa/pipenv/issues/4217>`_
- Fixed a bug which caused pipenv to search non-existent virtual environments for ``pip`` when installing using ``--system``. `#4220 <https://github.com/pypa/pipenv/issues/4220>`_
- ``Requires-Python`` values specifying constraint versions of python starting from ``1.x`` will now be parsed successfully. `#4226 <https://github.com/pypa/pipenv/issues/4226>`_
- Fix a bug of ``pipenv update --outdated`` that can't print output correctly. `#4229 <https://github.com/pypa/pipenv/issues/4229>`_
- Fixed a bug which caused pipenv to prefer source distributions over wheels from ``PyPI`` during the dependency resolution phase.
  Fixed an issue which prevented proper build isolation using ``pep517`` based builders during dependency resolution. `#4231 <https://github.com/pypa/pipenv/issues/4231>`_
- Don't fallback to system Python when no matching Python version is found.
  `#4232 <https://github.com/pypa/pipenv/issues/4232>`_

Vendored Libraries
------------------

- Updated vendored dependencies:

    - **attrs**: ``18.2.0`` => ``19.1.0``
    - **certifi**: ``2018.10.15`` => ``2019.3.9``
    - **cached_property**: ``1.4.3`` => ``1.5.1``
    - **cerberus**: ``1.2.0`` => ``1.3.1``
    - **click-completion**: ``0.5.0`` => ``0.5.1``
    - **colorama**: ``0.3.9`` => ``0.4.1``
    - **distlib**: ``0.2.8`` => ``0.2.9``
    - **idna**: ``2.7`` => ``2.8``
    - **jinja2**: ``2.10.0`` => ``2.10.1``
    - **markupsafe**: ``1.0`` => ``1.1.1``
    - **orderedmultidict**: ``(new)`` => ``1.0``
    - **packaging**: ``18.0`` => ``19.0``
    - **parse**: ``1.9.0`` => ``1.12.0``
    - **pathlib2**: ``2.3.2`` => ``2.3.3``
    - **pep517**: ``(new)`` => ``0.5.0``
    - **pexpect**: ``4.6.0`` => ``4.7.0``
    - **pipdeptree**: ``0.13.0`` => ``0.13.2``
    - **pyparsing**: ``2.2.2`` => ``2.3.1``
    - **python-dotenv**: ``0.9.1`` => ``0.10.2``
    - **pythonfinder**: ``1.1.10`` => ``1.2.1``
    - **pytoml**: ``(new)`` => ``0.1.20``
    - **requests**: ``2.20.1`` => ``2.21.0``
    - **requirementslib**: ``1.3.3`` => ``1.5.0``
    - **scandir**: ``1.9.0`` => ``1.10.0``
    - **shellingham**: ``1.2.7`` => ``1.3.1``
    - **six**: ``1.11.0`` => ``1.12.0``
    - **tomlkit**: ``0.5.2`` => ``0.5.3``
    - **urllib3**: ``1.24`` => ``1.25.2``
    - **vistir**: ``0.3.0`` => ``0.4.1``
    - **yaspin**: ``0.14.0`` => ``0.14.3``

    - Removed vendored dependency **cursor**. `#3298 <https://github.com/pypa/pipenv/issues/3298>`_
- Updated ``pip_shims`` to support ``--outdated`` with new pip versions. `#3766 <https://github.com/pypa/pipenv/issues/3766>`_
- Update vendored dependencies and invocations

    - Update vendored and patched dependencies
    - Update patches on ``piptools``, ``pip``, ``pip-shims``, ``tomlkit``
    - Fix invocations of dependencies
    - Fix custom ``InstallCommand`` instantiation
    - Update ``PackageFinder`` usage
    - Fix ``Bool`` stringify attempts from ``tomlkit``

    - Updated vendored dependencies:
        - **attrs**: ``18.2.0`` => ``19.1.0``
        - **certifi**: ``2018.10.15`` => ``2019.3.9``
        - **cached_property**: ``1.4.3`` => ``1.5.1``
        - **cerberus**: ``1.2.0`` => ``1.3.1``
        - **click**: ``7.0.0`` => ``7.1.1``
        - **click-completion**: ``0.5.0`` => ``0.5.1``
        - **colorama**: ``0.3.9`` => ``0.4.3``
        - **contextlib2**: ``(new)`` => ``0.6.0.post1``
        - **distlib**: ``0.2.8`` => ``0.2.9``
        - **funcsigs**: ``(new)`` => ``1.0.2``
        - **importlib_metadata** ``1.3.0`` => ``1.5.1``
        - **importlib-resources**: ``(new)`` => ``1.4.0``
        - **idna**: ``2.7`` => ``2.9``
        - **jinja2**: ``2.10.0`` => ``2.11.1``
        - **markupsafe**: ``1.0`` => ``1.1.1``
        - **more-itertools**: ``(new)`` => ``5.0.0``
        - **orderedmultidict**: ``(new)`` => ``1.0``
        - **packaging**: ``18.0`` => ``19.0``
        - **parse**: ``1.9.0`` => ``1.15.0``
        - **pathlib2**: ``2.3.2`` => ``2.3.3``
        - **pep517**: ``(new)`` => ``0.5.0``
        - **pexpect**: ``4.6.0`` => ``4.8.0``
        - **pip-shims**: ``0.2.0`` => ``0.5.1``
        - **pipdeptree**: ``0.13.0`` => ``0.13.2``
        - **pyparsing**: ``2.2.2`` => ``2.4.6``
        - **python-dotenv**: ``0.9.1`` => ``0.10.2``
        - **pythonfinder**: ``1.1.10`` => ``1.2.2``
        - **pytoml**: ``(new)`` => ``0.1.20``
        - **requests**: ``2.20.1`` => ``2.23.0``
        - **requirementslib**: ``1.3.3`` => ``1.5.4``
        - **scandir**: ``1.9.0`` => ``1.10.0``
        - **shellingham**: ``1.2.7`` => ``1.3.2``
        - **six**: ``1.11.0`` => ``1.14.0``
        - **tomlkit**: ``0.5.2`` => ``0.5.11``
        - **urllib3**: ``1.24`` => ``1.25.8``
        - **vistir**: ``0.3.0`` => ``0.5.0``
        - **yaspin**: ``0.14.0`` => ``0.14.3``
        - **zipp**: ``0.6.0``

    - Removed vendored dependency **cursor**. `#4169 <https://github.com/pypa/pipenv/issues/4169>`_
- Add and update vendored dependencies to accommodate ``safety`` vendoring:
  **safety** ``(none)`` => ``1.8.7``
  **dparse** ``(none)`` => ``0.5.0``
  **pyyaml** ``(none)`` => ``5.3.1``
  **urllib3** ``1.25.8`` => ``1.25.9``
  **certifi** ``2019.11.28`` => ``2020.4.5.1``
  **pyparsing** ``2.4.6`` => ``2.4.7``
  **resolvelib** ``0.2.2`` => ``0.3.0``
  **importlib-metadata** ``1.5.1`` => ``1.6.0``
  **pip-shims** ``0.5.1`` => ``0.5.2``
  **requirementslib** ``1.5.5`` => ``1.5.6`` `#4188 <https://github.com/pypa/pipenv/issues/4188>`_
- Updated vendored ``pip`` => ``20.0.2`` and ``pip-tools`` => ``5.0.0``. `#4215 <https://github.com/pypa/pipenv/issues/4215>`_
- Updated vendored dependencies to latest versions for security and bug fixes:

    - **requirementslib** ``1.5.8`` => ``1.5.9``
    - **vistir** ``0.5.0`` => ``0.5.1``
    - **jinja2** ``2.11.1`` => ``2.11.2``
    - **click** ``7.1.1`` => ``7.1.2``
    - **dateutil** ``(none)`` => ``2.8.1``
    - **backports.functools_lru_cache** ``1.5.0`` => ``1.6.1``
    - **enum34** ``1.1.6`` => ``1.1.10``
    - **toml** ``0.10.0`` => ``0.10.1``
    - **importlib_resources** ``1.4.0`` => ``1.5.0`` `#4226 <https://github.com/pypa/pipenv/issues/4226>`_
- Changed attrs import path in vendored dependencies to always import from ``pipenv.vendor``. `#4267 <https://github.com/pypa/pipenv/issues/4267>`_

Improved Documentation
----------------------

- Added documentation about variable expansion in ``Pipfile`` entries. `#2317 <https://github.com/pypa/pipenv/issues/2317>`_
- Consolidate all contributing docs in the rst file `#3120 <https://github.com/pypa/pipenv/issues/3120>`_
- Update the out-dated manual page. `#3246 <https://github.com/pypa/pipenv/issues/3246>`_
- Move CLI docs to its own page. `#3346 <https://github.com/pypa/pipenv/issues/3346>`_
- Replace (non-existent) video on docs index.rst with equivalent gif. `#3499 <https://github.com/pypa/pipenv/issues/3499>`_
- Clarify wording in Basic Usage example on using double quotes to escape shell redirection `#3522 <https://github.com/pypa/pipenv/issues/3522>`_
- Ensure docs show navigation on small-screen devices `#3527 <https://github.com/pypa/pipenv/issues/3527>`_
- Added a link to the TOML Spec under General Recommendations & Version Control to clarify how Pipfiles should be written. `#3629 <https://github.com/pypa/pipenv/issues/3629>`_
- Updated the documentation with the new ``pytest`` entrypoint. `#3759 <https://github.com/pypa/pipenv/issues/3759>`_
- Fix link to GIF in README.md demonstrating Pipenv's usage, and add descriptive alt text. `#3911 <https://github.com/pypa/pipenv/issues/3911>`_
- Added a line describing potential issues in fancy extension. `#3912 <https://github.com/pypa/pipenv/issues/3912>`_
- Added a description of how Pipfile works and its association with Pipenv. `#3913 <https://github.com/pypa/pipenv/issues/3913>`_
- Clarify the proper value of ``python_version`` and ``python_full_version``. `#3914 <https://github.com/pypa/pipenv/issues/3914>`_
- Write description for the --deploy extension and a few extension differences. `#3915 <https://github.com/pypa/pipenv/issues/3915>`_
- More documentation for ``.env`` files `#4100 <https://github.com/pypa/pipenv/issues/4100>`_
- Updated documentation to point to working links.
`#4137 <https://github.com/pypa/pipenv/issues/4137>`_ -- Replace docs.pipenv.org with pipenv.pypa.io `#4167 <https://github.com/pypa/pipenv/issues/4167>`_ -- Added functionality to check spelling in documentation and cleaned up existing typographical issues. `#4209 <https://github.com/pypa/pipenv/issues/4209>`_ - - -2018.11.26 (2018-11-26) -======================= - -Bug Fixes ---------- - -- Environment variables are expanded correctly before running scripts on POSIX. `#3178 <https://github.com/pypa/pipenv/issues/3178>`_ -- Pipenv will no longer disable user-mode installation when the ``--system`` flag is passed in. `#3222 <https://github.com/pypa/pipenv/issues/3222>`_ -- Fixed an issue with attempting to render unicode output in non-unicode locales. `#3223 <https://github.com/pypa/pipenv/issues/3223>`_ -- Fixed a bug which could cause failures to occur when parsing python entries from global pyenv version files. `#3224 <https://github.com/pypa/pipenv/issues/3224>`_ -- Fixed an issue which prevented the parsing of named extras sections from certain ``setup.py`` files. `#3230 <https://github.com/pypa/pipenv/issues/3230>`_ -- Correctly detect the virtualenv location inside an activated virtualenv. `#3231 <https://github.com/pypa/pipenv/issues/3231>`_ -- Fixed a bug which caused spinner frames to be written to standard output during locking operations which could cause redirection pipes to fail. `#3239 <https://github.com/pypa/pipenv/issues/3239>`_ -- Fixed a bug that editable packages can't be uninstalled correctly. `#3240 <https://github.com/pypa/pipenv/issues/3240>`_ -- Corrected an issue with installation timeouts which caused dependency resolution to fail for longer duration resolution steps. `#3244 <https://github.com/pypa/pipenv/issues/3244>`_ -- Adding normal pep 508 compatible markers is now fully functional when using VCS dependencies. `#3249 <https://github.com/pypa/pipenv/issues/3249>`_ -- Updated ``requirementslib`` and ``pythonfinder`` for multiple bug fixes. `#3254 <https://github.com/pypa/pipenv/issues/3254>`_ -- Pipenv will now ignore hashes when installing with ``--skip-lock``. `#3255 <https://github.com/pypa/pipenv/issues/3255>`_ -- Fixed an issue where pipenv could crash when multiple pipenv processes attempted to create the same directory. `#3257 <https://github.com/pypa/pipenv/issues/3257>`_ -- Fixed an issue which sometimes prevented successful creation of a project Pipfile. `#3260 <https://github.com/pypa/pipenv/issues/3260>`_ -- ``pipenv install`` will now unset the ``PYTHONHOME`` environment variable when not combined with ``--system``. `#3261 <https://github.com/pypa/pipenv/issues/3261>`_ -- Pipenv will ensure that warnings do not interfere with the resolution process by suppressing warnings' usage of standard output and writing to standard error instead. `#3273 <https://github.com/pypa/pipenv/issues/3273>`_ -- Fixed an issue which prevented variables from the environment, such as ``PIPENV_DEV`` or ``PIPENV_SYSTEM``, from being parsed and implemented correctly. `#3278 <https://github.com/pypa/pipenv/issues/3278>`_ -- Clear pythonfinder cache after Python install. `#3287 <https://github.com/pypa/pipenv/issues/3287>`_ -- Fixed a race condition in hash resolution for dependencies for certain dependencies with missing cache entries or fresh Pipenv installs. `#3289 <https://github.com/pypa/pipenv/issues/3289>`_ -- Pipenv will now respect top-level pins over VCS dependency locks. 
`#3296 <https://github.com/pypa/pipenv/issues/3296>`_ - -Vendored Libraries ------------------- - -- Update vendored dependencies to resolve resolution output parsing and python finding: - - ``pythonfinder 1.1.9 -> 1.1.10`` - - ``requirementslib 1.3.1 -> 1.3.3`` - - ``vistir 0.2.3 -> 0.2.5`` `#3280 <https://github.com/pypa/pipenv/issues/3280>`_ - - -2018.11.14 (2018-11-14) -======================= - -Features & Improvements ------------------------ - -- Improved exceptions and error handling on failures. `#1977 <https://github.com/pypa/pipenv/issues/1977>`_ -- Added persistent settings for all CLI flags via ``PIPENV_{FLAG_NAME}`` environment variables by enabling ``auto_envvar_prefix=PIPENV`` in click (implements PEEP-0002). `#2200 <https://github.com/pypa/pipenv/issues/2200>`_ -- Added improved messaging about available but skipped updates due to dependency conflicts when running ``pipenv update --outdated``. `#2411 <https://github.com/pypa/pipenv/issues/2411>`_ -- Added environment variable ``PIPENV_PYUP_API_KEY`` to add ability - to override the bundled PyUP.io API key. `#2825 <https://github.com/pypa/pipenv/issues/2825>`_ -- Added additional output to ``pipenv update --outdated`` to indicate that the operation succeeded and all packages were already up to date. `#2828 <https://github.com/pypa/pipenv/issues/2828>`_ -- Updated ``crayons`` patch to enable colors on native powershell but swap native blue for magenta. `#3020 <https://github.com/pypa/pipenv/issues/3020>`_ -- Added support for ``--bare`` to ``pipenv clean``, and fixed ``pipenv sync --bare`` to actually reduce output. `#3041 <https://github.com/pypa/pipenv/issues/3041>`_ -- Added windows-compatible spinner via upgraded ``vistir`` dependency. `#3089 <https://github.com/pypa/pipenv/issues/3089>`_ -- - Added support for python installations managed by ``asdf``. `#3096 <https://github.com/pypa/pipenv/issues/3096>`_ -- Improved runtime performance of no-op commands such as ``pipenv --venv`` by around 2/3. `#3158 <https://github.com/pypa/pipenv/issues/3158>`_ -- Do not show error but success for running ``pipenv uninstall --all`` in a fresh virtual environment. `#3170 <https://github.com/pypa/pipenv/issues/3170>`_ -- Improved asynchronous installation and error handling via queued subprocess parallelization. `#3217 <https://github.com/pypa/pipenv/issues/3217>`_ - -Bug Fixes ---------- - -- Remote non-PyPI artifacts and local wheels and artifacts will now include their own hashes rather than including hashes from ``PyPI``. `#2394 <https://github.com/pypa/pipenv/issues/2394>`_ -- Non-ascii characters will now be handled correctly when parsed by pipenv's ``ToML`` parsers. `#2737 <https://github.com/pypa/pipenv/issues/2737>`_ -- Updated ``pipenv uninstall`` to respect the ``--skip-lock`` argument. `#2848 <https://github.com/pypa/pipenv/issues/2848>`_ -- Fixed a bug which caused uninstallation to sometimes fail to successfully remove packages from ``Pipfiles`` with comments on preceding or following lines. `#2885 <https://github.com/pypa/pipenv/issues/2885>`_, - `#3099 <https://github.com/pypa/pipenv/issues/3099>`_ -- Pipenv will no longer fail when encountering python versions on Windows that have been uninstalled. `#2983 <https://github.com/pypa/pipenv/issues/2983>`_ -- Fixed unnecessary extras are added when translating markers `#3026 <https://github.com/pypa/pipenv/issues/3026>`_ -- Fixed a virtualenv creation issue which could cause new virtualenvs to inadvertently attempt to read and write to global site packages. 
`#3047 <https://github.com/pypa/pipenv/issues/3047>`_ -- Fixed an issue with virtualenv path derivation which could cause errors, particularly for users on WSL bash. `#3055 <https://github.com/pypa/pipenv/issues/3055>`_ -- Fixed a bug which caused ``Unexpected EOF`` errors to be thrown when ``pip`` was waiting for input from users who had put login credentials in environment variables. `#3088 <https://github.com/pypa/pipenv/issues/3088>`_ -- Fixed a bug in ``requirementslib`` which prevented successful installation from mercurial repositories. `#3090 <https://github.com/pypa/pipenv/issues/3090>`_ -- Fixed random resource warnings when using pyenv or any other subprocess calls. `#3094 <https://github.com/pypa/pipenv/issues/3094>`_ -- - Fixed a bug which sometimes prevented cloning and parsing ``mercurial`` requirements. `#3096 <https://github.com/pypa/pipenv/issues/3096>`_ -- Fixed an issue in ``delegator.py`` related to subprocess calls when using ``PopenSpawn`` to stream output, which sometimes threw unexpected ``EOF`` errors. `#3102 <https://github.com/pypa/pipenv/issues/3102>`_, - `#3114 <https://github.com/pypa/pipenv/issues/3114>`_, - `#3117 <https://github.com/pypa/pipenv/issues/3117>`_ -- Fix the path casing issue that makes ``pipenv clean`` fail on Windows `#3104 <https://github.com/pypa/pipenv/issues/3104>`_ -- Pipenv will avoid leaving build artifacts in the current working directory. `#3106 <https://github.com/pypa/pipenv/issues/3106>`_ -- Fixed issues with broken subprocess calls leaking resource handles and causing random and sporadic failures. `#3109 <https://github.com/pypa/pipenv/issues/3109>`_ -- Fixed an issue which caused ``pipenv clean`` to sometimes clean packages from the base ``site-packages`` folder or fail entirely. `#3113 <https://github.com/pypa/pipenv/issues/3113>`_ -- Updated ``pythonfinder`` to correct an issue with unnesting of nested paths when searching for python versions. `#3121 <https://github.com/pypa/pipenv/issues/3121>`_ -- Added additional logic for ignoring and replacing non-ascii characters when formatting console output on non-UTF-8 systems. `#3131 <https://github.com/pypa/pipenv/issues/3131>`_ -- Fix virtual environment discovery when ``PIPENV_VENV_IN_PROJECT`` is set, but the in-project ``.venv`` is a file. `#3134 <https://github.com/pypa/pipenv/issues/3134>`_ -- Hashes for remote and local non-PyPI artifacts will now be included in ``Pipfile.lock`` during resolution. `#3145 <https://github.com/pypa/pipenv/issues/3145>`_ -- Fix project path hashing logic in purpose to prevent collisions of virtual environments. `#3151 <https://github.com/pypa/pipenv/issues/3151>`_ -- Fix package installation when the virtual environment path contains parentheses. `#3158 <https://github.com/pypa/pipenv/issues/3158>`_ -- Azure Pipelines YAML files are updated to use the latest syntax and product name. `#3164 <https://github.com/pypa/pipenv/issues/3164>`_ -- Fixed new spinner success message to write only one success message during resolution. `#3183 <https://github.com/pypa/pipenv/issues/3183>`_ -- Pipenv will now correctly respect the ``--pre`` option when used with ``pipenv install``. `#3185 <https://github.com/pypa/pipenv/issues/3185>`_ -- Fix a bug where exception is raised when run pipenv graph in a project without created virtualenv `#3201 <https://github.com/pypa/pipenv/issues/3201>`_ -- When sources are missing names, names will now be derived from the supplied URL. 
`#3216 <https://github.com/pypa/pipenv/issues/3216>`_ - -Vendored Libraries ------------------- - -- Updated ``pythonfinder`` to correct an issue with unnesting of nested paths when searching for python versions. `#3061 <https://github.com/pypa/pipenv/issues/3061>`_, - `#3121 <https://github.com/pypa/pipenv/issues/3121>`_ -- Updated vendored dependencies: - - ``certifi 2018.08.24 => 2018.10.15`` - - ``urllib3 1.23 => 1.24`` - - ``requests 2.19.1 => 2.20.0`` - - ``shellingham ``1.2.6 => 1.2.7`` - - ``tomlkit 0.4.4. => 0.4.6`` - - ``vistir 0.1.6 => 0.1.8`` - - ``pythonfinder 0.1.2 => 0.1.3`` - - ``requirementslib 1.1.9 => 1.1.10`` - - ``backports.functools_lru_cache 1.5.0 (new)`` - - ``cursor 1.2.0 (new)`` `#3089 <https://github.com/pypa/pipenv/issues/3089>`_ -- Updated vendored dependencies: - - ``requests 2.19.1 => 2.20.1`` - - ``tomlkit 0.4.46 => 0.5.2`` - - ``vistir 0.1.6 => 0.2.4`` - - ``pythonfinder 1.1.2 => 1.1.8`` - - ``requirementslib 1.1.10 => 1.3.0`` `#3096 <https://github.com/pypa/pipenv/issues/3096>`_ -- Switch to ``tomlkit`` for parsing and writing. Drop ``prettytoml`` and ``contoml`` from vendors. `#3191 <https://github.com/pypa/pipenv/issues/3191>`_ -- Updated ``requirementslib`` to aid in resolution of local and remote archives. `#3196 <https://github.com/pypa/pipenv/issues/3196>`_ - -Improved Documentation ----------------------- - -- Expanded development and testing documentation for contributors to get started. `#3074 <https://github.com/pypa/pipenv/issues/3074>`_ - - -2018.10.13 (2018-10-13) -======================= - -Bug Fixes ---------- - -- Fixed a bug in ``pipenv clean`` which caused global packages to sometimes be inadvertently targeted for cleanup. `#2849 <https://github.com/pypa/pipenv/issues/2849>`_ - -- Fix broken backport imports for vendored vistir. `#2950 <https://github.com/pypa/pipenv/issues/2950>`_, - `#2955 <https://github.com/pypa/pipenv/issues/2955>`_, - `#2961 <https://github.com/pypa/pipenv/issues/2961>`_ - -- Fixed a bug with importing local vendored dependencies when running ``pipenv graph``. `#2952 <https://github.com/pypa/pipenv/issues/2952>`_ - -- Fixed a bug which caused executable discovery to fail when running inside a virtualenv. `#2957 <https://github.com/pypa/pipenv/issues/2957>`_ - -- Fix parsing of outline tables. `#2971 <https://github.com/pypa/pipenv/issues/2971>`_ - -- Fixed a bug which caused ``verify_ssl`` to fail to drop through to ``pip install`` correctly as ``trusted-host``. `#2979 <https://github.com/pypa/pipenv/issues/2979>`_ - -- Fixed a bug which caused canonicalized package names to fail to resolve against PyPI. `#2989 <https://github.com/pypa/pipenv/issues/2989>`_ - -- Enhanced CI detection to detect Azure Devops builds. `#2993 <https://github.com/pypa/pipenv/issues/2993>`_ - -- Fixed a bug which prevented installing pinned versions which used redirection symbols from the command line. `#2998 <https://github.com/pypa/pipenv/issues/2998>`_ - -- Fixed a bug which prevented installing the local directory in non-editable mode. `#3005 <https://github.com/pypa/pipenv/issues/3005>`_ - - -Vendored Libraries ------------------- - -- Updated ``requirementslib`` to version ``1.1.9``. `#2989 <https://github.com/pypa/pipenv/issues/2989>`_ - -- Upgraded ``pythonfinder => 1.1.1`` and ``vistir => 0.1.7``. 
`#3007 <https://github.com/pypa/pipenv/issues/3007>`_ - - -2018.10.9 (2018-10-09) -====================== - -Features & Improvements ------------------------ - -- Added environment variables ``PIPENV_VERBOSE`` and ``PIPENV_QUIET`` to control - output verbosity without needing to pass options. `#2527 <https://github.com/pypa/pipenv/issues/2527>`_ - -- Updated test-PyPI add-on to better support json-API access (forward compatibility). - Improved testing process for new contributors. `#2568 <https://github.com/pypa/pipenv/issues/2568>`_ - -- Greatly enhanced python discovery functionality: - - - Added pep514 (windows launcher/finder) support for python discovery. - - Introduced architecture discovery for python installations which support different architectures. `#2582 <https://github.com/pypa/pipenv/issues/2582>`_ - -- Added support for ``pipenv shell`` on msys and cygwin/mingw/git bash for Windows. `#2641 <https://github.com/pypa/pipenv/issues/2641>`_ - -- Enhanced resolution of editable and VCS dependencies. `#2643 <https://github.com/pypa/pipenv/issues/2643>`_ - -- Deduplicate and refactor CLI to use stateful arguments and object passing. See `this issue <https://github.com/pallets/click/issues/108>`_ for reference. `#2814 <https://github.com/pypa/pipenv/issues/2814>`_ - - -Behavior Changes ----------------- - -- Virtual environment activation for ``run`` is revised to improve interpolation - with other Python discovery tools. `#2503 <https://github.com/pypa/pipenv/issues/2503>`_ - -- Improve terminal coloring to display better in Powershell. `#2511 <https://github.com/pypa/pipenv/issues/2511>`_ - -- Invoke ``virtualenv`` directly for virtual environment creation, instead of depending on ``pew``. `#2518 <https://github.com/pypa/pipenv/issues/2518>`_ - -- ``pipenv --help`` will now include short help descriptions. `#2542 <https://github.com/pypa/pipenv/issues/2542>`_ - -- Add ``COMSPEC`` to fallback option (along with ``SHELL`` and ``PYENV_SHELL``) - if shell detection fails, improving robustness on Windows. `#2651 <https://github.com/pypa/pipenv/issues/2651>`_ - -- Fallback to shell mode if ``run`` fails with Windows error 193 to handle non-executable commands. This should improve usability on Windows, where some users run non-executable files without specifying a command, relying on Windows file association to choose the current command. `#2718 <https://github.com/pypa/pipenv/issues/2718>`_ - - -Bug Fixes ---------- - -- Fixed a bug which prevented installation of editable requirements using ``ssh://`` style URLs `#1393 <https://github.com/pypa/pipenv/issues/1393>`_ - -- VCS Refs for locked local editable dependencies will now update appropriately to the latest hash when running ``pipenv update``. `#1690 <https://github.com/pypa/pipenv/issues/1690>`_ - -- ``.tar.gz`` and ``.zip`` artifacts will now have dependencies installed even when they are missing from the Lockfile. `#2173 <https://github.com/pypa/pipenv/issues/2173>`_ - -- The command line parser will now handle multiple ``-e/--editable`` dependencies properly via click's option parser to help mitigate future parsing issues. `#2279 <https://github.com/pypa/pipenv/issues/2279>`_ - -- Fixed the ability of pipenv to parse ``dependency_links`` from ``setup.py`` when ``PIP_PROCESS_DEPENDENCY_LINKS`` is enabled. `#2434 <https://github.com/pypa/pipenv/issues/2434>`_ - -- Fixed a bug which could cause ``-i/--index`` arguments to sometimes be incorrectly picked up in packages. This is now handled in the command line parser. 
`#2494 <https://github.com/pypa/pipenv/issues/2494>`_ - -- Fixed non-deterministic resolution issues related to changes to the internal package finder in ``pip 10``. `#2499 <https://github.com/pypa/pipenv/issues/2499>`_, - `#2529 <https://github.com/pypa/pipenv/issues/2529>`_, - `#2589 <https://github.com/pypa/pipenv/issues/2589>`_, - `#2666 <https://github.com/pypa/pipenv/issues/2666>`_, - `#2767 <https://github.com/pypa/pipenv/issues/2767>`_, - `#2785 <https://github.com/pypa/pipenv/issues/2785>`_, - `#2795 <https://github.com/pypa/pipenv/issues/2795>`_, - `#2801 <https://github.com/pypa/pipenv/issues/2801>`_, - `#2824 <https://github.com/pypa/pipenv/issues/2824>`_, - `#2862 <https://github.com/pypa/pipenv/issues/2862>`_, - `#2879 <https://github.com/pypa/pipenv/issues/2879>`_, - `#2894 <https://github.com/pypa/pipenv/issues/2894>`_, - `#2933 <https://github.com/pypa/pipenv/issues/2933>`_ - -- Fix subshell invocation on Windows for Python 2. `#2515 <https://github.com/pypa/pipenv/issues/2515>`_ - -- Fixed a bug which sometimes caused pipenv to throw a ``TypeError`` or to run into encoding issues when writing a Lockfile on python 2. `#2561 <https://github.com/pypa/pipenv/issues/2561>`_ - -- Improve quoting logic for ``pipenv run`` so it works better with Windows - built-in commands. `#2563 <https://github.com/pypa/pipenv/issues/2563>`_ - -- Fixed a bug related to parsing VCS requirements with both extras and subdirectory fragments. - Corrected an issue in the ``requirementslib`` parser which led to some markers being discarded rather than evaluated. `#2564 <https://github.com/pypa/pipenv/issues/2564>`_ - -- Fixed multiple issues with finding the correct system python locations. `#2582 <https://github.com/pypa/pipenv/issues/2582>`_ - -- Catch JSON decoding error to prevent exception when the lock file is of - invalid format. `#2607 <https://github.com/pypa/pipenv/issues/2607>`_ - -- Fixed a rare bug which could sometimes cause errors when installing packages with custom sources. `#2610 <https://github.com/pypa/pipenv/issues/2610>`_ - -- Update requirementslib to fix a bug which could raise an ``UnboundLocalError`` when parsing malformed VCS URIs. `#2617 <https://github.com/pypa/pipenv/issues/2617>`_ - -- Fixed an issue which prevented passing multiple ``--ignore`` parameters to ``pipenv check``. `#2632 <https://github.com/pypa/pipenv/issues/2632>`_ - -- Fixed a bug which caused attempted hashing of ``ssh://`` style URIs which could cause failures during installation of private ssh repositories. - - Corrected path conversion issues which caused certain editable VCS paths to be converted to ``ssh://`` URIs improperly. `#2639 <https://github.com/pypa/pipenv/issues/2639>`_ - -- Fixed a bug which caused paths to be formatted incorrectly when using ``pipenv shell`` in bash for windows. `#2641 <https://github.com/pypa/pipenv/issues/2641>`_ - -- Dependency links to private repositories defined via ``ssh://`` schemes will now install correctly and skip hashing as long as ``PIP_PROCESS_DEPENDENCY_LINKS=1``. `#2643 <https://github.com/pypa/pipenv/issues/2643>`_ - -- Fixed a bug which sometimes caused pipenv to parse the ``trusted_host`` argument to pip incorrectly when parsing source URLs which specify ``verify_ssl = false``. `#2656 <https://github.com/pypa/pipenv/issues/2656>`_ - -- Prevent crashing when a virtual environment in ``WORKON_HOME`` is faulty. `#2676 <https://github.com/pypa/pipenv/issues/2676>`_ - -- Fixed virtualenv creation failure when a .venv file is present in the project root. 
`#2680 <https://github.com/pypa/pipenv/issues/2680>`_ - -- Fixed a bug which could cause the ``-e/--editable`` argument on a dependency to be accidentally parsed as a dependency itself. `#2714 <https://github.com/pypa/pipenv/issues/2714>`_ - -- Correctly pass ``verbose`` and ``debug`` flags to the resolver subprocess so it generates appropriate output. This also resolves a bug introduced by the fix to #2527. `#2732 <https://github.com/pypa/pipenv/issues/2732>`_ - -- All markers are now included in ``pipenv lock --requirements`` output. `#2748 <https://github.com/pypa/pipenv/issues/2748>`_ - -- Fixed a bug in marker resolution which could cause duplicate and non-deterministic markers. `#2760 <https://github.com/pypa/pipenv/issues/2760>`_ - -- Fixed a bug in the dependency resolver which caused regular issues when handling ``setup.py`` based dependency resolution. `#2766 <https://github.com/pypa/pipenv/issues/2766>`_ - -- Updated vendored dependencies: - - ``pip-tools`` (updated and patched to latest w/ ``pip 18.0`` compatibility) - - ``pip 10.0.1 => 18.0`` - - ``click 6.7 => 7.0`` - - ``toml 0.9.4 => 0.10.0`` - - ``pyparsing 2.2.0 => 2.2.2`` - - ``delegator 0.1.0 => 0.1.1`` - - ``attrs 18.1.0 => 18.2.0`` - - ``distlib 0.2.7 => 0.2.8`` - - ``packaging 17.1.0 => 18.0`` - - ``passa 0.2.0 => 0.3.1`` - - ``pip_shims 0.1.2 => 0.3.1`` - - ``plette 0.1.1 => 0.2.2`` - - ``pythonfinder 1.0.2 => 1.1.0`` - - ``pytoml 0.1.18 => 0.1.19`` - - ``requirementslib 1.1.16 => 1.1.17`` - - ``shellingham 1.2.4 => 1.2.6`` - - ``tomlkit 0.4.2 => 0.4.4`` - - ``vistir 0.1.4 => 0.1.6`` - - `#2802 <https://github.com/pypa/pipenv/issues/2802>`_, - `#2867 <https://github.com/pypa/pipenv/issues/2867>`_, - `#2880 <https://github.com/pypa/pipenv/issues/2880>`_ - -- Fixed a bug where ``pipenv`` crashes when the ``WORKON_HOME`` directory does not exist. `#2877 <https://github.com/pypa/pipenv/issues/2877>`_ - -- Fixed pip is not loaded from pipenv's patched one but the system one `#2912 <https://github.com/pypa/pipenv/issues/2912>`_ - -- Fixed various bugs related to ``pip 18.1`` release which prevented locking, installation, and syncing, and dumping to a ``requirements.txt`` file. `#2924 <https://github.com/pypa/pipenv/issues/2924>`_ - - -Vendored Libraries ------------------- - -- Pew is no longer vendored. Entry point ``pewtwo``, packages ``pipenv.pew`` and - ``pipenv.patched.pew`` are removed. `#2521 <https://github.com/pypa/pipenv/issues/2521>`_ - -- Update ``pythonfinder`` to major release ``1.0.0`` for integration. `#2582 <https://github.com/pypa/pipenv/issues/2582>`_ - -- Update requirementslib to fix a bug which could raise an ``UnboundLocalError`` when parsing malformed VCS URIs. `#2617 <https://github.com/pypa/pipenv/issues/2617>`_ - -- - Vendored new libraries ``vistir`` and ``pip-shims``, ``tomlkit``, ``modutil``, and ``plette``. 
- - - Update vendored libraries: - - ``scandir`` to ``1.9.0`` - - ``click-completion`` to ``0.4.1`` - - ``semver`` to ``2.8.1`` - - ``shellingham`` to ``1.2.4`` - - ``pytoml`` to ``0.1.18`` - - ``certifi`` to ``2018.8.24`` - - ``ptyprocess`` to ``0.6.0`` - - ``requirementslib`` to ``1.1.5`` - - ``pythonfinder`` to ``1.0.2`` - - ``pipdeptree`` to ``0.13.0`` - - ``python-dotenv`` to ``0.9.1`` `#2639 <https://github.com/pypa/pipenv/issues/2639>`_ - -- Updated vendored dependencies: - - ``pip-tools`` (updated and patched to latest w/ ``pip 18.0`` compatibility) - - ``pip 10.0.1 => 18.0`` - - ``click 6.7 => 7.0`` - - ``toml 0.9.4 => 0.10.0`` - - ``pyparsing 2.2.0 => 2.2.2`` - - ``delegator 0.1.0 => 0.1.1`` - - ``attrs 18.1.0 => 18.2.0`` - - ``distlib 0.2.7 => 0.2.8`` - - ``packaging 17.1.0 => 18.0`` - - ``passa 0.2.0 => 0.3.1`` - - ``pip_shims 0.1.2 => 0.3.1`` - - ``plette 0.1.1 => 0.2.2`` - - ``pythonfinder 1.0.2 => 1.1.0`` - - ``pytoml 0.1.18 => 0.1.19`` - - ``requirementslib 1.1.16 => 1.1.17`` - - ``shellingham 1.2.4 => 1.2.6`` - - ``tomlkit 0.4.2 => 0.4.4`` - - ``vistir 0.1.4 => 0.1.6`` - - `#2902 <https://github.com/pypa/pipenv/issues/2902>`_, - `#2935 <https://github.com/pypa/pipenv/issues/2935>`_ - - -Improved Documentation ----------------------- - -- Simplified the test configuration process. `#2568 <https://github.com/pypa/pipenv/issues/2568>`_ - -- Updated documentation to use working fortune cookie add-on. `#2644 <https://github.com/pypa/pipenv/issues/2644>`_ - -- Added additional information about troubleshooting ``pipenv shell`` by using the the ``$PIPENV_SHELL`` environment variable. `#2671 <https://github.com/pypa/pipenv/issues/2671>`_ - -- Added a link to ``PEP-440`` version specifiers in the documentation for additional detail. `#2674 <https://github.com/pypa/pipenv/issues/2674>`_ - -- Added simple example to README.md for installing from git. `#2685 <https://github.com/pypa/pipenv/issues/2685>`_ - -- Stopped recommending ``--system`` for Docker contexts. `#2762 <https://github.com/pypa/pipenv/issues/2762>`_ - -- Fixed the example url for doing "pipenv install -e - some-repository-url#egg=something", it was missing the "egg=" in the fragment - identifier. `#2792 <https://github.com/pypa/pipenv/issues/2792>`_ - -- Fixed link to the "be cordial" essay in the contribution documentation. `#2793 <https://github.com/pypa/pipenv/issues/2793>`_ - -- Clarify ``pipenv install`` documentation `#2844 <https://github.com/pypa/pipenv/issues/2844>`_ - -- Replace reference to uservoice with PEEP-000 `#2909 <https://github.com/pypa/pipenv/issues/2909>`_ - - -2018.7.1 (2018-07-01) -===================== - -Features & Improvements ------------------------ - -- All calls to ``pipenv shell`` are now implemented from the ground up using `shellingham <https://github.com/sarugaku/shellingham>`_, a custom library which was purpose built to handle edge cases and shell detection. `#2371 <https://github.com/pypa/pipenv/issues/2371>`_ - -- Added support for python 3.7 via a few small compatibility / bug fixes. `#2427 <https://github.com/pypa/pipenv/issues/2427>`_, - `#2434 <https://github.com/pypa/pipenv/issues/2434>`_, - `#2436 <https://github.com/pypa/pipenv/issues/2436>`_ - -- Added new flag ``pipenv --support`` to replace the diagnostic command ``python -m pipenv.help``. `#2477 <https://github.com/pypa/pipenv/issues/2477>`_, - `#2478 <https://github.com/pypa/pipenv/issues/2478>`_ - -- Improved import times and CLI run times with minor tweaks. 
`#2485 <https://github.com/pypa/pipenv/issues/2485>`_ - - -Bug Fixes ---------- - -- Fixed an ongoing bug which sometimes resolved incompatible versions into the project Lockfile. `#1901 <https://github.com/pypa/pipenv/issues/1901>`_ - -- Fixed a bug which caused errors when creating virtualenvs which contained leading dash characters. `#2415 <https://github.com/pypa/pipenv/issues/2415>`_ - -- Fixed a logic error which caused ``--deploy --system`` to overwrite editable vcs packages in the Pipfile before installing, which caused any installation to fail by default. `#2417 <https://github.com/pypa/pipenv/issues/2417>`_ - -- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. `#2419 <https://github.com/pypa/pipenv/issues/2419>`_ - -- Installed new vendored jinja2 templates for ``click-completion`` which were causing template errors for users with completion enabled. `#2422 <https://github.com/pypa/pipenv/issues/2422>`_ - -- Added support for python 3.7 via a few small compatibility / bug fixes. `#2427 <https://github.com/pypa/pipenv/issues/2427>`_ - -- Fixed an issue reading package names from ``setup.py`` files in projects which imported utilities such as ``versioneer``. `#2433 <https://github.com/pypa/pipenv/issues/2433>`_ - -- Pipenv will now ensure that its internal package names registry files are written with unicode strings. `#2450 <https://github.com/pypa/pipenv/issues/2450>`_ - -- Fixed a bug causing requirements input as relative paths to be output as absolute paths or URIs. - Fixed a bug affecting normalization of ``git+git@host`` URLs. `#2453 <https://github.com/pypa/pipenv/issues/2453>`_ - -- Pipenv will now always use ``pathlib2`` for ``Path`` based filesystem interactions by default on ``python<3.5``. `#2454 <https://github.com/pypa/pipenv/issues/2454>`_ - -- Fixed a bug which prevented passing proxy PyPI indexes set with ``--pypi-mirror`` from being passed to pip during virtualenv creation, which could cause the creation to freeze in some cases. `#2462 <https://github.com/pypa/pipenv/issues/2462>`_ - -- Using the ``python -m pipenv.help`` command will now use proper encoding for the host filesystem to avoid encoding issues. `#2466 <https://github.com/pypa/pipenv/issues/2466>`_ - -- The new ``jinja2`` templates for ``click_completion`` will now be included in pipenv source distributions. `#2479 <https://github.com/pypa/pipenv/issues/2479>`_ - -- Resolved a long-standing issue with re-using previously generated ``InstallRequirement`` objects for resolution which could cause ``PKG-INFO`` file information to be deleted, raising a ``TypeError``. `#2480 <https://github.com/pypa/pipenv/issues/2480>`_ - -- Resolved an issue parsing usernames from private PyPI URIs in ``Pipfiles`` by updating ``requirementslib``. `#2484 <https://github.com/pypa/pipenv/issues/2484>`_ - - -Vendored Libraries ------------------- - -- All calls to ``pipenv shell`` are now implemented from the ground up using `shellingham <https://github.com/sarugaku/shellingham>`_, a custom library which was purpose built to handle edge cases and shell detection. `#2371 <https://github.com/pypa/pipenv/issues/2371>`_ - -- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. `#2419 <https://github.com/pypa/pipenv/issues/2419>`_ - -- Installed new vendored jinja2 templates for ``click-completion`` which were causing template errors for users with completion enabled. 
`#2422 <https://github.com/pypa/pipenv/issues/2422>`_ - -- Add patch to ``prettytoml`` to support Python 3.7. `#2426 <https://github.com/pypa/pipenv/issues/2426>`_ - -- Patched ``prettytoml.AbstractTable._enumerate_items`` to handle ``StopIteration`` errors in preparation of release of python 3.7. `#2427 <https://github.com/pypa/pipenv/issues/2427>`_ - -- Fixed an issue reading package names from ``setup.py`` files in projects which imported utilities such as ``versioneer``. `#2433 <https://github.com/pypa/pipenv/issues/2433>`_ - -- Updated ``requirementslib`` to version ``1.0.9`` `#2453 <https://github.com/pypa/pipenv/issues/2453>`_ - -- Unraveled a lot of old, unnecessary patches to ``pip-tools`` which were causing non-deterministic resolution errors. `#2480 <https://github.com/pypa/pipenv/issues/2480>`_ - -- Resolved an issue parsing usernames from private PyPI URIs in ``Pipfiles`` by updating ``requirementslib``. `#2484 <https://github.com/pypa/pipenv/issues/2484>`_ - - -Improved Documentation ----------------------- - -- Added instructions for installing using Fedora's official repositories. `#2404 <https://github.com/pypa/pipenv/issues/2404>`_ - - -2018.6.25 (2018-06-25) -====================== - -Features & Improvements ------------------------ - -- Pipenv-created virtualenvs will now be associated with a ``.project`` folder - (features can be implemented on top of this later or users may choose to use - ``pipenv-pipes`` to take full advantage of this.) `#1861 - <https://github.com/pypa/pipenv/issues/1861>`_ - -- Virtualenv names will now appear in prompts for most Windows users. `#2167 - <https://github.com/pypa/pipenv/issues/2167>`_ - -- Added support for cmder shell paths with spaces. `#2168 - <https://github.com/pypa/pipenv/issues/2168>`_ - -- Added nested JSON output to the ``pipenv graph`` command. `#2199 - <https://github.com/pypa/pipenv/issues/2199>`_ - -- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated - patched piptools version. `#2255 - <https://github.com/pypa/pipenv/issues/2255>`_ - -- PyPI mirror URLs can now be set to override instances of PyPI URLs by passing - the ``--pypi-mirror`` argument from the command line or setting the - ``PIPENV_PYPI_MIRROR`` environment variable. `#2281 - <https://github.com/pypa/pipenv/issues/2281>`_ - -- Virtualenv activation lines will now avoid being written to some shell - history files. `#2287 <https://github.com/pypa/pipenv/issues/2287>`_ - -- Pipenv will now only search for ``requirements.txt`` files when creating new - projects, and during that time only if the user doesn't specify packages to - pass in. `#2309 <https://github.com/pypa/pipenv/issues/2309>`_ - -- Added support for mounted drives via UNC paths. `#2331 - <https://github.com/pypa/pipenv/issues/2331>`_ - -- Added support for Windows Subsystem for Linux bash shell detection. `#2363 - <https://github.com/pypa/pipenv/issues/2363>`_ - -- Pipenv will now generate hashes much more quickly by resolving them in a - single pass during locking. `#2384 - <https://github.com/pypa/pipenv/issues/2384>`_ - -- ``pipenv run`` will now avoid spawning additional ``COMSPEC`` instances to - run commands in when possible. `#2385 - <https://github.com/pypa/pipenv/issues/2385>`_ - -- Massive internal improvements to requirements parsing codebase, resolver, and - error messaging. 
`#2388 <https://github.com/pypa/pipenv/issues/2388>`_ - -- ``pipenv check`` now may take multiple of the additional argument - ``--ignore`` which takes a parameter ``cve_id`` for the purpose of ignoring - specific CVEs. `#2408 <https://github.com/pypa/pipenv/issues/2408>`_ - - -Behavior Changes ----------------- - -- Pipenv will now parse & capitalize ``platform_python_implementation`` markers - .. warning:: This could cause an issue if you have an out of date ``Pipfile`` - which lower-cases the comparison value (e.g. ``cpython`` instead of - ``CPython``). `#2123 <https://github.com/pypa/pipenv/issues/2123>`_ - -- Pipenv will now only search for ``requirements.txt`` files when creating new - projects, and during that time only if the user doesn't specify packages to - pass in. `#2309 <https://github.com/pypa/pipenv/issues/2309>`_ - - -Bug Fixes ---------- - -- Massive internal improvements to requirements parsing codebase, resolver, and - error messaging. `#1962 <https://github.com/pypa/pipenv/issues/1962>`_, - `#2186 <https://github.com/pypa/pipenv/issues/2186>`_, - `#2263 <https://github.com/pypa/pipenv/issues/2263>`_, - `#2312 <https://github.com/pypa/pipenv/issues/2312>`_ - -- Pipenv will now parse & capitalize ``platform_python_implementation`` - markers. `#2123 <https://github.com/pypa/pipenv/issues/2123>`_ - -- Fixed a bug with parsing and grouping old-style ``setup.py`` extras during - resolution `#2142 <https://github.com/pypa/pipenv/issues/2142>`_ - -- Fixed a bug causing pipenv graph to throw unhelpful exceptions when running - against empty or non-existent environments. `#2161 - <https://github.com/pypa/pipenv/issues/2161>`_ - -- Fixed a bug which caused ``--system`` to incorrectly abort when users were in - a virtualenv. `#2181 <https://github.com/pypa/pipenv/issues/2181>`_ - -- Removed vendored ``cacert.pem`` which could cause issues for some users with - custom certificate settings. `#2193 - <https://github.com/pypa/pipenv/issues/2193>`_ - -- Fixed a regression which led to direct invocations of ``virtualenv``, rather - than calling it by module. `#2198 - <https://github.com/pypa/pipenv/issues/2198>`_ - -- Locking will now pin the correct VCS ref during ``pipenv update`` runs. - Running ``pipenv update`` with a new vcs ref specified in the ``Pipfile`` - will now properly obtain, resolve, and install the specified dependency at - the specified ref. `#2209 <https://github.com/pypa/pipenv/issues/2209>`_ - -- ``pipenv clean`` will now correctly ignore comments from ``pip freeze`` when - cleaning the environment. `#2262 - <https://github.com/pypa/pipenv/issues/2262>`_ - -- Resolution bugs causing packages for incompatible python versions to be - locked have been fixed. `#2267 - <https://github.com/pypa/pipenv/issues/2267>`_ - -- Fixed a bug causing pipenv graph to fail to display sometimes. `#2268 - <https://github.com/pypa/pipenv/issues/2268>`_ - -- Updated ``requirementslib`` to fix a bug in Pipfile parsing affecting - relative path conversions. `#2269 - <https://github.com/pypa/pipenv/issues/2269>`_ - -- Windows executable discovery now leverages ``os.pathext``. `#2298 - <https://github.com/pypa/pipenv/issues/2298>`_ - -- Fixed a bug which caused ``--deploy --system`` to inadvertently create a - virtualenv before failing. `#2301 - <https://github.com/pypa/pipenv/issues/2301>`_ - -- Fixed an issue which led to a failure to unquote special characters in file - and wheel paths. 
`#2302 <https://github.com/pypa/pipenv/issues/2302>`_ - -- VCS dependencies are now manually obtained only if they do not match the - requested ref. `#2304 <https://github.com/pypa/pipenv/issues/2304>`_ - -- Added error handling functionality to properly cope with single-digit - ``Requires-Python`` metadata with no specifiers. `#2377 - <https://github.com/pypa/pipenv/issues/2377>`_ - -- ``pipenv update`` will now always run the resolver and lock before ensuring - dependencies are in sync with project Lockfile. `#2379 - <https://github.com/pypa/pipenv/issues/2379>`_ - -- Resolved a bug in our patched resolvers which could cause nondeterministic - resolution failures in certain conditions. Running ``pipenv install`` with no - arguments in a project with only a ``Pipfile`` will now correctly lock first - for dependency resolution before installing. `#2384 - <https://github.com/pypa/pipenv/issues/2384>`_ - -- Patched ``python-dotenv`` to ensure that environment variables always get - encoded to the filesystem encoding. `#2386 - <https://github.com/pypa/pipenv/issues/2386>`_ - - -Improved Documentation ----------------------- - -- Update documentation wording to clarify Pipenv's overall role in the packaging ecosystem. `#2194 <https://github.com/pypa/pipenv/issues/2194>`_ - -- Added contribution documentation and guidelines. `#2205 <https://github.com/pypa/pipenv/issues/2205>`_ - -- Added instructions for supervisord compatibility. `#2215 <https://github.com/pypa/pipenv/issues/2215>`_ - -- Fixed broken links to development philosophy and contribution documentation. `#2248 <https://github.com/pypa/pipenv/issues/2248>`_ - - -Vendored Libraries ------------------- - -- Removed vendored ``cacert.pem`` which could cause issues for some users with - custom certificate settings. `#2193 - <https://github.com/pypa/pipenv/issues/2193>`_ - -- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated - patched piptools version. `#2255 - <https://github.com/pypa/pipenv/issues/2255>`_ - -- Updated ``requirementslib`` to fix a bug in Pipfile parsing affecting - relative path conversions. `#2269 - <https://github.com/pypa/pipenv/issues/2269>`_ - -- Added custom shell detection library ``shellingham``, a port of our changes - to ``pew``. `#2363 <https://github.com/pypa/pipenv/issues/2363>`_ - -- Patched ``python-dotenv`` to ensure that environment variables always get - encoded to the filesystem encoding. `#2386 - <https://github.com/pypa/pipenv/issues/2386>`_ - -- Updated vendored libraries. The following vendored libraries were updated: - - * distlib from version ``0.2.6`` to ``0.2.7``. - * jinja2 from version ``2.9.5`` to ``2.10``. - * pathlib2 from version ``2.1.0`` to ``2.3.2``. - * parse from version ``2.8.0`` to ``2.8.4``. - * pexpect from version ``2.5.2`` to ``2.6.0``. - * requests from version ``2.18.4`` to ``2.19.1``. - * idna from version ``2.6`` to ``2.7``. - * certifi from version ``2018.1.16`` to ``2018.4.16``. - * packaging from version ``16.8`` to ``17.1``. - * six from version ``1.10.0`` to ``1.11.0``. - * requirementslib from version ``0.2.0`` to ``1.0.1``. - - In addition, scandir was vendored and patched to avoid importing host system binaries when falling back to pathlib2. `#2368 <https://github.com/pypa/pipenv/issues/2368>`_ diff --git a/docs/advanced.rst b/docs/advanced.md similarity index 53% rename from docs/advanced.rst rename to docs/advanced.md index 3abc8b9b33..4c10ce803a 100644 --- a/docs/advanced.rst +++ b/docs/advanced.md @@ -1,80 +1,66 @@ -.. 
_advanced: - -Other Topics -======================== +# Other topics This document is current in the process of being broken apart into more granular sections so that we may provide better overall documentation. +## ☤ Supplying additional arguments to pip -☤ Supplying additional arguments to pip ------------------------------------------------- - -There may be cases where you wish to supply additional arguments to pip to be used during the install phase. -For example, you may want to enable the pip feature for using -`system certificate stores <https://pip.pypa.io/en/latest/topics/https-certificates/#using-system-certificate-stores>`_ +There may be cases where you wish to supply additional arguments to pip to be used during the install phase. For example, you may want to enable the pip feature for using [system certificate stores](https://pip.pypa.io/en/latest/topics/https-certificates/#using-system-certificate-stores) -In this case you can supply these additional arguments to ``pipenv sync`` or ``pipenv install`` by passing additional -argument ``--extra-pip-args="--use-feature=truststore"``. It is possible to supply multiple arguments in the ``--extra-pip-args``. -Example usage:: +In this case you can supply these additional arguments to `pipenv sync` or `pipenv install` by passing additional +argument `--extra-pip-args="--use-feature=truststore"`. It is possible to supply multiple arguments in the `--extra-pip-args`. Example usage: pipenv sync --extra-pip-args="--use-feature=truststore --proxy=127.0.0.1" +## ☤ Using pipenv for Deployments +You may want to use `pipenv` as part of a deployment process. -☤ Using pipenv for Deployments ------------------------------- - -You may want to use ``pipenv`` as part of a deployment process. - -You can enforce that your ``Pipfile.lock`` is up to date using the ``--deploy`` flag:: +You can enforce that your `Pipfile.lock` is up to date using the `--deploy` flag: $ pipenv install --deploy -This will fail a build if the ``Pipfile.lock`` is out–of–date, instead of generating a new one. +This will fail a build if the `Pipfile.lock` is out–of–date, instead of generating a new one. -Or you can install packages exactly as specified in ``Pipfile.lock`` using the ``sync`` command:: +Or you can install packages exactly as specified in `Pipfile.lock` using the `sync` command: $ pipenv sync -.. note:: +Note ``pipenv install --ignore-pipfile`` is nearly equivalent to ``pipenv sync``, but ``pipenv sync`` will *never* attempt to re-lock your dependencies as it is considered an atomic operation. ``pipenv install`` by default does attempt to re-lock unless using the ``--deploy`` flag. -You may only wish to verify your ``Pipfile.lock`` is up-to-date with dependencies specified in the ``Pipfile``, without installing:: +You may only wish to verify your `Pipfile.lock` is up-to-date with dependencies specified in the `Pipfile`, without installing: $ pipenv verify -The command will perform a verification, and return an exit code ``1`` when dependency locking is needed. This may be useful for cases when the ``Pipfile.lock`` file is subject to version control, so this command can be used within your CI/CD pipelines. +The command will perform a verification, and return an exit code `1` when dependency locking is needed. This may be useful for cases when the `Pipfile.lock` file is subject to version control, so this command can be used within your CI/CD pipelines. 
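For example, a minimal CI sketch (illustrative only; it assumes the project declares its test runner, here `pytest`, as a dev package, and your pipeline may need different steps) could fail fast on a stale lock file and then install exactly what is locked before running the tests:

    $ pipenv verify          # exits with code 1 if Pipfile.lock is out of date
    $ pipenv sync --dev      # install exactly what Pipfile.lock specifies, including dev packages
    $ pipenv run pytest      # run the test suite inside the managed virtualenv (assumes pytest is in [dev-packages])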
-Deploying System Dependencies -///////////////////////////// +### Deploying System Dependencies -You can tell Pipenv to install a Pipfile's contents into its parent system with the ``--system`` flag:: +You can tell Pipenv to install a Pipfile's contents into its parent system with the `--system` flag: $ pipenv install --system This is useful for managing the system Python, and deployment infrastructure (e.g. Heroku does this). -☤ Pipenv and Other Python Distributions ---------------------------------------- +## ☤ Pipenv and Other Python Distributions -To use Pipenv with a third-party Python distribution (e.g. Anaconda), you simply provide the path to the Python binary:: +To use Pipenv with a third-party Python distribution (e.g. Anaconda), you simply provide the path to the Python binary: $ pipenv install --python=/path/to/python -Anaconda uses Conda to manage packages. To reuse Conda–installed Python packages, use the ``--site-packages`` flag:: +Anaconda uses Conda to manage packages. To reuse Conda–installed Python packages, use the `--site-packages` flag: $ pipenv --python=/path/to/python --site-packages -☤ Generating a ``requirements.txt`` ------------------------------------ +## ☤ Generating a `requirements.txt` Sometimes, you would want to generate a requirements file based on your current environment, for example to include tooling that only supports requirements.txt. -You can convert a ``Pipfile.lock`` into a ``requirements.txt`` +You can convert a `Pipfile.lock` into a `requirements.txt` file very easily. -Let's take this ``Pipfile``:: +Let's take this `Pipfile`: [[source]] name = "pypi" @@ -87,7 +73,7 @@ Let's take this ``Pipfile``:: [dev-packages] pytest = {version="==3.2.3"} -Which generates a ``Pipfile.lock`` upon completion of running ``pipenv lock``` similar to:: +Which generates a `Pipfile.lock` upon completion of running ``pipenv lock``` similar to: { "_meta": { @@ -105,7 +91,7 @@ Which generates a ``Pipfile.lock`` upon completion of running ``pipenv lock``` s ] }, "default": { - ... snipped ... + ... snipped ... "requests": { "hashes": [ "sha256:6a1b267aa90cac58ac3a765d067950e7dbbf75b1da07e895d1f594193a40a38b", @@ -114,7 +100,7 @@ Which generates a ``Pipfile.lock`` upon completion of running ``pipenv lock``` s "index": "pypi", "version": "==2.18.4" }, - ... snipped ... + ... snipped ... }, "develop": { ... snipped ... @@ -129,7 +115,7 @@ Which generates a ``Pipfile.lock`` upon completion of running ``pipenv lock``` s ... snipped ... 
} -Given the ``Pipfile.lock`` exists, you may generate a set of requirements out of it with the default dependencies:: +Given the `Pipfile.lock` exists, you may generate a set of requirements out of it with the default dependencies: $ pipenv requirements -i https://pypi.org/simple @@ -139,10 +125,10 @@ Given the ``Pipfile.lock`` exists, you may generate a set of requirements out of requests==2.18.4 urllib3==1.22 -As with other commands, passing ``--dev`` will include both the default and -development dependencies:: +As with other commands, passing `--dev` will include both the default and +development dependencies: - $ pipenv requirements --dev + $ pipenv requirements --dev -i https://pypi.org/simple colorama==0.4.5 ; sys_platform == 'win32' py==1.11.0 ; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' @@ -154,9 +140,7 @@ development dependencies:: requests==2.18.4 urllib3==1.22 -If you wish to generate a requirements file with only the -development requirements you can do that too, using the ``--dev-only`` -flag:: +If you wish to generate a requirements file with only the development requirements you can do that too, using the `--dev-only` flag: $ pipenv requirements --dev-only -i https://pypi.org/simple @@ -165,11 +149,11 @@ flag:: pytest==3.2.3 setuptools==65.4.1 ; python_version >= '3.7' -Adding the ``--hash`` flag adds package hashes to the output for extra security. -Adding the ``--exclude-markers`` flag excludes the markers from the output. +Adding the `--hash` flag adds package hashes to the output for extra security. +Adding the `--exclude-markers` flag excludes the markers from the output. The locked requirements are written to stdout, with shell output redirection -used to write them to a file:: +used to write them to a file: $ pipenv requirements > requirements.txt $ pipenv requirements --dev-only > dev-requirements.txt @@ -189,7 +173,7 @@ used to write them to a file:: If you have multiple categories in your Pipfile and wish to generate a requirements file for only some categories, you can do that too, -using the ``--categories`` option:: +using the `--categories` option: $ pipenv requirements --categories="tests" > requirements-tests.txt $ pipenv requirements --categories="docs" > requirements-docs.txt @@ -208,19 +192,17 @@ It can be used to specify multiple categories also. $ pipenv requirements --categories="tests,docs" -☤ Detection of Security Vulnerabilities ---------------------------------------- +## ☤ Detection of Security Vulnerabilities -Pipenv includes the `safety <https://github.com/pyupio/safety>`_ package, and will use it to scan your dependency graph -for known security vulnerabilities! +Pipenv includes the [safety](https://github.com/pyupio/safety) package, and will use it to scan your dependency graph for known security vulnerabilities! -By default ``pipenv check`` will scan the Pipfile.lock default packages group and use this as the input to the safety command. -To scan other package categories pass the specific ``--categories`` you want to check against. -To have ``pipenv check`` scan the virtualenv packages for what is installed and use this as the input to the safety command, -run``pipenv check --use-installed``. -Note: ``--use-installed`` was the default behavior in ``pipenv<=2023.2.4`` +By default `pipenv check` will scan the Pipfile.lock default packages group and use this as the input to the safety command. +To scan other package categories pass the specific `--categories` you want to check against. 
+To have `pipenv check` scan the virtualenv packages for what is installed and use this as the input to the safety command, +run `pipenv check --use-installed`. +Note: `--use-installed` was the default behavior in `pipenv<=2023.2.4`. -Example:: +Example: $ pipenv install wheel==0.37.1 $ cat Pipfile.lock @@ -250,38 +232,28 @@ Example:: Scan was completed. 1 vulnerability was found. ... +Note - -.. note:: - - Each month, `PyUp.io <https://pyup.io>`_ updates the ``safety`` database of - insecure Python packages and `makes it available to the open source - community for free <https://pyup.io/safety/>`__. Each time - you run ``pipenv check`` to show you vulnerable dependencies, - Pipenv makes an API call to retrieve and use those results. - - For more up-to-date vulnerability data, you may also use your own safety - API key by setting the environment variable ``PIPENV_PYUP_API_KEY``. +Each month, [PyUp.io](https://pyup.io) updates the `safety` database of insecure Python packages and [makes it available to the open source community for free](https://pyup.io/safety/). Each time you run `pipenv check` to show you vulnerable dependencies, +Pipenv makes an API call to retrieve and use those results. +For more up-to-date vulnerability data, you may also use your own safety API key by setting the environment variable `PIPENV_PYUP_API_KEY`. -☤ Community Integrations ------------------------ +## ☤ Community Integrations -There are a range of community-maintained plugins and extensions available for a range of editors and IDEs, as well as -different products which integrate with Pipenv projects: +There are a range of community-maintained plugins and extensions available for a range of editors and IDEs, as well as different products which integrate with Pipenv projects: -- `Heroku <https://heroku.com/python>`_ (Cloud Hosting) -- `Platform.sh <https://platform.sh/hosting/python>`_ (Cloud Hosting) -- `PyUp <https://pyup.io>`_ (Security Notification) -- `Emacs <https://github.com/pwalsh/pipenv.el>`_ (Editor Integration) -- `Fish Shell <https://github.com/fisherman/pipenv>`_ (Automatic ``$ pipenv shell``!) -- `VS Code <https://code.visualstudio.com/docs/python/environments>`_ (Editor Integration) -- `PyCharm <https://www.jetbrains.com/pycharm/download/>`_ (Editor Integration) +- [Heroku](https://heroku.com/python) (Cloud Hosting) +- [Platform.sh](https://platform.sh/hosting/python) (Cloud Hosting) +- [PyUp](https://pyup.io) (Security Notification) +- [Emacs](https://github.com/pwalsh/pipenv.el) (Editor Integration) +- [Fish Shell](https://github.com/fisherman/pipenv) (Automatic `$ pipenv shell`!) +- [VS Code](https://code.visualstudio.com/docs/python/environments) (Editor Integration) +- [PyCharm](https://www.jetbrains.com/pycharm/download/) (Editor Integration) +## ☤ Open a Module in Your Editor -☤ Open a Module in Your Editor ------------------------------ - -Pipenv allows you to open any Python module that is installed (including ones in your codebase), with the ``$ pipenv open`` command:: +Pipenv allows you to open any Python module that is installed (including ones in your codebase), with the `$ pipenv open` command: $ pipenv install -e git+https://github.com/kennethreitz/background.git#egg=background Installing -e git+https://github.com/kennethreitz/background.git#egg=background... ... Installing background... ... $ pipenv open background Opening '/Users/kennethreitz/.local/share/virtualenvs/hmm-mGOawwm_/src/background/background.py' in your EDITOR. This allows you to easily read the code you're consuming, instead of looking it up on GitHub. -..
note:: The standard ``EDITOR`` environment variable is used for this. If you're using VS Code, for example, you'll want to ``export EDITOR=code`` (if you're on macOS you will want to `install the command <https://code.visualstudio.com/docs/setup/mac#_launching-from-the-command-line>`_ on to your ``PATH`` first). - -☤ Automatic Python Installation -------------------------------- +Note -If you have `pyenv <https://github.com/pyenv/pyenv#simple-python-version-management-pyenv>`_ installed and configured, Pipenv will automatically ask you if you want to install a required version of Python if you don't already have it available. +The standard `EDITOR` environment variable is used for this. If you're using VS Code, for example, you'll want to `export EDITOR=code` (if you're on macOS you will want to [install the command](https://code.visualstudio.com/docs/setup/mac#_launching-from-the-command-line) on to your `PATH` first). -This is a very fancy feature, and we're very proud of it:: +This is a very fancy feature, and we're very proud of it: $ cat Pipfile [[source]] @@ -332,19 +301,17 @@ This is a very fancy feature, and we're very proud of it:: To activate this project's virtualenv, run the following: $ pipenv shell -Pipenv automatically honors both the ``python_full_version`` and ``python_version`` `PEP 508 <https://www.python.org/dev/peps/pep-0508/>`_ specifiers. +Pipenv automatically honors both the `python_full_version` and `python_version` [PEP 508](https://www.python.org/dev/peps/pep-0508/) specifiers. 💫✨🍰✨💫 -☤ Testing Projects ------------------- +## ☤ Testing Projects -Pipenv is being used in projects like `Requests`_ for declaring development dependencies and running the test suite. +Pipenv is being used in projects like [Requests](https://github.com/psf/requests) for declaring development dependencies and running the test suite. -Tox Automation Project -////////////////////// +### Tox Automation Project -Here's an example ``tox.ini`` for both local and external testing:: +Here's an example `tox.ini` for both local and external testing: [tox] envlist = py37, py38, py39, py310, py311, pypy3, ruff @@ -362,45 +329,27 @@ Here's an example ``tox.ini`` for both local and external testing:: pipenv run ruff --version pipenv run ruff . -Pipenv will automatically use the virtualenv provided by ``tox``. If ``pipenv install --dev`` installs e.g. ``pytest``, then installed command ``pytest`` will be present in given virtualenv and can be called directly by ``pytest tests`` instead of ``pipenv run pytest tests``. - -You might also want to add ``--ignore-pipfile`` to ``pipenv install``, as to -not accidentally modify the lock-file on each test run. This causes Pipenv -to ignore changes to the ``Pipfile`` and (more importantly) prevents it from -adding the current environment to ``Pipfile.lock``. This might be important as -the current environment (i.e. the virtualenv provisioned by tox) will usually -contain the current project (which may or may not be desired) and additional -dependencies from ``tox``'s ``deps`` directive. The initial provisioning may -alternatively be disabled by adding ``skip_install = True`` to tox.ini. - -This method requires you to be explicit about updating the lock-file, which is -probably a good idea in any case. - -A 3rd party plugin, `tox-pipenv`_ is also available to use Pipenv natively with tox. +Pipenv will automatically use the virtualenv provided by `tox`. If `pipenv install --dev` installs e.g. 
`pytest`, then installed command `pytest` will be present in given virtualenv and can be called directly by `pytest tests` instead of `pipenv run pytest tests`. -.. _Requests: https://github.com/psf/requests -.. _tox: https://tox.readthedocs.io/en/latest/ -.. _tox-pipenv: https://tox-pipenv.readthedocs.io/en/latest/ -.. _Travis-CI: https://travis-ci.org/ +You might also want to add `--ignore-pipfile` to `pipenv install`, as to not accidentally modify the lock-file on each test run. This causes Pipenv +to ignore changes to the `Pipfile` and (more importantly) prevents it from adding the current environment to `Pipfile.lock`. This might be important as the current environment (i.e. the virtualenv provisioned by tox) will usually +contain the current project (which may or may not be desired) and additional dependencies from `tox`'s `deps` directive. The initial provisioning may +alternatively be disabled by adding `skip_install = True` to tox.ini. +This method requires you to be explicit about updating the lock-file, which is probably a good idea in any case. +A 3rd party plugin, [tox-pipenv](https://tox-pipenv.readthedocs.io/en/latest/) is also available to use Pipenv natively with tox. ✨🍰✨ -☤ Working with Platform-Provided Python Components --------------------------------------------------- +## ☤ Working with Platform-Provided Python Components -It's reasonably common for platform specific Python bindings for -operating system interfaces to only be available through the system -package manager, and hence unavailable for installation into virtual -environments with ``pip``. In these cases, the virtual environment can -be created with access to the system ``site-packages`` directory:: +It's reasonably common for platform specific Python bindings for operating system interfaces to only be available through the system package manager, and hence unavailable for installation into virtual +environments with `pip`. In these cases, the virtual environment can be created with access to the system `site-packages` directory: $ pipenv --site-packages -To ensure that all ``pip``-installable components actually are installed -into the virtual environment and system packages are only used for -interfaces that don't participate in Python-level dependency resolution -at all, use the ``PIP_IGNORE_INSTALLED`` setting:: +To ensure that all `pip`-installable components actually are installed into the virtual environment and system packages are only used for interfaces that don't participate in Python-level dependency resolution +at all, use the `PIP_IGNORE_INSTALLED` setting: $ PIP_IGNORE_INSTALLED=1 pipenv install --dev diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 0000000000..e836f43dd6 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1,1875 @@ +# 2023.9.1 (2023-09-01) + +# Pipenv 2023.9.1 (2023-09-01) + +## Features & Improvements + +- Top level Pipfile sys_platform markers should be transitive; adds top level platform_machine entries that are also transitive. Marker entries continue to operate the same as before. [#5892](https://github.com/pypa/pipenv/issues/5892) + +## Bug Fixes + +- Apply patch for install_search_all_sources = True functionality. [#5895](https://github.com/pypa/pipenv/issues/5895) +- Relative paths improvements for editable installs. [#5896](https://github.com/pypa/pipenv/issues/5896) +- Set log level in resolver to WARN when verbose is not passed. 
[#5897](https://github.com/pypa/pipenv/issues/5897) +- Handle more variations in private index html to improve hash collection. [#5898](https://github.com/pypa/pipenv/issues/5898) + +# 2023.8.28 (2023-08-28) + +## Bug Fixes + +- Revert change that caused the credentials in source url issue. [#5878](https://github.com/pypa/pipenv/issues/5878) +- Do not treat named requirements as file installs just becacuse a match path exists; better handling of editable keyword for local file installs. + Handle additional edge cases in the setup.py ast parser logic for trying to determine local install package name. [#5885](https://github.com/pypa/pipenv/issues/5885) + +# 2023.8.26 (2023-08-26) + +## Bug Fixes + +- Additional property caching to avoid duplication of sources in the resolver. [#5863](https://github.com/pypa/pipenv/issues/5863) +- Fix recent regressions with local/editable file installs. [#5870](https://github.com/pypa/pipenv/issues/5870) +- Fixes the vcs subdirectory fragments regression; fixes sys_platform markers regression. [#5871](https://github.com/pypa/pipenv/issues/5871) +- Fix regression that caused printing non-printable ascii characters when help was called. [#5872](https://github.com/pypa/pipenv/issues/5872) + +# 2023.8.25 (2023-08-25) + +## Bug Fixes + +- Fix regression of hash collection when downloading package from private indexes when the hash is not found in the index href url fragment. [#5866](https://github.com/pypa/pipenv/issues/5866) + +# 2023.8.23 (2023-08-22) + +## Bug Fixes + +- More gracefully handle @ symbols in vcs URLs to address recent regression with vcs URLs. [#5849](https://github.com/pypa/pipenv/issues/5849) + +# 2023.8.22 (2023-08-22) + +## Bug Fixes + +- Fix regression with `ssh://` vcs URLs introduced in `2023.8.21` whereby ssh vcs URLs are expected to have at least one `@` symbol. [#5846](https://github.com/pypa/pipenv/issues/5846) + +# 2023.8.21 (2023-08-21) + +## Bug Fixes + +- Add back some relevant caching to increase performance after the major refactor released with `2023.8.19` [#5841](https://github.com/pypa/pipenv/issues/5841) +- Fix some edge cases around vcs dependencies without a ref, and older Pipfile/lockfile formats. [#5843](https://github.com/pypa/pipenv/issues/5843) + +## Vendored Libraries + +- Remove unused command line interface for vendored packages. [#5840](https://github.com/pypa/pipenv/issues/5840) + +# 2023.8.20 (2023-08-20) + +## Bug Fixes + +- Fix the expected output of the `version` command. [#5838](https://github.com/pypa/pipenv/issues/5838) + +# 2023.8.19 (2023-08-19) + +## Features & Improvements + +- The `--categories` option now works with requirements.txt file. [#5722](https://github.com/pypa/pipenv/issues/5722) + +## Bug Fixes + +- Drop requirementslib for managing pip lines and InstallRequirements, bring remaining requirementslib functionality into pipenv. + Fixes numerous reports about extras installs with vcs and file installs; format pip lines correctly to not generate deprecation warnings. [#5793](https://github.com/pypa/pipenv/issues/5793) + +## Vendored Libraries + +- Update pip 23.2 -> 23.2.1 [#5822](https://github.com/pypa/pipenv/issues/5822) + +## Improved Documentation + +- Added documentation on how to move or rename a project directory [#5129](https://github.com/pypa/pipenv/issues/5129) + +## Removals and Deprecations + +- The `--skip-lock` flag which was deprecated, has now been removed to unblock modernizing the pipenv resolver code. 
[#5805](https://github.com/pypa/pipenv/issues/5805) + +# 2023.7.23 (2023-07-23) + +## Features & Improvements + +- Upgrades `pip==23.2` which includes everything from the pip changelog. Drops the "install_compatatability_finder" pip internals patch. [#5808](https://github.com/pypa/pipenv/issues/5808) + +## Bug Fixes + +- Fix issue parsing some Pipfiles with separate packages.\<pkg> sections (tomlkit OutOfOrderTableProxy) [#5794](https://github.com/pypa/pipenv/issues/5794) +- Fix all ruff linter warnings [#5807](https://github.com/pypa/pipenv/issues/5807) +- Restore running Resolver in sub-process using the project python by default; maintains ability to run directly by setting `PIPENV_RESOLVER_PARENT_PYTHON` environment variable to 1 (useful for internal debugging). [#5809](https://github.com/pypa/pipenv/issues/5809) +- Fix error when a Windows path begins with a '' with `pythonfinder==2.0.5`. [#5812](https://github.com/pypa/pipenv/issues/5812) + +## Vendored Libraries + +- Remove usage of click.secho in some modules. [#5804](https://github.com/pypa/pipenv/issues/5804) + +2023.7.11 (2023-07-11) + +## Bug Fixes + +- Invoke the resolver in the same process as pipenv rather than utilizing subprocess. [#5787](https://github.com/pypa/pipenv/issues/5787) +- Fix regression markers being included as None/null in requirements command. [#5788](https://github.com/pypa/pipenv/issues/5788) + +# 2023.7.9 (2023-07-09) + +## Bug Fixes + +- Drop the --keep-outdated flag and --selective-upgrade flags that have been deprecated in favor of update/upgrade commands. [#5730](https://github.com/pypa/pipenv/issues/5730) +- Fix regressions in the `requirements` command related to standard index extras and handling of local file requirements. [#5784](https://github.com/pypa/pipenv/issues/5784) + +# 2023.7.4 (2023-07-04) + +## Bug Fixes + +- Fixes regression on Pipfile requirements syntax. Ensure default operator is provided to requirement lib to avoid crash. [#5765](https://github.com/pypa/pipenv/issues/5765) +- Ensure hashes included in a generated requirements file are after any markers. [#5777](https://github.com/pypa/pipenv/issues/5777) + +# 2023.7.3 (2023-07-02) + +## Bug Fixes + +- Fix regression with `--system` flag usage. [#5773](https://github.com/pypa/pipenv/issues/5773) + +# 2023.7.1 (2023-07-01) + +## Bug Fixes + +- Patch `_get_requests_session` method to consider `PIP_CLIENT_CERT` value when present. [#5746](https://github.com/pypa/pipenv/issues/5746) +- Fix regression in `requirements` command that was causing package installs after upgrade to `requirementslib==3.0.0`. [#5755](https://github.com/pypa/pipenv/issues/5755) +- Fix `error: invalid command 'egg_info'` edge case with requirementslib 3.0.0. It exposed pipenv resolver sometimes was using a different python than expected. [#5760](https://github.com/pypa/pipenv/issues/5760) +- Fix issue in requirementslib 3.0.0 where dependencies defined in pyproject.toml were not being included in the lock file. [#5766](https://github.com/pypa/pipenv/issues/5766) + +## Removals and Deprecations + +- Bump dparse to 0.6.3 [#5750](https://github.com/pypa/pipenv/issues/5750) + +# 2023.6.26 (2023-06-26) + +## Improved Documentation + +- Add missing environment variable descriptions back to documentation [#missing_env_var_desc](https://github.com/pypa/pipenv/issues/missing_env_var_desc) + +# 2023.6.18 (2023-06-18) + +## Bug Fixes + +- Fixes resolver to only consider the default index for packages when a secondary index is not specified. 
This brings the code into alignment with stated assumptions about index restricted packages behavior of `pipenv`. [#5737](https://github.com/pypa/pipenv/issues/5737) + +## Removals and Deprecations + +- Deprecation of `--skip-lock` flag as it bypasses the security benefits of pipenv. Plus it lacks proper deterministic support of installation from multiple package indexes. [#5737](https://github.com/pypa/pipenv/issues/5737) + +# 2023.6.12 (2023-06-11) + +## Bug Fixes + +- Remove the `sys.path` modifications and as a result fixes keyring support. [#5719](https://github.com/pypa/pipenv/issues/5719) + +# 2023.6.11 (2023-06-11) + +## Vendored Libraries + +- Upgrades to `pipdeptree==2.8.0` which fixes edge cases of the `pipenv graph` command. [#5720](https://github.com/pypa/pipenv/issues/5720) + +# 2023.6.2 (2023-06-02) + +## Features & Improvements + +- Resolver performance: package sources following PEP 503 will leverage package hashes from the URL fragment, without downloading the package. [#5701](https://github.com/pypa/pipenv/issues/5701) + +## Bug Fixes + +- Improve regex for python versions to handle hidden paths; handle relative paths to python better as well. [#4588](https://github.com/pypa/pipenv/issues/4588) +- Update `pythonfinder==2.0.4` with fix for "RecursionError: maximum recursion depth exceeded". [#5709](https://github.com/pypa/pipenv/issues/5709) + +## Vendored Libraries + +- Drop old vendored toml library. Use stdlib tomllib or tomli instead. [#5678](https://github.com/pypa/pipenv/issues/5678) +- Drop vendored library cerberus. This isn't actually used by pipenv. [#5699](https://github.com/pypa/pipenv/issues/5699) + +# 2023.5.19 (2023-05-19) + +## Bug Fixes + +- Consider `--index` argument in `update` and `upgrade` commands. [#5692](https://github.com/pypa/pipenv/issues/5692) + +## Vendored Libraries + +- Upgrade `pythonfinder==2.0.0` which also brings in `pydantic==1.10.7`. [#5677](https://github.com/pypa/pipenv/issues/5677) + +# 2023.4.29 (2023-04-29) + +## Vendored Libraries + +- Vendor in `pip==23.1.2` latest. [#5671](https://github.com/pypa/pipenv/issues/5671) +- Vendor in `requirementslib==2.3.0` which drops usage of `vistir`. [#5672](https://github.com/pypa/pipenv/issues/5672) + +# 2023.4.20 (2023-04-20) + +## Features & Improvements + +- Checks environment variable `PIP_TRUSTED_HOSTS` when evaluating an + index specified at the command line when adding to `Pipfile`. + + For example, this command line + + ``` + PIP_TRUSTED_HOSTS=internal.mycompany.com pipenv install pypkg --index=https://internal.mycompany.com/pypi/simple + ``` + + will add the following to the `Pipfile`: + + ``` + [[source]] + url = 'https://internal.mycompany.com/pypi/simple' + verify_ssl = false + name = 'Internalmycompany' + + [packages] + pypkg = {version="*", index="Internalmycompany"} + ``` + + This allows users with private indexes to add them to `Pipfile` + initially from command line with correct permissions using environment + variable `PIP_TRUSTED_HOSTS`. [#5572](https://github.com/pypa/pipenv/issues/5572) + +- Vendor in the updates, upgrades and fixes provided by `pip==23.1`. [#5655](https://github.com/pypa/pipenv/issues/5655) + +- Replace flake8 and isort with [ruff](https://beta.ruff.rs). [#ruff](https://github.com/pypa/pipenv/issues/ruff) + +## Bug Fixes + +- Fix regression with `--skip-lock` option with `install` command. 
[#5653](https://github.com/pypa/pipenv/issues/5653) + +## Vendored Libraries + +- Vendor in latest `python-dotenv==1.0.0` [#5656](https://github.com/pypa/pipenv/issues/5656) +- Vendor in latest available dependencies: `attrs==23.1.0` `click-didyoumean==0.3.0` `click==8.1.3` `markupsafe==2.1.2` `pipdeptree==2.7.0` `shellingham==1.5.0.post1` `tomlkit==0.11.7` [#5657](https://github.com/pypa/pipenv/issues/5657) +- Vendor in latest `requirementslib==2.2.5` which includes updates for pip 23.1 [#5659](https://github.com/pypa/pipenv/issues/5659) + +## Improved Documentation + +- Made documentation clear about tilde-equals operator for package versions. [#5594](https://github.com/pypa/pipenv/issues/5594) + +# 2023.3.20 (2023-03-19) + +No significant changes. + +# 2023.3.18 (2023-03-19) + +## Bug Fixes + +- Fix import error in virtualenv utility for creating new environments caused by `2023.3.18` release. [#5636](https://github.com/pypa/pipenv/issues/5636) + +# 2023.3.18 (2023-03-18) + +## Features & Improvements + +- Provide a more powerful solution than `--keep-outdated` and `--selective-upgrade` which are deprecated for removal. + Introducing the `pipenv upgrade` command which takes the same package specifiers as `pipenv install` and + updates the `Pipfile` and `Pipfile.lock` with a valid lock resolution that only effects the specified packages and their dependencies. + Additionally, the `pipenv update` command has been updated to use the `pipenv upgrade` routine when packages are provided, which will install sync the new lock file as well. [#5617](https://github.com/pypa/pipenv/issues/5617) + +## Vendored Libraries + +- Bump vistir to 0.8.0, requirementslib to 2.2.4. [#5635](https://github.com/pypa/pipenv/issues/5635) + +# 2023.2.18 (2023-02-18) + +## Features & Improvements + +- `pipenv` now reads the system `pip.conf` or `pip.ini` file in order to determine pre-defined indexes to use for package resolution and installation. [#5297](https://github.com/pypa/pipenv/issues/5297) +- Behavior change for `pipenv check` now checks the default packages group of the lockfile. + Specifying `--categories` to override which categories to check against. + Pass `--use-installed` to get the prior behavior of checking the packages actually installed into the environment. [#5600](https://github.com/pypa/pipenv/issues/5600) + +## Bug Fixes + +- Fix regression with detection of `CI` env variable being set to something other than a truthy value. [#5554](https://github.com/pypa/pipenv/issues/5554) +- Fix `--categories` argument inconsistency between requirements command and install/sync by allowing comma separated values or spaces. [#5570](https://github.com/pypa/pipenv/issues/5570) +- Use Nushell overlays when running `pipenv shell`. [#5603](https://github.com/pypa/pipenv/issues/5603) + +## Vendored Libraries + +- Vendor in the `pip==23.0` release. [#5586](https://github.com/pypa/pipenv/issues/5586) +- Vendor in `pip==23.0.1` minor pt release. Updates `pythonfinder==1.3.2`. [#5614](https://github.com/pypa/pipenv/issues/5614) + +## Improved Documentation + +- Make some improvements to the contributing guide. [#5611](https://github.com/pypa/pipenv/issues/5611) + +# 2023.2.4 (2023-02-04) + +## Bug Fixes + +- Fix overwriting of output in verbose mode [#5530](https://github.com/pypa/pipenv/issues/5530) +- Fix for resolution error when direct url includes an extras. 
[#5536](https://github.com/pypa/pipenv/issues/5536) + +## Removals and Deprecations + +- Remove pytest-pypi package since it's not used anymore [#5556](https://github.com/pypa/pipenv/issues/5556) +- Remove deprecated --three flag from the CLI. [#5576](https://github.com/pypa/pipenv/issues/5576) + +# 2022.12.19 (2022-12-19) + +## Bug Fixes + +- Fix for `requirementslib` hanging during install of remote wheels files. [#5546](https://github.com/pypa/pipenv/issues/5546) + +# 2022.12.17 (2022-12-17) + +## Bug Fixes + +- virtualenv creation no longer uses `--creator=venv` by default; introduced two environment variables: + `PIPENV_VIRTUALENV_CREATOR` -- May be specified to instruct virtualenv which `--creator=` to use. + `PIPENV_VIRTUALENV_COPIES` -- When specified as truthy, instructs virtualenv to not use symlinks. [#5477](https://github.com/pypa/pipenv/issues/5477) +- Fix regression where `path` is not propagated to the `Pipfile.lock`. [#5479](https://github.com/pypa/pipenv/issues/5479) +- Solve issue where null markers were getting added to lock file when extras were provided. [#5486](https://github.com/pypa/pipenv/issues/5486) +- Fix: `update --outdated` raises NonExistentKey with outdated dev packages [#5540](https://github.com/pypa/pipenv/issues/5540) + +## Vendored Libraries + +- Vendor in `pip==22.3.1` which is currently the latest version of `pip`. [#5520](https://github.com/pypa/pipenv/issues/5520) +- - Bump version of requirementslib to 2.2.1 + - Bump version of vistir to 0.7.5 + - Bump version of colorama to 0.4.6 [#5522](https://github.com/pypa/pipenv/issues/5522) +- Bump plette version to 0.4.4 [#5539](https://github.com/pypa/pipenv/issues/5539) + +# 2022.11.30 (2022-11-30) + +## Bug Fixes + +- Fix regression: pipenv does not sync indexes to lockfile. [#5508](https://github.com/pypa/pipenv/issues/5508) + +# 2022.11.25 (2022-11-24) + +## Bug Fixes + +- Solving issue where `pipenv check` command has been broken in the published wheel distribution. [#5493](https://github.com/pypa/pipenv/issues/5493) + +# 2022.11.24 (2022-11-24) + +## Bug Fixes + +- Stop building universal wheels since Python 2 is no longer supported. [#5496](https://github.com/pypa/pipenv/issues/5496) + +# 2022.11.23 (2022-11-23) + +## Features & Improvements + +- Find nushell activate scripts. [#5470](https://github.com/pypa/pipenv/issues/5470) + +## Vendored Libraries + +- - Drop unused code from cerberus + - Drop unused module wheel [#5467](https://github.com/pypa/pipenv/issues/5467) +- - Replace yaspin spinner with rich spinner. + - Bump vistir version to 0.7.4 [#5468](https://github.com/pypa/pipenv/issues/5468) +- Bump version of requirementslib to 2.2.0 + Drop yaspin which is no longer used. + Bump vistir to version 0.7.4 + Remove parse. + Remove termcolor. + Remove idna. [#5481](https://github.com/pypa/pipenv/issues/5481) + +# 2022.11.11 (2022-11-11) + +## Bug Fixes + +- Fix regression of lock generation that caused the keep-outdated behavior to be default. [#5456](https://github.com/pypa/pipenv/issues/5456) + +# 2022.11.5 (2022-11-05) + +## Bug Fixes + +- Rollback the change in version of `colorama` due to regressions in core functionality. [#5459](https://github.com/pypa/pipenv/issues/5459) + +# 2022.11.4 (2022-11-04) + +## Features & Improvements + +- Allow pipenv settings to be explicitly disabled more easily by assigning to the environment variable a falsy value. 
[#5451](https://github.com/pypa/pipenv/issues/5451) + +## Bug Fixes + +- Provide an install iteration per index when `install_search_all_sources` is `false` (default behavior). + This fixes regression where install phase was using unexpected index after updating `pip==22.3` [#5444](https://github.com/pypa/pipenv/issues/5444) + +## Vendored Libraries + +- Drop tomli, which is not used anymore. + Bump attrs version see #5449. + Drop distlib, colorama and platformdirs - use the ones from pip.\_vendor. [#5450](https://github.com/pypa/pipenv/issues/5450) + +# 2022.10.25 (2022-10-25) + +## Features & Improvements + +- Add support to export requirements file for a specified set of categories. [#5431](https://github.com/pypa/pipenv/issues/5431) + +## Vendored Libraries + +- Remove appdirs.py in favor of platformdirs. [#5420](https://github.com/pypa/pipenv/issues/5420) + +## Removals and Deprecations + +- Remove usage of vistir.cmdparse in favor of pipenv.cmdparse [#5419](https://github.com/pypa/pipenv/issues/5419) + +# 2022.10.12 (2022-10-12) + +## Improved Documentation + +- Update pipenv docs for with example for callabale package functions in Pipfile scripts [#5396](https://github.com/pypa/pipenv/issues/5396) + +# 2022.10.11 (2022-10-11) + +## Bug Fixes + +- Revert decision to change the default isolation level because it caused problems with existing workflows; solution is to recommend users that have issues requiring pre-requisites to pass --extra-pip-args="--no-build-isolation" in their install or sync commands. [#5399](https://github.com/pypa/pipenv/issues/5399) + +# 2022.10.10 (2022-10-10) + +## Features & Improvements + +- Add ability for callable scripts in Pipfile under \[scripts\]. Callables can now be added like: `<pathed.module>:<func>` and can also take arguments. For example: `func = {call = "package.module:func('arg1', 'arg2')"}` then this can be activated in the shell with `pipenv run func` [#5294](https://github.com/pypa/pipenv/issues/5294) + +## Bug Fixes + +- Fixes regression from `2022.10.9` where `Pipfile` with `pipenv` section began generating new hash, + and also fix regression where lock phase did not update the hash value. [#5394](https://github.com/pypa/pipenv/issues/5394) + +# 2022.10.9 (2022-10-09) + +## Behavior Changes + +- New pipfiles show python_full_version under \[requires\] if specified. Previously creating a new pipenv project would only specify in the Pipfile the major and minor version, i.e. "python_version = 3.7". Now if you create a new project with a fully named python version it will record both in the Pipfile. So: "python_version = 3.7" and "python_full_version = 3.7.2" [#5345](https://github.com/pypa/pipenv/issues/5345) + +## Relates to dev process changes + +- Silence majority of pytest.mark warnings by registering custom marks. Can view a list of custom marks by running `pipenv run pytest --markers` + +# 2022.10.4 (2022-10-04) + +## Bug Fixes + +- Use `--creator=venv` when creating virtual environments to avoid issue with sysconfig `posix_prefix` on some systems. [#5075](https://github.com/pypa/pipenv/issues/5075) +- Prefer to use the lockfile sources if available during the install phase. [#5380](https://github.com/pypa/pipenv/issues/5380) + +## Vendored Libraries + +- Drop vendored six - we no longer depend on this library, as we migrated from pipfile to plette. 
[#5187](https://github.com/pypa/pipenv/issues/5187) + +# 2022.9.24 (2022-09-24) + +## Bug Fixes + +- Update `requirementslib==2.0.3` to always evaluate the requirement markers fresh (without lru_cache) to fix marker determinism issue. [#4660](https://github.com/pypa/pipenv/issues/4660) + +# 2022.9.21 (2022-09-21) + +## Bug Fixes + +- Fix regression to `install --skip-lock` with update to `plette`. [#5368](https://github.com/pypa/pipenv/issues/5368) + +# 2022.9.20 (2022-09-20) + +## Behavior Changes + +- Remove usage of pipfile module in favour of Plette. + pipfile is not actively maintained anymore. Plette is actively maintained, + and has stricter checking of the Pipefile and Pipefile.lock. As a result, + Pipefile with unnamed package indices will fail to lock. If a Pipefile + was hand crafeted, and the source is anonymous an error will be thrown. + The solution is simple, add a name to your index, e.g, replace: + + ``` + [[source]] + url = "https://pypi.acme.com/simple" + verify_ssl = true + ``` + + With: + + ``` + [[source]] + url = "https://pypi.acme.com/simple" + verify_ssl = true + name = acmes_private_index `#5339 <https://github.com/pypa/pipenv/issues/5339>`_ + ``` + +## Bug Fixes + +- Modernize `pipenv` path patch with `importlib.util` to eliminate import of `pkg_resources` [#5349](https://github.com/pypa/pipenv/issues/5349) + +## Vendored Libraries + +- Remove iso8601 from vendored packages since it was not used. [#5346](https://github.com/pypa/pipenv/issues/5346) + +# 2022.9.8 (2022-09-08) + +## Features & Improvements + +- It is now possible to supply additional arguments to `pip` install by supplying `--extra-pip-args="<arg1> <arg2>"` + See the updated documentation `Supplying additional arguments to pip` for more details. [#5283](https://github.com/pypa/pipenv/issues/5283) + +## Bug Fixes + +- Make editable detection better because not everyone specifies editable entry in the Pipfile for local editable installs. [#4784](https://github.com/pypa/pipenv/issues/4784) +- Add error handling for when the installed package setup.py does not contain valid markers. [#5329](https://github.com/pypa/pipenv/issues/5329) +- Load the dot env earlier so that `PIPENV_CUSTOM_VENV_NAME` is more useful across projects. [#5334](https://github.com/pypa/pipenv/issues/5334) + +## Vendored Libraries + +- Bump version of shellingham to support nushell. [#5336](https://github.com/pypa/pipenv/issues/5336) +- Bump plette to version v0.3.0 [#5337](https://github.com/pypa/pipenv/issues/5337) +- Bump version of pipdeptree [#5343](https://github.com/pypa/pipenv/issues/5343) + +## Removals and Deprecations + +- Add deprecation warning to the --three flag. Pipenv now uses python3 by default. [#5328](https://github.com/pypa/pipenv/issues/5328) + +## Relates to dev process changes + +- Convert the test runner to use `pypiserver` as a standalone process for all tests that referencce internal `pypi` artifacts. + General refactoring of some test cases to create more variety in packages selected--preferring lighter weight packages--in existing test cases. + +# 2022.9.4 (2022-09-04) + +## Bug Fixes + +- Fix the issue from `2022.9.2` where tarball URL packages were being skipped on batch_install. [#5306](https://github.com/pypa/pipenv/issues/5306) + +# 2022.9.2 (2022-09-02) + +## Bug Fixes + +- Fix issue where unnamed constraints were provided but which are not allowed by `pip` resolver. 
[#5273](https://github.com/pypa/pipenv/issues/5273) + +# 2022.8.31 (2022-08-31) + +## Features & Improvements + +- Performance optimization to `batch_install` results in a faster and less CPU intensive `pipenv sync` or `pipenv install` experience. [#5301](https://github.com/pypa/pipenv/issues/5301) + +## Bug Fixes + +- `pipenv` now uses a `NamedTemporaryFile` for rsolver constraints and drops internal env var `PIPENV_PACKAGES`. [#4925](https://github.com/pypa/pipenv/issues/4925) + +## Removals and Deprecations + +- Remove no longer used method `which_pip`. [#5314](https://github.com/pypa/pipenv/issues/5314) +- Drop progress bar file due to recent performance optimization to combine `batch_install` requirements in at most two invocations of `pip install`. + To see progress of install pass `--verbose` flag and `pip` progress will be output in realtime. [#5315](https://github.com/pypa/pipenv/issues/5315) + +# 2022.8.30 (2022-08-30) + +## Bug Fixes + +- Fix an issue when using `pipenv install --system` on systems that having the `python` executable pointing to Python 2 and a Python 3 executable being `python3`. [#5296](https://github.com/pypa/pipenv/issues/5296) +- Sorting `constraints` before resolving, which fixes `pipenv lock` generates nondeterminism environment markers. [#5299](https://github.com/pypa/pipenv/issues/5299) +- Fix #5273, use our own method for checking if a package is a valid constraint. [#5309](https://github.com/pypa/pipenv/issues/5309) + +## Vendored Libraries + +- Vendor in `requirementslib==2.0.1` which fixes issue with local install not marked editable, and vendor in `vistir==0.6.1` which drops python2 support. + Drops `orderedmultidict` from vendoring. [#5308](https://github.com/pypa/pipenv/issues/5308) + +# 2022.8.24 (2022-08-24) + +## Bug Fixes + +- Remove eager and unnecessary importing of `setuptools` and `pkg_resources` to avoid conflict upgrading `setuptools`. + Roll back `sysconfig` patch of `pip` because it was problematic for some `--system` commands. [#5228](https://github.com/pypa/pipenv/issues/5228) + +## Vendored Libraries + +- Vendor in `requirementslib==2.0.0` and drop `pip-shims` entirely. [#5228](https://github.com/pypa/pipenv/issues/5228) +- Vendor in `pythonfinder==1.3.1` [#5292](https://github.com/pypa/pipenv/issues/5292) + +# 2022.8.19 (2022-08-19) + +## Bug Fixes + +- Fix issue where resolver is provided with `install_requires` constraints from `setup.py` that depend on editable dependencies and could not resolve them. [#5271](https://github.com/pypa/pipenv/issues/5271) +- Fix for `pipenv lock` fails for packages with extras as of `2022.8.13`. [#5274](https://github.com/pypa/pipenv/issues/5274) +- Revert the exclusion of `BAD_PACKAGES` from `batch_install` in order for `pipenv` to install specific versions of `setuptools`. + To prevent issue upgrading `setuptools` this patches `_USE_SYSCONFIG_DEFAULT` to use `sysconfig` for `3.7` and above whereas `pip` default behavior was `3.10` and above. [#5275](https://github.com/pypa/pipenv/issues/5275) + +# 2022.8.17 (2022-08-17) + +## Bug Fixes + +- Fix "The Python interpreter can't be found" error when running `pipenv install --system` with a python3 but no python. [#5261](https://github.com/pypa/pipenv/issues/5261) +- Revise pip import patch to include only `pipenv` from site-packages and removed `--ignore-installed` argument from pip install in order to fix regressions with `--use-site-packages`. 
[#5265](https://github.com/pypa/pipenv/issues/5265) + +# 2022.8.15 (2022-08-15) + +## Bug Fixes + +- `pip_install` method was using a different way of finding the python executable than other `pipenv` commands, which caused an issue with skipping package installation if it was already installed in site-packages. [#5254](https://github.com/pypa/pipenv/issues/5254) + +# 2022.8.14 (2022-08-14) + +## Bug Fixes + +- Removed `packaging` library from `BAD_PACKAGES` constant to allow it to be installed, which fixes regression from `pipenv==2022.8.13`. [#5247](https://github.com/pypa/pipenv/issues/5247) + +# 2022.8.13 (2022-08-13) + +## Bug Fixes + +- If environment variable `CI` or `TF_BUILD` is set but does not evaluate to `False` it is now treated as `True`. [#5128](https://github.com/pypa/pipenv/issues/5128) +- Fix auto-complete crashing on 'install' and 'uninstall' keywords [#5214](https://github.com/pypa/pipenv/issues/5214) +- Address remaining `pipenv` commands that were still referencing the user or system installed `pip` to use the vendored `pip` internal to `pipenv`. [#5229](https://github.com/pypa/pipenv/issues/5229) +- Use `packages` as constraints when locking `dev-packages` in Pipfile. + Use `packages` as constraints when installing new `dev-packages`. [#5234](https://github.com/pypa/pipenv/issues/5234) + +## Vendored Libraries + +- Vendor in minor `pip` update `22.2.2` [#5230](https://github.com/pypa/pipenv/issues/5230) + +## Improved Documentation + +- Add documentation for environment variables the configure pipenv. [#5235](https://github.com/pypa/pipenv/issues/5235) + +## Removals and Deprecations + +- The deprecated way of generating requirements `install -r` or `lock -r` has been removed in favor of the `pipenv requirements` command. [#5200](https://github.com/pypa/pipenv/issues/5200) + +# 2022.8.5 (2022-08-05) + +## Features & Improvements + +- support PIPENV_CUSTOM_VENV_NAME to be the venv name if specified, update relevant docs. [#4974](https://github.com/pypa/pipenv/issues/4974) + +## Bug Fixes + +- Remove usages of `pip_shims` from the non vendored `pipenv` code, but retain initialization for `requirementslib` still has usages. [#5204](https://github.com/pypa/pipenv/issues/5204) +- Fix case sensitivity of color name `red` in exception when getting hashes from pypi in `_get_hashes_from_pypi`. [#5206](https://github.com/pypa/pipenv/issues/5206) +- Write output from `subprocess_run` directly to `stdout` instead of creating temporary file. + Remove deprecated `distutils.sysconfig`, use `sysconfig`. [#5210](https://github.com/pypa/pipenv/issues/5210) + +## Vendored Libraries + +- - Rename patched `notpip` to `pip` in order to be clear that its a patched version of pip. + - Remove the part of \_post_pip_import.patch that overrode the standalone pip to be the user installed pip, now we fully rely on our vendored and patched `pip`, even for all types of installs. + - Vendor in the next newest version of `pip==22.2` + - Modify patch for `pipdeptree` to not use `pip-shims` [#5188](https://github.com/pypa/pipenv/issues/5188) + - Remove vendored `urllib3` in favor of using it from vendored version in `pip._vendor` [#5215](https://github.com/pypa/pipenv/issues/5215) + +## Removals and Deprecations + +- Remove tests that have been for a while been marked skipped and are no longer relevant. [#5165](https://github.com/pypa/pipenv/issues/5165) + +# 2022.7.24 (2022-07-24) + +## Bug Fixes + +- Re-enabled three installs tests again on the Windows CI as recent refactor work has fixed them. 
[#5064](https://github.com/pypa/pipenv/issues/5064) +- Support ANSI `NO_COLOR` environment variable and deprecate `PIPENV_COLORBLIND` variable, which will be removed after this release. [#5158](https://github.com/pypa/pipenv/issues/5158) +- Fixed edge case where a non-editable file, url or vcs would overwrite the value `no_deps` for all other requirements in the loop causing a retry condition. [#5164](https://github.com/pypa/pipenv/issues/5164) +- Vendor in latest `requirementslib` for fix to lock when using editable VCS module with specific `@` git reference. [#5179](https://github.com/pypa/pipenv/issues/5179) + +## Vendored Libraries + +- Remove crayons and replace with click.secho and click.styles per <https://github.com/pypa/pipenv/issues/3741> [#3741](https://github.com/pypa/pipenv/issues/3741) +- Vendor in latest version of `pip==22.1.2` which upgrades `pipenv` from `pip==22.0.4`. + Vendor in latest version of `requirementslib==1.6.7` which includes a fix for tracebacks on encountering Annotated variables. + Vendor in latest version of `pip-shims==0.7.3` such that imports could be rewritten to utilize `packaging` from vendor'd `pip`. + Drop the `packaging` requirement from the `vendor` directory in `pipenv`. [#5147](https://github.com/pypa/pipenv/issues/5147) +- Remove unused vendored dependency `normailze-charset`. [#5161](https://github.com/pypa/pipenv/issues/5161) +- Remove obsolete package `funcsigs`. [#5168](https://github.com/pypa/pipenv/issues/5168) +- Bump vendored dependency `pyparsing==3.0.9`. [#5170](https://github.com/pypa/pipenv/issues/5170) + +# 2022.7.4 (2022-07-04) + +## Behavior Changes + +- Adjust `pipenv requirements` to add markers and add an `--exclude-markers` option to allow the exclusion of markers. [#5092](https://github.com/pypa/pipenv/issues/5092) + +## Bug Fixes + +- Stopped expanding environment variables when using `pipenv requirements` [#5134](https://github.com/pypa/pipenv/issues/5134) + +## Vendored Libraries + +- Depend on `requests` and `certifi` from vendored `pip` and remove them as explicit vendor dependencies. [#5000](https://github.com/pypa/pipenv/issues/5000) +- Vendor in the latest version of `requirementslib==1.6.5` which includes bug fixes for beta python versions, projects with an at sign (@) in the path, and a `setuptools` deprecation warning. [#5132](https://github.com/pypa/pipenv/issues/5132) + +## Relates to dev process changes + +- Switch from using type comments to type annotations. + +# 2022.5.3.dev0 (2022-06-07) + +## Bug Fixes + +- Adjust pipenv to work with the newly added `venv` install scheme in Python. + First check if `venv` is among the available install schemes, and use it if it is. Otherwise fall back to the `nt` or `posix_prefix` install schemes as before. This should produce no change for environments where the install schemes were not redefined. [#5096](https://github.com/pypa/pipenv/issues/5096) + +# 2022.5.2 (2022-05-02) + +## Bug Fixes + +- Fixes issue of `pipenv lock -r` command printing to stdout instead of stderr. [#5091](https://github.com/pypa/pipenv/issues/5091) + +# 2022.4.30 (2022-04-30) + +## Bug Fixes + +- Fixes issue of `requirements` command problem by modifying to print `-e` and path of the editable package. [#5070](https://github.com/pypa/pipenv/issues/5070) +- Revert specifier of `setuptools` requirement in `setup.py` back to what it was in order to fix `FileNotFoundError: [Errno 2]` issue report. 
[#5075](https://github.com/pypa/pipenv/issues/5075) +- Fixes issue of requirements command where git requirements cause the command to fail, solved by using existing convert_deps_to_pip function. [#5076](https://github.com/pypa/pipenv/issues/5076) + +## Vendored Libraries + +- Vendor in `requirementslib==1.6.4` to Fix `SetuptoolsDeprecationWarning` `setuptools.config.read_configuration` became deprecated. [#5081](https://github.com/pypa/pipenv/issues/5081) + +## Removals and Deprecations + +- Remove more usage of misc functions of vistir. Many of this function are available in the STL or in another dependency of pipenv. [#5078](https://github.com/pypa/pipenv/issues/5078) + +# 2022.4.21 (2022-04-21) + +## Removals and Deprecations + +- Updated setup.py to remove support for python 3.6 from built `pipenv` packages' Metadata. [#5065](https://github.com/pypa/pipenv/issues/5065) + +# 2022.4.20 (2022-04-20) + +## Features & Improvements + +- Added new Pipenv option `install_search_all_sources` that allows installation of packages from an + existing `Pipfile.lock` to search all defined indexes for the constrained package version and hash signatures. [#5041](https://github.com/pypa/pipenv/issues/5041) + +## Bug Fixes + +- allow the user to disable the `no_input` flag, so the use of e.g Google Artifact Registry is possible. [#4706](https://github.com/pypa/pipenv/issues/4706) +- Fixes case where packages could fail to install and the exit code was successful. [#5031](https://github.com/pypa/pipenv/issues/5031) + +## Vendored Libraries + +- Updated vendor version of `pip` from `21.2.2` to `22.0.4` which fixes a number of bugs including + several reports of pipenv locking for an infinite amount of time when using certain package constraints. + This also drops support for python 3.6 as it is EOL and support was removed in pip 22.x [#4995](https://github.com/pypa/pipenv/issues/4995) + +## Removals and Deprecations + +- Removed the vendor dependency `more-itertools` as it was originally added for `zipp`, which since stopped using it. [#5044](https://github.com/pypa/pipenv/issues/5044) +- Removed all usages of `pipenv.vendor.vistir.compat.fs_str`, since this function was used for PY2-PY3 compatibility and is no longer needed. [#5062](https://github.com/pypa/pipenv/issues/5062) + +## Relates to dev process changes + +- Added pytest-cov and basic configuration to the project for generating html testing coverage reports. +- Make all CI jobs run only after the lint stage. Also added a makefile target for vendoring the packages. + +# 2022.4.8 (2022-04-08) + +## Features & Improvements + +- Implements a `pipenv requirements` command which generates a requirements.txt compatible output without locking. [#4959](https://github.com/pypa/pipenv/issues/4959) +- Internal to pipenv, the utils.py was split into a utils module with unused code removed. [#4992](https://github.com/pypa/pipenv/issues/4992) + +## Bug Fixes + +- Pipenv will now ignore `.venv` in the project when `PIPENV_VENV_IN_PROJECT` variable is False. + Unset variable maintains the existing behavior of preferring to use the project's `.venv` should it exist. [#2763](https://github.com/pypa/pipenv/issues/2763) +- Fix an edge case of hash collection in index restricted packages whereby the hashes for some packages would + be missing from the `Pipfile.lock` following package index restrictions added in `pipenv==2022.3.23`. [#5023](https://github.com/pypa/pipenv/issues/5023) + +## Improved Documentation + +- Pipenv CLI documentation generation has been fixed. 
It had broke when `click` was vendored into the project in + `2021.11.9` because by default `sphinx-click` could no longer determine the CLI inherited from click. [#4778](https://github.com/pypa/pipenv/issues/4778) +- Improve documentation around extra indexes and index restricted packages. [#5022](https://github.com/pypa/pipenv/issues/5022) + +## Removals and Deprecations + +- Removes the optional `install` argument `--extra-index-url` as it was not compatible with index restricted packages. + Using the `--index` argument is the correct way to specify a package should be pulled from the non-default index. [#5022](https://github.com/pypa/pipenv/issues/5022) + +## Relates to dev process changes + +- Added code linting using pre-commit-hooks, black, flake8, isort, pygrep-hooks, news-fragments and check-manifest. + Very similar to pip's configuration; adds a towncrier new's type `process` for change to Development processes. + +# 2022.3.28 (2022-03-27) + +## Bug Fixes + +- Environment variables were not being loaded when the `--quiet` flag was set [#5010](https://github.com/pypa/pipenv/issues/5010) +- It would appear that `requirementslib` was not fully specifying the subdirectory to `build_pep517` and + and when a new version of `setuptools` was released, the test `test_lock_nested_vcs_direct_url` + broke indicating the Pipfile.lock no longer contained the extra dependencies that should have been resolved. + This regression affected `pipenv>=2021.11.9` but has been fixed by a patch to `requirementslib`. [#5019](https://github.com/pypa/pipenv/issues/5019) + +## Vendored Libraries + +- Vendor in pip==21.2.4 (from 21.2.2) in order to bring in requested bug fix for python3.6. Note: support for 3.6 will be dropped in a subsequent release. [#5008](https://github.com/pypa/pipenv/issues/5008) + +# 2022.3.24 (2022-03-23) + +## Features & Improvements + +- It is now possible to silence the `Loading .env environment variables` message on `pipenv run` + with the `--quiet` flag or the `PIPENV_QUIET` environment variable. [#4027](https://github.com/pypa/pipenv/issues/4027) + +## Bug Fixes + +- Fixes issue with new index safety restriction, whereby an unnamed extra sources index + caused and error to be thrown during install. [#5002](https://github.com/pypa/pipenv/issues/5002) +- The text `Loading .env environment variables...` has been switched back to stderr as to not + break requirements.txt generation. Also it only prints now when a `.env` file is actually present. [#5003](https://github.com/pypa/pipenv/issues/5003) + +# 2022.3.23 (2022-03-22) + +## Features & Improvements + +- Use environment variable `PIPENV_SKIP_LOCK` to control the behaviour of lock skipping. [#4797](https://github.com/pypa/pipenv/issues/4797) +- New CLI command `verify`, checks the Pipfile.lock is up-to-date [#4893](https://github.com/pypa/pipenv/issues/4893) + +## Behavior Changes + +- Pattern expansion for arguments was disabled on Windows. [#4935](https://github.com/pypa/pipenv/issues/4935) + +## Bug Fixes + +- Python versions on Windows can now be installed automatically through pyenv-win [#4525](https://github.com/pypa/pipenv/issues/4525) +- Patched our vendored Pip to fix: Pipenv Lock (Or Install) Does Not Respect Index Specified For A Package. 
[#4637](https://github.com/pypa/pipenv/issues/4637) +- If `PIP_TARGET` is set to environment variables, Refer specified directory for calculate delta, instead default directory [#4775](https://github.com/pypa/pipenv/issues/4775) +- Remove remaining mention of python2 and --two flag from codebase. [#4938](https://github.com/pypa/pipenv/issues/4938) +- Use `CI` environment value, over mere existence of name [#4944](https://github.com/pypa/pipenv/issues/4944) +- Environment variables from dot env files are now properly expanded when included in scripts. [#4975](https://github.com/pypa/pipenv/issues/4975) + +## Vendored Libraries + +- Updated vendor version of `pythonfinder` from `1.2.9` to `1.2.10` which fixes a bug with WSL + (Windows Subsystem for Linux) when a path can not be read and Permission Denied error is encountered. [#4976](https://github.com/pypa/pipenv/issues/4976) + +## Removals and Deprecations + +- Removes long broken argument `--code` from `install` and `--unused` from `check`. + Check command no longer takes in arguments to ignore. + Removed the vendored dependencies: `pipreqs` and `yarg` [#4998](https://github.com/pypa/pipenv/issues/4998) + +# 2022.1.8 (2022-01-08) + +## Bug Fixes + +- Remove the extra parentheses around the venv prompt. [#4877](https://github.com/pypa/pipenv/issues/4877) +- Fix a bug of installation fails when extra index url is given. [#4881](https://github.com/pypa/pipenv/issues/4881) +- Fix regression where lockfiles would only include the hashes for releases for the platform generating the lockfile [#4885](https://github.com/pypa/pipenv/issues/4885) +- Fix the index parsing to reject illegal requirements.txt. [#4899](https://github.com/pypa/pipenv/issues/4899) + +# 2021.11.23 (2021-11-23) + +## Bug Fixes + +- Update `charset-normalizer` from `2.0.3` to `2.0.7`, this fixes an import error on Python 3.6. [#4865](https://github.com/pypa/pipenv/issues/4865) +- Fix a bug of deleting a virtualenv that is not managed by Pipenv. [#4867](https://github.com/pypa/pipenv/issues/4867) +- Fix a bug that source is not added to `Pipfile` when index url is given with `pipenv install`. [#4873](https://github.com/pypa/pipenv/issues/4873) + +# 2021.11.15 (2021-11-15) + +## Bug Fixes + +- Return an empty dict when `PIPENV_DONT_LOAD_ENV` is set. [#4851](https://github.com/pypa/pipenv/issues/4851) +- Don't use `sys.executable` when inside an activated venv. [#4852](https://github.com/pypa/pipenv/issues/4852) + +## Vendored Libraries + +- Drop the vendored `jinja2` dependency as it is not needed any more. [#4858](https://github.com/pypa/pipenv/issues/4858) +- Update `click` from `8.0.1` to `8.0.3`, to fix a problem with bash completion. [#4860](https://github.com/pypa/pipenv/issues/4860) +- Drop unused vendor `chardet`. [#4862](https://github.com/pypa/pipenv/issues/4862) + +## Improved Documentation + +- Fix the documentation to reflect the fact that special characters must be percent-encoded in the URL. [#4856](https://github.com/pypa/pipenv/issues/4856) + +# 2021.11.9 (2021-11-09) + +## Features & Improvements + +- Replace `click-completion` with `click`'s own completion implementation. [#4786](https://github.com/pypa/pipenv/issues/4786) + +## Bug Fixes + +- Fix a bug that `pipenv run` doesn't set environment variables correctly. [#4831](https://github.com/pypa/pipenv/issues/4831) +- Fix a bug that certifi can't be loaded within `notpip`'s vendor library. This makes several objects of `pip` fail to be imported. 
[#4833](https://github.com/pypa/pipenv/issues/4833) +- Fix a bug that `3.10.0` can be found be python finder. [#4837](https://github.com/pypa/pipenv/issues/4837) + +## Vendored Libraries + +- Update `pythonfinder` from `1.2.8` to `1.2.9`. [#4837](https://github.com/pypa/pipenv/issues/4837) + +# 2021.11.5.post0 (2021-11-05) + +## Bug Fixes + +- Fix a regression that `pipenv shell` fails to start a subshell. [#4828](https://github.com/pypa/pipenv/issues/4828) +- Fix a regression that `pip_shims` object isn't imported correctly. [#4829](https://github.com/pypa/pipenv/issues/4829) + +# 2021.11.5 (2021-11-05) + +## Features & Improvements + +- Avoid sharing states but create project objects on demand. So that most integration test cases are able to switch to a in-process execution method. [#4757](https://github.com/pypa/pipenv/issues/4757) +- Shell-quote `pip` commands when logging. [#4760](https://github.com/pypa/pipenv/issues/4760) + +## Bug Fixes + +- Ignore empty .venv in rood dir and create project name base virtual environment [#4790](https://github.com/pypa/pipenv/issues/4790) + +## Vendored Libraries + +- Update vendored dependencies + \- `attrs` from `20.3.0` to `21.2.0` + \- `cerberus` from `1.3.2` to `1.3.4` + \- `certifi` from `2020.11.8` to `2021.5.30` + \- `chardet` from `3.0.4` to `4.0.0` + \- `click` from `7.1.2` to `8.0.1` + \- `distlib` from `0.3.1` to `0.3.2` + \- `idna` from `2.10` to `3.2` + \- `importlib-metadata` from `2.0.0` to `4.6.1` + \- `importlib-resources` from `3.3.0` to `5.2.0` + \- `jinja2` from `2.11.2` to `3.0.1` + \- `markupsafe` from `1.1.1` to `2.0.1` + \- `more-itertools` from `5.0.0` to `8.8.0` + \- `packaging` from `20.8` to `21.0` + \- `pep517` from `0.9.1` to `0.11.0` + \- `pipdeptree` from `1.0.0` to `2.0.0` + \- `ptyprocess` from `0.6.0` to `0.7.0` + \- `python-dateutil` from `2.8.1` to `2.8.2` + \- `python-dotenv` from `0.15.0` to `0.19.0` + \- `pythonfinder` from `1.2.5` to `1.2.8` + \- `requests` from `2.25.0` to `2.26.0` + \- `shellingham` from `1.3.2` to `1.4.0` + \- `six` from `1.15.0` to `1.16.0` + \- `tomlkit` from `0.7.0` to `0.7.2` + \- `urllib3` from `1.26.1` to `1.26.6` + \- `zipp` from `1.2.0` to `3.5.0` + + Add new vendored dependencies + \- `charset-normalizer 2.0.3` + \- `termcolor 1.1.0` + \- `tomli 1.1.0` + \- `wheel 0.36.2` [#4747](https://github.com/pypa/pipenv/issues/4747) + +- Drop the dependencies for Python 2.7 compatibility purpose. [#4751](https://github.com/pypa/pipenv/issues/4751) + +- Switch the dependency resolver from `pip-tools` to `pip`. + + Update vendor libraries: + \- Update `requirementslib` from `1.5.16` to `1.6.1` + \- Update `pip-shims` from `0.5.6` to `0.6.0` + \- New vendor `platformdirs 2.4.0` [#4759](https://github.com/pypa/pipenv/issues/4759) + +## Improved Documentation + +- remove prefixes on install commands for easy copy/pasting [#4792](https://github.com/pypa/pipenv/issues/4792) +- Officially drop support for Python 2.7 and Python 3.5. [#4261](https://github.com/pypa/pipenv/issues/4261) + +# 2021.5.29 (2021-05-29) + +## Bug Fixes + +- Fix a bug where passing --skip-lock when PIPFILE has no \[SOURCE\] section throws the error: "tomlkit.exceptions.NonExistentKey: 'Key "source" does not exist.'" [#4141](https://github.com/pypa/pipenv/issues/4141) +- Fix bug where environment wouldn't activate in paths containing & and \$ symbols [#4538](https://github.com/pypa/pipenv/issues/4538) +- Fix a bug that `importlib-metadata` from the project's dependencies conflicts with that from `pipenv`'s. 
[#4549](https://github.com/pypa/pipenv/issues/4549) +- Fix a bug where `pep508checker.py` did not expect double-digit Python minor versions (e.g. "3.10"). [#4602](https://github.com/pypa/pipenv/issues/4602) +- Fix bug where environment wouldn't activate in paths containing () and \[\] symbols [#4615](https://github.com/pypa/pipenv/issues/4615) +- Fix bug preventing use of pipenv lock --pre [#4642](https://github.com/pypa/pipenv/issues/4642) + +## Vendored Libraries + +- Update `packaging` from `20.4` to `20.8`. [#4591](https://github.com/pypa/pipenv/issues/4591) + +# 2020.11.15 (2020-11-15) + +## Features & Improvements + +- Support expanding environment variables in requirement URLs. [#3516](https://github.com/pypa/pipenv/issues/3516) +- Show warning message when a dependency is skipped in locking due to the mismatch of its markers. [#4346](https://github.com/pypa/pipenv/issues/4346) + +## Bug Fixes + +- Fix a bug that executable scripts with leading backslash can't be executed via `pipenv run`. [#4368](https://github.com/pypa/pipenv/issues/4368) +- Fix a bug that VCS dependencies always satisfy even if the ref has changed. [#4387](https://github.com/pypa/pipenv/issues/4387) +- Restrict the acceptable hash type to SHA256 only. [#4517](https://github.com/pypa/pipenv/issues/4517) +- Fix the output of `pipenv scripts` under Windows platform. [#4523](https://github.com/pypa/pipenv/issues/4523) +- Fix a bug that the resolver takes wrong section to validate constraints. [#4527](https://github.com/pypa/pipenv/issues/4527) + +## Vendored Libraries + +- Update vendored dependencies: + : - `colorama` from `0.4.3` to `0.4.4` + - `python-dotenv` from `0.10.3` to `0.15.0` + - `first` from `2.0.1` to `2.0.2` + - `iso8601` from `0.1.12` to `0.1.13` + - `parse` from `1.15.0` to `1.18.0` + - `pipdeptree` from `0.13.2` to `1.0.0` + - `requests` from `2.23.0` to `2.25.0` + - `idna` from `2.9` to `2.10` + - `urllib3` from `1.25.9` to `1.26.1` + - `certifi` from `2020.4.5.1` to `2020.11.8` + - `requirementslib` from `1.5.15` to `1.5.16` + - `attrs` from `19.3.0` to `20.3.0` + - `distlib` from `0.3.0` to `0.3.1` + - `packaging` from `20.3` to `20.4` + - `six` from `1.14.0` to `1.15.0` + - `semver` from `2.9.0` to `2.13.0` + - `toml` from `0.10.1` to `0.10.2` + - `cached-property` from `1.5.1` to `1.5.2` + - `yaspin` from `0.14.3` to `1.2.0` + - `resolvelib` from `0.3.0` to `0.5.2` + - `pep517` from `0.8.2` to `0.9.1` + - `zipp` from `0.6.0` to `1.2.0` + - `importlib-metadata` from `1.6.0` to `2.0.0` + - `importlib-resources` from `1.5.0` to `3.3.0` [#4533](https://github.com/pypa/pipenv/issues/4533) + +## Improved Documentation + +- Fix suggested pyenv setup to avoid using shimmed interpreter [#4534](https://github.com/pypa/pipenv/issues/4534) + +# 2020.11.4 (2020-11-04) + +## Features & Improvements + +- Add a new command `pipenv scripts` to display shortcuts from Pipfile. [#3686](https://github.com/pypa/pipenv/issues/3686) +- Retrieve package file hash from URL to accelerate the locking process. [#3827](https://github.com/pypa/pipenv/issues/3827) +- Add the missing `--system` option to `pipenv sync`. [#4441](https://github.com/pypa/pipenv/issues/4441) +- Add a new option pair `--header/--no-header` to `pipenv lock` command, + which adds a header to the generated requirements.txt [#4443](https://github.com/pypa/pipenv/issues/4443) + +## Bug Fixes + +- Fix a bug that percent encoded characters will be unquoted incorrectly in the file URL. 
[#4089](https://github.com/pypa/pipenv/issues/4089) +- Fix a bug where setting PIPENV_PYTHON to file path breaks environment name [#4225](https://github.com/pypa/pipenv/issues/4225) +- Fix a bug that paths are not normalized before comparison. [#4330](https://github.com/pypa/pipenv/issues/4330) +- Handle Python major and minor versions correctly in Pipfile creation. [#4379](https://github.com/pypa/pipenv/issues/4379) +- Fix a bug that non-wheel file requirements can be resolved successfully. [#4386](https://github.com/pypa/pipenv/issues/4386) +- Fix a bug that `pexept.exceptions.TIMEOUT` is not caught correctly because of the wrong import path. [#4424](https://github.com/pypa/pipenv/issues/4424) +- Fix a bug that compound TOML table is not parsed correctly. [#4433](https://github.com/pypa/pipenv/issues/4433) +- Fix a bug that invalid Python paths from Windows registry break `pipenv install`. [#4436](https://github.com/pypa/pipenv/issues/4436) +- Fix a bug that function calls in `setup.py` can't be parsed rightly. [#4446](https://github.com/pypa/pipenv/issues/4446) +- Fix a bug that dist-info inside `venv` directory will be mistaken as the editable package's metadata. [#4480](https://github.com/pypa/pipenv/issues/4480) +- Make the order of hashes in resolution result stable. [#4513](https://github.com/pypa/pipenv/issues/4513) + +## Vendored Libraries + +- Update `tomlkit` from `0.5.11` to `0.7.0`. [#4433](https://github.com/pypa/pipenv/issues/4433) +- Update `requirementslib` from `1.5.13` to `1.5.14`. [#4480](https://github.com/pypa/pipenv/issues/4480) + +## Improved Documentation + +- Discourage homebrew installation in installation guides. [#4013](https://github.com/pypa/pipenv/issues/4013) + +# 2020.8.13 (2020-08-13) + +## Bug Fixes + +- Fixed behaviour of `pipenv uninstall --all-dev`. + From now on it does not uninstall regular packages. [#3722](https://github.com/pypa/pipenv/issues/3722) +- Fix a bug that incorrect Python path will be used when `--system` flag is on. [#4315](https://github.com/pypa/pipenv/issues/4315) +- Fix falsely flagging a Homebrew installed Python as a virtual environment [#4316](https://github.com/pypa/pipenv/issues/4316) +- Fix a bug that `pipenv uninstall` throws an exception that does not exist. [#4321](https://github.com/pypa/pipenv/issues/4321) +- Fix a bug that Pipenv can't locate the correct file of special directives in `setup.cfg` of an editable package. [#4335](https://github.com/pypa/pipenv/issues/4335) +- Fix a bug that `setup.py` can't be parsed correctly when the assignment is type-annotated. [#4342](https://github.com/pypa/pipenv/issues/4342) +- Fix a bug that `pipenv graph` throws an exception that PipenvCmdError(cmd_string, c.out, c.err, return_code). [#4388](https://github.com/pypa/pipenv/issues/4388) +- Do not copy the whole directory tree of local file package. [#4403](https://github.com/pypa/pipenv/issues/4403) +- Correctly detect whether Pipenv in run under an activated virtualenv. [#4412](https://github.com/pypa/pipenv/issues/4412) + +## Vendored Libraries + +- Update `requirementslib` to `1.5.12`. [#4385](https://github.com/pypa/pipenv/issues/4385) +- - Update `requirements` to `1.5.13`. + - Update `pip-shims` to `0.5.3`. [#4421](https://github.com/pypa/pipenv/issues/4421) + +# 2020.6.2 (2020-06-02) + +## Features & Improvements + +- Pipenv will now detect existing `venv` and `virtualenv` based virtual environments more robustly. 
[#4276](https://github.com/pypa/pipenv/issues/4276) + +## Bug Fixes + +- `+` signs in URL authentication fragments will no longer be incorrectly replaced with space ( \`\` \`\` ) characters. [#4271](https://github.com/pypa/pipenv/issues/4271) +- Fixed a regression which caused Pipenv to fail when running under `/`. [#4273](https://github.com/pypa/pipenv/issues/4273) +- `setup.py` files with `version` variables read from `os.environ` are now able to be parsed successfully. [#4274](https://github.com/pypa/pipenv/issues/4274) +- Fixed a bug which caused Pipenv to fail to install packages in a virtual environment if those packages were already present in the system global environment. [#4276](https://github.com/pypa/pipenv/issues/4276) +- Fix a bug that caused non-specific versions to be pinned in `Pipfile.lock`. [#4278](https://github.com/pypa/pipenv/issues/4278) +- Corrected a missing exception import and invalid function call invocations in `pipenv.cli.command`. [#4286](https://github.com/pypa/pipenv/issues/4286) +- Fixed an issue with resolving packages with names defined by function calls in `setup.py`. [#4292](https://github.com/pypa/pipenv/issues/4292) +- Fixed a regression with installing the current directory, or `.`, inside a `venv` based virtual environment. [#4295](https://github.com/pypa/pipenv/issues/4295) +- Fixed a bug with the discovery of python paths on Windows which could prevent installation of environments during `pipenv install`. [#4296](https://github.com/pypa/pipenv/issues/4296) +- Fixed an issue in the `requirementslib` AST parser which prevented parsing of `setup.py` files for dependency metadata. [#4298](https://github.com/pypa/pipenv/issues/4298) +- Fix a bug where Pipenv doesn't realize the session is interactive [#4305](https://github.com/pypa/pipenv/issues/4305) + +## Vendored Libraries + +- Updated requirementslib to version `1.5.11`. [#4292](https://github.com/pypa/pipenv/issues/4292) +- Updated vendored dependencies: + : - **pythonfinder**: `1.2.2` => `1.2.4` + - **requirementslib**: `1.5.9` => `1.5.10` [#4302](https://github.com/pypa/pipenv/issues/4302) + +# 2020.5.28 (2020-05-28) + +## Features & Improvements + +- `pipenv install` and `pipenv sync` will no longer attempt to install satisfied dependencies during installation. [#3057](https://github.com/pypa/pipenv/issues/3057), + [#3506](https://github.com/pypa/pipenv/issues/3506) + +- Added support for resolution of direct-url dependencies in `setup.py` files to respect `PEP-508` style URL dependencies. [#3148](https://github.com/pypa/pipenv/issues/3148) + +- Added full support for resolution of all dependency types including direct URLs, zip archives, tarballs, etc. + + - Improved error handling and formatting. + - Introduced improved cross platform stream wrappers for better `stdout` and `stderr` consistency. [#3298](https://github.com/pypa/pipenv/issues/3298) + +- For consistency with other commands and the `--dev` option + description, `pipenv lock --requirements --dev` now emits + both default and development dependencies. + The new `--dev-only` option requests the previous + behaviour (e.g. to generate a `dev-requirements.txt` file). [#3316](https://github.com/pypa/pipenv/issues/3316) + +- Pipenv will now successfully recursively lock VCS sub-dependencies. [#3328](https://github.com/pypa/pipenv/issues/3328) + +- Added support for `--verbose` output to `pipenv run`. 
[#3348](https://github.com/pypa/pipenv/issues/3348) + +- Pipenv will now discover and resolve the intrinsic dependencies of **all** VCS dependencies, whether they are editable or not, to prevent resolution conflicts. [#3368](https://github.com/pypa/pipenv/issues/3368) + +- Added a new environment variable, `PIPENV_RESOLVE_VCS`, to toggle dependency resolution off for non-editable VCS, file, and URL based dependencies. [#3577](https://github.com/pypa/pipenv/issues/3577) + +- Added the ability for Windows users to enable emojis by setting `PIPENV_HIDE_EMOJIS=0`. [#3595](https://github.com/pypa/pipenv/issues/3595) + +- Allow overriding PIPENV_INSTALL_TIMEOUT environment variable (in seconds). [#3652](https://github.com/pypa/pipenv/issues/3652) + +- Allow overriding PIP_EXISTS_ACTION environment variable (value is passed to pip install). + Possible values here: <https://pip.pypa.io/en/stable/reference/pip/#exists-action-option> + Useful when you need to `PIP_EXISTS_ACTION=i` (ignore existing packages) - great for CI environments, where you need really fast setup. [#3738](https://github.com/pypa/pipenv/issues/3738) + +- Pipenv will no longer forcibly override `PIP_NO_DEPS` on all vcs and file dependencies as resolution happens on these in a pre-lock step. [#3763](https://github.com/pypa/pipenv/issues/3763) + +- Improved verbose logging output during `pipenv lock` will now stream output to the console while maintaining a spinner. [#3810](https://github.com/pypa/pipenv/issues/3810) + +- Added support for automatic python installs via `asdf` and associated `PIPENV_DONT_USE_ASDF` environment variable. [#4018](https://github.com/pypa/pipenv/issues/4018) + +- Pyenv/asdf can now be used whether or not they are available on PATH. Setting PYENV_ROOT/ASDF_DIR in a Pipenv's .env allows Pipenv to install an interpreter without any shell customizations, so long as pyenv/asdf is installed. [#4245](https://github.com/pypa/pipenv/issues/4245) + +- Added `--key` command line parameter for including personal PyUp.io API tokens when running `pipenv check`. [#4257](https://github.com/pypa/pipenv/issues/4257) + +## Behavior Changes + +- Make conservative checks of known exceptions when subprocess returns output, so user won't see the whole traceback - just the error. [#2553](https://github.com/pypa/pipenv/issues/2553) +- Do not touch Pipfile early and rely on it so that one can do `pipenv sync` without a Pipfile. [#3386](https://github.com/pypa/pipenv/issues/3386) +- Re-enable `--help` option for `pipenv run` command. [#3844](https://github.com/pypa/pipenv/issues/3844) +- Make sure `pipenv lock -r --pypi-mirror {MIRROR_URL}` will respect the pypi-mirror in requirements output. [#4199](https://github.com/pypa/pipenv/issues/4199) + +## Bug Fixes + +- Raise `PipenvUsageError` when \[\[source\]\] does not contain url field. [#2373](https://github.com/pypa/pipenv/issues/2373) + +- Fixed a bug which caused editable package resolution to sometimes fail with an unhelpful setuptools-related error message. [#2722](https://github.com/pypa/pipenv/issues/2722) + +- Fixed an issue which caused errors due to reliance on the system utilities `which` and `where` which may not always exist on some systems. + \- Fixed a bug which caused periodic failures in python discovery when executables named `python` were not present on the target `$PATH`. [#2783](https://github.com/pypa/pipenv/issues/2783) + +- Dependency resolution now writes hashes for local and remote files to the lockfile. 
[#3053](https://github.com/pypa/pipenv/issues/3053) + +- Fixed a bug which prevented `pipenv graph` from correctly showing all dependencies when running from within `pipenv shell`. [#3071](https://github.com/pypa/pipenv/issues/3071) + +- Fixed resolution of direct-url dependencies in `setup.py` files to respect `PEP-508` style URL dependencies. [#3148](https://github.com/pypa/pipenv/issues/3148) + +- Fixed a bug which caused failures in warning reporting when running pipenv inside a virtualenv under some circumstances. + + - Fixed a bug with package discovery when running `pipenv clean`. [#3298](https://github.com/pypa/pipenv/issues/3298) + +- Quote command arguments with carets (`^`) on Windows to work around unintended shell escapes. [#3307](https://github.com/pypa/pipenv/issues/3307) + +- Handle alternate names for UTF-8 encoding. [#3313](https://github.com/pypa/pipenv/issues/3313) + +- Abort pipenv before adding the non-existent package to Pipfile. [#3318](https://github.com/pypa/pipenv/issues/3318) + +- Don't normalize the package name the user passes in. [#3324](https://github.com/pypa/pipenv/issues/3324) + +- Fix a bug where a custom virtualenv cannot be activated with pipenv shell. [#3339](https://github.com/pypa/pipenv/issues/3339) + +- Fix a bug that the `--site-packages` flag is not recognized. [#3351](https://github.com/pypa/pipenv/issues/3351) + +- Fix a bug where pipenv --clear is not working. [#3353](https://github.com/pypa/pipenv/issues/3353) + +- Fix an unhashable type error during `$ pipenv install --selective-upgrade`. [#3384](https://github.com/pypa/pipenv/issues/3384) + +- Dependencies with direct `PEP508` compliant VCS URLs specified in their `install_requires` will now be successfully locked during the resolution process. [#3396](https://github.com/pypa/pipenv/issues/3396) + +- Fixed a `KeyError` which could occur when locking VCS dependencies in some cases. [#3404](https://github.com/pypa/pipenv/issues/3404) + +- Fixed a bug that `ValidationError` is thrown when some fields are missing in the source section. [#3427](https://github.com/pypa/pipenv/issues/3427) + +- Updated the index names in the lock file when the source name in Pipfile is changed. [#3449](https://github.com/pypa/pipenv/issues/3449) + +- Fixed an issue which caused `pipenv install --help` to show duplicate entries for `--pre`. [#3479](https://github.com/pypa/pipenv/issues/3479) + +- Fix bug causing `[SSL: CERTIFICATE_VERIFY_FAILED]` when Pipfile `[[source]]` has verify_ssl=false and url with custom port. [#3502](https://github.com/pypa/pipenv/issues/3502) + +- Fix `sync --sequential` ignoring `pip install` errors and logs. [#3537](https://github.com/pypa/pipenv/issues/3537) + +- Fix the issue that the lock file can't be created when `PIPENV_PIPFILE` is not under the working directory. [#3584](https://github.com/pypa/pipenv/issues/3584) + +- Pipenv will no longer inadvertently set `editable=True` on all vcs dependencies. [#3647](https://github.com/pypa/pipenv/issues/3647) + +- The `--keep-outdated` argument to `pipenv install` and `pipenv lock` will now drop specifier constraints when encountering editable dependencies. + \- In addition, `--keep-outdated` will retain specifiers that would otherwise be dropped from any entries that have not been updated. [#3656](https://github.com/pypa/pipenv/issues/3656) + +- Fixed a bug which sometimes caused pipenv to fail to respect the `--site-packages` flag when passed with `pipenv install`. 
[#3718](https://github.com/pypa/pipenv/issues/3718) + +- Normalize the package names to lowercase when comparing used and in-Pipfile packages. [#3745](https://github.com/pypa/pipenv/issues/3745) + +- `pipenv update --outdated` will now correctly handle comparisons between pre/post-releases and normal releases. [#3766](https://github.com/pypa/pipenv/issues/3766) + +- Fixed a `KeyError` which could occur when pinning outdated VCS dependencies via `pipenv lock --keep-outdated`. [#3768](https://github.com/pypa/pipenv/issues/3768) + +- Resolved an issue which caused resolution to fail when encountering poorly formatted `python_version` markers in `setup.py` and `setup.cfg` files. [#3786](https://github.com/pypa/pipenv/issues/3786) + +- Fix a bug that installation errors are displayed as a list. [#3794](https://github.com/pypa/pipenv/issues/3794) + +- Update `pythonfinder` to fix a problem that `python.exe` will be mistakenly chosen for + virtualenv creation under WSL. [#3807](https://github.com/pypa/pipenv/issues/3807) + +- Fixed several bugs which could prevent editable VCS dependencies from being installed into target environments, even when reporting successful installation. [#3809](https://github.com/pypa/pipenv/issues/3809) + +- `pipenv check --system` should find the correct Python interpreter when `python` does not exist on the system. [#3819](https://github.com/pypa/pipenv/issues/3819) + +- Resolve the symlinks when the path is absolute. [#3842](https://github.com/pypa/pipenv/issues/3842) + +- Pass `--pre` and `--clear` options to `pipenv update --outdated`. [#3879](https://github.com/pypa/pipenv/issues/3879) + +- Fixed a bug which prevented resolution of direct URL dependencies which have PEP508 style direct url VCS sub-dependencies with subdirectories. [#3976](https://github.com/pypa/pipenv/issues/3976) + +- Honor PIPENV_SPINNER environment variable [#4045](https://github.com/pypa/pipenv/issues/4045) + +- Fixed an issue with `pipenv check` failing due to an invalid API key from `pyup.io`. [#4188](https://github.com/pypa/pipenv/issues/4188) + +- Fixed a bug which caused versions from VCS dependencies to be included in `Pipfile.lock` inadvertently. [#4217](https://github.com/pypa/pipenv/issues/4217) + +- Fixed a bug which caused pipenv to search non-existent virtual environments for `pip` when installing using `--system`. [#4220](https://github.com/pypa/pipenv/issues/4220) + +- `Requires-Python` values specifying constraint versions of python starting from `1.x` will now be parsed successfully. [#4226](https://github.com/pypa/pipenv/issues/4226) + +- Fix a bug of `pipenv update --outdated` that can't print output correctly. [#4229](https://github.com/pypa/pipenv/issues/4229) + +- Fixed a bug which caused pipenv to prefer source distributions over wheels from `PyPI` during the dependency resolution phase. + Fixed an issue which prevented proper build isolation using `pep517` based builders during dependency resolution. [#4231](https://github.com/pypa/pipenv/issues/4231) + +- Don't fallback to system Python when no matching Python version is found. 
[#4232](https://github.com/pypa/pipenv/issues/4232) + +## Vendored Libraries + +- Updated vendored dependencies: + + > - **attrs**: `18.2.0` => `19.1.0` + > - **certifi**: `2018.10.15` => `2019.3.9` + > - **cached_property**: `1.4.3` => `1.5.1` + > - **cerberus**: `1.2.0` => `1.3.1` + > - **click-completion**: `0.5.0` => `0.5.1` + > - **colorama**: `0.3.9` => `0.4.1` + > - **distlib**: `0.2.8` => `0.2.9` + > - **idna**: `2.7` => `2.8` + > - **jinja2**: `2.10.0` => `2.10.1` + > - **markupsafe**: `1.0` => `1.1.1` + > - **orderedmultidict**: `(new)` => `1.0` + > - **packaging**: `18.0` => `19.0` + > - **parse**: `1.9.0` => `1.12.0` + > - **pathlib2**: `2.3.2` => `2.3.3` + > - **pep517**: `(new)` => `0.5.0` + > - **pexpect**: `4.6.0` => `4.7.0` + > - **pipdeptree**: `0.13.0` => `0.13.2` + > - **pyparsing**: `2.2.2` => `2.3.1` + > - **python-dotenv**: `0.9.1` => `0.10.2` + > - **pythonfinder**: `1.1.10` => `1.2.1` + > - **pytoml**: `(new)` => `0.1.20` + > - **requests**: `2.20.1` => `2.21.0` + > - **requirementslib**: `1.3.3` => `1.5.0` + > - **scandir**: `1.9.0` => `1.10.0` + > - **shellingham**: `1.2.7` => `1.3.1` + > - **six**: `1.11.0` => `1.12.0` + > - **tomlkit**: `0.5.2` => `0.5.3` + > - **urllib3**: `1.24` => `1.25.2` + > - **vistir**: `0.3.0` => `0.4.1` + > - **yaspin**: `0.14.0` => `0.14.3` + + - Removed vendored dependency **cursor**. [#3298](https://github.com/pypa/pipenv/issues/3298) + +- Updated `pip_shims` to support `--outdated` with new pip versions. [#3766](https://github.com/pypa/pipenv/issues/3766) + +- Update vendored dependencies and invocations + + - Update vendored and patched dependencies + - Update patches on `piptools`, `pip`, `pip-shims`, `tomlkit` + - Fix invocations of dependencies + - Fix custom `InstallCommand` instantiation + - Update `PackageFinder` usage + - Fix `Bool` stringify attempts from `tomlkit` + + Updated vendored dependencies: + : - **attrs**: `` `18.2.0 `` => `` `19.1.0 `` + - **certifi**: `` `2018.10.15 `` => `` `2019.3.9 `` + - **cached_property**: `` `1.4.3 `` => `` `1.5.1 `` + - **cerberus**: `` `1.2.0 `` => `` `1.3.1 `` + - **click**: `` `7.0.0 `` => `` `7.1.1 `` + - **click-completion**: `` `0.5.0 `` => `` `0.5.1 `` + - **colorama**: `` `0.3.9 `` => `` `0.4.3 `` + - **contextlib2**: `` `(new) `` => `` `0.6.0.post1 `` + - **distlib**: `` `0.2.8 `` => `` `0.2.9 `` + - **funcsigs**: `` `(new) `` => `` `1.0.2 `` + - **importlib_metadata** `` `1.3.0 `` => `` `1.5.1 `` + - **importlib-resources**: `` `(new) `` => `` `1.4.0 `` + - **idna**: `` `2.7 `` => `` `2.9 `` + - **jinja2**: `` `2.10.0 `` => `` `2.11.1 `` + - **markupsafe**: `` `1.0 `` => `` `1.1.1 `` + - **more-itertools**: `` `(new) `` => `` `5.0.0 `` + - **orderedmultidict**: `` `(new) `` => `` `1.0 `` + - **packaging**: `` `18.0 `` => `` `19.0 `` + - **parse**: `` `1.9.0 `` => `` `1.15.0 `` + - **pathlib2**: `` `2.3.2 `` => `` `2.3.3 `` + - **pep517**: `` `(new) `` => `` `0.5.0 `` + - **pexpect**: `` `4.6.0 `` => `` `4.8.0 `` + - **pip-shims**: `` `0.2.0 `` => `` `0.5.1 `` + - **pipdeptree**: `` `0.13.0 `` => `` `0.13.2 `` + - **pyparsing**: `` `2.2.2 `` => `` `2.4.6 `` + - **python-dotenv**: `` `0.9.1 `` => `` `0.10.2 `` + - **pythonfinder**: `` `1.1.10 `` => `` `1.2.2 `` + - **pytoml**: `` `(new) `` => `` `0.1.20 `` + - **requests**: `` `2.20.1 `` => `` `2.23.0 `` + - **requirementslib**: `` `1.3.3 `` => `` `1.5.4 `` + - **scandir**: `` `1.9.0 `` => `` `1.10.0 `` + - **shellingham**: `` `1.2.7 `` => `` `1.3.2 `` + - **six**: `` `1.11.0 `` => `` `1.14.0 `` + - **tomlkit**: `` `0.5.2 `` => 
`` `0.5.11 `` + - **urllib3**: `` `1.24 `` => `` `1.25.8 `` + - **vistir**: `` `0.3.0 `` => `` `0.5.0 `` + - **yaspin**: `` `0.14.0 `` => `` `0.14.3 `` + - **zipp**: `` `0.6.0 `` + + - Removed vendored dependency **cursor**. [#4169](https://github.com/pypa/pipenv/issues/4169) + +- Add and update vendored dependencies to accommodate `safety` vendoring: + \- **safety** `(none)` => `1.8.7` + \- **dparse** `(none)` => `0.5.0` + \- **pyyaml** `(none)` => `5.3.1` + \- **urllib3** `1.25.8` => `1.25.9` + \- **certifi** `2019.11.28` => `2020.4.5.1` + \- **pyparsing** `2.4.6` => `2.4.7` + \- **resolvelib** `0.2.2` => `0.3.0` + \- **importlib-metadata** `1.5.1` => `1.6.0` + \- **pip-shims** `0.5.1` => `0.5.2` + \- **requirementslib** `1.5.5` => `1.5.6` [#4188](https://github.com/pypa/pipenv/issues/4188) + +- Updated vendored `pip` => `20.0.2` and `pip-tools` => `5.0.0`. [#4215](https://github.com/pypa/pipenv/issues/4215) + +- Updated vendored dependencies to latest versions for security and bug fixes: + + - **requirementslib** `1.5.8` => `1.5.9` + - **vistir** `0.5.0` => `0.5.1` + - **jinja2** `2.11.1` => `2.11.2` + - **click** `7.1.1` => `7.1.2` + - **dateutil** `(none)` => `2.8.1` + - **backports.functools_lru_cache** `1.5.0` => `1.6.1` + - **enum34** `1.1.6` => `1.1.10` + - **toml** `0.10.0` => `0.10.1` + - **importlib_resources** `1.4.0` => `1.5.0` [#4226](https://github.com/pypa/pipenv/issues/4226) + +- Changed attrs import path in vendored dependencies to always import from `pipenv.vendor`. [#4267](https://github.com/pypa/pipenv/issues/4267) + +## Improved Documentation + +- Added documentation about variable expansion in `Pipfile` entries. [#2317](https://github.com/pypa/pipenv/issues/2317) +- Consolidate all contributing docs in the rst file [#3120](https://github.com/pypa/pipenv/issues/3120) +- Update the out-dated manual page. [#3246](https://github.com/pypa/pipenv/issues/3246) +- Move CLI docs to its own page. [#3346](https://github.com/pypa/pipenv/issues/3346) +- Replace (non-existent) video on docs index.rst with equivalent gif. [#3499](https://github.com/pypa/pipenv/issues/3499) +- Clarify wording in Basic Usage example on using double quotes to escape shell redirection [#3522](https://github.com/pypa/pipenv/issues/3522) +- Ensure docs show navigation on small-screen devices [#3527](https://github.com/pypa/pipenv/issues/3527) +- Added a link to the TOML Spec under General Recommendations & Version Control to clarify how Pipfiles should be written. [#3629](https://github.com/pypa/pipenv/issues/3629) +- Updated the documentation with the new `pytest` entrypoint. [#3759](https://github.com/pypa/pipenv/issues/3759) +- Fix link to GIF in README.md demonstrating Pipenv's usage, and add descriptive alt text. [#3911](https://github.com/pypa/pipenv/issues/3911) +- Added a line describing potential issues in fancy extension. [#3912](https://github.com/pypa/pipenv/issues/3912) +- Documental description of how Pipfile works and association with Pipenv. [#3913](https://github.com/pypa/pipenv/issues/3913) +- Clarify the proper value of `python_version` and `python_full_version`. [#3914](https://github.com/pypa/pipenv/issues/3914) +- Write description for --deploy extension and few extensions differences. [#3915](https://github.com/pypa/pipenv/issues/3915) +- More documentation for `.env` files [#4100](https://github.com/pypa/pipenv/issues/4100) +- Updated documentation to point to working links. 
[#4137](https://github.com/pypa/pipenv/issues/4137) +- Replace docs.pipenv.org with pipenv.pypa.io [#4167](https://github.com/pypa/pipenv/issues/4167) +- Added functionality to check spelling in documentation and cleaned up existing typographical issues. [#4209](https://github.com/pypa/pipenv/issues/4209) + +# 2018.11.26 (2018-11-26) + +## Bug Fixes + +- Environment variables are expanded correctly before running scripts on POSIX. [#3178](https://github.com/pypa/pipenv/issues/3178) +- Pipenv will no longer disable user-mode installation when the `--system` flag is passed in. [#3222](https://github.com/pypa/pipenv/issues/3222) +- Fixed an issue with attempting to render unicode output in non-unicode locales. [#3223](https://github.com/pypa/pipenv/issues/3223) +- Fixed a bug which could cause failures to occur when parsing python entries from global pyenv version files. [#3224](https://github.com/pypa/pipenv/issues/3224) +- Fixed an issue which prevented the parsing of named extras sections from certain `setup.py` files. [#3230](https://github.com/pypa/pipenv/issues/3230) +- Correctly detect the virtualenv location inside an activated virtualenv. [#3231](https://github.com/pypa/pipenv/issues/3231) +- Fixed a bug which caused spinner frames to be written to standard output during locking operations which could cause redirection pipes to fail. [#3239](https://github.com/pypa/pipenv/issues/3239) +- Fixed a bug that editable packages can't be uninstalled correctly. [#3240](https://github.com/pypa/pipenv/issues/3240) +- Corrected an issue with installation timeouts which caused dependency resolution to fail for longer duration resolution steps. [#3244](https://github.com/pypa/pipenv/issues/3244) +- Adding normal pep 508 compatible markers is now fully functional when using VCS dependencies. [#3249](https://github.com/pypa/pipenv/issues/3249) +- Updated `requirementslib` and `pythonfinder` for multiple bug fixes. [#3254](https://github.com/pypa/pipenv/issues/3254) +- Pipenv will now ignore hashes when installing with `--skip-lock`. [#3255](https://github.com/pypa/pipenv/issues/3255) +- Fixed an issue where pipenv could crash when multiple pipenv processes attempted to create the same directory. [#3257](https://github.com/pypa/pipenv/issues/3257) +- Fixed an issue which sometimes prevented successful creation of a project Pipfile. [#3260](https://github.com/pypa/pipenv/issues/3260) +- `pipenv install` will now unset the `PYTHONHOME` environment variable when not combined with `--system`. [#3261](https://github.com/pypa/pipenv/issues/3261) +- Pipenv will ensure that warnings do not interfere with the resolution process by suppressing warnings' usage of standard output and writing to standard error instead. [#3273](https://github.com/pypa/pipenv/issues/3273) +- Fixed an issue which prevented variables from the environment, such as `PIPENV_DEV` or `PIPENV_SYSTEM`, from being parsed and implemented correctly. [#3278](https://github.com/pypa/pipenv/issues/3278) +- Clear pythonfinder cache after Python install. [#3287](https://github.com/pypa/pipenv/issues/3287) +- Fixed a race condition in hash resolution for dependencies for certain dependencies with missing cache entries or fresh Pipenv installs. [#3289](https://github.com/pypa/pipenv/issues/3289) +- Pipenv will now respect top-level pins over VCS dependency locks. 
[#3296](https://github.com/pypa/pipenv/issues/3296) + +## Vendored Libraries + +- Update vendored dependencies to resolve resolution output parsing and python finding: + : - `pythonfinder 1.1.9 -> 1.1.10` + - `requirementslib 1.3.1 -> 1.3.3` + - `vistir 0.2.3 -> 0.2.5` [#3280](https://github.com/pypa/pipenv/issues/3280) + +# 2018.11.14 (2018-11-14) + +## Features & Improvements + +- Improved exceptions and error handling on failures. [#1977](https://github.com/pypa/pipenv/issues/1977) +- Added persistent settings for all CLI flags via `PIPENV_{FLAG_NAME}` environment variables by enabling `auto_envvar_prefix=PIPENV` in click (implements PEEP-0002). [#2200](https://github.com/pypa/pipenv/issues/2200) +- Added improved messaging about available but skipped updates due to dependency conflicts when running `pipenv update --outdated`. [#2411](https://github.com/pypa/pipenv/issues/2411) +- Added environment variable `PIPENV_PYUP_API_KEY` to add ability + to override the bundled PyUP.io API key. [#2825](https://github.com/pypa/pipenv/issues/2825) +- Added additional output to `pipenv update --outdated` to indicate that the operation succeeded and all packages were already up to date. [#2828](https://github.com/pypa/pipenv/issues/2828) +- Updated `crayons` patch to enable colors on native powershell but swap native blue for magenta. [#3020](https://github.com/pypa/pipenv/issues/3020) +- Added support for `--bare` to `pipenv clean`, and fixed `pipenv sync --bare` to actually reduce output. [#3041](https://github.com/pypa/pipenv/issues/3041) +- Added windows-compatible spinner via upgraded `vistir` dependency. [#3089](https://github.com/pypa/pipenv/issues/3089) +- - Added support for python installations managed by `asdf`. [#3096](https://github.com/pypa/pipenv/issues/3096) +- Improved runtime performance of no-op commands such as `pipenv --venv` by around 2/3. [#3158](https://github.com/pypa/pipenv/issues/3158) +- Do not show error but success for running `pipenv uninstall --all` in a fresh virtual environment. [#3170](https://github.com/pypa/pipenv/issues/3170) +- Improved asynchronous installation and error handling via queued subprocess parallelization. [#3217](https://github.com/pypa/pipenv/issues/3217) + +## Bug Fixes + +- Remote non-PyPI artifacts and local wheels and artifacts will now include their own hashes rather than including hashes from `PyPI`. [#2394](https://github.com/pypa/pipenv/issues/2394) +- Non-ascii characters will now be handled correctly when parsed by pipenv's `ToML` parsers. [#2737](https://github.com/pypa/pipenv/issues/2737) +- Updated `pipenv uninstall` to respect the `--skip-lock` argument. [#2848](https://github.com/pypa/pipenv/issues/2848) +- Fixed a bug which caused uninstallation to sometimes fail to successfully remove packages from `Pipfiles` with comments on preceding or following lines. [#2885](https://github.com/pypa/pipenv/issues/2885), + [#3099](https://github.com/pypa/pipenv/issues/3099) +- Pipenv will no longer fail when encountering python versions on Windows that have been uninstalled. [#2983](https://github.com/pypa/pipenv/issues/2983) +- Fixed unnecessary extras are added when translating markers [#3026](https://github.com/pypa/pipenv/issues/3026) +- Fixed a virtualenv creation issue which could cause new virtualenvs to inadvertently attempt to read and write to global site packages. [#3047](https://github.com/pypa/pipenv/issues/3047) +- Fixed an issue with virtualenv path derivation which could cause errors, particularly for users on WSL bash. 
[#3055](https://github.com/pypa/pipenv/issues/3055) +- Fixed a bug which caused `Unexpected EOF` errors to be thrown when `pip` was waiting for input from users who had put login credentials in environment variables. [#3088](https://github.com/pypa/pipenv/issues/3088) +- Fixed a bug in `requirementslib` which prevented successful installation from mercurial repositories. [#3090](https://github.com/pypa/pipenv/issues/3090) +- Fixed random resource warnings when using pyenv or any other subprocess calls. [#3094](https://github.com/pypa/pipenv/issues/3094) +- - Fixed a bug which sometimes prevented cloning and parsing `mercurial` requirements. [#3096](https://github.com/pypa/pipenv/issues/3096) +- Fixed an issue in `delegator.py` related to subprocess calls when using `PopenSpawn` to stream output, which sometimes threw unexpected `EOF` errors. [#3102](https://github.com/pypa/pipenv/issues/3102), + [#3114](https://github.com/pypa/pipenv/issues/3114), + [#3117](https://github.com/pypa/pipenv/issues/3117) +- Fix the path casing issue that makes `pipenv clean` fail on Windows. [#3104](https://github.com/pypa/pipenv/issues/3104) +- Pipenv will avoid leaving build artifacts in the current working directory. [#3106](https://github.com/pypa/pipenv/issues/3106) +- Fixed issues with broken subprocess calls leaking resource handles and causing random and sporadic failures. [#3109](https://github.com/pypa/pipenv/issues/3109) +- Fixed an issue which caused `pipenv clean` to sometimes clean packages from the base `site-packages` folder or fail entirely. [#3113](https://github.com/pypa/pipenv/issues/3113) +- Updated `pythonfinder` to correct an issue with unnesting of nested paths when searching for python versions. [#3121](https://github.com/pypa/pipenv/issues/3121) +- Added additional logic for ignoring and replacing non-ascii characters when formatting console output on non-UTF-8 systems. [#3131](https://github.com/pypa/pipenv/issues/3131) +- Fix virtual environment discovery when `PIPENV_VENV_IN_PROJECT` is set, but the in-project `.venv` is a file. [#3134](https://github.com/pypa/pipenv/issues/3134) +- Hashes for remote and local non-PyPI artifacts will now be included in `Pipfile.lock` during resolution. [#3145](https://github.com/pypa/pipenv/issues/3145) +- Fix project path hashing logic in order to prevent collisions of virtual environments. [#3151](https://github.com/pypa/pipenv/issues/3151) +- Fix package installation when the virtual environment path contains parentheses. [#3158](https://github.com/pypa/pipenv/issues/3158) +- Azure Pipelines YAML files are updated to use the latest syntax and product name. [#3164](https://github.com/pypa/pipenv/issues/3164) +- Fixed new spinner success message to write only one success message during resolution. [#3183](https://github.com/pypa/pipenv/issues/3183) +- Pipenv will now correctly respect the `--pre` option when used with `pipenv install`. [#3185](https://github.com/pypa/pipenv/issues/3185) +- Fix a bug where an exception is raised when running pipenv graph in a project without a created virtualenv. [#3201](https://github.com/pypa/pipenv/issues/3201) +- When sources are missing names, names will now be derived from the supplied URL. [#3216](https://github.com/pypa/pipenv/issues/3216) + +## Vendored Libraries + +- Updated `pythonfinder` to correct an issue with unnesting of nested paths when searching for python versions. 
[#3061](https://github.com/pypa/pipenv/issues/3061), + [#3121](https://github.com/pypa/pipenv/issues/3121) +- Updated vendored dependencies: + : - `certifi 2018.08.24 => 2018.10.15` + - `urllib3 1.23 => 1.24` + - `requests 2.19.1 => 2.20.0` + - ``` shellingham ``1.2.6 => 1.2.7 ``` + - `tomlkit 0.4.4. => 0.4.6` + - `vistir 0.1.6 => 0.1.8` + - `pythonfinder 0.1.2 => 0.1.3` + - `requirementslib 1.1.9 => 1.1.10` + - `backports.functools_lru_cache 1.5.0 (new)` + - `cursor 1.2.0 (new)` [#3089](https://github.com/pypa/pipenv/issues/3089) +- Updated vendored dependencies: + : - `requests 2.19.1 => 2.20.1` + - `tomlkit 0.4.46 => 0.5.2` + - `vistir 0.1.6 => 0.2.4` + - `pythonfinder 1.1.2 => 1.1.8` + - `requirementslib 1.1.10 => 1.3.0` [#3096](https://github.com/pypa/pipenv/issues/3096) +- Switch to `tomlkit` for parsing and writing. Drop `prettytoml` and `contoml` from vendors. [#3191](https://github.com/pypa/pipenv/issues/3191) +- Updated `requirementslib` to aid in resolution of local and remote archives. [#3196](https://github.com/pypa/pipenv/issues/3196) + +## Improved Documentation + +- Expanded development and testing documentation for contributors to get started. [#3074](https://github.com/pypa/pipenv/issues/3074) + +# 2018.10.13 (2018-10-13) + +## Bug Fixes + +- Fixed a bug in `pipenv clean` which caused global packages to sometimes be inadvertently targeted for cleanup. [#2849](https://github.com/pypa/pipenv/issues/2849) +- Fix broken backport imports for vendored vistir. [#2950](https://github.com/pypa/pipenv/issues/2950), + [#2955](https://github.com/pypa/pipenv/issues/2955), + [#2961](https://github.com/pypa/pipenv/issues/2961) +- Fixed a bug with importing local vendored dependencies when running `pipenv graph`. [#2952](https://github.com/pypa/pipenv/issues/2952) +- Fixed a bug which caused executable discovery to fail when running inside a virtualenv. [#2957](https://github.com/pypa/pipenv/issues/2957) +- Fix parsing of outline tables. [#2971](https://github.com/pypa/pipenv/issues/2971) +- Fixed a bug which caused `verify_ssl` to fail to drop through to `pip install` correctly as `trusted-host`. [#2979](https://github.com/pypa/pipenv/issues/2979) +- Fixed a bug which caused canonicalized package names to fail to resolve against PyPI. [#2989](https://github.com/pypa/pipenv/issues/2989) +- Enhanced CI detection to detect Azure Devops builds. [#2993](https://github.com/pypa/pipenv/issues/2993) +- Fixed a bug which prevented installing pinned versions which used redirection symbols from the command line. [#2998](https://github.com/pypa/pipenv/issues/2998) +- Fixed a bug which prevented installing the local directory in non-editable mode. [#3005](https://github.com/pypa/pipenv/issues/3005) + +## Vendored Libraries + +- Updated `requirementslib` to version `1.1.9`. [#2989](https://github.com/pypa/pipenv/issues/2989) +- Upgraded `pythonfinder => 1.1.1` and `vistir => 0.1.7`. [#3007](https://github.com/pypa/pipenv/issues/3007) + +# 2018.10.9 (2018-10-09) + +## Features & Improvements + +- Added environment variables `PIPENV_VERBOSE` and `PIPENV_QUIET` to control + output verbosity without needing to pass options. [#2527](https://github.com/pypa/pipenv/issues/2527) + +- Updated test-PyPI add-on to better support json-API access (forward compatibility). + Improved testing process for new contributors. [#2568](https://github.com/pypa/pipenv/issues/2568) + +- Greatly enhanced python discovery functionality: + + - Added pep514 (windows launcher/finder) support for python discovery. 
+ - Introduced architecture discovery for python installations which support different architectures. [#2582](https://github.com/pypa/pipenv/issues/2582) + +- Added support for `pipenv shell` on msys and cygwin/mingw/git bash for Windows. [#2641](https://github.com/pypa/pipenv/issues/2641) + +- Enhanced resolution of editable and VCS dependencies. [#2643](https://github.com/pypa/pipenv/issues/2643) + +- Deduplicate and refactor CLI to use stateful arguments and object passing. See [this issue](https://github.com/pallets/click/issues/108) for reference. [#2814](https://github.com/pypa/pipenv/issues/2814) + +## Behavior Changes + +- Virtual environment activation for `run` is revised to improve interpolation + with other Python discovery tools. [#2503](https://github.com/pypa/pipenv/issues/2503) +- Improve terminal coloring to display better in Powershell. [#2511](https://github.com/pypa/pipenv/issues/2511) +- Invoke `virtualenv` directly for virtual environment creation, instead of depending on `pew`. [#2518](https://github.com/pypa/pipenv/issues/2518) +- `pipenv --help` will now include short help descriptions. [#2542](https://github.com/pypa/pipenv/issues/2542) +- Add `COMSPEC` to fallback option (along with `SHELL` and `PYENV_SHELL`) + if shell detection fails, improving robustness on Windows. [#2651](https://github.com/pypa/pipenv/issues/2651) +- Fallback to shell mode if `run` fails with Windows error 193 to handle non-executable commands. This should improve usability on Windows, where some users run non-executable files without specifying a command, relying on Windows file association to choose the current command. [#2718](https://github.com/pypa/pipenv/issues/2718) + +## Bug Fixes + +- Fixed a bug which prevented installation of editable requirements using `ssh://` style URLs [#1393](https://github.com/pypa/pipenv/issues/1393) +- VCS Refs for locked local editable dependencies will now update appropriately to the latest hash when running `pipenv update`. [#1690](https://github.com/pypa/pipenv/issues/1690) +- `.tar.gz` and `.zip` artifacts will now have dependencies installed even when they are missing from the Lockfile. [#2173](https://github.com/pypa/pipenv/issues/2173) +- The command line parser will now handle multiple `-e/--editable` dependencies properly via click's option parser to help mitigate future parsing issues. [#2279](https://github.com/pypa/pipenv/issues/2279) +- Fixed the ability of pipenv to parse `dependency_links` from `setup.py` when `PIP_PROCESS_DEPENDENCY_LINKS` is enabled. [#2434](https://github.com/pypa/pipenv/issues/2434) +- Fixed a bug which could cause `-i/--index` arguments to sometimes be incorrectly picked up in packages. This is now handled in the command line parser. [#2494](https://github.com/pypa/pipenv/issues/2494) +- Fixed non-deterministic resolution issues related to changes to the internal package finder in `pip 10`. 
[#2499](https://github.com/pypa/pipenv/issues/2499), + [#2529](https://github.com/pypa/pipenv/issues/2529), + [#2589](https://github.com/pypa/pipenv/issues/2589), + [#2666](https://github.com/pypa/pipenv/issues/2666), + [#2767](https://github.com/pypa/pipenv/issues/2767), + [#2785](https://github.com/pypa/pipenv/issues/2785), + [#2795](https://github.com/pypa/pipenv/issues/2795), + [#2801](https://github.com/pypa/pipenv/issues/2801), + [#2824](https://github.com/pypa/pipenv/issues/2824), + [#2862](https://github.com/pypa/pipenv/issues/2862), + [#2879](https://github.com/pypa/pipenv/issues/2879), + [#2894](https://github.com/pypa/pipenv/issues/2894), + [#2933](https://github.com/pypa/pipenv/issues/2933) +- Fix subshell invocation on Windows for Python 2. [#2515](https://github.com/pypa/pipenv/issues/2515) +- Fixed a bug which sometimes caused pipenv to throw a `TypeError` or to run into encoding issues when writing a Lockfile on python 2. [#2561](https://github.com/pypa/pipenv/issues/2561) +- Improve quoting logic for `pipenv run` so it works better with Windows + built-in commands. [#2563](https://github.com/pypa/pipenv/issues/2563) +- Fixed a bug related to parsing VCS requirements with both extras and subdirectory fragments. + Corrected an issue in the `requirementslib` parser which led to some markers being discarded rather than evaluated. [#2564](https://github.com/pypa/pipenv/issues/2564) +- Fixed multiple issues with finding the correct system python locations. [#2582](https://github.com/pypa/pipenv/issues/2582) +- Catch JSON decoding error to prevent exception when the lock file is of + invalid format. [#2607](https://github.com/pypa/pipenv/issues/2607) +- Fixed a rare bug which could sometimes cause errors when installing packages with custom sources. [#2610](https://github.com/pypa/pipenv/issues/2610) +- Update requirementslib to fix a bug which could raise an `UnboundLocalError` when parsing malformed VCS URIs. [#2617](https://github.com/pypa/pipenv/issues/2617) +- Fixed an issue which prevented passing multiple `--ignore` parameters to `pipenv check`. [#2632](https://github.com/pypa/pipenv/issues/2632) +- Fixed a bug which caused attempted hashing of `ssh://` style URIs which could cause failures during installation of private ssh repositories. + \- Corrected path conversion issues which caused certain editable VCS paths to be converted to `ssh://` URIs improperly. [#2639](https://github.com/pypa/pipenv/issues/2639) +- Fixed a bug which caused paths to be formatted incorrectly when using `pipenv shell` in bash for windows. [#2641](https://github.com/pypa/pipenv/issues/2641) +- Dependency links to private repositories defined via `ssh://` schemes will now install correctly and skip hashing as long as `PIP_PROCESS_DEPENDENCY_LINKS=1`. [#2643](https://github.com/pypa/pipenv/issues/2643) +- Fixed a bug which sometimes caused pipenv to parse the `trusted_host` argument to pip incorrectly when parsing source URLs which specify `verify_ssl = false`. [#2656](https://github.com/pypa/pipenv/issues/2656) +- Prevent crashing when a virtual environment in `WORKON_HOME` is faulty. [#2676](https://github.com/pypa/pipenv/issues/2676) +- Fixed virtualenv creation failure when a .venv file is present in the project root. [#2680](https://github.com/pypa/pipenv/issues/2680) +- Fixed a bug which could cause the `-e/--editable` argument on a dependency to be accidentally parsed as a dependency itself. 
[#2714](https://github.com/pypa/pipenv/issues/2714) +- Correctly pass `verbose` and `debug` flags to the resolver subprocess so it generates appropriate output. This also resolves a bug introduced by the fix to #2527. [#2732](https://github.com/pypa/pipenv/issues/2732) +- All markers are now included in `pipenv lock --requirements` output. [#2748](https://github.com/pypa/pipenv/issues/2748) +- Fixed a bug in marker resolution which could cause duplicate and non-deterministic markers. [#2760](https://github.com/pypa/pipenv/issues/2760) +- Fixed a bug in the dependency resolver which caused regular issues when handling `setup.py` based dependency resolution. [#2766](https://github.com/pypa/pipenv/issues/2766) +- Updated vendored dependencies: + : - `pip-tools` (updated and patched to latest w/ `pip 18.0` compatibility) + - `pip 10.0.1 => 18.0` + - `click 6.7 => 7.0` + - `toml 0.9.4 => 0.10.0` + - `pyparsing 2.2.0 => 2.2.2` + - `delegator 0.1.0 => 0.1.1` + - `attrs 18.1.0 => 18.2.0` + - `distlib 0.2.7 => 0.2.8` + - `packaging 17.1.0 => 18.0` + - `passa 0.2.0 => 0.3.1` + - `pip_shims 0.1.2 => 0.3.1` + - `plette 0.1.1 => 0.2.2` + - `pythonfinder 1.0.2 => 1.1.0` + - `pytoml 0.1.18 => 0.1.19` + - `requirementslib 1.1.16 => 1.1.17` + - `shellingham 1.2.4 => 1.2.6` + - `tomlkit 0.4.2 => 0.4.4` + - `vistir 0.1.4 => 0.1.6` + [#2802](https://github.com/pypa/pipenv/issues/2802), + [#2867](https://github.com/pypa/pipenv/issues/2867), + [#2880](https://github.com/pypa/pipenv/issues/2880) +- Fixed a bug where `pipenv` crashes when the `WORKON_HOME` directory does not exist. [#2877](https://github.com/pypa/pipenv/issues/2877) +- Fixed pip is not loaded from pipenv's patched one but the system one [#2912](https://github.com/pypa/pipenv/issues/2912) +- Fixed various bugs related to `pip 18.1` release which prevented locking, installation, and syncing, and dumping to a `requirements.txt` file. [#2924](https://github.com/pypa/pipenv/issues/2924) + +## Vendored Libraries + +- Pew is no longer vendored. Entry point `pewtwo`, packages `pipenv.pew` and + `pipenv.patched.pew` are removed. [#2521](https://github.com/pypa/pipenv/issues/2521) +- Update `pythonfinder` to major release `1.0.0` for integration. [#2582](https://github.com/pypa/pipenv/issues/2582) +- Update requirementslib to fix a bug which could raise an `UnboundLocalError` when parsing malformed VCS URIs. [#2617](https://github.com/pypa/pipenv/issues/2617) +- - Vendored new libraries `vistir` and `pip-shims`, `tomlkit`, `modutil`, and `plette`. 
+ - Update vendored libraries: + \- `scandir` to `1.9.0` + \- `click-completion` to `0.4.1` + \- `semver` to `2.8.1` + \- `shellingham` to `1.2.4` + \- `pytoml` to `0.1.18` + \- `certifi` to `2018.8.24` + \- `ptyprocess` to `0.6.0` + \- `requirementslib` to `1.1.5` + \- `pythonfinder` to `1.0.2` + \- `pipdeptree` to `0.13.0` + \- `python-dotenv` to `0.9.1` [#2639](https://github.com/pypa/pipenv/issues/2639) +- Updated vendored dependencies: + : - `pip-tools` (updated and patched to latest w/ `pip 18.0` compatibility) + - `pip 10.0.1 => 18.0` + - `click 6.7 => 7.0` + - `toml 0.9.4 => 0.10.0` + - `pyparsing 2.2.0 => 2.2.2` + - `delegator 0.1.0 => 0.1.1` + - `attrs 18.1.0 => 18.2.0` + - `distlib 0.2.7 => 0.2.8` + - `packaging 17.1.0 => 18.0` + - `passa 0.2.0 => 0.3.1` + - `pip_shims 0.1.2 => 0.3.1` + - `plette 0.1.1 => 0.2.2` + - `pythonfinder 1.0.2 => 1.1.0` + - `pytoml 0.1.18 => 0.1.19` + - `requirementslib 1.1.16 => 1.1.17` + - `shellingham 1.2.4 => 1.2.6` + - `tomlkit 0.4.2 => 0.4.4` + - `vistir 0.1.4 => 0.1.6` + [#2902](https://github.com/pypa/pipenv/issues/2902), + [#2935](https://github.com/pypa/pipenv/issues/2935) + +## Improved Documentation + +- Simplified the test configuration process. [#2568](https://github.com/pypa/pipenv/issues/2568) +- Updated documentation to use working fortune cookie add-on. [#2644](https://github.com/pypa/pipenv/issues/2644) +- Added additional information about troubleshooting `pipenv shell` by using the the `$PIPENV_SHELL` environment variable. [#2671](https://github.com/pypa/pipenv/issues/2671) +- Added a link to `PEP-440` version specifiers in the documentation for additional detail. [#2674](https://github.com/pypa/pipenv/issues/2674) +- Added simple example to README.md for installing from git. [#2685](https://github.com/pypa/pipenv/issues/2685) +- Stopped recommending `--system` for Docker contexts. [#2762](https://github.com/pypa/pipenv/issues/2762) +- Fixed the example url for doing "pipenv install -e + some-repository-url#egg=something", it was missing the "egg=" in the fragment + identifier. [#2792](https://github.com/pypa/pipenv/issues/2792) +- Fixed link to the "be cordial" essay in the contribution documentation. [#2793](https://github.com/pypa/pipenv/issues/2793) +- Clarify `pipenv install` documentation [#2844](https://github.com/pypa/pipenv/issues/2844) +- Replace reference to uservoice with PEEP-000 [#2909](https://github.com/pypa/pipenv/issues/2909) + +# 2018.7.1 (2018-07-01) + +## Features & Improvements + +- All calls to `pipenv shell` are now implemented from the ground up using [shellingham](https://github.com/sarugaku/shellingham), a custom library which was purpose built to handle edge cases and shell detection. [#2371](https://github.com/pypa/pipenv/issues/2371) +- Added support for python 3.7 via a few small compatibility / bug fixes. [#2427](https://github.com/pypa/pipenv/issues/2427), + [#2434](https://github.com/pypa/pipenv/issues/2434), + [#2436](https://github.com/pypa/pipenv/issues/2436) +- Added new flag `pipenv --support` to replace the diagnostic command `python -m pipenv.help`. [#2477](https://github.com/pypa/pipenv/issues/2477), + [#2478](https://github.com/pypa/pipenv/issues/2478) +- Improved import times and CLI run times with minor tweaks. [#2485](https://github.com/pypa/pipenv/issues/2485) + +## Bug Fixes + +- Fixed an ongoing bug which sometimes resolved incompatible versions into the project Lockfile. 
[#1901](https://github.com/pypa/pipenv/issues/1901) +- Fixed a bug which caused errors when creating virtualenvs which contained leading dash characters. [#2415](https://github.com/pypa/pipenv/issues/2415) +- Fixed a logic error which caused `--deploy --system` to overwrite editable vcs packages in the Pipfile before installing, which caused any installation to fail by default. [#2417](https://github.com/pypa/pipenv/issues/2417) +- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. [#2419](https://github.com/pypa/pipenv/issues/2419) +- Installed new vendored jinja2 templates for `click-completion` which were causing template errors for users with completion enabled. [#2422](https://github.com/pypa/pipenv/issues/2422) +- Added support for python 3.7 via a few small compatibility / bug fixes. [#2427](https://github.com/pypa/pipenv/issues/2427) +- Fixed an issue reading package names from `setup.py` files in projects which imported utilities such as `versioneer`. [#2433](https://github.com/pypa/pipenv/issues/2433) +- Pipenv will now ensure that its internal package names registry files are written with unicode strings. [#2450](https://github.com/pypa/pipenv/issues/2450) +- Fixed a bug causing requirements input as relative paths to be output as absolute paths or URIs. + Fixed a bug affecting normalization of `git+git@host` URLs. [#2453](https://github.com/pypa/pipenv/issues/2453) +- Pipenv will now always use `pathlib2` for `Path` based filesystem interactions by default on `python<3.5`. [#2454](https://github.com/pypa/pipenv/issues/2454) +- Fixed a bug which prevented passing proxy PyPI indexes set with `--pypi-mirror` from being passed to pip during virtualenv creation, which could cause the creation to freeze in some cases. [#2462](https://github.com/pypa/pipenv/issues/2462) +- Using the `python -m pipenv.help` command will now use proper encoding for the host filesystem to avoid encoding issues. [#2466](https://github.com/pypa/pipenv/issues/2466) +- The new `jinja2` templates for `click_completion` will now be included in pipenv source distributions. [#2479](https://github.com/pypa/pipenv/issues/2479) +- Resolved a long-standing issue with re-using previously generated `InstallRequirement` objects for resolution which could cause `PKG-INFO` file information to be deleted, raising a `TypeError`. [#2480](https://github.com/pypa/pipenv/issues/2480) +- Resolved an issue parsing usernames from private PyPI URIs in `Pipfiles` by updating `requirementslib`. [#2484](https://github.com/pypa/pipenv/issues/2484) + +## Vendored Libraries + +- All calls to `pipenv shell` are now implemented from the ground up using [shellingham](https://github.com/sarugaku/shellingham), a custom library which was purpose built to handle edge cases and shell detection. [#2371](https://github.com/pypa/pipenv/issues/2371) +- Updated requirementslib to fix an issue with properly quoting markers in VCS requirements. [#2419](https://github.com/pypa/pipenv/issues/2419) +- Installed new vendored jinja2 templates for `click-completion` which were causing template errors for users with completion enabled. [#2422](https://github.com/pypa/pipenv/issues/2422) +- Add patch to `prettytoml` to support Python 3.7. [#2426](https://github.com/pypa/pipenv/issues/2426) +- Patched `prettytoml.AbstractTable._enumerate_items` to handle `StopIteration` errors in preparation of release of python 3.7. 
[#2427](https://github.com/pypa/pipenv/issues/2427) +- Fixed an issue reading package names from `setup.py` files in projects which imported utilities such as `versioneer`. [#2433](https://github.com/pypa/pipenv/issues/2433) +- Updated `requirementslib` to version `1.0.9` [#2453](https://github.com/pypa/pipenv/issues/2453) +- Unraveled a lot of old, unnecessary patches to `pip-tools` which were causing non-deterministic resolution errors. [#2480](https://github.com/pypa/pipenv/issues/2480) +- Resolved an issue parsing usernames from private PyPI URIs in `Pipfiles` by updating `requirementslib`. [#2484](https://github.com/pypa/pipenv/issues/2484) + +## Improved Documentation + +- Added instructions for installing using Fedora's official repositories. [#2404](https://github.com/pypa/pipenv/issues/2404) + +# 2018.6.25 (2018-06-25) + +## Features & Improvements + +- Pipenv-created virtualenvs will now be associated with a `.project` folder + (features can be implemented on top of this later or users may choose to use + `pipenv-pipes` to take full advantage of this.) [#1861](https://github.com/pypa/pipenv/issues/1861) +- Virtualenv names will now appear in prompts for most Windows users. [#2167](https://github.com/pypa/pipenv/issues/2167) +- Added support for cmder shell paths with spaces. [#2168](https://github.com/pypa/pipenv/issues/2168) +- Added nested JSON output to the `pipenv graph` command. [#2199](https://github.com/pypa/pipenv/issues/2199) +- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated + patched piptools version. [#2255](https://github.com/pypa/pipenv/issues/2255) +- PyPI mirror URLs can now be set to override instances of PyPI URLs by passing + the `--pypi-mirror` argument from the command line or setting the + `PIPENV_PYPI_MIRROR` environment variable. [#2281](https://github.com/pypa/pipenv/issues/2281) +- Virtualenv activation lines will now avoid being written to some shell + history files. [#2287](https://github.com/pypa/pipenv/issues/2287) +- Pipenv will now only search for `requirements.txt` files when creating new + projects, and during that time only if the user doesn't specify packages to + pass in. [#2309](https://github.com/pypa/pipenv/issues/2309) +- Added support for mounted drives via UNC paths. [#2331](https://github.com/pypa/pipenv/issues/2331) +- Added support for Windows Subsystem for Linux bash shell detection. [#2363](https://github.com/pypa/pipenv/issues/2363) +- Pipenv will now generate hashes much more quickly by resolving them in a + single pass during locking. [#2384](https://github.com/pypa/pipenv/issues/2384) +- `pipenv run` will now avoid spawning additional `COMSPEC` instances to + run commands in when possible. [#2385](https://github.com/pypa/pipenv/issues/2385) +- Massive internal improvements to requirements parsing codebase, resolver, and + error messaging. [#2388](https://github.com/pypa/pipenv/issues/2388) +- `pipenv check` now may take multiple of the additional argument + `--ignore` which takes a parameter `cve_id` for the purpose of ignoring + specific CVEs. [#2408](https://github.com/pypa/pipenv/issues/2408) + +## Behavior Changes + +- Pipenv will now parse & capitalize `platform_python_implementation` markers + .. warning:: This could cause an issue if you have an out of date `Pipfile` + which lower-cases the comparison value (e.g. `cpython` instead of + `CPython`). 
[#2123](https://github.com/pypa/pipenv/issues/2123) +- Pipenv will now only search for `requirements.txt` files when creating new + projects, and during that time only if the user doesn't specify packages to + pass in. [#2309](https://github.com/pypa/pipenv/issues/2309) + +## Bug Fixes + +- Massive internal improvements to requirements parsing codebase, resolver, and + error messaging. [#1962](https://github.com/pypa/pipenv/issues/1962), + [#2186](https://github.com/pypa/pipenv/issues/2186), + [#2263](https://github.com/pypa/pipenv/issues/2263), + [#2312](https://github.com/pypa/pipenv/issues/2312) +- Pipenv will now parse & capitalize `platform_python_implementation` + markers. [#2123](https://github.com/pypa/pipenv/issues/2123) +- Fixed a bug with parsing and grouping old-style `setup.py` extras during + resolution [#2142](https://github.com/pypa/pipenv/issues/2142) +- Fixed a bug causing pipenv graph to throw unhelpful exceptions when running + against empty or non-existent environments. [#2161](https://github.com/pypa/pipenv/issues/2161) +- Fixed a bug which caused `--system` to incorrectly abort when users were in + a virtualenv. [#2181](https://github.com/pypa/pipenv/issues/2181) +- Removed vendored `cacert.pem` which could cause issues for some users with + custom certificate settings. [#2193](https://github.com/pypa/pipenv/issues/2193) +- Fixed a regression which led to direct invocations of `virtualenv`, rather + than calling it by module. [#2198](https://github.com/pypa/pipenv/issues/2198) +- Locking will now pin the correct VCS ref during `pipenv update` runs. + Running `pipenv update` with a new vcs ref specified in the `Pipfile` + will now properly obtain, resolve, and install the specified dependency at + the specified ref. [#2209](https://github.com/pypa/pipenv/issues/2209) +- `pipenv clean` will now correctly ignore comments from `pip freeze` when + cleaning the environment. [#2262](https://github.com/pypa/pipenv/issues/2262) +- Resolution bugs causing packages for incompatible python versions to be + locked have been fixed. [#2267](https://github.com/pypa/pipenv/issues/2267) +- Fixed a bug causing pipenv graph to fail to display sometimes. [#2268](https://github.com/pypa/pipenv/issues/2268) +- Updated `requirementslib` to fix a bug in Pipfile parsing affecting + relative path conversions. [#2269](https://github.com/pypa/pipenv/issues/2269) +- Windows executable discovery now leverages `os.pathext`. [#2298](https://github.com/pypa/pipenv/issues/2298) +- Fixed a bug which caused `--deploy --system` to inadvertently create a + virtualenv before failing. [#2301](https://github.com/pypa/pipenv/issues/2301) +- Fixed an issue which led to a failure to unquote special characters in file + and wheel paths. [#2302](https://github.com/pypa/pipenv/issues/2302) +- VCS dependencies are now manually obtained only if they do not match the + requested ref. [#2304](https://github.com/pypa/pipenv/issues/2304) +- Added error handling functionality to properly cope with single-digit + `Requires-Python` metadata with no specifiers. [#2377](https://github.com/pypa/pipenv/issues/2377) +- `pipenv update` will now always run the resolver and lock before ensuring + dependencies are in sync with project Lockfile. [#2379](https://github.com/pypa/pipenv/issues/2379) +- Resolved a bug in our patched resolvers which could cause nondeterministic + resolution failures in certain conditions. 
Running `pipenv install` with no + arguments in a project with only a `Pipfile` will now correctly lock first + for dependency resolution before installing. [#2384](https://github.com/pypa/pipenv/issues/2384) +- Patched `python-dotenv` to ensure that environment variables always get + encoded to the filesystem encoding. [#2386](https://github.com/pypa/pipenv/issues/2386) + +## Improved Documentation + +- Update documentation wording to clarify Pipenv's overall role in the packaging ecosystem. [#2194](https://github.com/pypa/pipenv/issues/2194) +- Added contribution documentation and guidelines. [#2205](https://github.com/pypa/pipenv/issues/2205) +- Added instructions for supervisord compatibility. [#2215](https://github.com/pypa/pipenv/issues/2215) +- Fixed broken links to development philosophy and contribution documentation. [#2248](https://github.com/pypa/pipenv/issues/2248) + +## Vendored Libraries + +- Removed vendored `cacert.pem` which could cause issues for some users with + custom certificate settings. [#2193](https://github.com/pypa/pipenv/issues/2193) + +- Dropped vendored pip 9 and vendored, patched, and migrated to pip 10. Updated + patched piptools version. [#2255](https://github.com/pypa/pipenv/issues/2255) + +- Updated `requirementslib` to fix a bug in Pipfile parsing affecting + relative path conversions. [#2269](https://github.com/pypa/pipenv/issues/2269) + +- Added custom shell detection library `shellingham`, a port of our changes + to `pew`. [#2363](https://github.com/pypa/pipenv/issues/2363) + +- Patched `python-dotenv` to ensure that environment variables always get + encoded to the filesystem encoding. [#2386](https://github.com/pypa/pipenv/issues/2386) + +- Updated vendored libraries. The following vendored libraries were updated: + + - distlib from version `0.2.6` to `0.2.7`. + - jinja2 from version `2.9.5` to `2.10`. + - pathlib2 from version `2.1.0` to `2.3.2`. + - parse from version `2.8.0` to `2.8.4`. + - pexpect from version `2.5.2` to `2.6.0`. + - requests from version `2.18.4` to `2.19.1`. + - idna from version `2.6` to `2.7`. + - certifi from version `2018.1.16` to `2018.4.16`. + - packaging from version `16.8` to `17.1`. + - six from version `1.10.0` to `1.11.0`. + - requirementslib from version `0.2.0` to `1.0.1`. + + In addition, scandir was vendored and patched to avoid importing host system binaries when falling back to pathlib2. [#2368](https://github.com/pypa/pipenv/issues/2368) diff --git a/docs/changelog.rst b/docs/changelog.rst deleted file mode 100644 index 82e6d0879f..0000000000 --- a/docs/changelog.rst +++ /dev/null @@ -1,4 +0,0 @@ -Release and Version History -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. include:: ../CHANGELOG.rst diff --git a/docs/cli.md b/docs/cli.md new file mode 100644 index 0000000000..8b48507ac9 --- /dev/null +++ b/docs/cli.md @@ -0,0 +1,147 @@ +# Pipenv CLI Reference + +## pipenv + +```bash +pipenv [OPTIONS] COMMAND [ARGS]... +``` + +## check + +Checks for PyUp Safety security vulnerabilities and against PEP 508 markers provided in Pipfile. + +```bash +pipenv check [OPTIONS] +``` + +## clean + +Uninstalls all packages not specified in Pipfile.lock. + +```bash +pipenv clean [OPTIONS] +``` + +## graph + +Displays currently–installed dependency graph information. + +```bash +pipenv graph [OPTIONS] +``` + +## install + +Installs provided packages and adds them to Pipfile, or (if no packages are given), installs all packages from Pipfile. + +```bash +pipenv install [OPTIONS] [PACKAGES]... 
+``` + +Environment Variables + +PIP_INDEX_URL + +```bash + Provide a default for -i +``` + +## lock + +Generates Pipfile.lock. + +```bash +pipenv lock [OPTIONS] +``` + +## open + +View a given module in your editor. + +This uses the EDITOR environment variable. You can temporarily override it, for example: + +EDITOR=atom pipenv open requests + +```bash +pipenv open [OPTIONS] MODULE +``` + +## requirements + +Generate a requirements.txt from Pipfile.lock. + +```bash +pipenv requirements [OPTIONS] +``` + +## run + +Spawns a command installed into the virtualenv. + +```bash +pipenv run [OPTIONS] COMMAND [ARGS]... +``` + +## shell + +Spawns a shell within the virtualenv. + +```bash +pipenv shell [OPTIONS] [SHELL_ARGS]... +``` + +## sync + +Installs all packages specified in Pipfile.lock. + +```bash +pipenv sync [OPTIONS] +``` + +## uninstall + +Un-installs a provided package and removes it from Pipfile. + +```bash +pipenv uninstall [OPTIONS] [PACKAGES]... +``` + +## update + +Runs lock when no packages are specified, or upgrade, and then sync. + +```bash +pipenv update [OPTIONS] [PACKAGES]... +``` + +Environment Variables + +PIP_INDEX_URL + +```bash + Provide a default for -i +``` + +## upgrade + +Resolves provided packages and adds them to Pipfile, or (if no packages are given), merges results to Pipfile.lock + +```bash +pipenv upgrade [OPTIONS] [PACKAGES]... +``` + +Environment Variables + +PIP_INDEX_URL + +```bash + Provide a default for -i +``` + +## verify + +Verify the hash in Pipfile.lock is up-to-date. + +```bash +pipenv verify [OPTIONS] +``` diff --git a/docs/cli.rst b/docs/cli.rst deleted file mode 100644 index 873c67d706..0000000000 --- a/docs/cli.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _cli: - -Pipenv CLI Reference -====================================== - -.. click:: pipenv:cli - :prog: pipenv - :show-nested: diff --git a/docs/conf.py b/docs/conf.py index a03988b063..edd02f6202 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -72,8 +72,8 @@ # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" +source_suffix = [".rst", ".md"] +# source_suffix = ".rst" # The master toctree document. master_doc = "index" diff --git a/docs/dev/contributing.rst b/docs/dev/contributing.md similarity index 52% rename from docs/dev/contributing.rst rename to docs/dev/contributing.md index d9ca81227f..a6bd1021dd 100644 --- a/docs/dev/contributing.rst +++ b/docs/dev/contributing.md @@ -1,10 +1,9 @@ -Contributing to Pipenv -====================== +# Contributing to Pipenv If you're reading this, you're probably interested in contributing to Pipenv. Thank you very much! Open source projects live-and-die based on the support they receive from others, and the fact that you're even considering -contributing to the Pipenv project is *very* generous of you. +contributing to the Pipenv project is _very_ generous of you. This document lays out guidelines and advice for contributing to this project. If you're thinking of contributing, please start by reading this document and @@ -14,28 +13,19 @@ The guide is split into sections based on the type of contribution you're thinking of making, with a section that covers general guidelines for all contributors. +## General Guidelines -General Guidelines ------------------- +### Be Cordial -Be Cordial -~~~~~~~~~~ - - **Be cordial or be on your way**. *—Kenneth Reitz* - -.. 
_be cordial or be on your way: https://kennethreitz.org/essays/2013/01/27/be-cordial-or-be-on-your-way +> **Be cordial or be on your way**. _—Kenneth Reitz_ Pipenv has one very important rule governing all forms of contribution, -including reporting bugs or requesting features. This golden rule is -"`be cordial or be on your way`_" +including reporting bugs or requesting features. This golden rule is [be cordial or be on your way](https://kennethreitz.org/essays/2013/01/27/be-cordial-or-be-on-your-way) **All contributions are welcome**, as long as everyone involved is treated with respect. -.. _early-feedback: - -Get Early Feedback -~~~~~~~~~~~~~~~~~~ +### Get Early Feedback If you are contributing, do not feel the need to sit on your contribution until it is perfectly polished and complete. It helps everyone involved for you to @@ -44,8 +34,7 @@ version of your contribution for feedback in no way prejudices your chances of getting that contribution accepted, and can save you from putting a lot of work into a contribution that is not suitable for the project. -Contribution Suitability -~~~~~~~~~~~~~~~~~~~~~~~~ +### Contribution Suitability Our project maintainers have the last word on whether or not a contribution is suitable for Pipenv. All contributions will be considered carefully, but from @@ -56,68 +45,51 @@ If your contribution is rejected, don't despair! As long as you followed these guidelines, you will have a much better chance of getting your next contribution accepted. +## Questions -Questions ---------- - -The GitHub issue tracker is for *bug reports* and *feature requests*. Please do +The GitHub issue tracker is for _bug reports_ and _feature requests_. Please do not use it to ask questions about how to use Pipenv. These questions should -instead be directed to `Stack Overflow`_. Make sure that your question is tagged -with the ``pipenv`` tag when asking it on Stack Overflow, to ensure that it is +instead be directed to [Stack Overflow](https://stackoverflow.com/). Make sure that your question is tagged +with the `pipenv` tag when asking it on Stack Overflow, to ensure that it is answered promptly and accurately. -.. _Stack Overflow: https://stackoverflow.com/ - -Code Contributions ------------------- +## Code Contributions -Steps for Submitting Code -~~~~~~~~~~~~~~~~~~~~~~~~~ +### Steps for Submitting Code When contributing code, you'll want to follow this checklist: -#. Fork the repository on GitHub. -#. Set up your :ref:`dev-setup` -#. Run the tests (:ref:`run-the-tests`) to confirm they all pass on your system. - If they don't, you'll need to investigate why they fail. If you're unable - to diagnose this yourself, raise it as a bug report by following the guidelines - in this document: :ref:`bug-reports`. -#. Write tests that demonstrate your bug or feature. Ensure that they fail. -#. Make your change. -#. Run the entire test suite again, confirming that all tests pass *including - the ones you just added*. -#. Send a GitHub Pull Request to the main repository's ``main`` branch. - GitHub Pull Requests are the expected method of code collaboration on this - project. +1. Fork the repository on GitHub. +2. Set up your [development environment](#development-setup) +3. Run the tests from [here](#run-the-tests) to confirm they all pass on your system. If they don't, you'll need to investigate why they fail. If you're unable to diagnose this yourself, raise it as a bug report by following the guidelines + in this [document](#bug-reports). +4. 
Write tests that demonstrate your bug or feature. Ensure that they fail. +5. Make your change. +6. Run the entire test suite again, confirming that all tests pass _including the ones you just added_. +7. Send a GitHub Pull Request to the main repository's `main` branch. GitHub Pull Requests are the expected method of code collaboration on this project. The following sub-sections go into more detail on some of the points above. -.. _dev-setup: +### Development Setup -Development Setup -~~~~~~~~~~~~~~~~~ - -The repository version of Pipenv must be installed over other global versions to -resolve conflicts with the ``pipenv`` folder being implicitly added to ``sys.path``. -See `pypa/pipenv#2557`_ for more details. - -.. _pypa/pipenv#2557: https://github.com/pypa/pipenv/issues/2557 +The repository version of Pipenv must be installed over other global versions to resolve conflicts with the `pipenv` folder being implicitly added to `sys.path`. +See [pypa/pipenv#2557](https://github.com/pypa/pipenv/issues/2557) for more details. Pipenv now uses pre-commit hooks similar to Pip in order to apply linting and code formatting automatically! The build now also checks that these linting rules have been applied to the code before running the tests. The build will fail when linting changes are detected so be sure to sync dev requirements -and install the pre-commit hooks locally:: +and install the pre-commit hooks locally: +```bash $ pipenv install --dev # This will configure running the pre-commit checks at start of each commit $ pre-commit install # Should you want to check the pre-commit configuration against all configured project files $ pre-commit run --all-files --verbose +``` - -Code Review -~~~~~~~~~~~ +### Code Review Contributions will not be merged until they have been code reviewed. You should implement any code review feedback unless you strongly object to it. In the @@ -125,98 +97,88 @@ event that you object to the code review feedback, you should make your case clearly and calmly. If, after doing so, the feedback is judged to still apply, you must either apply the feedback or withdraw your contribution. - -Package Index -~~~~~~~~~~~~~ +### Package Index To speed up testing, tests that rely on a package index for locking and installing use a local server that contains vendored packages in the -``tests/pypi`` directory. Each vendored package should have it's own folder +`tests/pypi` directory. Each vendored package should have it's own folder containing the necessary releases. When adding a release for a package, it is -easiest to use either the ``.tar.gz`` or universal wheels (ex: ``py2.py3-none``). If -a ``.tar.gz`` or universal wheel is not available, add wheels for all available +easiest to use either the `.tar.gz` or universal wheels (ex: `py2.py3-none`). If +a `.tar.gz` or universal wheel is not available, add wheels for all available architectures and platforms. - -Documentation Contributions ---------------------------- +## Documentation Contributions Documentation improvements are always welcome! The documentation files live in -the ``docs/`` directory of the codebase. They're written in -`reStructuredText`_, and use `Sphinx`_ to generate the full suite of +the `docs/` directory of the codebase. They're written in +[MarkDown](https://www.markdownguide.org/), and use [Sphinx](http://sphinx-doc.org/index.html) to generate the full suite of documentation. When contributing documentation, please do your best to follow the style of the documentation files. 
This means a soft-limit of 79 characters wide in your text files and a semi-formal, yet friendly and approachable, prose style. -When presenting Python code, use single-quoted strings (``'hello'`` instead of -``"hello"``). +When presenting Python code, use single-quoted strings (`'hello'` instead of +`"hello"`). -.. _reStructuredText: http://docutils.sourceforge.net/rst.html -.. _Sphinx: http://sphinx-doc.org/index.html +## Bug Reports -.. _bug-reports: - -Bug Reports ------------ - -Bug reports are hugely important! They are recorded as `GitHub issues`_. Please +Bug reports are hugely important! They are recorded as [GitHub issues](https://github.com/pypa/pipenv/issues). Please be aware of the following things when filing bug reports: -.. _GitHub issues: https://github.com/pypa/pipenv/issues - -1. Avoid raising duplicate issues. *Please* use the GitHub issue search feature +1. Avoid raising duplicate issues. _Please_ use the GitHub issue search feature to check whether your bug report or feature request has been mentioned in the past. Duplicate bug reports and feature requests are a huge maintenance burden on the limited resources of the project. If it is clear from your report that you would have struggled to find the original, that's okay, but if searching for a selection of words in your issue title would have found the duplicate then the issue will likely be closed extremely abruptly. + 2. When filing bug reports about exceptions or tracebacks, please include the - *complete* traceback. Partial tracebacks, or just the exception text, are + _complete_ traceback. Partial tracebacks, or just the exception text, are not helpful. Issues that do not contain complete tracebacks may be closed without warning. + 3. Make sure you provide a suitable amount of information to work with. This means you should provide: - - Guidance on **how to reproduce the issue**. Ideally, this should be a - *small* code sample that can be run immediately by the maintainers. - Failing that, let us know what you're doing, how often it happens, what - environment you're using, etc. Be thorough: it prevents us needing to ask - further questions. - - Tell us **what you expected to happen**. When we run your example code, - what are we expecting to happen? What does "success" look like for your - code? - - Tell us **what actually happens**. It's not helpful for you to say "it - doesn't work" or "it fails". Tell us *how* it fails: do you get an - exception? A hang? The packages installed seem incorrect? - How was the actual result different from your expected result? - - Tell us **what version of Pipenv you're using**, and - **how you installed it**. Different versions of Pipenv behave - differently and have different bugs, and some distributors of Pipenv - ship patches on top of the code we supply. + - Guidance on **how to reproduce the issue**. Ideally, this should be a + _small_ code sample that can be run immediately by the maintainers. + Failing that, let us know what you're doing, how often it happens, what + environment you're using, etc. Be thorough: it prevents us needing to ask + further questions. - If you do not provide all of these things, it will take us much longer to - fix your problem. If we ask you to clarify these and you never respond, we - will close your issue without fixing it. + - Tell us **what you expected to happen**. When we run your example code, + what are we expecting to happen? What does "success" look like for your + code? -.. _run-the-tests: + - Tell us **what actually happens**. 
It's not helpful for you to say "it + doesn't work" or "it fails". Tell us _how_ it fails: do you get an + exception? A hang? The packages installed seem incorrect? + How was the actual result different from your expected result? -Run the tests -------------- + - Tell us **what version of Pipenv you're using**, and + **how you installed it**. Different versions of Pipenv behave + differently and have different bugs, and some distributors of Pipenv + ship patches on top of the code we supply. -Tests are written in ``pytest`` style and can be run very simply: +If you do not provide all of these things, it will take us much longer to +fix your problem. If we ask you to clarify these and you never respond, we +will close your issue without fixing it. -.. code-block:: bash +## Run the tests - pytest +Tests are written in `pytest` style and can be run very simply: + +```bash + pytest +``` However many tests depend on running a private pypi server on localhost:8080. -This can be accomplished by using either the ``run-tests.sh`` or ``run-tests.bat`` scripts -which will start the ``pypiserver`` process ahead of invoking pytest. +This can be accomplished by using either the `run-tests.sh` or `run-tests.bat` scripts +which will start the `pypiserver` process ahead of invoking pytest. -You may also manually perform this step and then invoke pytest as you would normally. Example:: +You may also manually perform this step and then invoke pytest as you would normally. Example: # Linux or MacOS pipenv run pypi-server run -v --host=0.0.0.0 --port=8080 --hash-algo=sha256 --disable-fallback ./tests/pypi/ ./tests/fixtures & @@ -224,60 +186,59 @@ You may also manually perform this step and then invoke pytest as you would norm # Windows cmd /c start pipenv run pypi-server run -v --host=0.0.0.0 --port=8080 --hash-algo=sha256 --disable-fallback ./tests/pypi/ ./tests/fixtures - This will run all Pipenv tests, which can take awhile. To run a subset of the tests, the standard pytest filters are available, such as: -- provide a directory or file: ``pytest tests/unit`` or ``pytest tests/unit/test_cmdparse.py`` -- provide a keyword expression: ``pytest -k test_lock_editable_vcs_without_install`` -- provide a nodeid: ``pytest tests/unit/test_cmdparse.py::test_parse`` -- provide a test marker: ``pytest -m lock`` +- provide a directory or file: `pytest tests/unit` or `pytest tests/unit/test_cmdparse.py` +- provide a keyword expression: `pytest -k test_lock_editable_vcs_without_install` +- provide a nodeid: `pytest tests/unit/test_cmdparse.py::test_parse` +- provide a test marker: `pytest -m lock` There are a few other ways of running the tests: 1. test scripts -The scripts for bash or windows: ``run-tests.sh`` and ``run-tests.bat`` +The scripts for bash or windows: `run-tests.sh` and `run-tests.bat` Note that, you override the default Python Pipenv will use with PIPENV_PYTHON and the Python binary name with PYTHON in case it -is not called ``python`` on your system or in case you have many. +is not called `python` on your system or in case you have many. Here is an example how you can override both variables (you can -override just one too):: +override just one too): - $ PYTHON=python3.8 PIPENV_PYTHON=python3.9 run-tests.sh + $ PYTHON=python3.8 PIPENV_PYTHON=python3.9 run-tests.sh -You can also do:: +You can also do: - $ PYTHON=/opt/python/python3.10/python3 run-tests.sh +$ PYTHON=/opt/python/python3.10/python3 run-tests.sh If you need to change how pytest is invoked, see how to run the -test suite manually. 
The ``run-tests.sh`` script does the same +test suite manually. The `run-tests.sh` script does the same steps the Github CI workflow does, and as such it is recommended you run it before you open a PR. Taking this second approach, will allow you, for example, to run a single test case, or -``fail fast`` if you need it. +`fail fast` if you need it. 2. Manually This repeats the steps of the scripts above: -.. code-block:: console - - $ git clone https://github.com/pypa/pipenv.git - $ cd pipenv - $ git submodule sync && git submodule update --init --recursive - $ pipenv install --dev - $ pipenv run pytest [--any optional arguments to pytest] +```console +$ git clone https://github.com/pypa/pipenv.git +$ cd pipenv +$ git submodule sync && git submodule update --init --recursive +$ pipenv install --dev +$ pipenv run pytest [--any optional arguments to pytest] +``` -The second options assumes you already have ``pipenv`` on your system. +The second options assumes you already have `pipenv` on your system. And simply repeats all the steps in the script above. Preferably, you should be running your tests in a Linux container (or FreeBSD Jail or even VM). This will guarantee that you don't break stuff, and that the tests run in a pristine environment. -Consider doing something like this:: +Consider doing something like this: $ docker run --rm -v $(pwd):/usr/src -it python:3.7 bash # inside the container @@ -285,17 +246,16 @@ Consider doing something like this:: # su debian && cd /usr/src/ # bash run-tests.sh - 3. Using the Makefile: The Makefile automates all the task as in the script. However, it allows -one more fine grained control on every step. For example:: +one more fine grained control on every step. For example: $ make ramdisk # create a ram disk to preserve your SSDs life $ make ramdisk-virtualenv $ make test suite="-m not cli" # run all tests but cli -or :: +or $ make tests parallel="" suite="tests/integration/test_cli.py::test_pipenv_check" @@ -303,23 +263,23 @@ It is important that your environment is setup correctly, and this may take some work, for example, on a specific Mac installation, the following steps may be needed: -.. code-block:: bash - - # Make sure the tests can access github - if [ "$SSH_AGENT_PID" = "" ] - then - eval ``ssh-agent`` - ssh-add - fi +```bash +# Make sure the tests can access github +if [ "$SSH_AGENT_PID" = "" ] +then + eval ``ssh-agent`` + ssh-add +fi - # Use unix like utilities, installed with brew, - # e.g. brew install coreutils - for d in /usr/local/opt/*/libexec/gnubin /usr/local/opt/python/libexec/bin - do - [[ ":$PATH:" != *":$d:"* ]] && PATH="$d:${PATH}" - done +# Use unix like utilities, installed with brew, +# e.g. brew install coreutils +for d in /usr/local/opt/*/libexec/gnubin /usr/local/opt/python/libexec/bin +do + [[ ":$PATH:" != *":$d:"* ]] && PATH="$d:${PATH}" +done - export PATH +export PATH - # PIP_FIND_LINKS currently breaks test_uninstall.py +# PIP_FIND_LINKS currently breaks test_uninstall.py unset PIP_FIND_LINKS +``` diff --git a/docs/diagnose.md b/docs/diagnose.md new file mode 100644 index 0000000000..c65f6b34b0 --- /dev/null +++ b/docs/diagnose.md @@ -0,0 +1,97 @@ +# Frequently Encountered PiPenv Problems + +Pipenv is constantly being improved by volunteers, but is still a very young +project with limited resources, and has some quirks that needs to be dealt +with. We need everyone’s help (including yours!). + +Here are some common questions people have using Pipenv. Please take a look +below and see if they resolve your problem. 
+ +Note + +**Make sure you’re running the newest Pipenv version first!** + +## ☤ Your dependencies could not be resolved + +Make sure your dependencies actually _do_ resolve. If you’re confident they +are, you may need to clear your resolver cache. Run the following command:: + + pipenv lock --clear + +and try again. + +If this does not work, try manually deleting the whole cache directory. It is +usually one of the following locations: + +- `~/Library/Caches/pipenv` (macOS) +- `%LOCALAPPDATA%\pipenv\pipenv\Cache` (Windows) +- `~/.cache/pipenv` (other operating systems) + +Pipenv does not install pre-releases (i.e. a version with an alpha/beta/etc. +suffix, such as _1.0b1_) by default. You will need to pass the `--pre` flag +in your command, or set + + [pipenv] + allow_prereleases = true + +in your Pipfile. + +## ☤ No module named <module name> + +This is usually a result of mixing Pipenv with system packages. We _strongly_ +recommend installing Pipenv in an isolated environment. Uninstall all existing +Pipenv installations, and see [installing pipenv](./installation.md/#installing-pipenv) to choose one of the recommended way to install Pipenv instead. + +## ☤ My pyenv-installed Python is not found + +Make sure you have `PYENV_ROOT` set correctly. Pipenv only supports CPython +distributions, with version name like `3.6.4` or similar. + +## ☤ Pipenv does not respect pyenv’s global and local Python versions + +Pipenv by default uses the Python it is installed against to create the +virtualenv. You can set the `--python` option to `$(pyenv which python)` to use your current pyenv interpreter. See [specifying versions](./specifiers.md) for more information. + +## ☤ ValueError: unknown locale: UTF-8 + +macOS has a bug in its locale detection that prevents us from detecting your +shell encoding correctly. This can also be an issue on other systems if the +locale variables do not specify an encoding. + +The workaround is to set the following two environment variables to a standard +localization format: + +- `LC_ALL` +- `LANG` + +```bash +export LC_ALL='en_US.UTF-8' +export LANG='en_US.UTF-8' +``` + +For Zsh, the file to edit is `~/.zshrc`. + +Note + +You can change both the `en_US` and `UTF-8` part to the language/locale and encoding you use. + +## ☤ /bin/pip: No such file or directory + +This may be related to your locale setting. See [here](#☤-valueerror-unknown-locale-utf-8) for a possible solution. + +## ☤ Pipenv does not respect dependencies in setup.py + +No, it does not, intentionally. Pipfile and setup.py serve different purposes, and should not consider each other by default. See :ref:`pipfile-vs-setuppy` for more information. + +## ☤ Using `pipenv run` in Supervisor program + +When you configure a supervisor program's `command` with `pipenv run ...`, you need to set locale environment variables properly to make it work. + +Add this line under `[supervisord]` section in `/etc/supervisor/supervisord.conf`:: + + [supervisord] + environment=LC_ALL='en_US.UTF-8',LANG='en_US.UTF-8' + +## ☤ An exception is raised during `Locking dependencies...` + +Run `pipenv lock --clear` and try again. The lock sequence caches results to speed up subsequent runs. The cache may contain faulty results if a bug causes the format to corrupt, even after the bug is fixed. `--clear` flushes the cache, and therefore removes the bad results. diff --git a/docs/diagnose.rst b/docs/diagnose.rst deleted file mode 100644 index 92a1e2649e..0000000000 --- a/docs/diagnose.rst +++ /dev/null @@ -1,123 +0,0 @@ -.. 
_diagnose: - -Frequently Encountered Pipenv Problems -====================================== - -Pipenv is constantly being improved by volunteers, but is still a very young -project with limited resources, and has some quirks that needs to be dealt -with. We need everyone’s help (including yours!). - -Here are some common questions people have using Pipenv. Please take a look -below and see if they resolve your problem. - -.. Note:: **Make sure you’re running the newest Pipenv version first!** - -☤ Your dependencies could not be resolved ------------------------------------------ - -Make sure your dependencies actually *do* resolve. If you’re confident they -are, you may need to clear your resolver cache. Run the following command:: - - pipenv lock --clear - -and try again. - -If this does not work, try manually deleting the whole cache directory. It is -usually one of the following locations: - -* ``~/Library/Caches/pipenv`` (macOS) -* ``%LOCALAPPDATA%\pipenv\pipenv\Cache`` (Windows) -* ``~/.cache/pipenv`` (other operating systems) - -Pipenv does not install pre-releases (i.e. a version with an alpha/beta/etc. -suffix, such as *1.0b1*) by default. You will need to pass the ``--pre`` flag -in your command, or set - -:: - - [pipenv] - allow_prereleases = true - -in your Pipfile. - -☤ No module named <module name> ---------------------------------- - -This is usually a result of mixing Pipenv with system packages. We *strongly* -recommend installing Pipenv in an isolated environment. Uninstall all existing -Pipenv installations, and see :ref:`installing-pipenv` to choose one of the -recommended way to install Pipenv instead. - -☤ My pyenv-installed Python is not found ----------------------------------------- - -Make sure you have ``PYENV_ROOT`` set correctly. Pipenv only supports CPython -distributions, with version name like ``3.6.4`` or similar. - -☤ Pipenv does not respect pyenv’s global and local Python versions ------------------------------------------------------------------- - -Pipenv by default uses the Python it is installed against to create the -virtualenv. You can set the ``--python`` option to ``$(pyenv which python)`` -to use your current pyenv interpreter. See :ref:`specifying_versions` for more -information. - -.. _unknown-local-diagnose: - -☤ ValueError: unknown locale: UTF-8 ------------------------------------ - -macOS has a bug in its locale detection that prevents us from detecting your -shell encoding correctly. This can also be an issue on other systems if the -locale variables do not specify an encoding. - -The workaround is to set the following two environment variables to a standard -localization format: - -* ``LC_ALL`` -* ``LANG`` - -For Bash, for example, you can add the following to your ``~/.bash_profile``: - -.. code-block:: bash - - export LC_ALL='en_US.UTF-8' - export LANG='en_US.UTF-8' - -For Zsh, the file to edit is ``~/.zshrc``. - -.. Note:: You can change both the ``en_US`` and ``UTF-8`` part to the - language/locale and encoding you use. - -☤ /bin/pip: No such file or directory -------------------------------------- - -This may be related to your locale setting. See :ref:`unknown-local-diagnose` -for a possible solution. - - -☤ Pipenv does not respect dependencies in setup.py --------------------------------------------------- - -No, it does not, intentionally. Pipfile and setup.py serve different purposes, -and should not consider each other by default. See :ref:`pipfile-vs-setuppy` -for more information. 
- -☤ Using ``pipenv run`` in Supervisor program ---------------------------------------------- - -When you configure a supervisor program's ``command`` with ``pipenv run ...``, you -need to set locale environment variables properly to make it work. - -Add this line under ``[supervisord]`` section in ``/etc/supervisor/supervisord.conf``:: - - [supervisord] - environment=LC_ALL='en_US.UTF-8',LANG='en_US.UTF-8' - -☤ An exception is raised during ``Locking dependencies...`` ------------------------------------------------------------ - -Run ``pipenv lock --clear`` and try again. The lock sequence caches results -to speed up subsequent runs. The cache may contain faulty results if a bug -causes the format to corrupt, even after the bug is fixed. ``--clear`` flushes -the cache, and therefore removes the bad results. diff --git a/docs/requirements.txt b/docs/requirements.txt index e3fd593198..7079f46f1e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -19,5 +19,5 @@ sphinx-click==4.4.0 sphinxcontrib-spelling==7.7.0 sphinxcontrib-websupport==1.2.4 urllib3==1.26.14 -virtualenv==20.20.0 +virtualenv>=20.20.0 virtualenv-clone==0.5.7 diff --git a/pipenv/vendor/click/LICENSE.rst b/pipenv/vendor/click/LICENSE.md similarity index 100% rename from pipenv/vendor/click/LICENSE.rst rename to pipenv/vendor/click/LICENSE.md diff --git a/pipenv/vendor/markupsafe/LICENSE.rst b/pipenv/vendor/markupsafe/LICENSE.md similarity index 100% rename from pipenv/vendor/markupsafe/LICENSE.rst rename to pipenv/vendor/markupsafe/LICENSE.md diff --git a/tests/fixtures/fake-package/README.md b/tests/fixtures/fake-package/README.md new file mode 100644 index 0000000000..4f91c7bbe5 --- /dev/null +++ b/tests/fixtures/fake-package/README.md @@ -0,0 +1 @@ +# fake_package: A fake python package. diff --git a/tests/fixtures/fake-package/README.rst b/tests/fixtures/fake-package/README.rst deleted file mode 100644 index 4256cd1f8e..0000000000 --- a/tests/fixtures/fake-package/README.rst +++ /dev/null @@ -1,3 +0,0 @@ -=============================================================================== -fake_package: A fake python package. -===============================================================================
urllib3__urllib3-708
Error using https and ipv6: InvalidURL("nonnumeric port: '4f7b'",) To reproduce: ``` Python import urllib3 p = urllib3.connection_from_url('https://[2001:0:53aa:64c:104c:2c10:2bef:4f7b]') p.request('GET', '/') ``` produces: ``` Python Traceback (most recent call last): File "/usr/lib/python3.4/http/client.py", line 771, in _set_hostport port = int(host[i+1:]) ValueError: invalid literal for int() with base 10: '4f7b' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 548, in urlopen conn = self._get_conn(timeout=pool_timeout) File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 250, in _get_conn return conn or self._new_conn() File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 770, in _new_conn strict=self.strict, **self.conn_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connection.py", line 171, in __init__ timeout=timeout, **kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connection.py", line 119, in __init__ _HTTPConnection.__init__(self, *args, **kw) File "/usr/lib/python3.4/http/client.py", line 750, in __init__ self._set_hostport(host, port) File "/usr/lib/python3.4/http/client.py", line 776, in _set_hostport raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) http.client.InvalidURL: nonnumeric port: '4f7b' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.4/dist-packages/urllib3/request.py", line 68, in request **urlopen_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/request.py", line 89, in request_encode_url return self.urlopen(method, url, **extra_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 629, in urlopen release_conn=release_conn, **response_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 629, in urlopen release_conn=release_conn, **response_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 629, in urlopen release_conn=release_conn, **response_kw) File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 609, in urlopen _stacktrace=sys.exc_info()[2]) File "/usr/local/lib/python3.4/dist-packages/urllib3/util/retry.py", line 271, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='2001:0:53aa:64c:104c:2c10:2bef:4f7b', port=None): Max retries exceeded with url: / (Caused by ProtocolError('Connection aborted.', InvalidURL("nonnumeric port: '4f7b'",))) ``` Using `https://[2001:0:53aa:64c:104c:2c10:2bef:4f7b]:443` instead is a workaround.
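The listing of `urllib3/connectionpool.py` below shows why this happens: `ConnectionPool.__init__` strips the brackets (`self.host = host.strip('[]')`), so when no port is given, `http.client`'s `_set_hostport` later mistakes the final hex group (`4f7b`) for a port number, exactly as the traceback shows. Until that is addressed, the workaround from the report can be exercised as follows; this is only a sketch, and it assumes the example address from the report is reachable from your machine:

```python
import urllib3

# Workaround from the report: state the default HTTPS port explicitly so that
# http.client never tries to parse a port out of the bracket-stripped IPv6 host.
pool = urllib3.connection_from_url(
    'https://[2001:0:53aa:64c:104c:2c10:2bef:4f7b]:443'
)
response = pool.request('GET', '/')
print(response.status)
```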
[ { "content": "import errno\nimport logging\nimport sys\nimport warnings\n\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport socket\n\ntry: # Python 3\n from queue import LifoQueue, Empty, Full\nexcept ImportError:\n from Queue import LifoQueue, Empty, Full\n import Queue as _ # Platform-specific: Windows\n\n\nfrom .exceptions import (\n ClosedPoolError,\n ProtocolError,\n EmptyPoolError,\n HeaderParsingError,\n HostChangedError,\n LocationValueError,\n MaxRetryError,\n ProxyError,\n ConnectTimeoutError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n InsecureRequestWarning,\n NewConnectionError,\n)\nfrom .packages.ssl_match_hostname import CertificateError\nfrom .packages import six\nfrom .connection import (\n port_by_scheme,\n DummyConnection,\n HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,\n HTTPException, BaseSSLError, ConnectionError\n)\nfrom .request import RequestMethods\nfrom .response import HTTPResponse\n\nfrom .util.connection import is_connection_dropped\nfrom .util.response import assert_header_parsing\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\nfrom .util.url import get_host, Url\n\n\nxrange = six.moves.xrange\n\nlog = logging.getLogger(__name__)\n\n_Default = object()\n\n\n## Pool objects\nclass ConnectionPool(object):\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n \"\"\"\n\n scheme = None\n QueueCls = LifoQueue\n\n def __init__(self, host, port=None):\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n # httplib doesn't like it when we include brackets in ipv6 addresses\n self.host = host.strip('[]')\n self.port = port\n\n def __str__(self):\n return '%s(host=%r, port=%r)' % (type(self).__name__,\n self.host, self.port)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close():\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n pass\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. \"localhost\"), passed into\n :class:`httplib.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`httplib.HTTPConnection`.\n\n :param strict:\n Causes BadStatusLine to be raised if the status line can't be parsed\n as a valid HTTP/1.0 or 1.1 status line, passed into\n :class:`httplib.HTTPConnection`.\n\n .. note::\n Only works in Python 2. This parameter is ignored in Python 3.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. 
If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.connectionpool.ProxyManager`\"\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.connectionpool.ProxyManager`\"\n\n :param \\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = 'http'\n ConnectionCls = HTTPConnection\n\n def __init__(self, host, port=None, strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,\n headers=None, retries=None,\n _proxy=None, _proxy_headers=None,\n **conn_kw):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n self.strict = strict\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in xrange(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault('socket_options', [])\n\n def _new_conn(self):\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.info(\"Starting new HTTP connection (%d): %s\" %\n (self.num_connections, self.host))\n\n conn = self.ConnectionCls(host=self.host, port=self.port,\n timeout=self.timeout.connect_timeout,\n strict=self.strict, **self.conn_kw)\n return conn\n\n def _get_conn(self, timeout=None):\n \"\"\"\n Get a connection. 
Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n except Empty:\n if self.block:\n raise EmptyPoolError(self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\")\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.info(\"Resetting dropped connection: %s\" % self.host)\n conn.close()\n if getattr(conn, 'auto_open', 1) == 0:\n # This is a proxied connection that has been mutated by\n # httplib._tunnel() and cannot be reused (since it would\n # attempt to bypass the proxy)\n conn = None\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn):\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except Full:\n # This should never happen if self.block == True\n log.warning(\n \"Connection pool is full, discarding connection: %s\" %\n self.host)\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn):\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n pass\n\n def _prepare_proxy(self, conn):\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout):\n \"\"\" Helper that always returns a :class:`urllib3.util.Timeout` \"\"\"\n if timeout is _Default:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(self, err, url, timeout_value):\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(self, url, \"Read timed out. (read timeout=%s)\" % timeout_value)\n\n # See the above comment about EAGAIN in Python 3. In Python 2 we have\n # to specifically catch it and throw the timeout error\n if hasattr(err, 'errno') and err.errno in _blocking_errnos:\n raise ReadTimeoutError(self, url, \"Read timed out. (read timeout=%s)\" % timeout_value)\n\n # Catch possible read timeouts thrown as SSL errors. If not the\n # case, rethrow the original. We need to do this because of:\n # http://bugs.python.org/issue10272\n if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6\n raise ReadTimeoutError(self, url, \"Read timed out. 
(read timeout=%s)\" % timeout_value)\n\n def _make_request(self, conn, method, url, timeout=_Default,\n **httplib_request_kw):\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param timeout:\n Socket timeout in seconds for the request. This can be a\n float or integer, which will set the same timeout value for\n the socket connect and the socket read, or an instance of\n :class:`urllib3.util.Timeout`, which gives you more fine-grained\n control over your timeouts.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = timeout_obj.connect_timeout\n\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # conn.request() calls httplib.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n conn.request(method, url, **httplib_request_kw)\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n # App Engine doesn't have a sock attr\n if getattr(conn, 'sock', None):\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, \"Read timed out. 
(read timeout=%s)\" % read_timeout)\n if read_timeout is Timeout.DEFAULT_TIMEOUT:\n conn.sock.settimeout(socket.getdefaulttimeout())\n else: # None or a value\n conn.sock.settimeout(read_timeout)\n\n # Receive the response from the server\n try:\n try: # Python 2.7, use buffering of HTTP responses\n httplib_response = conn.getresponse(buffering=True)\n except TypeError: # Python 2.6 and older\n httplib_response = conn.getresponse()\n except (SocketTimeout, BaseSSLError, SocketError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n # AppEngine doesn't have a version attr.\n http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')\n log.debug(\"\\\"%s %s %s\\\" %s %s\" % (method, url, http_version,\n httplib_response.status,\n httplib_response.length))\n\n try:\n assert_header_parsing(httplib_response.msg)\n except HeaderParsingError as hpe: # Platform-specific: Python 3\n log.warning(\n 'Failed to parse headers (url=%s): %s',\n self._absolute_url(url), hpe, exc_info=True)\n\n return httplib_response\n\n def _absolute_url(self, path):\n return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url\n\n def close(self):\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n try:\n while True:\n conn = old_pool.get(block=False)\n if conn:\n conn.close()\n\n except Empty:\n pass # Done.\n\n def is_same_host(self, url):\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith('/'):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, host, port = get_host(url)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen(self, method, url, body=None, headers=None, retries=None,\n redirect=True, assert_same_host=True, timeout=_Default,\n pool_timeout=None, release_conn=None, **response_kw):\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method provided\n by :class:`.RequestMethods`, such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param body:\n Data to send in the request body (useful for creating\n POST requests, see HTTPConnectionPool.post_url for\n more convenience).\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. 
Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When False, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of\n ``response_kw.get('preload_content', True)``.\n\n :param \\**response_kw:\n Additional parameters are passed to\n :meth:`urllib3.response.HTTPResponse.from_httplib`\n \"\"\"\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = response_kw.get('preload_content', True)\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n conn = None\n\n # Merge the proxy headers. Only do this in HTTP. We have to copy the\n # headers dict so we can safely change it without those changes being\n # reflected in anyone else's copy.\n if self.scheme == 'http':\n headers = headers.copy()\n headers.update(self.proxy_headers)\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout\n\n is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)\n if is_new_proxy_conn:\n self._prepare_proxy(conn)\n\n # Make the request on the httplib connection object.\n httplib_response = self._make_request(conn, method, url,\n timeout=timeout_obj,\n body=body, headers=headers)\n\n # If we're going to release the connection in ``finally:``, then\n # the request doesn't need to know about the connection. 
Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = not release_conn and conn\n\n # Import httplib's response into our own wrapper object\n response = HTTPResponse.from_httplib(httplib_response,\n pool=self,\n connection=response_conn,\n **response_kw)\n\n # else:\n # The connection will be put back into the pool when\n # ``response.release_conn()`` is called (implicitly by\n # ``response.read()``)\n\n except Empty:\n # Timed out by queue.\n raise EmptyPoolError(self, \"No pool connections are available.\")\n\n except (BaseSSLError, CertificateError) as e:\n # Close the connection. If a connection is reused on which there\n # was a Certificate error, the next request will certainly raise\n # another Certificate error.\n conn = conn and conn.close()\n release_conn = True\n raise SSLError(e)\n\n except SSLError:\n # Treat SSLError separately from BaseSSLError to preserve\n # traceback.\n conn = conn and conn.close()\n release_conn = True\n raise\n\n except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:\n # Discard the connection for these exceptions. It will be\n # be replaced during the next _get_conn() call.\n conn = conn and conn.close()\n release_conn = True\n\n if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:\n e = ProxyError('Cannot connect to proxy.', e)\n elif isinstance(e, (SocketError, HTTPException)):\n e = ProtocolError('Connection aborted.', e)\n\n retries = retries.increment(method, url, error=e, _pool=self,\n _stacktrace=sys.exc_info()[2])\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if release_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\"Retrying (%r) after connection \"\n \"broken by '%r': %s\" % (retries, err, url))\n return self.urlopen(method, url, body, headers, retries,\n redirect, assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = 'GET'\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n # Release the connection for this response, since we're not\n # returning it to be released manually.\n response.release_conn()\n raise\n return response\n\n log.info(\"Redirecting %s -> %s\" % (url, redirect_location))\n return self.urlopen(method, redirect_location, body, headers,\n retries=retries, redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n # Check if we should retry the HTTP response.\n if retries.is_forced_retry(method, status_code=response.status):\n retries = retries.increment(method, url, response=response, _pool=self)\n retries.sleep()\n log.info(\"Forced retry: %s\" % url)\n return self.urlopen(method, url, body, headers,\n retries=retries, redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n When Python is compiled 
with the :mod:`ssl` module, then\n :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,\n instead of :class:`.HTTPSConnection`.\n\n :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is\n available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = 'https'\n ConnectionCls = HTTPSConnection\n\n def __init__(self, host, port=None,\n strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,\n block=False, headers=None, retries=None,\n _proxy=None, _proxy_headers=None,\n key_file=None, cert_file=None, cert_reqs=None,\n ca_certs=None, ssl_version=None,\n assert_hostname=None, assert_fingerprint=None,\n ca_cert_dir=None, **conn_kw):\n\n HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,\n block, headers, retries, _proxy, _proxy_headers,\n **conn_kw)\n\n if ca_certs and cert_reqs is None:\n cert_reqs = 'CERT_REQUIRED'\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_conn(self, conn):\n \"\"\"\n Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`\n and establish the tunnel if proxy is used.\n \"\"\"\n\n if isinstance(conn, VerifiedHTTPSConnection):\n conn.set_cert(key_file=self.key_file,\n cert_file=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint)\n conn.ssl_version = self.ssl_version\n\n return conn\n\n def _prepare_proxy(self, conn):\n \"\"\"\n Establish tunnel connection early, because otherwise httplib\n would improperly set Host: header to proxy's IP:port.\n \"\"\"\n # Python 2.7+\n try:\n set_tunnel = conn.set_tunnel\n except AttributeError: # Platform-specific: Python 2.6\n set_tunnel = conn._set_tunnel\n\n if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older\n set_tunnel(self.host, self.port)\n else:\n set_tunnel(self.host, self.port, self.proxy_headers)\n\n conn.connect()\n\n def _new_conn(self):\n \"\"\"\n Return a fresh :class:`httplib.HTTPSConnection`.\n \"\"\"\n self.num_connections += 1\n log.info(\"Starting new HTTPS connection (%d): %s\"\n % (self.num_connections, self.host))\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection:\n raise SSLError(\"Can't connect to HTTPS URL because the SSL \"\n \"module is not available.\")\n\n actual_host = self.host\n actual_port = self.port\n if self.proxy is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n conn = self.ConnectionCls(host=actual_host, port=actual_port,\n timeout=self.timeout.connect_timeout,\n strict=self.strict, **self.conn_kw)\n\n return self._prepare_conn(conn)\n\n def _validate_conn(self, conn):\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super(HTTPSConnectionPool, self)._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if not getattr(conn, 'sock', None): # AppEngine might not 
have `.sock`\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn((\n 'Unverified HTTPS request is being made. '\n 'Adding certificate verification is strongly advised. See: '\n 'https://urllib3.readthedocs.org/en/latest/security.html'),\n InsecureRequestWarning)\n\n\ndef connection_from_url(url, **kw):\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, host, port = get_host(url)\n if scheme == 'https':\n return HTTPSConnectionPool(host, port=port, **kw)\n else:\n return HTTPConnectionPool(host, port=port, **kw)\n", "path": "urllib3/connectionpool.py" } ]
[ { "content": "import errno\nimport logging\nimport sys\nimport warnings\n\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport socket\n\ntry: # Python 3\n from queue import LifoQueue, Empty, Full\nexcept ImportError:\n from Queue import LifoQueue, Empty, Full\n import Queue as _ # Platform-specific: Windows\n\n\nfrom .exceptions import (\n ClosedPoolError,\n ProtocolError,\n EmptyPoolError,\n HeaderParsingError,\n HostChangedError,\n LocationValueError,\n MaxRetryError,\n ProxyError,\n ConnectTimeoutError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n InsecureRequestWarning,\n NewConnectionError,\n)\nfrom .packages.ssl_match_hostname import CertificateError\nfrom .packages import six\nfrom .connection import (\n port_by_scheme,\n DummyConnection,\n HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,\n HTTPException, BaseSSLError, ConnectionError\n)\nfrom .request import RequestMethods\nfrom .response import HTTPResponse\n\nfrom .util.connection import is_connection_dropped\nfrom .util.response import assert_header_parsing\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\nfrom .util.url import get_host, Url\n\n\nxrange = six.moves.xrange\n\nlog = logging.getLogger(__name__)\n\n_Default = object()\n\n\n## Pool objects\nclass ConnectionPool(object):\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n \"\"\"\n\n scheme = None\n QueueCls = LifoQueue\n\n def __init__(self, host, port=None):\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n self.host = host\n self.port = port\n\n def __str__(self):\n return '%s(host=%r, port=%r)' % (type(self).__name__,\n self.host, self.port)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close():\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n pass\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. \"localhost\"), passed into\n :class:`httplib.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`httplib.HTTPConnection`.\n\n :param strict:\n Causes BadStatusLine to be raised if the status line can't be parsed\n as a valid HTTP/1.0 or 1.1 status line, passed into\n :class:`httplib.HTTPConnection`.\n\n .. note::\n Only works in Python 2. This parameter is ignored in Python 3.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. 
When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.connectionpool.ProxyManager`\"\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.connectionpool.ProxyManager`\"\n\n :param \\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = 'http'\n ConnectionCls = HTTPConnection\n\n def __init__(self, host, port=None, strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,\n headers=None, retries=None,\n _proxy=None, _proxy_headers=None,\n **conn_kw):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n self.strict = strict\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in xrange(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault('socket_options', [])\n\n def _new_conn(self):\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.info(\"Starting new HTTP connection (%d): %s\" %\n (self.num_connections, self.host))\n\n conn = self.ConnectionCls(host=self.host, port=self.port,\n timeout=self.timeout.connect_timeout,\n strict=self.strict, **self.conn_kw)\n return conn\n\n def _get_conn(self, timeout=None):\n \"\"\"\n Get a connection. 
Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n except Empty:\n if self.block:\n raise EmptyPoolError(self,\n \"Pool reached maximum size and no more \"\n \"connections are allowed.\")\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.info(\"Resetting dropped connection: %s\" % self.host)\n conn.close()\n if getattr(conn, 'auto_open', 1) == 0:\n # This is a proxied connection that has been mutated by\n # httplib._tunnel() and cannot be reused (since it would\n # attempt to bypass the proxy)\n conn = None\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn):\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except Full:\n # This should never happen if self.block == True\n log.warning(\n \"Connection pool is full, discarding connection: %s\" %\n self.host)\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn):\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n pass\n\n def _prepare_proxy(self, conn):\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout):\n \"\"\" Helper that always returns a :class:`urllib3.util.Timeout` \"\"\"\n if timeout is _Default:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(self, err, url, timeout_value):\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(self, url, \"Read timed out. (read timeout=%s)\" % timeout_value)\n\n # See the above comment about EAGAIN in Python 3. In Python 2 we have\n # to specifically catch it and throw the timeout error\n if hasattr(err, 'errno') and err.errno in _blocking_errnos:\n raise ReadTimeoutError(self, url, \"Read timed out. (read timeout=%s)\" % timeout_value)\n\n # Catch possible read timeouts thrown as SSL errors. If not the\n # case, rethrow the original. We need to do this because of:\n # http://bugs.python.org/issue10272\n if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6\n raise ReadTimeoutError(self, url, \"Read timed out. 
(read timeout=%s)\" % timeout_value)\n\n def _make_request(self, conn, method, url, timeout=_Default,\n **httplib_request_kw):\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param timeout:\n Socket timeout in seconds for the request. This can be a\n float or integer, which will set the same timeout value for\n the socket connect and the socket read, or an instance of\n :class:`urllib3.util.Timeout`, which gives you more fine-grained\n control over your timeouts.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = timeout_obj.connect_timeout\n\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # conn.request() calls httplib.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n conn.request(method, url, **httplib_request_kw)\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n # App Engine doesn't have a sock attr\n if getattr(conn, 'sock', None):\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, \"Read timed out. 
(read timeout=%s)\" % read_timeout)\n if read_timeout is Timeout.DEFAULT_TIMEOUT:\n conn.sock.settimeout(socket.getdefaulttimeout())\n else: # None or a value\n conn.sock.settimeout(read_timeout)\n\n # Receive the response from the server\n try:\n try: # Python 2.7, use buffering of HTTP responses\n httplib_response = conn.getresponse(buffering=True)\n except TypeError: # Python 2.6 and older\n httplib_response = conn.getresponse()\n except (SocketTimeout, BaseSSLError, SocketError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n # AppEngine doesn't have a version attr.\n http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')\n log.debug(\"\\\"%s %s %s\\\" %s %s\" % (method, url, http_version,\n httplib_response.status,\n httplib_response.length))\n\n try:\n assert_header_parsing(httplib_response.msg)\n except HeaderParsingError as hpe: # Platform-specific: Python 3\n log.warning(\n 'Failed to parse headers (url=%s): %s',\n self._absolute_url(url), hpe, exc_info=True)\n\n return httplib_response\n\n def _absolute_url(self, path):\n return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url\n\n def close(self):\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n try:\n while True:\n conn = old_pool.get(block=False)\n if conn:\n conn.close()\n\n except Empty:\n pass # Done.\n\n def is_same_host(self, url):\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith('/'):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, host, port = get_host(url)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen(self, method, url, body=None, headers=None, retries=None,\n redirect=True, assert_same_host=True, timeout=_Default,\n pool_timeout=None, release_conn=None, **response_kw):\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method provided\n by :class:`.RequestMethods`, such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param body:\n Data to send in the request body (useful for creating\n POST requests, see HTTPConnectionPool.post_url for\n more convenience).\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. 
Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When False, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of\n ``response_kw.get('preload_content', True)``.\n\n :param \\**response_kw:\n Additional parameters are passed to\n :meth:`urllib3.response.HTTPResponse.from_httplib`\n \"\"\"\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = response_kw.get('preload_content', True)\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n conn = None\n\n # Merge the proxy headers. Only do this in HTTP. We have to copy the\n # headers dict so we can safely change it without those changes being\n # reflected in anyone else's copy.\n if self.scheme == 'http':\n headers = headers.copy()\n headers.update(self.proxy_headers)\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout\n\n is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)\n if is_new_proxy_conn:\n self._prepare_proxy(conn)\n\n # Make the request on the httplib connection object.\n httplib_response = self._make_request(conn, method, url,\n timeout=timeout_obj,\n body=body, headers=headers)\n\n # If we're going to release the connection in ``finally:``, then\n # the request doesn't need to know about the connection. 
Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = not release_conn and conn\n\n # Import httplib's response into our own wrapper object\n response = HTTPResponse.from_httplib(httplib_response,\n pool=self,\n connection=response_conn,\n **response_kw)\n\n # else:\n # The connection will be put back into the pool when\n # ``response.release_conn()`` is called (implicitly by\n # ``response.read()``)\n\n except Empty:\n # Timed out by queue.\n raise EmptyPoolError(self, \"No pool connections are available.\")\n\n except (BaseSSLError, CertificateError) as e:\n # Close the connection. If a connection is reused on which there\n # was a Certificate error, the next request will certainly raise\n # another Certificate error.\n conn = conn and conn.close()\n release_conn = True\n raise SSLError(e)\n\n except SSLError:\n # Treat SSLError separately from BaseSSLError to preserve\n # traceback.\n conn = conn and conn.close()\n release_conn = True\n raise\n\n except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:\n # Discard the connection for these exceptions. It will be\n # be replaced during the next _get_conn() call.\n conn = conn and conn.close()\n release_conn = True\n\n if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:\n e = ProxyError('Cannot connect to proxy.', e)\n elif isinstance(e, (SocketError, HTTPException)):\n e = ProtocolError('Connection aborted.', e)\n\n retries = retries.increment(method, url, error=e, _pool=self,\n _stacktrace=sys.exc_info()[2])\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if release_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\"Retrying (%r) after connection \"\n \"broken by '%r': %s\" % (retries, err, url))\n return self.urlopen(method, url, body, headers, retries,\n redirect, assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = 'GET'\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n # Release the connection for this response, since we're not\n # returning it to be released manually.\n response.release_conn()\n raise\n return response\n\n log.info(\"Redirecting %s -> %s\" % (url, redirect_location))\n return self.urlopen(method, redirect_location, body, headers,\n retries=retries, redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n # Check if we should retry the HTTP response.\n if retries.is_forced_retry(method, status_code=response.status):\n retries = retries.increment(method, url, response=response, _pool=self)\n retries.sleep()\n log.info(\"Forced retry: %s\" % url)\n return self.urlopen(method, url, body, headers,\n retries=retries, redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout, pool_timeout=pool_timeout,\n release_conn=release_conn, **response_kw)\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n When Python is compiled 
with the :mod:`ssl` module, then\n :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,\n instead of :class:`.HTTPSConnection`.\n\n :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is\n available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = 'https'\n ConnectionCls = HTTPSConnection\n\n def __init__(self, host, port=None,\n strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,\n block=False, headers=None, retries=None,\n _proxy=None, _proxy_headers=None,\n key_file=None, cert_file=None, cert_reqs=None,\n ca_certs=None, ssl_version=None,\n assert_hostname=None, assert_fingerprint=None,\n ca_cert_dir=None, **conn_kw):\n\n HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,\n block, headers, retries, _proxy, _proxy_headers,\n **conn_kw)\n\n if ca_certs and cert_reqs is None:\n cert_reqs = 'CERT_REQUIRED'\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_conn(self, conn):\n \"\"\"\n Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`\n and establish the tunnel if proxy is used.\n \"\"\"\n\n if isinstance(conn, VerifiedHTTPSConnection):\n conn.set_cert(key_file=self.key_file,\n cert_file=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint)\n conn.ssl_version = self.ssl_version\n\n return conn\n\n def _prepare_proxy(self, conn):\n \"\"\"\n Establish tunnel connection early, because otherwise httplib\n would improperly set Host: header to proxy's IP:port.\n \"\"\"\n # Python 2.7+\n try:\n set_tunnel = conn.set_tunnel\n except AttributeError: # Platform-specific: Python 2.6\n set_tunnel = conn._set_tunnel\n\n if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older\n set_tunnel(self.host, self.port)\n else:\n set_tunnel(self.host, self.port, self.proxy_headers)\n\n conn.connect()\n\n def _new_conn(self):\n \"\"\"\n Return a fresh :class:`httplib.HTTPSConnection`.\n \"\"\"\n self.num_connections += 1\n log.info(\"Starting new HTTPS connection (%d): %s\"\n % (self.num_connections, self.host))\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection:\n raise SSLError(\"Can't connect to HTTPS URL because the SSL \"\n \"module is not available.\")\n\n actual_host = self.host\n actual_port = self.port\n if self.proxy is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n conn = self.ConnectionCls(host=actual_host, port=actual_port,\n timeout=self.timeout.connect_timeout,\n strict=self.strict, **self.conn_kw)\n\n return self._prepare_conn(conn)\n\n def _validate_conn(self, conn):\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super(HTTPSConnectionPool, self)._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if not getattr(conn, 'sock', None): # AppEngine might not 
have `.sock`\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn((\n 'Unverified HTTPS request is being made. '\n 'Adding certificate verification is strongly advised. See: '\n 'https://urllib3.readthedocs.org/en/latest/security.html'),\n InsecureRequestWarning)\n\n\ndef connection_from_url(url, **kw):\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, host, port = get_host(url)\n if scheme == 'https':\n return HTTPSConnectionPool(host, port=port, **kw)\n else:\n return HTTPConnectionPool(host, port=port, **kw)\n", "path": "urllib3/connectionpool.py" } ]
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py index b38ac68d7b..1004511a8e 100644 --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -68,8 +68,7 @@ def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") - # httplib doesn't like it when we include brackets in ipv6 addresses - self.host = host.strip('[]') + self.host = host self.port = port def __str__(self):
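The one-line change in the diff above only alters how bracketed IPv6 literals are stored on the pool: `str.strip('[]')` removes leading and trailing bracket characters, so plain hostnames and IPv4 addresses pass through unchanged either way. A minimal sketch of the behavioural difference, using a hypothetical `[::1]` host:

```python
# Hypothetical host strings; only the bracketed IPv6 literal behaves differently.
for host in ("example.com", "10.0.0.1", "[::1]"):
    before_fix = host.strip("[]")   # old ConnectionPool.__init__: brackets removed
    after_fix = host                # new ConnectionPool.__init__: stored verbatim
    print(f"{host!r}: before={before_fix!r} after={after_fix!r}")
```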
cowrie__cowrie-1551
builtins.KeyError: 'log_time' Python error **Describe the bug** Cowrie won't log properly, due that output plugins are not working -> output_splunk Following error occurs: ``` 2021-04-28T07:00:17.796991Z [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.virustotal.Output object at 0x7f3a13c9c550>>) due to exception: [Failure instance: Traceback: <class 'KeyError'>: 'log_time' /home/cowrie/cowrie/src/cowrie/ssh/transport.py:246:connectionLost /home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/threadable.py:51:sync /home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/log.py:281:msg /home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver --- <exception caught here> --- /home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_observer.py:82:__call__ /home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py:55:__call__ ] Traceback (most recent call last): File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 246, in connectionLost log.msg( File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/threadable.py", line 51, in sync return function(self, *args, **kwargs) File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/log.py", line 281, in msg _publishNew(self._publishPublisher, actualEventDict, textFromEventDict) File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py", line 147, in publishToNewObserver observer(eventDict) --- <exception caught here> --- File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_observer.py", line 82, in __call__ observer(event) File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py", line 55, in __call__ event["time"] = event["log_time"] builtins.KeyError: 'log_time' ``` **To Reproduce** Steps to reproduce the behavior: 1. git clone cowrie 2. setup venv 3. setup cowrie.cfg 4. include splunk output 5. run cowrie 6. run honeypot session **Expected behavior** Cowrie should properly log. **Server (please complete the following information):** - OS: `Linux cowrie-1 5.4.103-1-pve #1 SMP PVE 5.4.103-1 (Sun, 07 Mar 2021 15:55:09 +0100) x86_64 x86_64 x86_64 GNU/Linux` - Python: Python 3.8.6
[ { "content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n\nimport abc\nimport re\nimport socket\nimport time\nfrom os import environ\nfrom typing import Any, Dict, Pattern\n\nfrom twisted.internet import reactor\nfrom twisted.logger import formatTime\n\nfrom cowrie.core.config import CowrieConfig\n\n# Events:\n# cowrie.client.fingerprint\n# cowrie.client.size\n# cowrie.client.var\n# cowrie.client.version\n# cowrie.command.input\n# cowrie.command.failed\n# cowrie.command.success (deprecated)\n# cowrie.direct-tcpip.data\n# cowrie.direct-tcpip.request\n# cowrie.log.closed\n# cowrie.login.failed\n# cowrie.login.success\n# cowrie.session.closed\n# cowrie.session.connect\n# cowrie.session.file_download\n# cowrie.session.file_upload\n\n\"\"\"\nThe time is available in two formats in each event, as key 'time'\nin epoch format and in key 'timestamp' as a ISO compliant string\nin UTC.\n\"\"\"\n\n\nclass Output(metaclass=abc.ABCMeta):\n \"\"\"\n This is the abstract base class intended to be inherited by\n cowrie output plugins. 
Plugins require the mandatory\n methods: stop, start and write\n \"\"\"\n\n def __init__(self) -> None:\n self.sessions: Dict[str, str] = {}\n self.ips: Dict[str, str] = {}\n\n # Need these for each individual transport, or else the session numbers overlap\n self.sshRegex: Pattern[str] = re.compile(\".*SSHTransport,([0-9]+),[0-9a-f:.]+$\")\n self.telnetRegex: Pattern[str] = re.compile(\n \".*TelnetTransport,([0-9]+),[0-9a-f:.]+$\"\n )\n self.sensor: str = CowrieConfig.get(\n \"honeypot\", \"sensor_name\", fallback=socket.gethostname()\n )\n self.timeFormat: str\n\n # use Z for UTC (Zulu) time, it's shorter.\n if \"TZ\" in environ and environ[\"TZ\"] == \"UTC\":\n self.timeFormat = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n else:\n self.timeFormat = \"%Y-%m-%dT%H:%M:%S.%f%z\"\n\n # Event trigger so that stop() is called by the reactor when stopping\n reactor.addSystemEventTrigger(\"before\", \"shutdown\", self.stop) # type: ignore\n\n self.start()\n\n def logDispatch(self, **kw: str) -> None:\n \"\"\"\n Use logDispatch when the HoneypotTransport prefix is not available.\n Here you can explicitly set the sessionIds to tie the sessions together\n \"\"\"\n ev = kw\n # ev[\"message\"] = msg\n self.emit(ev)\n\n @abc.abstractmethod\n def start(self) -> None:\n \"\"\"\n Abstract method to initialize output plugin\n \"\"\"\n pass\n\n @abc.abstractmethod\n def stop(self) -> None:\n \"\"\"\n Abstract method to shut down output plugin\n \"\"\"\n pass\n\n @abc.abstractmethod\n def write(self, event: Dict[str, Any]) -> None:\n \"\"\"\n Handle a general event within the output plugin\n \"\"\"\n pass\n\n def emit(self, event: dict) -> None:\n \"\"\"\n This is the main emit() hook that gets called by the the Twisted logging\n\n To make this work with Cowrie, the event dictionary needs the following keys:\n - 'eventid'\n - 'sessionno' or 'session'\n - 'message' or 'format'\n \"\"\"\n sessionno: str\n ev: dict\n\n # Ignore stdout and stderr in output plugins\n if \"printed\" in event:\n return\n\n # Ignore anything without eventid\n if \"eventid\" not in event:\n return\n\n # Ignore anything without session information\n if (\n \"sessionno\" not in event\n and \"session\" not in event\n and \"system\" not in event\n ):\n return\n\n # Ignore anything without message\n if \"message\" not in event and \"format\" not in event:\n return\n\n ev: Dict[str, any] = event # type: ignore\n ev[\"sensor\"] = self.sensor\n\n if \"isError\" in ev:\n del ev[\"isError\"]\n\n # Add ISO timestamp and sensor data\n if \"time\" not in ev:\n ev[\"time\"] = time.time()\n ev[\"timestamp\"] = formatTime(ev[\"time\"], timeFormat=self.timeFormat)\n\n if \"format\" in ev and (\"message\" not in ev or ev[\"message\"] == ()):\n try:\n ev[\"message\"] = ev[\"format\"] % ev\n del ev[\"format\"]\n except Exception:\n pass\n\n # Explicit sessionno (from logDispatch) overrides from 'system'\n if \"sessionno\" in ev:\n sessionno = ev[\"sessionno\"]\n del ev[\"sessionno\"]\n # Maybe it's passed explicitly\n elif \"session\" in ev:\n # reverse engineer sessionno\n try:\n sessionno = next(\n key\n for key, value in self.sessions.items()\n if value == ev[\"session\"]\n )\n except StopIteration:\n return\n # Extract session id from the twisted log prefix\n elif \"system\" in ev:\n sessionno = \"0\"\n telnetmatch = self.telnetRegex.match(ev[\"system\"])\n if telnetmatch:\n sessionno = \"T{}\".format(telnetmatch.groups()[0])\n else:\n sshmatch = self.sshRegex.match(ev[\"system\"])\n if sshmatch:\n sessionno = \"S{}\".format(sshmatch.groups()[0])\n if sessionno == 
\"0\":\n return\n\n if sessionno in self.ips:\n ev[\"src_ip\"] = self.ips[sessionno]\n\n # Connection event is special. adds to session list\n if ev[\"eventid\"] == \"cowrie.session.connect\":\n self.sessions[sessionno] = ev[\"session\"]\n self.ips[sessionno] = ev[\"src_ip\"]\n else:\n ev[\"session\"] = self.sessions[sessionno]\n\n self.write(ev)\n\n # Disconnect is special, remove cached data\n if ev[\"eventid\"] == \"cowrie.session.closed\":\n del self.sessions[sessionno]\n del self.ips[sessionno]\n", "path": "src/cowrie/core/output.py" } ]
[ { "content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n\nimport abc\nimport re\nimport socket\nimport time\nfrom os import environ\nfrom typing import Any, Dict, Pattern\n\nfrom twisted.internet import reactor\nfrom twisted.logger import formatTime\n\nfrom cowrie.core.config import CowrieConfig\n\n# Events:\n# cowrie.client.fingerprint\n# cowrie.client.size\n# cowrie.client.var\n# cowrie.client.version\n# cowrie.command.input\n# cowrie.command.failed\n# cowrie.command.success (deprecated)\n# cowrie.direct-tcpip.data\n# cowrie.direct-tcpip.request\n# cowrie.log.closed\n# cowrie.login.failed\n# cowrie.login.success\n# cowrie.session.closed\n# cowrie.session.connect\n# cowrie.session.file_download\n# cowrie.session.file_upload\n\n\"\"\"\nThe time is available in two formats in each event, as key 'time'\nin epoch format and in key 'timestamp' as a ISO compliant string\nin UTC.\n\"\"\"\n\n\nclass Output(metaclass=abc.ABCMeta):\n \"\"\"\n This is the abstract base class intended to be inherited by\n cowrie output plugins. 
Plugins require the mandatory\n methods: stop, start and write\n \"\"\"\n\n def __init__(self) -> None:\n self.sessions: Dict[str, str] = {}\n self.ips: Dict[str, str] = {}\n\n # Need these for each individual transport, or else the session numbers overlap\n self.sshRegex: Pattern[str] = re.compile(\".*SSHTransport,([0-9]+),[0-9a-f:.]+$\")\n self.telnetRegex: Pattern[str] = re.compile(\n \".*TelnetTransport,([0-9]+),[0-9a-f:.]+$\"\n )\n self.sensor: str = CowrieConfig.get(\n \"honeypot\", \"sensor_name\", fallback=socket.gethostname()\n )\n self.timeFormat: str\n\n # use Z for UTC (Zulu) time, it's shorter.\n if \"TZ\" in environ and environ[\"TZ\"] == \"UTC\":\n self.timeFormat = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n else:\n self.timeFormat = \"%Y-%m-%dT%H:%M:%S.%f%z\"\n\n # Event trigger so that stop() is called by the reactor when stopping\n reactor.addSystemEventTrigger(\"before\", \"shutdown\", self.stop) # type: ignore\n\n self.start()\n\n def logDispatch(self, **kw: str) -> None:\n \"\"\"\n Use logDispatch when the HoneypotTransport prefix is not available.\n Here you can explicitly set the sessionIds to tie the sessions together\n \"\"\"\n ev = kw\n # ev[\"message\"] = msg\n self.emit(ev)\n\n @abc.abstractmethod\n def start(self) -> None:\n \"\"\"\n Abstract method to initialize output plugin\n \"\"\"\n pass\n\n @abc.abstractmethod\n def stop(self) -> None:\n \"\"\"\n Abstract method to shut down output plugin\n \"\"\"\n pass\n\n @abc.abstractmethod\n def write(self, event: Dict[str, Any]) -> None:\n \"\"\"\n Handle a general event within the output plugin\n \"\"\"\n pass\n\n def emit(self, event: dict) -> None:\n \"\"\"\n This is the main emit() hook that gets called by the the Twisted logging\n\n To make this work with Cowrie, the event dictionary needs the following keys:\n - 'eventid'\n - 'sessionno' or 'session'\n - 'message' or 'format'\n \"\"\"\n sessionno: str\n ev: dict\n\n # Ignore stdout and stderr in output plugins\n if \"printed\" in event:\n return\n\n # Ignore anything without eventid\n if \"eventid\" not in event:\n return\n\n # Ignore anything without session information\n if (\n \"sessionno\" not in event\n and \"session\" not in event\n and \"system\" not in event\n ):\n return\n\n # Ignore anything without message\n if \"message\" not in event and \"format\" not in event:\n return\n\n ev: Dict[str, any] = event.copy() # type: ignore\n ev[\"sensor\"] = self.sensor\n\n if \"isError\" in ev:\n del ev[\"isError\"]\n\n # Add ISO timestamp and sensor data\n if \"time\" not in ev:\n ev[\"time\"] = time.time()\n ev[\"timestamp\"] = formatTime(ev[\"time\"], timeFormat=self.timeFormat)\n\n if \"format\" in ev and (\"message\" not in ev or ev[\"message\"] == ()):\n try:\n ev[\"message\"] = ev[\"format\"] % ev\n del ev[\"format\"]\n except Exception:\n pass\n\n # Explicit sessionno (from logDispatch) overrides from 'system'\n if \"sessionno\" in ev:\n sessionno = ev[\"sessionno\"]\n del ev[\"sessionno\"]\n # Maybe it's passed explicitly\n elif \"session\" in ev:\n # reverse engineer sessionno\n try:\n sessionno = next(\n key\n for key, value in self.sessions.items()\n if value == ev[\"session\"]\n )\n except StopIteration:\n return\n # Extract session id from the twisted log prefix\n elif \"system\" in ev:\n sessionno = \"0\"\n telnetmatch = self.telnetRegex.match(ev[\"system\"])\n if telnetmatch:\n sessionno = \"T{}\".format(telnetmatch.groups()[0])\n else:\n sshmatch = self.sshRegex.match(ev[\"system\"])\n if sshmatch:\n sessionno = \"S{}\".format(sshmatch.groups()[0])\n if 
sessionno == \"0\":\n return\n\n if sessionno in self.ips:\n ev[\"src_ip\"] = self.ips[sessionno]\n\n # Connection event is special. adds to session list\n if ev[\"eventid\"] == \"cowrie.session.connect\":\n self.sessions[sessionno] = ev[\"session\"]\n self.ips[sessionno] = ev[\"src_ip\"]\n else:\n ev[\"session\"] = self.sessions[sessionno]\n\n self.write(ev)\n\n # Disconnect is special, remove cached data\n if ev[\"eventid\"] == \"cowrie.session.closed\":\n del self.sessions[sessionno]\n del self.ips[sessionno]\n", "path": "src/cowrie/core/output.py" } ]
diff --git a/src/cowrie/core/output.py b/src/cowrie/core/output.py index b55dea5f41..7c144aa890 100644 --- a/src/cowrie/core/output.py +++ b/src/cowrie/core/output.py @@ -158,7 +158,7 @@ def emit(self, event: dict) -> None: if "message" not in event and "format" not in event: return - ev: Dict[str, any] = event # type: ignore + ev: Dict[str, any] = event.copy() # type: ignore ev["sensor"] = self.sensor if "isError" in ev:
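The one-line fix above (`ev = event.copy()`) matters because `emit()` goes on to add and delete keys on `ev`; when `ev` merely aliases the dict that Twisted's log publisher hands to every observer, those mutations leak into the other observers' view of the event, which lines up with the cross-observer `KeyError: 'log_time'` in the report. A minimal, self-contained sketch of the aliasing problem (the function and key names here are hypothetical, not Cowrie or Twisted APIs):

```python
def emit_mutating(event):
    # pre-fix behaviour: ev is the caller's dict, so deletions leak out of emit()
    ev = event
    ev["sensor"] = "honeypot-1"
    del ev["format"]

def emit_copying(event):
    # post-fix behaviour: ev = event.copy(), so the caller's dict stays intact
    ev = event.copy()
    ev["sensor"] = "honeypot-1"
    del ev["format"]

def next_observer(event):
    # a later log observer that still expects the original keys to be present
    return event["format"] % event

shared = {"format": "login attempt from %(src_ip)s", "src_ip": "198.51.100.7"}

emit_copying(shared)
print(next_observer(shared))    # 'login attempt from 198.51.100.7' -- still works

emit_mutating(shared)
# next_observer(shared) would now raise KeyError: 'format'
```

Copying the event once per `emit()` is cheap compared with the I/O each output plugin performs, so the defensive copy is a reasonable trade-off.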
blakeblackshear__frigate-5031
[Support]: recordings on disk are in -4 timezone (4 hours behind UTC) - 0.12.0-beta2-tensorrt ### Describe the problem you are having All my recordings on 0.12.0-beta2-tensorrt are recording with what appears to be a -4 timezone. I have checked my host and docker config which both display +10 (Australian Eastern Standard time). Everything in the GUI displays in my timezone as expected. ### Version 0.12.0-beta2-tensorrt (0.12.0-0DBF909) ### Frigate config file ```yaml mqtt: host: 192.168.1.150 port: 1883 cameras: garage-cam: ffmpeg: inputs: - path: rtsp://***:****@192.168.1.231:554/h265Preview_01_main roles: - detect detect: width: 3840 # <---- update for your camera's resolution height: 2160 # <---- update for your camera's resolution frontright-cam: ffmpeg: inputs: - path: rtsp://***:****@192.168.1.201:554/h265Preview_01_main roles: - detect detect: width: 3840 # <---- update for your camera's resolution height: 2160 # <---- update for your camera's resolution frontdoor-cam: # motion: # mask: # # top portion of frame (road + harry & joyce driveway) # - 1083,0,0,0,0,197,0,594,108,521,391,368,673,235,893,222,1099,219,1369,213,1560,238,1788,254,2230,305,2709,387,3119,492,3421,565,3840,689,3840,537,3840,0,2595,0,1569,0 ffmpeg: inputs: - path: rtsp://***:****@192.168.1.254:554/h265Preview_01_main roles: - detect detect: width: 3840 # <---- update for your camera's resolution height: 2160 # <---- update for your camera's resolution record: enabled: true retain: days: 7 mode: all snapshots: enabled: true retain: default: 90 birdseye: enabled: true mode: continuous ffmpeg: # input_args: hwaccel_args: preset-nvidia-h265 # output_args: # # -an no audio # # -crf default 28 # # -c:v codec for video # # preset default medium # record: -c:v libx265 -hwaccel hevc_cuvid -vf hwupload -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -an -crf 26 -preset fast -pix_fmt yuv420p10le detectors: tensorrt: type: tensorrt device: 0 #This is the default, select the first GPU model: path: /trt-models/yolov7-tiny-416.trt labelmap_path: /trt-models/coco_91cl.txt input_tensor: nchw input_pixel_format: rgb width: 416 height: 416 ``` ### Relevant log output ```shell - frigate docker container # zdump /etc/localtime /etc/localtime Thu Jan 12 10:58:55 2023 AEST - unraid server root@xxx:~# zdump /etc/localtime /etc/localtime Thu Jan 12 10:59:43 2023 AEST - docker config docker run -d --name='frigate-beta' --net='bridge' --privileged=true -e TZ="Australia/Brisbane" -e HOST_OS="Unraid" -e HOST_HOSTNAME="Thea" -e HOST_CONTAINERNAME="frigate-beta" -e 'FRIGATE_RTSP_PASSWORD'='' -l net.unraid.docker.managed=dockerman -l net.unraid.docker.webui='http://[IP]:[PORT:5000]' -l net.unraid.docker.icon='https://raw.githubusercontent.com/yayitazale/unraid-templates/main/frigate.png' -p '5051:5000/tcp' -p '1935:1935/tcp' -p '505:505/tcp' -v '/mnt/user/appdata/frigate-beta':'/config':'rw' -v '/mnt/user/surveillance/frigate-beta/':'/media/frigate':'rw' -v '/mnt/user/appdata/frigate-beta/trt-models':'/trt-models':'rw' -v '/etc/localtime':'/etc/localtime':'ro' --gpus all --shm-size=256mb --mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 --restart unless-stopped 'blakeblackshear/frigate:0.12.0-beta2-tensorrt' f75e5c20040257174b82a719194343839e8af0a33626bebcc17f0d410f1d5437 ``` ### FFprobe output from your camera ```shell N/A ``` ### Frigate stats ```json N/A ``` ### Operating system UNRAID ### Install method Docker Compose ### Coral version CPU (no coral) ### Network connection Wired ### Camera make and 
model N/A ### Any other information that may be helpful <img width="600" alt="image" src="https://user-images.githubusercontent.com/2231825/211950268-c04b5f85-c1a6-47c7-b653-b148eb71dd26.png"> Here we can see the timestamp of folders (hour 20) in the folder path, and the write time of the local system (hour 10, +10 AEST). UTC time is currently 12.
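One plausible reading of the report, based on the `move_files()` / `store_segment()` code that follows: the cached segment filename is parsed with `strptime()` into a naive datetime, and `store_segment()` then labels that naive value as UTC (`replace(tzinfo=timezone.utc)`) before converting it to local time with `astimezone(tz=None)`. If the filename timestamp is already local clock time (as ffmpeg's `-strftime 1` segment naming typically writes), the local offset gets applied on top of an already-local time. A minimal sketch, assuming an AEST (UTC+10) host and a hypothetical segment name:

```python
from datetime import datetime, timezone

# Hypothetical cache segment recorded at 10:00 local time on an AEST (+10) host.
segment_stamp = "20230112100000"
start_time = datetime.strptime(segment_stamp, "%Y%m%d%H%M%S")   # naive datetime

# What store_segment() does when building the recordings folder path:
folder = (
    start_time.replace(tzinfo=timezone.utc)   # label the naive value as UTC
    .astimezone(tz=None)                      # convert to the host's local zone
    .strftime("%Y-%m-%d/%H")
)
print(folder)   # on a +10 host: '2023-01-12/20' -- hour 20 for a 10:00 recording
```

On a UTC host the two representations coincide, which would hide the mismatch entirely.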
[ { "content": "import datetime\nimport itertools\nimport logging\nimport multiprocessing as mp\nimport os\nimport queue\nimport random\nimport string\nimport subprocess as sp\nimport threading\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport psutil\nfrom peewee import JOIN, DoesNotExist\n\nfrom frigate.config import RetainModeEnum, FrigateConfig\nfrom frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR\nfrom frigate.models import Event, Recordings\nfrom frigate.util import area\n\nlogger = logging.getLogger(__name__)\n\nSECONDS_IN_DAY = 60 * 60 * 24\n\n\ndef remove_empty_directories(directory):\n # list all directories recursively and sort them by path,\n # longest first\n paths = sorted(\n [x[0] for x in os.walk(RECORD_DIR)],\n key=lambda p: len(str(p)),\n reverse=True,\n )\n for path in paths:\n # don't delete the parent\n if path == RECORD_DIR:\n continue\n if len(os.listdir(path)) == 0:\n os.rmdir(path)\n\n\nclass RecordingMaintainer(threading.Thread):\n def __init__(\n self, config: FrigateConfig, recordings_info_queue: mp.Queue, stop_event\n ):\n threading.Thread.__init__(self)\n self.name = \"recording_maint\"\n self.config = config\n self.recordings_info_queue = recordings_info_queue\n self.stop_event = stop_event\n self.recordings_info = defaultdict(list)\n self.end_time_cache = {}\n\n def move_files(self):\n cache_files = sorted(\n [\n d\n for d in os.listdir(CACHE_DIR)\n if os.path.isfile(os.path.join(CACHE_DIR, d))\n and d.endswith(\".mp4\")\n and not d.startswith(\"clip_\")\n ]\n )\n\n files_in_use = []\n for process in psutil.process_iter():\n try:\n if process.name() != \"ffmpeg\":\n continue\n flist = process.open_files()\n if flist:\n for nt in flist:\n if nt.path.startswith(CACHE_DIR):\n files_in_use.append(nt.path.split(\"/\")[-1])\n except:\n continue\n\n # group recordings by camera\n grouped_recordings = defaultdict(list)\n for f in cache_files:\n # Skip files currently in use\n if f in files_in_use:\n continue\n\n cache_path = os.path.join(CACHE_DIR, f)\n basename = os.path.splitext(f)[0]\n camera, date = basename.rsplit(\"-\", maxsplit=1)\n start_time = datetime.datetime.strptime(date, \"%Y%m%d%H%M%S\")\n\n grouped_recordings[camera].append(\n {\n \"cache_path\": cache_path,\n \"start_time\": start_time,\n }\n )\n\n # delete all cached files past the most recent 5\n keep_count = 5\n for camera in grouped_recordings.keys():\n segment_count = len(grouped_recordings[camera])\n if segment_count > keep_count:\n ####\n # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.\n ####\n # logger.warning(\n # f\"Too many recording segments in cache for {camera}. 
Keeping the {keep_count} most recent segments out of {segment_count}, discarding the rest...\"\n # )\n to_remove = grouped_recordings[camera][:-keep_count]\n for f in to_remove:\n cache_path = f[\"cache_path\"]\n ####\n # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.\n ####\n # logger.warning(f\"Discarding a recording segment: {cache_path}\")\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]\n\n for camera, recordings in grouped_recordings.items():\n\n # clear out all the recording info for old frames\n while (\n len(self.recordings_info[camera]) > 0\n and self.recordings_info[camera][0][0]\n < recordings[0][\"start_time\"].timestamp()\n ):\n self.recordings_info[camera].pop(0)\n\n # get all events with the end time after the start of the oldest cache file\n # or with end_time None\n events: Event = (\n Event.select()\n .where(\n Event.camera == camera,\n (Event.end_time == None)\n | (Event.end_time >= recordings[0][\"start_time\"].timestamp()),\n Event.has_clip,\n )\n .order_by(Event.start_time)\n )\n for r in recordings:\n cache_path = r[\"cache_path\"]\n start_time = r[\"start_time\"]\n\n # Just delete files if recordings are turned off\n if (\n not camera in self.config.cameras\n or not self.config.cameras[camera].record.enabled\n ):\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n continue\n\n if cache_path in self.end_time_cache:\n end_time, duration = self.end_time_cache[cache_path]\n else:\n ffprobe_cmd = [\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-show_entries\",\n \"format=duration\",\n \"-of\",\n \"default=noprint_wrappers=1:nokey=1\",\n f\"{cache_path}\",\n ]\n p = sp.run(ffprobe_cmd, capture_output=True)\n if p.returncode == 0 and p.stdout.decode():\n duration = float(p.stdout.decode().strip())\n else:\n duration = -1\n\n # ensure duration is within expected length\n if 0 < duration < MAX_SEGMENT_DURATION:\n end_time = start_time + datetime.timedelta(seconds=duration)\n self.end_time_cache[cache_path] = (end_time, duration)\n else:\n if duration == -1:\n logger.warning(\n f\"Failed to probe corrupt segment {f}: {p.returncode} - {p.stderr}\"\n )\n\n logger.warning(f\"Discarding a corrupt recording segment: {f}\")\n Path(cache_path).unlink(missing_ok=True)\n continue\n\n # if cached file's start_time is earlier than the retain days for the camera\n if start_time <= (\n (\n datetime.datetime.now()\n - datetime.timedelta(\n days=self.config.cameras[camera].record.retain.days\n )\n )\n ):\n # if the cached segment overlaps with the events:\n overlaps = False\n for event in events:\n # if the event starts in the future, stop checking events\n # and remove this segment\n if event.start_time > end_time.timestamp():\n overlaps = False\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n break\n\n # if the event is in progress or ends after the recording starts, keep it\n # and stop looking at events\n if (\n event.end_time is None\n or event.end_time >= start_time.timestamp()\n ):\n overlaps = True\n break\n\n if overlaps:\n record_mode = self.config.cameras[\n camera\n ].record.events.retain.mode\n # move from cache to recordings immediately\n self.store_segment(\n camera,\n start_time,\n end_time,\n duration,\n cache_path,\n record_mode,\n )\n # else retain days includes this segment\n else:\n record_mode = 
self.config.cameras[camera].record.retain.mode\n self.store_segment(\n camera, start_time, end_time, duration, cache_path, record_mode\n )\n\n def segment_stats(self, camera, start_time, end_time):\n active_count = 0\n motion_count = 0\n for frame in self.recordings_info[camera]:\n # frame is after end time of segment\n if frame[0] > end_time.timestamp():\n break\n # frame is before start time of segment\n if frame[0] < start_time.timestamp():\n continue\n\n active_count += len(\n [\n o\n for o in frame[1]\n if not o[\"false_positive\"] and o[\"motionless_count\"] == 0\n ]\n )\n\n motion_count += sum([area(box) for box in frame[2]])\n\n return (motion_count, active_count)\n\n def store_segment(\n self,\n camera,\n start_time: datetime.datetime,\n end_time: datetime.datetime,\n duration,\n cache_path,\n store_mode: RetainModeEnum,\n ):\n motion_count, active_count = self.segment_stats(camera, start_time, end_time)\n\n # check if the segment shouldn't be stored\n if (store_mode == RetainModeEnum.motion and motion_count == 0) or (\n store_mode == RetainModeEnum.active_objects and active_count == 0\n ):\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n return\n\n directory = os.path.join(\n RECORD_DIR,\n start_time.replace(tzinfo=datetime.timezone.utc)\n .astimezone(tz=None)\n .strftime(\"%Y-%m-%d/%H\"),\n camera,\n )\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_name = (\n f\"{start_time.replace(tzinfo=datetime.timezone.utc).strftime('%M.%S.mp4')}\"\n )\n file_path = os.path.join(directory, file_name)\n\n try:\n if not os.path.exists(file_path):\n start_frame = datetime.datetime.now().timestamp()\n\n # add faststart to kept segments to improve metadata reading\n ffmpeg_cmd = [\n \"ffmpeg\",\n \"-y\",\n \"-i\",\n cache_path,\n \"-c\",\n \"copy\",\n \"-movflags\",\n \"+faststart\",\n file_path,\n ]\n\n p = sp.run(\n ffmpeg_cmd,\n encoding=\"ascii\",\n capture_output=True,\n )\n\n if p.returncode != 0:\n logger.error(f\"Unable to convert {cache_path} to {file_path}\")\n logger.error(p.stderr)\n return\n else:\n logger.debug(\n f\"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds.\"\n )\n\n try:\n # get the segment size of the cache file\n # file without faststart is same size\n segment_size = round(\n float(os.path.getsize(cache_path)) / 1000000, 1\n )\n except OSError:\n segment_size = 0\n\n os.remove(cache_path)\n\n rand_id = \"\".join(\n random.choices(string.ascii_lowercase + string.digits, k=6)\n )\n Recordings.create(\n id=f\"{start_time.timestamp()}-{rand_id}\",\n camera=camera,\n path=file_path,\n start_time=start_time.timestamp(),\n end_time=end_time.timestamp(),\n duration=duration,\n motion=motion_count,\n # TODO: update this to store list of active objects at some point\n objects=active_count,\n segment_size=segment_size,\n )\n except Exception as e:\n logger.error(f\"Unable to store recording segment {cache_path}\")\n Path(cache_path).unlink(missing_ok=True)\n logger.error(e)\n\n # clear end_time cache\n self.end_time_cache.pop(cache_path, None)\n\n def run(self):\n # Check for new files every 5 seconds\n wait_time = 5\n while not self.stop_event.wait(wait_time):\n run_start = datetime.datetime.now().timestamp()\n\n # empty the recordings info queue\n while True:\n try:\n (\n camera,\n frame_time,\n current_tracked_objects,\n motion_boxes,\n regions,\n ) = self.recordings_info_queue.get(False)\n\n if self.config.cameras[camera].record.enabled:\n self.recordings_info[camera].append(\n (\n 
frame_time,\n current_tracked_objects,\n motion_boxes,\n regions,\n )\n )\n except queue.Empty:\n break\n\n try:\n self.move_files()\n except Exception as e:\n logger.error(\n \"Error occurred when attempting to maintain recording cache\"\n )\n logger.error(e)\n duration = datetime.datetime.now().timestamp() - run_start\n wait_time = max(0, 5 - duration)\n\n logger.info(f\"Exiting recording maintenance...\")\n\n\nclass RecordingCleanup(threading.Thread):\n def __init__(self, config: FrigateConfig, stop_event):\n threading.Thread.__init__(self)\n self.name = \"recording_cleanup\"\n self.config = config\n self.stop_event = stop_event\n\n def clean_tmp_clips(self):\n # delete any clips more than 5 minutes old\n for p in Path(\"/tmp/cache\").rglob(\"clip_*.mp4\"):\n logger.debug(f\"Checking tmp clip {p}.\")\n if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):\n logger.debug(\"Deleting tmp clip.\")\n p.unlink(missing_ok=True)\n\n def expire_recordings(self):\n logger.debug(\"Start expire recordings (new).\")\n\n logger.debug(\"Start deleted cameras.\")\n # Handle deleted cameras\n expire_days = self.config.record.retain.days\n expire_before = (\n datetime.datetime.now() - datetime.timedelta(days=expire_days)\n ).timestamp()\n no_camera_recordings: Recordings = Recordings.select().where(\n Recordings.camera.not_in(list(self.config.cameras.keys())),\n Recordings.end_time < expire_before,\n )\n\n deleted_recordings = set()\n for recording in no_camera_recordings:\n Path(recording.path).unlink(missing_ok=True)\n deleted_recordings.add(recording.id)\n\n logger.debug(f\"Expiring {len(deleted_recordings)} recordings\")\n Recordings.delete().where(Recordings.id << deleted_recordings).execute()\n logger.debug(\"End deleted cameras.\")\n\n logger.debug(\"Start all cameras.\")\n for camera, config in self.config.cameras.items():\n logger.debug(f\"Start camera: {camera}.\")\n # Get the timestamp for cutoff of retained days\n expire_days = config.record.retain.days\n expire_date = (\n datetime.datetime.now() - datetime.timedelta(days=expire_days)\n ).timestamp()\n\n # Get recordings to check for expiration\n recordings: Recordings = (\n Recordings.select()\n .where(\n Recordings.camera == camera,\n Recordings.end_time < expire_date,\n )\n .order_by(Recordings.start_time)\n )\n\n # Get all the events to check against\n events: Event = (\n Event.select()\n .where(\n Event.camera == camera,\n # need to ensure segments for all events starting\n # before the expire date are included\n Event.start_time < expire_date,\n Event.has_clip,\n )\n .order_by(Event.start_time)\n .objects()\n )\n\n # loop over recordings and see if they overlap with any non-expired events\n # TODO: expire segments based on segment stats according to config\n event_start = 0\n deleted_recordings = set()\n for recording in recordings.objects().iterator():\n keep = False\n # Now look for a reason to keep this recording segment\n for idx in range(event_start, len(events)):\n event = events[idx]\n\n # if the event starts in the future, stop checking events\n # and let this recording segment expire\n if event.start_time > recording.end_time:\n keep = False\n break\n\n # if the event is in progress or ends after the recording starts, keep it\n # and stop looking at events\n if event.end_time is None or event.end_time >= recording.start_time:\n keep = True\n break\n\n # if the event ends before this recording segment starts, skip\n # this event and check the next event for an overlap.\n # since the events and recordings are 
sorted, we can skip events\n # that end before the previous recording segment started on future segments\n if event.end_time < recording.start_time:\n event_start = idx\n\n # Delete recordings outside of the retention window or based on the retention mode\n if (\n not keep\n or (\n config.record.events.retain.mode == RetainModeEnum.motion\n and recording.motion == 0\n )\n or (\n config.record.events.retain.mode\n == RetainModeEnum.active_objects\n and recording.objects == 0\n )\n ):\n Path(recording.path).unlink(missing_ok=True)\n deleted_recordings.add(recording.id)\n\n logger.debug(f\"Expiring {len(deleted_recordings)} recordings\")\n # delete up to 100,000 at a time\n max_deletes = 100000\n deleted_recordings_list = list(deleted_recordings)\n for i in range(0, len(deleted_recordings_list), max_deletes):\n Recordings.delete().where(\n Recordings.id << deleted_recordings_list[i : i + max_deletes]\n ).execute()\n\n logger.debug(f\"End camera: {camera}.\")\n\n logger.debug(\"End all cameras.\")\n logger.debug(\"End expire recordings (new).\")\n\n def expire_files(self):\n logger.debug(\"Start expire files (legacy).\")\n\n default_expire = (\n datetime.datetime.now().timestamp()\n - SECONDS_IN_DAY * self.config.record.retain.days\n )\n delete_before = {}\n\n for name, camera in self.config.cameras.items():\n delete_before[name] = (\n datetime.datetime.now().timestamp()\n - SECONDS_IN_DAY * camera.record.retain.days\n )\n\n # find all the recordings older than the oldest recording in the db\n try:\n oldest_recording = Recordings.select().order_by(Recordings.start_time).get()\n\n p = Path(oldest_recording.path)\n oldest_timestamp = p.stat().st_mtime - 1\n except DoesNotExist:\n oldest_timestamp = datetime.datetime.now().timestamp()\n except FileNotFoundError:\n logger.warning(f\"Unable to find file from recordings database: {p}\")\n Recordings.delete().where(Recordings.id == oldest_recording.id).execute()\n return\n\n logger.debug(f\"Oldest recording in the db: {oldest_timestamp}\")\n process = sp.run(\n [\"find\", RECORD_DIR, \"-type\", \"f\", \"!\", \"-newermt\", f\"@{oldest_timestamp}\"],\n capture_output=True,\n text=True,\n )\n files_to_check = process.stdout.splitlines()\n\n for f in files_to_check:\n p = Path(f)\n try:\n if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):\n p.unlink(missing_ok=True)\n except FileNotFoundError:\n logger.warning(f\"Attempted to expire missing file: {f}\")\n\n logger.debug(\"End expire files (legacy).\")\n\n def sync_recordings(self):\n logger.debug(\"Start sync recordings.\")\n\n # get all recordings in the db\n recordings: Recordings = Recordings.select()\n\n # get all recordings files on disk\n process = sp.run(\n [\"find\", RECORD_DIR, \"-type\", \"f\"],\n capture_output=True,\n text=True,\n )\n files_on_disk = process.stdout.splitlines()\n\n recordings_to_delete = []\n for recording in recordings.objects().iterator():\n if not recording.path in files_on_disk:\n recordings_to_delete.append(recording.id)\n\n logger.debug(\n f\"Deleting {len(recordings_to_delete)} recordings with missing files\"\n )\n # delete up to 100,000 at a time\n max_deletes = 100000\n for i in range(0, len(recordings_to_delete), max_deletes):\n Recordings.delete().where(\n Recordings.id << recordings_to_delete[i : i + max_deletes]\n ).execute()\n\n logger.debug(\"End sync recordings.\")\n\n def run(self):\n # on startup sync recordings with disk (disabled due to too much CPU usage)\n # self.sync_recordings()\n\n # Expire tmp clips every minute, recordings and 
clean directories every hour.\n for counter in itertools.cycle(range(self.config.record.expire_interval)):\n if self.stop_event.wait(60):\n logger.info(f\"Exiting recording cleanup...\")\n break\n self.clean_tmp_clips()\n\n if counter == 0:\n self.expire_recordings()\n self.expire_files()\n remove_empty_directories(RECORD_DIR)\n", "path": "frigate/record.py" } ]
[ { "content": "import datetime\nimport itertools\nimport logging\nimport multiprocessing as mp\nimport os\nimport queue\nimport random\nimport string\nimport subprocess as sp\nimport threading\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport psutil\nfrom peewee import JOIN, DoesNotExist\n\nfrom frigate.config import RetainModeEnum, FrigateConfig\nfrom frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR\nfrom frigate.models import Event, Recordings\nfrom frigate.util import area\n\nlogger = logging.getLogger(__name__)\n\nSECONDS_IN_DAY = 60 * 60 * 24\n\n\ndef remove_empty_directories(directory):\n # list all directories recursively and sort them by path,\n # longest first\n paths = sorted(\n [x[0] for x in os.walk(RECORD_DIR)],\n key=lambda p: len(str(p)),\n reverse=True,\n )\n for path in paths:\n # don't delete the parent\n if path == RECORD_DIR:\n continue\n if len(os.listdir(path)) == 0:\n os.rmdir(path)\n\n\nclass RecordingMaintainer(threading.Thread):\n def __init__(\n self, config: FrigateConfig, recordings_info_queue: mp.Queue, stop_event\n ):\n threading.Thread.__init__(self)\n self.name = \"recording_maint\"\n self.config = config\n self.recordings_info_queue = recordings_info_queue\n self.stop_event = stop_event\n self.recordings_info = defaultdict(list)\n self.end_time_cache = {}\n\n def move_files(self):\n cache_files = sorted(\n [\n d\n for d in os.listdir(CACHE_DIR)\n if os.path.isfile(os.path.join(CACHE_DIR, d))\n and d.endswith(\".mp4\")\n and not d.startswith(\"clip_\")\n ]\n )\n\n files_in_use = []\n for process in psutil.process_iter():\n try:\n if process.name() != \"ffmpeg\":\n continue\n flist = process.open_files()\n if flist:\n for nt in flist:\n if nt.path.startswith(CACHE_DIR):\n files_in_use.append(nt.path.split(\"/\")[-1])\n except:\n continue\n\n # group recordings by camera\n grouped_recordings = defaultdict(list)\n for f in cache_files:\n # Skip files currently in use\n if f in files_in_use:\n continue\n\n cache_path = os.path.join(CACHE_DIR, f)\n basename = os.path.splitext(f)[0]\n camera, date = basename.rsplit(\"-\", maxsplit=1)\n start_time = datetime.datetime.strptime(date, \"%Y%m%d%H%M%S\")\n\n grouped_recordings[camera].append(\n {\n \"cache_path\": cache_path,\n \"start_time\": start_time,\n }\n )\n\n # delete all cached files past the most recent 5\n keep_count = 5\n for camera in grouped_recordings.keys():\n segment_count = len(grouped_recordings[camera])\n if segment_count > keep_count:\n ####\n # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.\n ####\n # logger.warning(\n # f\"Too many recording segments in cache for {camera}. 
Keeping the {keep_count} most recent segments out of {segment_count}, discarding the rest...\"\n # )\n to_remove = grouped_recordings[camera][:-keep_count]\n for f in to_remove:\n cache_path = f[\"cache_path\"]\n ####\n # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.\n ####\n # logger.warning(f\"Discarding a recording segment: {cache_path}\")\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]\n\n for camera, recordings in grouped_recordings.items():\n\n # clear out all the recording info for old frames\n while (\n len(self.recordings_info[camera]) > 0\n and self.recordings_info[camera][0][0]\n < recordings[0][\"start_time\"].timestamp()\n ):\n self.recordings_info[camera].pop(0)\n\n # get all events with the end time after the start of the oldest cache file\n # or with end_time None\n events: Event = (\n Event.select()\n .where(\n Event.camera == camera,\n (Event.end_time == None)\n | (Event.end_time >= recordings[0][\"start_time\"].timestamp()),\n Event.has_clip,\n )\n .order_by(Event.start_time)\n )\n for r in recordings:\n cache_path = r[\"cache_path\"]\n start_time = r[\"start_time\"]\n\n # Just delete files if recordings are turned off\n if (\n not camera in self.config.cameras\n or not self.config.cameras[camera].record.enabled\n ):\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n continue\n\n if cache_path in self.end_time_cache:\n end_time, duration = self.end_time_cache[cache_path]\n else:\n ffprobe_cmd = [\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-show_entries\",\n \"format=duration\",\n \"-of\",\n \"default=noprint_wrappers=1:nokey=1\",\n f\"{cache_path}\",\n ]\n p = sp.run(ffprobe_cmd, capture_output=True)\n if p.returncode == 0 and p.stdout.decode():\n duration = float(p.stdout.decode().strip())\n else:\n duration = -1\n\n # ensure duration is within expected length\n if 0 < duration < MAX_SEGMENT_DURATION:\n end_time = start_time + datetime.timedelta(seconds=duration)\n self.end_time_cache[cache_path] = (end_time, duration)\n else:\n if duration == -1:\n logger.warning(\n f\"Failed to probe corrupt segment {f}: {p.returncode} - {p.stderr}\"\n )\n\n logger.warning(f\"Discarding a corrupt recording segment: {f}\")\n Path(cache_path).unlink(missing_ok=True)\n continue\n\n # if cached file's start_time is earlier than the retain days for the camera\n if start_time <= (\n (\n datetime.datetime.now()\n - datetime.timedelta(\n days=self.config.cameras[camera].record.retain.days\n )\n )\n ):\n # if the cached segment overlaps with the events:\n overlaps = False\n for event in events:\n # if the event starts in the future, stop checking events\n # and remove this segment\n if event.start_time > end_time.timestamp():\n overlaps = False\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n break\n\n # if the event is in progress or ends after the recording starts, keep it\n # and stop looking at events\n if (\n event.end_time is None\n or event.end_time >= start_time.timestamp()\n ):\n overlaps = True\n break\n\n if overlaps:\n record_mode = self.config.cameras[\n camera\n ].record.events.retain.mode\n # move from cache to recordings immediately\n self.store_segment(\n camera,\n start_time,\n end_time,\n duration,\n cache_path,\n record_mode,\n )\n # else retain days includes this segment\n else:\n record_mode = 
self.config.cameras[camera].record.retain.mode\n self.store_segment(\n camera, start_time, end_time, duration, cache_path, record_mode\n )\n\n def segment_stats(self, camera, start_time, end_time):\n active_count = 0\n motion_count = 0\n for frame in self.recordings_info[camera]:\n # frame is after end time of segment\n if frame[0] > end_time.timestamp():\n break\n # frame is before start time of segment\n if frame[0] < start_time.timestamp():\n continue\n\n active_count += len(\n [\n o\n for o in frame[1]\n if not o[\"false_positive\"] and o[\"motionless_count\"] == 0\n ]\n )\n\n motion_count += sum([area(box) for box in frame[2]])\n\n return (motion_count, active_count)\n\n def store_segment(\n self,\n camera,\n start_time: datetime.datetime,\n end_time: datetime.datetime,\n duration,\n cache_path,\n store_mode: RetainModeEnum,\n ):\n motion_count, active_count = self.segment_stats(camera, start_time, end_time)\n\n # check if the segment shouldn't be stored\n if (store_mode == RetainModeEnum.motion and motion_count == 0) or (\n store_mode == RetainModeEnum.active_objects and active_count == 0\n ):\n Path(cache_path).unlink(missing_ok=True)\n self.end_time_cache.pop(cache_path, None)\n return\n\n directory = os.path.join(\n RECORD_DIR,\n start_time.astimezone(tz=datetime.timezone.utc).strftime(\"%Y-%m-%d/%H\"),\n camera,\n )\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_name = (\n f\"{start_time.replace(tzinfo=datetime.timezone.utc).strftime('%M.%S.mp4')}\"\n )\n file_path = os.path.join(directory, file_name)\n\n try:\n if not os.path.exists(file_path):\n start_frame = datetime.datetime.now().timestamp()\n\n # add faststart to kept segments to improve metadata reading\n ffmpeg_cmd = [\n \"ffmpeg\",\n \"-y\",\n \"-i\",\n cache_path,\n \"-c\",\n \"copy\",\n \"-movflags\",\n \"+faststart\",\n file_path,\n ]\n\n p = sp.run(\n ffmpeg_cmd,\n encoding=\"ascii\",\n capture_output=True,\n )\n\n if p.returncode != 0:\n logger.error(f\"Unable to convert {cache_path} to {file_path}\")\n logger.error(p.stderr)\n return\n else:\n logger.debug(\n f\"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds.\"\n )\n\n try:\n # get the segment size of the cache file\n # file without faststart is same size\n segment_size = round(\n float(os.path.getsize(cache_path)) / 1000000, 1\n )\n except OSError:\n segment_size = 0\n\n os.remove(cache_path)\n\n rand_id = \"\".join(\n random.choices(string.ascii_lowercase + string.digits, k=6)\n )\n Recordings.create(\n id=f\"{start_time.timestamp()}-{rand_id}\",\n camera=camera,\n path=file_path,\n start_time=start_time.timestamp(),\n end_time=end_time.timestamp(),\n duration=duration,\n motion=motion_count,\n # TODO: update this to store list of active objects at some point\n objects=active_count,\n segment_size=segment_size,\n )\n except Exception as e:\n logger.error(f\"Unable to store recording segment {cache_path}\")\n Path(cache_path).unlink(missing_ok=True)\n logger.error(e)\n\n # clear end_time cache\n self.end_time_cache.pop(cache_path, None)\n\n def run(self):\n # Check for new files every 5 seconds\n wait_time = 5\n while not self.stop_event.wait(wait_time):\n run_start = datetime.datetime.now().timestamp()\n\n # empty the recordings info queue\n while True:\n try:\n (\n camera,\n frame_time,\n current_tracked_objects,\n motion_boxes,\n regions,\n ) = self.recordings_info_queue.get(False)\n\n if self.config.cameras[camera].record.enabled:\n self.recordings_info[camera].append(\n (\n frame_time,\n 
current_tracked_objects,\n motion_boxes,\n regions,\n )\n )\n except queue.Empty:\n break\n\n try:\n self.move_files()\n except Exception as e:\n logger.error(\n \"Error occurred when attempting to maintain recording cache\"\n )\n logger.error(e)\n duration = datetime.datetime.now().timestamp() - run_start\n wait_time = max(0, 5 - duration)\n\n logger.info(f\"Exiting recording maintenance...\")\n\n\nclass RecordingCleanup(threading.Thread):\n def __init__(self, config: FrigateConfig, stop_event):\n threading.Thread.__init__(self)\n self.name = \"recording_cleanup\"\n self.config = config\n self.stop_event = stop_event\n\n def clean_tmp_clips(self):\n # delete any clips more than 5 minutes old\n for p in Path(\"/tmp/cache\").rglob(\"clip_*.mp4\"):\n logger.debug(f\"Checking tmp clip {p}.\")\n if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):\n logger.debug(\"Deleting tmp clip.\")\n p.unlink(missing_ok=True)\n\n def expire_recordings(self):\n logger.debug(\"Start expire recordings (new).\")\n\n logger.debug(\"Start deleted cameras.\")\n # Handle deleted cameras\n expire_days = self.config.record.retain.days\n expire_before = (\n datetime.datetime.now() - datetime.timedelta(days=expire_days)\n ).timestamp()\n no_camera_recordings: Recordings = Recordings.select().where(\n Recordings.camera.not_in(list(self.config.cameras.keys())),\n Recordings.end_time < expire_before,\n )\n\n deleted_recordings = set()\n for recording in no_camera_recordings:\n Path(recording.path).unlink(missing_ok=True)\n deleted_recordings.add(recording.id)\n\n logger.debug(f\"Expiring {len(deleted_recordings)} recordings\")\n Recordings.delete().where(Recordings.id << deleted_recordings).execute()\n logger.debug(\"End deleted cameras.\")\n\n logger.debug(\"Start all cameras.\")\n for camera, config in self.config.cameras.items():\n logger.debug(f\"Start camera: {camera}.\")\n # Get the timestamp for cutoff of retained days\n expire_days = config.record.retain.days\n expire_date = (\n datetime.datetime.now() - datetime.timedelta(days=expire_days)\n ).timestamp()\n\n # Get recordings to check for expiration\n recordings: Recordings = (\n Recordings.select()\n .where(\n Recordings.camera == camera,\n Recordings.end_time < expire_date,\n )\n .order_by(Recordings.start_time)\n )\n\n # Get all the events to check against\n events: Event = (\n Event.select()\n .where(\n Event.camera == camera,\n # need to ensure segments for all events starting\n # before the expire date are included\n Event.start_time < expire_date,\n Event.has_clip,\n )\n .order_by(Event.start_time)\n .objects()\n )\n\n # loop over recordings and see if they overlap with any non-expired events\n # TODO: expire segments based on segment stats according to config\n event_start = 0\n deleted_recordings = set()\n for recording in recordings.objects().iterator():\n keep = False\n # Now look for a reason to keep this recording segment\n for idx in range(event_start, len(events)):\n event = events[idx]\n\n # if the event starts in the future, stop checking events\n # and let this recording segment expire\n if event.start_time > recording.end_time:\n keep = False\n break\n\n # if the event is in progress or ends after the recording starts, keep it\n # and stop looking at events\n if event.end_time is None or event.end_time >= recording.start_time:\n keep = True\n break\n\n # if the event ends before this recording segment starts, skip\n # this event and check the next event for an overlap.\n # since the events and recordings are sorted, we can 
skip events\n # that end before the previous recording segment started on future segments\n if event.end_time < recording.start_time:\n event_start = idx\n\n # Delete recordings outside of the retention window or based on the retention mode\n if (\n not keep\n or (\n config.record.events.retain.mode == RetainModeEnum.motion\n and recording.motion == 0\n )\n or (\n config.record.events.retain.mode\n == RetainModeEnum.active_objects\n and recording.objects == 0\n )\n ):\n Path(recording.path).unlink(missing_ok=True)\n deleted_recordings.add(recording.id)\n\n logger.debug(f\"Expiring {len(deleted_recordings)} recordings\")\n # delete up to 100,000 at a time\n max_deletes = 100000\n deleted_recordings_list = list(deleted_recordings)\n for i in range(0, len(deleted_recordings_list), max_deletes):\n Recordings.delete().where(\n Recordings.id << deleted_recordings_list[i : i + max_deletes]\n ).execute()\n\n logger.debug(f\"End camera: {camera}.\")\n\n logger.debug(\"End all cameras.\")\n logger.debug(\"End expire recordings (new).\")\n\n def expire_files(self):\n logger.debug(\"Start expire files (legacy).\")\n\n default_expire = (\n datetime.datetime.now().timestamp()\n - SECONDS_IN_DAY * self.config.record.retain.days\n )\n delete_before = {}\n\n for name, camera in self.config.cameras.items():\n delete_before[name] = (\n datetime.datetime.now().timestamp()\n - SECONDS_IN_DAY * camera.record.retain.days\n )\n\n # find all the recordings older than the oldest recording in the db\n try:\n oldest_recording = Recordings.select().order_by(Recordings.start_time).get()\n\n p = Path(oldest_recording.path)\n oldest_timestamp = p.stat().st_mtime - 1\n except DoesNotExist:\n oldest_timestamp = datetime.datetime.now().timestamp()\n except FileNotFoundError:\n logger.warning(f\"Unable to find file from recordings database: {p}\")\n Recordings.delete().where(Recordings.id == oldest_recording.id).execute()\n return\n\n logger.debug(f\"Oldest recording in the db: {oldest_timestamp}\")\n process = sp.run(\n [\"find\", RECORD_DIR, \"-type\", \"f\", \"!\", \"-newermt\", f\"@{oldest_timestamp}\"],\n capture_output=True,\n text=True,\n )\n files_to_check = process.stdout.splitlines()\n\n for f in files_to_check:\n p = Path(f)\n try:\n if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):\n p.unlink(missing_ok=True)\n except FileNotFoundError:\n logger.warning(f\"Attempted to expire missing file: {f}\")\n\n logger.debug(\"End expire files (legacy).\")\n\n def sync_recordings(self):\n logger.debug(\"Start sync recordings.\")\n\n # get all recordings in the db\n recordings: Recordings = Recordings.select()\n\n # get all recordings files on disk\n process = sp.run(\n [\"find\", RECORD_DIR, \"-type\", \"f\"],\n capture_output=True,\n text=True,\n )\n files_on_disk = process.stdout.splitlines()\n\n recordings_to_delete = []\n for recording in recordings.objects().iterator():\n if not recording.path in files_on_disk:\n recordings_to_delete.append(recording.id)\n\n logger.debug(\n f\"Deleting {len(recordings_to_delete)} recordings with missing files\"\n )\n # delete up to 100,000 at a time\n max_deletes = 100000\n for i in range(0, len(recordings_to_delete), max_deletes):\n Recordings.delete().where(\n Recordings.id << recordings_to_delete[i : i + max_deletes]\n ).execute()\n\n logger.debug(\"End sync recordings.\")\n\n def run(self):\n # on startup sync recordings with disk (disabled due to too much CPU usage)\n # self.sync_recordings()\n\n # Expire tmp clips every minute, recordings and clean 
directories every hour.\n for counter in itertools.cycle(range(self.config.record.expire_interval)):\n if self.stop_event.wait(60):\n logger.info(f\"Exiting recording cleanup...\")\n break\n self.clean_tmp_clips()\n\n if counter == 0:\n self.expire_recordings()\n self.expire_files()\n remove_empty_directories(RECORD_DIR)\n", "path": "frigate/record.py" } ]
diff --git a/frigate/record.py b/frigate/record.py index 53a75a4855..7b112f7f40 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -278,9 +278,7 @@ def store_segment( directory = os.path.join( RECORD_DIR, - start_time.replace(tzinfo=datetime.timezone.utc) - .astimezone(tz=None) - .strftime("%Y-%m-%d/%H"), + start_time.astimezone(tz=datetime.timezone.utc).strftime("%Y-%m-%d/%H"), camera, )
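The one-line fix above changes how the naive segment `start_time` is interpreted before the hourly recording directory name is built: the old expression declared the naive value to *be* UTC and converted it to local time, while the new one treats it as local time (the Python 3.6+ behaviour of `astimezone()` on a naive datetime) and converts it to UTC. A minimal stdlib-only sketch of the difference, using a made-up timestamp:

```python
import datetime

# A naive datetime like the one parsed from a cached segment filename.
naive = datetime.datetime(2022, 6, 1, 14, 30, 0)

# Old expression: declare the naive value to be UTC, then convert to local time.
old_dir = (
    naive.replace(tzinfo=datetime.timezone.utc)
    .astimezone(tz=None)
    .strftime("%Y-%m-%d/%H")
)

# New expression: interpret the naive value as local time, then convert to UTC.
new_dir = naive.astimezone(tz=datetime.timezone.utc).strftime("%Y-%m-%d/%H")

# On any host whose local timezone is not UTC, the two folder names differ.
print(old_dir, new_dir)
```

On a UTC host both expressions collapse to the same folder name, so the change only becomes visible for users running in other timezones.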
web2py__web2py-1855
Problem with web2py compilation

Hi there,

This issue is very simple to reproduce:

1. Download the latest version of web2py (I use the Mac OS version for normal users).
2. From the Admin interface, compile and pack the 'Welcome' application that ships with web2py.
3. From the Admin interface, upload and install the compiled 'Welcome' package you just created (under a different name, of course).
4. When you try to run this compiled application you get an error, e.g. "Invalid function" or similar.

Kind regards
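For reference, the admin steps above can be approximated in a script. The helper names and signatures below (`compile_application`, `w2p_pack`, `w2p_unpack`) are my assumption of web2py's gluon API and may differ between releases, and the paths are placeholders, so treat this as a reproduction sketch rather than verified code:

```python
# Hedged sketch: function names/signatures are assumed from gluon.compileapp and
# gluon.fileutils and may differ between web2py releases; paths are placeholders.
import os

from gluon.compileapp import compile_application
from gluon.fileutils import w2p_pack, w2p_unpack

apps_dir = "applications"                    # web2py applications folder (placeholder)
welcome = os.path.join(apps_dir, "welcome")

# Steps 1-2: byte-compile the stock welcome app and pack it in compiled form.
compile_application(welcome)
w2p_pack("/tmp/welcome_compiled.w2p", welcome, compiled=True)

# Step 3: install the packed archive under a different application name.
target = os.path.join(apps_dir, "welcome_copy")
os.makedirs(target, exist_ok=True)           # may be unnecessary if w2p_unpack creates it
w2p_unpack("/tmp/welcome_compiled.w2p", target)

# Step 4: requesting any action of welcome_copy in the browser should now show
# the reported "invalid function" (or similar) error.
```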
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n| Created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop©gmail.com>\n| for Web2py project\n\nUtilities and class for UTF8 strings managing\n----------------------------------------------\n\"\"\"\nfrom __future__ import print_function\nfrom gluon._compat import builtin as __builtin__, unicodeT, iteritems, to_unicode, to_native\n\n__all__ = ['Utf8']\n\nrepr_escape_tab = {}\n#FIXME PY3\nfor i in range(1, 32):\n repr_escape_tab[i] = to_unicode(\"\\\\\"+\"x%02x\" % i)\nrepr_escape_tab[7] = u'\\\\a'\nrepr_escape_tab[8] = u'\\\\b'\nrepr_escape_tab[9] = u'\\\\t'\nrepr_escape_tab[10] = u'\\\\n'\nrepr_escape_tab[11] = u'\\\\v'\nrepr_escape_tab[12] = u'\\\\f'\nrepr_escape_tab[13] = u'\\\\r'\nrepr_escape_tab[ord('\\\\')] = u'\\\\\\\\'\nrepr_escape_tab2 = repr_escape_tab.copy()\nrepr_escape_tab2[ord('\\'')] = u\"\\\\'\"\n\n\ndef sort_key(s):\n \"\"\"Unicode Collation Algorithm (UCA) (http://www.unicode.org/reports/tr10/)\n is used for utf-8 and unicode strings sorting and for utf-8 strings\n comparison\n\n Note:\n pyuca is a very memory cost module! It loads the whole\n \"allkey.txt\" file (~2mb!) into the memory. But this\n functionality is needed only when sort_key() is called as a\n part of sort() function or when Utf8 strings are compared.\n\n So, it is a lazy \"sort_key\" function which (ONLY ONCE, ON ITS\n FIRST CALL) imports pyuca and replaces itself with a real\n sort_key() function\n \"\"\"\n global sort_key\n try:\n from gluon.contrib.pyuca import unicode_collator\n unicode_sort_key = unicode_collator.sort_key\n sort_key = lambda s: unicode_sort_key(\n to_unicode(s, 'utf-8') if isinstance(s, str) else s)\n except:\n sort_key = lambda s: (\n to_unicode(s, 'utf-8') if isinstance(s, str) else s).lower()\n return sort_key(s)\n\n\ndef ord(char):\n \"\"\"Returns unicode id for utf8 or unicode *char* character\n SUPPOSE that *char* is an utf-8 or unicode character only\n \"\"\"\n if isinstance(char, unicodeT):\n return __builtin__.ord(char)\n return __builtin__.ord(to_unicode(char, 'utf-8'))\n\n\ndef chr(code):\n \"\"\"Returns utf8-character with *code* unicode id \"\"\"\n return Utf8(unichr(code))\n\n\ndef size(string):\n \"\"\"Returns length of utf-8 string in bytes\n\n Note:\n The length of correspondent utf-8 string is returned for unicode string\n \"\"\"\n return Utf8(string).__size__()\n\n\ndef truncate(string, length, dots='...'):\n \"\"\"Returns string of length < *length* or truncate string with adding\n *dots* suffix to the string's end\n\n Args:\n length (int): max length of string\n dots (str or unicode): string suffix, when string is cutted\n\n Returns:\n (utf8-str): original or cutted string\n \"\"\"\n text = to_unicode(string, 'utf-8')\n dots = to_unicode(dots, 'utf-8') if isinstance(dots, str) else dots\n if len(text) > length:\n text = text[:length - len(dots)] + dots\n return str.__new__(Utf8, text.encode('utf-8'))\n\n\nclass Utf8(str):\n \"\"\"\n Class for utf8 string storing and manipulations\n\n The base presupposition of this class usage is:\n \"ALL strings in the application are either of\n utf-8 or unicode type, even when simple str\n type is used. UTF-8 is only a \"packed\" version\n of unicode, so Utf-8 and unicode strings are\n interchangeable.\"\n\n CAUTION! This class is slower than str/unicode!\n Do NOT use it inside intensive loops. 
Simply\n decode string(s) to unicode before loop and\n encode it back to utf-8 string(s) after\n intensive calculation.\n\n You can see the benefit of this class in doctests() below\n \"\"\"\n def __new__(cls, content='', codepage='utf-8'):\n if isinstance(content, unicodeT):\n return str.__new__(cls, to_native(content, 'utf-8'))\n elif codepage in ('utf-8', 'utf8') or isinstance(content, cls):\n return str.__new__(cls, content)\n else:\n return str.__new__(cls, to_native(to_unicode(content, codepage), 'utf-8'))\n\n def __repr__(self):\n r''' # note that we use raw strings to avoid having to use double back slashes below\n NOTE! This function is a clone of web2py:gluon.languages.utf_repl() function::\n\n utf8.__repr__() works same as str.repr() when processing ascii string\n >>> repr(Utf8('abc')) == repr(Utf8(\"abc\")) == repr('abc') == repr(\"abc\") == \"'abc'\"\n True\n >>> repr(Utf8('a\"b\"c')) == repr('a\"b\"c') == '\\'a\"b\"c\\''\n True\n >>> repr(Utf8(\"a'b'c\")) == repr(\"a'b'c\") == '\"a\\'b\\'c\"'\n True\n >>> repr(Utf8('a\\'b\"c')) == repr('a\\'b\"c') == repr(Utf8(\"a'b\\\"c\")) == repr(\"a'b\\\"c\") == '\\'a\\\\\\'b\"c\\''\n True\n >>> repr(Utf8('a\\r\\nb')) == repr('a\\r\\nb') == \"'a\\\\r\\\\nb'\" # Test for \\r, \\n\n True\n\n Unlike str.repr(), Utf8.__repr__() remains utf8 content when processing utf8 string::\n\n >>> repr(Utf8('中文字')) == repr(Utf8(\"中文字\")) == \"'中文字'\" != repr('中文字')\n True\n >>> repr(Utf8('中\"文\"字')) == \"'中\\\"文\\\"字'\" != repr('中\"文\"字')\n True\n >>> repr(Utf8(\"中'文'字\")) == '\"中\\'文\\'字\"' != repr(\"中'文'字\")\n True\n >>> repr(Utf8('中\\'文\"字')) == repr(Utf8(\"中'文\\\"字\")) == '\\'中\\\\\\'文\"字\\'' != repr('中\\'文\"字') == repr(\"中'文\\\"字\")\n True\n >>> repr(Utf8('中\\r\\n文')) == \"'中\\\\r\\\\n文'\" != repr('中\\r\\n文') # Test for \\r, \\n\n True\n '''\n if str.find(self, \"'\") >= 0 and str.find(self, '\"') < 0: # only single quote exists\n return '\"' + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab), 'utf-8') + '\"'\n else:\n return \"'\" + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab2), 'utf-8') + \"'\"\n\n def __size__(self):\n \"\"\" length of utf-8 string in bytes \"\"\"\n return str.__len__(self)\n\n def __contains__(self, other):\n return str.__contains__(self, Utf8(other))\n\n def __getitem__(self, index):\n return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[index], 'utf-8'))\n\n def __getslice__(self, begin, end):\n return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[begin:end], 'utf-8'))\n\n def __add__(self, other):\n return str.__new__(Utf8, str.__add__(self, unicode.encode(other, 'utf-8')\n if isinstance(other, unicode) else other))\n\n def __len__(self):\n return len(to_unicode(self, 'utf-8'))\n\n def __mul__(self, integer):\n return str.__new__(Utf8, str.__mul__(self, integer))\n\n def __eq__(self, string):\n return str.__eq__(self, Utf8(string))\n\n def __ne__(self, string):\n return str.__ne__(self, Utf8(string))\n\n def capitalize(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').capitalize().encode('utf-8'))\n\n def center(self, length):\n return str.__new__(Utf8, unicode(self, 'utf-8').center(length).encode('utf-8'))\n\n def upper(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').upper().encode('utf-8'))\n\n def lower(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').lower().encode('utf-8'))\n\n def title(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').title().encode('utf-8'))\n\n def index(self, string):\n return unicode(self, 'utf-8').index(string if 
isinstance(string, unicode) else unicode(string, 'utf-8'))\n\n def isalnum(self):\n return unicode(self, 'utf-8').isalnum()\n\n def isalpha(self):\n return unicode(self, 'utf-8').isalpha()\n\n def isdigit(self):\n return unicode(self, 'utf-8').isdigit()\n\n def islower(self):\n return unicode(self, 'utf-8').islower()\n\n def isspace(self):\n return unicode(self, 'utf-8').isspace()\n\n def istitle(self):\n return unicode(self, 'utf-8').istitle()\n\n def isupper(self):\n return unicode(self, 'utf-8').isupper()\n\n def zfill(self, length):\n return str.__new__(Utf8, unicode(self, 'utf-8').zfill(length).encode('utf-8'))\n\n def join(self, iter):\n return str.__new__(Utf8, str.join(self, [Utf8(c) for c in\n list(unicode(iter, 'utf-8') if\n isinstance(iter, str) else\n iter)]))\n\n def lstrip(self, chars=None):\n return str.__new__(Utf8, str.lstrip(self, None if chars is None else Utf8(chars)))\n\n def rstrip(self, chars=None):\n return str.__new__(Utf8, str.rstrip(self, None if chars is None else Utf8(chars)))\n\n def strip(self, chars=None):\n return str.__new__(Utf8, str.strip(self, None if chars is None else Utf8(chars)))\n\n def swapcase(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').swapcase().encode('utf-8'))\n\n def count(self, sub, start=0, end=None):\n unistr = unicode(self, 'utf-8')\n return unistr.count(\n unicode(sub, 'utf-8') if isinstance(sub, str) else sub,\n start, len(unistr) if end is None else end)\n\n def decode(self, encoding='utf-8', errors='strict'):\n return str.decode(self, encoding, errors)\n\n def encode(self, encoding, errors='strict'):\n return unicode(self, 'utf-8').encode(encoding, errors)\n\n def expandtabs(self, tabsize=8):\n return str.__new__(Utf8, unicode(self, 'utf-8').expandtabs(tabsize).encode('utf-8'))\n\n def find(self, sub, start=None, end=None):\n return unicode(self, 'utf-8').find(unicode(sub, 'utf-8')\n if isinstance(sub, str) else sub, start, end)\n\n def ljust(self, width, fillchar=' '):\n return str.__new__(Utf8, unicode(self, 'utf-8').ljust(width, unicode(fillchar, 'utf-8')\n if isinstance(fillchar, str) else fillchar).encode('utf-8'))\n\n def partition(self, sep):\n (head, sep, tail) = str.partition(self, Utf8(sep))\n return (str.__new__(Utf8, head),\n str.__new__(Utf8, sep),\n str.__new__(Utf8, tail))\n\n def replace(self, old, new, count=-1):\n return str.__new__(Utf8, str.replace(self, Utf8(old), Utf8(new), count))\n\n def rfind(self, sub, start=None, end=None):\n return unicode(self, 'utf-8').rfind(unicode(sub, 'utf-8')\n if isinstance(sub, str) else sub, start, end)\n\n def rindex(self, string):\n return unicode(self, 'utf-8').rindex(string if isinstance(string, unicode)\n else unicode(string, 'utf-8'))\n\n def rjust(self, width, fillchar=' '):\n return str.__new__(Utf8, unicode(self, 'utf-8').rjust(width, unicode(fillchar, 'utf-8')\n if isinstance(fillchar, str) else fillchar).encode('utf-8'))\n\n def rpartition(self, sep):\n (head, sep, tail) = str.rpartition(self, Utf8(sep))\n return (str.__new__(Utf8, head),\n str.__new__(Utf8, sep),\n str.__new__(Utf8, tail))\n\n def rsplit(self, sep=None, maxsplit=-1):\n return [str.__new__(Utf8, part) for part in str.rsplit(self,\n None if sep is None else Utf8(sep), maxsplit)]\n\n def split(self, sep=None, maxsplit=-1):\n return [str.__new__(Utf8, part) for part in str.split(self,\n None if sep is None else Utf8(sep), maxsplit)]\n\n def splitlines(self, keepends=False):\n return [str.__new__(Utf8, part) for part in str.splitlines(self, keepends)]\n\n def startswith(self, prefix, start=0, 
end=None):\n unistr = unicode(self, 'utf-8')\n if isinstance(prefix, tuple):\n prefix = tuple(unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in prefix)\n elif isinstance(prefix, str):\n prefix = unicode(prefix, 'utf-8')\n return unistr.startswith(prefix, start, len(unistr) if end is None else end)\n\n def translate(self, table, deletechars=''):\n if isinstance(table, dict):\n return str.__new__(Utf8, unicode(self, 'utf-8').translate(table).encode('utf-8'))\n else:\n return str.__new__(Utf8, str.translate(self, table, deletechars))\n\n def endswith(self, prefix, start=0, end=None):\n unistr = unicode(self, 'utf-8')\n if isinstance(prefix, tuple):\n prefix = tuple(unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in prefix)\n elif isinstance(prefix, str):\n prefix = unicode(prefix, 'utf-8')\n return unistr.endswith(prefix, start, len(unistr) if end is None else end)\n if hasattr(str, 'format'): # Python 2.5 hasn't got str.format() method\n def format(self, *args, **kwargs):\n args = [unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in args]\n kwargs = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,\n unicode(v, 'utf-8') if isinstance(v, str) else v)\n for k, v in iteritems(kwargs))\n return str.__new__(Utf8, unicode(self, 'utf-8').format(*args, **kwargs).encode('utf-8'))\n\n def __mod__(self, right):\n if isinstance(right, tuple):\n right = tuple(unicode(v, 'utf-8') if isinstance(v, str) else v\n for v in right)\n elif isinstance(right, dict):\n right = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,\n unicode(v, 'utf-8') if isinstance(v, str) else v)\n for k, v in iteritems(right))\n elif isinstance(right, str):\n right = unicode(right, 'utf-8')\n return str.__new__(Utf8, unicode(self, 'utf-8').__mod__(right).encode('utf-8'))\n\n def __ge__(self, string):\n return sort_key(self) >= sort_key(string)\n\n def __gt__(self, string):\n return sort_key(self) > sort_key(string)\n\n def __le__(self, string):\n return sort_key(self) <= sort_key(string)\n\n def __lt__(self, string):\n return sort_key(self) < sort_key(string)\n\n\nif __name__ == '__main__':\n def doctests():\n u\"\"\"\n doctests:\n >>> test_unicode=u'ПРоба Є PRobe'\n >>> test_unicode_word=u'ПРоба'\n >>> test_number_str='12345'\n >>> test_unicode\n u'\\\\u041f\\\\u0420\\\\u043e\\\\u0431\\\\u0430 \\\\u0404 PRobe'\n >>> print test_unicode\n ПРоба Є PRobe\n >>> test_word=test_unicode_word.encode('utf-8')\n >>> test_str=test_unicode.encode('utf-8')\n >>> s=Utf8(test_str)\n >>> s\n 'ПРоба Є PRobe'\n >>> type(s)\n <class '__main__.Utf8'>\n >>> s == test_str\n True\n >>> len(test_str) # wrong length of utf8-string!\n 19\n >>> len(test_unicode) # RIGHT!\n 13\n >>> len(s) # RIGHT!\n 13\n >>> size(test_str) # size of utf-8 string (in bytes) == len(str)\n 19\n >>> size(test_unicode) # size of unicode string in bytes (packed to utf-8 string)\n 19\n >>> size(s) # size of utf-8 string in bytes\n 19\n >>> try: # utf-8 is a multibyte string. Convert it to unicode for use with builtin ord()\n ... __builtin__.ord('б') # ascii string\n ... except Exception, e:\n ... 
print 'Exception:', e\n Exception: ord() expected a character, but string of length 2 found\n >>> ord('б') # utf8.ord() is used(!!!)\n 1073\n >>> ord(u'б') # utf8.ord() is used(!!!)\n 1073\n >>> ord(s[3]) # utf8.ord() is used(!!!)\n 1073\n >>> chr(ord(s[3])) # utf8.chr() and utf8.chr() is used(!!!)\n 'б'\n >>> type(chr(1073)) # utf8.chr() is used(!!!)\n <class '__main__.Utf8'>\n >>> s=Utf8(test_unicode)\n >>> s\n 'ПРоба Є PRobe'\n >>> s == test_str\n True\n >>> test_str == s\n True\n >>> s == test_unicode\n True\n >>> test_unicode == s\n True\n >>> print test_str.upper() # only ASCII characters uppered\n ПРоба Є PROBE\n >>> print test_unicode.upper() # unicode gives right result\n ПРОБА Є PROBE\n >>> s.upper() # utf8 class use unicode.upper()\n 'ПРОБА Є PROBE'\n >>> type(s.upper())\n <class '__main__.Utf8'>\n >>> s.lower()\n 'проба є probe'\n >>> type(s.lower())\n <class '__main__.Utf8'>\n >>> s.capitalize()\n 'Проба є probe'\n >>> type(s.capitalize())\n <class '__main__.Utf8'>\n >>> len(s)\n 13\n >>> len(test_unicode)\n 13\n >>> s+'. Probe is проба'\n 'ПРоба Є PRobe. Probe is проба'\n >>> type(s+'. Probe is проба')\n <class '__main__.Utf8'>\n >>> s+u'. Probe is проба'\n 'ПРоба Є PRobe. Probe is проба'\n >>> type(s+u'. Probe is проба')\n <class '__main__.Utf8'>\n >>> s+s\n 'ПРоба Є PRobeПРоба Є PRobe'\n >>> type(s+s)\n <class '__main__.Utf8'>\n >>> a=s\n >>> a+=s\n >>> a+=test_unicode\n >>> a+=test_str\n >>> a\n 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'\n >>> type(a)\n <class '__main__.Utf8'>\n >>> s*3\n 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'\n >>> type(s*3)\n <class '__main__.Utf8'>\n >>> a=Utf8(\"-проба-\")\n >>> a*=10\n >>> a\n '-проба--проба--проба--проба--проба--проба--проба--проба--проба--проба-'\n >>> type(a)\n <class '__main__.Utf8'>\n >>> print \"'\"+test_str.center(17)+\"'\" # WRONG RESULT!\n 'ПРоба Є PRobe'\n >>> s.center(17) # RIGHT!\n ' ПРоба Є PRobe '\n >>> type(s.center(17))\n <class '__main__.Utf8'>\n >>> (test_word+test_number_str).isalnum() # WRONG RESULT! non ASCII chars are detected as non alpha\n False\n >>> Utf8(test_word+test_number_str).isalnum()\n True\n >>> s.isalnum()\n False\n >>> test_word.isalpha() # WRONG RESULT! 
Non ASCII characters are detected as non alpha\n False\n >>> Utf8(test_word).isalpha() # RIGHT!\n True\n >>> s.lower().islower()\n True\n >>> s.upper().isupper()\n True\n >>> print test_str.zfill(17) # WRONG RESULT!\n ПРоба Є PRobe\n >>> s.zfill(17) # RIGHT!\n '0000ПРоба Є PRobe'\n >>> type(s.zfill(17))\n <class '__main__.Utf8'>\n >>> s.istitle()\n False\n >>> s.title().istitle()\n True\n >>> Utf8('1234').isdigit()\n True\n >>> Utf8(' \\t').isspace()\n True\n >>> s.join('•|•')\n '•ПРоба Є PRobe|ПРоба Є PRobe•'\n >>> s.join((str('(utf8 тест1)'), unicode('(unicode тест2)','utf-8'), '(ascii test3)'))\n '(utf8 тест1)ПРоба Є PRobe(unicode тест2)ПРоба Є PRobe(ascii test3)'\n >>> type(s)\n <class '__main__.Utf8'>\n >>> s==test_str\n True\n >>> s==test_unicode\n True\n >>> s.swapcase()\n 'прОБА є prOBE'\n >>> type(s.swapcase())\n <class '__main__.Utf8'>\n >>> truncate(s, 10)\n 'ПРоба Є...'\n >>> truncate(s, 20)\n 'ПРоба Є PRobe'\n >>> truncate(s, 10, '•••') # utf-8 string as *dots*\n 'ПРоба Є•••'\n >>> truncate(s, 10, u'®') # you can use unicode string as *dots*\n 'ПРоба Є P®'\n >>> type(truncate(s, 10))\n <class '__main__.Utf8'>\n >>> Utf8(s.encode('koi8-u'), 'koi8-u')\n 'ПРоба Є PRobe'\n >>> s.decode() # convert utf-8 string to unicode\n u'\\\\u041f\\\\u0420\\\\u043e\\\\u0431\\\\u0430 \\\\u0404 PRobe'\n >>> a='про\\\\tba'\n >>> str_tmp=a.expandtabs()\n >>> utf8_tmp=Utf8(a).expandtabs()\n >>> utf8_tmp.replace(' ','.') # RIGHT! (default tabsize is 8)\n 'про.....ba'\n >>> utf8_tmp.index('b')\n 8\n >>> print \"'\"+str_tmp.replace(' ','.')+\"'\" # WRONG STRING LENGTH!\n 'про..ba'\n >>> str_tmp.index('b') # WRONG index of 'b' character\n 8\n >>> print \"'\"+a.expandtabs(4).replace(' ','.')+\"'\" # WRONG RESULT!\n 'про..ba'\n >>> Utf8(a).expandtabs(4).replace(' ','.') # RIGHT!\n 'про.ba'\n >>> s.find('Є')\n 6\n >>> s.find(u'Є')\n 6\n >>> s.find(' ', 6)\n 7\n >>> s.rfind(' ')\n 7\n >>> s.partition('Є')\n ('ПРоба ', 'Є', ' PRobe')\n >>> s.partition(u'Є')\n ('ПРоба ', 'Є', ' PRobe')\n >>> (a,b,c) = s.partition('Є')\n >>> type(a), type(b), type(c)\n (<class '__main__.Utf8'>, <class '__main__.Utf8'>, <class '__main__.Utf8'>)\n >>> s.partition(' ')\n ('ПРоба', ' ', 'Є PRobe')\n >>> s.rpartition(' ')\n ('ПРоба Є', ' ', 'PRobe')\n >>> s.index('Є')\n 6\n >>> s.rindex(u'Є')\n 6\n >>> s.index(' ')\n 5\n >>> s.rindex(' ')\n 7\n >>> a=Utf8('а б ц д е а б ц д е а\\\\tб ц д е')\n >>> a.split()\n ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е']\n >>> a.rsplit()\n ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е']\n >>> a.expandtabs().split('б')\n ['а ', ' ц д е а ', ' ц д е а ', ' ц д е']\n >>> a.expandtabs().rsplit('б')\n ['а ', ' ц д е а ', ' ц д е а ', ' ц д е']\n >>> a.expandtabs().split(u'б', 1)\n ['а ', ' ц д е а б ц д е а б ц д е']\n >>> a.expandtabs().rsplit(u'б', 1)\n ['а б ц д е а б ц д е а ', ' ц д е']\n >>> a=Utf8(\"рядок1\\\\nрядок2\\\\nрядок3\")\n >>> a.splitlines()\n ['рядок1', 'рядок2', 'рядок3']\n >>> a.splitlines(True)\n ['рядок1\\\\n', 'рядок2\\\\n', 'рядок3']\n >>> s[6]\n 'Є'\n >>> s[0]\n 'П'\n >>> s[-1]\n 'e'\n >>> s[:10]\n 'ПРоба Є PR'\n >>> s[2:-2:2]\n 'оаЄPo'\n >>> s[::-1]\n 'eboRP Є абоРП'\n >>> s.startswith('ПР')\n True\n >>> s.startswith(('ПР', u'об'),0)\n True\n >>> s.startswith(u'об', 2, 4)\n True\n >>> s.endswith('be')\n True\n >>> s.endswith(('be', 'PR', u'Є'))\n True\n >>> s.endswith('PR', 8, 10)\n True\n >>> s.endswith('Є', -7, -6)\n True\n >>> s.count(' ')\n 2\n >>> s.count(' ',6)\n 1\n >>> s.count(u'Є')\n 1\n >>> s.count('Є', 0, 
5)\n 0\n >>> Utf8(\"Parameters: '%(проба)s', %(probe)04d, %(проба2)s\") % { u\"проба\": s,\n ... \"not used\": \"???\", \"probe\": 2, \"проба2\": u\"ПРоба Probe\" }\n \"Parameters: 'ПРоба Є PRobe', 0002, ПРоба Probe\"\n >>> a=Utf8(u\"Параметр: (%s)-(%s)-[%s]\")\n >>> a%=(s, s[::-1], 1000)\n >>> a\n 'Параметр: (ПРоба Є PRobe)-(eboRP Є абоРП)-[1000]'\n >>> if hasattr(Utf8, 'format'):\n ... Utf8(\"Проба <{0}>, {1}, {param1}, {param2}\").format(s, u\"中文字\",\n ... param1=\"барабан\", param2=1000) == 'Проба <ПРоба Є PRobe>, 中文字, барабан, 1000'\n ... else: # format() method is not used in python with version <2.6:\n ... print True\n True\n >>> u'Б'<u'Ї' # WRONG ORDER!\n False\n >>> 'Б'<'Ї' # WRONG ORDER!\n False\n >>> Utf8('Б')<'Ї' # RIGHT!\n True\n >>> u'д'>u'ґ' # WRONG ORDER!\n False\n >>> Utf8('д')>Utf8('ґ') # RIGHT!\n True\n >>> u'є'<=u'ж' # WRONG ORDER!\n False\n >>> Utf8('є')<=u'ж' # RIGHT!\n True\n >>> Utf8('є')<=u'є'\n True\n >>> u'Ї'>=u'И' # WRONG ORDER!\n False\n >>> Utf8(u'Ї') >= u'И' # RIGHT\n True\n >>> Utf8('Є') >= 'Є'\n True\n >>> a=\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\" # str type\n >>> b=u\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\" # unicode type\n >>> c=Utf8(\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\") # utf8 class\n >>> result = \"\".join(sorted(a))\n >>> result[0:20] # result is not utf8 string, because bytes, not utf8-characters were sorted\n '\\\\x80\\\\x81\\\\x82\\\\x83\\\\x84\\\\x84\\\\x85\\\\x86\\\\x86\\\\x87\\\\x87\\\\x88\\\\x89\\\\x8c\\\\x8e\\\\x8f\\\\x90\\\\x90\\\\x91\\\\x91'\n >>> try:\n ... unicode(result, 'utf-8') # try to convert result (utf-8?) to unicode\n ... except Exception, e:\n ... print 'Exception:', e\n Exception: 'utf8' codec can't decode byte 0x80 in position 0: unexpected code byte\n >>> try: # FAILED! (working with bytes, not with utf8-charactes)\n ... \"\".join( sorted(a, key=sort_key) ) # utf8.sort_key may be used with utf8 or unicode strings only!\n ... except Exception, e:\n ... print 'Exception:', e\n Exception: 'utf8' codec can't decode byte 0xd1 in position 0: unexpected end of data\n >>> print \"\".join( sorted(Utf8(a))) # converting *a* to unicode or utf8-string gives us correct result\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print u\"\".join( sorted(b) ) # WRONG ORDER! Default sort key is used\n ЄІЇАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯабвгдежзийклмнопрстуфхцчшщьюяєіїҐґ\n >>> print u\"\".join( sorted(b, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print \"\".join( sorted(c) ) # RIGHT ORDER! Utf8 \"rich comparison\" methods are used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print \"\".join( sorted(c, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> Utf8().join(sorted(c.decode(), key=sort_key)) # convert to unicode for better performance\n 'аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ'\n >>> for result in sorted([\"Іа\", \"Астро\", u\"гала\", Utf8(\"Гоша\"), \"Єва\", \"шовк\", \"аякс\", \"Їжа\",\n ... \"ґанок\", Utf8(\"Дар'я\"), \"білінг\", \"веб\", u\"Жужа\", \"проба\", u\"тест\",\n ... \"абетка\", \"яблуко\", \"Юляся\", \"Київ\", \"лимонад\", \"ложка\", \"Матриця\",\n ... ], key=sort_key):\n ... 
print result.ljust(20), type(result)\n абетка <type 'str'>\n Астро <type 'str'>\n аякс <type 'str'>\n білінг <type 'str'>\n веб <type 'str'>\n гала <type 'unicode'>\n ґанок <type 'str'>\n Гоша <class '__main__.Utf8'>\n Дар'я <class '__main__.Utf8'>\n Єва <type 'str'>\n Жужа <type 'unicode'>\n Іа <type 'str'>\n Їжа <type 'str'>\n Київ <type 'str'>\n лимонад <type 'str'>\n ложка <type 'str'>\n Матриця <type 'str'>\n проба <type 'str'>\n тест <type 'unicode'>\n шовк <type 'str'>\n Юляся <type 'str'>\n яблуко <type 'str'>\n\n >>> a=Utf8(\"中文字\")\n >>> L=list(a)\n >>> L\n ['中', '文', '字']\n >>> a=\"\".join(L)\n >>> print a\n 中文字\n >>> type(a)\n <type 'str'>\n >>> a=\"中文字\" # standard str type\n >>> L=list(a)\n >>> L\n ['\\\\xe4', '\\\\xb8', '\\\\xad', '\\\\xe6', '\\\\x96', '\\\\x87', '\\\\xe5', '\\\\xad', '\\\\x97']\n >>> from string import maketrans\n >>> str_tab=maketrans('PRobe','12345')\n >>> unicode_tab={ord(u'П'):ord(u'Ж'),\n ... ord(u'Р') : u'Ш',\n ... ord(Utf8('о')) : None, # utf8.ord() is used\n ... ord('б') : None, # -//-//-\n ... ord(u'а') : u\"中文字\",\n ... ord(u'Є') : Utf8('•').decode(), # only unicode type is supported\n ... }\n >>> s.translate(unicode_tab).translate(str_tab, deletechars=' ')\n 'ЖШ中文字•12345'\n \"\"\"\n import sys\n reload(sys)\n sys.setdefaultencoding(\"UTF-8\")\n import doctest\n print(\"DOCTESTS STARTED...\")\n doctest.testmod()\n print(\"DOCTESTS FINISHED\")\n\n doctests()\n", "path": "gluon/utf8.py" } ]
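In the patched version of gluon/utf8.py that follows, `reload` is added to the names imported from `gluon._compat`, since the `reload(sys)` call in the doctests relies on a name that stopped being a builtin in Python 3. A minimal sketch of the usual compatibility shim such a `_compat` module provides (the common pattern, not necessarily web2py's exact code):

```python
# Typical Python 2/3 "reload" shim (assumed pattern, not web2py's verbatim code).
import sys

if sys.version_info[0] == 2:
    reload = reload                # Python 2: reload() is a builtin
else:
    from importlib import reload   # Python 3.4+: reload() lives in importlib
```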
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n| Created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop©gmail.com>\n| for Web2py project\n\nUtilities and class for UTF8 strings managing\n----------------------------------------------\n\"\"\"\nfrom __future__ import print_function\nfrom gluon._compat import builtin as __builtin__, unicodeT, iteritems, to_unicode, to_native, reload\n\n__all__ = ['Utf8']\n\nrepr_escape_tab = {}\n#FIXME PY3\nfor i in range(1, 32):\n repr_escape_tab[i] = to_unicode(\"\\\\\"+\"x%02x\" % i)\nrepr_escape_tab[7] = u'\\\\a'\nrepr_escape_tab[8] = u'\\\\b'\nrepr_escape_tab[9] = u'\\\\t'\nrepr_escape_tab[10] = u'\\\\n'\nrepr_escape_tab[11] = u'\\\\v'\nrepr_escape_tab[12] = u'\\\\f'\nrepr_escape_tab[13] = u'\\\\r'\nrepr_escape_tab[ord('\\\\')] = u'\\\\\\\\'\nrepr_escape_tab2 = repr_escape_tab.copy()\nrepr_escape_tab2[ord('\\'')] = u\"\\\\'\"\n\n\ndef sort_key(s):\n \"\"\"Unicode Collation Algorithm (UCA) (http://www.unicode.org/reports/tr10/)\n is used for utf-8 and unicode strings sorting and for utf-8 strings\n comparison\n\n Note:\n pyuca is a very memory cost module! It loads the whole\n \"allkey.txt\" file (~2mb!) into the memory. But this\n functionality is needed only when sort_key() is called as a\n part of sort() function or when Utf8 strings are compared.\n\n So, it is a lazy \"sort_key\" function which (ONLY ONCE, ON ITS\n FIRST CALL) imports pyuca and replaces itself with a real\n sort_key() function\n \"\"\"\n global sort_key\n try:\n from gluon.contrib.pyuca import unicode_collator\n unicode_sort_key = unicode_collator.sort_key\n sort_key = lambda s: unicode_sort_key(\n to_unicode(s, 'utf-8') if isinstance(s, str) else s)\n except:\n sort_key = lambda s: (\n to_unicode(s, 'utf-8') if isinstance(s, str) else s).lower()\n return sort_key(s)\n\n\ndef ord(char):\n \"\"\"Returns unicode id for utf8 or unicode *char* character\n SUPPOSE that *char* is an utf-8 or unicode character only\n \"\"\"\n if isinstance(char, unicodeT):\n return __builtin__.ord(char)\n return __builtin__.ord(to_unicode(char, 'utf-8'))\n\n\ndef chr(code):\n \"\"\"Returns utf8-character with *code* unicode id \"\"\"\n return Utf8(unichr(code))\n\n\ndef size(string):\n \"\"\"Returns length of utf-8 string in bytes\n\n Note:\n The length of correspondent utf-8 string is returned for unicode string\n \"\"\"\n return Utf8(string).__size__()\n\n\ndef truncate(string, length, dots='...'):\n \"\"\"Returns string of length < *length* or truncate string with adding\n *dots* suffix to the string's end\n\n Args:\n length (int): max length of string\n dots (str or unicode): string suffix, when string is cutted\n\n Returns:\n (utf8-str): original or cutted string\n \"\"\"\n text = to_unicode(string, 'utf-8')\n dots = to_unicode(dots, 'utf-8') if isinstance(dots, str) else dots\n if len(text) > length:\n text = text[:length - len(dots)] + dots\n return str.__new__(Utf8, text.encode('utf-8'))\n\n\nclass Utf8(str):\n \"\"\"\n Class for utf8 string storing and manipulations\n\n The base presupposition of this class usage is:\n \"ALL strings in the application are either of\n utf-8 or unicode type, even when simple str\n type is used. UTF-8 is only a \"packed\" version\n of unicode, so Utf-8 and unicode strings are\n interchangeable.\"\n\n CAUTION! 
This class is slower than str/unicode!\n Do NOT use it inside intensive loops. Simply\n decode string(s) to unicode before loop and\n encode it back to utf-8 string(s) after\n intensive calculation.\n\n You can see the benefit of this class in doctests() below\n \"\"\"\n def __new__(cls, content='', codepage='utf-8'):\n if isinstance(content, unicodeT):\n return str.__new__(cls, to_native(content, 'utf-8'))\n elif codepage in ('utf-8', 'utf8') or isinstance(content, cls):\n return str.__new__(cls, content)\n else:\n return str.__new__(cls, to_native(to_unicode(content, codepage), 'utf-8'))\n\n def __repr__(self):\n r''' # note that we use raw strings to avoid having to use double back slashes below\n NOTE! This function is a clone of web2py:gluon.languages.utf_repl() function::\n\n utf8.__repr__() works same as str.repr() when processing ascii string\n >>> repr(Utf8('abc')) == repr(Utf8(\"abc\")) == repr('abc') == repr(\"abc\") == \"'abc'\"\n True\n >>> repr(Utf8('a\"b\"c')) == repr('a\"b\"c') == '\\'a\"b\"c\\''\n True\n >>> repr(Utf8(\"a'b'c\")) == repr(\"a'b'c\") == '\"a\\'b\\'c\"'\n True\n >>> repr(Utf8('a\\'b\"c')) == repr('a\\'b\"c') == repr(Utf8(\"a'b\\\"c\")) == repr(\"a'b\\\"c\") == '\\'a\\\\\\'b\"c\\''\n True\n >>> repr(Utf8('a\\r\\nb')) == repr('a\\r\\nb') == \"'a\\\\r\\\\nb'\" # Test for \\r, \\n\n True\n\n Unlike str.repr(), Utf8.__repr__() remains utf8 content when processing utf8 string::\n\n >>> repr(Utf8('中文字')) == repr(Utf8(\"中文字\")) == \"'中文字'\" != repr('中文字')\n True\n >>> repr(Utf8('中\"文\"字')) == \"'中\\\"文\\\"字'\" != repr('中\"文\"字')\n True\n >>> repr(Utf8(\"中'文'字\")) == '\"中\\'文\\'字\"' != repr(\"中'文'字\")\n True\n >>> repr(Utf8('中\\'文\"字')) == repr(Utf8(\"中'文\\\"字\")) == '\\'中\\\\\\'文\"字\\'' != repr('中\\'文\"字') == repr(\"中'文\\\"字\")\n True\n >>> repr(Utf8('中\\r\\n文')) == \"'中\\\\r\\\\n文'\" != repr('中\\r\\n文') # Test for \\r, \\n\n True\n '''\n if str.find(self, \"'\") >= 0 and str.find(self, '\"') < 0: # only single quote exists\n return '\"' + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab), 'utf-8') + '\"'\n else:\n return \"'\" + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab2), 'utf-8') + \"'\"\n\n def __size__(self):\n \"\"\" length of utf-8 string in bytes \"\"\"\n return str.__len__(self)\n\n def __contains__(self, other):\n return str.__contains__(self, Utf8(other))\n\n def __getitem__(self, index):\n return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[index], 'utf-8'))\n\n def __getslice__(self, begin, end):\n return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[begin:end], 'utf-8'))\n\n def __add__(self, other):\n return str.__new__(Utf8, str.__add__(self, unicode.encode(other, 'utf-8')\n if isinstance(other, unicode) else other))\n\n def __len__(self):\n return len(to_unicode(self, 'utf-8'))\n\n def __mul__(self, integer):\n return str.__new__(Utf8, str.__mul__(self, integer))\n\n def __eq__(self, string):\n return str.__eq__(self, Utf8(string))\n\n def __ne__(self, string):\n return str.__ne__(self, Utf8(string))\n\n def capitalize(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').capitalize().encode('utf-8'))\n\n def center(self, length):\n return str.__new__(Utf8, unicode(self, 'utf-8').center(length).encode('utf-8'))\n\n def upper(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').upper().encode('utf-8'))\n\n def lower(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').lower().encode('utf-8'))\n\n def title(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').title().encode('utf-8'))\n\n 
def index(self, string):\n return unicode(self, 'utf-8').index(string if isinstance(string, unicode) else unicode(string, 'utf-8'))\n\n def isalnum(self):\n return unicode(self, 'utf-8').isalnum()\n\n def isalpha(self):\n return unicode(self, 'utf-8').isalpha()\n\n def isdigit(self):\n return unicode(self, 'utf-8').isdigit()\n\n def islower(self):\n return unicode(self, 'utf-8').islower()\n\n def isspace(self):\n return unicode(self, 'utf-8').isspace()\n\n def istitle(self):\n return unicode(self, 'utf-8').istitle()\n\n def isupper(self):\n return unicode(self, 'utf-8').isupper()\n\n def zfill(self, length):\n return str.__new__(Utf8, unicode(self, 'utf-8').zfill(length).encode('utf-8'))\n\n def join(self, iter):\n return str.__new__(Utf8, str.join(self, [Utf8(c) for c in\n list(unicode(iter, 'utf-8') if\n isinstance(iter, str) else\n iter)]))\n\n def lstrip(self, chars=None):\n return str.__new__(Utf8, str.lstrip(self, None if chars is None else Utf8(chars)))\n\n def rstrip(self, chars=None):\n return str.__new__(Utf8, str.rstrip(self, None if chars is None else Utf8(chars)))\n\n def strip(self, chars=None):\n return str.__new__(Utf8, str.strip(self, None if chars is None else Utf8(chars)))\n\n def swapcase(self):\n return str.__new__(Utf8, unicode(self, 'utf-8').swapcase().encode('utf-8'))\n\n def count(self, sub, start=0, end=None):\n unistr = unicode(self, 'utf-8')\n return unistr.count(\n unicode(sub, 'utf-8') if isinstance(sub, str) else sub,\n start, len(unistr) if end is None else end)\n\n def decode(self, encoding='utf-8', errors='strict'):\n return str.decode(self, encoding, errors)\n\n def encode(self, encoding, errors='strict'):\n return unicode(self, 'utf-8').encode(encoding, errors)\n\n def expandtabs(self, tabsize=8):\n return str.__new__(Utf8, unicode(self, 'utf-8').expandtabs(tabsize).encode('utf-8'))\n\n def find(self, sub, start=None, end=None):\n return unicode(self, 'utf-8').find(unicode(sub, 'utf-8')\n if isinstance(sub, str) else sub, start, end)\n\n def ljust(self, width, fillchar=' '):\n return str.__new__(Utf8, unicode(self, 'utf-8').ljust(width, unicode(fillchar, 'utf-8')\n if isinstance(fillchar, str) else fillchar).encode('utf-8'))\n\n def partition(self, sep):\n (head, sep, tail) = str.partition(self, Utf8(sep))\n return (str.__new__(Utf8, head),\n str.__new__(Utf8, sep),\n str.__new__(Utf8, tail))\n\n def replace(self, old, new, count=-1):\n return str.__new__(Utf8, str.replace(self, Utf8(old), Utf8(new), count))\n\n def rfind(self, sub, start=None, end=None):\n return unicode(self, 'utf-8').rfind(unicode(sub, 'utf-8')\n if isinstance(sub, str) else sub, start, end)\n\n def rindex(self, string):\n return unicode(self, 'utf-8').rindex(string if isinstance(string, unicode)\n else unicode(string, 'utf-8'))\n\n def rjust(self, width, fillchar=' '):\n return str.__new__(Utf8, unicode(self, 'utf-8').rjust(width, unicode(fillchar, 'utf-8')\n if isinstance(fillchar, str) else fillchar).encode('utf-8'))\n\n def rpartition(self, sep):\n (head, sep, tail) = str.rpartition(self, Utf8(sep))\n return (str.__new__(Utf8, head),\n str.__new__(Utf8, sep),\n str.__new__(Utf8, tail))\n\n def rsplit(self, sep=None, maxsplit=-1):\n return [str.__new__(Utf8, part) for part in str.rsplit(self,\n None if sep is None else Utf8(sep), maxsplit)]\n\n def split(self, sep=None, maxsplit=-1):\n return [str.__new__(Utf8, part) for part in str.split(self,\n None if sep is None else Utf8(sep), maxsplit)]\n\n def splitlines(self, keepends=False):\n return [str.__new__(Utf8, part) for part in 
str.splitlines(self, keepends)]\n\n def startswith(self, prefix, start=0, end=None):\n unistr = unicode(self, 'utf-8')\n if isinstance(prefix, tuple):\n prefix = tuple(unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in prefix)\n elif isinstance(prefix, str):\n prefix = unicode(prefix, 'utf-8')\n return unistr.startswith(prefix, start, len(unistr) if end is None else end)\n\n def translate(self, table, deletechars=''):\n if isinstance(table, dict):\n return str.__new__(Utf8, unicode(self, 'utf-8').translate(table).encode('utf-8'))\n else:\n return str.__new__(Utf8, str.translate(self, table, deletechars))\n\n def endswith(self, prefix, start=0, end=None):\n unistr = unicode(self, 'utf-8')\n if isinstance(prefix, tuple):\n prefix = tuple(unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in prefix)\n elif isinstance(prefix, str):\n prefix = unicode(prefix, 'utf-8')\n return unistr.endswith(prefix, start, len(unistr) if end is None else end)\n if hasattr(str, 'format'): # Python 2.5 hasn't got str.format() method\n def format(self, *args, **kwargs):\n args = [unicode(\n s, 'utf-8') if isinstance(s, str) else s for s in args]\n kwargs = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,\n unicode(v, 'utf-8') if isinstance(v, str) else v)\n for k, v in iteritems(kwargs))\n return str.__new__(Utf8, unicode(self, 'utf-8').format(*args, **kwargs).encode('utf-8'))\n\n def __mod__(self, right):\n if isinstance(right, tuple):\n right = tuple(unicode(v, 'utf-8') if isinstance(v, str) else v\n for v in right)\n elif isinstance(right, dict):\n right = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,\n unicode(v, 'utf-8') if isinstance(v, str) else v)\n for k, v in iteritems(right))\n elif isinstance(right, str):\n right = unicode(right, 'utf-8')\n return str.__new__(Utf8, unicode(self, 'utf-8').__mod__(right).encode('utf-8'))\n\n def __ge__(self, string):\n return sort_key(self) >= sort_key(string)\n\n def __gt__(self, string):\n return sort_key(self) > sort_key(string)\n\n def __le__(self, string):\n return sort_key(self) <= sort_key(string)\n\n def __lt__(self, string):\n return sort_key(self) < sort_key(string)\n\n\nif __name__ == '__main__':\n def doctests():\n u\"\"\"\n doctests:\n >>> test_unicode=u'ПРоба Є PRobe'\n >>> test_unicode_word=u'ПРоба'\n >>> test_number_str='12345'\n >>> test_unicode\n u'\\\\u041f\\\\u0420\\\\u043e\\\\u0431\\\\u0430 \\\\u0404 PRobe'\n >>> print test_unicode\n ПРоба Є PRobe\n >>> test_word=test_unicode_word.encode('utf-8')\n >>> test_str=test_unicode.encode('utf-8')\n >>> s=Utf8(test_str)\n >>> s\n 'ПРоба Є PRobe'\n >>> type(s)\n <class '__main__.Utf8'>\n >>> s == test_str\n True\n >>> len(test_str) # wrong length of utf8-string!\n 19\n >>> len(test_unicode) # RIGHT!\n 13\n >>> len(s) # RIGHT!\n 13\n >>> size(test_str) # size of utf-8 string (in bytes) == len(str)\n 19\n >>> size(test_unicode) # size of unicode string in bytes (packed to utf-8 string)\n 19\n >>> size(s) # size of utf-8 string in bytes\n 19\n >>> try: # utf-8 is a multibyte string. Convert it to unicode for use with builtin ord()\n ... __builtin__.ord('б') # ascii string\n ... except Exception, e:\n ... 
print 'Exception:', e\n Exception: ord() expected a character, but string of length 2 found\n >>> ord('б') # utf8.ord() is used(!!!)\n 1073\n >>> ord(u'б') # utf8.ord() is used(!!!)\n 1073\n >>> ord(s[3]) # utf8.ord() is used(!!!)\n 1073\n >>> chr(ord(s[3])) # utf8.chr() and utf8.chr() is used(!!!)\n 'б'\n >>> type(chr(1073)) # utf8.chr() is used(!!!)\n <class '__main__.Utf8'>\n >>> s=Utf8(test_unicode)\n >>> s\n 'ПРоба Є PRobe'\n >>> s == test_str\n True\n >>> test_str == s\n True\n >>> s == test_unicode\n True\n >>> test_unicode == s\n True\n >>> print test_str.upper() # only ASCII characters uppered\n ПРоба Є PROBE\n >>> print test_unicode.upper() # unicode gives right result\n ПРОБА Є PROBE\n >>> s.upper() # utf8 class use unicode.upper()\n 'ПРОБА Є PROBE'\n >>> type(s.upper())\n <class '__main__.Utf8'>\n >>> s.lower()\n 'проба є probe'\n >>> type(s.lower())\n <class '__main__.Utf8'>\n >>> s.capitalize()\n 'Проба є probe'\n >>> type(s.capitalize())\n <class '__main__.Utf8'>\n >>> len(s)\n 13\n >>> len(test_unicode)\n 13\n >>> s+'. Probe is проба'\n 'ПРоба Є PRobe. Probe is проба'\n >>> type(s+'. Probe is проба')\n <class '__main__.Utf8'>\n >>> s+u'. Probe is проба'\n 'ПРоба Є PRobe. Probe is проба'\n >>> type(s+u'. Probe is проба')\n <class '__main__.Utf8'>\n >>> s+s\n 'ПРоба Є PRobeПРоба Є PRobe'\n >>> type(s+s)\n <class '__main__.Utf8'>\n >>> a=s\n >>> a+=s\n >>> a+=test_unicode\n >>> a+=test_str\n >>> a\n 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'\n >>> type(a)\n <class '__main__.Utf8'>\n >>> s*3\n 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'\n >>> type(s*3)\n <class '__main__.Utf8'>\n >>> a=Utf8(\"-проба-\")\n >>> a*=10\n >>> a\n '-проба--проба--проба--проба--проба--проба--проба--проба--проба--проба-'\n >>> type(a)\n <class '__main__.Utf8'>\n >>> print \"'\"+test_str.center(17)+\"'\" # WRONG RESULT!\n 'ПРоба Є PRobe'\n >>> s.center(17) # RIGHT!\n ' ПРоба Є PRobe '\n >>> type(s.center(17))\n <class '__main__.Utf8'>\n >>> (test_word+test_number_str).isalnum() # WRONG RESULT! non ASCII chars are detected as non alpha\n False\n >>> Utf8(test_word+test_number_str).isalnum()\n True\n >>> s.isalnum()\n False\n >>> test_word.isalpha() # WRONG RESULT! 
Non ASCII characters are detected as non alpha\n False\n >>> Utf8(test_word).isalpha() # RIGHT!\n True\n >>> s.lower().islower()\n True\n >>> s.upper().isupper()\n True\n >>> print test_str.zfill(17) # WRONG RESULT!\n ПРоба Є PRobe\n >>> s.zfill(17) # RIGHT!\n '0000ПРоба Є PRobe'\n >>> type(s.zfill(17))\n <class '__main__.Utf8'>\n >>> s.istitle()\n False\n >>> s.title().istitle()\n True\n >>> Utf8('1234').isdigit()\n True\n >>> Utf8(' \\t').isspace()\n True\n >>> s.join('•|•')\n '•ПРоба Є PRobe|ПРоба Є PRobe•'\n >>> s.join((str('(utf8 тест1)'), unicode('(unicode тест2)','utf-8'), '(ascii test3)'))\n '(utf8 тест1)ПРоба Є PRobe(unicode тест2)ПРоба Є PRobe(ascii test3)'\n >>> type(s)\n <class '__main__.Utf8'>\n >>> s==test_str\n True\n >>> s==test_unicode\n True\n >>> s.swapcase()\n 'прОБА є prOBE'\n >>> type(s.swapcase())\n <class '__main__.Utf8'>\n >>> truncate(s, 10)\n 'ПРоба Є...'\n >>> truncate(s, 20)\n 'ПРоба Є PRobe'\n >>> truncate(s, 10, '•••') # utf-8 string as *dots*\n 'ПРоба Є•••'\n >>> truncate(s, 10, u'®') # you can use unicode string as *dots*\n 'ПРоба Є P®'\n >>> type(truncate(s, 10))\n <class '__main__.Utf8'>\n >>> Utf8(s.encode('koi8-u'), 'koi8-u')\n 'ПРоба Є PRobe'\n >>> s.decode() # convert utf-8 string to unicode\n u'\\\\u041f\\\\u0420\\\\u043e\\\\u0431\\\\u0430 \\\\u0404 PRobe'\n >>> a='про\\\\tba'\n >>> str_tmp=a.expandtabs()\n >>> utf8_tmp=Utf8(a).expandtabs()\n >>> utf8_tmp.replace(' ','.') # RIGHT! (default tabsize is 8)\n 'про.....ba'\n >>> utf8_tmp.index('b')\n 8\n >>> print \"'\"+str_tmp.replace(' ','.')+\"'\" # WRONG STRING LENGTH!\n 'про..ba'\n >>> str_tmp.index('b') # WRONG index of 'b' character\n 8\n >>> print \"'\"+a.expandtabs(4).replace(' ','.')+\"'\" # WRONG RESULT!\n 'про..ba'\n >>> Utf8(a).expandtabs(4).replace(' ','.') # RIGHT!\n 'про.ba'\n >>> s.find('Є')\n 6\n >>> s.find(u'Є')\n 6\n >>> s.find(' ', 6)\n 7\n >>> s.rfind(' ')\n 7\n >>> s.partition('Є')\n ('ПРоба ', 'Є', ' PRobe')\n >>> s.partition(u'Є')\n ('ПРоба ', 'Є', ' PRobe')\n >>> (a,b,c) = s.partition('Є')\n >>> type(a), type(b), type(c)\n (<class '__main__.Utf8'>, <class '__main__.Utf8'>, <class '__main__.Utf8'>)\n >>> s.partition(' ')\n ('ПРоба', ' ', 'Є PRobe')\n >>> s.rpartition(' ')\n ('ПРоба Є', ' ', 'PRobe')\n >>> s.index('Є')\n 6\n >>> s.rindex(u'Є')\n 6\n >>> s.index(' ')\n 5\n >>> s.rindex(' ')\n 7\n >>> a=Utf8('а б ц д е а б ц д е а\\\\tб ц д е')\n >>> a.split()\n ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е']\n >>> a.rsplit()\n ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е']\n >>> a.expandtabs().split('б')\n ['а ', ' ц д е а ', ' ц д е а ', ' ц д е']\n >>> a.expandtabs().rsplit('б')\n ['а ', ' ц д е а ', ' ц д е а ', ' ц д е']\n >>> a.expandtabs().split(u'б', 1)\n ['а ', ' ц д е а б ц д е а б ц д е']\n >>> a.expandtabs().rsplit(u'б', 1)\n ['а б ц д е а б ц д е а ', ' ц д е']\n >>> a=Utf8(\"рядок1\\\\nрядок2\\\\nрядок3\")\n >>> a.splitlines()\n ['рядок1', 'рядок2', 'рядок3']\n >>> a.splitlines(True)\n ['рядок1\\\\n', 'рядок2\\\\n', 'рядок3']\n >>> s[6]\n 'Є'\n >>> s[0]\n 'П'\n >>> s[-1]\n 'e'\n >>> s[:10]\n 'ПРоба Є PR'\n >>> s[2:-2:2]\n 'оаЄPo'\n >>> s[::-1]\n 'eboRP Є абоРП'\n >>> s.startswith('ПР')\n True\n >>> s.startswith(('ПР', u'об'),0)\n True\n >>> s.startswith(u'об', 2, 4)\n True\n >>> s.endswith('be')\n True\n >>> s.endswith(('be', 'PR', u'Є'))\n True\n >>> s.endswith('PR', 8, 10)\n True\n >>> s.endswith('Є', -7, -6)\n True\n >>> s.count(' ')\n 2\n >>> s.count(' ',6)\n 1\n >>> s.count(u'Є')\n 1\n >>> s.count('Є', 0, 
5)\n 0\n >>> Utf8(\"Parameters: '%(проба)s', %(probe)04d, %(проба2)s\") % { u\"проба\": s,\n ... \"not used\": \"???\", \"probe\": 2, \"проба2\": u\"ПРоба Probe\" }\n \"Parameters: 'ПРоба Є PRobe', 0002, ПРоба Probe\"\n >>> a=Utf8(u\"Параметр: (%s)-(%s)-[%s]\")\n >>> a%=(s, s[::-1], 1000)\n >>> a\n 'Параметр: (ПРоба Є PRobe)-(eboRP Є абоРП)-[1000]'\n >>> if hasattr(Utf8, 'format'):\n ... Utf8(\"Проба <{0}>, {1}, {param1}, {param2}\").format(s, u\"中文字\",\n ... param1=\"барабан\", param2=1000) == 'Проба <ПРоба Є PRobe>, 中文字, барабан, 1000'\n ... else: # format() method is not used in python with version <2.6:\n ... print True\n True\n >>> u'Б'<u'Ї' # WRONG ORDER!\n False\n >>> 'Б'<'Ї' # WRONG ORDER!\n False\n >>> Utf8('Б')<'Ї' # RIGHT!\n True\n >>> u'д'>u'ґ' # WRONG ORDER!\n False\n >>> Utf8('д')>Utf8('ґ') # RIGHT!\n True\n >>> u'є'<=u'ж' # WRONG ORDER!\n False\n >>> Utf8('є')<=u'ж' # RIGHT!\n True\n >>> Utf8('є')<=u'є'\n True\n >>> u'Ї'>=u'И' # WRONG ORDER!\n False\n >>> Utf8(u'Ї') >= u'И' # RIGHT\n True\n >>> Utf8('Є') >= 'Є'\n True\n >>> a=\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\" # str type\n >>> b=u\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\" # unicode type\n >>> c=Utf8(\"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ\") # utf8 class\n >>> result = \"\".join(sorted(a))\n >>> result[0:20] # result is not utf8 string, because bytes, not utf8-characters were sorted\n '\\\\x80\\\\x81\\\\x82\\\\x83\\\\x84\\\\x84\\\\x85\\\\x86\\\\x86\\\\x87\\\\x87\\\\x88\\\\x89\\\\x8c\\\\x8e\\\\x8f\\\\x90\\\\x90\\\\x91\\\\x91'\n >>> try:\n ... unicode(result, 'utf-8') # try to convert result (utf-8?) to unicode\n ... except Exception, e:\n ... print 'Exception:', e\n Exception: 'utf8' codec can't decode byte 0x80 in position 0: unexpected code byte\n >>> try: # FAILED! (working with bytes, not with utf8-charactes)\n ... \"\".join( sorted(a, key=sort_key) ) # utf8.sort_key may be used with utf8 or unicode strings only!\n ... except Exception, e:\n ... print 'Exception:', e\n Exception: 'utf8' codec can't decode byte 0xd1 in position 0: unexpected end of data\n >>> print \"\".join( sorted(Utf8(a))) # converting *a* to unicode or utf8-string gives us correct result\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print u\"\".join( sorted(b) ) # WRONG ORDER! Default sort key is used\n ЄІЇАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯабвгдежзийклмнопрстуфхцчшщьюяєіїҐґ\n >>> print u\"\".join( sorted(b, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print \"\".join( sorted(c) ) # RIGHT ORDER! Utf8 \"rich comparison\" methods are used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> print \"\".join( sorted(c, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used\n аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ\n >>> Utf8().join(sorted(c.decode(), key=sort_key)) # convert to unicode for better performance\n 'аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ'\n >>> for result in sorted([\"Іа\", \"Астро\", u\"гала\", Utf8(\"Гоша\"), \"Єва\", \"шовк\", \"аякс\", \"Їжа\",\n ... \"ґанок\", Utf8(\"Дар'я\"), \"білінг\", \"веб\", u\"Жужа\", \"проба\", u\"тест\",\n ... \"абетка\", \"яблуко\", \"Юляся\", \"Київ\", \"лимонад\", \"ложка\", \"Матриця\",\n ... ], key=sort_key):\n ... 
print result.ljust(20), type(result)\n абетка <type 'str'>\n Астро <type 'str'>\n аякс <type 'str'>\n білінг <type 'str'>\n веб <type 'str'>\n гала <type 'unicode'>\n ґанок <type 'str'>\n Гоша <class '__main__.Utf8'>\n Дар'я <class '__main__.Utf8'>\n Єва <type 'str'>\n Жужа <type 'unicode'>\n Іа <type 'str'>\n Їжа <type 'str'>\n Київ <type 'str'>\n лимонад <type 'str'>\n ложка <type 'str'>\n Матриця <type 'str'>\n проба <type 'str'>\n тест <type 'unicode'>\n шовк <type 'str'>\n Юляся <type 'str'>\n яблуко <type 'str'>\n\n >>> a=Utf8(\"中文字\")\n >>> L=list(a)\n >>> L\n ['中', '文', '字']\n >>> a=\"\".join(L)\n >>> print a\n 中文字\n >>> type(a)\n <type 'str'>\n >>> a=\"中文字\" # standard str type\n >>> L=list(a)\n >>> L\n ['\\\\xe4', '\\\\xb8', '\\\\xad', '\\\\xe6', '\\\\x96', '\\\\x87', '\\\\xe5', '\\\\xad', '\\\\x97']\n >>> from string import maketrans\n >>> str_tab=maketrans('PRobe','12345')\n >>> unicode_tab={ord(u'П'):ord(u'Ж'),\n ... ord(u'Р') : u'Ш',\n ... ord(Utf8('о')) : None, # utf8.ord() is used\n ... ord('б') : None, # -//-//-\n ... ord(u'а') : u\"中文字\",\n ... ord(u'Є') : Utf8('•').decode(), # only unicode type is supported\n ... }\n >>> s.translate(unicode_tab).translate(str_tab, deletechars=' ')\n 'ЖШ中文字•12345'\n \"\"\"\n import sys\n reload(sys)\n sys.setdefaultencoding(\"UTF-8\")\n import doctest\n print(\"DOCTESTS STARTED...\")\n doctest.testmod()\n print(\"DOCTESTS FINISHED\")\n\n doctests()\n", "path": "gluon/utf8.py" } ]
diff --git a/gluon/utf8.py b/gluon/utf8.py index 21fd12c4a..34bfa87fe 100644 --- a/gluon/utf8.py +++ b/gluon/utf8.py @@ -11,7 +11,7 @@ ---------------------------------------------- """ from __future__ import print_function -from gluon._compat import builtin as __builtin__, unicodeT, iteritems, to_unicode, to_native +from gluon._compat import builtin as __builtin__, unicodeT, iteritems, to_unicode, to_native, reload __all__ = ['Utf8']
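For context on the one-line patch above: the module's doctests call `reload(sys)`, and `reload()` is a builtin on Python 2 but lives in `importlib` on Python 3, so the name is now pulled from `gluon._compat` alongside the other 2/3 shims. The actual `gluon._compat` implementation is not shown in this row; a typical shim for this name looks something like the sketch below, which is illustrative only.

```python
# Illustrative only -- not the actual gluon._compat source.
try:
    from importlib import reload   # Python 3.4+: reload() moved into importlib
except ImportError:
    from __builtin__ import reload  # Python 2: reload() is a builtin
```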
encode__django-rest-framework-9278
_delegate_text was removed in Django 5.

## Checklist
- [ ] Raised initially as discussion #...
- [x] This cannot be dealt with as a third party library. (We prefer new functionality to be [in the form of third party libraries](https://www.django-rest-framework.org/community/third-party-packages/#about-third-party-packages) where possible.)
- [ ] I have reduced the issue to the simplest possible case.

https://github.com/encode/django-rest-framework/blob/4c7c693f1555689cda88eb59a4353db69aa6f5b1/rest_framework/utils/representation.py#L30-L31

`force_str` has a built-in parameter called `strings_only`. The Django version should therefore be checked, and on Django 5 the following should be used instead of relying on `_delegate_text`:

```python
value = force_str(value, strings_only=True)
```
[ { "content": "\"\"\"\nHelper functions for creating user-friendly representations\nof serializer classes and serializer fields.\n\"\"\"\nimport re\n\nfrom django.db import models\nfrom django.utils.encoding import force_str\nfrom django.utils.functional import Promise\n\n\ndef manager_repr(value):\n model = value.model\n opts = model._meta\n names_and_managers = [\n (manager.name, manager)\n for manager\n in opts.managers\n ]\n for manager_name, manager_instance in names_and_managers:\n if manager_instance == value:\n return '%s.%s.all()' % (model._meta.object_name, manager_name)\n return repr(value)\n\n\ndef smart_repr(value):\n if isinstance(value, models.Manager):\n return manager_repr(value)\n\n if isinstance(value, Promise) and value._delegate_text:\n value = force_str(value)\n\n value = repr(value)\n\n # Representations like u'help text'\n # should simply be presented as 'help text'\n if value.startswith(\"u'\") and value.endswith(\"'\"):\n return value[1:]\n\n # Representations like\n # <django.core.validators.RegexValidator object at 0x1047af050>\n # Should be presented as\n # <django.core.validators.RegexValidator object>\n return re.sub(' at 0x[0-9A-Fa-f]{4,32}>', '>', value)\n\n\ndef field_repr(field, force_many=False):\n kwargs = field._kwargs\n if force_many:\n kwargs = kwargs.copy()\n kwargs['many'] = True\n kwargs.pop('child', None)\n\n arg_string = ', '.join([smart_repr(val) for val in field._args])\n kwarg_string = ', '.join([\n '%s=%s' % (key, smart_repr(val))\n for key, val in sorted(kwargs.items())\n ])\n if arg_string and kwarg_string:\n arg_string += ', '\n\n if force_many:\n class_name = force_many.__class__.__name__\n else:\n class_name = field.__class__.__name__\n\n return \"%s(%s%s)\" % (class_name, arg_string, kwarg_string)\n\n\ndef serializer_repr(serializer, indent, force_many=None):\n ret = field_repr(serializer, force_many) + ':'\n indent_str = ' ' * indent\n\n if force_many:\n fields = force_many.fields\n else:\n fields = serializer.fields\n\n for field_name, field in fields.items():\n ret += '\\n' + indent_str + field_name + ' = '\n if hasattr(field, 'fields'):\n ret += serializer_repr(field, indent + 1)\n elif hasattr(field, 'child'):\n ret += list_repr(field, indent + 1)\n elif hasattr(field, 'child_relation'):\n ret += field_repr(field.child_relation, force_many=field.child_relation)\n else:\n ret += field_repr(field)\n\n if serializer.validators:\n ret += '\\n' + indent_str + 'class Meta:'\n ret += '\\n' + indent_str + ' validators = ' + smart_repr(serializer.validators)\n\n return ret\n\n\ndef list_repr(serializer, indent):\n child = serializer.child\n if hasattr(child, 'fields'):\n return serializer_repr(serializer, indent, force_many=child)\n return field_repr(serializer)\n", "path": "rest_framework/utils/representation.py" } ]
[ { "content": "\"\"\"\nHelper functions for creating user-friendly representations\nof serializer classes and serializer fields.\n\"\"\"\nimport re\n\nfrom django.db import models\nfrom django.utils.encoding import force_str\nfrom django.utils.functional import Promise\n\n\ndef manager_repr(value):\n model = value.model\n opts = model._meta\n names_and_managers = [\n (manager.name, manager)\n for manager\n in opts.managers\n ]\n for manager_name, manager_instance in names_and_managers:\n if manager_instance == value:\n return '%s.%s.all()' % (model._meta.object_name, manager_name)\n return repr(value)\n\n\ndef smart_repr(value):\n if isinstance(value, models.Manager):\n return manager_repr(value)\n\n if isinstance(value, Promise):\n value = force_str(value, strings_only=True)\n\n value = repr(value)\n\n # Representations like u'help text'\n # should simply be presented as 'help text'\n if value.startswith(\"u'\") and value.endswith(\"'\"):\n return value[1:]\n\n # Representations like\n # <django.core.validators.RegexValidator object at 0x1047af050>\n # Should be presented as\n # <django.core.validators.RegexValidator object>\n return re.sub(' at 0x[0-9A-Fa-f]{4,32}>', '>', value)\n\n\ndef field_repr(field, force_many=False):\n kwargs = field._kwargs\n if force_many:\n kwargs = kwargs.copy()\n kwargs['many'] = True\n kwargs.pop('child', None)\n\n arg_string = ', '.join([smart_repr(val) for val in field._args])\n kwarg_string = ', '.join([\n '%s=%s' % (key, smart_repr(val))\n for key, val in sorted(kwargs.items())\n ])\n if arg_string and kwarg_string:\n arg_string += ', '\n\n if force_many:\n class_name = force_many.__class__.__name__\n else:\n class_name = field.__class__.__name__\n\n return \"%s(%s%s)\" % (class_name, arg_string, kwarg_string)\n\n\ndef serializer_repr(serializer, indent, force_many=None):\n ret = field_repr(serializer, force_many) + ':'\n indent_str = ' ' * indent\n\n if force_many:\n fields = force_many.fields\n else:\n fields = serializer.fields\n\n for field_name, field in fields.items():\n ret += '\\n' + indent_str + field_name + ' = '\n if hasattr(field, 'fields'):\n ret += serializer_repr(field, indent + 1)\n elif hasattr(field, 'child'):\n ret += list_repr(field, indent + 1)\n elif hasattr(field, 'child_relation'):\n ret += field_repr(field.child_relation, force_many=field.child_relation)\n else:\n ret += field_repr(field)\n\n if serializer.validators:\n ret += '\\n' + indent_str + 'class Meta:'\n ret += '\\n' + indent_str + ' validators = ' + smart_repr(serializer.validators)\n\n return ret\n\n\ndef list_repr(serializer, indent):\n child = serializer.child\n if hasattr(child, 'fields'):\n return serializer_repr(serializer, indent, force_many=child)\n return field_repr(serializer)\n", "path": "rest_framework/utils/representation.py" } ]
diff --git a/rest_framework/utils/representation.py b/rest_framework/utils/representation.py index 6f2efee164..b24cc3c759 100644 --- a/rest_framework/utils/representation.py +++ b/rest_framework/utils/representation.py @@ -27,8 +27,8 @@ def smart_repr(value): if isinstance(value, models.Manager): return manager_repr(value) - if isinstance(value, Promise) and value._delegate_text: - value = force_str(value) + if isinstance(value, Promise): + value = force_str(value, strings_only=True) value = repr(value)
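As a quick illustration of why `strings_only=True` is a drop-in replacement for the old `_delegate_text` check: `force_str` evaluates lazy translation `Promise` objects but leaves "protected" primitives such as `None` or integers untouched. The sketch below builds a generic `Promise` with `django.utils.functional.lazy` (it assumes only that Django is importable); it is illustrative and not part of the patch.

```python
from django.utils.encoding import force_str
from django.utils.functional import Promise, lazy

lazy_upper = lazy(str.upper, str)   # builds a Promise, much like gettext_lazy() does
value = lazy_upper("help text")

assert isinstance(value, Promise)
print(force_str(value, strings_only=True))  # "HELP TEXT" -- the Promise is evaluated
print(force_str(None, strings_only=True))   # None -- protected types pass through unchanged
```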
networkx__networkx-6478
nx.DiGraph.to_undirected() not working as expected for bidirectional edges when using as_view = True

Problem: When using `to_undirected()` on a DiGraph, the properties are inconsistent, i.e., they differ depending on whether `as_view` was set to True or False. More precisely, the reported degree is not as expected when using `as_view = True`. I guess this might also have an effect on other properties that depend on degree.

### Current Behavior
The node degree of the undirected graph returned by `to_undirected()` differs depending on whether `as_view` was set to True or False. If the directed graph has bidirectional edges, the degree is off by approximately a factor of 2 in the graph view. Everything works as expected if `as_view` is set to False.

### Expected Behavior
`G.to_undirected(as_view = False).degree()` and `G.to_undirected(as_view = True).degree()` should behave the same.

### Steps to Reproduce
```python
import networkx as nx
import sys

print(sys.version)     # 3.10.8 | packaged by conda-forge | (main, Nov 24 2022, 14:07:00) [MSC v.1916 64 bit (AMD64)]
print(nx.__version__)  # 2.8.8

G = nx.DiGraph()
G.add_nodes_from(["v0","v1","v2"])
G.add_edges_from([("v0","v1"),("v1","v0"),("v1","v2")])
print(G.degree())      # Correct: [('v0', 2), ('v1', 3), ('v2', 1)]

G_undir_1 = G.to_undirected(as_view = False)
print(G_undir_1.degree())  # Correct: [('v0', 1), ('v1', 2), ('v2', 1)]

G_undir_2 = G.to_undirected(as_view = True)
print(G_undir_2.degree())  # Incorrect: [('v0', 2), ('v1', 3), ('v2', 1)]
```

### Environment
Python version: 3.10.8
NetworkX version: 2.8.8

### Additional context
n/a
[ { "content": "\"\"\"Views of core data structures such as nested Mappings (e.g. dict-of-dicts).\nThese ``Views`` often restrict element access, with either the entire view or\nlayers of nested mappings being read-only.\n\"\"\"\nfrom collections.abc import Mapping\n\n__all__ = [\n \"AtlasView\",\n \"AdjacencyView\",\n \"MultiAdjacencyView\",\n \"UnionAtlas\",\n \"UnionAdjacency\",\n \"UnionMultiInner\",\n \"UnionMultiAdjacency\",\n \"FilterAtlas\",\n \"FilterAdjacency\",\n \"FilterMultiInner\",\n \"FilterMultiAdjacency\",\n]\n\n\nclass AtlasView(Mapping):\n \"\"\"An AtlasView is a Read-only Mapping of Mappings.\n\n It is a View into a dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer level is read-only.\n\n See Also\n ========\n AdjacencyView: View into dict-of-dict-of-dict\n MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_atlas\",)\n\n def __getstate__(self):\n return {\"_atlas\": self._atlas}\n\n def __setstate__(self, state):\n self._atlas = state[\"_atlas\"]\n\n def __init__(self, d):\n self._atlas = d\n\n def __len__(self):\n return len(self._atlas)\n\n def __iter__(self):\n return iter(self._atlas)\n\n def __getitem__(self, key):\n return self._atlas[key]\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n def __str__(self):\n return str(self._atlas) # {nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._atlas!r})\"\n\n\nclass AdjacencyView(AtlasView):\n \"\"\"An AdjacencyView is a Read-only Map of Maps of Maps.\n\n It is a View into a dict-of-dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer levels are read-only.\n\n See Also\n ========\n AtlasView: View into dict-of-dict\n MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses AtlasView slots names _atlas\n\n def __getitem__(self, name):\n return AtlasView(self._atlas[name])\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n\nclass MultiAdjacencyView(AdjacencyView):\n \"\"\"An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.\n\n It is a View into a dict-of-dict-of-dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer levels are read-only.\n\n See Also\n ========\n AtlasView: View into dict-of-dict\n AdjacencyView: View into dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses AtlasView slots names _atlas\n\n def __getitem__(self, name):\n return AdjacencyView(self._atlas[name])\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n\nclass UnionAtlas(Mapping):\n \"\"\"A read-only union of two atlases (dict-of-dict).\n\n The two dict-of-dicts represent the inner dict of\n an Adjacency: `G.succ[node]` and `G.pred[node]`.\n The inner level of dict of both hold attribute key:value\n pairs and is read-write. 
But the outer level is read-only.\n\n See Also\n ========\n UnionAdjacency: View into dict-of-dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_succ\", \"_pred\")\n\n def __getstate__(self):\n return {\"_succ\": self._succ, \"_pred\": self._pred}\n\n def __setstate__(self, state):\n self._succ = state[\"_succ\"]\n self._pred = state[\"_pred\"]\n\n def __init__(self, succ, pred):\n self._succ = succ\n self._pred = pred\n\n def __len__(self):\n return len(self._succ) + len(self._pred)\n\n def __iter__(self):\n return iter(set(self._succ.keys()) | set(self._pred.keys()))\n\n def __getitem__(self, key):\n try:\n return self._succ[key]\n except KeyError:\n return self._pred[key]\n\n def copy(self):\n result = {nbr: dd.copy() for nbr, dd in self._succ.items()}\n for nbr, dd in self._pred.items():\n if nbr in result:\n result[nbr].update(dd)\n else:\n result[nbr] = dd.copy()\n return result\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._succ!r}, {self._pred!r})\"\n\n\nclass UnionAdjacency(Mapping):\n \"\"\"A read-only union of dict Adjacencies as a Map of Maps of Maps.\n\n The two input dict-of-dict-of-dicts represent the union of\n `G.succ` and `G.pred`. Return values are UnionAtlas\n The inner level of dict is read-write. But the\n middle and outer levels are read-only.\n\n succ : a dict-of-dict-of-dict {node: nbrdict}\n pred : a dict-of-dict-of-dict {node: nbrdict}\n The keys for the two dicts should be the same\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_succ\", \"_pred\")\n\n def __getstate__(self):\n return {\"_succ\": self._succ, \"_pred\": self._pred}\n\n def __setstate__(self, state):\n self._succ = state[\"_succ\"]\n self._pred = state[\"_pred\"]\n\n def __init__(self, succ, pred):\n # keys must be the same for two input dicts\n assert len(set(succ.keys()) ^ set(pred.keys())) == 0\n self._succ = succ\n self._pred = pred\n\n def __len__(self):\n return len(self._succ) # length of each dict should be the same\n\n def __iter__(self):\n return iter(self._succ)\n\n def __getitem__(self, nbr):\n return UnionAtlas(self._succ[nbr], self._pred[nbr])\n\n def copy(self):\n return {n: self[n].copy() for n in self._succ}\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._succ!r}, {self._pred!r})\"\n\n\nclass UnionMultiInner(UnionAtlas):\n \"\"\"A read-only union of two inner dicts of MultiAdjacencies.\n\n The two input dict-of-dict-of-dicts represent the union of\n `G.succ[node]` and `G.pred[node]` for MultiDiGraphs.\n Return values are UnionAtlas.\n The inner level of dict is read-write. 
But the outer levels are read-only.\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionAdjacency: View into dict-of-dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses UnionAtlas slots names _succ, _pred\n\n def __getitem__(self, node):\n in_succ = node in self._succ\n in_pred = node in self._pred\n if in_succ:\n if in_pred:\n return UnionAtlas(self._succ[node], self._pred[node])\n return UnionAtlas(self._succ[node], {})\n return UnionAtlas({}, self._pred[node])\n\n def copy(self):\n nodes = set(self._succ.keys()) | set(self._pred.keys())\n return {n: self[n].copy() for n in nodes}\n\n\nclass UnionMultiAdjacency(UnionAdjacency):\n \"\"\"A read-only union of two dict MultiAdjacencies.\n\n The two input dict-of-dict-of-dict-of-dicts represent the union of\n `G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency.\n The inner level of dict is read-write. But the outer levels are read-only.\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionMultiInner: View into dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses UnionAdjacency slots names _succ, _pred\n\n def __getitem__(self, node):\n return UnionMultiInner(self._succ[node], self._pred[node])\n\n\nclass FilterAtlas(Mapping): # nodedict, nbrdict, keydict\n def __init__(self, d, NODE_OK):\n self._atlas = d\n self.NODE_OK = NODE_OK\n\n def __len__(self):\n return sum(1 for n in self)\n\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n return (n for n in self.NODE_OK.nodes if n in self._atlas)\n return (n for n in self._atlas if self.NODE_OK(n))\n\n def __getitem__(self, key):\n if key in self._atlas and self.NODE_OK(key):\n return self._atlas[key]\n raise KeyError(f\"Key {key} not found\")\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})\"\n\n\nclass FilterAdjacency(Mapping): # edgedict\n def __init__(self, d, NODE_OK, EDGE_OK):\n self._atlas = d\n self.NODE_OK = NODE_OK\n self.EDGE_OK = EDGE_OK\n\n def __len__(self):\n return sum(1 for n in self)\n\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n return (n for n in self.NODE_OK.nodes if n in self._atlas)\n return (n for n in self._atlas if self.NODE_OK(n))\n\n def __getitem__(self, node):\n if node in self._atlas and self.NODE_OK(node):\n\n def new_node_ok(nbr):\n return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)\n\n return FilterAtlas(self._atlas[node], new_node_ok)\n raise KeyError(f\"Key {node} not found\")\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n name = self.__class__.__name__\n return f\"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})\"\n\n\nclass FilterMultiInner(FilterAdjacency): # muliedge_seconddict\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas)\n else:\n my_nodes = (n for n in self._atlas if self.NODE_OK(n))\n for n in my_nodes:\n 
some_keys_ok = False\n for key in self._atlas[n]:\n if self.EDGE_OK(n, key):\n some_keys_ok = True\n break\n if some_keys_ok is True:\n yield n\n\n def __getitem__(self, nbr):\n if nbr in self._atlas and self.NODE_OK(nbr):\n\n def new_node_ok(key):\n return self.EDGE_OK(nbr, key)\n\n return FilterAtlas(self._atlas[nbr], new_node_ok)\n raise KeyError(f\"Key {nbr} not found\")\n\n\nclass FilterMultiAdjacency(FilterAdjacency): # multiedgedict\n def __getitem__(self, node):\n if node in self._atlas and self.NODE_OK(node):\n\n def edge_ok(nbr, key):\n return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)\n\n return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)\n raise KeyError(f\"Key {node} not found\")\n", "path": "networkx/classes/coreviews.py" } ]
[ { "content": "\"\"\"Views of core data structures such as nested Mappings (e.g. dict-of-dicts).\nThese ``Views`` often restrict element access, with either the entire view or\nlayers of nested mappings being read-only.\n\"\"\"\nfrom collections.abc import Mapping\n\n__all__ = [\n \"AtlasView\",\n \"AdjacencyView\",\n \"MultiAdjacencyView\",\n \"UnionAtlas\",\n \"UnionAdjacency\",\n \"UnionMultiInner\",\n \"UnionMultiAdjacency\",\n \"FilterAtlas\",\n \"FilterAdjacency\",\n \"FilterMultiInner\",\n \"FilterMultiAdjacency\",\n]\n\n\nclass AtlasView(Mapping):\n \"\"\"An AtlasView is a Read-only Mapping of Mappings.\n\n It is a View into a dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer level is read-only.\n\n See Also\n ========\n AdjacencyView: View into dict-of-dict-of-dict\n MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_atlas\",)\n\n def __getstate__(self):\n return {\"_atlas\": self._atlas}\n\n def __setstate__(self, state):\n self._atlas = state[\"_atlas\"]\n\n def __init__(self, d):\n self._atlas = d\n\n def __len__(self):\n return len(self._atlas)\n\n def __iter__(self):\n return iter(self._atlas)\n\n def __getitem__(self, key):\n return self._atlas[key]\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n def __str__(self):\n return str(self._atlas) # {nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._atlas!r})\"\n\n\nclass AdjacencyView(AtlasView):\n \"\"\"An AdjacencyView is a Read-only Map of Maps of Maps.\n\n It is a View into a dict-of-dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer levels are read-only.\n\n See Also\n ========\n AtlasView: View into dict-of-dict\n MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses AtlasView slots names _atlas\n\n def __getitem__(self, name):\n return AtlasView(self._atlas[name])\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n\nclass MultiAdjacencyView(AdjacencyView):\n \"\"\"An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.\n\n It is a View into a dict-of-dict-of-dict-of-dict data structure.\n The inner level of dict is read-write. But the\n outer levels are read-only.\n\n See Also\n ========\n AtlasView: View into dict-of-dict\n AdjacencyView: View into dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses AtlasView slots names _atlas\n\n def __getitem__(self, name):\n return AdjacencyView(self._atlas[name])\n\n def copy(self):\n return {n: self[n].copy() for n in self._atlas}\n\n\nclass UnionAtlas(Mapping):\n \"\"\"A read-only union of two atlases (dict-of-dict).\n\n The two dict-of-dicts represent the inner dict of\n an Adjacency: `G.succ[node]` and `G.pred[node]`.\n The inner level of dict of both hold attribute key:value\n pairs and is read-write. 
But the outer level is read-only.\n\n See Also\n ========\n UnionAdjacency: View into dict-of-dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_succ\", \"_pred\")\n\n def __getstate__(self):\n return {\"_succ\": self._succ, \"_pred\": self._pred}\n\n def __setstate__(self, state):\n self._succ = state[\"_succ\"]\n self._pred = state[\"_pred\"]\n\n def __init__(self, succ, pred):\n self._succ = succ\n self._pred = pred\n\n def __len__(self):\n return len(self._succ.keys() | self._pred.keys())\n\n def __iter__(self):\n return iter(set(self._succ.keys()) | set(self._pred.keys()))\n\n def __getitem__(self, key):\n try:\n return self._succ[key]\n except KeyError:\n return self._pred[key]\n\n def copy(self):\n result = {nbr: dd.copy() for nbr, dd in self._succ.items()}\n for nbr, dd in self._pred.items():\n if nbr in result:\n result[nbr].update(dd)\n else:\n result[nbr] = dd.copy()\n return result\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._succ!r}, {self._pred!r})\"\n\n\nclass UnionAdjacency(Mapping):\n \"\"\"A read-only union of dict Adjacencies as a Map of Maps of Maps.\n\n The two input dict-of-dict-of-dicts represent the union of\n `G.succ` and `G.pred`. Return values are UnionAtlas\n The inner level of dict is read-write. But the\n middle and outer levels are read-only.\n\n succ : a dict-of-dict-of-dict {node: nbrdict}\n pred : a dict-of-dict-of-dict {node: nbrdict}\n The keys for the two dicts should be the same\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = (\"_succ\", \"_pred\")\n\n def __getstate__(self):\n return {\"_succ\": self._succ, \"_pred\": self._pred}\n\n def __setstate__(self, state):\n self._succ = state[\"_succ\"]\n self._pred = state[\"_pred\"]\n\n def __init__(self, succ, pred):\n # keys must be the same for two input dicts\n assert len(set(succ.keys()) ^ set(pred.keys())) == 0\n self._succ = succ\n self._pred = pred\n\n def __len__(self):\n return len(self._succ) # length of each dict should be the same\n\n def __iter__(self):\n return iter(self._succ)\n\n def __getitem__(self, nbr):\n return UnionAtlas(self._succ[nbr], self._pred[nbr])\n\n def copy(self):\n return {n: self[n].copy() for n in self._succ}\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._succ!r}, {self._pred!r})\"\n\n\nclass UnionMultiInner(UnionAtlas):\n \"\"\"A read-only union of two inner dicts of MultiAdjacencies.\n\n The two input dict-of-dict-of-dicts represent the union of\n `G.succ[node]` and `G.pred[node]` for MultiDiGraphs.\n Return values are UnionAtlas.\n The inner level of dict is read-write. 
But the outer levels are read-only.\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionAdjacency: View into dict-of-dict-of-dict\n UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses UnionAtlas slots names _succ, _pred\n\n def __getitem__(self, node):\n in_succ = node in self._succ\n in_pred = node in self._pred\n if in_succ:\n if in_pred:\n return UnionAtlas(self._succ[node], self._pred[node])\n return UnionAtlas(self._succ[node], {})\n return UnionAtlas({}, self._pred[node])\n\n def copy(self):\n nodes = set(self._succ.keys()) | set(self._pred.keys())\n return {n: self[n].copy() for n in nodes}\n\n\nclass UnionMultiAdjacency(UnionAdjacency):\n \"\"\"A read-only union of two dict MultiAdjacencies.\n\n The two input dict-of-dict-of-dict-of-dicts represent the union of\n `G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency.\n The inner level of dict is read-write. But the outer levels are read-only.\n\n See Also\n ========\n UnionAtlas: View into dict-of-dict\n UnionMultiInner: View into dict-of-dict-of-dict\n \"\"\"\n\n __slots__ = () # Still uses UnionAdjacency slots names _succ, _pred\n\n def __getitem__(self, node):\n return UnionMultiInner(self._succ[node], self._pred[node])\n\n\nclass FilterAtlas(Mapping): # nodedict, nbrdict, keydict\n def __init__(self, d, NODE_OK):\n self._atlas = d\n self.NODE_OK = NODE_OK\n\n def __len__(self):\n return sum(1 for n in self)\n\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n return (n for n in self.NODE_OK.nodes if n in self._atlas)\n return (n for n in self._atlas if self.NODE_OK(n))\n\n def __getitem__(self, key):\n if key in self._atlas and self.NODE_OK(key):\n return self._atlas[key]\n raise KeyError(f\"Key {key} not found\")\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})\"\n\n\nclass FilterAdjacency(Mapping): # edgedict\n def __init__(self, d, NODE_OK, EDGE_OK):\n self._atlas = d\n self.NODE_OK = NODE_OK\n self.EDGE_OK = EDGE_OK\n\n def __len__(self):\n return sum(1 for n in self)\n\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n return (n for n in self.NODE_OK.nodes if n in self._atlas)\n return (n for n in self._atlas if self.NODE_OK(n))\n\n def __getitem__(self, node):\n if node in self._atlas and self.NODE_OK(node):\n\n def new_node_ok(nbr):\n return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)\n\n return FilterAtlas(self._atlas[node], new_node_ok)\n raise KeyError(f\"Key {node} not found\")\n\n def __str__(self):\n return str({nbr: self[nbr] for nbr in self})\n\n def __repr__(self):\n name = self.__class__.__name__\n return f\"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})\"\n\n\nclass FilterMultiInner(FilterAdjacency): # muliedge_seconddict\n def __iter__(self):\n try: # check that NODE_OK has attr 'nodes'\n node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)\n except AttributeError:\n node_ok_shorter = False\n if node_ok_shorter:\n my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas)\n else:\n my_nodes = (n for n in self._atlas if self.NODE_OK(n))\n for n in my_nodes:\n 
some_keys_ok = False\n for key in self._atlas[n]:\n if self.EDGE_OK(n, key):\n some_keys_ok = True\n break\n if some_keys_ok is True:\n yield n\n\n def __getitem__(self, nbr):\n if nbr in self._atlas and self.NODE_OK(nbr):\n\n def new_node_ok(key):\n return self.EDGE_OK(nbr, key)\n\n return FilterAtlas(self._atlas[nbr], new_node_ok)\n raise KeyError(f\"Key {nbr} not found\")\n\n\nclass FilterMultiAdjacency(FilterAdjacency): # multiedgedict\n def __getitem__(self, node):\n if node in self._atlas and self.NODE_OK(node):\n\n def edge_ok(nbr, key):\n return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)\n\n return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)\n raise KeyError(f\"Key {node} not found\")\n", "path": "networkx/classes/coreviews.py" } ]
diff --git a/networkx/classes/coreviews.py b/networkx/classes/coreviews.py index c2b835592cf..5c4defe94aa 100644 --- a/networkx/classes/coreviews.py +++ b/networkx/classes/coreviews.py @@ -134,7 +134,7 @@ def __init__(self, succ, pred): self._pred = pred def __len__(self): - return len(self._succ) + len(self._pred) + return len(self._succ.keys() | self._pred.keys()) def __iter__(self): return iter(set(self._succ.keys()) | set(self._pred.keys())) diff --git a/networkx/classes/tests/test_coreviews.py b/networkx/classes/tests/test_coreviews.py index f773b8580c6..07fa5bfa4de 100644 --- a/networkx/classes/tests/test_coreviews.py +++ b/networkx/classes/tests/test_coreviews.py @@ -155,7 +155,7 @@ def test_pickle(self): assert view.__slots__ == pview.__slots__ def test_len(self): - assert len(self.av) == len(self.s) + len(self.p) + assert len(self.av) == len(self.s.keys() | self.p.keys()) == 5 def test_iter(self): assert set(self.av) == set(self.s) | set(self.p) @@ -257,7 +257,7 @@ def setup_method(self): self.adjview = nx.classes.coreviews.UnionMultiInner(self.s, self.p) def test_len(self): - assert len(self.adjview) == len(self.s) + len(self.p) + assert len(self.adjview) == len(self.s.keys() | self.p.keys()) == 4 def test_getitem(self): assert self.adjview[1] is not self.s[1]
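To make the one-line change above concrete: `UnionAtlas` holds the successor and predecessor neighbor dicts of a single node, and a reciprocal edge puts the same neighbor key into both dicts, so summing the two lengths counts that neighbor twice. The snippet below uses plain dicts standing in for the atlases of node `"v1"` from the reproduction in the issue; it is illustrative only.

```python
# Neighbor dicts for "v1" in the issue's digraph: v0 <-> v1, v1 -> v2
succ = {"v0": {}, "v2": {}}   # edges leaving v1
pred = {"v0": {}}             # edges entering v1

print(len(succ) + len(pred))           # 3 -- old __len__, "v0" counted twice
print(len(succ.keys() | pred.keys()))  # 2 -- new __len__, size of the key union
```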
vacanza__python-holidays-1420
Add l10n tests

Cover at least a duplicated-entries check.

_Originally posted by @arkid15r in https://github.com/vacanza/python-holidays/pull/1416#discussion_r1286008102_
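One plausible reading of "duplicated-entries check" is a test that fails when a `.po` catalog defines the same `msgid` more than once. The sketch below is only an illustration of that idea, not the project's actual test: the `holidays/locale/**/*.po` glob and the single-line `msgid` assumption are guesses about the repository layout.

```python
# Hypothetical duplicated-msgid check; paths and .po layout are assumptions.
import glob
import re
from collections import Counter

def find_duplicate_msgids(po_path):
    with open(po_path, encoding="utf-8") as po_file:
        msgids = re.findall(r'^msgid "(.+)"', po_file.read(), flags=re.MULTILINE)
    return [msgid for msgid, count in Counter(msgids).items() if count > 1]

for path in glob.glob("holidays/locale/**/*.po", recursive=True):
    assert not find_duplicate_msgids(path), f"{path} has duplicated msgid entries"
```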
[ { "content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.calendars.gregorian import MAR, APR, JUN, JUL, MON, _get_nth_weekday_from\nfrom holidays.holiday_base import HolidayBase\nfrom holidays.holiday_groups import ChristianHolidays, InternationalHolidays\n\n\nclass Canada(HolidayBase, ChristianHolidays, InternationalHolidays):\n country = \"CA\"\n default_language = \"en\"\n subdivisions = (\n \"AB\",\n \"BC\",\n \"MB\",\n \"NB\",\n \"NL\",\n \"NS\",\n \"NT\",\n \"NU\",\n \"ON\",\n \"PE\",\n \"QC\",\n \"SK\",\n \"YT\",\n )\n supported_languages = (\"ar\", \"en\", \"en_US\", \"fr\", \"th\")\n\n def __init__(self, *args, **kwargs):\n # Default subdivision to ON; prov for backwards compatibility\n if not kwargs.get(\"subdiv\", kwargs.get(\"prov\")):\n kwargs[\"subdiv\"] = \"ON\"\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _get_nearest_monday(self, *args) -> date:\n dt = date(self._year, *args)\n return _get_nth_weekday_from(\n +1 if self._is_friday(dt) or self._is_weekend(dt) else -1,\n MON,\n dt,\n )\n\n def _add_observed(self, dt: date, include_sat: bool = True, days: int = +1) -> None:\n if not self.observed:\n return None\n if self._is_sunday(dt) or (include_sat and self._is_saturday(dt)):\n self._add_holiday(\n self.tr(\"%s (Observed)\") % self[dt],\n dt + td(days=+2 if self._is_saturday(dt) else days),\n )\n\n def _populate(self, year):\n if year <= 1866:\n return None\n\n super()._populate(year)\n\n # New Year's Day.\n self._add_observed(self._add_new_years_day(tr(\"New Year's Day\")))\n\n # Good Friday.\n self._add_good_friday(tr(\"Good Friday\"))\n # Easter Monday.\n self._add_easter_monday(tr(\"Easter Monday\"))\n\n if year <= 1982:\n # Dominion Day.\n self._add_observed(self._add_holiday_jul_1(tr(\"Dominion Day\")))\n\n if self._year >= 1894:\n # Labour Day.\n self._add_holiday_1st_mon_of_sep(tr(\"Labour Day\"))\n\n # Christmas Day.\n self._add_observed(self._add_christmas_day(tr(\"Christmas Day\")), days=+2)\n\n # Boxing Day.\n self._add_observed(self._add_christmas_day_two(tr(\"Boxing Day\")), days=+2)\n\n def _add_family_day(self):\n # Family Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Family Day\"))\n\n def _add_thanksgiving(self):\n if self._year >= 1931:\n # Thanksgiving.\n name = tr(\"Thanksgiving\")\n # in 1935, Canadian Thanksgiving was moved due to the General\n # Election falling on the second Monday of October\n # http://tiny.cc/can_thkgvg\n if self._year == 1935:\n self._add_holiday_oct_25(name)\n else:\n self._add_holiday_2nd_mon_of_oct(name)\n\n def _add_queens_funeral(self):\n if self._year == 2022:\n # Funeral of Queen Elizabeth II.\n self._add_holiday_sep_19(tr(\"Funeral of Her Majesty the Queen Elizabeth II\"))\n\n def _add_subdiv_holidays(self):\n if self._year >= 1983:\n self._add_observed(\n self._add_holiday_jul_1(\n (\n # Memorial Day.\n tr(\"Memorial Day\")\n if self.subdiv == \"NL\"\n # Canada Day.\n else tr(\"Canada Day\")\n )\n )\n )\n\n 
super()._add_subdiv_holidays()\n\n def _add_subdiv_ab_holidays(self):\n if self._year >= 1990:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#Alberta\n if self._year >= 1974:\n # Heritage Day.\n self._add_holiday_1st_mon_of_aug(tr(\"Heritage Day\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_bc_holidays(self):\n if self._year >= 2013:\n name = tr(\"Family Day\")\n if self._year >= 2019:\n self._add_holiday_3rd_mon_of_feb(name)\n else:\n self._add_holiday_2nd_mon_of_feb(name)\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#British_Columbia\n if self._year >= 1974:\n # British Columbia Day.\n self._add_holiday_1st_mon_of_aug(tr(\"British Columbia Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 2023:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_mb_holidays(self):\n if self._year >= 2008:\n # Louis Riel Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Louis Riel Day\"))\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1900:\n name = (\n # Terry Fox Day.\n tr(\"Terry Fox Day\")\n if self._year >= 2015\n # Civic Holiday.\n else tr(\"Civic Holiday\")\n )\n self._add_holiday_1st_mon_of_aug(name)\n\n if self._year >= 2021:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_nb_holidays(self):\n if self._year >= 2018:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#New_Brunswick\n if self._year >= 1900:\n # New Brunswick Day.\n self._add_holiday_1st_mon_of_aug(tr(\"New Brunswick Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_nl_holidays(self):\n if self._year >= 1900:\n # St. Patrick's Day.\n self._add_holiday(tr(\"St. Patrick's Day\"), self._get_nearest_monday(MAR, 17))\n\n if self._year >= 1990:\n # Nearest Monday to April 23\n # 4/26 is the Monday closer to 4/23 in 2010\n # but the holiday was observed on 4/19? Crazy Newfies!\n dt = date(2010, APR, 19) if self._year == 2010 else self._get_nearest_monday(APR, 23)\n # St. George's Day.\n self._add_holiday(tr(\"St. 
George's Day\"), dt)\n\n if self._year >= 1997:\n # Discovery Day.\n self._add_holiday(tr(\"Discovery Day\"), self._get_nearest_monday(JUN, 24))\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_ns_holidays(self):\n # http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp\n if self._year >= 2015:\n # Heritage Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Heritage Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 2021:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_nt_holidays(self):\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1996:\n # National Aboriginal Day.\n self._add_holiday_jun_21(tr(\"National Aboriginal Day\"))\n\n if self._year >= 1900:\n # Civic Holiday.\n self._add_holiday_1st_mon_of_aug(tr(\"Civic Holiday\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_nu_holidays(self):\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 2000:\n dt = (APR, 1) if self._year == 2000 else (JUL, 9)\n # Nunavut Day.\n self._add_observed(self._add_holiday(tr(\"Nunavut Day\"), dt), include_sat=False)\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_on_holidays(self):\n if self._year >= 2008:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1900:\n # Civic Holiday.\n self._add_holiday_1st_mon_of_aug(tr(\"Civic Holiday\"))\n\n self._add_thanksgiving()\n\n def _add_subdiv_pe_holidays(self):\n if self._year >= 2009:\n # Islander Day.\n name = tr(\"Islander Day\")\n if self._year >= 2010:\n self._add_holiday_3rd_mon_of_feb(name)\n else:\n self._add_holiday_2nd_mon_of_feb(name)\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_qc_holidays(self):\n if self._year >= 2003:\n # National Patriots' Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"National Patriots' Day\"))\n\n if self._year >= 1925:\n self._add_observed(\n # St. Jean Baptiste Day.\n self._add_saint_johns_day(tr(\"St. 
Jean Baptiste Day\")),\n include_sat=False,\n )\n\n self._add_thanksgiving()\n\n def _add_subdiv_sk_holidays(self):\n if self._year >= 2007:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#Saskatchewan\n if self._year >= 1900:\n # Saskatchewan Day.\n self._add_holiday_1st_mon_of_aug(tr(\"Saskatchewan Day\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_yt_holidays(self):\n # start date?\n # https://www.britannica.com/topic/Heritage-Day-Canadian-holiday\n # Heritage Day was created in 1973\n # by the Heritage Canada Foundation\n # therefore, start date is not earlier than 1974\n # http://heritageyukon.ca/programs/heritage-day\n # https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day\n # Friday before the last Sunday in February\n if self._year >= 1974:\n self._add_holiday_2_days_prior_last_sun_of_feb(tr(\"Heritage Day\"))\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1912:\n # Discovery Day.\n self._add_holiday_3rd_mon_of_aug(tr(\"Discovery Day\"))\n\n self._add_queens_funeral()\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n\nclass CA(Canada):\n pass\n\n\nclass CAN(Canada):\n pass\n", "path": "holidays/countries/canada.py" } ]
[ { "content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.calendars.gregorian import MAR, APR, JUN, JUL, MON, _get_nth_weekday_from\nfrom holidays.holiday_base import HolidayBase\nfrom holidays.holiday_groups import ChristianHolidays, InternationalHolidays\n\n\nclass Canada(HolidayBase, ChristianHolidays, InternationalHolidays):\n country = \"CA\"\n default_language = \"en\"\n subdivisions = (\n \"AB\",\n \"BC\",\n \"MB\",\n \"NB\",\n \"NL\",\n \"NS\",\n \"NT\",\n \"NU\",\n \"ON\",\n \"PE\",\n \"QC\",\n \"SK\",\n \"YT\",\n )\n supported_languages = (\"ar\", \"en\", \"fr\", \"th\")\n\n def __init__(self, *args, **kwargs):\n # Default subdivision to ON; prov for backwards compatibility\n if not kwargs.get(\"subdiv\", kwargs.get(\"prov\")):\n kwargs[\"subdiv\"] = \"ON\"\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _get_nearest_monday(self, *args) -> date:\n dt = date(self._year, *args)\n return _get_nth_weekday_from(\n +1 if self._is_friday(dt) or self._is_weekend(dt) else -1,\n MON,\n dt,\n )\n\n def _add_observed(self, dt: date, include_sat: bool = True, days: int = +1) -> None:\n if not self.observed:\n return None\n if self._is_sunday(dt) or (include_sat and self._is_saturday(dt)):\n self._add_holiday(\n self.tr(\"%s (Observed)\") % self[dt],\n dt + td(days=+2 if self._is_saturday(dt) else days),\n )\n\n def _populate(self, year):\n if year <= 1866:\n return None\n\n super()._populate(year)\n\n # New Year's Day.\n self._add_observed(self._add_new_years_day(tr(\"New Year's Day\")))\n\n # Good Friday.\n self._add_good_friday(tr(\"Good Friday\"))\n # Easter Monday.\n self._add_easter_monday(tr(\"Easter Monday\"))\n\n if year <= 1982:\n # Dominion Day.\n self._add_observed(self._add_holiday_jul_1(tr(\"Dominion Day\")))\n\n if self._year >= 1894:\n # Labour Day.\n self._add_holiday_1st_mon_of_sep(tr(\"Labour Day\"))\n\n # Christmas Day.\n self._add_observed(self._add_christmas_day(tr(\"Christmas Day\")), days=+2)\n\n # Boxing Day.\n self._add_observed(self._add_christmas_day_two(tr(\"Boxing Day\")), days=+2)\n\n def _add_family_day(self):\n # Family Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Family Day\"))\n\n def _add_thanksgiving(self):\n if self._year >= 1931:\n # Thanksgiving.\n name = tr(\"Thanksgiving\")\n # in 1935, Canadian Thanksgiving was moved due to the General\n # Election falling on the second Monday of October\n # http://tiny.cc/can_thkgvg\n if self._year == 1935:\n self._add_holiday_oct_25(name)\n else:\n self._add_holiday_2nd_mon_of_oct(name)\n\n def _add_queens_funeral(self):\n if self._year == 2022:\n # Funeral of Queen Elizabeth II.\n self._add_holiday_sep_19(tr(\"Funeral of Her Majesty the Queen Elizabeth II\"))\n\n def _add_subdiv_holidays(self):\n if self._year >= 1983:\n self._add_observed(\n self._add_holiday_jul_1(\n (\n # Memorial Day.\n tr(\"Memorial Day\")\n if self.subdiv == \"NL\"\n # Canada Day.\n else tr(\"Canada Day\")\n )\n )\n )\n\n 
super()._add_subdiv_holidays()\n\n def _add_subdiv_ab_holidays(self):\n if self._year >= 1990:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#Alberta\n if self._year >= 1974:\n # Heritage Day.\n self._add_holiday_1st_mon_of_aug(tr(\"Heritage Day\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_bc_holidays(self):\n if self._year >= 2013:\n name = tr(\"Family Day\")\n if self._year >= 2019:\n self._add_holiday_3rd_mon_of_feb(name)\n else:\n self._add_holiday_2nd_mon_of_feb(name)\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#British_Columbia\n if self._year >= 1974:\n # British Columbia Day.\n self._add_holiday_1st_mon_of_aug(tr(\"British Columbia Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 2023:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_mb_holidays(self):\n if self._year >= 2008:\n # Louis Riel Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Louis Riel Day\"))\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1900:\n name = (\n # Terry Fox Day.\n tr(\"Terry Fox Day\")\n if self._year >= 2015\n # Civic Holiday.\n else tr(\"Civic Holiday\")\n )\n self._add_holiday_1st_mon_of_aug(name)\n\n if self._year >= 2021:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_nb_holidays(self):\n if self._year >= 2018:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#New_Brunswick\n if self._year >= 1900:\n # New Brunswick Day.\n self._add_holiday_1st_mon_of_aug(tr(\"New Brunswick Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_nl_holidays(self):\n if self._year >= 1900:\n # St. Patrick's Day.\n self._add_holiday(tr(\"St. Patrick's Day\"), self._get_nearest_monday(MAR, 17))\n\n if self._year >= 1990:\n # Nearest Monday to April 23\n # 4/26 is the Monday closer to 4/23 in 2010\n # but the holiday was observed on 4/19? Crazy Newfies!\n dt = date(2010, APR, 19) if self._year == 2010 else self._get_nearest_monday(APR, 23)\n # St. George's Day.\n self._add_holiday(tr(\"St. 
George's Day\"), dt)\n\n if self._year >= 1997:\n # Discovery Day.\n self._add_holiday(tr(\"Discovery Day\"), self._get_nearest_monday(JUN, 24))\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_ns_holidays(self):\n # http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp\n if self._year >= 2015:\n # Heritage Day.\n self._add_holiday_3rd_mon_of_feb(tr(\"Heritage Day\"))\n\n self._add_queens_funeral()\n\n if self._year >= 2021:\n # National Day for Truth and Reconciliation.\n self._add_holiday_sep_30(tr(\"National Day for Truth and Reconciliation\"))\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_nt_holidays(self):\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1996:\n # National Aboriginal Day.\n self._add_holiday_jun_21(tr(\"National Aboriginal Day\"))\n\n if self._year >= 1900:\n # Civic Holiday.\n self._add_holiday_1st_mon_of_aug(tr(\"Civic Holiday\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_nu_holidays(self):\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 2000:\n dt = (APR, 1) if self._year == 2000 else (JUL, 9)\n # Nunavut Day.\n self._add_observed(self._add_holiday(tr(\"Nunavut Day\"), dt), include_sat=False)\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n def _add_subdiv_on_holidays(self):\n if self._year >= 2008:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1900:\n # Civic Holiday.\n self._add_holiday_1st_mon_of_aug(tr(\"Civic Holiday\"))\n\n self._add_thanksgiving()\n\n def _add_subdiv_pe_holidays(self):\n if self._year >= 2009:\n # Islander Day.\n name = tr(\"Islander Day\")\n if self._year >= 2010:\n self._add_holiday_3rd_mon_of_feb(name)\n else:\n self._add_holiday_2nd_mon_of_feb(name)\n\n self._add_queens_funeral()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_qc_holidays(self):\n if self._year >= 2003:\n # National Patriots' Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"National Patriots' Day\"))\n\n if self._year >= 1925:\n self._add_observed(\n # St. Jean Baptiste Day.\n self._add_saint_johns_day(tr(\"St. 
Jean Baptiste Day\")),\n include_sat=False,\n )\n\n self._add_thanksgiving()\n\n def _add_subdiv_sk_holidays(self):\n if self._year >= 2007:\n self._add_family_day()\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n # https://en.wikipedia.org/wiki/Civic_Holiday#Saskatchewan\n if self._year >= 1900:\n # Saskatchewan Day.\n self._add_holiday_1st_mon_of_aug(tr(\"Saskatchewan Day\"))\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_observed(self._add_remembrance_day(tr(\"Remembrance Day\")), include_sat=False)\n\n def _add_subdiv_yt_holidays(self):\n # start date?\n # https://www.britannica.com/topic/Heritage-Day-Canadian-holiday\n # Heritage Day was created in 1973\n # by the Heritage Canada Foundation\n # therefore, start date is not earlier than 1974\n # http://heritageyukon.ca/programs/heritage-day\n # https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day\n # Friday before the last Sunday in February\n if self._year >= 1974:\n self._add_holiday_2_days_prior_last_sun_of_feb(tr(\"Heritage Day\"))\n\n if self._year >= 1953:\n # Victoria Day.\n self._add_holiday_1st_mon_before_may_24(tr(\"Victoria Day\"))\n\n if self._year >= 1912:\n # Discovery Day.\n self._add_holiday_3rd_mon_of_aug(tr(\"Discovery Day\"))\n\n self._add_queens_funeral()\n\n self._add_thanksgiving()\n\n if self._year >= 1931:\n # Remembrance Day.\n self._add_remembrance_day(tr(\"Remembrance Day\"))\n\n\nclass CA(Canada):\n pass\n\n\nclass CAN(Canada):\n pass\n", "path": "holidays/countries/canada.py" } ]
diff --git a/README.rst b/README.rst index d17c92acd..e0e41ddae 100644 --- a/README.rst +++ b/README.rst @@ -240,7 +240,7 @@ The list of supported countries, their subdivisions and supported languages * - Canada - CA - Provinces and territories: AB, BC, MB, NB, NL, NS, NT, NU, **ON**, PE, QC, SK, YT - - ar, **en**, en_US, fr, th + - ar, **en**, fr, th * - Chad - TD - diff --git a/holidays/countries/canada.py b/holidays/countries/canada.py index 1723f7fff..0f7071c48 100644 --- a/holidays/countries/canada.py +++ b/holidays/countries/canada.py @@ -36,7 +36,7 @@ class Canada(HolidayBase, ChristianHolidays, InternationalHolidays): "SK", "YT", ) - supported_languages = ("ar", "en", "en_US", "fr", "th") + supported_languages = ("ar", "en", "fr", "th") def __init__(self, *args, **kwargs): # Default subdivision to ON; prov for backwards compatibility diff --git a/holidays/locale/en_US/LC_MESSAGES/CA.po b/holidays/locale/en_US/LC_MESSAGES/CA.po deleted file mode 100644 index 12e4c609d..000000000 --- a/holidays/locale/en_US/LC_MESSAGES/CA.po +++ /dev/null @@ -1,140 +0,0 @@ -# Canada holidays en_US localization. -# Authors: Arkadii Yakovets <[email protected]>, (c) 2023. -# -msgid "" -msgstr "" -"Project-Id-Version: Python Holidays 0.25\n" -"POT-Creation-Date: 2023-02-15 08:30-0800\n" -"PO-Revision-Date: 2023-02-16 08:52-0800\n" -"Last-Translator: Arkadii Yakovets <[email protected]>\n" -"Language-Team: Python Holidays localization team\n" -"Language: en_US\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Lingua 4.15.0\n" -"X-Generator: Poedit 3.2.2\n" - -#. New Year's Day. -msgid "New Year's Day" -msgstr "" - -#, c-format -msgid "%s (Observed)" -msgstr "" - -#. Family Day. -msgid "Family Day" -msgstr "" - -#. Louis Riel Day. -msgid "Louis Riel Day" -msgstr "" - -#. Islander Day. -msgid "Islander Day" -msgstr "" - -#. Heritage Day. -msgid "Heritage Day" -msgstr "" - -#. St. Patrick's Day. -msgid "St. Patrick's Day" -msgstr "" - -#. Good Friday. -msgid "Good Friday" -msgstr "" - -#. Easter Monday. -msgid "Easter Monday" -msgstr "" - -#. St. George's Day. -msgid "St. George's Day" -msgstr "" - -#. Victoria Day. -msgid "Victoria Day" -msgstr "" - -#. National Patriots' Day. -msgid "National Patriots' Day" -msgstr "" - -#. National Aboriginal Day. -msgid "National Aboriginal Day" -msgstr "" - -#. St. Jean Baptiste Day. -msgid "St. Jean Baptiste Day" -msgstr "" - -#. Discovery Day. -msgid "Discovery Day" -msgstr "" - -#. Memorial Day. -msgid "Memorial Day" -msgstr "" - -#. Canada Day. -msgid "Canada Day" -msgstr "" - -#. Dominion Day. -msgid "Dominion Day" -msgstr "" - -#. Nunavut Day. -msgid "Nunavut Day" -msgstr "" - -#. Civic Holiday. -msgid "Civic Holiday" -msgstr "" - -#. British Columbia Day. -msgid "British Columbia Day" -msgstr "" - -#. New Brunswick Day. -msgid "New Brunswick Day" -msgstr "" - -#. Saskatchewan Day. -msgid "Saskatchewan Day" -msgstr "" - -#. Labour Day. -msgid "Labour Day" -msgstr "" - -#. Funeral of Queen Elizabeth II. -msgid "Funeral of Her Majesty the Queen Elizabeth II" -msgstr "" - -#. National Day for Truth and Reconciliation. -msgid "National Day for Truth and Reconciliation" -msgstr "" - -#. Thanksgiving. -msgid "Thanksgiving" -msgstr "" - -#. Remembrance Day. -msgid "Remembrance Day" -msgstr "" - -#. Christmas Day. -msgid "Christmas Day" -msgstr "" - -#. Boxing Day. -msgid "Boxing Day" -msgstr "" - -#. Terry Fox Day. 
-msgid "Terry Fox Day" -msgstr "" diff --git a/tests/test_l10n.py b/tests/test_l10n.py new file mode 100644 index 000000000..8c053f0f6 --- /dev/null +++ b/tests/test_l10n.py @@ -0,0 +1,68 @@ +# python-holidays +# --------------- +# A fast, efficient Python library for generating country, province and state +# specific sets of holidays on the fly. It aims to make determining whether a +# specific date is a holiday as fast and flexible as possible. +# +# Authors: dr-prodigy <[email protected]> (c) 2017-2023 +# ryanss <[email protected]> (c) 2014-2017 +# Website: https://github.com/dr-prodigy/python-holidays +# License: MIT (see LICENSE file) + +import re +import unittest +from pathlib import Path + +from polib import pofile as create_po_file + +import holidays + + +class TestLocalization(unittest.TestCase): + def test_localization(self): + tests_dir = Path(__file__).parent + locale_dir = tests_dir.parent / "holidays" / "locale" + + for po_path in sorted(Path(locale_dir).rglob("*.po")): + try: + po_file = create_po_file(po_path, check_for_duplicates=True) + except ValueError as e: + # Make sure no duplicated entries added. + match = re.match(r"Entry (.*) already exists", str(e)) + self.assertEqual( + 0, + len(match.groups()), + f"Entry `{match.group(1)}` already exists in {po_path}. " + "Please remove the duplicate.", + ) + + raise e + + # Collect `<country_code>` part from + # holidays/locale/<locale>/LC_MESSAGES/<country_code>.po. + entity_code = po_path.stem + # Collect `<locale>` part from + # holidays/locale/<locale>/LC_MESSAGES/<country_code>.po. + language = po_path.parts[-3] + + entity = getattr(holidays, entity_code) + + # Skip original language files. + if entity.default_language == language: + continue + + # Make sure no entries left unlocalized. + coverage = po_file.percent_translated() + self.assertEqual( + 100, + coverage, + f"The {entity_code} {language} localization is incomplete ({coverage}% < 100%)", + ) + + # Make sure no obsolete entries left. + obsolete_entries = po_file.obsolete_entries() + self.assertFalse( + obsolete_entries, + f"The {entity_code} {language} localization contains obsolete entries: " + f"{', '.join((oe.msgid for oe in obsolete_entries))}", + )
pre-commit__pre-commit-2740
/dev/null not found with pre-commit 3.0.2 ### search you tried in the issue tracker /dev/null ### describe your issue After upgrading to pre-commit 3.0.2, one of my users (on up-to-date macos) is reporting that invoking ruby actions fails with `/dev/null` not found. Relevant output: ``` rubocop..................................................................Failed - hook id: rubocop - exit code: 2 /dev/null not found /Users/user/.rvm/rubies/ruby-3.1.3/lib/ruby/3.1.0/bundler/definition.rb:36:in `build' /Users/user/.rvm/rubies/ruby-3.1.3/lib/ruby/3.1.0/bundler.rb:207:in `definition' /Users/user/.rvm/rubies/ruby-3.1.3/lib/ruby/3.1.0/bundler.rb:190:in `load' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:270:in `gem_config_path' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:65:in `block (2 levels) in resolve_inheritance_from_gems' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:63:in `reverse_each' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:63:in `block in resolve_inheritance_from_gems' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:57:in `each_pair' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader_resolver.rb:57:in `resolve_inheritance_from_gems' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader.rb:49:in `load_file' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_loader.rb:104:in `configuration_from_file' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_store.rb:68:in `for_dir' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/config_store.rb:47:in `for_pwd' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/cli.rb:147:in `apply_default_formatter' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/lib/rubocop/cli.rb:47:in `run' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/exe/rubocop:19:in `block in <top (required)>' /Users/user/.rvm/rubies/ruby-3.1.3/lib/ruby/3.1.0/benchmark.rb:311:in `realtime' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/gems/rubocop-1.42.0/exe/rubocop:19:in `<top (required)>' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/bin/rubocop:25:in `load' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/bin/rubocop:25:in `<main>' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/bin/ruby_executable_hooks:22:in `eval' /Users/user/.cache/pre-commit/repojquw8lys/rbenv-default/gems/bin/ruby_executable_hooks:22:in `<main>' ``` This looks closely related to #2727. For what it's worth, I did confirm that the user actually has a working `/dev/null` on their system. ### pre-commit --version pre-commit 3.0.2 ### .pre-commit-config.yaml ```yaml repos: - repo: https://github.com/rubocop/rubocop.git rev: '0f7416a0b3ea4a3d4edb1f2091ce8706ea3e6640' hooks: - id: rubocop additional_dependencies: ["standard:1.22.1"] ``` ### ~/.cache/pre-commit/pre-commit.log (if present) _No response_
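For context, a minimal sketch of the environment difference behind this report, using a hypothetical, stripped-down stand-in for the patch logic that actually lives in `pre_commit/languages/ruby.py` (shown in full below): in 3.0.2 the Ruby hook environment exported `BUNDLE_GEMFILE=os.devnull`, which Bundler then tries to load as a Gemfile and rejects with `/dev/null not found`. The helper name here is invented and only illustrates which variable differs.

```python
import os

# Hypothetical, simplified stand-in for the env patches pre-commit applies to
# Ruby hooks; the real implementation is in pre_commit/languages/ruby.py.
def ruby_env_patch(venv: str, *, devnull_gemfile: bool) -> dict:
    patches = {
        "GEM_HOME": os.path.join(venv, "gems"),
        "BUNDLE_IGNORE_CONFIG": "1",
    }
    if devnull_gemfile:  # pre-commit 3.0.2 behaviour reported above
        patches["BUNDLE_GEMFILE"] = os.devnull
    return patches


broken = ruby_env_patch("/tmp/rbenv-default", devnull_gemfile=True)
fixed = ruby_env_patch("/tmp/rbenv-default", devnull_gemfile=False)
print(sorted(set(broken) - set(fixed)))  # ['BUNDLE_GEMFILE']
```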
[ { "content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealth_check = helpers.basic_health_check\nrun_hook = helpers.basic_run_hook\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n ('BUNDLE_GEMFILE', os.devnull),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = helpers.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir, version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n envdir = helpers.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), envdir)\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = os.path.join(envdir, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', 
*prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n '--no-user-install',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n", "path": "pre_commit/languages/ruby.py" } ]
[ { "content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealth_check = helpers.basic_health_check\nrun_hook = helpers.basic_run_hook\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = helpers.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir, version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n envdir = helpers.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), envdir)\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = os.path.join(envdir, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n 
helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n '--no-user-install',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n", "path": "pre_commit/languages/ruby.py" } ]
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py index b4d4b45af..4416f7280 100644 --- a/pre_commit/languages/ruby.py +++ b/pre_commit/languages/ruby.py @@ -39,7 +39,6 @@ def get_env_patch( ('GEM_HOME', os.path.join(venv, 'gems')), ('GEM_PATH', UNSET), ('BUNDLE_IGNORE_CONFIG', '1'), - ('BUNDLE_GEMFILE', os.devnull), ) if language_version == 'system': patches += ( diff --git a/tests/languages/ruby_test.py b/tests/languages/ruby_test.py index b312c7fda..9cfaad5d0 100644 --- a/tests/languages/ruby_test.py +++ b/tests/languages/ruby_test.py @@ -123,8 +123,9 @@ def test_ruby_hook_language_version(tmp_path): def test_ruby_with_bundle_disable_shared_gems(tmp_path): workdir = tmp_path.joinpath('workdir') workdir.mkdir() - # this Gemfile is missing `source` - workdir.joinpath('Gemfile').write_text('gem "lol_hai"\n') + # this needs a `source` or there's a deprecation warning + # silencing this with `BUNDLE_GEMFILE` breaks some tools (#2739) + workdir.joinpath('Gemfile').write_text('source ""\ngem "lol_hai"\n') # this bundle config causes things to be written elsewhere bundle = workdir.joinpath('.bundle') bundle.mkdir() @@ -134,5 +135,5 @@ def test_ruby_with_bundle_disable_shared_gems(tmp_path): ) with cwd(workdir): - # `3.2.0` has new enough `gem` requiring `source` and reading `.bundle` + # `3.2.0` has new enough `gem` reading `.bundle` test_ruby_hook_language_version(tmp_path)
scrapy__scrapy-2337
Add a sample middleware to startproject's template It would be nice to have a middleware template inside the project template to serve as an example for people who want to use it.
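The patch below adds a `middlewares.py.tmpl` with a sample spider middleware and points the commented-out `SPIDER_MIDDLEWARES` setting at it. For illustration, a hedged sketch of how a generated project would enable that sample; the project name `myproject` is a placeholder, and 543 is simply the example priority already used in the settings template.

```python
# settings.py of a project generated with `scrapy startproject myproject`
# (illustrative only -- the generated template ships this entry commented out).
SPIDER_MIDDLEWARES = {
    "myproject.middlewares.MyprojectSpiderMiddleware": 543,
}
```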
[ { "content": "from __future__ import print_function\nimport re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '.svn')\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\\\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print('Error: Module %r already exists' % project_name)\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n copystat(src, dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir,\n string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name,\n ProjectName=string_camelcase(project_name))\n print(\"New Scrapy project %r, using template directory %r, created in:\" % \\\n (project_name, self.templates_dir))\n print(\" %s\\n\" % abspath(project_dir))\n print(\"You can start your first spider with:\")\n print(\" cd %s\" % project_dir)\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n _templates_base_dir = self.settings['TEMPLATES_DIR'] or \\\n join(scrapy.__path__[0], 'templates')\n return join(_templates_base_dir, 'project')\n \n", "path": "scrapy/commands/startproject.py" } ]
[ { "content": "from __future__ import print_function\nimport re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '.svn')\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\\\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print('Error: Module %r already exists' % project_name)\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n copystat(src, dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir,\n string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name,\n ProjectName=string_camelcase(project_name))\n print(\"New Scrapy project %r, using template directory %r, created in:\" % \\\n (project_name, self.templates_dir))\n print(\" %s\\n\" % abspath(project_dir))\n print(\"You can start your first spider with:\")\n print(\" cd %s\" % project_dir)\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n _templates_base_dir = self.settings['TEMPLATES_DIR'] or \\\n join(scrapy.__path__[0], 'templates')\n return join(_templates_base_dir, 'project')\n \n", "path": "scrapy/commands/startproject.py" } ]
diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py index e3989baafd6..5941066326a 100644 --- a/scrapy/commands/startproject.py +++ b/scrapy/commands/startproject.py @@ -17,6 +17,7 @@ ('${project_name}', 'settings.py.tmpl'), ('${project_name}', 'items.py.tmpl'), ('${project_name}', 'pipelines.py.tmpl'), + ('${project_name}', 'middlewares.py.tmpl'), ) IGNORE = ignore_patterns('*.pyc', '.svn') diff --git a/scrapy/templates/project/module/middlewares.py.tmpl b/scrapy/templates/project/module/middlewares.py.tmpl new file mode 100644 index 00000000000..42318fec214 --- /dev/null +++ b/scrapy/templates/project/module/middlewares.py.tmpl @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Define here the models for your spider middleware +# +# See documentation in: +# http://doc.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy import signals + + +class ${ProjectName}SpiderMiddleware(object): + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception. + return None + + def process_spider_output(response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, dict or Item objects. + for i in result: + yield i + + def process_spider_exception(response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Response, dict + # or Item objects. + pass + + def process_start_requests(start_requests, spider): + # Called with the start requests of the spider, and works + # similarly to the process_spider_output() method, except + # that it doesn’t have a response associated. + + # Must return only requests (not items). + for r in start_requests: + yield r + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) diff --git a/scrapy/templates/project/module/settings.py.tmpl b/scrapy/templates/project/module/settings.py.tmpl index f13e8587106..72f25ebefea 100644 --- a/scrapy/templates/project/module/settings.py.tmpl +++ b/scrapy/templates/project/module/settings.py.tmpl @@ -47,7 +47,7 @@ ROBOTSTXT_OBEY = True # Enable or disable spider middlewares # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html #SPIDER_MIDDLEWARES = { -# '$project_name.middlewares.MyCustomSpiderMiddleware': 543, +# '$project_name.middlewares.${ProjectName}SpiderMiddleware': 543, #} # Enable or disable downloader middlewares
jazzband__django-oauth-toolkit-1088
Write a test for cleartokens management command **Is your feature request related to a problem? Please describe.** <!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] --> Code coverage of the cleartokens management command has been 0% since it was created in 2015. **Describe the solution you'd like** <!-- A clear and concise description of what you want to happen. --> Write a test for the cleartokens command. **Describe alternatives you've considered** <!-- A clear and concise description of any alternative solutions or features you've considered. --> It's not core functionality, so it could continue to have 0% coverage, but it would be nice to add a test. **Additional context** <!-- Add any other context or screenshots about the feature request here. --> See also #969
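For reference, a minimal sketch of how the command itself could be exercised, assuming Django's standard `call_command` helper and the package's existing test settings; the patch below takes a different route, marking the command `# pragma: no cover` and broadening the tests around the underlying `clear_expired()` helper instead.

```python
from django.core.management import call_command
from django.test import TestCase


class CleartokensCommandTest(TestCase):
    def test_cleartokens_runs(self):
        # The command only delegates to models.clear_expired(); invoking it
        # through the management layer at least covers that wiring.
        call_command("cleartokens")
```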
[ { "content": "from django.core.management.base import BaseCommand\n\nfrom ...models import clear_expired\n\n\nclass Command(BaseCommand):\n help = \"Can be run as a cronjob or directly to clean out expired tokens\"\n\n def handle(self, *args, **options):\n clear_expired()\n", "path": "oauth2_provider/management/commands/cleartokens.py" } ]
[ { "content": "from django.core.management.base import BaseCommand\n\nfrom ...models import clear_expired\n\n\nclass Command(BaseCommand): # pragma: no cover\n help = \"Can be run as a cronjob or directly to clean out expired tokens\"\n\n def handle(self, *args, **options):\n clear_expired()\n", "path": "oauth2_provider/management/commands/cleartokens.py" } ]
diff --git a/oauth2_provider/management/commands/cleartokens.py b/oauth2_provider/management/commands/cleartokens.py index 3fb1827f6..9d58361bc 100644 --- a/oauth2_provider/management/commands/cleartokens.py +++ b/oauth2_provider/management/commands/cleartokens.py @@ -3,7 +3,7 @@ from ...models import clear_expired -class Command(BaseCommand): +class Command(BaseCommand): # pragma: no cover help = "Can be run as a cronjob or directly to clean out expired tokens" def handle(self, *args, **options): diff --git a/tests/test_models.py b/tests/test_models.py index 7b37486ca..9ce1e5eb7 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,3 +1,5 @@ +from datetime import timedelta + import pytest from django.contrib.auth import get_user_model from django.core.exceptions import ImproperlyConfigured, ValidationError @@ -294,7 +296,11 @@ def test_str(self): class TestClearExpired(BaseTestModels): def setUp(self): super().setUp() - # Insert two tokens on database. + # Insert many tokens, both expired and not, and grants. + self.num_tokens = 100 + now = timezone.now() + earlier = now - timedelta(seconds=100) + later = now + timedelta(seconds=100) app = Application.objects.create( name="test_app", redirect_uris="http://localhost http://example.com http://example.org", @@ -302,23 +308,54 @@ def setUp(self): client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, ) - AccessToken.objects.create( - token="555", - expires=timezone.now(), - scope=2, - application=app, - user=self.user, - created=timezone.now(), - updated=timezone.now(), + # make 200 access tokens, half current and half expired. + expired_access_tokens = AccessToken.objects.bulk_create( + AccessToken(token="expired AccessToken {}".format(i), expires=earlier) + for i in range(self.num_tokens) ) - AccessToken.objects.create( - token="666", - expires=timezone.now(), - scope=2, - application=app, - user=self.user, - created=timezone.now(), - updated=timezone.now(), + current_access_tokens = AccessToken.objects.bulk_create( + AccessToken(token=f"current AccessToken {i}", expires=later) for i in range(self.num_tokens) + ) + # Give the first half of the access tokens a refresh token, + # alternating between current and expired ones. + RefreshToken.objects.bulk_create( + RefreshToken( + token=f"expired AT's refresh token {i}", + application=app, + access_token=expired_access_tokens[i].pk, + user=self.user, + ) + for i in range(0, len(expired_access_tokens) // 2, 2) + ) + RefreshToken.objects.bulk_create( + RefreshToken( + token=f"current AT's refresh token {i}", + application=app, + access_token=current_access_tokens[i].pk, + user=self.user, + ) + for i in range(1, len(current_access_tokens) // 2, 2) + ) + # Make some grants, half of which are expired. 
+ Grant.objects.bulk_create( + Grant( + user=self.user, + code=f"old grant code {i}", + application=app, + expires=earlier, + redirect_uri="https://localhost/redirect", + ) + for i in range(self.num_tokens) + ) + Grant.objects.bulk_create( + Grant( + user=self.user, + code=f"new grant code {i}", + application=app, + expires=later, + redirect_uri="https://localhost/redirect", + ) + for i in range(self.num_tokens) ) def test_clear_expired_tokens(self): @@ -333,15 +370,21 @@ def test_clear_expired_tokens_incorect_timetype(self): assert result == "ImproperlyConfigured" def test_clear_expired_tokens_with_tokens(self): - self.client.login(username="test_user", password="123456") - self.oauth2_settings.REFRESH_TOKEN_EXPIRE_SECONDS = 0 - ttokens = AccessToken.objects.count() - expiredt = AccessToken.objects.filter(expires__lte=timezone.now()).count() - assert ttokens == 2 - assert expiredt == 2 + self.oauth2_settings.CLEAR_EXPIRED_TOKENS_BATCH_SIZE = 10 + self.oauth2_settings.CLEAR_EXPIRED_TOKENS_BATCH_INTERVAL = 0.0 + at_count = AccessToken.objects.count() + assert at_count == 2 * self.num_tokens, f"{2 * self.num_tokens} access tokens should exist." + rt_count = RefreshToken.objects.count() + assert rt_count == self.num_tokens // 2, f"{self.num_tokens // 2} refresh tokens should exist." + gt_count = Grant.objects.count() + assert gt_count == self.num_tokens * 2, f"{self.num_tokens * 2} grants should exist." clear_expired() - expiredt = AccessToken.objects.filter(expires__lte=timezone.now()).count() - assert expiredt == 0 + at_count = AccessToken.objects.count() + assert at_count == self.num_tokens, "Half the access tokens should not have been deleted." + rt_count = RefreshToken.objects.count() + assert rt_count == self.num_tokens // 2, "Half of the refresh tokens should have been deleted." + gt_count = Grant.objects.count() + assert gt_count == self.num_tokens, "Half the grants should have been deleted." @pytest.mark.django_db
kivy__kivy-2755
Please distribute pxd files, expose C APIs. I'm writing some Kivy extension code and I want to cimport Kivy's extension types, which is more efficient than going through the Python API, but Kivy doesn't distribute its pxd files to the installation directory. I can set PYTHONPATH to Kivy's source directory and ship the Cython-compiled C file with my library, but it would be better if Kivy distributed the pxd files with it.
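A rough sketch of one way to ship the declaration files, assuming they sit next to the corresponding `.pyx` sources and using the same `distutils` `setup()` call the project's `setup.py` below already relies on; this only illustrates the `package_data` idea, is not the change Kivy ultimately made, and the package list is abbreviated.

```python
from distutils.core import setup

# Illustrative only: include Cython declaration files so downstream code can
# `cimport` kivy's extension types from an installed copy.
setup(
    name="kivy",
    packages=["kivy", "kivy.graphics"],
    package_data={
        "kivy": ["*.pxd", "*.pxi"],
        "kivy.graphics": ["*.pxd", "*.pxi"],
    },
)
```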
[ { "content": "#\n# Kivy - Crossplatform NUI toolkit\n# http://kivy.org/\n#\n\nimport sys\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename\nfrom os import walk, environ\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom collections import OrderedDict\n\nif sys.version > '3':\n\n PY3 = True\nelse:\n PY3 = False\n\n\ndef getoutput(cmd):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n return p.communicate()[0]\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n for token in getoutput(cmd).split():\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_debug'] = False\nc_options['use_glew'] = False\nc_options['use_sdl'] = False\nc_options['use_sdl2'] = False\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_gstreamer'] = False\nc_options['use_avfoundation'] = platform == 'darwin'\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\nhave_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n except ImportError:\n print('\\nCython is missing, its required for compiling kivy !\\n\\n')\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n\nclass KivyBuildExt(build_ext):\n\n def build_extensions(self):\n print('Build configuration is:')\n for opt, value in c_options.items():\n print(' * {0} = {1}'.format(opt, value))\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n print('Generate config.h')\n config_h_fn = expand('graphics', 'config.h')\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n for k, v in c_options.items():\n config_h += '#define __{0} {1}\\n'.format(k.upper(), 
int(v))\n self.update_if_changed(config_h_fn, config_h)\n\n print('Generate config.pxi')\n config_pxi_fn = expand('graphics', 'config.pxi')\n # update the pxi only if the content changed\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n for k, v in c_options.items():\n config_pxi += 'DEF {0} = {1}\\n'.format(k.upper(), int(v))\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n self.update_if_changed(config_pxi_fn, config_pxi)\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. also e.g. we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi'):\n c_options['use_opengl_es2'] = True\nelif platform == 'win32':\n print('Windows platform detected, force GLEW usage.')\n c_options['use_glew'] = True\n c_options['use_opengl_es2'] = False\nelse:\n if c_options['use_opengl_es2'] is None:\n GLES = environ.get('GRAPHICS') == 'GLES'\n OPENGL = environ.get('GRAPHICS') == 'OPENGL'\n if GLES:\n c_options['use_opengl_es2'] = True\n elif OPENGL:\n c_options['use_opengl_es2'] = False\n else:\n # auto detection of GLES headers\n default_header_dirs = ['/usr/include', '/usr/local/include']\n c_options['use_opengl_es2'] = False\n for hdir in default_header_dirs:\n filename = join(hdir, 'GLES2', 'gl2.h')\n if exists(filename):\n c_options['use_opengl_es2'] = True\n print('NOTE: Found GLES 2.0 headers at {0}'.format(\n filename))\n break\n if not c_options['use_opengl_es2']:\n print('NOTE: Not found GLES 2.0 headers at: {}'.format(\n default_header_dirs))\n print(' Please contact us if your distribution '\n 'uses an alternative path for the headers.')\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n print('Activate SDL compilation.')\n c_options['use_ios'] = True\n c_options['use_sdl'] = True\n\n# detect gstreamer/sdl2, only on desktop\nsdl2_flags = {}\nif platform not in ('ios', 'android'):\n 
gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n c_options['use_gstreamer'] = True\n\n # XXX deactivated, until we are able to have sdl2 without using anything\n # related to sdl1. Both must not be used at the same time.\n # sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n # if 'libraries' in sdl2_flags:\n # c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(*args):\n return join(dirname(__file__), 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using MacOSX{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n return flags\n\n\ndef determine_gl_flags():\n flags = {'libraries': []}\n if platform == 'win32':\n flags['libraries'] = ['opengl32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] = ['/usr/local/include']\n flags['extra_link_args'] = ['-L', '/usr/local/lib']\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['extra_link_args'] = ['-L', '/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n 
flags['extra_link_args'] = ['-L', join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = ['/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']\n else:\n flags['libraries'] = ['GL']\n if c_options['use_glew']:\n if platform == 'win32':\n flags['libraries'] += ['glew32']\n else:\n flags['libraries'] += ['GLEW']\n return flags\n\n\ndef determine_sdl():\n flags = {}\n if not c_options['use_sdl']:\n return flags\n\n flags['libraries'] = ['SDL', 'SDL_ttf', 'freetype', 'z', 'bz2']\n flags['include_dirs'] = []\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n\n # Paths as per homebrew (modified formula to use hg checkout)\n if c_options['use_ios']:\n # Note: on IOS, SDL is already loaded by the launcher/main.m\n # So if we add it here, it will just complain about duplicate\n # symbol, cause libSDL.a would be included in main.m binary +\n # text_sdlttf.so\n # At the result, we are linking without SDL explicitly, and add\n # -undefined dynamic_lookup\n # (/tito)\n flags['libraries'] = ['SDL_ttf', 'freetype', 'bz2']\n flags['include_dirs'] += [\n join(kivy_ios_root, 'build', 'include'),\n join(kivy_ios_root, 'build', 'include', 'SDL'),\n join(kivy_ios_root, 'build', 'include', 'freetype')]\n flags['extra_link_args'] += [\n '-L', join(kivy_ios_root, 'build', 'lib'),\n '-undefined', 'dynamic_lookup']\n else:\n flags['include_dirs'] = ['/usr/local/include/SDL']\n flags['extra_link_args'] += ['-L/usr/local/lib/']\n\n if platform == 'ios':\n flags['extra_link_args'] += [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'MobileCoreServices',\n '-framework', 'ImageIO']\n elif platform == 'darwin':\n flags['extra_link_args'] += [\n '-framework', 'ApplicationServices']\n return flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path:\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n flags['include_dirs'] = ([sdl2_path] if sdl2_path else\n ['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['extra_link_args'] += (['-L' + sdl2_path] if sdl2_path else\n ['-L/usr/local/lib/'])\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' 
kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'gl_redirect.h': ['common_subset.h'],\n 'c_opengl.pxd': ['config.pxi', 'gl_redirect.h'],\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': [\n 'instructions.pxd', 'texture.pxd', 'vbo.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd'],\n 'c_opengl_debug.pyx': ['common.pxi', 'c_opengl.pxd'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['c_opengl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd',\n 'c_opengl_debug.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': ['config.pxi', 'common.pxi', 'c_opengl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': ['opengl_utils_def.pxi', 'c_opengl.pxd'],\n 'shader.pxd': ['c_opengl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['c_opengl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'c_opengl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'c_opengl_debug.pxd', 'context.pxd',\n 'instructions.pxd', 'shader.pxd'],\n 'vertex.pxd': ['c_opengl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd', 'texture.pxd',\n 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': base_flags,\n 'graphics/context.pyx': merge(base_flags, gl_flags),\n 'graphics/c_opengl_debug.pyx': merge(base_flags, gl_flags),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags),\n 'graphics/shader.pyx': merge(base_flags, gl_flags),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/texture.pyx': merge(base_flags, gl_flags),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags),\n 'graphics/vertex.pyx': merge(base_flags, 
gl_flags),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags),\n 'core/text/text_layout.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags)\n}\n\nif c_options['use_sdl']:\n sdl_flags = determine_sdl()\n sources['core/window/sdl.pyx'] = merge(\n base_flags, gl_flags, sdl_flags)\n sources['core/text/text_sdlttf.pyx'] = merge(\n base_flags, gl_flags, sdl_flags)\n sources['core/audio/audio_sdl.pyx'] = merge(\n base_flags, sdl_flags)\n\nif c_options['use_sdl2']:\n sdl2_flags = determine_sdl2()\n if sdl2_flags:\n sources['core/window/_window_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/image/_img_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/text/_text_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/clipboard/_clipboard_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n #'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': ['Xrender', 'X11']})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n return [expand('graphics', x) for x in deps]\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if 
environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(pyx)\n depends = [expand(x) for x in flags.pop('depends', [])]\n c_depends = [expand(x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(module_name,\n [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\next_modules = get_extensions_from_sources(sources)\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if not directory in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\n# -----------------------------------------------------------------------------\n# setup !\nsetup(\n name='Kivy',\n version=kivy.__version__,\n author='Kivy Crew',\n author_email='[email protected]',\n url='http://kivy.org/',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.effects',\n 'kivy.ext',\n 'kivy.graphics',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.tools.extensions',\n 'kivy.uix', ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tools/highlight/*.vim',\n 'tools/highlight/*.el',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh']},\n data_files=list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information 
Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=['Kivy-Garden==0.1.1'])\n", "path": "setup.py" } ]
[ { "content": "#\n# Kivy - Crossplatform NUI toolkit\n# http://kivy.org/\n#\n\nimport sys\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename\nfrom os import walk, environ\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom collections import OrderedDict\n\nif sys.version > '3':\n\n PY3 = True\nelse:\n PY3 = False\n\n\ndef getoutput(cmd):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n return p.communicate()[0]\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n for token in getoutput(cmd).split():\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_debug'] = False\nc_options['use_glew'] = False\nc_options['use_sdl'] = False\nc_options['use_sdl2'] = False\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_gstreamer'] = False\nc_options['use_avfoundation'] = platform == 'darwin'\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\nhave_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n except ImportError:\n print('\\nCython is missing, its required for compiling kivy !\\n\\n')\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n\nclass KivyBuildExt(build_ext):\n\n def build_extensions(self):\n print('Build configuration is:')\n for opt, value in c_options.items():\n print(' * {0} = {1}'.format(opt, value))\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n print('Generate config.h')\n config_h_fn = expand('graphics', 'config.h')\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n for k, v in c_options.items():\n config_h += '#define __{0} {1}\\n'.format(k.upper(), 
int(v))\n self.update_if_changed(config_h_fn, config_h)\n\n print('Generate config.pxi')\n config_pxi_fn = expand('graphics', 'config.pxi')\n # update the pxi only if the content changed\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n for k, v in c_options.items():\n config_pxi += 'DEF {0} = {1}\\n'.format(k.upper(), int(v))\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n self.update_if_changed(config_pxi_fn, config_pxi)\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. also e.g. we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi'):\n c_options['use_opengl_es2'] = True\nelif platform == 'win32':\n print('Windows platform detected, force GLEW usage.')\n c_options['use_glew'] = True\n c_options['use_opengl_es2'] = False\nelse:\n if c_options['use_opengl_es2'] is None:\n GLES = environ.get('GRAPHICS') == 'GLES'\n OPENGL = environ.get('GRAPHICS') == 'OPENGL'\n if GLES:\n c_options['use_opengl_es2'] = True\n elif OPENGL:\n c_options['use_opengl_es2'] = False\n else:\n # auto detection of GLES headers\n default_header_dirs = ['/usr/include', '/usr/local/include']\n c_options['use_opengl_es2'] = False\n for hdir in default_header_dirs:\n filename = join(hdir, 'GLES2', 'gl2.h')\n if exists(filename):\n c_options['use_opengl_es2'] = True\n print('NOTE: Found GLES 2.0 headers at {0}'.format(\n filename))\n break\n if not c_options['use_opengl_es2']:\n print('NOTE: Not found GLES 2.0 headers at: {}'.format(\n default_header_dirs))\n print(' Please contact us if your distribution '\n 'uses an alternative path for the headers.')\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n print('Activate SDL compilation.')\n c_options['use_ios'] = True\n c_options['use_sdl'] = True\n\n# detect gstreamer/sdl2, only on desktop\nsdl2_flags = {}\nif platform not in ('ios', 'android'):\n 
gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n c_options['use_gstreamer'] = True\n\n # XXX deactivated, until we are able to have sdl2 without using anything\n # related to sdl1. Both must not be used at the same time.\n # sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n # if 'libraries' in sdl2_flags:\n # c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(*args):\n return join(dirname(__file__), 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using MacOSX{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n return flags\n\n\ndef determine_gl_flags():\n flags = {'libraries': []}\n if platform == 'win32':\n flags['libraries'] = ['opengl32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] = ['/usr/local/include']\n flags['extra_link_args'] = ['-L', '/usr/local/lib']\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['extra_link_args'] = ['-L', '/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n 
flags['extra_link_args'] = ['-L', join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = ['/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']\n else:\n flags['libraries'] = ['GL']\n if c_options['use_glew']:\n if platform == 'win32':\n flags['libraries'] += ['glew32']\n else:\n flags['libraries'] += ['GLEW']\n return flags\n\n\ndef determine_sdl():\n flags = {}\n if not c_options['use_sdl']:\n return flags\n\n flags['libraries'] = ['SDL', 'SDL_ttf', 'freetype', 'z', 'bz2']\n flags['include_dirs'] = []\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n\n # Paths as per homebrew (modified formula to use hg checkout)\n if c_options['use_ios']:\n # Note: on IOS, SDL is already loaded by the launcher/main.m\n # So if we add it here, it will just complain about duplicate\n # symbol, cause libSDL.a would be included in main.m binary +\n # text_sdlttf.so\n # At the result, we are linking without SDL explicitly, and add\n # -undefined dynamic_lookup\n # (/tito)\n flags['libraries'] = ['SDL_ttf', 'freetype', 'bz2']\n flags['include_dirs'] += [\n join(kivy_ios_root, 'build', 'include'),\n join(kivy_ios_root, 'build', 'include', 'SDL'),\n join(kivy_ios_root, 'build', 'include', 'freetype')]\n flags['extra_link_args'] += [\n '-L', join(kivy_ios_root, 'build', 'lib'),\n '-undefined', 'dynamic_lookup']\n else:\n flags['include_dirs'] = ['/usr/local/include/SDL']\n flags['extra_link_args'] += ['-L/usr/local/lib/']\n\n if platform == 'ios':\n flags['extra_link_args'] += [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'MobileCoreServices',\n '-framework', 'ImageIO']\n elif platform == 'darwin':\n flags['extra_link_args'] += [\n '-framework', 'ApplicationServices']\n return flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path:\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n flags['include_dirs'] = ([sdl2_path] if sdl2_path else\n ['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['extra_link_args'] += (['-L' + sdl2_path] if sdl2_path else\n ['-L/usr/local/lib/'])\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' 
kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'gl_redirect.h': ['common_subset.h'],\n 'c_opengl.pxd': ['config.pxi', 'gl_redirect.h'],\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': [\n 'instructions.pxd', 'texture.pxd', 'vbo.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd'],\n 'c_opengl_debug.pyx': ['common.pxi', 'c_opengl.pxd'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['c_opengl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd',\n 'c_opengl_debug.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': ['config.pxi', 'common.pxi', 'c_opengl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': ['opengl_utils_def.pxi', 'c_opengl.pxd'],\n 'shader.pxd': ['c_opengl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['c_opengl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'c_opengl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'c_opengl_debug.pxd', 'context.pxd',\n 'instructions.pxd', 'shader.pxd'],\n 'vertex.pxd': ['c_opengl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd',\n 'c_opengl.pxd', 'c_opengl_debug.pxd', 'texture.pxd',\n 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': base_flags,\n 'graphics/context.pyx': merge(base_flags, gl_flags),\n 'graphics/c_opengl_debug.pyx': merge(base_flags, gl_flags),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags),\n 'graphics/shader.pyx': merge(base_flags, gl_flags),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags),\n 'graphics/texture.pyx': merge(base_flags, gl_flags),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags),\n 'graphics/vertex.pyx': merge(base_flags, 
gl_flags),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags),\n 'core/text/text_layout.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags)\n}\n\nif c_options['use_sdl']:\n sdl_flags = determine_sdl()\n sources['core/window/sdl.pyx'] = merge(\n base_flags, gl_flags, sdl_flags)\n sources['core/text/text_sdlttf.pyx'] = merge(\n base_flags, gl_flags, sdl_flags)\n sources['core/audio/audio_sdl.pyx'] = merge(\n base_flags, sdl_flags)\n\nif c_options['use_sdl2']:\n sdl2_flags = determine_sdl2()\n if sdl2_flags:\n sources['core/window/_window_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/image/_img_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/text/_text_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n sources['core/clipboard/_clipboard_sdl2.pyx'] = merge(\n base_flags, gl_flags, sdl2_flags)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n #'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': ['Xrender', 'X11']})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n return [expand('graphics', x) for x in deps]\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if 
environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(pyx)\n depends = [expand(x) for x in flags.pop('depends', [])]\n c_depends = [expand(x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(module_name,\n [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\next_modules = get_extensions_from_sources(sources)\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if not directory in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\n# -----------------------------------------------------------------------------\n# setup !\nsetup(\n name='Kivy',\n version=kivy.__version__,\n author='Kivy Crew',\n author_email='[email protected]',\n url='http://kivy.org/',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.effects',\n 'kivy.ext',\n 'kivy.graphics',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.tools.extensions',\n 'kivy.uix', ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n '*.pxd',\n 'core/text/*.pxd',\n 'graphics/*.pxd',\n 'lib/vidcore_lite/*.pxd',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tools/highlight/*.vim',\n 'tools/highlight/*.el',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh']},\n data_files=list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: 
Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=['Kivy-Garden==0.1.1'])\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 2b7378b4de..f01c59ad5e 100644 --- a/setup.py +++ b/setup.py @@ -727,6 +727,10 @@ def get_extensions_from_sources(sources): 'kivy.uix', ], package_dir={'kivy': 'kivy'}, package_data={'kivy': [ + '*.pxd', + 'core/text/*.pxd', + 'graphics/*.pxd', + 'lib/vidcore_lite/*.pxd', 'data/*.kv', 'data/*.json', 'data/fonts/*.ttf',
google__TensorNetwork-608
Some doc links to GitHub yield errors\nThe "Edit on GitHub" links in the top right of some doc pages yield 404 errors when followed, for example [tn.Node](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.Node.html) and [tn.contractors.optimal](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.contractors.optimal.html#tensornetwork.contractors.optimal). The links at the top of list pages work, for example [common functions](https://tensornetwork.readthedocs.io/en/latest/network.html) and [contractors](https://tensornetwork.readthedocs.io/en/latest/contractors.html). Possibly those links are meant to edit the doc pages themselves, rather than the source code?
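For context, a minimal sketch of the Sphinx configuration change that the files and diff below apply in `docs/conf.py`: autosummary keeps generating one stub page per API symbol, but it stops overwriting stubs that are checked into the repository, so the committed files under `docs/stubs/` are the pages the hosted docs actually build and link to. This is only an illustrative excerpt of `conf.py`, not the full file.

```python
# docs/conf.py (excerpt): illustrative sketch of the autosummary settings
# touched by the patch below; all surrounding options are unchanged.

# Still generate one stub .rst page per documented API symbol.
autosummary_generate = True

# Do not overwrite stub files that already exist in the source tree, so the
# stubs committed under docs/stubs/ are the pages actually built and linked.
autosummary_generate_overwrite = False
```

With the stubs committed (and `docs/stubs` removed from `.gitignore`, as in the diff below), the per-symbol pages exist as real files in the repository, which is what lets the theme's "Edit on GitHub" links resolve instead of returning 404.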
[ { "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'TensorNetwork'\ncopyright = '2019, The TensorNetwork Authors'\nauthor = 'The TensorNetwork Authors'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\nautosummary_generate = True\nnapolean_use_rtype = False\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'logo_only': True,\n}\nhtml_logo = '_static/tensornetwork_logo.jpg'\nmaster_doc = 'index'\ndefault_role = 'py:obj'\nautodoc_default_flags = ['members']\nautosummary_generate = True\n", "path": "docs/conf.py" } ]
[ { "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'TensorNetwork'\ncopyright = '2019, The TensorNetwork Authors'\nauthor = 'The TensorNetwork Authors'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\nautosummary_generate = True\nautosummary_generate_overwrite = False\nnapolean_use_rtype = False\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'logo_only': True,\n}\nhtml_logo = '_static/tensornetwork_logo.jpg'\nmaster_doc = 'index'\ndefault_role = 'py:obj'\nautodoc_default_flags = ['members']\nautosummary_generate = True\n", "path": "docs/conf.py" } ]
diff --git a/.gitignore b/.gitignore index 87f2d2db9..f9b513626 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,6 @@ var/ .installed.cfg *.egg docs/clean -docs/stubs # Installer logs pip-log.txt diff --git a/docs/conf.py b/docs/conf.py index 9fa35a0bf..b3b093c60 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -43,6 +43,7 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] autosummary_generate = True +autosummary_generate_overwrite = False napolean_use_rtype = False # -- Options for HTML output ------------------------------------------------- diff --git a/docs/stubs/tensornetwork.BaseCharge.rst b/docs/stubs/tensornetwork.BaseCharge.rst new file mode 100644 index 000000000..bb1be043b --- /dev/null +++ b/docs/stubs/tensornetwork.BaseCharge.rst @@ -0,0 +1,46 @@ +tensornetwork.BaseCharge +======================== + +.. currentmodule:: tensornetwork + +.. autoclass:: BaseCharge + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseCharge.__init__ + ~BaseCharge.copy + ~BaseCharge.dual + ~BaseCharge.dual_charges + ~BaseCharge.fuse + ~BaseCharge.identity_charge + ~BaseCharge.intersect + ~BaseCharge.isin + ~BaseCharge.random + ~BaseCharge.reduce + ~BaseCharge.sort_unique_charges + ~BaseCharge.unique + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseCharge.charges + ~BaseCharge.degeneracies + ~BaseCharge.dim + ~BaseCharge.dtype + ~BaseCharge.identity_charges + ~BaseCharge.label_dtype + ~BaseCharge.num_symmetries + ~BaseCharge.num_unique + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.BaseNode.rst b/docs/stubs/tensornetwork.BaseNode.rst new file mode 100644 index 000000000..2ec91a19a --- /dev/null +++ b/docs/stubs/tensornetwork.BaseNode.rst @@ -0,0 +1,54 @@ +tensornetwork.BaseNode +====================== + +.. currentmodule:: tensornetwork + +.. autoclass:: BaseNode + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseNode.__init__ + ~BaseNode.add_axis_names + ~BaseNode.add_edge + ~BaseNode.copy + ~BaseNode.disable + ~BaseNode.fresh_edges + ~BaseNode.get_all_dangling + ~BaseNode.get_all_edges + ~BaseNode.get_all_nondangling + ~BaseNode.get_axis_number + ~BaseNode.get_dimension + ~BaseNode.get_edge + ~BaseNode.get_rank + ~BaseNode.get_tensor + ~BaseNode.has_dangling_edge + ~BaseNode.has_nondangling_edge + ~BaseNode.reorder_axes + ~BaseNode.reorder_edges + ~BaseNode.set_name + ~BaseNode.set_tensor + ~BaseNode.tensor_from_edge_order + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseNode.axis_names + ~BaseNode.dtype + ~BaseNode.edges + ~BaseNode.name + ~BaseNode.shape + ~BaseNode.sparse_shape + ~BaseNode.tensor + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.BlockSparseTensor.rst b/docs/stubs/tensornetwork.BlockSparseTensor.rst new file mode 100644 index 000000000..f47814553 --- /dev/null +++ b/docs/stubs/tensornetwork.BlockSparseTensor.rst @@ -0,0 +1,48 @@ +tensornetwork.BlockSparseTensor +=============================== + +.. currentmodule:: tensornetwork + +.. autoclass:: BlockSparseTensor + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~BlockSparseTensor.__init__ + ~BlockSparseTensor.conj + ~BlockSparseTensor.copy + ~BlockSparseTensor.fromdense + ~BlockSparseTensor.ones + ~BlockSparseTensor.randn + ~BlockSparseTensor.random + ~BlockSparseTensor.reshape + ~BlockSparseTensor.todense + ~BlockSparseTensor.transpose + ~BlockSparseTensor.transpose_data + ~BlockSparseTensor.zeros + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~BlockSparseTensor.T + ~BlockSparseTensor.charges + ~BlockSparseTensor.dtype + ~BlockSparseTensor.flat_charges + ~BlockSparseTensor.flat_flows + ~BlockSparseTensor.flat_order + ~BlockSparseTensor.flows + ~BlockSparseTensor.ndim + ~BlockSparseTensor.shape + ~BlockSparseTensor.sparse_shape + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.ChargeArray.rst b/docs/stubs/tensornetwork.ChargeArray.rst new file mode 100644 index 000000000..4dad95d54 --- /dev/null +++ b/docs/stubs/tensornetwork.ChargeArray.rst @@ -0,0 +1,43 @@ +tensornetwork.ChargeArray +========================= + +.. currentmodule:: tensornetwork + +.. autoclass:: ChargeArray + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~ChargeArray.__init__ + ~ChargeArray.conj + ~ChargeArray.random + ~ChargeArray.reshape + ~ChargeArray.todense + ~ChargeArray.transpose + ~ChargeArray.transpose_data + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ChargeArray.T + ~ChargeArray.charges + ~ChargeArray.dtype + ~ChargeArray.flat_charges + ~ChargeArray.flat_flows + ~ChargeArray.flat_order + ~ChargeArray.flows + ~ChargeArray.ndim + ~ChargeArray.shape + ~ChargeArray.sparse_shape + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.CopyNode.rst b/docs/stubs/tensornetwork.CopyNode.rst new file mode 100644 index 000000000..445c64ed0 --- /dev/null +++ b/docs/stubs/tensornetwork.CopyNode.rst @@ -0,0 +1,57 @@ +tensornetwork.CopyNode +====================== + +.. currentmodule:: tensornetwork + +.. autoclass:: CopyNode + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~CopyNode.__init__ + ~CopyNode.add_axis_names + ~CopyNode.add_edge + ~CopyNode.compute_contracted_tensor + ~CopyNode.copy + ~CopyNode.disable + ~CopyNode.fresh_edges + ~CopyNode.get_all_dangling + ~CopyNode.get_all_edges + ~CopyNode.get_all_nondangling + ~CopyNode.get_axis_number + ~CopyNode.get_dimension + ~CopyNode.get_edge + ~CopyNode.get_partners + ~CopyNode.get_rank + ~CopyNode.get_tensor + ~CopyNode.has_dangling_edge + ~CopyNode.has_nondangling_edge + ~CopyNode.make_copy_tensor + ~CopyNode.reorder_axes + ~CopyNode.reorder_edges + ~CopyNode.set_name + ~CopyNode.set_tensor + ~CopyNode.tensor_from_edge_order + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~CopyNode.axis_names + ~CopyNode.dtype + ~CopyNode.edges + ~CopyNode.name + ~CopyNode.shape + ~CopyNode.sparse_shape + ~CopyNode.tensor + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.Edge.rst b/docs/stubs/tensornetwork.Edge.rst new file mode 100644 index 000000000..414559266 --- /dev/null +++ b/docs/stubs/tensornetwork.Edge.rst @@ -0,0 +1,41 @@ +tensornetwork.Edge +================== + +.. currentmodule:: tensornetwork + +.. autoclass:: Edge + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Edge.__init__ + ~Edge.disable + ~Edge.disconnect + ~Edge.get_nodes + ~Edge.is_being_used + ~Edge.is_dangling + ~Edge.is_trace + ~Edge.set_name + ~Edge.update_axis + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Edge.axis1 + ~Edge.axis2 + ~Edge.dimension + ~Edge.name + ~Edge.node1 + ~Edge.node2 + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.FiniteMPS.rst b/docs/stubs/tensornetwork.FiniteMPS.rst new file mode 100644 index 000000000..98b173d46 --- /dev/null +++ b/docs/stubs/tensornetwork.FiniteMPS.rst @@ -0,0 +1,44 @@ +tensornetwork.FiniteMPS +======================= + +.. currentmodule:: tensornetwork + +.. 
autoclass:: FiniteMPS + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~FiniteMPS.__init__ + ~FiniteMPS.apply_one_site_gate + ~FiniteMPS.apply_transfer_operator + ~FiniteMPS.apply_two_site_gate + ~FiniteMPS.canonicalize + ~FiniteMPS.check_canonical + ~FiniteMPS.check_orthonormality + ~FiniteMPS.get_tensor + ~FiniteMPS.left_envs + ~FiniteMPS.measure_local_operator + ~FiniteMPS.measure_two_body_correlator + ~FiniteMPS.position + ~FiniteMPS.random + ~FiniteMPS.right_envs + ~FiniteMPS.save + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~FiniteMPS.bond_dimensions + ~FiniteMPS.dtype + ~FiniteMPS.physical_dimensions + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.Index.rst b/docs/stubs/tensornetwork.Index.rst new file mode 100644 index 000000000..e8abc8bfd --- /dev/null +++ b/docs/stubs/tensornetwork.Index.rst @@ -0,0 +1,33 @@ +tensornetwork.Index +=================== + +.. currentmodule:: tensornetwork + +.. autoclass:: Index + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Index.__init__ + ~Index.copy + ~Index.flip_flow + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Index.charges + ~Index.dim + ~Index.flat_charges + ~Index.flat_flows + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.InfiniteMPS.rst b/docs/stubs/tensornetwork.InfiniteMPS.rst new file mode 100644 index 000000000..98b0b8f58 --- /dev/null +++ b/docs/stubs/tensornetwork.InfiniteMPS.rst @@ -0,0 +1,46 @@ +tensornetwork.InfiniteMPS +========================= + +.. currentmodule:: tensornetwork + +.. autoclass:: InfiniteMPS + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~InfiniteMPS.__init__ + ~InfiniteMPS.apply_one_site_gate + ~InfiniteMPS.apply_transfer_operator + ~InfiniteMPS.apply_two_site_gate + ~InfiniteMPS.canonicalize + ~InfiniteMPS.check_canonical + ~InfiniteMPS.check_orthonormality + ~InfiniteMPS.get_tensor + ~InfiniteMPS.left_envs + ~InfiniteMPS.measure_local_operator + ~InfiniteMPS.measure_two_body_correlator + ~InfiniteMPS.position + ~InfiniteMPS.random + ~InfiniteMPS.right_envs + ~InfiniteMPS.save + ~InfiniteMPS.transfer_matrix_eigs + ~InfiniteMPS.unit_cell_transfer_operator + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~InfiniteMPS.bond_dimensions + ~InfiniteMPS.dtype + ~InfiniteMPS.physical_dimensions + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.Node.rst b/docs/stubs/tensornetwork.Node.rst new file mode 100644 index 000000000..c23e6368b --- /dev/null +++ b/docs/stubs/tensornetwork.Node.rst @@ -0,0 +1,55 @@ +tensornetwork.Node +================== + +.. currentmodule:: tensornetwork + +.. autoclass:: Node + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Node.__init__ + ~Node.add_axis_names + ~Node.add_edge + ~Node.copy + ~Node.disable + ~Node.fresh_edges + ~Node.get_all_dangling + ~Node.get_all_edges + ~Node.get_all_nondangling + ~Node.get_axis_number + ~Node.get_dimension + ~Node.get_edge + ~Node.get_rank + ~Node.get_tensor + ~Node.has_dangling_edge + ~Node.has_nondangling_edge + ~Node.op_protection + ~Node.reorder_axes + ~Node.reorder_edges + ~Node.set_name + ~Node.set_tensor + ~Node.tensor_from_edge_order + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Node.axis_names + ~Node.dtype + ~Node.edges + ~Node.name + ~Node.shape + ~Node.sparse_shape + ~Node.tensor + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.NodeCollection.rst b/docs/stubs/tensornetwork.NodeCollection.rst new file mode 100644 index 000000000..ca83f4336 --- /dev/null +++ b/docs/stubs/tensornetwork.NodeCollection.rst @@ -0,0 +1,23 @@ +tensornetwork.NodeCollection +============================ + +.. currentmodule:: tensornetwork + +.. autoclass:: NodeCollection + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~NodeCollection.__init__ + ~NodeCollection.add + + + + + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.U1Charge.rst b/docs/stubs/tensornetwork.U1Charge.rst new file mode 100644 index 000000000..52c614402 --- /dev/null +++ b/docs/stubs/tensornetwork.U1Charge.rst @@ -0,0 +1,46 @@ +tensornetwork.U1Charge +====================== + +.. currentmodule:: tensornetwork + +.. autoclass:: U1Charge + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~U1Charge.__init__ + ~U1Charge.copy + ~U1Charge.dual + ~U1Charge.dual_charges + ~U1Charge.fuse + ~U1Charge.identity_charge + ~U1Charge.intersect + ~U1Charge.isin + ~U1Charge.random + ~U1Charge.reduce + ~U1Charge.sort_unique_charges + ~U1Charge.unique + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~U1Charge.charges + ~U1Charge.degeneracies + ~U1Charge.dim + ~U1Charge.dtype + ~U1Charge.identity_charges + ~U1Charge.label_dtype + ~U1Charge.num_symmetries + ~U1Charge.num_unique + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.Z2Charge.rst b/docs/stubs/tensornetwork.Z2Charge.rst new file mode 100644 index 000000000..1ae534f76 --- /dev/null +++ b/docs/stubs/tensornetwork.Z2Charge.rst @@ -0,0 +1,46 @@ +tensornetwork.Z2Charge +====================== + +.. currentmodule:: tensornetwork + +.. autoclass:: Z2Charge + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Z2Charge.__init__ + ~Z2Charge.copy + ~Z2Charge.dual + ~Z2Charge.dual_charges + ~Z2Charge.fuse + ~Z2Charge.identity_charge + ~Z2Charge.intersect + ~Z2Charge.isin + ~Z2Charge.random + ~Z2Charge.reduce + ~Z2Charge.sort_unique_charges + ~Z2Charge.unique + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Z2Charge.charges + ~Z2Charge.degeneracies + ~Z2Charge.dim + ~Z2Charge.dtype + ~Z2Charge.identity_charges + ~Z2Charge.label_dtype + ~Z2Charge.num_symmetries + ~Z2Charge.num_unique + + \ No newline at end of file diff --git a/docs/stubs/tensornetwork.ZNCharge.rst b/docs/stubs/tensornetwork.ZNCharge.rst new file mode 100644 index 000000000..9ab3d484c --- /dev/null +++ b/docs/stubs/tensornetwork.ZNCharge.rst @@ -0,0 +1,6 @@ +tensornetwork.ZNCharge +====================== + +.. currentmodule:: tensornetwork + +.. autofunction:: ZNCharge \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.auto.rst b/docs/stubs/tensornetwork.contractors.auto.rst new file mode 100644 index 000000000..318d50664 --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.auto.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.auto +============================== + +.. currentmodule:: tensornetwork.contractors + +.. 
autofunction:: auto \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.branch.rst b/docs/stubs/tensornetwork.contractors.branch.rst new file mode 100644 index 000000000..fab475ce5 --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.branch.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.branch +================================ + +.. currentmodule:: tensornetwork.contractors + +.. autofunction:: branch \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.bucket.rst b/docs/stubs/tensornetwork.contractors.bucket.rst new file mode 100644 index 000000000..a6c220ea9 --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.bucket.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.bucket +================================ + +.. currentmodule:: tensornetwork.contractors + +.. autofunction:: bucket \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.custom.rst b/docs/stubs/tensornetwork.contractors.custom.rst new file mode 100644 index 000000000..96ec1952b --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.custom.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.custom +================================ + +.. currentmodule:: tensornetwork.contractors + +.. autofunction:: custom \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.greedy.rst b/docs/stubs/tensornetwork.contractors.greedy.rst new file mode 100644 index 000000000..396a55624 --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.greedy.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.greedy +================================ + +.. currentmodule:: tensornetwork.contractors + +.. autofunction:: greedy \ No newline at end of file diff --git a/docs/stubs/tensornetwork.contractors.optimal.rst b/docs/stubs/tensornetwork.contractors.optimal.rst new file mode 100644 index 000000000..3a6e49e7a --- /dev/null +++ b/docs/stubs/tensornetwork.contractors.optimal.rst @@ -0,0 +1,6 @@ +tensornetwork.contractors.optimal +================================= + +.. currentmodule:: tensornetwork.contractors + +.. autofunction:: optimal \ No newline at end of file
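For context on the conf.py change above (an editorial note, not part of the patch): `autosummary_generate_overwrite = False` tells Sphinx's autosummary extension to keep hand-committed stub pages, such as the docs/stubs/*.rst files added here, instead of regenerating them on every build. A minimal conf.py fragment showing the combination (a generic sketch, not the project's actual configuration):

```python
# Sphinx conf.py fragment (sketch): generate any missing autosummary stub
# pages, but never overwrite the ones committed under docs/stubs/.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary']
autosummary_generate = True
autosummary_generate_overwrite = False  # available in recent Sphinx releases
```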
vaexio__vaex-1091
[BUG-REPORT] Can't Replace Values in Vaex 4.0.0a4 **Description** Upgrading from previous versions (I cloned the master branch a few weeks ago and installed), I am no longer able to replace values in a dataframe. See below for an example and the traceback. **Software information** - Vaex version (`import vaex; vaex.__version__)`: 'vaex': '4.0.0a4', 'vaex-core': '4.0.0a9', 'vaex-viz': '0.5.0.dev1', 'vaex-hdf5': '0.7.0a4', 'vaex-server': '0.4.0.dev1', 'vaex-astro': '0.8.0.dev1', 'vaex-jupyter': '0.6.0.dev1', 'vaex-ml': '0.11.0a4' - Vaex was installed via: pip / conda-forge / from source: pip install vaex=4.0.0a4 - OS: Windows 10 **Additional information** ``` import numpy as np import vaex dt = vaex.from_arrays( col1 = np.array(['test', '', 'test2', ''], dtype=str) ) dt['col1'] = dt.func.where(dt['col1'] == '', 'fill', dt['col1']) # errors here ``` <details> <summary>Traceback</summary> ``` ERROR:MainThread:vaex:error evaluating: col1 at rows 0-4 Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1977, in data_type data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 3551, in table_part values = dict(zip(column_names, df.evaluate(column_names))) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5441, in _evaluate_implementation dtypes[expression] = dtype = df.data_type(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1979, in data_type data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1977, in data_type data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 3556, in table_part values[name] = df.evaluate(name) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5441, in _evaluate_implementation dtypes[expression] = dtype = df.data_type(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1979, in data_type data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
ERROR:MainThread:vaex:error evaluating: col1 at rows 0-4 Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1977, in data_type data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 3551, in table_part values = dict(zip(column_names, df.evaluate(column_names))) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5441, in _evaluate_implementation dtypes[expression] = dtype = df.data_type(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1979, in data_type data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1977, in data_type data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 161, in __getitem__ raise KeyError("Unknown variables or column: %r" % (variable,)) KeyError: 'Unknown variables or column: "where(str_equals(__col1, \'\'), \'fill\', __col1)"' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 3556, in table_part values[name] = df.evaluate(name) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5441, in _evaluate_implementation dtypes[expression] = dtype = df.data_type(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 1979, in data_type data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 2620, in evaluate return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\dataframe.py", line 5528, in _evaluate_implementation value = scope.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 102, in evaluate result = self[expression] File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 153, in __getitem__ values = self.evaluate(expression) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\scopes.py", line 108, in evaluate result = eval(expression, expression_namespace, self) File "<string>", line 1, in <module> File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 149, in wrapper result = f(*args, **kwargs) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\functions.py", line 165, in wrapper result = arg.add_missing(result) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 41, in add_missing ar = combine_missing(ar, self._array) File "C:\Users\kmcentush\Miniconda3\envs\api\lib\site-packages\vaex\arrow\numpy_dispatch.py", line 20, in combine_missing return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) File "pyarrow\array.pxi", line 910, in pyarrow.lib.Array.from_buffers ValueError: Type's expected number of buffers (3) did not match the passed number (2). ``` </details>
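An editorial note on the root cause (not part of the report): the failing call rebuilds an Arrow array from exactly two buffers, which only holds for primitive types. A `pa.string()` array carries three buffers (validity, offsets, data), so `combine_missing()` cannot reconstruct it that way. A minimal sketch, assuming only that pyarrow is installed:

```python
import pyarrow as pa

num = pa.array([1, 2, None])      # primitive: validity + values         -> 2 buffers
txt = pa.array(['a', '', None])   # string:    validity + offsets + data -> 3 buffers
print(len(num.buffers()), len(txt.buffers()))  # 2 3

# Rebuilding the primitive array from two buffers works:
pa.Array.from_buffers(num.type, len(num), [num.buffers()[0], num.buffers()[1]])

# The same call on the string array raises the ValueError from the traceback
# above, because the string type expects all three buffers:
# pa.Array.from_buffers(txt.type, len(txt), [txt.buffers()[0], txt.buffers()[1]])

# Forwarding every value buffer instead, e.g. [validity] + txt.buffers()[1:],
# keeps the call valid for both primitive and variable-width types.
```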
[ { "content": "import numpy as np\nimport pyarrow as pa\nimport pyarrow.compute as pc\nimport vaex\nfrom ..expression import _binary_ops, _unary_ops, reversable\n\n\ndef combine_missing(a, b):\n assert a.offset == 0\n if a.null_count > 0 or b.null_count > 0:\n # not optimal\n nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))\n assert nulls.offset == 0\n nulls_buffer = nulls.buffers()[1]\n # this is not the case: no reason why it should be (TODO: open arrow issue)\n # assert nulls.buffers()[0] is None\n else:\n nulls_buffer = None\n buffers = a.buffers()\n return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]])\n\nclass NumpyDispatch:\n def __init__(self, ar):\n self._array = ar\n if isinstance(ar, vaex.column.ColumnStringArrow):\n ar = pa.array(ar)\n if isinstance(ar, np.ndarray):\n self._numpy_array = ar\n self._arrow_array = None\n elif isinstance(ar, vaex.array_types.supported_arrow_array_types):\n self._numpy_array = None\n self._arrow_array = ar\n else:\n raise TypeError(f'Only support numpy and arrow, not {type(ar)}')\n\n def add_missing(self, ar):\n if isinstance(ar, np.ndarray):\n # if we are an arrow array, we upgrade ar to one\n if isinstance(self._array, vaex.array_types.supported_arrow_array_types):\n ar = vaex.array_types.to_arrow(ar)\n ar = combine_missing(ar, self._array)\n # else: both numpy, handled by numpy\n else:\n if isinstance(self._array, vaex.array_types.supported_arrow_array_types):\n ar = combine_missing(ar, self._array)\n # else: was numpy, handled by numpy\n return ar\n\n\n @property\n def numpy_array(self):\n if self._numpy_array is None:\n import vaex.arrow.convert\n arrow_array = self._arrow_array\n arrow_array = vaex.arrow.convert.ensure_not_chunked(arrow_array)\n buffers = arrow_array.buffers()\n # for math, we don't care about the nulls\n if buffers[0] is not None:\n buffers[0] = None\n arrow_array = pa.Array.from_buffers(arrow_array.type, len(arrow_array), buffers, offset=arrow_array.offset)\n self._numpy_array = vaex.array_types.to_numpy(arrow_array)\n return self._numpy_array\n\n @property\n def arrow_array(self):\n if self._arrow_array is None:\n # convert lazily, since not all arrow arrays (e.g. lists) can be converted\n if self._arrow_array is None:\n self._arrow_array = vaex.array_types.to_arrow(self._numpy_array)\n return self._arrow_array\n\n\nfor op in _binary_ops:\n def closure(op=op):\n def operator(a, b):\n a_data = a\n b_data = b\n if isinstance(a, NumpyDispatch):\n a_data = a.numpy_array\n if isinstance(b, NumpyDispatch):\n b_data = b.numpy_array\n if op['name'] == 'eq' and (vaex.array_types.is_string(a_data) or vaex.array_types.is_string(b_data)):\n result_data = vaex.functions.str_equals(a_data, b_data)\n else:\n result_data = op['op'](a_data, b_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n if isinstance(b, NumpyDispatch):\n result_data = b.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n # to support e.g. (1 + ...) # to support e.g. 
(1 + ...)\n if op['name'] in reversable:\n def closure(op=op):\n def operator(b, a):\n a_data = a\n b_data = b\n if isinstance(a, NumpyDispatch):\n a_data = a.numpy_array\n if isinstance(b, NumpyDispatch):\n b_data = b.numpy_array\n result_data = op['op'](a_data, b_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n if isinstance(b, NumpyDispatch):\n result_data = b.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__r%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n\n\nfor op in _unary_ops:\n def closure(op=op):\n def operator(a):\n a_data = a.numpy_array\n result_data = op['op'](a_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n\n\ndef wrap(value):\n if not isinstance(value, NumpyDispatch): # and not isinstance(value, np.ndarray):\n if isinstance(value, vaex.array_types.supported_array_types + (vaex.column.ColumnStringArrow,)):\n return NumpyDispatch(value)\n # for performance reasons we don't visit lists and dicts\n return value\n\n\ndef unwrap(value):\n if isinstance(value, NumpyDispatch):\n return value._array\n # for performance reasons we don't visit lists and dicts\n return value\n\n\ndef autowrapper(f):\n '''Takes a function f, and will unwrap all its arguments and wrap the return value'''\n def wrapper(*args, **kwargs):\n args_original = args\n args = list(map(unwrap, args))\n kwargs = {k: unwrap(v) for k, v, in kwargs.items()}\n result = f(*args, **kwargs)\n return wrap(result)\n return wrapper\n", "path": "packages/vaex-core/vaex/arrow/numpy_dispatch.py" } ]
[ { "content": "import numpy as np\nimport pyarrow as pa\nimport pyarrow.compute as pc\nimport vaex\nfrom ..expression import _binary_ops, _unary_ops, reversable\n\n\ndef combine_missing(a, b):\n assert a.offset == 0\n if a.null_count > 0 or b.null_count > 0:\n # not optimal\n nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))\n assert nulls.offset == 0\n nulls_buffer = nulls.buffers()[1]\n # this is not the case: no reason why it should be (TODO: open arrow issue)\n # assert nulls.buffers()[0] is None\n else:\n nulls_buffer = None\n buffers = a.buffers()\n return pa.Array.from_buffers(a.type, len(a), [nulls_buffer] + buffers[1:])\n\nclass NumpyDispatch:\n def __init__(self, ar):\n self._array = ar\n if isinstance(ar, vaex.column.ColumnStringArrow):\n ar = pa.array(ar)\n if isinstance(ar, np.ndarray):\n self._numpy_array = ar\n self._arrow_array = None\n elif isinstance(ar, vaex.array_types.supported_arrow_array_types):\n self._numpy_array = None\n self._arrow_array = ar\n else:\n raise TypeError(f'Only support numpy and arrow, not {type(ar)}')\n\n def add_missing(self, ar):\n if isinstance(ar, np.ndarray):\n # if we are an arrow array, we upgrade ar to one\n if isinstance(self._array, vaex.array_types.supported_arrow_array_types):\n ar = vaex.array_types.to_arrow(ar)\n ar = combine_missing(ar, self._array)\n # else: both numpy, handled by numpy\n else:\n if isinstance(self._array, vaex.array_types.supported_arrow_array_types):\n ar = combine_missing(ar, self._array)\n # else: was numpy, handled by numpy\n return ar\n\n\n @property\n def numpy_array(self):\n if self._numpy_array is None:\n import vaex.arrow.convert\n arrow_array = self._arrow_array\n arrow_array = vaex.arrow.convert.ensure_not_chunked(arrow_array)\n buffers = arrow_array.buffers()\n # for math, we don't care about the nulls\n if buffers[0] is not None:\n buffers[0] = None\n arrow_array = pa.Array.from_buffers(arrow_array.type, len(arrow_array), buffers, offset=arrow_array.offset)\n self._numpy_array = vaex.array_types.to_numpy(arrow_array)\n return self._numpy_array\n\n @property\n def arrow_array(self):\n if self._arrow_array is None:\n # convert lazily, since not all arrow arrays (e.g. lists) can be converted\n if self._arrow_array is None:\n self._arrow_array = vaex.array_types.to_arrow(self._numpy_array)\n return self._arrow_array\n\n\nfor op in _binary_ops:\n def closure(op=op):\n def operator(a, b):\n a_data = a\n b_data = b\n if isinstance(a, NumpyDispatch):\n a_data = a.numpy_array\n if isinstance(b, NumpyDispatch):\n b_data = b.numpy_array\n if op['name'] == 'eq' and (vaex.array_types.is_string(a_data) or vaex.array_types.is_string(b_data)):\n result_data = vaex.functions.str_equals(a_data, b_data)\n else:\n result_data = op['op'](a_data, b_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n if isinstance(b, NumpyDispatch):\n result_data = b.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n # to support e.g. (1 + ...) # to support e.g. 
(1 + ...)\n if op['name'] in reversable:\n def closure(op=op):\n def operator(b, a):\n a_data = a\n b_data = b\n if isinstance(a, NumpyDispatch):\n a_data = a.numpy_array\n if isinstance(b, NumpyDispatch):\n b_data = b.numpy_array\n result_data = op['op'](a_data, b_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n if isinstance(b, NumpyDispatch):\n result_data = b.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__r%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n\n\nfor op in _unary_ops:\n def closure(op=op):\n def operator(a):\n a_data = a.numpy_array\n result_data = op['op'](a_data)\n if isinstance(a, NumpyDispatch):\n result_data = a.add_missing(result_data)\n return NumpyDispatch(result_data)\n return operator\n method_name = '__%s__' % op['name']\n setattr(NumpyDispatch, method_name, closure())\n\n\ndef wrap(value):\n if not isinstance(value, NumpyDispatch): # and not isinstance(value, np.ndarray):\n if isinstance(value, vaex.array_types.supported_array_types + (vaex.column.ColumnStringArrow,)):\n return NumpyDispatch(value)\n # for performance reasons we don't visit lists and dicts\n return value\n\n\ndef unwrap(value):\n if isinstance(value, NumpyDispatch):\n return value._array\n # for performance reasons we don't visit lists and dicts\n return value\n\n\ndef autowrapper(f):\n '''Takes a function f, and will unwrap all its arguments and wrap the return value'''\n def wrapper(*args, **kwargs):\n args_original = args\n args = list(map(unwrap, args))\n kwargs = {k: unwrap(v) for k, v, in kwargs.items()}\n result = f(*args, **kwargs)\n return wrap(result)\n return wrapper\n", "path": "packages/vaex-core/vaex/arrow/numpy_dispatch.py" } ]
diff --git a/packages/vaex-core/vaex/arrow/numpy_dispatch.py b/packages/vaex-core/vaex/arrow/numpy_dispatch.py index 91775a9879..d425456236 100644 --- a/packages/vaex-core/vaex/arrow/numpy_dispatch.py +++ b/packages/vaex-core/vaex/arrow/numpy_dispatch.py @@ -17,7 +17,7 @@ def combine_missing(a, b): else: nulls_buffer = None buffers = a.buffers() - return pa.Array.from_buffers(a.type, len(a), [nulls_buffer, buffers[1]]) + return pa.Array.from_buffers(a.type, len(a), [nulls_buffer] + buffers[1:]) class NumpyDispatch: def __init__(self, ar): diff --git a/tests/compute_test.py b/tests/compute_test.py index fc9663e361..e633fb3252 100644 --- a/tests/compute_test.py +++ b/tests/compute_test.py @@ -62,3 +62,9 @@ def test_mix_string_and_numeric(x, s): assert ((df.s == 'a') | (df.x == 1)).tolist() == [True, True, False, None] assert (('a' == df.s) | (df.x == 1)).tolist() == [True, True, False, None] assert ((df.x == 1) | (df.s == 'a')).tolist() == [True, True, False, None] + + +def test_where(s): + df = vaex.from_arrays(s=s) + expr = df.func.where(df['s'] == 'a', 'A', df['s']) + assert expr.tolist() == ['A', 'b', None, 'd']
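As a quick sanity check of the one-line fix above, one might rerun the reporter's original snippet against a patched build; a sketch (the expected output is inferred from the semantics of `where`, not copied from the PR's test):

```python
import numpy as np
import vaex

df = vaex.from_arrays(col1=np.array(['test', '', 'test2', ''], dtype=str))
df['col1'] = df.func.where(df['col1'] == '', 'fill', df['col1'])
print(df['col1'].tolist())  # expected: ['test', 'fill', 'test2', 'fill']
```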
pyinstaller__pyinstaller-5568
Support matplotlib-3.4.0rc1 ## Description of the issue `matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b. This is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`. ## Context information (for bug reports) * Output of `pyinstaller --version`: ```4.2``` * Version of Python: `3.8` * Platform: `Windows` * Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug` > * try the latest development version, using the following command: > > ```shell > pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip > ``` > > * follow *all* the instructions in our "If Things Go Wrong" Guide > (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and > > ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly) > > * [ ] start with clean installation > * [ ] use the latest development version > * [ ] Run your frozen program **from a command window (shell)** — instead of double-clicking on it > * [ ] Package your program in **--onedir mode** > * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file > * [ ] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file. > This part of the template is irrelevant, as the responsible code is unchanged on `develop` ### A minimal example program which shows the error ``` pip install --pre matplotlib==3.4.0rc1 pyinstaller echo "import matplotlib" > test.py pyinstaller test.py ``` ### Stacktrace / full error message Building `pyinstaller test.py`: ``` 20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'... Traceback (most recent call last): File "<string>", line 1, in <module> AttributeError: module 'matplotlib' has no attribute '_get_data_path' ``` Running `test.exe`: ``` Traceback (most recent call last): File "test.py", line 1, in <module> import matplotlib File "<frozen importlib._bootstrap>", line 991, in _find_and_load File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 671, in _load_unlocked File "PyInstaller\loader\pyimod03_importers.py", line 531, in exec_module File "matplotlib\__init__.py", line 820, in <module> File "matplotlib\__init__.py", line 725, in _rc_params_in_file File "contextlib.py", line 113, in __enter__ File "matplotlib\__init__.py", line 703, in _open_file_or_url FileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\matplotlib\\mpl-data\\matplotlibrc' [2688] Failed to execute script test ``` ## Possible resolution Simply remove the first underscore in `matplotlib._get_data_path` in https://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16 This works on my system. I'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.
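A quick way to confirm the proposed resolution locally (an editorial sketch, not part of the report) is to check that the public accessor works while the private one is gone on 3.4.0rc1:

```python
import matplotlib

print(matplotlib.get_data_path())             # public API, available for many releases
print(hasattr(matplotlib, "_get_data_path"))  # False on 3.4.0rc1 and later
```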
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py" } ]
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib.get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py" } ]
diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py index baf3a5146e..9e4f92ba9c 100644 --- a/PyInstaller/hooks/hook-matplotlib.py +++ b/PyInstaller/hooks/hook-matplotlib.py @@ -13,7 +13,7 @@ from PyInstaller.utils.hooks import exec_statement mpl_data_dir = exec_statement( - "import matplotlib; print(matplotlib._get_data_path())") + "import matplotlib; print(matplotlib.get_data_path())") datas = [ (mpl_data_dir, "matplotlib/mpl-data"), diff --git a/news/5568.bugfix.rst b/news/5568.bugfix.rst new file mode 100644 index 0000000000..e18a44e3fd --- /dev/null +++ b/news/5568.bugfix.rst @@ -0,0 +1,3 @@ +Remove dependence on a `private function +<https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b>`_ +removed in ``matplotlib`` 3.4.0rc1.
django-json-api__django-rest-framework-json-api-720
Tox using pytest-runner setup scripts leads to invalid deps Currently tox uses the pytest-runner setup script wrapper. This way tests are run differently under tox than when run locally as stated in README.md. This may lead to different dependencies and errors, as seen for example in #634. Best to split up the requirements as in [DRF](https://github.com/encode/django-rest-framework/tree/master/requirements) and remove the pytest-runner setup script. Once this is done, `test_requires` in setup.py would no longer be needed, which removes the duplication with requirements-development.txt.
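For illustration only (the file and directory names here are assumptions, loosely mirroring DRF's requirements/ layout), the split could end up with tox installing the same pinned files contributors use locally and invoking pytest directly instead of going through setup.py:

```ini
# tox.ini (sketch): install test dependencies from a shared requirements file
# and run pytest directly, removing the pytest-runner indirection in setup.py.
[testenv]
deps =
    -rrequirements/requirements-testing.txt
commands =
    pytest {posargs}
```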
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Django REST Framework JSON API documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 24 23:31:15 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\nimport os\nimport shlex\nimport sys\n\nimport django\nfrom sphinx.ext.apidoc import main\n\nfrom rest_framework_json_api import VERSION\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'\ndjango.setup()\n\n# Auto-generate API documentation.\nmain(['-o', '_build/apidoc', '-f', '-e', '-T', '-M', '../rest_framework_json_api'])\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc', 'recommonmark']\nautodoc_member_order = 'bysource'\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Django REST Framework JSON API'\nyear = datetime.date.today().year\ncopyright = '{}, Django REST Framework JSON API contributors'.format(year)\nauthor = 'Django REST Framework JSON API contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = VERSION\n# The full version, including alpha/beta/rc tags.\nrelease = VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', 'pull_request_template.md']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'default'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'DjangoRESTFrameworkJSONAPIdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'DjangoRESTFrameworkJSONAPI.tex', 'Django REST Framework JSON API Documentation',\n 'Django REST Framework JSON API contributors', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'djangorestframeworkjsonapi', 'Django REST Framework JSON API Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'DjangoRESTFrameworkJSONAPI', 'Django REST Framework JSON API Documentation',\n author, 'DjangoRESTFrameworkJSONAPI', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Django REST Framework JSON API documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 24 23:31:15 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\nimport os\nimport shlex\nimport sys\n\nimport django\nfrom sphinx.ext.apidoc import main\n\nfrom rest_framework_json_api import VERSION\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'\ndjango.setup()\n\n# Auto-generate API documentation.\nmain(['-o', 'apidoc', '-f', '-e', '-T', '-M', '../rest_framework_json_api'])\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc', 'recommonmark']\nautodoc_member_order = 'bysource'\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Django REST Framework JSON API'\nyear = datetime.date.today().year\ncopyright = '{}, Django REST Framework JSON API contributors'.format(year)\nauthor = 'Django REST Framework JSON API contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = VERSION\n# The full version, including alpha/beta/rc tags.\nrelease = VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', 'pull_request_template.md']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'default'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'DjangoRESTFrameworkJSONAPIdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'DjangoRESTFrameworkJSONAPI.tex', 'Django REST Framework JSON API Documentation',\n 'Django REST Framework JSON API contributors', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'djangorestframeworkjsonapi', 'Django REST Framework JSON API Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'DjangoRESTFrameworkJSONAPI', 'Django REST Framework JSON API Documentation',\n author, 'DjangoRESTFrameworkJSONAPI', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py" } ]
diff --git a/.pyup.yml b/.pyup.yml index 02f8ed99..c4b52e37 100644 --- a/.pyup.yml +++ b/.pyup.yml @@ -1,5 +1,17 @@ search: False requirements: - - requirements-development.txt: + - requirements/requirements-codestyle.txt: + update: all + pin: True + - requirements/requirements-documentation.txt: + update: all + pin: True + - requirements/requirements-optionals.txt: + update: all + pin: True + - requirements/requirements-packaging.txt: + update: all + pin: True + - requirements/requirements-testing.txt: update: all pin: True diff --git a/.travis.yml b/.travis.yml index b459f10f..6d6ccfb2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,7 +15,9 @@ matrix: include: - python: 3.6 - env: TOXENV=flake8 + env: TOXENV=lint + - python: 3.6 + env: TOXENV=docs - python: 3.5 env: TOXENV=py35-django111-drf310 diff --git a/README.rst b/README.rst index d9f9dd09..aaa6678c 100644 --- a/README.rst +++ b/README.rst @@ -128,8 +128,7 @@ installed and activated: $ git clone https://github.com/django-json-api/django-rest-framework-json-api.git $ cd django-rest-framework-json-api - $ pip install -r example/requirements.txt - $ pip install -e . + $ pip install -U -e . -r requirements.txt $ django-admin migrate --settings=example.settings $ django-admin loaddata drf_example --settings=example.settings $ django-admin runserver --settings=example.settings @@ -145,7 +144,7 @@ installed and activated: :: - $ pip install -Ur requirements-development.txt + $ pip install -Ur requirements.txt $ flake8 $ pytest diff --git a/docs/conf.py b/docs/conf.py index 88111be3..ee435d36 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -31,7 +31,7 @@ django.setup() # Auto-generate API documentation. -main(['-o', '_build/apidoc', '-f', '-e', '-T', '-M', '../rest_framework_json_api']) +main(['-o', 'apidoc', '-f', '-e', '-T', '-M', '../rest_framework_json_api']) # -- General configuration ------------------------------------------------ diff --git a/docs/getting-started.md b/docs/getting-started.md index 2e93d77a..6f5d60ab 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -77,8 +77,7 @@ From Source cd django-rest-framework-json-api python3 -m venv env source env/bin/activate - pip install -r example/requirements.txt - pip install -e . + pip install -U -e . r requirements.txt django-admin migrate --settings=example.settings django-admin loaddata drf_example --settings=example.settings django-admin runserver --settings=example.settings diff --git a/requirements-development.txt b/requirements-development.txt deleted file mode 100644 index a8115e16..00000000 --- a/requirements-development.txt +++ /dev/null @@ -1,16 +0,0 @@ --e . -django-debug-toolbar==2.0 -django-filter==2.2.0 -django-polymorphic==2.1.2 -Faker==2.0.2 -factory-boy==2.12.0 -flake8==3.7.7 -flake8-isort==2.7.0 -isort==4.3.21 -pytest==5.2.1 -pytest-cov==2.8.1 -pytest-django==3.5.1 -pytest-factoryboy==2.0.3 -recommonmark==0.6.0 -Sphinx==2.2.0 -sphinx_rtd_theme==0.4.3 diff --git a/requirements.txt b/requirements.txt index 8d1c8b69..862b4aa8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,13 @@ - +# The base set of requirements for Django REST framework JSON API is actually +# fairly small, but for the purposes of development and testing +# there are a number of packages that are useful to install. + +# Laying these out as separate requirements files, allows us to +# only included the relevant sets when running tox, and ensures +# we are only ever declaring our dependencies in one place. 
+ +-r requirements/requirements-optionals.txt +-r requirements/requirements-testing.txt +-r requirements/requirements-documentation.txt +-r requirements/requirements-codestyle.txt +-r requirements/requirements-packaging.txt diff --git a/requirements/requirements-codestyle.txt b/requirements/requirements-codestyle.txt new file mode 100644 index 00000000..49ef62ac --- /dev/null +++ b/requirements/requirements-codestyle.txt @@ -0,0 +1,3 @@ +flake8==3.7.7 +flake8-isort==2.7.0 +isort==4.3.21 diff --git a/requirements/requirements-documentation.txt b/requirements/requirements-documentation.txt new file mode 100644 index 00000000..19d59a2e --- /dev/null +++ b/requirements/requirements-documentation.txt @@ -0,0 +1,3 @@ +recommonmark==0.6.0 +Sphinx==2.2.0 +sphinx_rtd_theme==0.4.3 diff --git a/requirements/requirements-optionals.txt b/requirements/requirements-optionals.txt new file mode 100644 index 00000000..cc5f81b3 --- /dev/null +++ b/requirements/requirements-optionals.txt @@ -0,0 +1,2 @@ +django-filter==2.2.0 +django-polymorphic==2.1.2 diff --git a/requirements/requirements-packaging.txt b/requirements/requirements-packaging.txt new file mode 100644 index 00000000..f3c2cd3e --- /dev/null +++ b/requirements/requirements-packaging.txt @@ -0,0 +1 @@ +twine==2.0.0 diff --git a/requirements/requirements-testing.txt b/requirements/requirements-testing.txt new file mode 100644 index 00000000..836971ec --- /dev/null +++ b/requirements/requirements-testing.txt @@ -0,0 +1,7 @@ +django-debug-toolbar==2.0 +factory-boy==2.12.0 +Faker==2.0.2 +pytest==5.2.1 +pytest-cov==2.8.1 +pytest-django==3.5.1 +pytest-factoryboy==2.0.3 diff --git a/tox.ini b/tox.ini index 3431b4ea..13992946 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,7 @@ envlist = py{35,36}-django111-drf{310,master}, py{35,36,37}-django{21,22}-drf{310,master}, + lint,docs [testenv] deps = @@ -10,7 +11,8 @@ deps = django22: Django>=2.2,<2.3 drf310: djangorestframework>=3.10.2,<3.11 drfmaster: https://github.com/encode/django-rest-framework/archive/master.zip - -rrequirements-development.txt + -rrequirements/requirements-testing.txt + -rrequirements/requirements-optionals.txt setenv = PYTHONPATH = {toxinidir} @@ -19,15 +21,19 @@ setenv = commands = pytest --cov --no-cov-on-fail --cov-report xml {posargs} -[testenv:flake8] +[testenv:lint] basepython = python3.6 deps = - -rrequirements-development.txt + -rrequirements/requirements-codestyle.txt + -rrequirements/requirements-testing.txt + -rrequirements/requirements-optionals.txt commands = flake8 -[testenv:sphinx] +[testenv:docs] basepython = python3.6 deps = - -rrequirements-development.txt + -rrequirements/requirements-testing.txt + -rrequirements/requirements-optionals.txt + -rrequirements/requirements-documentation.txt commands = - sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html + sphinx-build -W -b html -d docs/_build/doctrees docs docs/_build/html
ansible__ansible-42736
openstack inventory plugin can create group with an 'empty name' when region is not specified in clouds.yml <!--- Verify first that your issue/request is not already reported on GitHub. THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED. Also test if the latest release, and devel branch are affected too. ALWAYS add information AFTER (OUTSIDE) these html comments. Otherwise it may end up being automatically closed by our bot. --> ##### SUMMARY <!--- Explain the problem briefly --> When region_name is not specified the openstack inventory plugin create empty named group because plugin have not any check for an empty value: https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/openstack.py#L257 as soon as 'cloud_' group also created: https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/openstack.py#L260 the empty group can lead to unexpected behavior: https://github.com/ansible/ansible/issues/42040 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature. Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path--> `contrib/inventory/openstack_inventory.py` ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` ansible 2.5.5 config file = /Users/igor.tiunov/Sources/ansible-openstack-bug/ansible.cfg configured module search path = [u'/Users/igor.tiunov/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python2.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 2.7.14 (default, Mar 9 2018, 23:57:12) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)] ``` ##### CONFIGURATION <!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed" Otherwise, mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables).--> ``` DEFAULT_HOST_LIST(/Users/igor.tiunov/Sources/ansible-openstack-bug/ansible.cfg) = [u'/Users/igor.tiunov/Sources/ansible-openstack-bug/openstack.yml'] DEFAULT_VAULT_PASSWORD_FILE(env: ANSIBLE_VAULT_PASSWORD_FILE) = /Users/igor.tiunov/tmp/.vault INVENTORY_ENABLED(/Users/igor.tiunov/Sources/ansible-openstack-bug/ansible.cfg) = ['openstack'] ``` ##### OS / ENVIRONMENT <!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are managing, or say "N/A" for anything that is not platform-specific. Also mention the specific version of what you are trying to control, e.g. if this is a network bug the version of firmware on the network device.--> Max OSX and rhel/windows instances on openstack ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> 1. Create clouds.yaml like this: https://github.com/ITD27M01/ansible-openstack-bug/blob/master/clouds.yaml https://docs.openstack.org/os-client-config/latest/user/configuration.html 2. Run ansible-invetnory --list 3. 
See the empty group in inventory: https://github.com/ITD27M01/ansible-openstack-bug/blob/master/openstack-inventory.json#L2 https://github.com/ITD27M01/ansible-openstack-bug/blob/master/openstack-inventory.json#L260 <!--- Paste example playbooks or commands between quotes below --> ```yaml ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> Empty group is not created ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> Empty group is created <!--- Paste verbatim command output between quotes below --> ``` ```
[ { "content": "# Copyright (c) 2012, Marco Vito Moscaritolo <[email protected]>\n# Copyright (c) 2013, Jesse Keating <[email protected]>\n# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2016, Rackspace Australia\n# Copyright (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n name: openstack\n plugin_type: inventory\n authors:\n - Marco Vito Moscaritolo <[email protected]>\n - Jesse Keating <[email protected]>\n short_description: OpenStack inventory source\n description:\n - Get inventory hosts from OpenStack clouds\n - Uses openstack.(yml|yaml) YAML configuration file to configure the inventory plugin\n - Uses standard clouds.yaml YAML configuration file to configure cloud credentials\n options:\n show_all:\n description: toggles showing all vms vs only those with a working IP\n type: boolean\n default: False\n inventory_hostname:\n description: |\n What to register as the inventory hostname.\n If set to 'uuid' the uuid of the server will be used and a\n group will be created for the server name.\n If set to 'name' the name of the server will be used unless\n there are more than one server with the same name in which\n case the 'uuid' logic will be used.\n Default is to do 'name', which is the opposite of the old\n openstack.py inventory script's option use_hostnames)\n type: string\n choices:\n - name\n - uuid\n default: \"name\"\n expand_hostvars:\n description: |\n Run extra commands on each host to fill in additional\n information about the host. May interrogate cinder and\n neutron and can be expensive for people with many hosts.\n (Note, the default value of this is opposite from the default\n old openstack.py inventory script's option expand_hostvars)\n type: boolean\n default: False\n private:\n description: |\n Use the private interface of each server, if it has one, as\n the host's IP in the inventory. This can be useful if you are\n running ansible inside a server in the cloud and would rather\n communicate to your servers over the private network.\n type: boolean\n default: False\n only_clouds:\n description: |\n List of clouds from clouds.yaml to use, instead of using\n the whole list.\n type: list\n default: []\n fail_on_errors:\n description: |\n Causes the inventory to fail and return no hosts if one cloud\n has failed (for example, bad credentials or being offline).\n When set to False, the inventory will return as many hosts as\n it can from as many clouds as it can contact. (Note, the\n default value of this is opposite from the old openstack.py\n inventory script's option fail_on_errors)\n type: boolean\n default: False\n clouds_yaml_path:\n description: |\n Override path to clouds.yaml file. If this value is given it\n will be searched first. 
The default path for the\n ansible inventory adds /etc/ansible/openstack.yaml and\n /etc/ansible/openstack.yml to the regular locations documented\n at https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files\n type: string\n default: None\n compose:\n description: Create vars from jinja2 expressions.\n type: dictionary\n default: {}\n groups:\n description: Add hosts to group based on Jinja2 conditionals.\n type: dictionary\n default: {}\n'''\n\nEXAMPLES = '''\n# file must be named openstack.yaml or openstack.yml\n# Make the plugin behave like the default behavior of the old script\nplugin: openstack\nexpand_hostvars: yes\nfail_on_errors: yes\n'''\n\nimport collections\n\nfrom ansible.errors import AnsibleParserError\nfrom ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable\n\ntry:\n import os_client_config\n import shade\n import shade.inventory\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):\n ''' Host inventory provider for ansible using OpenStack clouds. '''\n\n NAME = 'openstack'\n\n def parse(self, inventory, loader, path, cache=True):\n\n super(InventoryModule, self).parse(inventory, loader, path)\n\n cache_key = self._get_cache_prefix(path)\n\n # file is config file\n self._config_data = self._read_config_data(path)\n\n msg = ''\n if not self._config_data:\n msg = 'File empty. this is not my config file'\n elif 'plugin' in self._config_data and self._config_data['plugin'] != self.NAME:\n msg = 'plugin config file, but not for us: %s' % self._config_data['plugin']\n elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:\n msg = \"it's not a plugin configuration nor a clouds.yaml file\"\n elif not HAS_SHADE:\n msg = \"shade is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped.\"\n\n if msg:\n raise AnsibleParserError(msg)\n\n # The user has pointed us at a clouds.yaml file. 
Use defaults for\n # everything.\n if 'clouds' in self._config_data:\n self._config_data = {}\n\n source_data = None\n if cache and cache_key in self._cache:\n try:\n source_data = self._cache[cache_key]\n except KeyError:\n pass\n\n if not source_data:\n clouds_yaml_path = self._config_data.get('clouds_yaml_path')\n if clouds_yaml_path:\n config_files = (clouds_yaml_path +\n os_client_config.config.CONFIG_FILES)\n else:\n config_files = None\n\n # TODO(mordred) Integrate shade's logging with ansible's logging\n shade.simple_logging()\n\n cloud_inventory = shade.inventory.OpenStackInventory(\n config_files=config_files,\n private=self._config_data.get('private', False))\n only_clouds = self._config_data.get('only_clouds', [])\n if only_clouds and not isinstance(only_clouds, list):\n raise ValueError(\n 'OpenStack Inventory Config Error: only_clouds must be'\n ' a list')\n if only_clouds:\n new_clouds = []\n for cloud in cloud_inventory.clouds:\n if cloud.name in only_clouds:\n new_clouds.append(cloud)\n cloud_inventory.clouds = new_clouds\n\n expand_hostvars = self._config_data.get('expand_hostvars', False)\n fail_on_errors = self._config_data.get('fail_on_errors', False)\n\n source_data = cloud_inventory.list_hosts(\n expand=expand_hostvars, fail_on_cloud_config=fail_on_errors)\n\n self._cache[cache_key] = source_data\n\n self._populate_from_source(source_data)\n\n def _populate_from_source(self, source_data):\n groups = collections.defaultdict(list)\n firstpass = collections.defaultdict(list)\n hostvars = {}\n\n use_server_id = (\n self._config_data.get('inventory_hostname', 'name') != 'name')\n show_all = self._config_data.get('show_all', False)\n\n for server in source_data:\n if 'interface_ip' not in server and not show_all:\n continue\n firstpass[server['name']].append(server)\n\n for name, servers in firstpass.items():\n if len(servers) == 1 and not use_server_id:\n self._append_hostvars(hostvars, groups, name, servers[0])\n else:\n server_ids = set()\n # Trap for duplicate results\n for server in servers:\n server_ids.add(server['id'])\n if len(server_ids) == 1 and not use_server_id:\n self._append_hostvars(hostvars, groups, name, servers[0])\n else:\n for server in servers:\n self._append_hostvars(\n hostvars, groups, server['id'], server,\n namegroup=True)\n\n self._set_variables(hostvars, groups)\n\n def _set_variables(self, hostvars, groups):\n\n # set vars in inventory from hostvars\n for host in hostvars:\n\n # create composite vars\n self._set_composite_vars(\n self._config_data.get('compose'), hostvars, host)\n\n # actually update inventory\n for key in hostvars[host]:\n self.inventory.set_variable(host, key, hostvars[host][key])\n\n # constructed groups based on conditionals\n self._add_host_to_composed_groups(\n self._config_data.get('groups'), hostvars, host)\n\n for group_name, group_hosts in groups.items():\n self.inventory.add_group(group_name)\n for host in group_hosts:\n self.inventory.add_child(group_name, host)\n\n def _get_groups_from_server(self, server_vars, namegroup=True):\n groups = []\n\n region = server_vars['region']\n cloud = server_vars['cloud']\n metadata = server_vars.get('metadata', {})\n\n # Create a group for the cloud\n groups.append(cloud)\n\n # Create a group on region\n groups.append(region)\n\n # And one by cloud_region\n groups.append(\"%s_%s\" % (cloud, region))\n\n # Check if group metadata key in servers' metadata\n if 'group' in metadata:\n groups.append(metadata['group'])\n\n for extra_group in metadata.get('groups', '').split(','):\n if 
extra_group:\n groups.append(extra_group.strip())\n\n groups.append('instance-%s' % server_vars['id'])\n if namegroup:\n groups.append(server_vars['name'])\n\n for key in ('flavor', 'image'):\n if 'name' in server_vars[key]:\n groups.append('%s-%s' % (key, server_vars[key]['name']))\n\n for key, value in iter(metadata.items()):\n groups.append('meta-%s_%s' % (key, value))\n\n az = server_vars.get('az', None)\n if az:\n # Make groups for az, region_az and cloud_region_az\n groups.append(az)\n groups.append('%s_%s' % (region, az))\n groups.append('%s_%s_%s' % (cloud, region, az))\n return groups\n\n def _append_hostvars(self, hostvars, groups, current_host,\n server, namegroup=False):\n hostvars[current_host] = dict(\n ansible_ssh_host=server['interface_ip'],\n ansible_host=server['interface_ip'],\n openstack=server)\n self.inventory.add_host(current_host)\n\n for group in self._get_groups_from_server(server, namegroup=namegroup):\n groups[group].append(current_host)\n\n def verify_file(self, path):\n\n if super(InventoryModule, self).verify_file(path):\n for fn in ('openstack', 'clouds'):\n for suffix in ('yaml', 'yml'):\n maybe = '{fn}.{suffix}'.format(fn=fn, suffix=suffix)\n if path.endswith(maybe):\n return True\n return False\n", "path": "lib/ansible/plugins/inventory/openstack.py" } ]
[ { "content": "# Copyright (c) 2012, Marco Vito Moscaritolo <[email protected]>\n# Copyright (c) 2013, Jesse Keating <[email protected]>\n# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2016, Rackspace Australia\n# Copyright (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n name: openstack\n plugin_type: inventory\n authors:\n - Marco Vito Moscaritolo <[email protected]>\n - Jesse Keating <[email protected]>\n short_description: OpenStack inventory source\n description:\n - Get inventory hosts from OpenStack clouds\n - Uses openstack.(yml|yaml) YAML configuration file to configure the inventory plugin\n - Uses standard clouds.yaml YAML configuration file to configure cloud credentials\n options:\n show_all:\n description: toggles showing all vms vs only those with a working IP\n type: boolean\n default: False\n inventory_hostname:\n description: |\n What to register as the inventory hostname.\n If set to 'uuid' the uuid of the server will be used and a\n group will be created for the server name.\n If set to 'name' the name of the server will be used unless\n there are more than one server with the same name in which\n case the 'uuid' logic will be used.\n Default is to do 'name', which is the opposite of the old\n openstack.py inventory script's option use_hostnames)\n type: string\n choices:\n - name\n - uuid\n default: \"name\"\n expand_hostvars:\n description: |\n Run extra commands on each host to fill in additional\n information about the host. May interrogate cinder and\n neutron and can be expensive for people with many hosts.\n (Note, the default value of this is opposite from the default\n old openstack.py inventory script's option expand_hostvars)\n type: boolean\n default: False\n private:\n description: |\n Use the private interface of each server, if it has one, as\n the host's IP in the inventory. This can be useful if you are\n running ansible inside a server in the cloud and would rather\n communicate to your servers over the private network.\n type: boolean\n default: False\n only_clouds:\n description: |\n List of clouds from clouds.yaml to use, instead of using\n the whole list.\n type: list\n default: []\n fail_on_errors:\n description: |\n Causes the inventory to fail and return no hosts if one cloud\n has failed (for example, bad credentials or being offline).\n When set to False, the inventory will return as many hosts as\n it can from as many clouds as it can contact. (Note, the\n default value of this is opposite from the old openstack.py\n inventory script's option fail_on_errors)\n type: boolean\n default: False\n clouds_yaml_path:\n description: |\n Override path to clouds.yaml file. If this value is given it\n will be searched first. 
The default path for the\n ansible inventory adds /etc/ansible/openstack.yaml and\n /etc/ansible/openstack.yml to the regular locations documented\n at https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files\n type: string\n default: None\n compose:\n description: Create vars from jinja2 expressions.\n type: dictionary\n default: {}\n groups:\n description: Add hosts to group based on Jinja2 conditionals.\n type: dictionary\n default: {}\n'''\n\nEXAMPLES = '''\n# file must be named openstack.yaml or openstack.yml\n# Make the plugin behave like the default behavior of the old script\nplugin: openstack\nexpand_hostvars: yes\nfail_on_errors: yes\n'''\n\nimport collections\n\nfrom ansible.errors import AnsibleParserError\nfrom ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable\n\ntry:\n import os_client_config\n import shade\n import shade.inventory\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):\n ''' Host inventory provider for ansible using OpenStack clouds. '''\n\n NAME = 'openstack'\n\n def parse(self, inventory, loader, path, cache=True):\n\n super(InventoryModule, self).parse(inventory, loader, path)\n\n cache_key = self._get_cache_prefix(path)\n\n # file is config file\n self._config_data = self._read_config_data(path)\n\n msg = ''\n if not self._config_data:\n msg = 'File empty. this is not my config file'\n elif 'plugin' in self._config_data and self._config_data['plugin'] != self.NAME:\n msg = 'plugin config file, but not for us: %s' % self._config_data['plugin']\n elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:\n msg = \"it's not a plugin configuration nor a clouds.yaml file\"\n elif not HAS_SHADE:\n msg = \"shade is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped.\"\n\n if msg:\n raise AnsibleParserError(msg)\n\n # The user has pointed us at a clouds.yaml file. 
Use defaults for\n # everything.\n if 'clouds' in self._config_data:\n self._config_data = {}\n\n source_data = None\n if cache and cache_key in self._cache:\n try:\n source_data = self._cache[cache_key]\n except KeyError:\n pass\n\n if not source_data:\n clouds_yaml_path = self._config_data.get('clouds_yaml_path')\n if clouds_yaml_path:\n config_files = (clouds_yaml_path +\n os_client_config.config.CONFIG_FILES)\n else:\n config_files = None\n\n # TODO(mordred) Integrate shade's logging with ansible's logging\n shade.simple_logging()\n\n cloud_inventory = shade.inventory.OpenStackInventory(\n config_files=config_files,\n private=self._config_data.get('private', False))\n only_clouds = self._config_data.get('only_clouds', [])\n if only_clouds and not isinstance(only_clouds, list):\n raise ValueError(\n 'OpenStack Inventory Config Error: only_clouds must be'\n ' a list')\n if only_clouds:\n new_clouds = []\n for cloud in cloud_inventory.clouds:\n if cloud.name in only_clouds:\n new_clouds.append(cloud)\n cloud_inventory.clouds = new_clouds\n\n expand_hostvars = self._config_data.get('expand_hostvars', False)\n fail_on_errors = self._config_data.get('fail_on_errors', False)\n\n source_data = cloud_inventory.list_hosts(\n expand=expand_hostvars, fail_on_cloud_config=fail_on_errors)\n\n self._cache[cache_key] = source_data\n\n self._populate_from_source(source_data)\n\n def _populate_from_source(self, source_data):\n groups = collections.defaultdict(list)\n firstpass = collections.defaultdict(list)\n hostvars = {}\n\n use_server_id = (\n self._config_data.get('inventory_hostname', 'name') != 'name')\n show_all = self._config_data.get('show_all', False)\n\n for server in source_data:\n if 'interface_ip' not in server and not show_all:\n continue\n firstpass[server['name']].append(server)\n\n for name, servers in firstpass.items():\n if len(servers) == 1 and not use_server_id:\n self._append_hostvars(hostvars, groups, name, servers[0])\n else:\n server_ids = set()\n # Trap for duplicate results\n for server in servers:\n server_ids.add(server['id'])\n if len(server_ids) == 1 and not use_server_id:\n self._append_hostvars(hostvars, groups, name, servers[0])\n else:\n for server in servers:\n self._append_hostvars(\n hostvars, groups, server['id'], server,\n namegroup=True)\n\n self._set_variables(hostvars, groups)\n\n def _set_variables(self, hostvars, groups):\n\n # set vars in inventory from hostvars\n for host in hostvars:\n\n # create composite vars\n self._set_composite_vars(\n self._config_data.get('compose'), hostvars, host)\n\n # actually update inventory\n for key in hostvars[host]:\n self.inventory.set_variable(host, key, hostvars[host][key])\n\n # constructed groups based on conditionals\n self._add_host_to_composed_groups(\n self._config_data.get('groups'), hostvars, host)\n\n for group_name, group_hosts in groups.items():\n self.inventory.add_group(group_name)\n for host in group_hosts:\n self.inventory.add_child(group_name, host)\n\n def _get_groups_from_server(self, server_vars, namegroup=True):\n groups = []\n\n region = server_vars['region']\n cloud = server_vars['cloud']\n metadata = server_vars.get('metadata', {})\n\n # Create a group for the cloud\n groups.append(cloud)\n\n # Create a group on region\n if region:\n groups.append(region)\n\n # And one by cloud_region\n groups.append(\"%s_%s\" % (cloud, region))\n\n # Check if group metadata key in servers' metadata\n if 'group' in metadata:\n groups.append(metadata['group'])\n\n for extra_group in metadata.get('groups', 
'').split(','):\n if extra_group:\n groups.append(extra_group.strip())\n\n groups.append('instance-%s' % server_vars['id'])\n if namegroup:\n groups.append(server_vars['name'])\n\n for key in ('flavor', 'image'):\n if 'name' in server_vars[key]:\n groups.append('%s-%s' % (key, server_vars[key]['name']))\n\n for key, value in iter(metadata.items()):\n groups.append('meta-%s_%s' % (key, value))\n\n az = server_vars.get('az', None)\n if az:\n # Make groups for az, region_az and cloud_region_az\n groups.append(az)\n groups.append('%s_%s' % (region, az))\n groups.append('%s_%s_%s' % (cloud, region, az))\n return groups\n\n def _append_hostvars(self, hostvars, groups, current_host,\n server, namegroup=False):\n hostvars[current_host] = dict(\n ansible_ssh_host=server['interface_ip'],\n ansible_host=server['interface_ip'],\n openstack=server)\n self.inventory.add_host(current_host)\n\n for group in self._get_groups_from_server(server, namegroup=namegroup):\n groups[group].append(current_host)\n\n def verify_file(self, path):\n\n if super(InventoryModule, self).verify_file(path):\n for fn in ('openstack', 'clouds'):\n for suffix in ('yaml', 'yml'):\n maybe = '{fn}.{suffix}'.format(fn=fn, suffix=suffix)\n if path.endswith(maybe):\n return True\n return False\n", "path": "lib/ansible/plugins/inventory/openstack.py" } ]
diff --git a/lib/ansible/plugins/inventory/openstack.py b/lib/ansible/plugins/inventory/openstack.py index d352450ae4c784..2f5717fd3fbd53 100644 --- a/lib/ansible/plugins/inventory/openstack.py +++ b/lib/ansible/plugins/inventory/openstack.py @@ -253,7 +253,8 @@ def _get_groups_from_server(self, server_vars, namegroup=True): groups.append(cloud) # Create a group on region - groups.append(region) + if region: + groups.append(region) # And one by cloud_region groups.append("%s_%s" % (cloud, region))
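As an illustration of the guard added in the patch above, the following is a minimal, self-contained sketch (not the actual inventory plugin code; the sample `server_vars` dictionaries are hypothetical) showing how skipping a falsy region value prevents a group with an empty name from being created when `region_name` is omitted in clouds.yaml, while the combined cloud_region group is still produced exactly as in the patch.

```python
# Illustrative sketch only: mirrors the guard added in the patch above so that a
# cloud with no region_name in clouds.yaml does not produce a group named "".
def groups_for_server(server_vars):
    groups = []

    cloud = server_vars["cloud"]
    region = server_vars["region"]  # may be "" when region_name is not set

    # A group for the cloud is always created.
    groups.append(cloud)

    # Only create the plain region group when the region is non-empty.
    if region:
        groups.append(region)

    # The combined cloud_region group is still created, matching the patch.
    groups.append("%s_%s" % (cloud, region))

    return groups


if __name__ == "__main__":
    # Hypothetical server records for demonstration.
    print(groups_for_server({"cloud": "mycloud", "region": "RegionOne"}))
    # -> ['mycloud', 'RegionOne', 'mycloud_RegionOne']
    print(groups_for_server({"cloud": "mycloud", "region": ""}))
    # -> ['mycloud', 'mycloud_']  (no empty-named group)
```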
rucio__rucio-2492
Issue in client_extract download

Motivation
----------

Modification
------------
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.19.5',\n 'branch_nick': 'patch-0-1_19_5_preparation',\n 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807',\n 'revno': 7951\n}\n", "path": "lib/rucio/vcsversion.py" } ]
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.19.6',\n 'branch_nick': 'patch-0-Release__Rucio_1_19_6_preparation',\n 'revision_id': 'a8c639a7a70a9e605ad90535d28d2eab04d89cce',\n 'revno': 7992\n}\n", "path": "lib/rucio/vcsversion.py" } ]
diff --git a/doc/source/releasenotes/1.19.6.rst b/doc/source/releasenotes/1.19.6.rst new file mode 100644 index 0000000000..136b84f446 --- /dev/null +++ b/doc/source/releasenotes/1.19.6.rst @@ -0,0 +1,47 @@ +====== +1.19.6 +====== + +------- +General +------- + +************ +Enhancements +************ + +- Clients: Fix for update_replicas_states function description in replicaclient.py `#2240 <https://github.com/rucio/rucio/issues/2240>`_ +- Documentation: ChangeLog seems to be a COPYRIGHT file? `#2434 <https://github.com/rucio/rucio/issues/2434>`_ +- Rules: Injector should not inject a rule which is about to expire soon `#2219 <https://github.com/rucio/rucio/issues/2219>`_ +- Testing: Test reaper console script `#2149 <https://github.com/rucio/rucio/issues/2149>`_ +- Transfers: Automatically recover requests in state "protocol mismatch" `#2115 <https://github.com/rucio/rucio/issues/2115>`_ + +**** +Bugs +**** + +- Core & Internals: PostgreSQL alembic upgrade problems `#2212 <https://github.com/rucio/rucio/issues/2212>`_ +- Core & Internals: ineffective `list-dids-by-meta` `#2403 <https://github.com/rucio/rucio/issues/2403>`_ +- Core & Internals: New psycopg2-binary release incompatible with current sqlalchemy version `#2446 <https://github.com/rucio/rucio/issues/2446>`_ +- Core & Internals: list_replicas should raise DID not found `#2468 <https://github.com/rucio/rucio/issues/2468>`_ +- Life time model: Atropos doesn’t gracefully handle InvalidRSEExpression `#2432 <https://github.com/rucio/rucio/issues/2432>`_ +- Life time model: Atropos command-line option --unlock-rules has no effect `#2456 <https://github.com/rucio/rucio/issues/2456>`_ +- Rules: Calculation error of progress class for Rule PROGRESS notifications `#2440 <https://github.com/rucio/rucio/issues/2440>`_ +- Testing: nosetests commands override return codes `#2465 <https://github.com/rucio/rucio/issues/2465>`_ + +------- +Clients +------- + +************ +Enhancements +************ + +- Clients: revert xrdcp workaround in downloadclient as soon as gfal is fixed `#1598 <https://github.com/rucio/rucio/issues/1598>`_ + +**** +Bugs +**** + +- Clients: If policy section not available in client config, no exception should be raised `#1485 <https://github.com/rucio/rucio/issues/1485>`_ +- Clients: Issue in client_extract download `#2485 <https://github.com/rucio/rucio/issues/2485>`_ diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py index 619ee28553..d7aab5ed35 100644 --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.19.5', - 'branch_nick': 'patch-0-1_19_5_preparation', - 'revision_id': '9e14d56c9d958e5348b19ddc7e5fa45d4a778807', - 'revno': 7951 + 'version': '1.19.6', + 'branch_nick': 'patch-0-Release__Rucio_1_19_6_preparation', + 'revision_id': 'a8c639a7a70a9e605ad90535d28d2eab04d89cce', + 'revno': 7992 } diff --git a/lib/rucio/web/ui/static/webui_version b/lib/rucio/web/ui/static/webui_version index 7a8a49471c..a85b56d4e7 100644 --- a/lib/rucio/web/ui/static/webui_version +++ b/lib/rucio/web/ui/static/webui_version @@ -1 +1 @@ -1.19.5 \ No newline at end of file +1.19.6 \ No newline at end of file
canonical__microk8s-3793
Leaving a worker node always leads to a broken state. #### Summary MicroK8S 1.26: doing a "microk8s leave" command on a worker node that has previously joined a cluster, always leave microk8s in a broken state. MicroK8S will regenerate certificates and will try to restart without success. #### What Should Happen Instead? MicroK8S status should give a "microk8s is running" message. #### Reproduction Steps Create a microk8s cluster with 3 master nodes and 2 worker nodes with "microk8s add-node" command. Finally, when everything is up and running, on both worker nodes do a "microk8s leave" command. You won't be able to restart microk8s anymore on worker nodes. #### Introspection Report "microk8s inspect" will trigger the following error: FAIL: Service snap.microk8s.daemon-kubelite is not running The command "snap logs microk8s.daemon-kubelite" will print: 2023-01-30T00:14:43Z microk8s.daemon-kubelite[234081]: W0130 00:14:43.528692 234081 authentication.go:520] AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. You should use a different authorizer 2023-01-30T00:14:43Z microk8s.daemon-kubelite[234081]: Error: failed to parse service-account-issuer-key-file: error reading private key file /var/snap/microk8s/4390/certs/serviceaccount.key: data does not contain a valid RSA or ECDSA private key 2023-01-30T00:14:43Z microk8s.daemon-kubelite[234081]: F0130 00:14:43.528708 234081 daemon.go:67] API Server exited failed to parse service-account-issuer-key-file: error reading private key file /var/snap/microk8s/4390/certs/serviceaccount.key: data does not contain a valid RSA or ECDSA private key 2023-01-30T00:14:43Z systemd[1]: snap.microk8s.daemon-kubelite.service: Main process exited, code=exited, status=255/EXCEPTION 2023-01-30T00:14:43Z systemd[1]: snap.microk8s.daemon-kubelite.service: Failed with result 'exit-code'. 2023-01-30T00:14:43Z systemd[1]: snap.microk8s.daemon-kubelite.service: Scheduled restart job, restart counter is at 5. 2023-01-30T00:14:43Z systemd[1]: Stopped Service for snap application microk8s.daemon-kubelite. 2023-01-30T00:14:43Z systemd[1]: snap.microk8s.daemon-kubelite.service: Start request repeated too quickly. 2023-01-30T00:14:43Z systemd[1]: snap.microk8s.daemon-kubelite.service: Failed with result 'exit-code'. 2023-01-30T00:14:43Z systemd[1]: Failed to start Service for snap application microk8s.daemon-kubelite. Report attached: [inspection-report-20230130_002026.tar.gz](https://github.com/canonical/microk8s/files/10531264/inspection-report-20230130_002026.tar.gz)
[ { "content": "#!/usr/bin/python3\nimport base64\nimport random\nimport string\nimport subprocess\nimport os\nimport ssl\nimport sys\nimport time\nimport hashlib\nimport http\n\nimport click\nimport requests\nimport socket\nimport shutil\nimport urllib3\nimport yaml\nimport json\n\nfrom common.cluster.utils import (\n is_low_memory_guard_enabled,\n try_set_file_permissions,\n is_node_running_dqlite,\n get_cluster_agent_port,\n try_initialise_cni_autodetect_for_clustering,\n service,\n mark_no_cert_reissue,\n restart_all_services,\n get_token,\n)\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nCLUSTER_API = \"cluster/api/v1.0\"\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\nsnap_path = os.environ.get(\"SNAP\")\nca_cert_file_via_env = \"${SNAP_DATA}/certs/ca.remote.crt\"\nca_cert_file = \"{}/certs/ca.remote.crt\".format(snapdata_path)\ncallback_token_file = \"{}/credentials/callback-token.txt\".format(snapdata_path)\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\nserver_cert_file_via_env = \"${SNAP_DATA}/certs/server.remote.crt\"\nserver_cert_file = \"{}/certs/server.remote.crt\".format(snapdata_path)\n\nCLUSTER_API_V2 = \"cluster/api/v2.0\"\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\ncluster_backup_dir = \"{}/var/kubernetes/backend.backup\".format(snapdata_path)\ncluster_cert_file = \"{}/cluster.crt\".format(cluster_dir)\ncluster_key_file = \"{}/cluster.key\".format(cluster_dir)\n\nFINGERPRINT_MIN_LEN = 12\n\n\ndef get_traefik_port():\n \"\"\"\n Return the port Traefik listens to. Try read the port from the Traefik configuration or return the default value\n \"\"\"\n config_file = \"{}/args/traefik/traefik-template.yaml\".format(snapdata_path)\n with open(config_file) as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n if (\n \"entryPoints\" in data\n and \"apiserver\" in data[\"entryPoints\"]\n and \"address\" in data[\"entryPoints\"][\"apiserver\"]\n ):\n port = data[\"entryPoints\"][\"apiserver\"][\"address\"]\n port = port.replace(\":\", \"\")\n return port\n else:\n return \"16443\"\n\n\ndef join_request(conn, api_version, req_data, master_ip, verify_peer, fingerprint):\n json_params = json.dumps(req_data)\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\"}\n\n try:\n if verify_peer and fingerprint:\n if len(fingerprint) < FINGERPRINT_MIN_LEN:\n print(\n \"Joining cluster failed. Fingerprint too short.\"\n \" Use '--skip-verify' to skip server certificate check.\"\n )\n exit(4)\n\n # Do the peer certificate verification\n der_cert_bin = conn.sock.getpeercert(True)\n peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()\n if not peer_cert_hash.startswith(fingerprint):\n print(\n \"Joining cluster failed. Could not verify the identity of {}.\"\n \" Use '--skip-verify' to skip server certificate check.\".format(master_ip)\n )\n exit(4)\n\n conn.request(\"POST\", \"/{}/join\".format(api_version), json_params, headers)\n response = conn.getresponse()\n if not response.status == 200:\n message = extract_error(response)\n print(\"{} ({}).\".format(message, response.status))\n exit(6)\n body = response.read()\n return json.loads(body)\n except http.client.HTTPException as e:\n print(\"Please ensure the master node is reachable. 
{}\".format(e))\n exit(1)\n except ssl.SSLError as e:\n print(\"Peer node verification failed ({}).\".format(e))\n exit(4)\n\n\ndef extract_error(response):\n message = \"Connection failed.\"\n try:\n resp = response.read().decode()\n if resp:\n res_data = json.loads(resp)\n if \"error\" in res_data:\n message = \"{} {}\".format(message, res_data[\"error\"])\n except ValueError:\n pass\n return message\n\n\ndef get_connection_info(\n master_ip,\n master_port,\n token,\n callback_token=None,\n cluster_type=\"etcd\",\n verify_peer=False,\n fingerprint=None,\n worker=False,\n):\n \"\"\"\n Contact the master and get all connection information\n\n :param master_ip: the master IP\n :param master_port: the master port\n :param token: the token to contact the master with\n :param callback_token: callback token for etcd based clusters\n :param cluster_type: the type of cluster we want to join, etcd or dqlite\n :param verify_peer: flag indicating if we should verify peers certificate\n :param fingerprint: the certificate fingerprint we expect from the peer\n :param worker: this is a worker only node\n\n :return: the json response of the master\n \"\"\"\n cluster_agent_port = get_cluster_agent_port()\n try:\n context = ssl._create_unverified_context()\n conn = http.client.HTTPSConnection(\"{}:{}\".format(master_ip, master_port), context=context)\n conn.connect()\n if cluster_type == \"dqlite\":\n req_data = {\n \"token\": token,\n \"hostname\": socket.gethostname().lower(),\n \"port\": cluster_agent_port,\n \"worker\": worker,\n }\n\n return join_request(conn, CLUSTER_API_V2, req_data, master_ip, verify_peer, fingerprint)\n else:\n req_data = {\n \"token\": token,\n \"hostname\": socket.gethostname().lower(),\n \"port\": cluster_agent_port,\n \"callback\": callback_token,\n }\n return join_request(\n conn, CLUSTER_API, req_data, master_ip, verify_peer=False, fingerprint=None\n )\n except http.client.HTTPException as e:\n print(\"Connecting to cluster failed with {}.\".format(e))\n exit(5)\n except ssl.SSLError as e:\n print(\"Peer node verification failed with {}.\".format(e))\n exit(4)\n\n\ndef set_arg(key, value, file):\n \"\"\"\n Set an argument to a file\n\n :param key: argument name\n :param value: value\n :param file: the arguments file\n \"\"\"\n filename = \"{}/args/{}\".format(snapdata_path, file)\n filename_remote = \"{}/args/{}.remote\".format(snapdata_path, file)\n done = False\n with open(filename_remote, \"w+\") as back_fp:\n with open(filename, \"r+\") as fp:\n for _, line in enumerate(fp):\n if line.startswith(key):\n done = True\n if value is not None:\n back_fp.write(\"{}={}\\n\".format(key, value))\n else:\n back_fp.write(\"{}\".format(line))\n if not done and value is not None:\n back_fp.write(\"{}={}\\n\".format(key, value))\n\n shutil.copyfile(filename, \"{}.backup\".format(filename))\n try_set_file_permissions(\"{}.backup\".format(filename))\n shutil.copyfile(filename_remote, filename)\n try_set_file_permissions(filename)\n os.remove(filename_remote)\n\n\ndef get_etcd_client_cert(master_ip, master_port, token):\n \"\"\"\n Get a signed cert to access etcd\n\n :param master_ip: master ip\n :param master_port: master port\n :param token: token to contact the master with\n \"\"\"\n cer_req_file = \"{}/certs/server.remote.csr\".format(snapdata_path)\n cmd_cert = (\n \"{snap}/usr/bin/openssl req -new -sha256 -key {snapdata}/certs/server.key -out {csr} \"\n \"-config {snapdata}/certs/csr.conf\".format(\n snap=snap_path, snapdata=snapdata_path, csr=cer_req_file\n )\n )\n 
subprocess.check_call(cmd_cert.split())\n with open(cer_req_file) as fp:\n csr = fp.read()\n req_data = {\"token\": token, \"request\": csr}\n # TODO: enable ssl verification\n signed = requests.post(\n \"https://{}:{}/{}/sign-cert\".format(master_ip, master_port, CLUSTER_API),\n json=req_data,\n verify=False,\n )\n if signed.status_code != 200:\n print(\"Failed to sign certificate. {}\".format(signed.json()[\"error\"]))\n exit(1)\n info = signed.json()\n with open(server_cert_file, \"w\") as cert_fp:\n cert_fp.write(info[\"certificate\"])\n try_set_file_permissions(server_cert_file)\n\n\ndef get_client_cert(master_ip, master_port, fname, token, username, group=None):\n \"\"\"\n Get a signed cert.\n See https://kubernetes.io/docs/reference/access-authn-authz/authentication/#x509-client-certs\n\n :param master_ip: master ip\n :param master_port: master port\n :param fname: file name prefix for the certificate\n :param token: token to contact the master with\n :param username: the username of the cert's owner\n :param group: the group the owner belongs to\n \"\"\"\n info = \"/CN={}\".format(username)\n if group:\n info = \"{}/O={}\".format(info, group)\n\n # the filenames must survive snap refreshes, so replace revision number with current\n snapdata_current = os.path.abspath(os.path.join(snapdata_path, \"..\", \"current\"))\n\n cer_req_file = \"{}/certs/{}.csr\".format(snapdata_current, fname)\n cer_key_file = \"{}/certs/{}.key\".format(snapdata_current, fname)\n cer_file = \"{}/certs/{}.crt\".format(snapdata_current, fname)\n if not os.path.exists(cer_key_file):\n cmd_gen_cert_key = \"{snap}/usr/bin/openssl genrsa -out {key} 2048\".format(\n snap=snap_path, key=cer_key_file\n )\n subprocess.check_call(\n cmd_gen_cert_key.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n try_set_file_permissions(cer_key_file)\n\n cmd_cert = \"{snap}/usr/bin/openssl req -new -sha256 -key {key} -out {csr} -subj {info}\".format(\n snap=snap_path,\n key=cer_key_file,\n csr=cer_req_file,\n info=info,\n )\n subprocess.check_call(cmd_cert.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n with open(cer_req_file) as fp:\n csr = fp.read()\n req_data = {\"token\": token, \"request\": csr}\n # TODO: enable ssl verification\n signed = requests.post(\n \"https://{}:{}/{}/sign-cert\".format(master_ip, master_port, CLUSTER_API),\n json=req_data,\n verify=False,\n )\n if signed.status_code != 200:\n error = \"Failed to sign {} certificate ({}).\".format(fname, signed.status_code)\n try:\n if \"error\" in signed.json():\n error = \"{} {}\".format(error, format(signed.json()[\"error\"]))\n except ValueError:\n print(\"Make sure the cluster you connect to supports joining worker nodes.\")\n print(error)\n exit(1)\n info = signed.json()\n with open(cer_file, \"w\") as cert_fp:\n cert_fp.write(info[\"certificate\"])\n try_set_file_permissions(cer_file)\n\n return {\n \"certificate_location\": cer_file,\n \"certificate_key_location\": cer_key_file,\n }\n\n\ndef update_flannel(etcd, master_ip, master_port, token):\n \"\"\"\n Configure flannel\n\n :param etcd: etcd endpoint\n :param master_ip: master ip\n :param master_port: master port\n :param token: token to contact the master with\n \"\"\"\n get_etcd_client_cert(master_ip, master_port, token)\n etcd = etcd.replace(\"0.0.0.0\", master_ip)\n set_arg(\"--etcd-endpoints\", etcd, \"flanneld\")\n set_arg(\"--etcd-cafile\", ca_cert_file_via_env, \"flanneld\")\n set_arg(\"--etcd-certfile\", server_cert_file_via_env, \"flanneld\")\n 
set_arg(\"--etcd-keyfile\", \"${SNAP_DATA}/certs/server.key\", \"flanneld\")\n service(\"restart\", \"flanneld\")\n\n\ndef ca_one_line(ca):\n \"\"\"\n The CA in one line\n :param ca: the ca\n :return: one line\n \"\"\"\n return base64.b64encode(ca.encode(\"utf-8\")).decode(\"utf-8\")\n\n\ndef create_kubeconfig(token, ca, master_ip, api_port, filename, user):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the user\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param filename: the name of the config file\n :param user: the user to use al login\n \"\"\"\n snap_path = os.environ.get(\"SNAP\")\n config_template = \"{}/{}\".format(snap_path, \"kubelet.config.template\")\n config = \"{}/credentials/{}\".format(snapdata_path, filename)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n config_txt = tfp.read()\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", user)\n config_txt = config_txt.replace(\"TOKEN\", token)\n config_txt = config_txt.replace(\"127.0.0.1\", master_ip)\n config_txt = config_txt.replace(\"16443\", api_port)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef create_x509_kubeconfig(ca, master_ip, api_port, filename, user, path_to_cert, path_to_cert_key):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the user\n\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param filename: the name of the config file\n :param user: the user to use al login\n :param path_to_cert: path to certificate file\n :param path_to_cert_key: path to certificate key file\n \"\"\"\n snap_path = os.environ.get(\"SNAP\")\n config_template = \"{}/{}\".format(snap_path, \"client-x509.config.template\")\n config = \"{}/credentials/{}\".format(snapdata_path, filename)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n config_txt = tfp.read()\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", user)\n config_txt = config_txt.replace(\"PATHTOCERT\", path_to_cert)\n config_txt = config_txt.replace(\"PATHTOKEYCERT\", path_to_cert_key)\n config_txt = config_txt.replace(\"127.0.0.1\", master_ip)\n config_txt = config_txt.replace(\"16443\", api_port)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef update_kubeproxy(token, ca, master_ip, api_port, hostname_override):\n \"\"\"\n Configure the kube-proxy\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param hostname_override: the hostname override in case the hostname is not resolvable\n \"\"\"\n create_kubeconfig(token, ca, master_ip, api_port, \"proxy.config\", \"kubeproxy\")\n set_arg(\"--master\", None, \"kube-proxy\")\n if hostname_override:\n set_arg(\"--hostname-override\", hostname_override, \"kube-proxy\")\n service(\"restart\", \"proxy\")\n\n\ndef update_cert_auth_kubeproxy(token, ca, master_ip, master_port, hostname_override):\n \"\"\"\n Configure the 
kube-proxy\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param master_port: the master node port where the cluster agent listens\n :param hostname_override: the hostname override in case the hostname is not resolvable\n \"\"\"\n proxy_token = \"{}-proxy\".format(token)\n traefik_port = get_traefik_port()\n cert = get_client_cert(master_ip, master_port, \"kube-proxy\", proxy_token, \"system:kube-proxy\")\n create_x509_kubeconfig(\n ca,\n \"127.0.0.1\",\n traefik_port,\n \"proxy.config\",\n \"kubeproxy\",\n cert[\"certificate_location\"],\n cert[\"certificate_key_location\"],\n )\n set_arg(\"--master\", None, \"kube-proxy\")\n if hostname_override:\n set_arg(\"--hostname-override\", hostname_override, \"kube-proxy\")\n\n\ndef update_cert_auth_kubelet(token, ca, master_ip, master_port):\n \"\"\"\n Configure the kubelet\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param master_port: the master node port where the cluster agent listens\n \"\"\"\n kubelet_token = \"{}-kubelet\".format(token)\n traefik_port = get_traefik_port()\n kubelet_user = \"system:node:{}\".format(socket.gethostname().lower())\n cert = get_client_cert(\n master_ip, master_port, \"kubelet\", kubelet_token, kubelet_user, \"system:nodes\"\n )\n create_x509_kubeconfig(\n ca,\n \"127.0.0.1\",\n traefik_port,\n \"kubelet.config\",\n \"kubelet\",\n cert[\"certificate_location\"],\n cert[\"certificate_key_location\"],\n )\n set_arg(\"--client-ca-file\", \"${SNAP_DATA}/certs/ca.remote.crt\", \"kubelet\")\n set_arg(\n \"--node-labels\",\n \"microk8s.io/cluster=true,node.kubernetes.io/microk8s-worker=microk8s-worker\",\n \"kubelet\",\n )\n\n\ndef update_kubelet(token, ca, master_ip, api_port):\n \"\"\"\n Configure the kubelet\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n \"\"\"\n create_kubeconfig(token, ca, master_ip, api_port, \"kubelet.config\", \"kubelet\")\n set_arg(\"--client-ca-file\", \"${SNAP_DATA}/certs/ca.remote.crt\", \"kubelet\")\n set_arg(\n \"--node-labels\",\n \"microk8s.io/cluster=true,node.kubernetes.io/microk8s-worker=microk8s-worker\",\n \"kubelet\",\n )\n service(\"restart\", \"kubelet\")\n\n\ndef update_apiserver(api_authz_mode):\n \"\"\"\n Configure the API server\n\n :param api_authz_mode: the authorization mode to be used\n \"\"\"\n set_arg(\"--authorization-mode\", api_authz_mode, \"kube-apiserver\")\n service(\"restart\", \"apiserver\")\n\n\ndef store_remote_ca(ca):\n \"\"\"\n Store the remote ca\n\n :param ca: the CA\n \"\"\"\n with open(ca_cert_file, \"w+\") as fp:\n fp.write(ca)\n try_set_file_permissions(ca_cert_file)\n\n\ndef mark_worker_node():\n \"\"\"\n Mark a node as being part of a cluster not running the control plane\n by creating a var/lock/clustered.lock\n \"\"\"\n locks = [\"clustered.lock\", \"no-k8s-dqlite\"]\n for lock in locks:\n lock_file = \"{}/var/lock/{}\".format(snapdata_path, lock)\n open(lock_file, \"a\").close()\n os.chmod(lock_file, 0o700)\n services = [\"kubelite\", \"etcd\", \"apiserver-kicker\", \"apiserver-proxy\", \"k8s-dqlite\"]\n for s in services:\n service(\"restart\", s)\n\n\ndef generate_callback_token():\n \"\"\"\n Generate a token and store it in the callback token file\n\n :return: the token\n \"\"\"\n token = \"\".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))\n with open(callback_token_file, 
\"w\") as fp:\n fp.write(\"{}\\n\".format(token))\n\n try_set_file_permissions(callback_token_file)\n return token\n\n\ndef store_base_kubelet_args(args_string):\n \"\"\"\n Create a kubelet args file from the set of args provided\n\n :param args_string: the arguments provided\n \"\"\"\n args_file = \"{}/args/kubelet\".format(snapdata_path)\n with open(args_file, \"w\") as fp:\n fp.write(args_string)\n try_set_file_permissions(args_file)\n\n\ndef update_kubelet_node_ip(args_string, hostname_override):\n \"\"\"\n Update the kubelet --node-ip argument if it was set on the node that we join.\n\n :param args_string: the kubelet arguments\n :param hostname_override: the source IP address used by the node when joining\n \"\"\"\n if \"--node-ip\" in args_string:\n set_arg(\"--node-ip\", hostname_override, \"kubelet\")\n\n\ndef update_kubelet_hostname_override(args_string):\n \"\"\"\n Remove the kubelet --hostname-override argument if it was set on the node that we join.\n\n :param args_string: the kubelet arguments\n \"\"\"\n if \"--hostname-override\" in args_string:\n set_arg(\"--hostname-override\", None, \"kubelet\")\n\n\ndef replace_admin_token(token):\n \"\"\"\n Replaces the admin token in the known tokens\n\n :param token: the admin token\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if 'admin,admin,\"system:masters\"' in line:\n continue\n back_fp.write(\"{}\".format(line))\n back_fp.write('{},admin,admin,\"system:masters\"\\n'.format(token))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef store_cert(filename, payload):\n \"\"\"\n Store a certificate\n\n :param filename: where to store the certificate\n :param payload: certificate payload\n \"\"\"\n file_with_path = \"{}/certs/{}\".format(snapdata_path, filename)\n backup_file_with_path = \"{}.backup\".format(file_with_path)\n shutil.copyfile(file_with_path, backup_file_with_path)\n try_set_file_permissions(backup_file_with_path)\n with open(file_with_path, \"w+\") as fp:\n fp.write(payload)\n try_set_file_permissions(file_with_path)\n\n\ndef store_cluster_certs(cluster_cert, cluster_key):\n \"\"\"\n Store the dqlite cluster certs\n\n :param cluster_cert: the cluster certificate\n :param cluster_key: the cluster certificate key\n \"\"\"\n with open(cluster_cert_file, \"w+\") as fp:\n fp.write(cluster_cert)\n try_set_file_permissions(cluster_cert_file)\n with open(cluster_key_file, \"w+\") as fp:\n fp.write(cluster_key)\n try_set_file_permissions(cluster_key_file)\n\n\ndef create_admin_kubeconfig(ca, ha_admin_token=None):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the admin\n\n :param ca: the ca\n :param ha_admin_token: the ha_cluster_token\n \"\"\"\n if not ha_admin_token:\n token = get_token(\"admin\", \"basic_auth.csv\")\n if not token:\n print(\"Error, could not locate admin token. 
Joining cluster failed.\")\n exit(2)\n else:\n token = ha_admin_token\n assert token is not None\n config_template = \"{}/{}\".format(snap_path, \"client.config.template\")\n config = \"{}/credentials/client.config\".format(snapdata_path)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n for _, config_txt in enumerate(tfp):\n if config_txt.strip().startswith(\"username:\"):\n continue\n else:\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", \"admin\")\n config_txt = config_txt.replace(\"AUTHTYPE\", \"token\")\n config_txt = config_txt.replace(\"PASSWORD\", token)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef store_callback_token(token):\n \"\"\"\n Store the callback token\n\n :param token: the callback token\n \"\"\"\n callback_token_file = \"{}/credentials/callback-token.txt\".format(snapdata_path)\n with open(callback_token_file, \"w\") as fp:\n fp.write(token)\n try_set_file_permissions(callback_token_file)\n\n\ndef update_dqlite(cluster_cert, cluster_key, voters, host):\n \"\"\"\n Configure the dqlite cluster\n\n :param cluster_cert: the dqlite cluster cert\n :param cluster_key: the dqlite cluster key\n :param voters: the dqlite voters\n :param host: the hostname others see of this node\n \"\"\"\n service(\"stop\", \"apiserver\")\n service(\"stop\", \"k8s-dqlite\")\n time.sleep(10)\n shutil.rmtree(cluster_backup_dir, ignore_errors=True)\n shutil.move(cluster_dir, cluster_backup_dir)\n os.mkdir(cluster_dir)\n store_cluster_certs(cluster_cert, cluster_key)\n\n # We get the dqlite port from the already existing deployment\n port = 19001\n with open(\"{}/info.yaml\".format(cluster_backup_dir)) as f:\n data = yaml.safe_load(f)\n if \"Address\" in data:\n port = data[\"Address\"].split(\":\")[1]\n\n init_data = {\"Cluster\": voters, \"Address\": \"{}:{}\".format(host, port)}\n with open(\"{}/init.yaml\".format(cluster_dir), \"w\") as f:\n yaml.dump(init_data, f)\n\n service(\"start\", \"k8s-dqlite\")\n service(\"start\", \"apiserver\")\n\n waits = 10\n print(\"Waiting for this node to finish joining the cluster.\", end=\" \", flush=True)\n while waits > 0:\n try:\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.STDOUT,\n )\n if host in out.decode():\n break\n else:\n print(\".\", end=\" \", flush=True)\n time.sleep(5)\n waits -= 1\n\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n print(\"..\", end=\" \", flush=True)\n time.sleep(2)\n waits -= 1\n print(\" \")\n\n with open(\"{}//certs/csr.conf\".format(snapdata_path), \"w\") as f:\n f.write(\"changeme\")\n\n restart_all_services()\n\n\ndef join_dqlite(connection_parts, verify=False, worker=False):\n \"\"\"\n Configure node to join a dqlite cluster.\n\n :param connection_parts: connection string parts\n \"\"\"\n token = connection_parts[1]\n master_ep = connection_parts[0].split(\":\")\n master_ip = master_ep[0]\n master_port = master_ep[1]\n fingerprint = None\n if len(connection_parts) > 2:\n fingerprint = connection_parts[2]\n else:\n # we do not have a fingerprint, do not attempt to verify the remote cert\n verify = False\n\n print(\"Contacting cluster at 
{}\".format(master_ip))\n\n info = get_connection_info(\n master_ip,\n master_port,\n token,\n cluster_type=\"dqlite\",\n verify_peer=verify,\n fingerprint=fingerprint,\n worker=worker,\n )\n\n if worker:\n join_dqlite_worker_node(info, master_ip, master_port, token)\n else:\n join_dqlite_master_node(info, master_ip, token)\n\n\ndef update_apiserver_proxy(master_ip, api_port):\n \"\"\"\n Update the apiserver-proxy configuration\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/no-apiserver-proxy\".format(lock_path)\n if os.path.exists(lock):\n os.remove(lock)\n\n # add the initial control plane endpoint\n addresses = [{\"address\": \"{}:{}\".format(master_ip, api_port)}]\n\n traefik_providers = os.path.expandvars(\"${SNAP_DATA}/args/traefik/provider-template.yaml\")\n traefik_providers_out = os.path.expandvars(\"${SNAP_DATA}/args/traefik/provider.yaml\")\n with open(traefik_providers) as f:\n p = yaml.safe_load(f)\n p[\"tcp\"][\"services\"][\"kube-apiserver\"][\"loadBalancer\"][\"servers\"] = addresses\n with open(traefik_providers_out, \"w\") as out_file:\n yaml.dump(p, out_file)\n\n try_set_file_permissions(traefik_providers_out)\n service(\"restart\", \"apiserver-proxy\")\n\n\ndef print_worker_usage():\n \"\"\"\n Print Worker usage\n \"\"\"\n print(\"\")\n print(\"The node has joined the cluster and will appear in the nodes list in a few seconds.\")\n print(\"\")\n print(\"This worker node gets automatically configured with the API server endpoints.\")\n print(\n \"If the API servers are behind a loadbalancer please set the '--refresh-interval' to '0s' in:\"\n )\n print(\" /var/snap/microk8s/current/args/apiserver-proxy\")\n print(\"and replace the API server endpoints with the one provided by the loadbalancer in:\")\n print(\" /var/snap/microk8s/current/args/traefik/provider.yaml\")\n print(\"\")\n\n\ndef join_dqlite_worker_node(info, master_ip, master_port, token):\n \"\"\"\n Join this node as a worker to a cluster running dqlite.\n\n :param info: dictionary with the connection information\n :param master_ip: the IP of the master node we contacted to connect to the cluster\n :param master_port: the port of the mester node we contacted to connect to the cluster\n :param token: the token to pass to the master in order to authenticate with it\n \"\"\"\n hostname_override = info[\"hostname_override\"]\n if info[\"ca_key\"] is not None:\n print(\n \"Joining process failed. 
Make sure the cluster you connect to supports joining worker nodes.\"\n )\n exit(1)\n\n store_remote_ca(info[\"ca\"])\n store_cert(\"serviceaccount.key\", info[\"service_account_key\"])\n\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n update_cert_auth_kubeproxy(token, info[\"ca\"], master_ip, master_port, hostname_override)\n update_cert_auth_kubelet(token, info[\"ca\"], master_ip, master_port)\n\n store_callback_token(info[\"callback_token\"])\n update_apiserver_proxy(master_ip, info[\"apiport\"])\n mark_worker_node()\n mark_no_cert_reissue()\n print_worker_usage()\n\n\ndef join_dqlite_master_node(info, master_ip, token):\n \"\"\"\n Join this node to a cluster running dqlite.\n\n :param info: dictionary with the connection information\n :param master_ip: the IP of the master node we contacted to connect to the cluster\n :param token: the token to pass to the master in order to authenticate with it\n \"\"\"\n hostname_override = info[\"hostname_override\"]\n store_cert(\"ca.crt\", info[\"ca\"])\n store_cert(\"ca.key\", info[\"ca_key\"])\n store_cert(\"serviceaccount.key\", info[\"service_account_key\"])\n # triplets of [username in known_tokens.csv, username in kubeconfig, kubeconfig filename name]\n for component in [\n (\"kube-proxy\", \"kubeproxy\", \"proxy.config\"),\n (\"kubelet\", \"kubelet\", \"kubelet.config\"),\n (\"kube-controller-manager\", \"controller\", \"controller.config\"),\n (\"kube-scheduler\", \"scheduler\", \"scheduler.config\"),\n ]:\n component_token = get_token(component[0])\n if not component_token:\n print(\"Error, could not locate {} token. Joining cluster failed.\".format(component[0]))\n exit(3)\n assert token is not None\n # TODO make this configurable\n create_kubeconfig(\n component_token, info[\"ca\"], \"127.0.0.1\", \"16443\", component[2], component[1]\n )\n if \"admin_token\" in info:\n replace_admin_token(info[\"admin_token\"])\n if \"api_authz_mode\" in info:\n update_apiserver(info[\"api_authz_mode\"])\n\n create_admin_kubeconfig(info[\"ca\"], info[\"admin_token\"])\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n store_callback_token(info[\"callback_token\"])\n update_dqlite(info[\"cluster_cert\"], info[\"cluster_key\"], info[\"voters\"], hostname_override)\n # We want to update the local CNI yaml but we do not want to apply it.\n # The cni is applied already in the cluster we join\n try_initialise_cni_autodetect_for_clustering(master_ip, apply_cni=False)\n mark_no_cert_reissue()\n\n\ndef join_etcd(connection_parts, verify=True):\n \"\"\"\n Configure node to join an etcd cluster.\n\n :param connection_parts: connection string parts\n \"\"\"\n token = connection_parts[1]\n master_ep = connection_parts[0].split(\":\")\n master_ip = master_ep[0]\n master_port = master_ep[1]\n callback_token = generate_callback_token()\n info = get_connection_info(master_ip, master_port, token, callback_token=callback_token)\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n hostname_override = None\n if \"hostname_override\" in info:\n hostname_override = info[\"hostname_override\"]\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n\n store_remote_ca(info[\"ca\"])\n update_flannel(info[\"etcd\"], master_ip, master_port, token)\n 
update_kubeproxy(info[\"kubeproxy\"], info[\"ca\"], master_ip, info[\"apiport\"], hostname_override)\n update_kubelet(info[\"kubelet\"], info[\"ca\"], master_ip, info[\"apiport\"])\n mark_worker_node()\n mark_no_cert_reissue()\n\n\[email protected](\n context_settings={\"ignore_unknown_options\": True, \"help_option_names\": [\"-h\", \"--help\"]}\n)\[email protected](\"connection\", required=True)\[email protected](\n \"--worker\", \"worker\", default=False, flag_value=\"as-worker\", help=\"Join as a worker only node.\"\n)\[email protected](\n \"--controlplane\",\n \"worker\",\n flag_value=\"as-master\",\n help=\"Join running the control plane on HA clusters. (default)\",\n)\[email protected](\n \"--skip-verify\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Skip the certificate verification of the node we are joining to. (default: false)\",\n)\[email protected](\n \"--disable-low-memory-guard\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Disable the low memory guard. (default: false)\",\n)\ndef join(connection, worker, skip_verify, disable_low_memory_guard):\n \"\"\"\n Join the node to a cluster\n\n CONNECTION: the cluster connection endpoint in format <master>:<port>/<token>\n \"\"\"\n connection_parts = connection.split(\"/\")\n verify = not skip_verify\n\n if is_low_memory_guard_enabled() and disable_low_memory_guard:\n os.remove(os.path.expandvars(\"$SNAP_DATA/var/lock/low-memory-guard.lock\"))\n\n if is_low_memory_guard_enabled() and not worker:\n print(\n \"\"\"\nThis node does not have enough RAM to host the Kubernetes control plane services\nand join the database quorum. You may consider joining this node as a worker instead:\n\n microk8s join {connection} --worker\n\nIf you would still like to join the cluster as a control plane node, use:\n\n microk8s join {connection} --disable-low-memory-guard\n\n\"\"\".format(\n connection=connection\n )\n )\n sys.exit(1)\n\n if is_node_running_dqlite():\n join_dqlite(connection_parts, verify, worker)\n else:\n join_etcd(connection_parts, verify)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n join(prog_name=\"microk8s join\")\n", "path": "scripts/wrappers/join.py" } ]
[ { "content": "#!/usr/bin/python3\nimport base64\nimport random\nimport string\nimport subprocess\nimport os\nimport ssl\nimport sys\nimport time\nimport hashlib\nimport http\n\nimport click\nimport requests\nimport socket\nimport shutil\nimport urllib3\nimport yaml\nimport json\n\nfrom common.cluster.utils import (\n is_low_memory_guard_enabled,\n try_set_file_permissions,\n is_node_running_dqlite,\n get_cluster_agent_port,\n try_initialise_cni_autodetect_for_clustering,\n service,\n mark_no_cert_reissue,\n restart_all_services,\n get_token,\n)\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nCLUSTER_API = \"cluster/api/v1.0\"\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\nsnap_path = os.environ.get(\"SNAP\")\nca_cert_file_via_env = \"${SNAP_DATA}/certs/ca.remote.crt\"\nca_cert_file = \"{}/certs/ca.remote.crt\".format(snapdata_path)\ncallback_token_file = \"{}/credentials/callback-token.txt\".format(snapdata_path)\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\nserver_cert_file_via_env = \"${SNAP_DATA}/certs/server.remote.crt\"\nserver_cert_file = \"{}/certs/server.remote.crt\".format(snapdata_path)\n\nCLUSTER_API_V2 = \"cluster/api/v2.0\"\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\ncluster_backup_dir = \"{}/var/kubernetes/backend.backup\".format(snapdata_path)\ncluster_cert_file = \"{}/cluster.crt\".format(cluster_dir)\ncluster_key_file = \"{}/cluster.key\".format(cluster_dir)\n\nFINGERPRINT_MIN_LEN = 12\n\n\ndef get_traefik_port():\n \"\"\"\n Return the port Traefik listens to. Try read the port from the Traefik configuration or return the default value\n \"\"\"\n config_file = \"{}/args/traefik/traefik-template.yaml\".format(snapdata_path)\n with open(config_file) as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n if (\n \"entryPoints\" in data\n and \"apiserver\" in data[\"entryPoints\"]\n and \"address\" in data[\"entryPoints\"][\"apiserver\"]\n ):\n port = data[\"entryPoints\"][\"apiserver\"][\"address\"]\n port = port.replace(\":\", \"\")\n return port\n else:\n return \"16443\"\n\n\ndef join_request(conn, api_version, req_data, master_ip, verify_peer, fingerprint):\n json_params = json.dumps(req_data)\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\"}\n\n try:\n if verify_peer and fingerprint:\n if len(fingerprint) < FINGERPRINT_MIN_LEN:\n print(\n \"Joining cluster failed. Fingerprint too short.\"\n \" Use '--skip-verify' to skip server certificate check.\"\n )\n exit(4)\n\n # Do the peer certificate verification\n der_cert_bin = conn.sock.getpeercert(True)\n peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()\n if not peer_cert_hash.startswith(fingerprint):\n print(\n \"Joining cluster failed. Could not verify the identity of {}.\"\n \" Use '--skip-verify' to skip server certificate check.\".format(master_ip)\n )\n exit(4)\n\n conn.request(\"POST\", \"/{}/join\".format(api_version), json_params, headers)\n response = conn.getresponse()\n if not response.status == 200:\n message = extract_error(response)\n print(\"{} ({}).\".format(message, response.status))\n exit(6)\n body = response.read()\n return json.loads(body)\n except http.client.HTTPException as e:\n print(\"Please ensure the master node is reachable. 
{}\".format(e))\n exit(1)\n except ssl.SSLError as e:\n print(\"Peer node verification failed ({}).\".format(e))\n exit(4)\n\n\ndef extract_error(response):\n message = \"Connection failed.\"\n try:\n resp = response.read().decode()\n if resp:\n res_data = json.loads(resp)\n if \"error\" in res_data:\n message = \"{} {}\".format(message, res_data[\"error\"])\n except ValueError:\n pass\n return message\n\n\ndef get_connection_info(\n master_ip,\n master_port,\n token,\n callback_token=None,\n cluster_type=\"etcd\",\n verify_peer=False,\n fingerprint=None,\n worker=False,\n):\n \"\"\"\n Contact the master and get all connection information\n\n :param master_ip: the master IP\n :param master_port: the master port\n :param token: the token to contact the master with\n :param callback_token: callback token for etcd based clusters\n :param cluster_type: the type of cluster we want to join, etcd or dqlite\n :param verify_peer: flag indicating if we should verify peers certificate\n :param fingerprint: the certificate fingerprint we expect from the peer\n :param worker: this is a worker only node\n\n :return: the json response of the master\n \"\"\"\n cluster_agent_port = get_cluster_agent_port()\n try:\n context = ssl._create_unverified_context()\n conn = http.client.HTTPSConnection(\"{}:{}\".format(master_ip, master_port), context=context)\n conn.connect()\n if cluster_type == \"dqlite\":\n req_data = {\n \"token\": token,\n \"hostname\": socket.gethostname().lower(),\n \"port\": cluster_agent_port,\n \"worker\": worker,\n }\n\n return join_request(conn, CLUSTER_API_V2, req_data, master_ip, verify_peer, fingerprint)\n else:\n req_data = {\n \"token\": token,\n \"hostname\": socket.gethostname().lower(),\n \"port\": cluster_agent_port,\n \"callback\": callback_token,\n }\n return join_request(\n conn, CLUSTER_API, req_data, master_ip, verify_peer=False, fingerprint=None\n )\n except http.client.HTTPException as e:\n print(\"Connecting to cluster failed with {}.\".format(e))\n exit(5)\n except ssl.SSLError as e:\n print(\"Peer node verification failed with {}.\".format(e))\n exit(4)\n\n\ndef set_arg(key, value, file):\n \"\"\"\n Set an argument to a file\n\n :param key: argument name\n :param value: value\n :param file: the arguments file\n \"\"\"\n filename = \"{}/args/{}\".format(snapdata_path, file)\n filename_remote = \"{}/args/{}.remote\".format(snapdata_path, file)\n done = False\n with open(filename_remote, \"w+\") as back_fp:\n with open(filename, \"r+\") as fp:\n for _, line in enumerate(fp):\n if line.startswith(key):\n done = True\n if value is not None:\n back_fp.write(\"{}={}\\n\".format(key, value))\n else:\n back_fp.write(\"{}\".format(line))\n if not done and value is not None:\n back_fp.write(\"{}={}\\n\".format(key, value))\n\n shutil.copyfile(filename, \"{}.backup\".format(filename))\n try_set_file_permissions(\"{}.backup\".format(filename))\n shutil.copyfile(filename_remote, filename)\n try_set_file_permissions(filename)\n os.remove(filename_remote)\n\n\ndef get_etcd_client_cert(master_ip, master_port, token):\n \"\"\"\n Get a signed cert to access etcd\n\n :param master_ip: master ip\n :param master_port: master port\n :param token: token to contact the master with\n \"\"\"\n cer_req_file = \"{}/certs/server.remote.csr\".format(snapdata_path)\n cmd_cert = (\n \"{snap}/usr/bin/openssl req -new -sha256 -key {snapdata}/certs/server.key -out {csr} \"\n \"-config {snapdata}/certs/csr.conf\".format(\n snap=snap_path, snapdata=snapdata_path, csr=cer_req_file\n )\n )\n 
subprocess.check_call(cmd_cert.split())\n with open(cer_req_file) as fp:\n csr = fp.read()\n req_data = {\"token\": token, \"request\": csr}\n # TODO: enable ssl verification\n signed = requests.post(\n \"https://{}:{}/{}/sign-cert\".format(master_ip, master_port, CLUSTER_API),\n json=req_data,\n verify=False,\n )\n if signed.status_code != 200:\n print(\"Failed to sign certificate. {}\".format(signed.json()[\"error\"]))\n exit(1)\n info = signed.json()\n with open(server_cert_file, \"w\") as cert_fp:\n cert_fp.write(info[\"certificate\"])\n try_set_file_permissions(server_cert_file)\n\n\ndef get_client_cert(master_ip, master_port, fname, token, username, group=None):\n \"\"\"\n Get a signed cert.\n See https://kubernetes.io/docs/reference/access-authn-authz/authentication/#x509-client-certs\n\n :param master_ip: master ip\n :param master_port: master port\n :param fname: file name prefix for the certificate\n :param token: token to contact the master with\n :param username: the username of the cert's owner\n :param group: the group the owner belongs to\n \"\"\"\n info = \"/CN={}\".format(username)\n if group:\n info = \"{}/O={}\".format(info, group)\n\n # the filenames must survive snap refreshes, so replace revision number with current\n snapdata_current = os.path.abspath(os.path.join(snapdata_path, \"..\", \"current\"))\n\n cer_req_file = \"{}/certs/{}.csr\".format(snapdata_current, fname)\n cer_key_file = \"{}/certs/{}.key\".format(snapdata_current, fname)\n cer_file = \"{}/certs/{}.crt\".format(snapdata_current, fname)\n if not os.path.exists(cer_key_file):\n cmd_gen_cert_key = \"{snap}/usr/bin/openssl genrsa -out {key} 2048\".format(\n snap=snap_path, key=cer_key_file\n )\n subprocess.check_call(\n cmd_gen_cert_key.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n try_set_file_permissions(cer_key_file)\n\n cmd_cert = \"{snap}/usr/bin/openssl req -new -sha256 -key {key} -out {csr} -subj {info}\".format(\n snap=snap_path,\n key=cer_key_file,\n csr=cer_req_file,\n info=info,\n )\n subprocess.check_call(cmd_cert.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n with open(cer_req_file) as fp:\n csr = fp.read()\n req_data = {\"token\": token, \"request\": csr}\n # TODO: enable ssl verification\n signed = requests.post(\n \"https://{}:{}/{}/sign-cert\".format(master_ip, master_port, CLUSTER_API),\n json=req_data,\n verify=False,\n )\n if signed.status_code != 200:\n error = \"Failed to sign {} certificate ({}).\".format(fname, signed.status_code)\n try:\n if \"error\" in signed.json():\n error = \"{} {}\".format(error, format(signed.json()[\"error\"]))\n except ValueError:\n print(\"Make sure the cluster you connect to supports joining worker nodes.\")\n print(error)\n exit(1)\n info = signed.json()\n with open(cer_file, \"w\") as cert_fp:\n cert_fp.write(info[\"certificate\"])\n try_set_file_permissions(cer_file)\n\n return {\n \"certificate_location\": cer_file,\n \"certificate_key_location\": cer_key_file,\n }\n\n\ndef update_flannel(etcd, master_ip, master_port, token):\n \"\"\"\n Configure flannel\n\n :param etcd: etcd endpoint\n :param master_ip: master ip\n :param master_port: master port\n :param token: token to contact the master with\n \"\"\"\n get_etcd_client_cert(master_ip, master_port, token)\n etcd = etcd.replace(\"0.0.0.0\", master_ip)\n set_arg(\"--etcd-endpoints\", etcd, \"flanneld\")\n set_arg(\"--etcd-cafile\", ca_cert_file_via_env, \"flanneld\")\n set_arg(\"--etcd-certfile\", server_cert_file_via_env, \"flanneld\")\n 
set_arg(\"--etcd-keyfile\", \"${SNAP_DATA}/certs/server.key\", \"flanneld\")\n service(\"restart\", \"flanneld\")\n\n\ndef ca_one_line(ca):\n \"\"\"\n The CA in one line\n :param ca: the ca\n :return: one line\n \"\"\"\n return base64.b64encode(ca.encode(\"utf-8\")).decode(\"utf-8\")\n\n\ndef create_kubeconfig(token, ca, master_ip, api_port, filename, user):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the user\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param filename: the name of the config file\n :param user: the user to use al login\n \"\"\"\n snap_path = os.environ.get(\"SNAP\")\n config_template = \"{}/{}\".format(snap_path, \"kubelet.config.template\")\n config = \"{}/credentials/{}\".format(snapdata_path, filename)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n config_txt = tfp.read()\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", user)\n config_txt = config_txt.replace(\"TOKEN\", token)\n config_txt = config_txt.replace(\"127.0.0.1\", master_ip)\n config_txt = config_txt.replace(\"16443\", api_port)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef create_x509_kubeconfig(ca, master_ip, api_port, filename, user, path_to_cert, path_to_cert_key):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the user\n\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param filename: the name of the config file\n :param user: the user to use al login\n :param path_to_cert: path to certificate file\n :param path_to_cert_key: path to certificate key file\n \"\"\"\n snap_path = os.environ.get(\"SNAP\")\n config_template = \"{}/{}\".format(snap_path, \"client-x509.config.template\")\n config = \"{}/credentials/{}\".format(snapdata_path, filename)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n config_txt = tfp.read()\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", user)\n config_txt = config_txt.replace(\"PATHTOCERT\", path_to_cert)\n config_txt = config_txt.replace(\"PATHTOKEYCERT\", path_to_cert_key)\n config_txt = config_txt.replace(\"127.0.0.1\", master_ip)\n config_txt = config_txt.replace(\"16443\", api_port)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef update_kubeproxy(token, ca, master_ip, api_port, hostname_override):\n \"\"\"\n Configure the kube-proxy\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n :param hostname_override: the hostname override in case the hostname is not resolvable\n \"\"\"\n create_kubeconfig(token, ca, master_ip, api_port, \"proxy.config\", \"kubeproxy\")\n set_arg(\"--master\", None, \"kube-proxy\")\n if hostname_override:\n set_arg(\"--hostname-override\", hostname_override, \"kube-proxy\")\n service(\"restart\", \"proxy\")\n\n\ndef update_cert_auth_kubeproxy(token, ca, master_ip, master_port, hostname_override):\n \"\"\"\n Configure the 
kube-proxy\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param master_port: the master node port where the cluster agent listens\n :param hostname_override: the hostname override in case the hostname is not resolvable\n \"\"\"\n proxy_token = \"{}-proxy\".format(token)\n traefik_port = get_traefik_port()\n cert = get_client_cert(master_ip, master_port, \"kube-proxy\", proxy_token, \"system:kube-proxy\")\n create_x509_kubeconfig(\n ca,\n \"127.0.0.1\",\n traefik_port,\n \"proxy.config\",\n \"kubeproxy\",\n cert[\"certificate_location\"],\n cert[\"certificate_key_location\"],\n )\n set_arg(\"--master\", None, \"kube-proxy\")\n if hostname_override:\n set_arg(\"--hostname-override\", hostname_override, \"kube-proxy\")\n\n\ndef update_cert_auth_kubelet(token, ca, master_ip, master_port):\n \"\"\"\n Configure the kubelet\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param master_port: the master node port where the cluster agent listens\n \"\"\"\n kubelet_token = \"{}-kubelet\".format(token)\n traefik_port = get_traefik_port()\n kubelet_user = \"system:node:{}\".format(socket.gethostname().lower())\n cert = get_client_cert(\n master_ip, master_port, \"kubelet\", kubelet_token, kubelet_user, \"system:nodes\"\n )\n create_x509_kubeconfig(\n ca,\n \"127.0.0.1\",\n traefik_port,\n \"kubelet.config\",\n \"kubelet\",\n cert[\"certificate_location\"],\n cert[\"certificate_key_location\"],\n )\n set_arg(\"--client-ca-file\", \"${SNAP_DATA}/certs/ca.remote.crt\", \"kubelet\")\n set_arg(\n \"--node-labels\",\n \"microk8s.io/cluster=true,node.kubernetes.io/microk8s-worker=microk8s-worker\",\n \"kubelet\",\n )\n\n\ndef update_kubelet(token, ca, master_ip, api_port):\n \"\"\"\n Configure the kubelet\n\n :param token: the token to be in the kubeconfig\n :param ca: the ca\n :param master_ip: the master node IP\n :param api_port: the API server port\n \"\"\"\n create_kubeconfig(token, ca, master_ip, api_port, \"kubelet.config\", \"kubelet\")\n set_arg(\"--client-ca-file\", \"${SNAP_DATA}/certs/ca.remote.crt\", \"kubelet\")\n set_arg(\n \"--node-labels\",\n \"microk8s.io/cluster=true,node.kubernetes.io/microk8s-worker=microk8s-worker\",\n \"kubelet\",\n )\n service(\"restart\", \"kubelet\")\n\n\ndef update_apiserver(api_authz_mode):\n \"\"\"\n Configure the API server\n\n :param api_authz_mode: the authorization mode to be used\n \"\"\"\n set_arg(\"--authorization-mode\", api_authz_mode, \"kube-apiserver\")\n service(\"restart\", \"apiserver\")\n\n\ndef store_remote_ca(ca):\n \"\"\"\n Store the remote ca\n\n :param ca: the CA\n \"\"\"\n with open(ca_cert_file, \"w+\") as fp:\n fp.write(ca)\n try_set_file_permissions(ca_cert_file)\n\n\ndef mark_worker_node():\n \"\"\"\n Mark a node as being part of a cluster not running the control plane\n by creating a var/lock/clustered.lock\n \"\"\"\n locks = [\"clustered.lock\", \"no-k8s-dqlite\"]\n for lock in locks:\n lock_file = \"{}/var/lock/{}\".format(snapdata_path, lock)\n open(lock_file, \"a\").close()\n os.chmod(lock_file, 0o700)\n services = [\"kubelite\", \"etcd\", \"apiserver-kicker\", \"apiserver-proxy\", \"k8s-dqlite\"]\n for s in services:\n service(\"restart\", s)\n\n\ndef generate_callback_token():\n \"\"\"\n Generate a token and store it in the callback token file\n\n :return: the token\n \"\"\"\n token = \"\".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))\n with open(callback_token_file, 
\"w\") as fp:\n fp.write(\"{}\\n\".format(token))\n\n try_set_file_permissions(callback_token_file)\n return token\n\n\ndef store_base_kubelet_args(args_string):\n \"\"\"\n Create a kubelet args file from the set of args provided\n\n :param args_string: the arguments provided\n \"\"\"\n args_file = \"{}/args/kubelet\".format(snapdata_path)\n with open(args_file, \"w\") as fp:\n fp.write(args_string)\n try_set_file_permissions(args_file)\n\n\ndef update_kubelet_node_ip(args_string, hostname_override):\n \"\"\"\n Update the kubelet --node-ip argument if it was set on the node that we join.\n\n :param args_string: the kubelet arguments\n :param hostname_override: the source IP address used by the node when joining\n \"\"\"\n if \"--node-ip\" in args_string:\n set_arg(\"--node-ip\", hostname_override, \"kubelet\")\n\n\ndef update_kubelet_hostname_override(args_string):\n \"\"\"\n Remove the kubelet --hostname-override argument if it was set on the node that we join.\n\n :param args_string: the kubelet arguments\n \"\"\"\n if \"--hostname-override\" in args_string:\n set_arg(\"--hostname-override\", None, \"kubelet\")\n\n\ndef replace_admin_token(token):\n \"\"\"\n Replaces the admin token in the known tokens\n\n :param token: the admin token\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if 'admin,admin,\"system:masters\"' in line:\n continue\n back_fp.write(\"{}\".format(line))\n back_fp.write('{},admin,admin,\"system:masters\"\\n'.format(token))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef store_cert(filename, payload):\n \"\"\"\n Store a certificate\n\n :param filename: where to store the certificate\n :param payload: certificate payload\n \"\"\"\n file_with_path = \"{}/certs/{}\".format(snapdata_path, filename)\n backup_file_with_path = \"{}.backup\".format(file_with_path)\n shutil.copyfile(file_with_path, backup_file_with_path)\n try_set_file_permissions(backup_file_with_path)\n with open(file_with_path, \"w+\") as fp:\n fp.write(payload)\n try_set_file_permissions(file_with_path)\n\n\ndef store_cluster_certs(cluster_cert, cluster_key):\n \"\"\"\n Store the dqlite cluster certs\n\n :param cluster_cert: the cluster certificate\n :param cluster_key: the cluster certificate key\n \"\"\"\n with open(cluster_cert_file, \"w+\") as fp:\n fp.write(cluster_cert)\n try_set_file_permissions(cluster_cert_file)\n with open(cluster_key_file, \"w+\") as fp:\n fp.write(cluster_key)\n try_set_file_permissions(cluster_key_file)\n\n\ndef create_admin_kubeconfig(ca, ha_admin_token=None):\n \"\"\"\n Create a kubeconfig file. The file in stored under credentials named after the admin\n\n :param ca: the ca\n :param ha_admin_token: the ha_cluster_token\n \"\"\"\n if not ha_admin_token:\n token = get_token(\"admin\", \"basic_auth.csv\")\n if not token:\n print(\"Error, could not locate admin token. 
Joining cluster failed.\")\n exit(2)\n else:\n token = ha_admin_token\n assert token is not None\n config_template = \"{}/{}\".format(snap_path, \"client.config.template\")\n config = \"{}/credentials/client.config\".format(snapdata_path)\n shutil.copyfile(config, \"{}.backup\".format(config))\n try_set_file_permissions(\"{}.backup\".format(config))\n ca_line = ca_one_line(ca)\n with open(config_template, \"r\") as tfp:\n with open(config, \"w+\") as fp:\n for _, config_txt in enumerate(tfp):\n if config_txt.strip().startswith(\"username:\"):\n continue\n else:\n config_txt = config_txt.replace(\"CADATA\", ca_line)\n config_txt = config_txt.replace(\"NAME\", \"admin\")\n config_txt = config_txt.replace(\"AUTHTYPE\", \"token\")\n config_txt = config_txt.replace(\"PASSWORD\", token)\n fp.write(config_txt)\n try_set_file_permissions(config)\n\n\ndef store_callback_token(token):\n \"\"\"\n Store the callback token\n\n :param token: the callback token\n \"\"\"\n callback_token_file = \"{}/credentials/callback-token.txt\".format(snapdata_path)\n with open(callback_token_file, \"w\") as fp:\n fp.write(token)\n try_set_file_permissions(callback_token_file)\n\n\ndef update_dqlite(cluster_cert, cluster_key, voters, host):\n \"\"\"\n Configure the dqlite cluster\n\n :param cluster_cert: the dqlite cluster cert\n :param cluster_key: the dqlite cluster key\n :param voters: the dqlite voters\n :param host: the hostname others see of this node\n \"\"\"\n service(\"stop\", \"apiserver\")\n service(\"stop\", \"k8s-dqlite\")\n time.sleep(10)\n shutil.rmtree(cluster_backup_dir, ignore_errors=True)\n shutil.move(cluster_dir, cluster_backup_dir)\n os.mkdir(cluster_dir)\n store_cluster_certs(cluster_cert, cluster_key)\n\n # We get the dqlite port from the already existing deployment\n port = 19001\n with open(\"{}/info.yaml\".format(cluster_backup_dir)) as f:\n data = yaml.safe_load(f)\n if \"Address\" in data:\n port = data[\"Address\"].split(\":\")[1]\n\n init_data = {\"Cluster\": voters, \"Address\": \"{}:{}\".format(host, port)}\n with open(\"{}/init.yaml\".format(cluster_dir), \"w\") as f:\n yaml.dump(init_data, f)\n\n service(\"start\", \"k8s-dqlite\")\n service(\"start\", \"apiserver\")\n\n waits = 10\n print(\"Waiting for this node to finish joining the cluster.\", end=\" \", flush=True)\n while waits > 0:\n try:\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split(),\n timeout=4,\n stderr=subprocess.STDOUT,\n )\n if host in out.decode():\n break\n else:\n print(\".\", end=\" \", flush=True)\n time.sleep(5)\n waits -= 1\n\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired):\n print(\"..\", end=\" \", flush=True)\n time.sleep(2)\n waits -= 1\n print(\" \")\n\n with open(\"{}//certs/csr.conf\".format(snapdata_path), \"w\") as f:\n f.write(\"changeme\")\n\n restart_all_services()\n\n\ndef join_dqlite(connection_parts, verify=False, worker=False):\n \"\"\"\n Configure node to join a dqlite cluster.\n\n :param connection_parts: connection string parts\n \"\"\"\n token = connection_parts[1]\n master_ep = connection_parts[0].split(\":\")\n master_ip = master_ep[0]\n master_port = master_ep[1]\n fingerprint = None\n if len(connection_parts) > 2:\n fingerprint = connection_parts[2]\n else:\n # we do not have a fingerprint, do not attempt to verify the remote cert\n verify = False\n\n print(\"Contacting cluster at 
{}\".format(master_ip))\n\n info = get_connection_info(\n master_ip,\n master_port,\n token,\n cluster_type=\"dqlite\",\n verify_peer=verify,\n fingerprint=fingerprint,\n worker=worker,\n )\n\n if worker:\n join_dqlite_worker_node(info, master_ip, master_port, token)\n else:\n join_dqlite_master_node(info, master_ip, token)\n\n\ndef update_apiserver_proxy(master_ip, api_port):\n \"\"\"\n Update the apiserver-proxy configuration\n \"\"\"\n lock_path = os.path.expandvars(\"${SNAP_DATA}/var/lock\")\n lock = \"{}/no-apiserver-proxy\".format(lock_path)\n if os.path.exists(lock):\n os.remove(lock)\n\n # add the initial control plane endpoint\n addresses = [{\"address\": \"{}:{}\".format(master_ip, api_port)}]\n\n traefik_providers = os.path.expandvars(\"${SNAP_DATA}/args/traefik/provider-template.yaml\")\n traefik_providers_out = os.path.expandvars(\"${SNAP_DATA}/args/traefik/provider.yaml\")\n with open(traefik_providers) as f:\n p = yaml.safe_load(f)\n p[\"tcp\"][\"services\"][\"kube-apiserver\"][\"loadBalancer\"][\"servers\"] = addresses\n with open(traefik_providers_out, \"w\") as out_file:\n yaml.dump(p, out_file)\n\n try_set_file_permissions(traefik_providers_out)\n service(\"restart\", \"apiserver-proxy\")\n\n\ndef print_worker_usage():\n \"\"\"\n Print Worker usage\n \"\"\"\n print(\"\")\n print(\"The node has joined the cluster and will appear in the nodes list in a few seconds.\")\n print(\"\")\n print(\"This worker node gets automatically configured with the API server endpoints.\")\n print(\n \"If the API servers are behind a loadbalancer please set the '--refresh-interval' to '0s' in:\"\n )\n print(\" /var/snap/microk8s/current/args/apiserver-proxy\")\n print(\"and replace the API server endpoints with the one provided by the loadbalancer in:\")\n print(\" /var/snap/microk8s/current/args/traefik/provider.yaml\")\n print(\"\")\n\n\ndef join_dqlite_worker_node(info, master_ip, master_port, token):\n \"\"\"\n Join this node as a worker to a cluster running dqlite.\n\n :param info: dictionary with the connection information\n :param master_ip: the IP of the master node we contacted to connect to the cluster\n :param master_port: the port of the mester node we contacted to connect to the cluster\n :param token: the token to pass to the master in order to authenticate with it\n \"\"\"\n hostname_override = info[\"hostname_override\"]\n if info[\"ca_key\"] is not None:\n print(\n \"Joining process failed. 
Make sure the cluster you connect to supports joining worker nodes.\"\n )\n exit(1)\n\n store_remote_ca(info[\"ca\"])\n\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n update_cert_auth_kubeproxy(token, info[\"ca\"], master_ip, master_port, hostname_override)\n update_cert_auth_kubelet(token, info[\"ca\"], master_ip, master_port)\n\n store_callback_token(info[\"callback_token\"])\n update_apiserver_proxy(master_ip, info[\"apiport\"])\n mark_worker_node()\n mark_no_cert_reissue()\n print_worker_usage()\n\n\ndef join_dqlite_master_node(info, master_ip, token):\n \"\"\"\n Join this node to a cluster running dqlite.\n\n :param info: dictionary with the connection information\n :param master_ip: the IP of the master node we contacted to connect to the cluster\n :param token: the token to pass to the master in order to authenticate with it\n \"\"\"\n hostname_override = info[\"hostname_override\"]\n store_cert(\"ca.crt\", info[\"ca\"])\n store_cert(\"ca.key\", info[\"ca_key\"])\n store_cert(\"serviceaccount.key\", info[\"service_account_key\"])\n # triplets of [username in known_tokens.csv, username in kubeconfig, kubeconfig filename name]\n for component in [\n (\"kube-proxy\", \"kubeproxy\", \"proxy.config\"),\n (\"kubelet\", \"kubelet\", \"kubelet.config\"),\n (\"kube-controller-manager\", \"controller\", \"controller.config\"),\n (\"kube-scheduler\", \"scheduler\", \"scheduler.config\"),\n ]:\n component_token = get_token(component[0])\n if not component_token:\n print(\"Error, could not locate {} token. Joining cluster failed.\".format(component[0]))\n exit(3)\n assert token is not None\n # TODO make this configurable\n create_kubeconfig(\n component_token, info[\"ca\"], \"127.0.0.1\", \"16443\", component[2], component[1]\n )\n if \"admin_token\" in info:\n replace_admin_token(info[\"admin_token\"])\n if \"api_authz_mode\" in info:\n update_apiserver(info[\"api_authz_mode\"])\n\n create_admin_kubeconfig(info[\"ca\"], info[\"admin_token\"])\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n store_callback_token(info[\"callback_token\"])\n update_dqlite(info[\"cluster_cert\"], info[\"cluster_key\"], info[\"voters\"], hostname_override)\n # We want to update the local CNI yaml but we do not want to apply it.\n # The cni is applied already in the cluster we join\n try_initialise_cni_autodetect_for_clustering(master_ip, apply_cni=False)\n mark_no_cert_reissue()\n\n\ndef join_etcd(connection_parts, verify=True):\n \"\"\"\n Configure node to join an etcd cluster.\n\n :param connection_parts: connection string parts\n \"\"\"\n token = connection_parts[1]\n master_ep = connection_parts[0].split(\":\")\n master_ip = master_ep[0]\n master_port = master_ep[1]\n callback_token = generate_callback_token()\n info = get_connection_info(master_ip, master_port, token, callback_token=callback_token)\n store_base_kubelet_args(info[\"kubelet_args\"])\n update_kubelet_hostname_override(info[\"kubelet_args\"])\n hostname_override = None\n if \"hostname_override\" in info:\n hostname_override = info[\"hostname_override\"]\n update_kubelet_node_ip(info[\"kubelet_args\"], hostname_override)\n\n store_remote_ca(info[\"ca\"])\n update_flannel(info[\"etcd\"], master_ip, master_port, token)\n update_kubeproxy(info[\"kubeproxy\"], info[\"ca\"], master_ip, 
info[\"apiport\"], hostname_override)\n update_kubelet(info[\"kubelet\"], info[\"ca\"], master_ip, info[\"apiport\"])\n mark_worker_node()\n mark_no_cert_reissue()\n\n\[email protected](\n context_settings={\"ignore_unknown_options\": True, \"help_option_names\": [\"-h\", \"--help\"]}\n)\[email protected](\"connection\", required=True)\[email protected](\n \"--worker\", \"worker\", default=False, flag_value=\"as-worker\", help=\"Join as a worker only node.\"\n)\[email protected](\n \"--controlplane\",\n \"worker\",\n flag_value=\"as-master\",\n help=\"Join running the control plane on HA clusters. (default)\",\n)\[email protected](\n \"--skip-verify\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Skip the certificate verification of the node we are joining to. (default: false)\",\n)\[email protected](\n \"--disable-low-memory-guard\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Disable the low memory guard. (default: false)\",\n)\ndef join(connection, worker, skip_verify, disable_low_memory_guard):\n \"\"\"\n Join the node to a cluster\n\n CONNECTION: the cluster connection endpoint in format <master>:<port>/<token>\n \"\"\"\n connection_parts = connection.split(\"/\")\n verify = not skip_verify\n\n if is_low_memory_guard_enabled() and disable_low_memory_guard:\n os.remove(os.path.expandvars(\"$SNAP_DATA/var/lock/low-memory-guard.lock\"))\n\n if is_low_memory_guard_enabled() and not worker:\n print(\n \"\"\"\nThis node does not have enough RAM to host the Kubernetes control plane services\nand join the database quorum. You may consider joining this node as a worker instead:\n\n microk8s join {connection} --worker\n\nIf you would still like to join the cluster as a control plane node, use:\n\n microk8s join {connection} --disable-low-memory-guard\n\n\"\"\".format(\n connection=connection\n )\n )\n sys.exit(1)\n\n if is_node_running_dqlite():\n join_dqlite(connection_parts, verify, worker)\n else:\n join_etcd(connection_parts, verify)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n join(prog_name=\"microk8s join\")\n", "path": "scripts/wrappers/join.py" } ]
diff --git a/scripts/wrappers/join.py b/scripts/wrappers/join.py index 22f0c4d559..d4ee2f8486 100755 --- a/scripts/wrappers/join.py +++ b/scripts/wrappers/join.py @@ -826,7 +826,6 @@ def join_dqlite_worker_node(info, master_ip, master_port, token): exit(1) store_remote_ca(info["ca"]) - store_cert("serviceaccount.key", info["service_account_key"]) store_base_kubelet_args(info["kubelet_args"]) update_kubelet_node_ip(info["kubelet_args"], hostname_override) diff --git a/tests/test-cluster.py b/tests/test-cluster.py index bb5b3268e1..de95ebe3da 100644 --- a/tests/test-cluster.py +++ b/tests/test-cluster.py @@ -458,6 +458,65 @@ def test_worker_node(self): kubelet = vm.run("cat /var/snap/microk8s/current/credentials/kubelet.config") assert "127.0.0.1" in kubelet.decode() + def test_worker_node_leave(self): + """ + Test when a worker node leaves the cluster + """ + print("Setting up a worker node") + vm = VM(backend) + vm.setup(channel_to_test) + self.VM.append(vm) + + # Form cluster + vm_master = self.VM[0] + print("Adding machine {} to cluster".format(vm.vm_name)) + add_node = vm_master.run("/snap/bin/microk8s.add-node") + endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep] + vm.run("/snap/bin/microk8s.join {} --worker".format(endpoint[0])) + + # Wait for nodes to be ready + print("Waiting for node to register") + attempt = 0 + while attempt < 10: + try: + connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no") + if "NotReady" in connected_nodes.decode(): + time.sleep(5) + continue + print(connected_nodes.decode()) + break + except ChildProcessError: + time.sleep(10) + attempt += 1 + if attempt == 10: + raise + + # Leave the worker node from the cluster + print("Leaving the worker node {} from the cluster".format(vm.vm_name)) + vm.run("/snap/bin/microk8s.leave") + + # Wait for worker node to leave the cluster + attempt = 0 + while attempt < 10: + try: + connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no") + if "NotReady" in connected_nodes.decode(): + print(connected_nodes.decode()) + break + time.sleep(5) + continue + except ChildProcessError: + time.sleep(10) + attempt += 1 + if attempt == 10: + raise + + # Check that the worker node is Ready + print("Checking that the worker node {} is working and Ready".format(vm.vm_name)) + worker_node = vm.run("/snap/bin/microk8s status --wait-ready") + print(worker_node.decode()) + assert "microk8s is running" in worker_node.decode() + def test_no_cert_reissue_in_nodes(self): """ Test that each node has the cert no-reissue lock.
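As a quick aside on the join.py flow shown in this record: the CLI accepts a connection string in the form `<master>:<port>/<token>[/<fingerprint>]`, and `join()` / `join_dqlite()` split it apart before contacting the cluster agent. The sketch below simply restates that parsing in isolation; the endpoint, token, and fingerprint values are made up for illustration (they roughly follow the format the new test extracts from `microk8s.add-node` output).

```python
def parse_connection(connection):
    # Mirrors the splitting performed in join() and join_dqlite() above.
    parts = connection.split("/")
    master_ip, master_port = parts[0].split(":")
    token = parts[1]
    # A third segment, when present, is the peer certificate fingerprint;
    # without it, join_dqlite() skips remote certificate verification.
    fingerprint = parts[2] if len(parts) > 2 else None
    return master_ip, master_port, token, fingerprint


# Made-up example values for illustration only:
print(parse_connection("10.0.0.5:25000/0123456789abcdef/aa11bb22cc33dd44"))
# -> ('10.0.0.5', '25000', '0123456789abcdef', 'aa11bb22cc33dd44')
```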
getsentry__sentry-18644
BufferError: Local: Queue full
I am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry. I never noticed this being an issue on 9.1.2, which was also running Clickhouse and Snuba, but without Kafka.

> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/

I am not sure which queue this error refers to, where I can monitor it, or how I can flush or enlarge it if needed.

`sentry queues list` showed all 0's, so it does not look like there is a massive backlog of events. Any help is appreciated!
[ { "content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if not self.asynchronous:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py" } ]
[ { "content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if self.asynchronous:\n self.producer.poll(0)\n else:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py" } ]
diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py index 74cf05ac61cd87..a67ee9e95d8bdb 100644 --- a/src/sentry/utils/pubsub.py +++ b/src/sentry/utils/pubsub.py @@ -73,5 +73,7 @@ def __init__(self, connection, asynchronous=True): def publish(self, channel, value, key=None): self.producer.produce(topic=channel, value=value, key=key) - if not self.asynchronous: + if self.asynchronous: + self.producer.poll(0) + else: self.producer.flush()
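The patch above works because `produce()` only appends to librdkafka's local queue; unless `poll()` runs periodically, delivery reports are never served and queue slots are never reclaimed, so the buffer eventually fills and `produce()` raises `BufferError`. A sketch of the resulting publish pattern, with an extra retry-on-full branch that goes beyond what the diff itself adds:

```python
def publish(producer, channel, value, key=None):
    try:
        producer.produce(topic=channel, value=value, key=key)
    except BufferError:
        # Local queue full: block briefly so delivery reports are served and
        # space is freed, then retry once (illustrative, not part of the fix).
        producer.poll(1)
        producer.produce(topic=channel, value=value, key=key)
    # Serve delivery reports on every publish so the queue keeps draining,
    # mirroring the producer.poll(0) call added in the diff above.
    producer.poll(0)
```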
pytorch__ignite-751
Correct the type hints of this function: https://github.com/pytorch/ignite/blob/ca738d8f3f106093aa04b6bce9506129a1059df8/ignite/engine/events.py#L81. The wrapper is currently annotated as `def wrapper(engine, event: bool):`; it should read `def wrapper(engine, event: int) -> bool:`.
[ { "content": "\nfrom typing import Callable, Optional, Union, Any\n\nfrom enum import Enum\nimport numbers\nimport weakref\n\nfrom ignite.engine.utils import _check_signature\n\n\n__all__ = [\n 'Events',\n 'State'\n]\n\n\nclass EventWithFilter:\n\n def __init__(self, event: Any, filter: Callable):\n if not callable(filter):\n raise TypeError(\"Argument filter should be callable\")\n self.event = event\n self.filter = filter\n\n def __str__(self) -> str:\n return \"<%s event=%s, filter=%r>\" % (self.__class__.__name__, self.event, self.filter)\n\n\nclass CallableEvents:\n \"\"\"Base class for Events implementing call operator and storing event filter. This class should be inherited\n for any custom events with event filtering feature:\n\n .. code-block:: python\n\n from ignite.engine.engine import CallableEvents\n\n class CustomEvents(CallableEvents, Enum):\n TEST_EVENT = \"test_event\"\n\n engine = ...\n engine.register_events(*CustomEvents, event_to_attr={CustomEvents.TEST_EVENT: \"test_event\"})\n\n @engine.on(CustomEvents.TEST_EVENT(every=5))\n def call_on_test_event_every(engine):\n # do something\n\n \"\"\"\n\n def __call__(self, event_filter: Optional[Callable] = None,\n every: Optional[int] = None, once: Optional[int] = None):\n\n if not ((event_filter is not None) ^ (every is not None) ^ (once is not None)):\n raise ValueError(\"Only one of the input arguments should be specified\")\n\n if (event_filter is not None) and not callable(event_filter):\n raise TypeError(\"Argument event_filter should be a callable\")\n\n if (every is not None) and not (isinstance(every, numbers.Integral) and every > 0):\n raise ValueError(\"Argument every should be integer and greater than zero\")\n\n if (once is not None) and not (isinstance(once, numbers.Integral) and once > 0):\n raise ValueError(\"Argument every should be integer and positive\")\n\n if every is not None:\n if every == 1:\n # Just return the event itself\n return self\n event_filter = CallableEvents.every_event_filter(every)\n\n if once is not None:\n event_filter = CallableEvents.once_event_filter(once)\n\n # check signature:\n _check_signature(\"engine\", event_filter, \"event_filter\", \"event\")\n\n return EventWithFilter(self, event_filter)\n\n @staticmethod\n def every_event_filter(every: int) -> Callable:\n def wrapper(engine, event: bool):\n if event % every == 0:\n return True\n return False\n\n return wrapper\n\n @staticmethod\n def once_event_filter(once: int) -> Callable:\n def wrapper(engine, event: int) -> bool:\n if event == once:\n return True\n return False\n\n return wrapper\n\n\nclass Events(CallableEvents, Enum):\n \"\"\"Events that are fired by the :class:`~ignite.engine.Engine` during execution.\n\n Since v0.3.0, Events become more flexible and allow to pass an event filter to the Engine:\n\n .. 
code-block:: python\n\n engine = Engine()\n\n # a) custom event filter\n def custom_event_filter(engine, event):\n if event in [1, 2, 5, 10, 50, 100]:\n return True\n return False\n\n @engine.on(Events.ITERATION_STARTED(event_filter=custom_event_filter))\n def call_on_special_event(engine):\n # do something on 1, 2, 5, 10, 50, 100 iterations\n\n # b) \"every\" event filter\n @engine.on(Events.ITERATION_STARTED(every=10))\n def call_every(engine):\n # do something every 10th iteration\n\n # c) \"once\" event filter\n @engine.on(Events.ITERATION_STARTED(once=50))\n def call_once(engine):\n # do something on 50th iteration\n\n Event filter function `event_filter` accepts as input `engine` and `event` and should return True/False.\n Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed.\n\n \"\"\"\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n GET_BATCH_STARTED = \"get_batch_started\"\n GET_BATCH_COMPLETED = \"get_batch_completed\"\n\n\nclass State:\n \"\"\"An object that is used to pass internal and user-defined state between event handlers. By default, state\n contains the following attributes:\n\n .. code-block:: python\n\n state.iteration # 1-based, the first iteration is 1\n state.epoch # 1-based, the first epoch is 1\n state.seed # seed to set at each epoch\n state.dataloader # data passed to engine\n state.epoch_length # optional length of an epoch\n state.max_epochs # number of epochs to run\n state.batch # batch passed to `process_function`\n state.output # output of `process_function` after a single iteration\n state.metrics # dictionary with defined metrics if any\n\n \"\"\"\n\n event_to_attr = {\n Events.GET_BATCH_STARTED: \"iteration\",\n Events.GET_BATCH_COMPLETED: \"iteration\",\n Events.ITERATION_STARTED: \"iteration\",\n Events.ITERATION_COMPLETED: \"iteration\",\n Events.EPOCH_STARTED: \"epoch\",\n Events.EPOCH_COMPLETED: \"epoch\",\n Events.STARTED: \"epoch\",\n Events.COMPLETED: \"epoch\",\n }\n\n def __init__(self, **kwargs):\n self.iteration = 0\n self.epoch = 0\n self.epoch_length = None\n self.max_epochs = None\n self.output = None\n self.batch = None\n self.metrics = {}\n self.dataloader = None\n self.seed = None\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n for value in self.event_to_attr.values():\n if not hasattr(self, value):\n setattr(self, value, 0)\n\n def get_event_attrib_value(self, event_name: Union[EventWithFilter, CallableEvents, Enum]) -> int:\n if isinstance(event_name, EventWithFilter):\n event_name = event_name.event\n if event_name not in State.event_to_attr:\n raise RuntimeError(\"Unknown event name '{}'\".format(event_name))\n return getattr(self, State.event_to_attr[event_name])\n\n def __repr__(self) -> str:\n s = \"State:\\n\"\n for attr, value in self.__dict__.items():\n if not isinstance(value, (numbers.Number, str)):\n value = type(value)\n s += \"\\t{}: {}\\n\".format(attr, value)\n return s\n\n\nclass RemovableEventHandle:\n \"\"\"A weakref handle to remove a registered event.\n\n A handle that may be used to remove a registered event handler via the\n remove method, with-statement, or context manager protocol. 
Returned from\n :meth:`~ignite.engine.Engine.add_event_handler`.\n\n\n Args:\n event_name: Registered event name.\n handler: Registered event handler, stored as weakref.\n engine: Target engine, stored as weakref.\n\n Example usage:\n\n .. code-block:: python\n\n engine = Engine()\n\n def print_epoch(engine):\n print(\"Epoch: {}\".format(engine.state.epoch))\n\n with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch):\n # print_epoch handler registered for a single run\n engine.run(data)\n\n # print_epoch handler is now unregistered\n \"\"\"\n\n def __init__(self, event_name: Union[EventWithFilter, CallableEvents, Enum], handler: Callable, engine):\n self.event_name = event_name\n self.handler = weakref.ref(handler)\n self.engine = weakref.ref(engine)\n\n def remove(self) -> None:\n \"\"\"Remove handler from engine.\"\"\"\n handler = self.handler()\n engine = self.engine()\n\n if handler is None or engine is None:\n return\n\n if engine.has_event_handler(handler, self.event_name):\n engine.remove_event_handler(handler, self.event_name)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs) -> None:\n self.remove()\n", "path": "ignite/engine/events.py" } ]
[ { "content": "\nfrom typing import Callable, Optional, Union, Any\n\nfrom enum import Enum\nimport numbers\nimport weakref\n\nfrom ignite.engine.utils import _check_signature\n\n\n__all__ = [\n 'Events',\n 'State'\n]\n\n\nclass EventWithFilter:\n\n def __init__(self, event: Any, filter: Callable):\n if not callable(filter):\n raise TypeError(\"Argument filter should be callable\")\n self.event = event\n self.filter = filter\n\n def __str__(self) -> str:\n return \"<%s event=%s, filter=%r>\" % (self.__class__.__name__, self.event, self.filter)\n\n\nclass CallableEvents:\n \"\"\"Base class for Events implementing call operator and storing event filter. This class should be inherited\n for any custom events with event filtering feature:\n\n .. code-block:: python\n\n from ignite.engine.engine import CallableEvents\n\n class CustomEvents(CallableEvents, Enum):\n TEST_EVENT = \"test_event\"\n\n engine = ...\n engine.register_events(*CustomEvents, event_to_attr={CustomEvents.TEST_EVENT: \"test_event\"})\n\n @engine.on(CustomEvents.TEST_EVENT(every=5))\n def call_on_test_event_every(engine):\n # do something\n\n \"\"\"\n\n def __call__(self, event_filter: Optional[Callable] = None,\n every: Optional[int] = None, once: Optional[int] = None):\n\n if not ((event_filter is not None) ^ (every is not None) ^ (once is not None)):\n raise ValueError(\"Only one of the input arguments should be specified\")\n\n if (event_filter is not None) and not callable(event_filter):\n raise TypeError(\"Argument event_filter should be a callable\")\n\n if (every is not None) and not (isinstance(every, numbers.Integral) and every > 0):\n raise ValueError(\"Argument every should be integer and greater than zero\")\n\n if (once is not None) and not (isinstance(once, numbers.Integral) and once > 0):\n raise ValueError(\"Argument every should be integer and positive\")\n\n if every is not None:\n if every == 1:\n # Just return the event itself\n return self\n event_filter = CallableEvents.every_event_filter(every)\n\n if once is not None:\n event_filter = CallableEvents.once_event_filter(once)\n\n # check signature:\n _check_signature(\"engine\", event_filter, \"event_filter\", \"event\")\n\n return EventWithFilter(self, event_filter)\n\n @staticmethod\n def every_event_filter(every: int) -> Callable:\n def wrapper(engine, event: int) -> bool:\n if event % every == 0:\n return True\n return False\n\n return wrapper\n\n @staticmethod\n def once_event_filter(once: int) -> Callable:\n def wrapper(engine, event: int) -> bool:\n if event == once:\n return True\n return False\n\n return wrapper\n\n\nclass Events(CallableEvents, Enum):\n \"\"\"Events that are fired by the :class:`~ignite.engine.Engine` during execution.\n\n Since v0.3.0, Events become more flexible and allow to pass an event filter to the Engine:\n\n .. 
code-block:: python\n\n engine = Engine()\n\n # a) custom event filter\n def custom_event_filter(engine, event):\n if event in [1, 2, 5, 10, 50, 100]:\n return True\n return False\n\n @engine.on(Events.ITERATION_STARTED(event_filter=custom_event_filter))\n def call_on_special_event(engine):\n # do something on 1, 2, 5, 10, 50, 100 iterations\n\n # b) \"every\" event filter\n @engine.on(Events.ITERATION_STARTED(every=10))\n def call_every(engine):\n # do something every 10th iteration\n\n # c) \"once\" event filter\n @engine.on(Events.ITERATION_STARTED(once=50))\n def call_once(engine):\n # do something on 50th iteration\n\n Event filter function `event_filter` accepts as input `engine` and `event` and should return True/False.\n Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed.\n\n \"\"\"\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n GET_BATCH_STARTED = \"get_batch_started\"\n GET_BATCH_COMPLETED = \"get_batch_completed\"\n\n\nclass State:\n \"\"\"An object that is used to pass internal and user-defined state between event handlers. By default, state\n contains the following attributes:\n\n .. code-block:: python\n\n state.iteration # 1-based, the first iteration is 1\n state.epoch # 1-based, the first epoch is 1\n state.seed # seed to set at each epoch\n state.dataloader # data passed to engine\n state.epoch_length # optional length of an epoch\n state.max_epochs # number of epochs to run\n state.batch # batch passed to `process_function`\n state.output # output of `process_function` after a single iteration\n state.metrics # dictionary with defined metrics if any\n\n \"\"\"\n\n event_to_attr = {\n Events.GET_BATCH_STARTED: \"iteration\",\n Events.GET_BATCH_COMPLETED: \"iteration\",\n Events.ITERATION_STARTED: \"iteration\",\n Events.ITERATION_COMPLETED: \"iteration\",\n Events.EPOCH_STARTED: \"epoch\",\n Events.EPOCH_COMPLETED: \"epoch\",\n Events.STARTED: \"epoch\",\n Events.COMPLETED: \"epoch\",\n }\n\n def __init__(self, **kwargs):\n self.iteration = 0\n self.epoch = 0\n self.epoch_length = None\n self.max_epochs = None\n self.output = None\n self.batch = None\n self.metrics = {}\n self.dataloader = None\n self.seed = None\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n for value in self.event_to_attr.values():\n if not hasattr(self, value):\n setattr(self, value, 0)\n\n def get_event_attrib_value(self, event_name: Union[EventWithFilter, CallableEvents, Enum]) -> int:\n if isinstance(event_name, EventWithFilter):\n event_name = event_name.event\n if event_name not in State.event_to_attr:\n raise RuntimeError(\"Unknown event name '{}'\".format(event_name))\n return getattr(self, State.event_to_attr[event_name])\n\n def __repr__(self) -> str:\n s = \"State:\\n\"\n for attr, value in self.__dict__.items():\n if not isinstance(value, (numbers.Number, str)):\n value = type(value)\n s += \"\\t{}: {}\\n\".format(attr, value)\n return s\n\n\nclass RemovableEventHandle:\n \"\"\"A weakref handle to remove a registered event.\n\n A handle that may be used to remove a registered event handler via the\n remove method, with-statement, or context manager protocol. 
Returned from\n :meth:`~ignite.engine.Engine.add_event_handler`.\n\n\n Args:\n event_name: Registered event name.\n handler: Registered event handler, stored as weakref.\n engine: Target engine, stored as weakref.\n\n Example usage:\n\n .. code-block:: python\n\n engine = Engine()\n\n def print_epoch(engine):\n print(\"Epoch: {}\".format(engine.state.epoch))\n\n with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch):\n # print_epoch handler registered for a single run\n engine.run(data)\n\n # print_epoch handler is now unregistered\n \"\"\"\n\n def __init__(self, event_name: Union[EventWithFilter, CallableEvents, Enum], handler: Callable, engine):\n self.event_name = event_name\n self.handler = weakref.ref(handler)\n self.engine = weakref.ref(engine)\n\n def remove(self) -> None:\n \"\"\"Remove handler from engine.\"\"\"\n handler = self.handler()\n engine = self.engine()\n\n if handler is None or engine is None:\n return\n\n if engine.has_event_handler(handler, self.event_name):\n engine.remove_event_handler(handler, self.event_name)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs) -> None:\n self.remove()\n", "path": "ignite/engine/events.py" } ]
diff --git a/ignite/engine/events.py b/ignite/engine/events.py index 74c8951ec8af..65ce2fabb27c 100644 --- a/ignite/engine/events.py +++ b/ignite/engine/events.py @@ -77,7 +77,7 @@ def __call__(self, event_filter: Optional[Callable] = None, @staticmethod def every_event_filter(every: int) -> Callable: - def wrapper(engine, event: bool): + def wrapper(engine, event: int) -> bool: if event % every == 0: return True return False
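For context, the closures returned by `every_event_filter` and `once_event_filter` are always called with the engine and an integer iteration or epoch counter and return a plain bool, which is exactly what the corrected annotation states. A small sketch against the module shown above (the engine argument is unused by these filters, so `None` is passed here):

```python
from ignite.engine.events import CallableEvents

every_third = CallableEvents.every_event_filter(3)
only_fifth = CallableEvents.once_event_filter(5)

# `event` is the integer counter; the wrappers return literal booleans.
assert every_third(None, 3) is True    # 3 % 3 == 0
assert every_third(None, 4) is False
assert only_fifth(None, 5) is True
assert only_fifth(None, 6) is False
```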
LMFDB__lmfdb-4407
Server errors in whitelist check The URL https://www.lmfdb.org/L/1 generates the following server error in the white_listed function in app.py ``` Exception on /L/1 [GET] Traceback (most recent call last): File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise raise value File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/app.py", line 1948, in full_dispatch_request rv = self.preprocess_request() File "/home/sage/sage-9.2/local/lib/python3.8/site-packages/flask/app.py", line 2242, in preprocess_request rv = func() File "/home/lmfdb/lmfdb-git-web/lmfdb/app.py", line 249, in netloc_redirect not white_listed(urlparts.path) File "/home/lmfdb/lmfdb-git-web/lmfdb/app.py", line 764, in white_listed return white_listed(url[1:]) or url[3].isdigit() IndexError: string index out of range [2021-02-02 03:59:37 UTC] 500 error on URL https://www.lmfdb.org/L/1 () ```
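The crash comes from indexing a fixed position of the path: after stripping slashes, "/L/1" becomes the three-character string "L/1", and when the recursive `white_listed(url[1:])` call returns False the fallback `url[3].isdigit()` is evaluated and raises IndexError because there is no index 3. A minimal sketch of one way to make that lookup total (the helper name is mine, and this is illustrative only, not necessarily the change the eventual fix adopted):

```python
def safe_l_digit_check(url):
    # url[3:4] behaves like url[3] on longer paths but is "" on short ones,
    # and "".isdigit() is False, so "L/1" fails the check instead of raising.
    return url[3:4].isdigit()

stripped = "/L/1".rstrip("/").lstrip("/")   # -> "L/1", only three characters
print(safe_l_digit_check(stripped))          # False (previously: IndexError)
```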
[ { "content": "\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os\nfrom socket import gethostname\nimport time\nimport six\nfrom urllib.parse import urlparse, urlunparse\n\nfrom flask import (Flask, g, render_template, request, make_response,\n redirect, url_for, current_app, abort)\nfrom sage.env import SAGE_VERSION\n# acknowledgement page, reads info from CONTRIBUTORS.yaml\n\nfrom .logger import logger_file_handler, critical\nfrom .homepage import load_boxes, contribs\n\nLMFDB_VERSION = \"LMFDB Release 1.2\"\n\n############################\n# Main app #\n############################\n\nclass ReverseProxied(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n\napp = Flask(__name__)\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\n############################\n# App attribute functions #\n############################\n\ndef is_debug_mode():\n from flask import current_app\n return current_app.debug\n\n# this is set here and is available for ctx_proc_userdata\[email protected]_request\ndef set_beta_state():\n g.BETA = (os.getenv('BETA')=='1') or is_debug_mode()\n\ndef is_beta():\n from flask import g\n return g.BETA\n\napp.is_running = False\ndef set_running():\n app.is_running = True\ndef is_running():\n return app.is_running\n\n############################\n# Global app configuration #\n############################\n\napp.logger.addHandler(logger_file_handler())\n\n# If the debug toolbar is installed then use it\nif app.debug:\n try:\n from flask_debugtoolbar import DebugToolbarExtension\n toolbar = DebugToolbarExtension(app)\n except ImportError:\n pass\n\n# secret key, necessary for sessions, and sessions are\n# in turn necessary for users to login\nfrom .utils.config import get_secret_key\napp.secret_key = get_secret_key()\n\n# tell jinja to remove linebreaks\napp.jinja_env.trim_blocks = True\n\n# enable break and continue in jinja loops\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.jinja_env.add_extension('jinja2.ext.do')\n\n# the following context processor inserts\n# * empty info={} dict variable\n# * body_class = ''\n# * bread = None for the default bread crumb hierarch\n# * title = 'LMFDB'\n# * meta_description, shortthanks, feedbackpage\n# * DEBUG and BETA variables storing whether running in each mode\[email protected]_processor\ndef ctx_proc_userdata():\n # insert an empty info={} as default\n # set the body class to some default, blueprints should\n # overwrite it with their name, using @<blueprint_object>.context_processor\n # see http://flask.pocoo.org/docs/api/?highlight=context_processor#flask.Blueprint.context_processor\n vars = {'info': {}, 'body_class': ''}\n\n # insert the default bread crumb hierarchy\n # overwrite this variable when you want to customize it\n # For example, [ ('Bread', '.'), ('Crumb', '.'), ('Hierarchy', '.')]\n vars['bread'] = None\n\n # default title\n vars['title'] = r'LMFDB'\n\n # LMFDB version number displayed in footer\n vars['version'] = LMFDB_VERSION\n\n # meta_description appears in the meta tag \"description\"\n vars['meta_description'] = r'Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. 
These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.'\n vars['shortthanks'] = r'This project is supported by <a href=\"%s\">grants</a> from the US National Science Foundation, the UK Engineering and Physical Sciences Research Council, and the Simons Foundation.' % (url_for('acknowledgment') + \"#sponsors\")\n vars['feedbackpage'] = r\"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ\"\n vars['LINK_EXT'] = lambda a, b: '<a href=\"%s\" target=\"_blank\">%s</a>' % (b, a)\n\n # debug mode?\n vars['DEBUG'] = is_debug_mode()\n vars['BETA'] = is_beta()\n\n def modify_url(**replace):\n urlparts = urlparse(request.url)\n urlparts = urlparts._replace(**replace)\n return urlunparse(urlparts)\n vars['modify_url'] = modify_url\n\n return vars\n\n# Harald suggested the following but it does not work\n#\n# create the sidebar from its yaml file and inject it into the jinja environment\n#from lmfdb.homepage import get_sidebar\n#app.jinja_env.globals['sidebar'] = get_sidebar()\n#\n# so instead we do this to ensure that the sidebar content is available to every page:\[email protected]_processor\ndef inject_sidebar():\n from .homepage import get_sidebar\n return dict(sidebar=get_sidebar())\n\n##############################\n# Bottom link to google code #\n##############################\n\nbranch = \"web\"\nif (os.getenv('BETA')=='1'):\n branch = \"dev\"\n\ndef git_infos():\n try:\n from subprocess import Popen, PIPE\n # cwd should be the root of git repo\n cwd = os.path.join(os.path.dirname(os.path.realpath(__file__)),\"..\")\n commands = ['''git rev-parse HEAD''',\n '''git show --format=\"%ci\" -s HEAD''',\n '''git branch --contains HEAD''',\n '''git reflog -n5''',\n '''git log --graph -n 10''']\n kwdargs = {'shell': True, 'stdout' : PIPE, 'cwd' : cwd}\n if six.PY3:\n kwdargs['encoding'] = 'utf-8'\n pairs = [(c, Popen(c, **kwdargs).communicate()[0]) for c in commands]\n rev = pairs[0][1]\n date = pairs[0][1]\n summary = \"\\n\".join(\"$ %s\\n%s\" % p for p in pairs)\n return rev, date, summary\n except Exception:\n return '-', '-', '-'\n\n\ngit_rev, git_date, _ = git_infos()\n\n# Creates link to the source code at the most recent commit.\n_url_source = 'https://github.com/LMFDB/lmfdb/tree/'\n_current_source = '<a href=\"%s%s\">%s</a>' % (_url_source, git_rev, \"Source\")\n\n# Creates link to the list of revisions on the master, where the most recent commit is on top.\n_url_changeset = 'https://github.com/LMFDB/lmfdb/commits/%s' % branch\n_latest_changeset = '<a href=\"%s\">%s</a>' % (_url_changeset, git_date)\n\[email protected]_processor\ndef link_to_current_source():\n return {'current_source': _current_source,\n 'latest_changeset': _latest_changeset,\n 'sage_version': 'SageMath version %s' % SAGE_VERSION}\n\n##############################\n# Jinja formatters #\n##############################\n\n# you can pass in a datetime.datetime python object and via\n# {{ <datetimeobject> | fmtdatetime }} you can format it inside a jinja template\n# if you want to do more than just the default, use it for example this way:\n# {{ <datetimeobject>|fmtdatetime('%H:%M:%S') }}\[email protected]_filter(\"fmtdatetime\")\ndef fmtdatetime(value, format='%Y-%m-%d %H:%M:%S'):\n import datetime\n if isinstance(value, datetime.datetime):\n return value.strftime(format)\n else:\n return \"-\"\n\n# You can use this formatter to turn newlines in a string into HTML line breaks\[email 
protected]_filter(\"nl2br\")\ndef nl2br(s):\n return s.replace('\\n', '<br/>\\n')\n\n# You can use this formatter to encode a dictionary into a url string\[email protected]_filter('urlencode')\ndef urlencode(kwargs):\n from six.moves.urllib.parse import urlencode\n return urlencode(kwargs)\n\n##############################\n# Redirects and errors #\n##############################\n\n\[email protected]_request\ndef netloc_redirect():\n \"\"\"\n Redirect lmfdb.org -> www.lmfdb.org\n Redirect {www, beta, }.lmfdb.com -> {www, beta, }.lmfdb.org\n Force https on www.lmfdb.org\n Redirect non-whitelisted routes from www.lmfdb.org to beta.lmfdb.org\n \"\"\"\n from six.moves.urllib.parse import urlparse, urlunparse\n\n urlparts = urlparse(request.url)\n\n if urlparts.netloc in [\"lmfdb.org\", \"lmfdb.com\", \"www.lmfdb.com\"]:\n replaced = urlparts._replace(netloc=\"www.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=301)\n elif urlparts.netloc == \"beta.lmfdb.com\":\n replaced = urlparts._replace(netloc=\"beta.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=301)\n elif (\n urlparts.netloc == \"www.lmfdb.org\"\n and request.headers.get(\"X-Forwarded-Proto\", \"http\") != \"https\"\n and request.url.startswith(\"http://\")\n ):\n url = request.url.replace(\"http://\", \"https://\", 1)\n return redirect(url, code=301)\n elif (\n urlparts.netloc == \"www.lmfdb.org\"\n and\n not white_listed(urlparts.path)\n ):\n replaced = urlparts._replace(netloc=\"beta.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=302)\n\n\n\ndef timestamp():\n return '[%s UTC]' % time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n\[email protected](404)\ndef not_found_404(error):\n app.logger.info('%s 404 error for URL %s %s'%(timestamp(),request.url,error.description))\n messages = error.description if isinstance(error.description,(list,tuple)) else (error.description,)\n return render_template(\"404.html\", title='LMFDB Page Not Found', messages=messages), 404\n\[email protected](500)\ndef not_found_500(error):\n app.logger.error(\"%s 500 error on URL %s %s\"%(timestamp(),request.url, error.args))\n return render_template(\"500.html\", title='LMFDB Error'), 500\n\[email protected](503)\ndef not_found_503(error):\n return render_template(\"503.html\"), 503\n\n##############################\n# Cookies #\n##############################\n\[email protected]_request\ndef get_menu_cookie():\n \"\"\"\n sets cookie for show/hide sidebar\n \"\"\"\n g.show_menu = str(request.cookies.get('showmenu')) != \"False\"\n\n##############################\n# Top-level pages #\n##############################\n\[email protected](\"/\")\ndef index():\n return render_template('index-boxes.html',\n titletag=\"The L-functions and modular forms database\",\n title=\"LMFDB - The L-functions and Modular Forms Database\",\n bread=None,\n boxes=load_boxes())\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\", title=\"About the LMFDB\")\n\[email protected](\"/health\")\[email protected](\"/alive\")\ndef alive():\n \"\"\"\n a basic health check\n \"\"\"\n from . import db\n if db.is_alive():\n return \"LMFDB!\"\n else:\n abort(503)\n\n\n\[email protected](\"/statshealth\")\ndef statshealth():\n \"\"\"\n a health check on the stats pages\n \"\"\"\n from . 
import db\n if db.is_alive():\n tc = app.test_client()\n for url in ['/NumberField/stats',\n '/ModularForm/GL2/Q/holomorphic/stats',\n '/EllipticCurve/Q/stats',\n '/EllipticCurve/browse/2/',\n '/EllipticCurve/browse/3/',\n '/EllipticCurve/browse/4/',\n '/EllipticCurve/browse/5/',\n '/EllipticCurve/browse/6/',\n '/Genus2Curve/Q/stats',\n '/Belyi/stats',\n '/HigherGenus/C/Aut/stats',\n ]:\n try:\n if tc.get(url).status_code != 200:\n abort(503)\n except Exception:\n abort(503)\n else:\n return \"LMFDB stats are healthy!\"\n else:\n abort(503)\n\[email protected](\"/info\")\ndef info():\n output = \"\"\n output += \"HOSTNAME = %s\\n\\n\" % gethostname()\n output += \"# PostgreSQL info\\n\"\n from . import db\n if not db.is_alive():\n output += \"db is offline\\n\"\n else:\n conn_str = \"%s\" % db.conn\n output += \"Connection: %s\\n\" % conn_str.replace(\"<\",\"\").replace(\">\",\"\")\n output += \"User: %s\\n\" % db._user\n output += \"Read only: %s\\n\" % db._read_only\n output += \"Read and write to userdb: %s\\n\" % db._read_and_write_userdb\n output += \"Read and write to knowls: %s\\n\" % db._read_and_write_knowls\n output += \"\\n# GIT info\\n\"\n output += git_infos()[-1]\n output += \"\\n\\n\"\n return output.replace(\"\\n\", \"<br>\")\n\n\[email protected](\"/acknowledgment\")\ndef acknowledgment():\n bread = [(\"Acknowledgments\" , '')]\n return render_template(\"acknowledgment.html\", title=\"Acknowledgments\", contribs=contribs, bread=bread)\n\[email protected](\"/acknowledgment/activities\")\ndef workshops():\n bread = [(\"Acknowledgments\" , url_for('.acknowledgment')) , (\"Activities\", '')]\n return render_template(\"workshops.html\", title=\"LMFDB Activities\", contribs=contribs, bread=bread)\n\n# google's CSE for www.lmfdb.org/* (and *only* those pages!)\[email protected](\"/search\")\ndef search():\n return render_template(\"search.html\", title=\"Search LMFDB\", bread=[('Search', url_for(\"search\"))])\n\[email protected]('/ModularForm')\[email protected]('/ModularForm/')\ndef modular_forms():\n t = 'Modular forms'\n b = [(t, url_for('modular_forms'))]\n # lm = [('History of modular forms', '/ModularForm/history')]\n return render_template('single.html', title=t, kid='mf.about', bread=b) #, learnmore=lm)\n\n# @app.route(\"/ModularForm/history\")\ndef modular_forms_history():\n t = 'Modular forms'\n b = [(t, url_for('modular_forms'))]\n b.append(('History', url_for(\"modular_forms_history\")))\n return render_template(_single_knowl, title=\"A brief history of modular forms\", kid='mf.gl2.history', body_class=_bc, bread=b)\n\[email protected]('/Variety')\[email protected]('/Variety/')\ndef varieties():\n t = 'Varieties'\n b = [(t, url_for('varieties'))]\n # lm = [('History of varieties', '/Variety/history')]\n return render_template('single.html', title=t, kid='varieties.about', bread=b) #, learnmore=lm)\n\n# @app.route(\"/Variety/history\")\ndef varieties_history():\n t = 'Varieties'\n b = [(t, url_for('varieties'))]\n b.append(('History', url_for(\"varieties_history\")))\n return render_template(_single_knowl, title=\"A brief history of varieties\", kid='ag.variety.history', body_class=_bc, bread=b)\n\[email protected]('/Field')\[email protected]('/Field/')\ndef fields():\n t = 'Fields'\n b = [(t, url_for('fields'))]\n # lm = [('History of fields', '/Field/history')]\n return render_template('single.html', kid='field.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Field/history\")\ndef fields_history():\n t = 'Fields'\n b = [(t, 
url_for('fields'))]\n b.append(('History', url_for(\"fields_history\")))\n return render_template(_single_knowl, title=\"A brief history of fields\", kid='field.history', body_class=_bc, bread=b)\n\[email protected]('/Representation')\[email protected]('/Representation/')\ndef representations():\n t = 'Representations'\n b = [(t, url_for('representations'))]\n # lm = [('History of representations', '/Representation/history')]\n return render_template('single.html', kid='repn.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Representation/history\")\ndef representations_history():\n t = 'Representations'\n b = [(t, url_for('representations'))]\n b.append(('History', url_for(\"representations_history\")))\n return render_template(_single_knowl, title=\"A brief history of representations\", kid='repn.history', body_class=_bc, bread=b)\n\[email protected]('/Motive')\[email protected]('/Motive/')\ndef motives():\n t = 'Motives'\n b = [(t, url_for('motives'))]\n # lm = [('History of motives', '/Motives/history')]\n return render_template('single.html', kid='motives.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Motives/history\")\ndef motives_history():\n t = 'Motives'\n b = [(t, url_for('motives'))]\n b.append(('History', url_for(\"motives_history\")))\n return render_template(_single_knowl, title=\"A brief history of motives\", kid='motives.history', body_class=_bc, bread=b)\n\[email protected]('/Group')\[email protected]('/Group/')\ndef groups():\n t = 'Groups'\n b = [(t, url_for('groups'))]\n # lm = [('History of groups', '/Group/history')]\n return render_template('single.html', kid='group.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Group/history\")\ndef groups_history():\n t = 'Groups'\n b = [(t, url_for('groups'))]\n b.append(('History', url_for(\"groups_history\")))\n return render_template(_single_knowl, title=\"A brief history of groups\", kid='group.history', body_class=_bc, bread=b)\n\[email protected](\"/editorial-board\")\[email protected](\"/management-board\")\[email protected](\"/management\")\ndef editorial_board():\n t = \"Editorial Board\"\n b = [(t, url_for(\"editorial_board\"))]\n return render_template('management.html', title=t, bread=b)\n\[email protected](\"/citation\")\ndef citation():\n t = \"Citing the LMFDB\"\n b = [(t, url_for(\"citation\"))]\n return render_template('citation.html', title=t, body_class='', bread=b)\n\n\[email protected](\"/contact\")\ndef contact():\n t = \"Contact and Feedback\"\n b = [(t, url_for(\"contact\"))]\n return render_template('contact.html', title=t, body_class='', bread=b)\n\ndef root_static_file(name):\n def static_fn():\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", name)\n if os.path.exists(fn):\n return open(fn, \"rb\").read()\n critical(\"root_static_file: file %s not found!\" % fn)\n return abort(404, 'static file %s not found.' 
% fn)\n app.add_url_rule('/%s' % name, 'static_%s' % name, static_fn)\n\n\nfor fn in ['favicon.ico']:\n root_static_file(fn)\n\n\[email protected](\"/robots.txt\")\ndef robots_txt():\n if \"www.lmfdb.org\".lower() in request.url_root.lower():\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", \"robots.txt\")\n if os.path.exists(fn):\n return open(fn).read()\n # not running on www.lmfdb.org\n else:\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", \"default_robots.txt\")\n if os.path.exists(fn):\n return open(fn).read()\n return \"User-agent: *\\nDisallow: / \\n\"\n\n# geeky pages have humans.txt\[email protected](\"/humans.txt\")\ndef humans_txt():\n return render_template(\"acknowledgment.html\", title=\"Acknowledgments\")\n\[email protected]_processor\ndef add_colors():\n # FIXME:\n # - the template should use global variable g.color\n # - try to get the color from\n # - the cookie\n # - from the config file\n # - remove cookie at logout (see line 307 of users/main)\n # - add cookie at login or when a color change happens (see line 175 of users/main)\n from .utils.color import all_color_schemes\n color = request.args.get('color')\n if color and color.isdigit():\n color = int(color)\n if color not in all_color_schemes:\n color = None\n if color is None:\n from flask_login import current_user\n userid = current_user.get_id()\n if userid is not None:\n from .users.pwdmanager import userdb\n color = userdb.lookup(userid).get('color_scheme')\n if color not in all_color_schemes:\n color = None\n if color is None:\n from .utils.config import Configuration\n color = Configuration().get_color()\n return dict(color=all_color_schemes[color].dict())\n\[email protected](\"/style.css\")\ndef css():\n response = make_response(render_template(\"style.css\"))\n response.headers['Content-type'] = 'text/css'\n # don't cache css file, if in debug mode.\n if current_app.debug:\n response.headers['Cache-Control'] = 'no-cache, no-store'\n else:\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response\n\[email protected](\"/not_yet_implemented\")\ndef not_yet_implemented():\n return render_template(\"not_yet_implemented.html\", title=\"Not Yet Implemented\")\n\n# the checklist is used for human testing on a high-level, supplements test.sh\n\[email protected](\"/checklist-list\")\ndef checklist_list():\n return render_template(\"checklist.html\", body_class=\"checklist\")\n\[email protected](\"/checklist\")\ndef checklist():\n return render_template(\"checklist-fs.html\")\n\n##############################\n# Intro pages #\n##############################\n\n# common base class and bread\n_bc = 'intro'\nintro_bread = lambda: [('Intro', url_for(\"introduction\"))]\n\n# template displaying just one single knowl as an KNOWL_INC\n_single_knowl = 'single.html'\n\n\[email protected](\"/intro\")\ndef introduction():\n b = intro_bread()\n return render_template(_single_knowl, title=\"Introduction\", kid='intro', body_class=_bc, bread=b)\n\[email protected](\"/intro/features\")\ndef introduction_features():\n b = intro_bread()\n b.append(('Features', url_for(\"introduction_features\")))\n return render_template(_single_knowl, title=\"Features\", kid='intro.features', body_class=_bc, bread=b)\n\n\[email protected](\"/intro/zetatour\")\ndef introduction_zetatour():\n b = intro_bread()\n b.append(('Tutorial', url_for(\"introduction_zetatour\")))\n return render_template(_single_knowl, title=\"A tour of the Riemann zeta function\", kid='intro.tutorial', 
body_class=_bc, bread=b)\n\[email protected](\"/bigpicture\")\ndef bigpicture():\n b = [('Big picture', url_for('bigpicture'))]\n return render_template(\"bigpicture.html\", title=\"A map of the LMFDB\", body_class=_bc, bread=b)\n\[email protected](\"/universe\")\ndef universe():\n b = [('LMFDB universe', url_for('universe'))]\n return render_template(\"universe.html\", title=\"The LMFDB universe\", body_class=_bc, bread=b)\n\[email protected](\"/news\")\ndef news():\n t = \"News\"\n b = [(t, url_for('news'))]\n return render_template(_single_knowl, title=\"LMFDB in the news\", kid='doc.news.in_the_news', body_class=_bc, bread=b)\n\n\n\n\n###############################################\n# White listing routes for www.lmfdb.org #\n###############################################\n\n\ndef routes():\n \"\"\"\n Returns all routes\n \"\"\"\n links = []\n for rule in app.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if \"GET\" in rule.methods: # and has_no_empty_params(rule):\n try:\n url = url_for(rule.endpoint, **(rule.defaults or {}))\n except Exception:\n url = None\n links.append((url, str(rule)))\n return sorted(links, key= lambda elt: elt[1])\n\[email protected](\"/sitemap\")\ndef sitemap():\n \"\"\"\n Listing all routes\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n ]\n )\n + \"</ul>\"\n )\n\nWhiteListedRoutes = [\n 'ArtinRepresentation',\n 'Character/Dirichlet',\n 'Character/calc-gauss/Dirichlet',\n 'Character/calc-jacobi/Dirichlet',\n 'Character/calc-kloosterman/Dirichlet',\n 'Character/calc-value/Dirichlet',\n 'EllipticCurve',\n 'Field',\n 'GaloisGroup',\n 'Genus2Curve/Q',\n 'Group',\n 'HigherGenus/C/Aut',\n 'L/Completeness',\n 'L/CuspForms',\n 'L/Labels',\n 'L/Lhash',\n 'L/Plot',\n 'L/Riemann',\n 'L/SymmetricPower',\n 'L/contents',\n 'L/degree',\n 'L/download',\n 'L/history',\n 'L/interesting',\n 'L/lhash',\n 'L/rational',\n 'L/tracehash',\n 'L/download',\n 'LocalNumberField',\n 'ModularForm/GL2/ImaginaryQuadratic',\n 'ModularForm/GL2/Q/Maass',\n 'ModularForm/GL2/Q/holomorphic',\n 'ModularForm/GL2/TotallyReal',\n 'NumberField',\n 'Representation/foo', # allows /Representation but not /Representation/Galois/ModL/\n 'SatoTateGroup',\n 'Variety/Abelian/Fq',\n 'about',\n 'acknowledgment',\n 'alive',\n 'api',\n 'api2',\n 'bigpicture',\n 'callback_ajax',\n 'citation',\n 'contact',\n 'editorial-board',\n 'favicon.ico',\n 'features',\n 'forcebetasitemap',\n 'health',\n 'humans.txt',\n 'info',\n 'intro',\n 'inventory',\n 'knowledge',\n 'management',\n 'news',\n 'not_yet_implemented',\n 'random',\n 'robots.txt',\n 'search',\n 'sitemap',\n 'static',\n 'statshealth',\n 'style.css',\n 'universe',\n 'users',\n 'whitelistedsitemap',\n 'zeros/zeta'\n]\n\nWhiteListedBreads = set()\nfor elt in WhiteListedRoutes:\n elt_split = elt.split('/')\n bread = ''\n for s in elt.split('/'):\n if bread:\n bread += '/' + s\n else:\n bread = s\n WhiteListedBreads.add(bread)\n\n\ndef white_listed(url):\n url = url.rstrip(\"/\").lstrip(\"/\")\n if not url:\n return True\n if (\n any(url.startswith(elt) for elt in WhiteListedRoutes)\n # check if is an allowed bread\n or url in WhiteListedBreads\n ):\n return True\n # check if it starts with an L\n elif url[:2] == \"L/\":\n # if the origin is allowed\n # or if it is a L-function with a label\n return white_listed(url[1:]) or 
url[3].isdigit()\n else:\n return False\n\n\[email protected](\"/forcebetasitemap\")\ndef forcebetasitemap():\n \"\"\"\n Listing routes that are not allowed on www.lmfdb.org\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n if not white_listed(endpoint)\n ]\n )\n + \"</ul>\"\n )\n\n\[email protected](\"/whitelistedsitemap\")\ndef whitelistedsitemap():\n \"\"\"\n Listing routes that are allowed on www.lmfdb.org\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n if white_listed(endpoint)\n ]\n )\n + \"</ul>\"\n )\n\n", "path": "lmfdb/app.py" } ]
[ { "content": "\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os\nfrom socket import gethostname\nimport time\nimport six\nfrom urllib.parse import urlparse, urlunparse\n\nfrom flask import (Flask, g, render_template, request, make_response,\n redirect, url_for, current_app, abort)\nfrom sage.env import SAGE_VERSION\n# acknowledgement page, reads info from CONTRIBUTORS.yaml\n\nfrom .logger import logger_file_handler, critical\nfrom .homepage import load_boxes, contribs\n\nLMFDB_VERSION = \"LMFDB Release 1.2\"\n\n############################\n# Main app #\n############################\n\nclass ReverseProxied(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n return self.app(environ, start_response)\n\napp = Flask(__name__)\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\n############################\n# App attribute functions #\n############################\n\ndef is_debug_mode():\n from flask import current_app\n return current_app.debug\n\n# this is set here and is available for ctx_proc_userdata\[email protected]_request\ndef set_beta_state():\n g.BETA = (os.getenv('BETA')=='1') or is_debug_mode()\n\ndef is_beta():\n from flask import g\n return g.BETA\n\napp.is_running = False\ndef set_running():\n app.is_running = True\ndef is_running():\n return app.is_running\n\n############################\n# Global app configuration #\n############################\n\napp.logger.addHandler(logger_file_handler())\n\n# If the debug toolbar is installed then use it\nif app.debug:\n try:\n from flask_debugtoolbar import DebugToolbarExtension\n toolbar = DebugToolbarExtension(app)\n except ImportError:\n pass\n\n# secret key, necessary for sessions, and sessions are\n# in turn necessary for users to login\nfrom .utils.config import get_secret_key\napp.secret_key = get_secret_key()\n\n# tell jinja to remove linebreaks\napp.jinja_env.trim_blocks = True\n\n# enable break and continue in jinja loops\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.jinja_env.add_extension('jinja2.ext.do')\n\n# the following context processor inserts\n# * empty info={} dict variable\n# * body_class = ''\n# * bread = None for the default bread crumb hierarch\n# * title = 'LMFDB'\n# * meta_description, shortthanks, feedbackpage\n# * DEBUG and BETA variables storing whether running in each mode\[email protected]_processor\ndef ctx_proc_userdata():\n # insert an empty info={} as default\n # set the body class to some default, blueprints should\n # overwrite it with their name, using @<blueprint_object>.context_processor\n # see http://flask.pocoo.org/docs/api/?highlight=context_processor#flask.Blueprint.context_processor\n vars = {'info': {}, 'body_class': ''}\n\n # insert the default bread crumb hierarchy\n # overwrite this variable when you want to customize it\n # For example, [ ('Bread', '.'), ('Crumb', '.'), ('Hierarchy', '.')]\n vars['bread'] = None\n\n # default title\n vars['title'] = r'LMFDB'\n\n # LMFDB version number displayed in footer\n vars['version'] = LMFDB_VERSION\n\n # meta_description appears in the meta tag \"description\"\n vars['meta_description'] = r'Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. 
These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.'\n vars['shortthanks'] = r'This project is supported by <a href=\"%s\">grants</a> from the US National Science Foundation, the UK Engineering and Physical Sciences Research Council, and the Simons Foundation.' % (url_for('acknowledgment') + \"#sponsors\")\n vars['feedbackpage'] = r\"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ\"\n vars['LINK_EXT'] = lambda a, b: '<a href=\"%s\" target=\"_blank\">%s</a>' % (b, a)\n\n # debug mode?\n vars['DEBUG'] = is_debug_mode()\n vars['BETA'] = is_beta()\n\n def modify_url(**replace):\n urlparts = urlparse(request.url)\n urlparts = urlparts._replace(**replace)\n return urlunparse(urlparts)\n vars['modify_url'] = modify_url\n\n return vars\n\n# Harald suggested the following but it does not work\n#\n# create the sidebar from its yaml file and inject it into the jinja environment\n#from lmfdb.homepage import get_sidebar\n#app.jinja_env.globals['sidebar'] = get_sidebar()\n#\n# so instead we do this to ensure that the sidebar content is available to every page:\[email protected]_processor\ndef inject_sidebar():\n from .homepage import get_sidebar\n return dict(sidebar=get_sidebar())\n\n##############################\n# Bottom link to google code #\n##############################\n\nbranch = \"web\"\nif (os.getenv('BETA')=='1'):\n branch = \"dev\"\n\ndef git_infos():\n try:\n from subprocess import Popen, PIPE\n # cwd should be the root of git repo\n cwd = os.path.join(os.path.dirname(os.path.realpath(__file__)),\"..\")\n commands = ['''git rev-parse HEAD''',\n '''git show --format=\"%ci\" -s HEAD''',\n '''git branch --contains HEAD''',\n '''git reflog -n5''',\n '''git log --graph -n 10''']\n kwdargs = {'shell': True, 'stdout' : PIPE, 'cwd' : cwd}\n if six.PY3:\n kwdargs['encoding'] = 'utf-8'\n pairs = [(c, Popen(c, **kwdargs).communicate()[0]) for c in commands]\n rev = pairs[0][1]\n date = pairs[0][1]\n summary = \"\\n\".join(\"$ %s\\n%s\" % p for p in pairs)\n return rev, date, summary\n except Exception:\n return '-', '-', '-'\n\n\ngit_rev, git_date, _ = git_infos()\n\n# Creates link to the source code at the most recent commit.\n_url_source = 'https://github.com/LMFDB/lmfdb/tree/'\n_current_source = '<a href=\"%s%s\">%s</a>' % (_url_source, git_rev, \"Source\")\n\n# Creates link to the list of revisions on the master, where the most recent commit is on top.\n_url_changeset = 'https://github.com/LMFDB/lmfdb/commits/%s' % branch\n_latest_changeset = '<a href=\"%s\">%s</a>' % (_url_changeset, git_date)\n\[email protected]_processor\ndef link_to_current_source():\n return {'current_source': _current_source,\n 'latest_changeset': _latest_changeset,\n 'sage_version': 'SageMath version %s' % SAGE_VERSION}\n\n##############################\n# Jinja formatters #\n##############################\n\n# you can pass in a datetime.datetime python object and via\n# {{ <datetimeobject> | fmtdatetime }} you can format it inside a jinja template\n# if you want to do more than just the default, use it for example this way:\n# {{ <datetimeobject>|fmtdatetime('%H:%M:%S') }}\[email protected]_filter(\"fmtdatetime\")\ndef fmtdatetime(value, format='%Y-%m-%d %H:%M:%S'):\n import datetime\n if isinstance(value, datetime.datetime):\n return value.strftime(format)\n else:\n return \"-\"\n\n# You can use this formatter to turn newlines in a string into HTML line breaks\[email 
protected]_filter(\"nl2br\")\ndef nl2br(s):\n return s.replace('\\n', '<br/>\\n')\n\n# You can use this formatter to encode a dictionary into a url string\[email protected]_filter('urlencode')\ndef urlencode(kwargs):\n from six.moves.urllib.parse import urlencode\n return urlencode(kwargs)\n\n##############################\n# Redirects and errors #\n##############################\n\n\[email protected]_request\ndef netloc_redirect():\n \"\"\"\n Redirect lmfdb.org -> www.lmfdb.org\n Redirect {www, beta, }.lmfdb.com -> {www, beta, }.lmfdb.org\n Force https on www.lmfdb.org\n Redirect non-whitelisted routes from www.lmfdb.org to beta.lmfdb.org\n \"\"\"\n from six.moves.urllib.parse import urlparse, urlunparse\n\n urlparts = urlparse(request.url)\n\n if urlparts.netloc in [\"lmfdb.org\", \"lmfdb.com\", \"www.lmfdb.com\"]:\n replaced = urlparts._replace(netloc=\"www.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=301)\n elif urlparts.netloc == \"beta.lmfdb.com\":\n replaced = urlparts._replace(netloc=\"beta.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=301)\n elif (\n urlparts.netloc == \"www.lmfdb.org\"\n and request.headers.get(\"X-Forwarded-Proto\", \"http\") != \"https\"\n and request.url.startswith(\"http://\")\n ):\n url = request.url.replace(\"http://\", \"https://\", 1)\n return redirect(url, code=301)\n elif (\n urlparts.netloc == \"www.lmfdb.org\"\n and\n not white_listed(urlparts.path)\n ):\n replaced = urlparts._replace(netloc=\"beta.lmfdb.org\", scheme=\"https\")\n return redirect(urlunparse(replaced), code=302)\n\n\n\ndef timestamp():\n return '[%s UTC]' % time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n\[email protected](404)\ndef not_found_404(error):\n app.logger.info('%s 404 error for URL %s %s'%(timestamp(),request.url,error.description))\n messages = error.description if isinstance(error.description,(list,tuple)) else (error.description,)\n return render_template(\"404.html\", title='LMFDB Page Not Found', messages=messages), 404\n\[email protected](500)\ndef not_found_500(error):\n app.logger.error(\"%s 500 error on URL %s %s\"%(timestamp(),request.url, error.args))\n return render_template(\"500.html\", title='LMFDB Error'), 500\n\[email protected](503)\ndef not_found_503(error):\n return render_template(\"503.html\"), 503\n\n##############################\n# Cookies #\n##############################\n\[email protected]_request\ndef get_menu_cookie():\n \"\"\"\n sets cookie for show/hide sidebar\n \"\"\"\n g.show_menu = str(request.cookies.get('showmenu')) != \"False\"\n\n##############################\n# Top-level pages #\n##############################\n\[email protected](\"/\")\ndef index():\n return render_template('index-boxes.html',\n titletag=\"The L-functions and modular forms database\",\n title=\"LMFDB - The L-functions and Modular Forms Database\",\n bread=None,\n boxes=load_boxes())\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\", title=\"About the LMFDB\")\n\[email protected](\"/health\")\[email protected](\"/alive\")\ndef alive():\n \"\"\"\n a basic health check\n \"\"\"\n from . import db\n if db.is_alive():\n return \"LMFDB!\"\n else:\n abort(503)\n\n\n\[email protected](\"/statshealth\")\ndef statshealth():\n \"\"\"\n a health check on the stats pages\n \"\"\"\n from . 
import db\n if db.is_alive():\n tc = app.test_client()\n for url in ['/NumberField/stats',\n '/ModularForm/GL2/Q/holomorphic/stats',\n '/EllipticCurve/Q/stats',\n '/EllipticCurve/browse/2/',\n '/EllipticCurve/browse/3/',\n '/EllipticCurve/browse/4/',\n '/EllipticCurve/browse/5/',\n '/EllipticCurve/browse/6/',\n '/Genus2Curve/Q/stats',\n '/Belyi/stats',\n '/HigherGenus/C/Aut/stats',\n ]:\n try:\n if tc.get(url).status_code != 200:\n abort(503)\n except Exception:\n abort(503)\n else:\n return \"LMFDB stats are healthy!\"\n else:\n abort(503)\n\[email protected](\"/info\")\ndef info():\n output = \"\"\n output += \"HOSTNAME = %s\\n\\n\" % gethostname()\n output += \"# PostgreSQL info\\n\"\n from . import db\n if not db.is_alive():\n output += \"db is offline\\n\"\n else:\n conn_str = \"%s\" % db.conn\n output += \"Connection: %s\\n\" % conn_str.replace(\"<\",\"\").replace(\">\",\"\")\n output += \"User: %s\\n\" % db._user\n output += \"Read only: %s\\n\" % db._read_only\n output += \"Read and write to userdb: %s\\n\" % db._read_and_write_userdb\n output += \"Read and write to knowls: %s\\n\" % db._read_and_write_knowls\n output += \"\\n# GIT info\\n\"\n output += git_infos()[-1]\n output += \"\\n\\n\"\n return output.replace(\"\\n\", \"<br>\")\n\n\[email protected](\"/acknowledgment\")\ndef acknowledgment():\n bread = [(\"Acknowledgments\" , '')]\n return render_template(\"acknowledgment.html\", title=\"Acknowledgments\", contribs=contribs, bread=bread)\n\[email protected](\"/acknowledgment/activities\")\ndef workshops():\n bread = [(\"Acknowledgments\" , url_for('.acknowledgment')) , (\"Activities\", '')]\n return render_template(\"workshops.html\", title=\"LMFDB Activities\", contribs=contribs, bread=bread)\n\n# google's CSE for www.lmfdb.org/* (and *only* those pages!)\[email protected](\"/search\")\ndef search():\n return render_template(\"search.html\", title=\"Search LMFDB\", bread=[('Search', url_for(\"search\"))])\n\[email protected]('/ModularForm')\[email protected]('/ModularForm/')\ndef modular_forms():\n t = 'Modular forms'\n b = [(t, url_for('modular_forms'))]\n # lm = [('History of modular forms', '/ModularForm/history')]\n return render_template('single.html', title=t, kid='mf.about', bread=b) #, learnmore=lm)\n\n# @app.route(\"/ModularForm/history\")\ndef modular_forms_history():\n t = 'Modular forms'\n b = [(t, url_for('modular_forms'))]\n b.append(('History', url_for(\"modular_forms_history\")))\n return render_template(_single_knowl, title=\"A brief history of modular forms\", kid='mf.gl2.history', body_class=_bc, bread=b)\n\[email protected]('/Variety')\[email protected]('/Variety/')\ndef varieties():\n t = 'Varieties'\n b = [(t, url_for('varieties'))]\n # lm = [('History of varieties', '/Variety/history')]\n return render_template('single.html', title=t, kid='varieties.about', bread=b) #, learnmore=lm)\n\n# @app.route(\"/Variety/history\")\ndef varieties_history():\n t = 'Varieties'\n b = [(t, url_for('varieties'))]\n b.append(('History', url_for(\"varieties_history\")))\n return render_template(_single_knowl, title=\"A brief history of varieties\", kid='ag.variety.history', body_class=_bc, bread=b)\n\[email protected]('/Field')\[email protected]('/Field/')\ndef fields():\n t = 'Fields'\n b = [(t, url_for('fields'))]\n # lm = [('History of fields', '/Field/history')]\n return render_template('single.html', kid='field.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Field/history\")\ndef fields_history():\n t = 'Fields'\n b = [(t, 
url_for('fields'))]\n b.append(('History', url_for(\"fields_history\")))\n return render_template(_single_knowl, title=\"A brief history of fields\", kid='field.history', body_class=_bc, bread=b)\n\n@app.route('/Representation')\n@app.route('/Representation/')\ndef representations():\n t = 'Representations'\n b = [(t, url_for('representations'))]\n # lm = [('History of representations', '/Representation/history')]\n return render_template('single.html', kid='repn.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Representation/history\")\ndef representations_history():\n t = 'Representations'\n b = [(t, url_for('representations'))]\n b.append(('History', url_for(\"representations_history\")))\n return render_template(_single_knowl, title=\"A brief history of representations\", kid='repn.history', body_class=_bc, bread=b)\n\n@app.route('/Motive')\n@app.route('/Motive/')\ndef motives():\n t = 'Motives'\n b = [(t, url_for('motives'))]\n # lm = [('History of motives', '/Motives/history')]\n return render_template('single.html', kid='motives.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Motives/history\")\ndef motives_history():\n t = 'Motives'\n b = [(t, url_for('motives'))]\n b.append(('History', url_for(\"motives_history\")))\n return render_template(_single_knowl, title=\"A brief history of motives\", kid='motives.history', body_class=_bc, bread=b)\n\n@app.route('/Group')\n@app.route('/Group/')\ndef groups():\n t = 'Groups'\n b = [(t, url_for('groups'))]\n # lm = [('History of groups', '/Group/history')]\n return render_template('single.html', kid='group.about', title=t, body_class=_bc, bread=b) #, learnmore=lm)\n\n# @app.route(\"/Group/history\")\ndef groups_history():\n t = 'Groups'\n b = [(t, url_for('groups'))]\n b.append(('History', url_for(\"groups_history\")))\n return render_template(_single_knowl, title=\"A brief history of groups\", kid='group.history', body_class=_bc, bread=b)\n\n@app.route(\"/editorial-board\")\n@app.route(\"/management-board\")\n@app.route(\"/management\")\ndef editorial_board():\n t = \"Editorial Board\"\n b = [(t, url_for(\"editorial_board\"))]\n return render_template('management.html', title=t, bread=b)\n\n@app.route(\"/citation\")\ndef citation():\n t = \"Citing the LMFDB\"\n b = [(t, url_for(\"citation\"))]\n return render_template('citation.html', title=t, body_class='', bread=b)\n\n\n@app.route(\"/contact\")\ndef contact():\n t = \"Contact and Feedback\"\n b = [(t, url_for(\"contact\"))]\n return render_template('contact.html', title=t, body_class='', bread=b)\n\ndef root_static_file(name):\n def static_fn():\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", name)\n if os.path.exists(fn):\n return open(fn, \"rb\").read()\n critical(\"root_static_file: file %s not found!\" % fn)\n return abort(404, 'static file %s not found.' 
% fn)\n app.add_url_rule('/%s' % name, 'static_%s' % name, static_fn)\n\n\nfor fn in ['favicon.ico']:\n root_static_file(fn)\n\n\n@app.route(\"/robots.txt\")\ndef robots_txt():\n if \"www.lmfdb.org\".lower() in request.url_root.lower():\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", \"robots.txt\")\n if os.path.exists(fn):\n return open(fn).read()\n # not running on www.lmfdb.org\n else:\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"static\", \"default_robots.txt\")\n if os.path.exists(fn):\n return open(fn).read()\n return \"User-agent: *\\nDisallow: / \\n\"\n\n# geeky pages have humans.txt\n@app.route(\"/humans.txt\")\ndef humans_txt():\n return render_template(\"acknowledgment.html\", title=\"Acknowledgments\")\n\n@app.context_processor\ndef add_colors():\n # FIXME:\n # - the template should use global variable g.color\n # - try to get the color from\n # - the cookie\n # - from the config file\n # - remove cookie at logout (see line 307 of users/main)\n # - add cookie at login or when a color change happens (see line 175 of users/main)\n from .utils.color import all_color_schemes\n color = request.args.get('color')\n if color and color.isdigit():\n color = int(color)\n if color not in all_color_schemes:\n color = None\n if color is None:\n from flask_login import current_user\n userid = current_user.get_id()\n if userid is not None:\n from .users.pwdmanager import userdb\n color = userdb.lookup(userid).get('color_scheme')\n if color not in all_color_schemes:\n color = None\n if color is None:\n from .utils.config import Configuration\n color = Configuration().get_color()\n return dict(color=all_color_schemes[color].dict())\n\n@app.route(\"/style.css\")\ndef css():\n response = make_response(render_template(\"style.css\"))\n response.headers['Content-type'] = 'text/css'\n # don't cache css file, if in debug mode.\n if current_app.debug:\n response.headers['Cache-Control'] = 'no-cache, no-store'\n else:\n response.headers['Cache-Control'] = 'public, max-age=600'\n return response\n\n@app.route(\"/not_yet_implemented\")\ndef not_yet_implemented():\n return render_template(\"not_yet_implemented.html\", title=\"Not Yet Implemented\")\n\n# the checklist is used for human testing on a high-level, supplements test.sh\n\n@app.route(\"/checklist-list\")\ndef checklist_list():\n return render_template(\"checklist.html\", body_class=\"checklist\")\n\n@app.route(\"/checklist\")\ndef checklist():\n return render_template(\"checklist-fs.html\")\n\n##############################\n# Intro pages #\n##############################\n\n# common base class and bread\n_bc = 'intro'\nintro_bread = lambda: [('Intro', url_for(\"introduction\"))]\n\n# template displaying just one single knowl as an KNOWL_INC\n_single_knowl = 'single.html'\n\n\n@app.route(\"/intro\")\ndef introduction():\n b = intro_bread()\n return render_template(_single_knowl, title=\"Introduction\", kid='intro', body_class=_bc, bread=b)\n\n@app.route(\"/intro/features\")\ndef introduction_features():\n b = intro_bread()\n b.append(('Features', url_for(\"introduction_features\")))\n return render_template(_single_knowl, title=\"Features\", kid='intro.features', body_class=_bc, bread=b)\n\n\n@app.route(\"/intro/zetatour\")\ndef introduction_zetatour():\n b = intro_bread()\n b.append(('Tutorial', url_for(\"introduction_zetatour\")))\n return render_template(_single_knowl, title=\"A tour of the Riemann zeta function\", kid='intro.tutorial', 
body_class=_bc, bread=b)\n\n@app.route(\"/bigpicture\")\ndef bigpicture():\n b = [('Big picture', url_for('bigpicture'))]\n return render_template(\"bigpicture.html\", title=\"A map of the LMFDB\", body_class=_bc, bread=b)\n\n@app.route(\"/universe\")\ndef universe():\n b = [('LMFDB universe', url_for('universe'))]\n return render_template(\"universe.html\", title=\"The LMFDB universe\", body_class=_bc, bread=b)\n\n@app.route(\"/news\")\ndef news():\n t = \"News\"\n b = [(t, url_for('news'))]\n return render_template(_single_knowl, title=\"LMFDB in the news\", kid='doc.news.in_the_news', body_class=_bc, bread=b)\n\n\n\n\n###############################################\n# White listing routes for www.lmfdb.org #\n###############################################\n\n\ndef routes():\n \"\"\"\n Returns all routes\n \"\"\"\n links = []\n for rule in app.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if \"GET\" in rule.methods: # and has_no_empty_params(rule):\n try:\n url = url_for(rule.endpoint, **(rule.defaults or {}))\n except Exception:\n url = None\n links.append((url, str(rule)))\n return sorted(links, key= lambda elt: elt[1])\n\n@app.route(\"/sitemap\")\ndef sitemap():\n \"\"\"\n Listing all routes\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n ]\n )\n + \"</ul>\"\n )\n\nWhiteListedRoutes = [\n 'ArtinRepresentation',\n 'Character/Dirichlet',\n 'Character/calc-gauss/Dirichlet',\n 'Character/calc-jacobi/Dirichlet',\n 'Character/calc-kloosterman/Dirichlet',\n 'Character/calc-value/Dirichlet',\n 'EllipticCurve',\n 'Field',\n 'GaloisGroup',\n 'Genus2Curve/Q',\n 'Group',\n 'HigherGenus/C/Aut',\n 'L/Completeness',\n 'L/CuspForms',\n 'L/Labels',\n 'L/Lhash',\n 'L/Plot',\n 'L/Riemann',\n 'L/SymmetricPower',\n 'L/contents',\n 'L/degree',\n 'L/download',\n 'L/history',\n 'L/interesting',\n 'L/lhash',\n 'L/rational',\n 'L/tracehash',\n 'L/download',\n 'LocalNumberField',\n 'ModularForm/GL2/ImaginaryQuadratic',\n 'ModularForm/GL2/Q/Maass',\n 'ModularForm/GL2/Q/holomorphic',\n 'ModularForm/GL2/TotallyReal',\n 'NumberField',\n 'Representation/foo', # allows /Representation but not /Representation/Galois/ModL/\n 'SatoTateGroup',\n 'Variety/Abelian/Fq',\n 'about',\n 'acknowledgment',\n 'alive',\n 'api',\n 'api2',\n 'bigpicture',\n 'callback_ajax',\n 'citation',\n 'contact',\n 'editorial-board',\n 'favicon.ico',\n 'features',\n 'forcebetasitemap',\n 'health',\n 'humans.txt',\n 'info',\n 'intro',\n 'inventory',\n 'knowledge',\n 'management',\n 'news',\n 'not_yet_implemented',\n 'random',\n 'robots.txt',\n 'search',\n 'sitemap',\n 'static',\n 'statshealth',\n 'style.css',\n 'universe',\n 'users',\n 'whitelistedsitemap',\n 'zeros/zeta'\n]\n\nWhiteListedBreads = set()\nfor elt in WhiteListedRoutes:\n elt_split = elt.split('/')\n bread = ''\n for s in elt.split('/'):\n if bread:\n bread += '/' + s\n else:\n bread = s\n WhiteListedBreads.add(bread)\n\n\ndef white_listed(url):\n url = url.rstrip(\"/\").lstrip(\"/\")\n if not url:\n return True\n if (\n any(url.startswith(elt) for elt in WhiteListedRoutes)\n # check if is an allowed bread\n or url in WhiteListedBreads\n ):\n return True\n # check if it starts with an L\n elif url[:2] == \"L/\":\n # if the origin is allowed\n # or if it is a L-function with a label\n return white_listed(url[1:]) or len(url) == 2 or 
url[2].isdigit()\n else:\n return False\n\n\n@app.route(\"/forcebetasitemap\")\ndef forcebetasitemap():\n \"\"\"\n Listing routes that are not allowed on www.lmfdb.org\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n if not white_listed(endpoint)\n ]\n )\n + \"</ul>\"\n )\n\n\n@app.route(\"/whitelistedsitemap\")\ndef whitelistedsitemap():\n \"\"\"\n Listing routes that are allowed on www.lmfdb.org\n \"\"\"\n return (\n \"<ul>\"\n + \"\\n\".join(\n [\n '<li><a href=\"{0}\">{1}</a></li>'.format(url, endpoint)\n if url is not None\n else \"<li>{0}</li>\".format(endpoint)\n for url, endpoint in routes()\n if white_listed(endpoint)\n ]\n )\n + \"</ul>\"\n )\n\n", "path": "lmfdb/app.py" } ]
diff --git a/lmfdb/app.py b/lmfdb/app.py index 0cae7a623a..284a865921 100644 --- a/lmfdb/app.py +++ b/lmfdb/app.py @@ -761,7 +761,7 @@ def white_listed(url): elif url[:2] == "L/": # if the origin is allowed # or if it is a L-function with a label - return white_listed(url[1:]) or url[3].isdigit() + return white_listed(url[1:]) or len(url) == 2 or url[2].isdigit() else: return False
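A note on the patch above (not part of the original record): after stripping slashes, an L-function label URL such as `L/4/1/1.1-1` has its digit at index 2, and a short path like `L/4` has no index 3 at all, so the old `url[3].isdigit()` both tested the wrong character and could raise an `IndexError`. The sketch below isolates just that branch to make the change concrete; the real function also consults the route whitelist and calls itself recursively, which is omitted here.

```python
def l_label_branch(url: str) -> bool:
    """Standalone stub of only the patched "L/..." branch of white_listed (illustrative)."""
    url = url.rstrip("/").lstrip("/")
    if url[:2] == "L/":
        # allowed if nothing but the prefix remains, or if an L-function
        # label (starting with a digit) follows the "L/" prefix
        return len(url) == 2 or url[2].isdigit()
    return False

print(l_label_branch("/L/4/1/1.1-1/"))  # True: the digit sits at index 2, not 3
print(l_label_branch("/L/4"))           # True; the old url[3] here would raise IndexError
```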
pytorch__vision-2518
Resize sometimes fails
https://github.com/pytorch/vision/blob/300ef76d3b6e9c33c58cd124e5f5514a927cadf1/torchvision/transforms/functional_tensor.py#L589

The specific line above will sometimes cause the image to be returned unresized, with no warning, even when an explicit size (h,w) is given.
This also causes problems for the `RandomResizedCrop` transform.
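To make the failure mode concrete, here is a minimal sketch (not part of the original report) that evaluates the early-return check from the linked line with an explicit `(h, w)` size; the shapes are made up for illustration:

```python
# Input image is 32 wide and 64 tall; caller explicitly requests (h, w) = (100, 32).
w, h = 32, 64
size_h, size_w = 100, 32

# The check at the linked line of functional_tensor.resize:
if (w <= h and w == size_w) or (h <= w and h == size_h):
    # Triggers because w == size_w, so the original tensor is returned
    # even though the requested height (100) differs from h (64).
    print("early return: image handed back unresized")
```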
[ { "content": "import warnings\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.functional import affine_grid, grid_sample\nfrom torch.jit.annotations import List, BroadcastingList2\n\n\ndef _is_tensor_a_torch_image(x: Tensor) -> bool:\n return x.ndim >= 2\n\n\ndef _get_image_size(img: Tensor) -> List[int]:\n \"\"\"Returns (w, h) of tensor image\"\"\"\n if _is_tensor_a_torch_image(img):\n return [img.shape[-1], img.shape[-2]]\n raise TypeError(\"Unexpected type {}\".format(type(img)))\n\n\ndef vflip(img: Tensor) -> Tensor:\n \"\"\"Vertically flip the given the Image Tensor.\n\n Args:\n img (Tensor): Image Tensor to be flipped in the form [C, H, W].\n\n Returns:\n Tensor: Vertically flipped image Tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return img.flip(-2)\n\n\ndef hflip(img: Tensor) -> Tensor:\n \"\"\"Horizontally flip the given the Image Tensor.\n\n Args:\n img (Tensor): Image Tensor to be flipped in the form [C, H, W].\n\n Returns:\n Tensor: Horizontally flipped image Tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return img.flip(-1)\n\n\ndef crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:\n \"\"\"Crop the given Image Tensor.\n\n Args:\n img (Tensor): Image to be cropped in the form [..., H, W]. (0,0) denotes the top left corner of the image.\n top (int): Vertical component of the top left corner of the crop box.\n left (int): Horizontal component of the top left corner of the crop box.\n height (int): Height of the crop box.\n width (int): Width of the crop box.\n\n Returns:\n Tensor: Cropped image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n return img[..., top:top + height, left:left + width]\n\n\ndef rgb_to_grayscale(img: Tensor) -> Tensor:\n \"\"\"Convert the given RGB Image Tensor to Grayscale.\n For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which\n is L = R * 0.2989 + G * 0.5870 + B * 0.1140\n\n Args:\n img (Tensor): Image to be converted to Grayscale in the form [C, H, W].\n\n Returns:\n Tensor: Grayscale image.\n\n \"\"\"\n if img.shape[0] != 3:\n raise TypeError('Input Image does not contain 3 Channels')\n\n return (0.2989 * img[0] + 0.5870 * img[1] + 0.1140 * img[2]).to(img.dtype)\n\n\ndef adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:\n \"\"\"Adjust brightness of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n Tensor: Brightness adjusted image.\n \"\"\"\n if brightness_factor < 0:\n raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, torch.zeros_like(img), brightness_factor)\n\n\ndef adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:\n \"\"\"Adjust contrast of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n contrast_factor (float): How much to adjust the contrast. Can be any\n non negative number. 
0 gives a solid gray image, 1 gives the\n original image while 2 increases the contrast by a factor of 2.\n\n Returns:\n Tensor: Contrast adjusted image.\n \"\"\"\n if contrast_factor < 0:\n raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n mean = torch.mean(rgb_to_grayscale(img).to(torch.float))\n\n return _blend(img, mean, contrast_factor)\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjust hue of an image.\n\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n\n See `Hue`_ for more details.\n\n .. _Hue: https://en.wikipedia.org/wiki/Hue\n\n Args:\n img (Tensor): Image to be adjusted. Image type is either uint8 or float.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n\n Returns:\n Tensor: Hue adjusted image.\n \"\"\"\n if not (-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n orig_dtype = img.dtype\n if img.dtype == torch.uint8:\n img = img.to(dtype=torch.float32) / 255.0\n\n img = _rgb2hsv(img)\n h, s, v = img.unbind(0)\n h += hue_factor\n h = h % 1.0\n img = torch.stack((h, s, v))\n img_hue_adj = _hsv2rgb(img)\n\n if orig_dtype == torch.uint8:\n img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)\n\n return img_hue_adj\n\n\ndef adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:\n \"\"\"Adjust color saturation of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. Can be any\n non negative number. 0 gives a black and white image, 1 gives the\n original image while 2 enhances the saturation by a factor of 2.\n\n Returns:\n Tensor: Saturation adjusted image.\n \"\"\"\n if saturation_factor < 0:\n raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, rgb_to_grayscale(img), saturation_factor)\n\n\ndef adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:\n r\"\"\"Adjust gamma of an RGB image.\n\n Also known as Power Law Transform. Intensities in RGB mode are adjusted\n based on the following equation:\n\n .. math::\n `I_{\\text{out}} = 255 \\times \\text{gain} \\times \\left(\\frac{I_{\\text{in}}}{255}\\right)^{\\gamma}`\n\n See `Gamma Correction`_ for more details.\n\n .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction\n\n Args:\n img (Tensor): Tensor of RBG values to be adjusted.\n gamma (float): Non negative real number, same as :math:`\\gamma` in the equation.\n gamma larger than 1 make the shadows darker,\n while gamma smaller than 1 make dark regions lighter.\n gain (float): The constant multiplier.\n \"\"\"\n\n if not isinstance(img, torch.Tensor):\n raise TypeError('img should be a Tensor. 
Got {}'.format(type(img)))\n\n if gamma < 0:\n raise ValueError('Gamma should be a non-negative real number')\n\n result = img\n dtype = img.dtype\n if not torch.is_floating_point(img):\n result = result / 255.0\n\n result = (gain * result ** gamma).clamp(0, 1)\n\n if result.dtype != dtype:\n eps = 1e-3\n result = (255 + 1.0 - eps) * result\n result = result.to(dtype)\n return result\n\n\ndef center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n \"\"\"Crop the Image Tensor and resize it to desired size.\n\n Args:\n img (Tensor): Image to be cropped.\n output_size (sequence or int): (height, width) of the crop box. If int,\n it is used for both directions\n\n Returns:\n Tensor: Cropped image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n _, image_width, image_height = img.size()\n crop_height, crop_width = output_size\n # crop_top = int(round((image_height - crop_height) / 2.))\n # Result can be different between python func and scripted func\n # Temporary workaround:\n crop_top = int((image_height - crop_height + 1) * 0.5)\n # crop_left = int(round((image_width - crop_width) / 2.))\n # Result can be different between python func and scripted func\n # Temporary workaround:\n crop_left = int((image_width - crop_width + 1) * 0.5)\n\n return crop(img, crop_top, crop_left, crop_height, crop_width)\n\n\ndef five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:\n \"\"\"Crop the given Image Tensor into four corners and the central crop.\n .. Note::\n This transform returns a List of Tensors and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n img (Tensor): Image to be cropped.\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n List: List (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n _, image_width, image_height = img.size()\n crop_height, crop_width = size\n if crop_width > image_width or crop_height > image_height:\n msg = \"Requested crop size {} is bigger than input size {}\"\n raise ValueError(msg.format(size, (image_height, image_width)))\n\n tl = crop(img, 0, 0, crop_width, crop_height)\n tr = crop(img, image_width - crop_width, 0, image_width, crop_height)\n bl = crop(img, 0, image_height - crop_height, crop_width, image_height)\n br = crop(img, image_width - crop_width, image_height - crop_height, image_width, image_height)\n center = center_crop(img, (crop_height, crop_width))\n\n return [tl, tr, bl, br, center]\n\n\ndef ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:\n \"\"\"Crop the given Image Tensor into four corners and the central crop plus the\n flipped version of these (horizontal flipping is used by default).\n\n .. Note::\n This transform returns a List of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n img (Tensor): Image to be cropped.\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Returns:\n List: List (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)\n Corresponding top left, top right, bottom left, bottom right and center crop\n and same for the flipped image's tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n first_five = five_crop(img, size)\n\n if vertical_flip:\n img = vflip(img)\n else:\n img = hflip(img)\n\n second_five = five_crop(img, size)\n\n return first_five + second_five\n\n\ndef _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:\n bound = 1 if img1.dtype in [torch.half, torch.float32, torch.float64] else 255\n return (ratio * img1 + (1 - ratio) * img2).clamp(0, bound).to(img1.dtype)\n\n\ndef _rgb2hsv(img):\n r, g, b = img.unbind(0)\n\n maxc = torch.max(img, dim=0).values\n minc = torch.min(img, dim=0).values\n\n # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN\n # from happening in the results, because\n # + S channel has division by `maxc`, which is zero only if `maxc = minc`\n # + H channel has division by `(maxc - minc)`.\n #\n # Instead of overwriting NaN afterwards, we just prevent it from occuring so\n # we don't need to deal with it in case we save the NaN in a buffer in\n # backprop, if it is ever supported, but it doesn't hurt to do so.\n eqc = maxc == minc\n\n cr = maxc - minc\n # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine.\n s = cr / torch.where(eqc, maxc.new_ones(()), maxc)\n # Note that `eqc => maxc = minc = r = g = b`. So the following calculation\n # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it\n # would not matter what values `rc`, `gc`, and `bc` have here, and thus\n # replacing denominator with 1 when `eqc` is fine.\n cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)\n rc = (maxc - r) / cr_divisor\n gc = (maxc - g) / cr_divisor\n bc = (maxc - b) / cr_divisor\n\n hr = (maxc == r) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)\n hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)\n h = (hr + hg + hb)\n h = torch.fmod((h / 6.0 + 1.0), 1.0)\n return torch.stack((h, s, maxc))\n\n\ndef _hsv2rgb(img):\n h, s, v = img.unbind(0)\n i = torch.floor(h * 6.0)\n f = (h * 6.0) - i\n i = i.to(dtype=torch.int32)\n\n p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)\n q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)\n t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)\n i = i % 6\n\n mask = i == torch.arange(6)[:, None, None]\n\n a1 = torch.stack((v, q, p, p, t, v))\n a2 = torch.stack((t, v, v, q, p, p))\n a3 = torch.stack((p, p, t, v, v, q))\n a4 = torch.stack((a1, a2, a3))\n\n return torch.einsum(\"ijk, xijk -> xjk\", mask.to(dtype=img.dtype), a4)\n\n\ndef _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:\n # padding is left, right, top, bottom\n in_sizes = img.size()\n\n x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]\n left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]\n right_indices = [-(i + 1) for i in range(padding[1])] # e.g. 
[-1, -2, -3]\n x_indices = torch.tensor(left_indices + x_indices + right_indices)\n\n y_indices = [i for i in range(in_sizes[-2])]\n top_indices = [i for i in range(padding[2] - 1, -1, -1)]\n bottom_indices = [-(i + 1) for i in range(padding[3])]\n y_indices = torch.tensor(top_indices + y_indices + bottom_indices)\n\n ndim = img.ndim\n if ndim == 3:\n return img[:, y_indices[:, None], x_indices[None, :]]\n elif ndim == 4:\n return img[:, :, y_indices[:, None], x_indices[None, :]]\n else:\n raise RuntimeError(\"Symmetric padding of N-D tensors are not supported yet\")\n\n\ndef pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = \"constant\") -> Tensor:\n r\"\"\"Pad the given Tensor Image on all sides with specified padding mode and fill value.\n\n Args:\n img (Tensor): Image to be padded.\n padding (int or tuple or list): Padding on each border. If a single int is provided this\n is used to pad all borders. If a tuple or list of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple or list of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively. In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n fill (int): Pixel fill value for constant fill. Default is 0.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge or reflect. Default is constant.\n Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n Tensor: Padded image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n if not isinstance(padding, (int, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n if not isinstance(fill, (int, float)):\n raise TypeError(\"Got inappropriate fill arg\")\n if not isinstance(padding_mode, str):\n raise TypeError(\"Got inappropriate padding_mode arg\")\n\n if isinstance(padding, tuple):\n padding = list(padding)\n\n if isinstance(padding, list) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, int):\n if torch.jit.is_scripting():\n # This maybe unreachable\n raise ValueError(\"padding can't be an int while torchscripting, set it as a list [value, ]\")\n pad_left = pad_right = pad_top = pad_bottom = padding\n elif len(padding) == 1:\n pad_left = pad_right = pad_top = pad_bottom = padding[0]\n elif len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n else:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n p = [pad_left, pad_right, pad_top, 
pad_bottom]\n\n if padding_mode == \"edge\":\n # remap padding_mode str\n padding_mode = \"replicate\"\n elif padding_mode == \"symmetric\":\n # route to another implementation\n if p[0] < 0 or p[1] < 0 or p[2] < 0 or p[3] < 0: # no any support for torch script\n raise ValueError(\"Padding can not be negative for symmetric padding_mode\")\n return _pad_symmetric(img, p)\n\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n out_dtype = img.dtype\n need_cast = False\n if (padding_mode != \"constant\") and img.dtype not in (torch.float32, torch.float64):\n # Here we temporary cast input tensor to float\n # until pytorch issue is resolved :\n # https://github.com/pytorch/pytorch/issues/40763\n need_cast = True\n img = img.to(torch.float32)\n\n img = torch.nn.functional.pad(img, p, mode=padding_mode, value=float(fill))\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n img = img.to(out_dtype)\n\n return img\n\n\ndef resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:\n r\"\"\"Resize the input Tensor to the given size.\n\n Args:\n img (Tensor): Image to be resized.\n size (int or tuple or list): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. If size is an int,\n the smaller edge of the image will be matched to this number maintaining\n the aspect ratio. i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`.\n In torchscript mode padding as a single int is not supported, use a tuple or\n list of length 1: ``[size, ]``.\n interpolation (int, optional): Desired interpolation. Default is bilinear (=2). Other supported values:\n nearest(=0) and bicubic(=3).\n\n Returns:\n Tensor: Resized image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n if not isinstance(size, (int, tuple, list)):\n raise TypeError(\"Got inappropriate size arg\")\n if not isinstance(interpolation, int):\n raise TypeError(\"Got inappropriate interpolation arg\")\n\n _interpolation_modes = {\n 0: \"nearest\",\n 2: \"bilinear\",\n 3: \"bicubic\",\n }\n\n if interpolation not in _interpolation_modes:\n raise ValueError(\"This interpolation mode is unsupported with Tensor input\")\n\n if isinstance(size, tuple):\n size = list(size)\n\n if isinstance(size, list) and len(size) not in [1, 2]:\n raise ValueError(\"Size must be an int or a 1 or 2 element tuple/list, not a \"\n \"{} element tuple/list\".format(len(size)))\n\n w, h = _get_image_size(img)\n\n if isinstance(size, int):\n size_w, size_h = size, size\n elif len(size) < 2:\n size_w, size_h = size[0], size[0]\n else:\n size_w, size_h = size[1], size[0] # Convention (h, w)\n\n if isinstance(size, int) or len(size) < 2:\n if w < h:\n size_h = int(size_w * h / w)\n else:\n size_w = int(size_h * w / h)\n\n if (w <= h and w == size_w) or (h <= w and h == size_h):\n return img\n\n # make image NCHW\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n mode = _interpolation_modes[interpolation]\n\n out_dtype = img.dtype\n need_cast = False\n if img.dtype not in (torch.float32, torch.float64):\n need_cast = True\n img = img.to(torch.float32)\n\n # Define align_corners to avoid warnings\n align_corners = False if mode in [\"bilinear\", \"bicubic\"] else None\n\n img = torch.nn.functional.interpolate(img, size=(size_h, size_w), mode=mode, 
align_corners=align_corners)\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n if mode == \"bicubic\":\n img = img.clamp(min=0, max=255)\n img = img.to(out_dtype)\n\n return img\n\n\ndef affine(\n img: Tensor, matrix: List[float], resample: int = 0, fillcolor: Optional[int] = None\n) -> Tensor:\n \"\"\"Apply affine transformation on the Tensor image keeping image center invariant.\n\n Args:\n img (Tensor): image to be rotated.\n matrix (list of floats): list of 6 float values representing inverse matrix for affine transformation.\n resample (int, optional): An optional resampling filter. Default is nearest (=2). Other supported values:\n bilinear(=2).\n fillcolor (int, optional): this option is not supported for Tensor input. Fill value for the area outside the\n transform in the output image is always 0.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):\n raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))\n\n if fillcolor is not None:\n warnings.warn(\"Argument fillcolor is not supported for Tensor input. Fill value is zero\")\n\n _interpolation_modes = {\n 0: \"nearest\",\n 2: \"bilinear\",\n }\n\n if resample not in _interpolation_modes:\n raise ValueError(\"This resampling mode is unsupported with Tensor input\")\n\n theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)\n shape = img.shape\n grid = affine_grid(theta, size=(1, shape[-3], shape[-2], shape[-1]), align_corners=False)\n\n # make image NCHW\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n mode = _interpolation_modes[resample]\n\n out_dtype = img.dtype\n need_cast = False\n if img.dtype not in (torch.float32, torch.float64):\n need_cast = True\n img = img.to(torch.float32)\n\n img = grid_sample(img, grid, mode=mode, padding_mode=\"zeros\", align_corners=False)\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n # it is better to round before cast\n img = torch.round(img).to(out_dtype)\n\n return img\n", "path": "torchvision/transforms/functional_tensor.py" } ]
[ { "content": "import warnings\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.functional import affine_grid, grid_sample\nfrom torch.jit.annotations import List, BroadcastingList2\n\n\ndef _is_tensor_a_torch_image(x: Tensor) -> bool:\n return x.ndim >= 2\n\n\ndef _get_image_size(img: Tensor) -> List[int]:\n \"\"\"Returns (w, h) of tensor image\"\"\"\n if _is_tensor_a_torch_image(img):\n return [img.shape[-1], img.shape[-2]]\n raise TypeError(\"Unexpected type {}\".format(type(img)))\n\n\ndef vflip(img: Tensor) -> Tensor:\n \"\"\"Vertically flip the given the Image Tensor.\n\n Args:\n img (Tensor): Image Tensor to be flipped in the form [C, H, W].\n\n Returns:\n Tensor: Vertically flipped image Tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return img.flip(-2)\n\n\ndef hflip(img: Tensor) -> Tensor:\n \"\"\"Horizontally flip the given the Image Tensor.\n\n Args:\n img (Tensor): Image Tensor to be flipped in the form [C, H, W].\n\n Returns:\n Tensor: Horizontally flipped image Tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return img.flip(-1)\n\n\ndef crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:\n \"\"\"Crop the given Image Tensor.\n\n Args:\n img (Tensor): Image to be cropped in the form [..., H, W]. (0,0) denotes the top left corner of the image.\n top (int): Vertical component of the top left corner of the crop box.\n left (int): Horizontal component of the top left corner of the crop box.\n height (int): Height of the crop box.\n width (int): Width of the crop box.\n\n Returns:\n Tensor: Cropped image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n return img[..., top:top + height, left:left + width]\n\n\ndef rgb_to_grayscale(img: Tensor) -> Tensor:\n \"\"\"Convert the given RGB Image Tensor to Grayscale.\n For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which\n is L = R * 0.2989 + G * 0.5870 + B * 0.1140\n\n Args:\n img (Tensor): Image to be converted to Grayscale in the form [C, H, W].\n\n Returns:\n Tensor: Grayscale image.\n\n \"\"\"\n if img.shape[0] != 3:\n raise TypeError('Input Image does not contain 3 Channels')\n\n return (0.2989 * img[0] + 0.5870 * img[1] + 0.1140 * img[2]).to(img.dtype)\n\n\ndef adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:\n \"\"\"Adjust brightness of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n brightness_factor (float): How much to adjust the brightness. Can be\n any non negative number. 0 gives a black image, 1 gives the\n original image while 2 increases the brightness by a factor of 2.\n\n Returns:\n Tensor: Brightness adjusted image.\n \"\"\"\n if brightness_factor < 0:\n raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, torch.zeros_like(img), brightness_factor)\n\n\ndef adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:\n \"\"\"Adjust contrast of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n contrast_factor (float): How much to adjust the contrast. Can be any\n non negative number. 
0 gives a solid gray image, 1 gives the\n original image while 2 increases the contrast by a factor of 2.\n\n Returns:\n Tensor: Contrast adjusted image.\n \"\"\"\n if contrast_factor < 0:\n raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n mean = torch.mean(rgb_to_grayscale(img).to(torch.float))\n\n return _blend(img, mean, contrast_factor)\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjust hue of an image.\n\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n\n See `Hue`_ for more details.\n\n .. _Hue: https://en.wikipedia.org/wiki/Hue\n\n Args:\n img (Tensor): Image to be adjusted. Image type is either uint8 or float.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n\n Returns:\n Tensor: Hue adjusted image.\n \"\"\"\n if not (-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n orig_dtype = img.dtype\n if img.dtype == torch.uint8:\n img = img.to(dtype=torch.float32) / 255.0\n\n img = _rgb2hsv(img)\n h, s, v = img.unbind(0)\n h += hue_factor\n h = h % 1.0\n img = torch.stack((h, s, v))\n img_hue_adj = _hsv2rgb(img)\n\n if orig_dtype == torch.uint8:\n img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)\n\n return img_hue_adj\n\n\ndef adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:\n \"\"\"Adjust color saturation of an RGB image.\n\n Args:\n img (Tensor): Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. Can be any\n non negative number. 0 gives a black and white image, 1 gives the\n original image while 2 enhances the saturation by a factor of 2.\n\n Returns:\n Tensor: Saturation adjusted image.\n \"\"\"\n if saturation_factor < 0:\n raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, rgb_to_grayscale(img), saturation_factor)\n\n\ndef adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:\n r\"\"\"Adjust gamma of an RGB image.\n\n Also known as Power Law Transform. Intensities in RGB mode are adjusted\n based on the following equation:\n\n .. math::\n `I_{\\text{out}} = 255 \\times \\text{gain} \\times \\left(\\frac{I_{\\text{in}}}{255}\\right)^{\\gamma}`\n\n See `Gamma Correction`_ for more details.\n\n .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction\n\n Args:\n img (Tensor): Tensor of RBG values to be adjusted.\n gamma (float): Non negative real number, same as :math:`\\gamma` in the equation.\n gamma larger than 1 make the shadows darker,\n while gamma smaller than 1 make dark regions lighter.\n gain (float): The constant multiplier.\n \"\"\"\n\n if not isinstance(img, torch.Tensor):\n raise TypeError('img should be a Tensor. 
Got {}'.format(type(img)))\n\n if gamma < 0:\n raise ValueError('Gamma should be a non-negative real number')\n\n result = img\n dtype = img.dtype\n if not torch.is_floating_point(img):\n result = result / 255.0\n\n result = (gain * result ** gamma).clamp(0, 1)\n\n if result.dtype != dtype:\n eps = 1e-3\n result = (255 + 1.0 - eps) * result\n result = result.to(dtype)\n return result\n\n\ndef center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n \"\"\"Crop the Image Tensor and resize it to desired size.\n\n Args:\n img (Tensor): Image to be cropped.\n output_size (sequence or int): (height, width) of the crop box. If int,\n it is used for both directions\n\n Returns:\n Tensor: Cropped image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n _, image_width, image_height = img.size()\n crop_height, crop_width = output_size\n # crop_top = int(round((image_height - crop_height) / 2.))\n # Result can be different between python func and scripted func\n # Temporary workaround:\n crop_top = int((image_height - crop_height + 1) * 0.5)\n # crop_left = int(round((image_width - crop_width) / 2.))\n # Result can be different between python func and scripted func\n # Temporary workaround:\n crop_left = int((image_width - crop_width + 1) * 0.5)\n\n return crop(img, crop_top, crop_left, crop_height, crop_width)\n\n\ndef five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:\n \"\"\"Crop the given Image Tensor into four corners and the central crop.\n .. Note::\n This transform returns a List of Tensors and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n img (Tensor): Image to be cropped.\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n List: List (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n\n _, image_width, image_height = img.size()\n crop_height, crop_width = size\n if crop_width > image_width or crop_height > image_height:\n msg = \"Requested crop size {} is bigger than input size {}\"\n raise ValueError(msg.format(size, (image_height, image_width)))\n\n tl = crop(img, 0, 0, crop_width, crop_height)\n tr = crop(img, image_width - crop_width, 0, image_width, crop_height)\n bl = crop(img, 0, image_height - crop_height, crop_width, image_height)\n br = crop(img, image_width - crop_width, image_height - crop_height, image_width, image_height)\n center = center_crop(img, (crop_height, crop_width))\n\n return [tl, tr, bl, br, center]\n\n\ndef ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:\n \"\"\"Crop the given Image Tensor into four corners and the central crop plus the\n flipped version of these (horizontal flipping is used by default).\n\n .. Note::\n This transform returns a List of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n img (Tensor): Image to be cropped.\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Returns:\n List: List (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)\n Corresponding top left, top right, bottom left, bottom right and center crop\n and same for the flipped image's tensor.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n first_five = five_crop(img, size)\n\n if vertical_flip:\n img = vflip(img)\n else:\n img = hflip(img)\n\n second_five = five_crop(img, size)\n\n return first_five + second_five\n\n\ndef _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:\n bound = 1 if img1.dtype in [torch.half, torch.float32, torch.float64] else 255\n return (ratio * img1 + (1 - ratio) * img2).clamp(0, bound).to(img1.dtype)\n\n\ndef _rgb2hsv(img):\n r, g, b = img.unbind(0)\n\n maxc = torch.max(img, dim=0).values\n minc = torch.min(img, dim=0).values\n\n # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN\n # from happening in the results, because\n # + S channel has division by `maxc`, which is zero only if `maxc = minc`\n # + H channel has division by `(maxc - minc)`.\n #\n # Instead of overwriting NaN afterwards, we just prevent it from occuring so\n # we don't need to deal with it in case we save the NaN in a buffer in\n # backprop, if it is ever supported, but it doesn't hurt to do so.\n eqc = maxc == minc\n\n cr = maxc - minc\n # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine.\n s = cr / torch.where(eqc, maxc.new_ones(()), maxc)\n # Note that `eqc => maxc = minc = r = g = b`. So the following calculation\n # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it\n # would not matter what values `rc`, `gc`, and `bc` have here, and thus\n # replacing denominator with 1 when `eqc` is fine.\n cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)\n rc = (maxc - r) / cr_divisor\n gc = (maxc - g) / cr_divisor\n bc = (maxc - b) / cr_divisor\n\n hr = (maxc == r) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)\n hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)\n h = (hr + hg + hb)\n h = torch.fmod((h / 6.0 + 1.0), 1.0)\n return torch.stack((h, s, maxc))\n\n\ndef _hsv2rgb(img):\n h, s, v = img.unbind(0)\n i = torch.floor(h * 6.0)\n f = (h * 6.0) - i\n i = i.to(dtype=torch.int32)\n\n p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)\n q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)\n t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)\n i = i % 6\n\n mask = i == torch.arange(6)[:, None, None]\n\n a1 = torch.stack((v, q, p, p, t, v))\n a2 = torch.stack((t, v, v, q, p, p))\n a3 = torch.stack((p, p, t, v, v, q))\n a4 = torch.stack((a1, a2, a3))\n\n return torch.einsum(\"ijk, xijk -> xjk\", mask.to(dtype=img.dtype), a4)\n\n\ndef _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:\n # padding is left, right, top, bottom\n in_sizes = img.size()\n\n x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]\n left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]\n right_indices = [-(i + 1) for i in range(padding[1])] # e.g. 
[-1, -2, -3]\n x_indices = torch.tensor(left_indices + x_indices + right_indices)\n\n y_indices = [i for i in range(in_sizes[-2])]\n top_indices = [i for i in range(padding[2] - 1, -1, -1)]\n bottom_indices = [-(i + 1) for i in range(padding[3])]\n y_indices = torch.tensor(top_indices + y_indices + bottom_indices)\n\n ndim = img.ndim\n if ndim == 3:\n return img[:, y_indices[:, None], x_indices[None, :]]\n elif ndim == 4:\n return img[:, :, y_indices[:, None], x_indices[None, :]]\n else:\n raise RuntimeError(\"Symmetric padding of N-D tensors are not supported yet\")\n\n\ndef pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = \"constant\") -> Tensor:\n r\"\"\"Pad the given Tensor Image on all sides with specified padding mode and fill value.\n\n Args:\n img (Tensor): Image to be padded.\n padding (int or tuple or list): Padding on each border. If a single int is provided this\n is used to pad all borders. If a tuple or list of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple or list of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively. In torchscript mode padding as single int is not supported, use a tuple or\n list of length 1: ``[padding, ]``.\n fill (int): Pixel fill value for constant fill. Default is 0.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge or reflect. Default is constant.\n Mode symmetric is not yet supported for Tensor inputs.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n Tensor: Padded image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n if not isinstance(padding, (int, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n if not isinstance(fill, (int, float)):\n raise TypeError(\"Got inappropriate fill arg\")\n if not isinstance(padding_mode, str):\n raise TypeError(\"Got inappropriate padding_mode arg\")\n\n if isinstance(padding, tuple):\n padding = list(padding)\n\n if isinstance(padding, list) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, int):\n if torch.jit.is_scripting():\n # This maybe unreachable\n raise ValueError(\"padding can't be an int while torchscripting, set it as a list [value, ]\")\n pad_left = pad_right = pad_top = pad_bottom = padding\n elif len(padding) == 1:\n pad_left = pad_right = pad_top = pad_bottom = padding[0]\n elif len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n else:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n p = [pad_left, pad_right, pad_top, 
pad_bottom]\n\n if padding_mode == \"edge\":\n # remap padding_mode str\n padding_mode = \"replicate\"\n elif padding_mode == \"symmetric\":\n # route to another implementation\n if p[0] < 0 or p[1] < 0 or p[2] < 0 or p[3] < 0: # no any support for torch script\n raise ValueError(\"Padding can not be negative for symmetric padding_mode\")\n return _pad_symmetric(img, p)\n\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n out_dtype = img.dtype\n need_cast = False\n if (padding_mode != \"constant\") and img.dtype not in (torch.float32, torch.float64):\n # Here we temporary cast input tensor to float\n # until pytorch issue is resolved :\n # https://github.com/pytorch/pytorch/issues/40763\n need_cast = True\n img = img.to(torch.float32)\n\n img = torch.nn.functional.pad(img, p, mode=padding_mode, value=float(fill))\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n img = img.to(out_dtype)\n\n return img\n\n\ndef resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:\n r\"\"\"Resize the input Tensor to the given size.\n\n Args:\n img (Tensor): Image to be resized.\n size (int or tuple or list): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. If size is an int,\n the smaller edge of the image will be matched to this number maintaining\n the aspect ratio. i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`.\n In torchscript mode padding as a single int is not supported, use a tuple or\n list of length 1: ``[size, ]``.\n interpolation (int, optional): Desired interpolation. Default is bilinear (=2). Other supported values:\n nearest(=0) and bicubic(=3).\n\n Returns:\n Tensor: Resized image.\n \"\"\"\n if not _is_tensor_a_torch_image(img):\n raise TypeError(\"tensor is not a torch image.\")\n\n if not isinstance(size, (int, tuple, list)):\n raise TypeError(\"Got inappropriate size arg\")\n if not isinstance(interpolation, int):\n raise TypeError(\"Got inappropriate interpolation arg\")\n\n _interpolation_modes = {\n 0: \"nearest\",\n 2: \"bilinear\",\n 3: \"bicubic\",\n }\n\n if interpolation not in _interpolation_modes:\n raise ValueError(\"This interpolation mode is unsupported with Tensor input\")\n\n if isinstance(size, tuple):\n size = list(size)\n\n if isinstance(size, list) and len(size) not in [1, 2]:\n raise ValueError(\"Size must be an int or a 1 or 2 element tuple/list, not a \"\n \"{} element tuple/list\".format(len(size)))\n\n w, h = _get_image_size(img)\n\n if isinstance(size, int):\n size_w, size_h = size, size\n elif len(size) < 2:\n size_w, size_h = size[0], size[0]\n else:\n size_w, size_h = size[1], size[0] # Convention (h, w)\n\n if isinstance(size, int) or len(size) < 2:\n if w < h:\n size_h = int(size_w * h / w)\n else:\n size_w = int(size_h * w / h)\n\n if (w <= h and w == size_w) or (h <= w and h == size_h):\n return img\n\n # make image NCHW\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n mode = _interpolation_modes[interpolation]\n\n out_dtype = img.dtype\n need_cast = False\n if img.dtype not in (torch.float32, torch.float64):\n need_cast = True\n img = img.to(torch.float32)\n\n # Define align_corners to avoid warnings\n align_corners = False if mode in [\"bilinear\", \"bicubic\"] else None\n\n img = torch.nn.functional.interpolate(img, size=(size_h, size_w), mode=mode, 
align_corners=align_corners)\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n if mode == \"bicubic\":\n img = img.clamp(min=0, max=255)\n img = img.to(out_dtype)\n\n return img\n\n\ndef affine(\n img: Tensor, matrix: List[float], resample: int = 0, fillcolor: Optional[int] = None\n) -> Tensor:\n \"\"\"Apply affine transformation on the Tensor image keeping image center invariant.\n\n Args:\n img (Tensor): image to be rotated.\n matrix (list of floats): list of 6 float values representing inverse matrix for affine transformation.\n resample (int, optional): An optional resampling filter. Default is nearest (=2). Other supported values:\n bilinear(=2).\n fillcolor (int, optional): this option is not supported for Tensor input. Fill value for the area outside the\n transform in the output image is always 0.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):\n raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))\n\n if fillcolor is not None:\n warnings.warn(\"Argument fillcolor is not supported for Tensor input. Fill value is zero\")\n\n _interpolation_modes = {\n 0: \"nearest\",\n 2: \"bilinear\",\n }\n\n if resample not in _interpolation_modes:\n raise ValueError(\"This resampling mode is unsupported with Tensor input\")\n\n theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)\n shape = img.shape\n grid = affine_grid(theta, size=(1, shape[-3], shape[-2], shape[-1]), align_corners=False)\n\n # make image NCHW\n need_squeeze = False\n if img.ndim < 4:\n img = img.unsqueeze(dim=0)\n need_squeeze = True\n\n mode = _interpolation_modes[resample]\n\n out_dtype = img.dtype\n need_cast = False\n if img.dtype not in (torch.float32, torch.float64):\n need_cast = True\n img = img.to(torch.float32)\n\n img = grid_sample(img, grid, mode=mode, padding_mode=\"zeros\", align_corners=False)\n\n if need_squeeze:\n img = img.squeeze(dim=0)\n\n if need_cast:\n # it is better to round before cast\n img = torch.round(img).to(out_dtype)\n\n return img\n", "path": "torchvision/transforms/functional_tensor.py" } ]
diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 2e3477ad12b..d01a357d7b5 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -337,7 +337,7 @@ def test_resize(self): if dt is not None: # This is a trivial cast to float of uint8 data to test all cases tensor = tensor.to(dt) - for size in [32, [32, ], [32, 32], (32, 32), ]: + for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]: for interpolation in [BILINEAR, BICUBIC, NEAREST]: resized_tensor = F_t.resize(tensor, size=size, interpolation=interpolation) resized_pil_img = F_pil.resize(pil_img, size=size, interpolation=interpolation) diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index e05044599ef..e2085b0aaab 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -226,7 +226,7 @@ def test_resize(self): if dt is not None: # This is a trivial cast to float of uint8 data to test all cases tensor = tensor.to(dt) - for size in [32, [32, ], [32, 32], (32, 32), ]: + for size in [32, 34, [32, ], [32, 32], (32, 32), [34, 35]]: for interpolation in [BILINEAR, BICUBIC, NEAREST]: resized_tensor = F.resize(tensor, size=size, interpolation=interpolation) @@ -250,7 +250,7 @@ def test_resized_crop(self): for scale in [(0.7, 1.2), [0.7, 1.2]]: for ratio in [(0.75, 1.333), [0.75, 1.333]]: - for size in [(32, ), [32, ], [32, 32], (32, 32)]: + for size in [(32, ), [44, ], [32, ], [32, 32], (32, 32), [44, 55]]: for interpolation in [NEAREST, BILINEAR, BICUBIC]: transform = T.RandomResizedCrop( size=size, scale=scale, ratio=ratio, interpolation=interpolation diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py index f2e47b056d3..357f23b88fc 100644 --- a/torchvision/transforms/functional_tensor.py +++ b/torchvision/transforms/functional_tensor.py @@ -586,8 +586,8 @@ def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor: else: size_w = int(size_h * w / h) - if (w <= h and w == size_w) or (h <= w and h == size_h): - return img + if (w <= h and w == size_w) or (h <= w and h == size_h): + return img # make image NCHW need_squeeze = False
redis__redis-py-2674
Canceling async Redis command leaves connection open, in unsafe state for future commands **Version**: 4.5.3 **Platform**: Python 3.8 on Ubuntu / Generic **Description**: Canceling async Redis command leaves connection open, in unsafe state for future commands This is a reincarnation of #2624, which was closed with an incomplete fix and a possibly unreliable test case. This is the same issue that recently got a lot of attention due to ChatGPT outage, and that remains only partially fixed. The cancellation shielding introduced in #2641 addressed only the cancellation of Redis pipeline operation, but non-pipelined ops are still vulnerable. This time I am attaching a script that reproduces the issue reliably without relying on an external, slow Redis server. This is achieved by inserting a small TCP socket proxy between the Redis client and local Redis server, with the proxy introducing a 0.1 second delay when sending data in either direction. Running this script with a Redis server running locally on port 6379 produces the following output: ``` $ python redis_cancel.py managed to cancel the task, connection is left open with unread response bar: b'foo' ping: False foo: b'PONG' ``` ```python import asyncio from redis.asyncio import Redis async def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, delay: float, name=''): while data := await reader.read(1000): # print(name, 'received:', data) await asyncio.sleep(delay) writer.write(data) await writer.drain() class DelayProxy: def __init__(self, addr, redis_addr, delay: float): self.addr = addr self.redis_addr = redis_addr self.delay = delay async def start(self): server = await asyncio.start_server(self.handle, *self.addr) asyncio.create_task(server.serve_forever()) async def handle(self, reader, writer): # establish connection to redis redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr) pipe1 = asyncio.create_task(pipe(reader, redis_writer, self.delay, 'to redis:')) pipe2 = asyncio.create_task(pipe(redis_reader, writer, self.delay, 'from redis:')) await asyncio.gather(pipe1, pipe2) async def main(): # create a tcp socket proxy that relays data to Redis and back, inserting 0.1 seconds of delay dp = DelayProxy(addr=('localhost', 6380), redis_addr=('localhost', 6379), delay=0.1) await dp.start() # note that we connect to proxy, rather than to Redis directly async with Redis(host='localhost', port=6380) as r: await r.set('foo', 'foo') await r.set('bar', 'bar') t = asyncio.create_task(r.get('foo')) await asyncio.sleep(0.050) t.cancel() try: await t print('try again, we did not cancel the task in time') except asyncio.CancelledError: print('managed to cancel the task, connection is left open with unread response') print('bar:', await r.get('bar')) print('ping:', await r.ping()) print('foo:', await r.get('foo')) if __name__ == '__main__': asyncio.run(main()) ```
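Until command execution is shielded inside the client itself, a hedged workaround is to make sure a cancelled command can never leave a half-read reply on a pooled connection. The sketch below assumes nothing beyond the public API already used in the repro script; `get_with_cleanup` is an invented helper name, and the cost of the workaround is that the pool's connections are re-established after every cancellation.

```python
import asyncio

from redis.asyncio import Redis


async def get_with_cleanup(r: Redis, key: str):
    # Defensive wrapper (sketch): if the caller cancels us mid-command, the
    # reply for GET may still be in flight, so drop the pooled connections
    # rather than let the next command read a stale response.
    try:
        return await r.get(key)
    except asyncio.CancelledError:
        await r.connection_pool.disconnect()
        raise
```

In the repro above, replacing `r.get('foo')` with `get_with_cleanup(r, 'foo')` should keep the follow-up `get('bar')` and `ping()` calls from receiving each other's replies.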
[ { "content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=\"4.5.3\",\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.asyncio\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n project_urls={\n \"Documentation\": \"https://redis.readthedocs.io/en/latest/\",\n \"Changes\": \"https://github.com/redis/redis-py/releases\",\n \"Code\": \"https://github.com/redis/redis-py\",\n \"Issue tracker\": \"https://github.com/redis/redis-py/issues\",\n },\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n install_requires=[\n 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n 'typing-extensions; python_version<\"3.8\"',\n 'async-timeout>=4.0.2; python_version<=\"3.11.2\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n \"ocsp\": [\"cryptography>=36.0.1\", \"pyopenssl==20.0.1\", \"requests>=2.26.0\"],\n },\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=\"4.5.4\",\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.asyncio\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n project_urls={\n \"Documentation\": \"https://redis.readthedocs.io/en/latest/\",\n \"Changes\": \"https://github.com/redis/redis-py/releases\",\n \"Code\": \"https://github.com/redis/redis-py\",\n \"Issue tracker\": \"https://github.com/redis/redis-py/issues\",\n },\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n install_requires=[\n 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n 'typing-extensions; python_version<\"3.8\"',\n 'async-timeout>=4.0.2; python_version<=\"3.11.2\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n \"ocsp\": [\"cryptography>=36.0.1\", \"pyopenssl==20.0.1\", \"requests>=2.26.0\"],\n },\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3b7347da03..c23038cba7 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ long_description_content_type="text/markdown", keywords=["Redis", "key-value store", "database"], license="MIT", - version="4.5.3", + version="4.5.4", packages=find_packages( include=[ "redis",
buildbot__buildbot-6680
p4port is not renderable Hello, I'm trying to override p4port per-client. I need this because we want some workers to connect to a local perforce proxy but still use the actual perforce server by default. Basically something like this: ``` P4PORT = 'perforce.company.com:1666' P4PORT_PROXY = 'proxy.lan:1666' c = BuildmasterConfig = {} c['workers'] = [ worker.Worker("worker1", "password1"), worker.Worker("worker2", "password2", properties={'P4PORT': P4PORT_PROXY}), ] factory = util.BuildFactory() factory.addStep(steps.P4(p4port=util.Property('P4PORT', default=P4PORT), p4user=P4USER, p4base='//depot', p4branch=getBranch, stream=True, p4client=getClientName, p4client_spec_options='rmdir clobber', mode='incremental')) ... ``` This doesn't work because p4port is not a renderable: > Perforce client error: > Connect to server failed; check $P4PORT. > TCP connect to Unpersistable("Unpersistable data: b'instance of class buildbot.process.properties.Property deemed insecure'") failed. > The specified class was not found. This is similar to #3954 (`p4passwd` was not renderable).
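As a stopgap until `p4port` is accepted as a renderable upstream, one hedged workaround is to subclass the step and extend its `renderables` list yourself, mirroring how `p4passwd` is already handled; `RenderableP4` is an invented name used only for illustration.

```python
from buildbot.steps.source.p4 import P4


class RenderableP4(P4):
    # Extend the declared renderables so an IRenderable passed as p4port
    # (e.g. util.Property) is rendered per-build instead of being shipped
    # to the worker as an unpersistable Property instance.
    renderables = P4.renderables + ['p4port']
```

With a subclass like this, the configuration above can keep `p4port=util.Property('P4PORT', default=P4PORT)` unchanged.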
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Portions Copyright 2013 Bad Dog Consulting\n\nimport re\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import interfaces\nfrom buildbot.interfaces import WorkerSetupError\nfrom buildbot.process import buildstep\nfrom buildbot.process import remotecommand\nfrom buildbot.process import results\nfrom buildbot.process.properties import Interpolate\nfrom buildbot.steps.source import Source\n\n# Notes:\n# see\n# http://perforce.com/perforce/doc.current/manuals/cmdref/o.gopts.html#1040647\n# for getting p4 command to output marshalled python dictionaries as output\n# for commands.\n# Perhaps switch to using 'p4 -G' : From URL above:\n# -G Causes all output (and batch input for form commands with -i) to be\n# formatted as marshalled Python dictionary objects. This is most often used\n# when scripting.\n\n\nclass P4(Source):\n\n \"\"\"Perform Perforce checkout/update operations.\"\"\"\n\n name = 'p4'\n\n renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd']\n possible_modes = ('incremental', 'full')\n\n def __init__(self, mode='incremental',\n method=None, p4base=None, p4branch=None,\n p4port=None, p4user=None,\n p4passwd=None, p4extra_views=(), p4line_end='local',\n p4viewspec=None, p4viewspec_suffix='...',\n p4client=Interpolate(\n 'buildbot_%(prop:workername)s_%(prop:buildername)s'),\n p4client_spec_options='allwrite rmdir',\n p4extra_args=None,\n p4bin='p4',\n use_tickets=False,\n stream=False,\n debug=False,\n **kwargs):\n self.method = method\n self.mode = mode\n self.p4branch = p4branch\n self.p4bin = p4bin\n self.p4base = p4base\n self.p4port = p4port\n self.p4user = p4user\n self.p4passwd = p4passwd\n self.p4extra_views = p4extra_views\n self.p4viewspec = p4viewspec\n self.p4viewspec_suffix = p4viewspec_suffix\n self.p4line_end = p4line_end\n self.p4client = p4client\n self.p4client_spec_options = p4client_spec_options\n self.p4extra_args = p4extra_args\n self.use_tickets = use_tickets\n self.stream = stream\n self.debug = debug\n\n super().__init__(**kwargs)\n\n if self.mode not in self.possible_modes and \\\n not interfaces.IRenderable.providedBy(self.mode):\n config.error(f\"mode {self.mode} is not an IRenderable, or one of {self.possible_modes}\")\n\n if not p4viewspec and p4base is None:\n config.error(\"You must provide p4base or p4viewspec\")\n\n if p4viewspec and (p4base or p4branch or p4extra_views):\n config.error(\n \"Either provide p4viewspec or p4base and p4branch (and optionally p4extra_views)\")\n\n if p4viewspec and isinstance(p4viewspec, str):\n config.error(\n \"p4viewspec must not be a string, and should be a sequence of 2 element sequences\")\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and not p4base.startswith('/'):\n 
config.error(f'p4base should start with // [p4base = {p4base}]')\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and p4base.endswith('/'):\n config.error(f'p4base should not end with a trailing / [p4base = {p4base}]')\n\n if not interfaces.IRenderable.providedBy(p4branch) and p4branch and p4branch.endswith('/'):\n config.error(f'p4branch should not end with a trailing / [p4branch = {p4branch}]')\n\n if stream:\n if (p4extra_views or p4viewspec):\n config.error('You can\\'t use p4extra_views not p4viewspec with stream')\n if not p4base or not p4branch:\n config.error('You must specify both p4base and p4branch when using stream')\n if not interfaces.IRenderable.providedBy(p4base) and \" \" in p4base:\n config.error('p4base must not contain any whitespace')\n if not interfaces.IRenderable.providedBy(p4branch) and \" \" in p4branch:\n config.error('p4branch must not contain any whitespace')\n\n if self.p4client_spec_options is None:\n self.p4client_spec_options = ''\n\n @defer.inlineCallbacks\n def run_vc(self, branch, revision, patch):\n if self.debug:\n log.msg('in run_vc')\n\n self.revision = revision\n self.method = self._getMethod()\n self.stdio_log = yield self.addLogForRemoteCommands(\"stdio\")\n\n installed = yield self.checkP4()\n if not installed:\n raise WorkerSetupError(\"p4 is not installed on worker\")\n\n # Try to obfuscate the password when used as an argument to commands.\n if self.p4passwd is not None:\n if not self.workerVersionIsOlderThan('shell', '2.16'):\n self.p4passwd_arg = ('obfuscated', self.p4passwd, 'XXXXXX')\n else:\n self.p4passwd_arg = self.p4passwd\n log.msg(\"Worker does not understand obfuscation; \"\n \"p4 password will be logged\")\n\n if self.use_tickets and self.p4passwd:\n yield self._acquireTicket()\n\n yield self._getAttrGroupMember('mode', self.mode)()\n yield self.parseGotRevision()\n return results.SUCCESS\n\n @defer.inlineCallbacks\n def mode_full(self):\n if self.debug:\n log.msg(\"P4:full()..\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # Then p4 sync #none\n yield self._dovccmd(['sync', '#none'])\n\n # Then remove directory.\n yield self.runRmdir(self.workdir)\n\n # Then we need to sync the client\n if self.revision:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s changeset:%d\",\n self._getP4BaseForLog(), int(self.revision))\n yield self._dovccmd(['sync', f'{self._getP4BaseForCommand()}...@{int(self.revision)}'],\n collectStdout=True)\n else:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s no revision\",\n self._getP4BaseForLog())\n yield self._dovccmd(['sync'], collectStdout=True)\n\n if self.debug:\n log.msg(\"P4: full() sync done.\")\n\n @defer.inlineCallbacks\n def mode_incremental(self):\n if self.debug:\n log.msg(\"P4:incremental()\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # and plan to do a checkout\n command = ['sync', ]\n\n if self.revision:\n command.extend([f'{self._getP4BaseForCommand()}...@{int(self.revision)}'])\n\n if self.debug:\n log.msg(\n \"P4:incremental() command:%s revision:%s\", command, self.revision)\n yield self._dovccmd(command)\n\n def _getP4BaseForLog(self):\n return self.p4base or '<custom viewspec>'\n\n def _getP4BaseForCommand(self):\n return self.p4base or ''\n\n def _buildVCCommand(self, doCommand):\n assert doCommand, \"No command specified\"\n\n command = [self.p4bin, ]\n\n if self.p4port:\n command.extend(['-p', self.p4port])\n if self.p4user:\n command.extend(['-u', 
self.p4user])\n if not self.use_tickets and self.p4passwd:\n command.extend(['-P', self.p4passwd_arg])\n if self.p4client:\n command.extend(['-c', self.p4client])\n\n # Only add the extra arguments for the `sync` command.\n if doCommand[0] == 'sync' and self.p4extra_args:\n command.extend(self.p4extra_args)\n\n command.extend(doCommand)\n return command\n\n @defer.inlineCallbacks\n def _dovccmd(self, command, collectStdout=False, initialStdin=None):\n command = self._buildVCCommand(command)\n\n if self.debug:\n log.msg(f\"P4:_dovccmd():workdir->{self.workdir}\")\n\n cmd = remotecommand.RemoteShellCommand(self.workdir, command,\n env=self.env,\n logEnviron=self.logEnviron,\n timeout=self.timeout,\n collectStdout=collectStdout,\n initialStdin=initialStdin,)\n cmd.useLog(self.stdio_log, False)\n if self.debug:\n log.msg(f'Starting p4 command : p4 {\" \".join(command)}')\n\n yield self.runCommand(cmd)\n\n if cmd.rc != 0:\n if self.debug:\n log.msg(f\"P4:_dovccmd():Source step failed while running command {cmd}\")\n raise buildstep.BuildStepFailed()\n if collectStdout:\n return cmd.stdout\n return cmd.rc\n\n def _getMethod(self):\n if self.method is not None and self.mode != 'incremental':\n return self.method\n elif self.mode == 'incremental':\n return None\n elif self.method is None and self.mode == 'full':\n return 'fresh'\n return None\n\n @defer.inlineCallbacks\n def _createClientSpec(self):\n builddir = self.getProperty('builddir')\n\n if self.debug:\n log.msg(f\"P4:_createClientSpec() builddir:{builddir}\")\n log.msg(f\"P4:_createClientSpec() SELF.workdir:{self.workdir}\")\n\n prop_dict = self.getProperties().asDict()\n prop_dict['p4client'] = self.p4client\n\n root = self.build.path_module.normpath(self.build.path_module.join(builddir, self.workdir))\n client_spec = ''\n client_spec += f\"Client: {self.p4client}\\n\\n\"\n client_spec += f\"Owner: {self.p4user}\\n\\n\"\n client_spec += f\"Description:\\n\\tCreated by {self.p4user}\\n\\n\"\n client_spec += f\"Root:\\t{root}\\n\\n\"\n client_spec += f\"Options:\\t{self.p4client_spec_options}\\n\\n\"\n if self.p4line_end:\n client_spec += f\"LineEnd:\\t{self.p4line_end}\\n\\n\"\n else:\n client_spec += \"LineEnd:\\tlocal\\n\\n\"\n\n # Perforce generates the view for stream-associated workspaces\n if self.stream:\n client_spec += f\"Stream:\\t{self.p4base}/{self.p4branch}\\n\"\n else:\n # Setup a view\n client_spec += \"View:\\n\"\n\n def has_whitespace(*args):\n return any(re.search(r'\\s', i) for i in args if i is not None)\n\n if self.p4viewspec:\n # uses only p4viewspec array of tuples to build view\n # If the user specifies a viewspec via an array of tuples then\n # Ignore any specified p4base,p4branch, and/or p4extra_views\n suffix = self.p4viewspec_suffix or ''\n for k, v in self.p4viewspec:\n if self.debug:\n log.msg(f'P4:_createClientSpec():key:{k} value:{v}')\n\n qa = '\"' if has_whitespace(k, suffix) else ''\n qb = '\"' if has_whitespace(self.p4client, v, suffix) else ''\n client_spec += f'\\t{qa}{k}{suffix}{qa} {qb}//{self.p4client}/{v}{suffix}{qb}\\n'\n else:\n # Uses p4base, p4branch, p4extra_views\n\n qa = '\"' if has_whitespace(self.p4base, self.p4branch) else ''\n\n client_spec += f\"\\t{qa}{self.p4base}\"\n\n if self.p4branch:\n client_spec += f\"/{self.p4branch}\"\n\n client_spec += f\"/...{qa} \"\n\n qb = '\"' if has_whitespace(self.p4client) else ''\n client_spec += f\"{qb}//{self.p4client}/...{qb}\\n\"\n\n if self.p4extra_views:\n for k, v in self.p4extra_views:\n qa = '\"' if has_whitespace(k) else ''\n qb = '\"' if 
has_whitespace(k, self.p4client, v) else ''\n\n client_spec += f\"\\t{qa}{k}/...{qa} {qb}//{self.p4client}/{v}/...{qb}\\n\"\n\n if self.debug:\n log.msg(client_spec)\n\n stdout = yield self._dovccmd(['client', '-i'], collectStdout=True, initialStdin=client_spec)\n mo = re.search(r'Client (\\S+) (.+)$', stdout, re.M)\n return mo and (mo.group(2) == 'saved.' or mo.group(2) == 'not changed.')\n\n @defer.inlineCallbacks\n def _acquireTicket(self):\n if self.debug:\n log.msg(\"P4:acquireTicket()\")\n\n # TODO: check first if the ticket is still valid?\n initialStdin = self.p4passwd + \"\\n\"\n yield self._dovccmd(['login'], initialStdin=initialStdin)\n\n @defer.inlineCallbacks\n def parseGotRevision(self):\n command = self._buildVCCommand(['changes', '-m1', '#have'])\n\n cmd = remotecommand.RemoteShellCommand(self.workdir, command,\n env=self.env,\n timeout=self.timeout,\n logEnviron=self.logEnviron,\n collectStdout=True)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n\n stdout = cmd.stdout.strip()\n # Example output from p4 changes -m1 #have\n # Change 212798 on 2012/04/13 by user@user-unix-bldng2 'change to\n # pickup build'\n revision = stdout.split()[1]\n try:\n int(revision)\n except ValueError as e:\n msg = (\"p4.parseGotRevision unable to parse output \"\n f\"of 'p4 changes -m1 \\\"#have\\\"': '{stdout}'\")\n log.msg(msg)\n raise buildstep.BuildStepFailed() from e\n\n if self.debug:\n log.msg(f\"Got p4 revision {revision}\")\n self.updateSourceProperty('got_revision', revision)\n\n @defer.inlineCallbacks\n def purge(self, ignore_ignores):\n \"\"\"Delete everything that shown up on status.\"\"\"\n command = ['sync', '#none']\n if ignore_ignores:\n command.append('--no-ignore')\n yield self._dovccmd(command, collectStdout=True)\n # FIXME: do the following comments need addressing?\n # add deferred to rm tree\n # then add defer to sync to revision\n\n @defer.inlineCallbacks\n def checkP4(self):\n cmd = remotecommand.RemoteShellCommand(self.workdir, [self.p4bin, '-V'],\n env=self.env,\n logEnviron=self.logEnviron)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n return cmd.rc == 0\n\n def computeSourceRevision(self, changes):\n if not changes or None in [c.revision for c in changes]:\n return None\n lastChange = max(int(c.revision) for c in changes)\n return lastChange\n", "path": "master/buildbot/steps/source/p4.py" } ]
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Portions Copyright 2013 Bad Dog Consulting\n\nimport re\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import interfaces\nfrom buildbot.interfaces import WorkerSetupError\nfrom buildbot.process import buildstep\nfrom buildbot.process import remotecommand\nfrom buildbot.process import results\nfrom buildbot.process.properties import Interpolate\nfrom buildbot.steps.source import Source\n\n# Notes:\n# see\n# http://perforce.com/perforce/doc.current/manuals/cmdref/o.gopts.html#1040647\n# for getting p4 command to output marshalled python dictionaries as output\n# for commands.\n# Perhaps switch to using 'p4 -G' : From URL above:\n# -G Causes all output (and batch input for form commands with -i) to be\n# formatted as marshalled Python dictionary objects. This is most often used\n# when scripting.\n\n\nclass P4(Source):\n\n \"\"\"Perform Perforce checkout/update operations.\"\"\"\n\n name = 'p4'\n\n renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd', 'p4port']\n possible_modes = ('incremental', 'full')\n\n def __init__(self, mode='incremental',\n method=None, p4base=None, p4branch=None,\n p4port=None, p4user=None,\n p4passwd=None, p4extra_views=(), p4line_end='local',\n p4viewspec=None, p4viewspec_suffix='...',\n p4client=Interpolate(\n 'buildbot_%(prop:workername)s_%(prop:buildername)s'),\n p4client_spec_options='allwrite rmdir',\n p4extra_args=None,\n p4bin='p4',\n use_tickets=False,\n stream=False,\n debug=False,\n **kwargs):\n self.method = method\n self.mode = mode\n self.p4branch = p4branch\n self.p4bin = p4bin\n self.p4base = p4base\n self.p4port = p4port\n self.p4user = p4user\n self.p4passwd = p4passwd\n self.p4extra_views = p4extra_views\n self.p4viewspec = p4viewspec\n self.p4viewspec_suffix = p4viewspec_suffix\n self.p4line_end = p4line_end\n self.p4client = p4client\n self.p4client_spec_options = p4client_spec_options\n self.p4extra_args = p4extra_args\n self.use_tickets = use_tickets\n self.stream = stream\n self.debug = debug\n\n super().__init__(**kwargs)\n\n if self.mode not in self.possible_modes and \\\n not interfaces.IRenderable.providedBy(self.mode):\n config.error(f\"mode {self.mode} is not an IRenderable, or one of {self.possible_modes}\")\n\n if not p4viewspec and p4base is None:\n config.error(\"You must provide p4base or p4viewspec\")\n\n if p4viewspec and (p4base or p4branch or p4extra_views):\n config.error(\n \"Either provide p4viewspec or p4base and p4branch (and optionally p4extra_views)\")\n\n if p4viewspec and isinstance(p4viewspec, str):\n config.error(\n \"p4viewspec must not be a string, and should be a sequence of 2 element sequences\")\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and not p4base.startswith('/'):\n 
config.error(f'p4base should start with // [p4base = {p4base}]')\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and p4base.endswith('/'):\n config.error(f'p4base should not end with a trailing / [p4base = {p4base}]')\n\n if not interfaces.IRenderable.providedBy(p4branch) and p4branch and p4branch.endswith('/'):\n config.error(f'p4branch should not end with a trailing / [p4branch = {p4branch}]')\n\n if stream:\n if (p4extra_views or p4viewspec):\n config.error('You can\\'t use p4extra_views not p4viewspec with stream')\n if not p4base or not p4branch:\n config.error('You must specify both p4base and p4branch when using stream')\n if not interfaces.IRenderable.providedBy(p4base) and \" \" in p4base:\n config.error('p4base must not contain any whitespace')\n if not interfaces.IRenderable.providedBy(p4branch) and \" \" in p4branch:\n config.error('p4branch must not contain any whitespace')\n\n if self.p4client_spec_options is None:\n self.p4client_spec_options = ''\n\n @defer.inlineCallbacks\n def run_vc(self, branch, revision, patch):\n if self.debug:\n log.msg('in run_vc')\n\n self.revision = revision\n self.method = self._getMethod()\n self.stdio_log = yield self.addLogForRemoteCommands(\"stdio\")\n\n installed = yield self.checkP4()\n if not installed:\n raise WorkerSetupError(\"p4 is not installed on worker\")\n\n # Try to obfuscate the password when used as an argument to commands.\n if self.p4passwd is not None:\n if not self.workerVersionIsOlderThan('shell', '2.16'):\n self.p4passwd_arg = ('obfuscated', self.p4passwd, 'XXXXXX')\n else:\n self.p4passwd_arg = self.p4passwd\n log.msg(\"Worker does not understand obfuscation; \"\n \"p4 password will be logged\")\n\n if self.use_tickets and self.p4passwd:\n yield self._acquireTicket()\n\n yield self._getAttrGroupMember('mode', self.mode)()\n yield self.parseGotRevision()\n return results.SUCCESS\n\n @defer.inlineCallbacks\n def mode_full(self):\n if self.debug:\n log.msg(\"P4:full()..\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # Then p4 sync #none\n yield self._dovccmd(['sync', '#none'])\n\n # Then remove directory.\n yield self.runRmdir(self.workdir)\n\n # Then we need to sync the client\n if self.revision:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s changeset:%d\",\n self._getP4BaseForLog(), int(self.revision))\n yield self._dovccmd(['sync', f'{self._getP4BaseForCommand()}...@{int(self.revision)}'],\n collectStdout=True)\n else:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s no revision\",\n self._getP4BaseForLog())\n yield self._dovccmd(['sync'], collectStdout=True)\n\n if self.debug:\n log.msg(\"P4: full() sync done.\")\n\n @defer.inlineCallbacks\n def mode_incremental(self):\n if self.debug:\n log.msg(\"P4:incremental()\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # and plan to do a checkout\n command = ['sync', ]\n\n if self.revision:\n command.extend([f'{self._getP4BaseForCommand()}...@{int(self.revision)}'])\n\n if self.debug:\n log.msg(\n \"P4:incremental() command:%s revision:%s\", command, self.revision)\n yield self._dovccmd(command)\n\n def _getP4BaseForLog(self):\n return self.p4base or '<custom viewspec>'\n\n def _getP4BaseForCommand(self):\n return self.p4base or ''\n\n def _buildVCCommand(self, doCommand):\n assert doCommand, \"No command specified\"\n\n command = [self.p4bin, ]\n\n if self.p4port:\n command.extend(['-p', self.p4port])\n if self.p4user:\n command.extend(['-u', 
self.p4user])\n if not self.use_tickets and self.p4passwd:\n command.extend(['-P', self.p4passwd_arg])\n if self.p4client:\n command.extend(['-c', self.p4client])\n\n # Only add the extra arguments for the `sync` command.\n if doCommand[0] == 'sync' and self.p4extra_args:\n command.extend(self.p4extra_args)\n\n command.extend(doCommand)\n return command\n\n @defer.inlineCallbacks\n def _dovccmd(self, command, collectStdout=False, initialStdin=None):\n command = self._buildVCCommand(command)\n\n if self.debug:\n log.msg(f\"P4:_dovccmd():workdir->{self.workdir}\")\n\n cmd = remotecommand.RemoteShellCommand(self.workdir, command,\n env=self.env,\n logEnviron=self.logEnviron,\n timeout=self.timeout,\n collectStdout=collectStdout,\n initialStdin=initialStdin,)\n cmd.useLog(self.stdio_log, False)\n if self.debug:\n log.msg(f'Starting p4 command : p4 {\" \".join(command)}')\n\n yield self.runCommand(cmd)\n\n if cmd.rc != 0:\n if self.debug:\n log.msg(f\"P4:_dovccmd():Source step failed while running command {cmd}\")\n raise buildstep.BuildStepFailed()\n if collectStdout:\n return cmd.stdout\n return cmd.rc\n\n def _getMethod(self):\n if self.method is not None and self.mode != 'incremental':\n return self.method\n elif self.mode == 'incremental':\n return None\n elif self.method is None and self.mode == 'full':\n return 'fresh'\n return None\n\n @defer.inlineCallbacks\n def _createClientSpec(self):\n builddir = self.getProperty('builddir')\n\n if self.debug:\n log.msg(f\"P4:_createClientSpec() builddir:{builddir}\")\n log.msg(f\"P4:_createClientSpec() SELF.workdir:{self.workdir}\")\n\n prop_dict = self.getProperties().asDict()\n prop_dict['p4client'] = self.p4client\n\n root = self.build.path_module.normpath(self.build.path_module.join(builddir, self.workdir))\n client_spec = ''\n client_spec += f\"Client: {self.p4client}\\n\\n\"\n client_spec += f\"Owner: {self.p4user}\\n\\n\"\n client_spec += f\"Description:\\n\\tCreated by {self.p4user}\\n\\n\"\n client_spec += f\"Root:\\t{root}\\n\\n\"\n client_spec += f\"Options:\\t{self.p4client_spec_options}\\n\\n\"\n if self.p4line_end:\n client_spec += f\"LineEnd:\\t{self.p4line_end}\\n\\n\"\n else:\n client_spec += \"LineEnd:\\tlocal\\n\\n\"\n\n # Perforce generates the view for stream-associated workspaces\n if self.stream:\n client_spec += f\"Stream:\\t{self.p4base}/{self.p4branch}\\n\"\n else:\n # Setup a view\n client_spec += \"View:\\n\"\n\n def has_whitespace(*args):\n return any(re.search(r'\\s', i) for i in args if i is not None)\n\n if self.p4viewspec:\n # uses only p4viewspec array of tuples to build view\n # If the user specifies a viewspec via an array of tuples then\n # Ignore any specified p4base,p4branch, and/or p4extra_views\n suffix = self.p4viewspec_suffix or ''\n for k, v in self.p4viewspec:\n if self.debug:\n log.msg(f'P4:_createClientSpec():key:{k} value:{v}')\n\n qa = '\"' if has_whitespace(k, suffix) else ''\n qb = '\"' if has_whitespace(self.p4client, v, suffix) else ''\n client_spec += f'\\t{qa}{k}{suffix}{qa} {qb}//{self.p4client}/{v}{suffix}{qb}\\n'\n else:\n # Uses p4base, p4branch, p4extra_views\n\n qa = '\"' if has_whitespace(self.p4base, self.p4branch) else ''\n\n client_spec += f\"\\t{qa}{self.p4base}\"\n\n if self.p4branch:\n client_spec += f\"/{self.p4branch}\"\n\n client_spec += f\"/...{qa} \"\n\n qb = '\"' if has_whitespace(self.p4client) else ''\n client_spec += f\"{qb}//{self.p4client}/...{qb}\\n\"\n\n if self.p4extra_views:\n for k, v in self.p4extra_views:\n qa = '\"' if has_whitespace(k) else ''\n qb = '\"' if 
has_whitespace(k, self.p4client, v) else ''\n\n client_spec += f\"\\t{qa}{k}/...{qa} {qb}//{self.p4client}/{v}/...{qb}\\n\"\n\n if self.debug:\n log.msg(client_spec)\n\n stdout = yield self._dovccmd(['client', '-i'], collectStdout=True, initialStdin=client_spec)\n mo = re.search(r'Client (\\S+) (.+)$', stdout, re.M)\n return mo and (mo.group(2) == 'saved.' or mo.group(2) == 'not changed.')\n\n @defer.inlineCallbacks\n def _acquireTicket(self):\n if self.debug:\n log.msg(\"P4:acquireTicket()\")\n\n # TODO: check first if the ticket is still valid?\n initialStdin = self.p4passwd + \"\\n\"\n yield self._dovccmd(['login'], initialStdin=initialStdin)\n\n @defer.inlineCallbacks\n def parseGotRevision(self):\n command = self._buildVCCommand(['changes', '-m1', '#have'])\n\n cmd = remotecommand.RemoteShellCommand(self.workdir, command,\n env=self.env,\n timeout=self.timeout,\n logEnviron=self.logEnviron,\n collectStdout=True)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n\n stdout = cmd.stdout.strip()\n # Example output from p4 changes -m1 #have\n # Change 212798 on 2012/04/13 by user@user-unix-bldng2 'change to\n # pickup build'\n revision = stdout.split()[1]\n try:\n int(revision)\n except ValueError as e:\n msg = (\"p4.parseGotRevision unable to parse output \"\n f\"of 'p4 changes -m1 \\\"#have\\\"': '{stdout}'\")\n log.msg(msg)\n raise buildstep.BuildStepFailed() from e\n\n if self.debug:\n log.msg(f\"Got p4 revision {revision}\")\n self.updateSourceProperty('got_revision', revision)\n\n @defer.inlineCallbacks\n def purge(self, ignore_ignores):\n \"\"\"Delete everything that shown up on status.\"\"\"\n command = ['sync', '#none']\n if ignore_ignores:\n command.append('--no-ignore')\n yield self._dovccmd(command, collectStdout=True)\n # FIXME: do the following comments need addressing?\n # add deferred to rm tree\n # then add defer to sync to revision\n\n @defer.inlineCallbacks\n def checkP4(self):\n cmd = remotecommand.RemoteShellCommand(self.workdir, [self.p4bin, '-V'],\n env=self.env,\n logEnviron=self.logEnviron)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n return cmd.rc == 0\n\n def computeSourceRevision(self, changes):\n if not changes or None in [c.revision for c in changes]:\n return None\n lastChange = max(int(c.revision) for c in changes)\n return lastChange\n", "path": "master/buildbot/steps/source/p4.py" } ]
diff --git a/master/buildbot/steps/source/p4.py b/master/buildbot/steps/source/p4.py index 8aa679e7069d..0d37f1582969 100644 --- a/master/buildbot/steps/source/p4.py +++ b/master/buildbot/steps/source/p4.py @@ -45,7 +45,7 @@ class P4(Source): name = 'p4' - renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd'] + renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd', 'p4port'] possible_modes = ('incremental', 'full') def __init__(self, mode='incremental', diff --git a/newsfragments/p4port-renderable.feature b/newsfragments/p4port-renderable.feature new file mode 100644 index 000000000000..58ab81311091 --- /dev/null +++ b/newsfragments/p4port-renderable.feature @@ -0,0 +1 @@ +``p4port`` argument of the ``P4`` step has been marked renderable.
urllib3__urllib3-1017
Multipart request headers do not work properly for empty-string values

Continuing the discussion from https://github.com/sigmavirus24/requests-toolbelt/issues/162: creating a `RequestField` and then making it multipart via `make_multipart` does not work properly if the filename given is an empty string.

urllib3 test code:

```
from urllib3.fields import RequestField

field = RequestField(name="somename", data="somedata", filename="")
field.make_multipart(content_type="application/octet-stream")
print(field.headers)
```

Expected output:

```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"; filename=""'}
```

Actual output:

```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"'}
```
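The dropped `filename=""` comes from `_render_parts` filtering parameters on truthiness, so an empty string is discarded along with `None`. A hedged client-side workaround, pending a fix in urllib3 itself, is a small subclass that only skips `None`; `EmptyValueRequestField` is an invented name.

```python
from urllib3.fields import RequestField


class EmptyValueRequestField(RequestField):
    # Keep parameters whose value is an empty string (e.g. filename=""),
    # dropping only the ones that are actually None.
    def _render_parts(self, header_parts):
        iterable = header_parts.items() if isinstance(header_parts, dict) else header_parts
        return '; '.join(
            self._render_part(name, value)
            for name, value in iterable
            if value is not None
        )
```

Running the test code above with this subclass produces the expected `filename=""` in the Content-Disposition header.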
[ { "content": "from __future__ import absolute_import\nimport email.utils\nimport mimetypes\n\nfrom .packages import six\n\n\ndef guess_content_type(filename, default='application/octet-stream'):\n \"\"\"\n Guess the \"Content-Type\" of a file.\n\n :param filename:\n The filename to guess the \"Content-Type\" of using :mod:`mimetypes`.\n :param default:\n If no \"Content-Type\" can be guessed, default to `default`.\n \"\"\"\n if filename:\n return mimetypes.guess_type(filename)[0] or default\n return default\n\n\ndef format_header_param(name, value):\n \"\"\"\n Helper function to format and quote a single header parameter.\n\n Particularly useful for header parameters which might contain\n non-ASCII values, like file names. This follows RFC 2231, as\n suggested by RFC 2388 Section 4.4.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n if not any(ch in value for ch in '\"\\\\\\r\\n'):\n result = '%s=\"%s\"' % (name, value)\n try:\n result.encode('ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n return result\n if not six.PY3 and isinstance(value, six.text_type): # Python 2:\n value = value.encode('utf-8')\n value = email.utils.encode_rfc2231(value, 'utf-8')\n value = '%s*=%s' % (name, value)\n return value\n\n\nclass RequestField(object):\n \"\"\"\n A data container for request body parameters.\n\n :param name:\n The name of this request field.\n :param data:\n The data/value body.\n :param filename:\n An optional filename of the request field.\n :param headers:\n An optional dict-like object of headers to initially use for the field.\n \"\"\"\n def __init__(self, name, data, filename=None, headers=None):\n self._name = name\n self._filename = filename\n self.data = data\n self.headers = {}\n if headers:\n self.headers = dict(headers)\n\n @classmethod\n def from_tuples(cls, fieldname, value):\n \"\"\"\n A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.\n\n Supports constructing :class:`~urllib3.fields.RequestField` from\n parameter of key/value strings AND key/filetuple. A filetuple is a\n (filename, data, MIME type) tuple where the MIME type is optional.\n For example::\n\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n\n Field names and filenames must be unicode.\n \"\"\"\n if isinstance(value, tuple):\n if len(value) == 3:\n filename, data, content_type = value\n else:\n filename, data = value\n content_type = guess_content_type(filename)\n else:\n filename = None\n content_type = None\n data = value\n\n request_param = cls(fieldname, data, filename=filename)\n request_param.make_multipart(content_type=content_type)\n\n return request_param\n\n def _render_part(self, name, value):\n \"\"\"\n Overridable helper function to format a single header parameter.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n return format_header_param(name, value)\n\n def _render_parts(self, header_parts):\n \"\"\"\n Helper function to format and quote a single header.\n\n Useful for single headers that are composed of multiple items. 
E.g.,\n 'Content-Disposition' fields.\n\n :param header_parts:\n A sequence of (k, v) typles or a :class:`dict` of (k, v) to format\n as `k1=\"v1\"; k2=\"v2\"; ...`.\n \"\"\"\n parts = []\n iterable = header_parts\n if isinstance(header_parts, dict):\n iterable = header_parts.items()\n\n for name, value in iterable:\n if value:\n parts.append(self._render_part(name, value))\n\n return '; '.join(parts)\n\n def render_headers(self):\n \"\"\"\n Renders the headers for this request field.\n \"\"\"\n lines = []\n\n sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']\n for sort_key in sort_keys:\n if self.headers.get(sort_key, False):\n lines.append('%s: %s' % (sort_key, self.headers[sort_key]))\n\n for header_name, header_value in self.headers.items():\n if header_name not in sort_keys:\n if header_value:\n lines.append('%s: %s' % (header_name, header_value))\n\n lines.append('\\r\\n')\n return '\\r\\n'.join(lines)\n\n def make_multipart(self, content_disposition=None, content_type=None,\n content_location=None):\n \"\"\"\n Makes this request field into a multipart request field.\n\n This method overrides \"Content-Disposition\", \"Content-Type\" and\n \"Content-Location\" headers to the request parameter.\n\n :param content_type:\n The 'Content-Type' of the request body.\n :param content_location:\n The 'Content-Location' of the request body.\n\n \"\"\"\n self.headers['Content-Disposition'] = content_disposition or 'form-data'\n self.headers['Content-Disposition'] += '; '.join([\n '', self._render_parts(\n (('name', self._name), ('filename', self._filename))\n )\n ])\n self.headers['Content-Type'] = content_type\n self.headers['Content-Location'] = content_location\n", "path": "urllib3/fields.py" } ]
[ { "content": "from __future__ import absolute_import\nimport email.utils\nimport mimetypes\n\nfrom .packages import six\n\n\ndef guess_content_type(filename, default='application/octet-stream'):\n \"\"\"\n Guess the \"Content-Type\" of a file.\n\n :param filename:\n The filename to guess the \"Content-Type\" of using :mod:`mimetypes`.\n :param default:\n If no \"Content-Type\" can be guessed, default to `default`.\n \"\"\"\n if filename:\n return mimetypes.guess_type(filename)[0] or default\n return default\n\n\ndef format_header_param(name, value):\n \"\"\"\n Helper function to format and quote a single header parameter.\n\n Particularly useful for header parameters which might contain\n non-ASCII values, like file names. This follows RFC 2231, as\n suggested by RFC 2388 Section 4.4.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n if not any(ch in value for ch in '\"\\\\\\r\\n'):\n result = '%s=\"%s\"' % (name, value)\n try:\n result.encode('ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n return result\n if not six.PY3 and isinstance(value, six.text_type): # Python 2:\n value = value.encode('utf-8')\n value = email.utils.encode_rfc2231(value, 'utf-8')\n value = '%s*=%s' % (name, value)\n return value\n\n\nclass RequestField(object):\n \"\"\"\n A data container for request body parameters.\n\n :param name:\n The name of this request field.\n :param data:\n The data/value body.\n :param filename:\n An optional filename of the request field.\n :param headers:\n An optional dict-like object of headers to initially use for the field.\n \"\"\"\n def __init__(self, name, data, filename=None, headers=None):\n self._name = name\n self._filename = filename\n self.data = data\n self.headers = {}\n if headers:\n self.headers = dict(headers)\n\n @classmethod\n def from_tuples(cls, fieldname, value):\n \"\"\"\n A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.\n\n Supports constructing :class:`~urllib3.fields.RequestField` from\n parameter of key/value strings AND key/filetuple. A filetuple is a\n (filename, data, MIME type) tuple where the MIME type is optional.\n For example::\n\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n\n Field names and filenames must be unicode.\n \"\"\"\n if isinstance(value, tuple):\n if len(value) == 3:\n filename, data, content_type = value\n else:\n filename, data = value\n content_type = guess_content_type(filename)\n else:\n filename = None\n content_type = None\n data = value\n\n request_param = cls(fieldname, data, filename=filename)\n request_param.make_multipart(content_type=content_type)\n\n return request_param\n\n def _render_part(self, name, value):\n \"\"\"\n Overridable helper function to format a single header parameter.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n return format_header_param(name, value)\n\n def _render_parts(self, header_parts):\n \"\"\"\n Helper function to format and quote a single header.\n\n Useful for single headers that are composed of multiple items. 
E.g.,\n 'Content-Disposition' fields.\n\n :param header_parts:\n A sequence of (k, v) typles or a :class:`dict` of (k, v) to format\n as `k1=\"v1\"; k2=\"v2\"; ...`.\n \"\"\"\n parts = []\n iterable = header_parts\n if isinstance(header_parts, dict):\n iterable = header_parts.items()\n\n for name, value in iterable:\n if value is not None:\n parts.append(self._render_part(name, value))\n\n return '; '.join(parts)\n\n def render_headers(self):\n \"\"\"\n Renders the headers for this request field.\n \"\"\"\n lines = []\n\n sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']\n for sort_key in sort_keys:\n if self.headers.get(sort_key, False):\n lines.append('%s: %s' % (sort_key, self.headers[sort_key]))\n\n for header_name, header_value in self.headers.items():\n if header_name not in sort_keys:\n if header_value:\n lines.append('%s: %s' % (header_name, header_value))\n\n lines.append('\\r\\n')\n return '\\r\\n'.join(lines)\n\n def make_multipart(self, content_disposition=None, content_type=None,\n content_location=None):\n \"\"\"\n Makes this request field into a multipart request field.\n\n This method overrides \"Content-Disposition\", \"Content-Type\" and\n \"Content-Location\" headers to the request parameter.\n\n :param content_type:\n The 'Content-Type' of the request body.\n :param content_location:\n The 'Content-Location' of the request body.\n\n \"\"\"\n self.headers['Content-Disposition'] = content_disposition or 'form-data'\n self.headers['Content-Disposition'] += '; '.join([\n '', self._render_parts(\n (('name', self._name), ('filename', self._filename))\n )\n ])\n self.headers['Content-Type'] = content_type\n self.headers['Content-Location'] = content_location\n", "path": "urllib3/fields.py" } ]
diff --git a/test/test_fields.py b/test/test_fields.py index 21b44819da..27dad92e64 100644 --- a/test/test_fields.py +++ b/test/test_fields.py @@ -36,6 +36,15 @@ def test_make_multipart(self): 'Content-Location: /test\r\n' '\r\n') + def test_make_multipart_empty_filename(self): + field = RequestField('somename', 'data', '') + field.make_multipart(content_type='application/octet-stream') + self.assertEqual( + field.render_headers(), + 'Content-Disposition: form-data; name="somename"; filename=""\r\n' + 'Content-Type: application/octet-stream\r\n' + '\r\n') + def test_render_parts(self): field = RequestField('somename', 'data') parts = field._render_parts({'name': 'value', 'filename': 'value'}) diff --git a/urllib3/fields.py b/urllib3/fields.py index 8fa2a12767..19b0ae0c88 100644 --- a/urllib3/fields.py +++ b/urllib3/fields.py @@ -130,7 +130,7 @@ def _render_parts(self, header_parts): iterable = header_parts.items() for name, value in iterable: - if value: + if value is not None: parts.append(self._render_part(name, value)) return '; '.join(parts)
RocketMap__RocketMap-2240
If the path includes a non-ASCII character, several things break (e.g. icon display)

If the installation path includes a non-ASCII character, the function that delivers the icons/sprites/images fails.

## Expected Behavior

The path should work independently of non-ASCII characters.

## Current Behavior

No images get displayed.

## Possible Solution

Possibly use functions that are UTF-8 safe.

## Steps to Reproduce

Install RM into a path like $HOME/Pokémon/RocketMap and see what happens.

## Your Environment

* Version used: latest version from git
* Environment name and version (e.g. Python 2.7): Python 2.7
* Operating System and version (desktop or mobile): Ubuntu 16.04 server
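One hedged way to make the asset handling tolerate such paths on Python 2 is to normalize the root path to unicode before it is joined with other path fragments; the helper name below is illustrative only, and this is a sketch of the idea rather than the project's actual fix.

```python
# -*- coding: utf-8 -*-
import os
import sys


def to_unicode_path(path):
    # On Python 2, paths derived from __file__ are byte strings; decoding
    # them with the filesystem encoding keeps later joins with unicode
    # fragments (e.g. u'Pokémon') from raising UnicodeDecodeError.
    if isinstance(path, bytes):
        return path.decode(sys.getfilesystemencoding() or 'utf-8')
    return path


root_path = to_unicode_path(os.path.dirname(os.path.abspath(__file__)))
```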
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport logging\nimport time\nimport re\nimport ssl\nimport json\n\nfrom distutils.version import StrictVersion\n\nfrom threading import Thread, Event\nfrom queue import Queue\nfrom flask_cors import CORS\nfrom flask_cache_bust import init_cache_busting\n\nfrom pogom import config\nfrom pogom.app import Pogom\nfrom pogom.utils import get_args, now, gmaps_reverse_geolocate\nfrom pogom.altitude import get_gmaps_altitude\n\nfrom pogom.models import (init_database, create_tables, drop_tables,\n PlayerLocale, SpawnPoint, db_updater, clean_db_loop,\n verify_table_encoding, verify_database_schema)\nfrom pogom.webhook import wh_updater\n\nfrom pogom.proxy import load_proxies, check_proxies, proxies_refresher\nfrom pogom.search import search_overseer_thread\nfrom time import strftime\n\n\nclass LogFilter(logging.Filter):\n\n def __init__(self, level):\n self.level = level\n\n def filter(self, record):\n return record.levelno < self.level\n\n\n# Moved here so logger is configured at load time.\nformatter = logging.Formatter(\n '%(asctime)s [%(threadName)18s][%(module)14s][%(levelname)8s] %(message)s')\n\n# Redirect messages lower than WARNING to stdout\nstdout_hdlr = logging.StreamHandler(sys.stdout)\nstdout_hdlr.setFormatter(formatter)\nlog_filter = LogFilter(logging.WARNING)\nstdout_hdlr.addFilter(log_filter)\nstdout_hdlr.setLevel(logging.DEBUG)\n\n# Redirect messages equal or higher than WARNING to stderr\nstderr_hdlr = logging.StreamHandler(sys.stderr)\nstderr_hdlr.setFormatter(formatter)\nstderr_hdlr.setLevel(logging.WARNING)\n\nlog = logging.getLogger()\nlog.addHandler(stdout_hdlr)\nlog.addHandler(stderr_hdlr)\n\n\n# Assert pgoapi is installed.\ntry:\n import pgoapi\n from pgoapi import PGoApi, utilities as util\nexcept ImportError:\n log.critical(\n \"It seems `pgoapi` is not installed. 
Try running \" +\n \"pip install --upgrade -r requirements.txt.\")\n sys.exit(1)\n\n\n# Patch to make exceptions in threads cause an exception.\ndef install_thread_excepthook():\n \"\"\"\n Workaround for sys.excepthook thread bug\n (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).\n Call once from __main__ before creating any threads.\n If using psyco, call psycho.cannotcompile(threading.Thread.run)\n since this replaces a new-style class method.\n \"\"\"\n import sys\n run_old = Thread.run\n\n def run(*args, **kwargs):\n try:\n run_old(*args, **kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n sys.excepthook(*sys.exc_info())\n Thread.run = run\n\n\n# Exception handler will log unhandled exceptions.\ndef handle_exception(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n\n log.error(\"Uncaught exception\", exc_info=(\n exc_type, exc_value, exc_traceback))\n\n\ndef validate_assets(args):\n assets_error_log = (\n 'Missing front-end assets (static/dist) -- please run ' +\n '\"npm install && npm run build\" before starting the server.')\n\n root_path = os.path.dirname(__file__)\n if not os.path.exists(os.path.join(root_path, 'static/dist')):\n log.critical(assets_error_log)\n return False\n\n static_path = os.path.join(root_path, 'static/js')\n for file in os.listdir(static_path):\n if file.endswith(\".js\"):\n generated_path = os.path.join(static_path, '../dist/js/',\n file.replace(\".js\", \".min.js\"))\n source_path = os.path.join(static_path, file)\n if not os.path.exists(generated_path) or (\n os.path.getmtime(source_path) >\n os.path.getmtime(generated_path)):\n log.critical(assets_error_log)\n return False\n\n # You need custom image files now.\n if not os.path.isfile(\n os.path.join(root_path, 'static/icons-sprite.png')):\n log.critical(assets_error_log)\n return False\n\n # Check if custom.css is used otherwise fall back to default.\n if os.path.exists(os.path.join(root_path, 'static/css/custom.css')):\n args.custom_css = True\n log.info(\n 'File \\\"custom.css\\\" found, applying user-defined settings.')\n else:\n args.custom_css = False\n log.info('No file \\\"custom.css\\\" found, using default settings.')\n\n # Check if custom.js is used otherwise fall back to default.\n if os.path.exists(os.path.join(root_path, 'static/js/custom.js')):\n args.custom_js = True\n log.info(\n 'File \\\"custom.js\\\" found, applying user-defined settings.')\n else:\n args.custom_js = False\n log.info('No file \\\"custom.js\\\" found, using default settings.')\n\n return True\n\n\ndef can_start_scanning(args):\n # Currently supported pgoapi.\n pgoapi_version = \"1.2.0\"\n api_version_error = (\n 'The installed pgoapi is out of date. Please refer to ' +\n 'http://rocketmap.readthedocs.io/en/develop/common-issues/' +\n 'faq.html#i-get-an-error-about-pgooapi-version'\n )\n\n # Assert pgoapi >= pgoapi_version.\n if (not hasattr(pgoapi, \"__version__\") or\n StrictVersion(pgoapi.__version__) < StrictVersion(pgoapi_version)):\n log.critical(api_version_error)\n return False\n\n # Abort if we don't have a hash key set.\n if not args.hash_key:\n log.critical('Hash key is required for scanning. 
Exiting.')\n return False\n\n # Check the PoGo api pgoapi implements against what RM is expecting\n try:\n if PGoApi.get_api_version() != int(args.api_version.replace('.', '0')):\n log.critical(api_version_error)\n return False\n except AttributeError:\n log.critical(api_version_error)\n return False\n\n return True\n\n\ndef main():\n # Patch threading to make exceptions catchable.\n install_thread_excepthook()\n\n # Make sure exceptions get logged.\n sys.excepthook = handle_exception\n\n args = get_args()\n\n # Abort if status name is not alphanumeric.\n if not str(args.status_name).isalnum():\n log.critical('Status name must be alphanumeric.')\n sys.exit(1)\n\n set_log_and_verbosity(log)\n\n config['parse_pokemon'] = not args.no_pokemon\n config['parse_pokestops'] = not args.no_pokestops\n config['parse_gyms'] = not args.no_gyms\n config['parse_raids'] = not args.no_raids\n\n # Let's not forget to run Grunt / Only needed when running with webserver.\n if not args.no_server and not validate_assets(args):\n sys.exit(1)\n\n # Use lat/lng directly if matches such a pattern.\n prog = re.compile(\"^(\\-?\\d+\\.\\d+),?\\s?(\\-?\\d+\\.\\d+)$\")\n res = prog.match(args.location)\n if res:\n log.debug('Using coordinates from CLI directly')\n position = (float(res.group(1)), float(res.group(2)), 0)\n else:\n log.debug('Looking up coordinates in API')\n position = util.get_pos_by_name(args.location)\n\n if position is None or not any(position):\n log.error(\"Location not found: '{}'\".format(args.location))\n sys.exit()\n\n # Use the latitude and longitude to get the local altitude from Google.\n (altitude, status) = get_gmaps_altitude(position[0], position[1],\n args.gmaps_key)\n if altitude is not None:\n log.debug('Local altitude is: %sm', altitude)\n position = (position[0], position[1], altitude)\n else:\n if status == 'REQUEST_DENIED':\n log.error(\n 'Google API Elevation request was denied. You probably ' +\n 'forgot to enable elevation api in https://console.' +\n 'developers.google.com/apis/api/elevation_backend/')\n sys.exit()\n else:\n log.error('Unable to retrieve altitude from Google APIs' +\n 'setting to 0')\n\n log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)',\n position[0], position[1], position[2])\n\n if args.no_pokemon:\n log.info('Parsing of Pokemon disabled.')\n if args.no_pokestops:\n log.info('Parsing of Pokestops disabled.')\n if args.no_gyms:\n log.info('Parsing of Gyms disabled.')\n if args.encounter:\n log.info('Encountering pokemon enabled.')\n\n config['LOCALE'] = args.locale\n config['CHINA'] = args.china\n\n app = None\n if not args.no_server and not args.clear_db:\n app = Pogom(__name__)\n app.before_request(app.validate_request)\n app.set_current_location(position)\n\n db = init_database(app)\n if args.clear_db:\n log.info('Clearing database')\n if args.db_type == 'mysql':\n drop_tables(db)\n elif os.path.isfile(args.db):\n os.remove(args.db)\n\n verify_database_schema(db)\n\n create_tables(db)\n\n # fixing encoding on present and future tables\n verify_table_encoding(db)\n\n if args.clear_db:\n log.info(\n 'Drop and recreate is complete. 
Now remove -cd and restart.')\n sys.exit()\n\n # Control the search status (running or not) across threads.\n control_flags = {\n 'on_demand': Event(),\n 'api_watchdog': Event(),\n 'search_control': Event()\n }\n\n for flag in control_flags.values():\n flag.clear()\n\n if args.on_demand_timeout > 0:\n control_flags['on_demand'].set()\n\n heartbeat = [now()]\n\n # Setup the location tracking queue and push the first location on.\n new_location_queue = Queue()\n new_location_queue.put(position)\n\n # DB Updates\n db_updates_queue = Queue()\n\n # Thread(s) to process database updates.\n for i in range(args.db_threads):\n log.debug('Starting db-updater worker thread %d', i)\n t = Thread(target=db_updater, name='db-updater-{}'.format(i),\n args=(db_updates_queue, db))\n t.daemon = True\n t.start()\n\n # db cleaner; really only need one ever.\n if not args.disable_clean:\n t = Thread(target=clean_db_loop, name='db-cleaner', args=(args,))\n t.daemon = True\n t.start()\n\n # WH updates queue & WH unique key LFU caches.\n # The LFU caches will stop the server from resending the same data an\n # infinite number of times. The caches will be instantiated in the\n # webhook's startup code.\n wh_updates_queue = Queue()\n wh_key_cache = {}\n\n if len(args.wh_types) == 0:\n log.info('Webhook disabled.')\n else:\n log.info('Webhook enabled for events: sending %s to %s.',\n args.wh_types,\n args.webhooks)\n\n # Thread to process webhook updates.\n for i in range(args.wh_threads):\n log.debug('Starting wh-updater worker thread %d', i)\n t = Thread(target=wh_updater, name='wh-updater-{}'.format(i),\n args=(args, wh_updates_queue, wh_key_cache))\n t.daemon = True\n t.start()\n\n if not args.only_server:\n # Check if we are able to scan.\n if not can_start_scanning(args):\n sys.exit(1)\n\n # Processing proxies if set (load from file, check and overwrite old\n # args.proxy with new working list).\n args.proxy = load_proxies(args)\n\n if args.proxy and not args.proxy_skip_check:\n args.proxy = check_proxies(args, args.proxy)\n\n # Run periodical proxy refresh thread.\n if (args.proxy_file is not None) and (args.proxy_refresh > 0):\n t = Thread(target=proxies_refresher,\n name='proxy-refresh', args=(args,))\n t.daemon = True\n t.start()\n else:\n log.info('Periodical proxies refresh disabled.')\n\n # Update player locale if not set correctly, yet.\n args.player_locale = PlayerLocale.get_locale(args.location)\n if not args.player_locale:\n args.player_locale = gmaps_reverse_geolocate(\n args.gmaps_key,\n args.locale,\n str(position[0]) + ', ' + str(position[1]))\n db_player_locale = {\n 'location': args.location,\n 'country': args.player_locale['country'],\n 'language': args.player_locale['country'],\n 'timezone': args.player_locale['timezone'],\n }\n db_updates_queue.put((PlayerLocale, {0: db_player_locale}))\n else:\n log.debug(\n 'Existing player locale has been retrieved from the DB.')\n\n # Gather the Pokemon!\n\n # Attempt to dump the spawn points (do this before starting threads of\n # endure the woe).\n if (args.spawnpoint_scanning and\n args.spawnpoint_scanning != 'nofile' and\n args.dump_spawnpoints):\n with open(args.spawnpoint_scanning, 'w+') as file:\n log.info(\n 'Saving spawn points to %s', args.spawnpoint_scanning)\n spawns = SpawnPoint.get_spawnpoints_in_hex(\n position, args.step_limit)\n file.write(json.dumps(spawns))\n log.info('Finished exporting spawn points')\n\n argset = (args, new_location_queue, control_flags,\n heartbeat, db_updates_queue, wh_updates_queue)\n\n log.debug('Starting a %s 
search thread', args.scheduler)\n search_thread = Thread(target=search_overseer_thread,\n name='search-overseer', args=argset)\n search_thread.daemon = True\n search_thread.start()\n\n if args.no_server:\n # This loop allows for ctrl-c interupts to work since flask won't be\n # holding the program open.\n while search_thread.is_alive():\n time.sleep(60)\n else:\n config['ROOT_PATH'] = app.root_path\n config['GMAPS_KEY'] = args.gmaps_key\n\n if args.cors:\n CORS(app)\n\n # No more stale JS.\n init_cache_busting(app)\n\n app.set_search_control(control_flags['search_control'])\n app.set_heartbeat_control(heartbeat)\n app.set_location_queue(new_location_queue)\n ssl_context = None\n if (args.ssl_certificate and args.ssl_privatekey and\n os.path.exists(args.ssl_certificate) and\n os.path.exists(args.ssl_privatekey)):\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n ssl_context.load_cert_chain(\n args.ssl_certificate, args.ssl_privatekey)\n log.info('Web server in SSL mode.')\n if args.verbose:\n app.run(threaded=True, use_reloader=False, debug=True,\n host=args.host, port=args.port, ssl_context=ssl_context)\n else:\n app.run(threaded=True, use_reloader=False, debug=False,\n host=args.host, port=args.port, ssl_context=ssl_context)\n\n\ndef set_log_and_verbosity(log):\n # Always write to log file.\n args = get_args()\n if not args.no_file_logs:\n if not os.path.exists(args.log_path):\n os.mkdir(args.log_path)\n date = strftime('%Y%m%d_%H%M')\n filename = os.path.join(\n args.log_path, '{}_{}.log'.format(date, args.status_name))\n filelog = logging.FileHandler(filename)\n filelog.setFormatter(logging.Formatter(\n '%(asctime)s [%(threadName)18s][%(module)14s][%(levelname)8s] ' +\n '%(message)s'))\n log.addHandler(filelog)\n\n if args.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # These are very noisy, let's shush them up a bit.\n logging.getLogger('peewee').setLevel(logging.INFO)\n logging.getLogger('requests').setLevel(logging.WARNING)\n logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)\n logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)\n logging.getLogger('werkzeug').setLevel(logging.ERROR)\n\n # Turn these back up if debugging.\n if args.verbose == 2:\n logging.getLogger('pgoapi').setLevel(logging.DEBUG)\n logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)\n logging.getLogger('requests').setLevel(logging.DEBUG)\n elif args.verbose >= 3:\n logging.getLogger('peewee').setLevel(logging.DEBUG)\n logging.getLogger('rpc_api').setLevel(logging.DEBUG)\n logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)\n logging.getLogger('werkzeug').setLevel(logging.DEBUG)\n\n # Web access logs.\n if args.access_logs:\n logger = logging.getLogger('werkzeug')\n handler = logging.FileHandler('access.log')\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n\n\nif __name__ == '__main__':\n main()\n", "path": "runserver.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport logging\nimport time\nimport re\nimport ssl\nimport json\n\nfrom distutils.version import StrictVersion\n\nfrom threading import Thread, Event\nfrom queue import Queue\nfrom flask_cors import CORS\nfrom flask_cache_bust import init_cache_busting\n\nfrom pogom import config\nfrom pogom.app import Pogom\nfrom pogom.utils import get_args, now, gmaps_reverse_geolocate\nfrom pogom.altitude import get_gmaps_altitude\n\nfrom pogom.models import (init_database, create_tables, drop_tables,\n PlayerLocale, SpawnPoint, db_updater, clean_db_loop,\n verify_table_encoding, verify_database_schema)\nfrom pogom.webhook import wh_updater\n\nfrom pogom.proxy import load_proxies, check_proxies, proxies_refresher\nfrom pogom.search import search_overseer_thread\nfrom time import strftime\n\n\nclass LogFilter(logging.Filter):\n\n def __init__(self, level):\n self.level = level\n\n def filter(self, record):\n return record.levelno < self.level\n\n\n# Moved here so logger is configured at load time.\nformatter = logging.Formatter(\n '%(asctime)s [%(threadName)18s][%(module)14s][%(levelname)8s] %(message)s')\n\n# Redirect messages lower than WARNING to stdout\nstdout_hdlr = logging.StreamHandler(sys.stdout)\nstdout_hdlr.setFormatter(formatter)\nlog_filter = LogFilter(logging.WARNING)\nstdout_hdlr.addFilter(log_filter)\nstdout_hdlr.setLevel(logging.DEBUG)\n\n# Redirect messages equal or higher than WARNING to stderr\nstderr_hdlr = logging.StreamHandler(sys.stderr)\nstderr_hdlr.setFormatter(formatter)\nstderr_hdlr.setLevel(logging.WARNING)\n\nlog = logging.getLogger()\nlog.addHandler(stdout_hdlr)\nlog.addHandler(stderr_hdlr)\n\n\n# Assert pgoapi is installed.\ntry:\n import pgoapi\n from pgoapi import PGoApi, utilities as util\nexcept ImportError:\n log.critical(\n \"It seems `pgoapi` is not installed. 
Try running \" +\n \"pip install --upgrade -r requirements.txt.\")\n sys.exit(1)\n\n\n# Patch to make exceptions in threads cause an exception.\ndef install_thread_excepthook():\n \"\"\"\n Workaround for sys.excepthook thread bug\n (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).\n Call once from __main__ before creating any threads.\n If using psyco, call psycho.cannotcompile(threading.Thread.run)\n since this replaces a new-style class method.\n \"\"\"\n import sys\n run_old = Thread.run\n\n def run(*args, **kwargs):\n try:\n run_old(*args, **kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n sys.excepthook(*sys.exc_info())\n Thread.run = run\n\n\n# Exception handler will log unhandled exceptions.\ndef handle_exception(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n\n log.error(\"Uncaught exception\", exc_info=(\n exc_type, exc_value, exc_traceback))\n\n\ndef validate_assets(args):\n assets_error_log = (\n 'Missing front-end assets (static/dist) -- please run ' +\n '\"npm install && npm run build\" before starting the server.')\n\n root_path = os.path.dirname(__file__)\n if not os.path.exists(os.path.join(root_path, 'static/dist')):\n log.critical(assets_error_log)\n return False\n\n static_path = os.path.join(root_path, 'static/js')\n for file in os.listdir(static_path):\n if file.endswith(\".js\"):\n generated_path = os.path.join(static_path, '../dist/js/',\n file.replace(\".js\", \".min.js\"))\n source_path = os.path.join(static_path, file)\n if not os.path.exists(generated_path) or (\n os.path.getmtime(source_path) >\n os.path.getmtime(generated_path)):\n log.critical(assets_error_log)\n return False\n\n # You need custom image files now.\n if not os.path.isfile(\n os.path.join(root_path, 'static/icons-sprite.png')):\n log.critical(assets_error_log)\n return False\n\n # Check if custom.css is used otherwise fall back to default.\n if os.path.exists(os.path.join(root_path, 'static/css/custom.css')):\n args.custom_css = True\n log.info(\n 'File \\\"custom.css\\\" found, applying user-defined settings.')\n else:\n args.custom_css = False\n log.info('No file \\\"custom.css\\\" found, using default settings.')\n\n # Check if custom.js is used otherwise fall back to default.\n if os.path.exists(os.path.join(root_path, 'static/js/custom.js')):\n args.custom_js = True\n log.info(\n 'File \\\"custom.js\\\" found, applying user-defined settings.')\n else:\n args.custom_js = False\n log.info('No file \\\"custom.js\\\" found, using default settings.')\n\n return True\n\n\ndef can_start_scanning(args):\n # Currently supported pgoapi.\n pgoapi_version = \"1.2.0\"\n api_version_error = (\n 'The installed pgoapi is out of date. Please refer to ' +\n 'http://rocketmap.readthedocs.io/en/develop/common-issues/' +\n 'faq.html#i-get-an-error-about-pgooapi-version'\n )\n\n # Assert pgoapi >= pgoapi_version.\n if (not hasattr(pgoapi, \"__version__\") or\n StrictVersion(pgoapi.__version__) < StrictVersion(pgoapi_version)):\n log.critical(api_version_error)\n return False\n\n # Abort if we don't have a hash key set.\n if not args.hash_key:\n log.critical('Hash key is required for scanning. 
Exiting.')\n return False\n\n # Check the PoGo api pgoapi implements against what RM is expecting\n try:\n if PGoApi.get_api_version() != int(args.api_version.replace('.', '0')):\n log.critical(api_version_error)\n return False\n except AttributeError:\n log.critical(api_version_error)\n return False\n\n return True\n\n\ndef main():\n # Patch threading to make exceptions catchable.\n install_thread_excepthook()\n\n # Make sure exceptions get logged.\n sys.excepthook = handle_exception\n\n args = get_args()\n\n # Abort if status name is not alphanumeric.\n if not str(args.status_name).isalnum():\n log.critical('Status name must be alphanumeric.')\n sys.exit(1)\n\n set_log_and_verbosity(log)\n\n config['parse_pokemon'] = not args.no_pokemon\n config['parse_pokestops'] = not args.no_pokestops\n config['parse_gyms'] = not args.no_gyms\n config['parse_raids'] = not args.no_raids\n\n # Let's not forget to run Grunt / Only needed when running with webserver.\n if not args.no_server and not validate_assets(args):\n sys.exit(1)\n\n # Use lat/lng directly if matches such a pattern.\n prog = re.compile(\"^(\\-?\\d+\\.\\d+),?\\s?(\\-?\\d+\\.\\d+)$\")\n res = prog.match(args.location)\n if res:\n log.debug('Using coordinates from CLI directly')\n position = (float(res.group(1)), float(res.group(2)), 0)\n else:\n log.debug('Looking up coordinates in API')\n position = util.get_pos_by_name(args.location)\n\n if position is None or not any(position):\n log.error(\"Location not found: '{}'\".format(args.location))\n sys.exit()\n\n # Use the latitude and longitude to get the local altitude from Google.\n (altitude, status) = get_gmaps_altitude(position[0], position[1],\n args.gmaps_key)\n if altitude is not None:\n log.debug('Local altitude is: %sm', altitude)\n position = (position[0], position[1], altitude)\n else:\n if status == 'REQUEST_DENIED':\n log.error(\n 'Google API Elevation request was denied. You probably ' +\n 'forgot to enable elevation api in https://console.' +\n 'developers.google.com/apis/api/elevation_backend/')\n sys.exit()\n else:\n log.error('Unable to retrieve altitude from Google APIs' +\n 'setting to 0')\n\n log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)',\n position[0], position[1], position[2])\n\n if args.no_pokemon:\n log.info('Parsing of Pokemon disabled.')\n if args.no_pokestops:\n log.info('Parsing of Pokestops disabled.')\n if args.no_gyms:\n log.info('Parsing of Gyms disabled.')\n if args.encounter:\n log.info('Encountering pokemon enabled.')\n\n config['LOCALE'] = args.locale\n config['CHINA'] = args.china\n\n app = None\n if not args.no_server and not args.clear_db:\n app = Pogom(__name__, root_path=os.path.dirname(__file__))\n app.before_request(app.validate_request)\n app.set_current_location(position)\n\n db = init_database(app)\n if args.clear_db:\n log.info('Clearing database')\n if args.db_type == 'mysql':\n drop_tables(db)\n elif os.path.isfile(args.db):\n os.remove(args.db)\n\n verify_database_schema(db)\n\n create_tables(db)\n\n # fixing encoding on present and future tables\n verify_table_encoding(db)\n\n if args.clear_db:\n log.info(\n 'Drop and recreate is complete. 
Now remove -cd and restart.')\n sys.exit()\n\n # Control the search status (running or not) across threads.\n control_flags = {\n 'on_demand': Event(),\n 'api_watchdog': Event(),\n 'search_control': Event()\n }\n\n for flag in control_flags.values():\n flag.clear()\n\n if args.on_demand_timeout > 0:\n control_flags['on_demand'].set()\n\n heartbeat = [now()]\n\n # Setup the location tracking queue and push the first location on.\n new_location_queue = Queue()\n new_location_queue.put(position)\n\n # DB Updates\n db_updates_queue = Queue()\n\n # Thread(s) to process database updates.\n for i in range(args.db_threads):\n log.debug('Starting db-updater worker thread %d', i)\n t = Thread(target=db_updater, name='db-updater-{}'.format(i),\n args=(db_updates_queue, db))\n t.daemon = True\n t.start()\n\n # db cleaner; really only need one ever.\n if not args.disable_clean:\n t = Thread(target=clean_db_loop, name='db-cleaner', args=(args,))\n t.daemon = True\n t.start()\n\n # WH updates queue & WH unique key LFU caches.\n # The LFU caches will stop the server from resending the same data an\n # infinite number of times. The caches will be instantiated in the\n # webhook's startup code.\n wh_updates_queue = Queue()\n wh_key_cache = {}\n\n if len(args.wh_types) == 0:\n log.info('Webhook disabled.')\n else:\n log.info('Webhook enabled for events: sending %s to %s.',\n args.wh_types,\n args.webhooks)\n\n # Thread to process webhook updates.\n for i in range(args.wh_threads):\n log.debug('Starting wh-updater worker thread %d', i)\n t = Thread(target=wh_updater, name='wh-updater-{}'.format(i),\n args=(args, wh_updates_queue, wh_key_cache))\n t.daemon = True\n t.start()\n\n if not args.only_server:\n # Check if we are able to scan.\n if not can_start_scanning(args):\n sys.exit(1)\n\n # Processing proxies if set (load from file, check and overwrite old\n # args.proxy with new working list).\n args.proxy = load_proxies(args)\n\n if args.proxy and not args.proxy_skip_check:\n args.proxy = check_proxies(args, args.proxy)\n\n # Run periodical proxy refresh thread.\n if (args.proxy_file is not None) and (args.proxy_refresh > 0):\n t = Thread(target=proxies_refresher,\n name='proxy-refresh', args=(args,))\n t.daemon = True\n t.start()\n else:\n log.info('Periodical proxies refresh disabled.')\n\n # Update player locale if not set correctly, yet.\n args.player_locale = PlayerLocale.get_locale(args.location)\n if not args.player_locale:\n args.player_locale = gmaps_reverse_geolocate(\n args.gmaps_key,\n args.locale,\n str(position[0]) + ', ' + str(position[1]))\n db_player_locale = {\n 'location': args.location,\n 'country': args.player_locale['country'],\n 'language': args.player_locale['country'],\n 'timezone': args.player_locale['timezone'],\n }\n db_updates_queue.put((PlayerLocale, {0: db_player_locale}))\n else:\n log.debug(\n 'Existing player locale has been retrieved from the DB.')\n\n # Gather the Pokemon!\n\n # Attempt to dump the spawn points (do this before starting threads of\n # endure the woe).\n if (args.spawnpoint_scanning and\n args.spawnpoint_scanning != 'nofile' and\n args.dump_spawnpoints):\n with open(args.spawnpoint_scanning, 'w+') as file:\n log.info(\n 'Saving spawn points to %s', args.spawnpoint_scanning)\n spawns = SpawnPoint.get_spawnpoints_in_hex(\n position, args.step_limit)\n file.write(json.dumps(spawns))\n log.info('Finished exporting spawn points')\n\n argset = (args, new_location_queue, control_flags,\n heartbeat, db_updates_queue, wh_updates_queue)\n\n log.debug('Starting a %s 
search thread', args.scheduler)\n search_thread = Thread(target=search_overseer_thread,\n name='search-overseer', args=argset)\n search_thread.daemon = True\n search_thread.start()\n\n if args.no_server:\n # This loop allows for ctrl-c interupts to work since flask won't be\n # holding the program open.\n while search_thread.is_alive():\n time.sleep(60)\n else:\n config['ROOT_PATH'] = app.root_path\n config['GMAPS_KEY'] = args.gmaps_key\n\n if args.cors:\n CORS(app)\n\n # No more stale JS.\n init_cache_busting(app)\n\n app.set_search_control(control_flags['search_control'])\n app.set_heartbeat_control(heartbeat)\n app.set_location_queue(new_location_queue)\n ssl_context = None\n if (args.ssl_certificate and args.ssl_privatekey and\n os.path.exists(args.ssl_certificate) and\n os.path.exists(args.ssl_privatekey)):\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n ssl_context.load_cert_chain(\n args.ssl_certificate, args.ssl_privatekey)\n log.info('Web server in SSL mode.')\n if args.verbose:\n app.run(threaded=True, use_reloader=False, debug=True,\n host=args.host, port=args.port, ssl_context=ssl_context)\n else:\n app.run(threaded=True, use_reloader=False, debug=False,\n host=args.host, port=args.port, ssl_context=ssl_context)\n\n\ndef set_log_and_verbosity(log):\n # Always write to log file.\n args = get_args()\n if not args.no_file_logs:\n if not os.path.exists(args.log_path):\n os.mkdir(args.log_path)\n date = strftime('%Y%m%d_%H%M')\n filename = os.path.join(\n args.log_path, '{}_{}.log'.format(date, args.status_name))\n filelog = logging.FileHandler(filename)\n filelog.setFormatter(logging.Formatter(\n '%(asctime)s [%(threadName)18s][%(module)14s][%(levelname)8s] ' +\n '%(message)s'))\n log.addHandler(filelog)\n\n if args.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # These are very noisy, let's shush them up a bit.\n logging.getLogger('peewee').setLevel(logging.INFO)\n logging.getLogger('requests').setLevel(logging.WARNING)\n logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)\n logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)\n logging.getLogger('werkzeug').setLevel(logging.ERROR)\n\n # Turn these back up if debugging.\n if args.verbose == 2:\n logging.getLogger('pgoapi').setLevel(logging.DEBUG)\n logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)\n logging.getLogger('requests').setLevel(logging.DEBUG)\n elif args.verbose >= 3:\n logging.getLogger('peewee').setLevel(logging.DEBUG)\n logging.getLogger('rpc_api').setLevel(logging.DEBUG)\n logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)\n logging.getLogger('werkzeug').setLevel(logging.DEBUG)\n\n # Web access logs.\n if args.access_logs:\n logger = logging.getLogger('werkzeug')\n handler = logging.FileHandler('access.log')\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n\n\nif __name__ == '__main__':\n main()\n", "path": "runserver.py" } ]
diff --git a/runserver.py b/runserver.py index fffd4be055..9d90e747ee 100755 --- a/runserver.py +++ b/runserver.py @@ -258,7 +258,7 @@ def main(): app = None if not args.no_server and not args.clear_db: - app = Pogom(__name__) + app = Pogom(__name__, root_path=os.path.dirname(__file__)) app.before_request(app.validate_request) app.set_current_location(position)
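For context on the diff above: the change passes an explicit root_path when constructing the Pogom application, presumably so that Flask resolves its resources (static/, templates/) relative to the directory containing runserver.py rather than relying on import-based path detection. A minimal sketch of the same idea with a bare Flask app follows; Pogom wraps Flask in this project, so the standalone app below is only an illustrative assumption, not the project's actual setup.

```python
import os

from flask import Flask

# Anchor the app's root_path to this file's directory so static assets and
# templates are found even when the server is launched from another
# working directory.
app = Flask(__name__, root_path=os.path.dirname(__file__))

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)
```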
biopython__biopython-4399
6g2i.cif fails when parsing the resolution into the structure header

### Setup
I am reporting a problem with Biopython 1.77, Python 3.8.3 (Anaconda, July 2020), and CentOS 7 as follows:

```
(base) [mothcw@localhost biopython]$ python
Python 3.8.3 (default, Jul 2 2020, 16:21:59)
[GCC 7.3.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sys; print(sys.version)
3.8.3 (default, Jul 2 2020, 16:21:59)
[GCC 7.3.0]
>>> import platform; print(platform.python_implementation()); print(platform.platform())
CPython
Linux-3.10.0-1127.18.2.el7.x86_64-x86_64-with-glibc2.10
>>> import Bio; print(Bio.__version__)
1.77
```

### Expected behaviour
6g2i.cif would load, with no halting exception, with this code:

```
from Bio.PDB import MMCIFParser
mmCIF_parser = MMCIFParser(QUIET=True)
structure_cif = mmCIF_parser.get_structure('6g2i','6g2i.cif')
```

### Actual behaviour
Parsing fails with:

Traceback (most recent call last):
  File "./mmcif_fail.py", line 8, in <module>
    structure_cif = mmCIF_parser.get_structure('6g2i','6g2i.cif')
  File "/home/mothcw/anaconda3/lib/python3.8/site-packages/Bio/PDB/MMCIFParser.py", line 63, in get_structure
    self._structure_builder.set_header(self._get_header())
  File "/home/mothcw/anaconda3/lib/python3.8/site-packages/Bio/PDB/MMCIFParser.py", line 112, in _get_header
    self.header["resolution"] = float(self.header["resolution"])
ValueError: could not convert string to float: '.'

### Steps to reproduce
1) Download (and gunzip) 6g2i.cif from RCSB. It is a 32 MB CryoEM structure.
2) Run this code:

```
$ cat mmcif_fail.py
#!/usr/bin/env python
"""Short program to demonstrate that 6g2i.cif fails to load with biopython 1.77"""

# Parse in the small 6g2i.cif file downloaded from rcsb
from Bio.PDB import MMCIFParser
mmCIF_parser = MMCIFParser(QUIET=True)
structure_cif = mmCIF_parser.get_structure('6g2i','6g2i.cif')

print("Resolution is %s"%structure_cif.header['resolution'])
```

Note the failure message.

### Towards a fix
There are a couple of problems afoot here. First, line 113 or so of MMCIFParser.py does not consider `_em_3d_reconstruction.resolution` when looking for the 5.8 Å resolution. I have added it as a third item in the search keys, and it helps:

```
        self._update_header_entry(
            "resolution", ["_refine.ls_d_res_high", "_refine_hist.d_res_high", "_em_3d_reconstruction.resolution"]
        )
```

HOWEVER, 6g2i.cif itself has an empty '.' entry for `_refine.ls_d_res_high`:

    _refine.ls_abs_structure_Rogers_esd      ?
    _refine.ls_d_res_high                    .
    _refine.ls_d_res_low                     ?

This period to the right of d_res_high could be a bug in the file itself, but I am not totally sure of the regular expression defined for this field at RCSB.

For now, I have modified MMCIFParser.py to treat both a lone ? and a lone period as missing for the purpose of filling in the header. The bug finally went away when I changed line 84 from:

    if item != "?":

to:

    if item != "?" and item != ".":

To my eye this is redundant with the `_unassigned` idea at line 332. I recommend that, if this is the same kind of thing, you centralize the notion of "unassigned" so it is not patched in multiple places.

May I kindly/friendly suggest that, at minimum, Biopython be tested against all .cif files available at RCSB/wwPDB prior to each release? This is not such a painful thing to do. I have to contend that these formally curated and deposited files should not trigger runtime exceptions like this.
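A minimal sketch of the centralization suggested in the report above; this is not the patch as merged, and the constant and helper names (`_UNASSIGNED`, `first_assigned`) are hypothetical:

```python
# Hypothetical refactor: a single definition of the mmCIF "unassigned"
# placeholders, shared by the header lookup and the atom-parsing loop.
_UNASSIGNED = {".", "?"}


def first_assigned(mmcif_dict, keys):
    """Return the first real (assigned) value found among keys, else None."""
    for key in keys:
        values = mmcif_dict.get(key)
        if values and values[0] not in _UNASSIGNED:
            return values[0]
    return None
```

With a helper like this, `_update_header_entry` and `_build_structure` would agree on what counts as missing, instead of each hard-coding '?' and '.' separately.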
Idea: Perhaps more importantly for the longer term, this could be yet another case where Biopython is stuck in a legacy PDB-oriented worldview. Perhaps we need to stop initializing 'header' when working with mmCIF. For mmCIF files, parsing the .cif into the dictionary should be the central event, not something with a side effect of initializing a legacy 'header' that attempts to recreate PDB REMARK entries. Once the dictionary is parsed, conversion to a Structure for those who want model/chain/residue/atom traversal might be interesting. But the resolution should come from the parsed dictionary, not be set by code trying to replicate the old REMARK RESOLUTION records of the PDB format.
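A rough illustration of the idea above, pulling the resolution straight out of the parsed mmCIF dictionary rather than from the PDB-style header. The file name and key order mirror the report, and treating '.'/'?' as missing follows the reporter's convention rather than current library behaviour:

```python
from Bio.PDB.MMCIF2Dict import MMCIF2Dict

mmcif_dict = MMCIF2Dict("6g2i.cif")  # assumes the file was downloaded locally

resolution = None
# Same keys the header code consults, read directly from the dictionary
# (MMCIF2Dict stores every value as a list of strings).
for key in ("_refine.ls_d_res_high",
            "_refine_hist.d_res_high",
            "_em_3d_reconstruction.resolution"):
    values = mmcif_dict.get(key)
    if values and values[0] not in (".", "?"):
        resolution = float(values[0])
        break

print("Resolution:", resolution)  # 5.8 for this CryoEM entry, per the report
```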
[ { "content": "# Copyright (C) 2002, Thomas Hamelryck ([email protected])\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"mmCIF parsers.\"\"\"\n\n\nimport numpy as np\nimport warnings\n\nfrom Bio.File import as_handle\n\nfrom Bio.PDB.MMCIF2Dict import MMCIF2Dict\nfrom Bio.PDB.StructureBuilder import StructureBuilder\nfrom Bio.PDB.PDBExceptions import PDBConstructionException\nfrom Bio.PDB.PDBExceptions import PDBConstructionWarning\n\n\nclass MMCIFParser:\n \"\"\"Parse a mmCIF file and return a Structure object.\"\"\"\n\n def __init__(\n self, structure_builder=None, auth_chains=True, auth_residues=True, QUIET=False\n ):\n \"\"\"Create a PDBParser object.\n\n The mmCIF parser calls a number of standard methods in an aggregated\n StructureBuilder object. Normally this object is instantiated by the\n MMCIParser object itself, but if the user provides his/her own\n StructureBuilder object, the latter is used instead.\n\n Arguments:\n - structure_builder - an optional user implemented StructureBuilder class.\n - auth_chains - True by default. If true, use the author chain IDs.\n If false, use the re-assigned mmCIF chain IDs.\n - auth_residues - True by default. If true, use the author residue numbering.\n If false, use the mmCIF \"label\" residue numbering, which has no insertion\n codes, and strictly increments residue numbers.\n NOTE: Non-polymers such as water don't have a \"label\" residue number,\n and will be skipped.\n\n - QUIET - Evaluated as a Boolean. If true, warnings issued in constructing\n the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.\n These warnings might be indicative of problems in the mmCIF file!\n\n \"\"\"\n if structure_builder is not None:\n self._structure_builder = structure_builder\n else:\n self._structure_builder = StructureBuilder()\n self.header = None\n # self.trailer = None\n self.line_counter = 0\n self.build_structure = None\n self.auth_chains = bool(auth_chains)\n self.auth_residues = bool(auth_residues)\n self.QUIET = bool(QUIET)\n\n # Public methods\n\n def get_structure(self, structure_id, filename):\n \"\"\"Return the structure.\n\n Arguments:\n - structure_id - string, the id that will be used for the structure\n - filename - name of mmCIF file, OR an open text mode file handle\n\n \"\"\"\n with warnings.catch_warnings():\n if self.QUIET:\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n self._mmcif_dict = MMCIF2Dict(filename)\n self._build_structure(structure_id)\n self._structure_builder.set_header(self._get_header())\n\n return self._structure_builder.get_structure()\n\n # Private methods\n\n def _mmcif_get(self, key, dict, deflt):\n if key in dict:\n rslt = dict[key][0]\n if \"?\" != rslt:\n return rslt\n return deflt\n\n def _update_header_entry(self, target_key, keys):\n md = self._mmcif_dict\n for key in keys:\n val = md.get(key)\n try:\n item = val[0]\n except (TypeError, IndexError):\n continue\n if item != \"?\":\n self.header[target_key] = item\n break\n\n def _get_header(self):\n self.header = {\n \"name\": \"\",\n \"head\": \"\",\n \"idcode\": \"\",\n \"deposition_date\": \"\",\n \"structure_method\": \"\",\n \"resolution\": None,\n }\n\n self._update_header_entry(\n \"idcode\", [\"_entry_id\", \"_exptl.entry_id\", \"_struct.entry_id\"]\n )\n self._update_header_entry(\"name\", [\"_struct.title\"])\n self._update_header_entry(\n \"head\", 
[\"_struct_keywords.pdbx_keywords\", \"_struct_keywords.text\"]\n )\n self._update_header_entry(\n \"deposition_date\", [\"_pdbx_database_status.recvd_initial_deposition_date\"]\n )\n self._update_header_entry(\"structure_method\", [\"_exptl.method\"])\n self._update_header_entry(\n \"resolution\",\n [\n \"_refine.ls_d_res_high\",\n \"_refine_hist.d_res_high\",\n \"_em_3d_reconstruction.resolution\",\n ],\n )\n if self.header[\"resolution\"] is not None:\n try:\n self.header[\"resolution\"] = float(self.header[\"resolution\"])\n except ValueError:\n self.header[\"resolution\"] = None\n\n return self.header\n\n def _build_structure(self, structure_id):\n # two special chars as placeholders in the mmCIF format\n # for item values that cannot be explicitly assigned\n # see: pdbx/mmcif syntax web page\n _unassigned = {\".\", \"?\"}\n\n mmcif_dict = self._mmcif_dict\n\n atom_serial_list = mmcif_dict[\"_atom_site.id\"]\n atom_id_list = mmcif_dict[\"_atom_site.label_atom_id\"]\n residue_id_list = mmcif_dict[\"_atom_site.label_comp_id\"]\n try:\n element_list = mmcif_dict[\"_atom_site.type_symbol\"]\n except KeyError:\n element_list = None\n if self.auth_chains:\n chain_id_list = mmcif_dict[\"_atom_site.auth_asym_id\"]\n else:\n chain_id_list = mmcif_dict[\"_atom_site.label_asym_id\"]\n x_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_x\"]]\n y_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_y\"]]\n z_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_z\"]]\n alt_list = mmcif_dict[\"_atom_site.label_alt_id\"]\n icode_list = mmcif_dict[\"_atom_site.pdbx_PDB_ins_code\"]\n b_factor_list = mmcif_dict[\"_atom_site.B_iso_or_equiv\"]\n occupancy_list = mmcif_dict[\"_atom_site.occupancy\"]\n fieldname_list = mmcif_dict[\"_atom_site.group_PDB\"]\n try:\n serial_list = [int(n) for n in mmcif_dict[\"_atom_site.pdbx_PDB_model_num\"]]\n except KeyError:\n # No model number column\n serial_list = None\n except ValueError:\n # Invalid model number (malformed file)\n raise PDBConstructionException(\"Invalid model number\") from None\n try:\n aniso_u11 = mmcif_dict[\"_atom_site_anisotrop.U[1][1]\"]\n aniso_u12 = mmcif_dict[\"_atom_site_anisotrop.U[1][2]\"]\n aniso_u13 = mmcif_dict[\"_atom_site_anisotrop.U[1][3]\"]\n aniso_u22 = mmcif_dict[\"_atom_site_anisotrop.U[2][2]\"]\n aniso_u23 = mmcif_dict[\"_atom_site_anisotrop.U[2][3]\"]\n aniso_u33 = mmcif_dict[\"_atom_site_anisotrop.U[3][3]\"]\n aniso_flag = 1\n except KeyError:\n # no anisotropic B factors\n aniso_flag = 0\n\n if self.auth_residues:\n # if auth_seq_id is present, we use this.\n # Otherwise label_seq_id is used.\n if \"_atom_site.auth_seq_id\" in mmcif_dict:\n seq_id_list = mmcif_dict[\"_atom_site.auth_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n # Now loop over atoms and build the structure\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n structure_builder = self._structure_builder\n structure_builder.init_structure(structure_id)\n structure_builder.init_seg(\" \")\n # Historically, Biopython PDB parser uses model_id to mean array index\n # so serial_id means the Model ID specified in the file\n current_model_id = -1\n current_serial_id = -1\n for i in range(len(atom_id_list)):\n # set the line_counter for 'ATOM' lines only and not\n # as a global line counter found in the PDBParser()\n structure_builder.set_line_counter(i)\n\n # Try coercing serial to int, for compatibility with PDBParser\n # But do not 
quit if it fails. mmCIF format specs allow strings.\n try:\n serial = int(atom_serial_list[i])\n except ValueError:\n serial = atom_serial_list[i]\n warnings.warn(\n \"PDBConstructionWarning: Some atom serial numbers are not numerical\",\n PDBConstructionWarning,\n )\n\n x = x_list[i]\n y = y_list[i]\n z = z_list[i]\n resname = residue_id_list[i]\n chainid = chain_id_list[i]\n altloc = alt_list[i]\n if altloc in _unassigned:\n altloc = \" \"\n resseq = seq_id_list[i]\n if resseq == \".\":\n # Non-existing residue ID\n try:\n msg_resseq = mmcif_dict[\"_atom_site.auth_seq_id\"][i]\n msg = \"Non-existing residue ID in chain '{}', residue '{}'\".format(\n chainid, msg_resseq\n )\n except (KeyError, IndexError):\n msg = f\"Non-existing residue ID in chain '{chainid}'\"\n warnings.warn(\n \"PDBConstructionWarning: \" + msg,\n PDBConstructionWarning,\n )\n continue\n int_resseq = int(resseq)\n icode = icode_list[i]\n if icode in _unassigned:\n icode = \" \"\n name = atom_id_list[i]\n # occupancy & B factor\n try:\n tempfactor = float(b_factor_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing B factor\") from None\n try:\n occupancy = float(occupancy_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing occupancy\") from None\n fieldname = fieldname_list[i]\n if fieldname == \"HETATM\":\n if resname == \"HOH\" or resname == \"WAT\":\n hetatm_flag = \"W\"\n else:\n hetatm_flag = \"H\"\n else:\n hetatm_flag = \" \"\n\n resseq = (hetatm_flag, int_resseq, icode)\n\n if serial_list is not None:\n # model column exists; use it\n serial_id = serial_list[i]\n if current_serial_id != serial_id:\n # if serial changes, update it and start new model\n current_serial_id = serial_id\n current_model_id += 1\n structure_builder.init_model(current_model_id, current_serial_id)\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n else:\n # no explicit model column; initialize single model\n structure_builder.init_model(current_model_id)\n\n if current_chain_id != chainid:\n current_chain_id = chainid\n structure_builder.init_chain(current_chain_id)\n current_residue_id = None\n current_resname = None\n\n if current_residue_id != resseq or current_resname != resname:\n current_residue_id = resseq\n current_resname = resname\n structure_builder.init_residue(resname, hetatm_flag, int_resseq, icode)\n\n coord = np.array((x, y, z), \"f\")\n element = element_list[i].upper() if element_list else None\n structure_builder.init_atom(\n name,\n coord,\n tempfactor,\n occupancy,\n altloc,\n name,\n serial_number=serial,\n element=element,\n )\n if aniso_flag == 1 and i < len(aniso_u11):\n u = (\n aniso_u11[i],\n aniso_u12[i],\n aniso_u13[i],\n aniso_u22[i],\n aniso_u23[i],\n aniso_u33[i],\n )\n mapped_anisou = [float(_) for _ in u]\n anisou_array = np.array(mapped_anisou, \"f\")\n structure_builder.set_anisou(anisou_array)\n # Now try to set the cell\n try:\n a = float(mmcif_dict[\"_cell.length_a\"][0])\n b = float(mmcif_dict[\"_cell.length_b\"][0])\n c = float(mmcif_dict[\"_cell.length_c\"][0])\n alpha = float(mmcif_dict[\"_cell.angle_alpha\"][0])\n beta = float(mmcif_dict[\"_cell.angle_beta\"][0])\n gamma = float(mmcif_dict[\"_cell.angle_gamma\"][0])\n cell = np.array((a, b, c, alpha, beta, gamma), \"f\")\n spacegroup = mmcif_dict[\"_symmetry.space_group_name_H-M\"][0]\n spacegroup = spacegroup[1:-1] # get rid of quotes!!\n if spacegroup is None:\n raise Exception\n structure_builder.set_symmetry(spacegroup, cell)\n except 
Exception:\n pass # no cell found, so just ignore\n\n\nclass FastMMCIFParser:\n \"\"\"Parse an MMCIF file and return a Structure object.\"\"\"\n\n def __init__(\n self, structure_builder=None, auth_chains=True, auth_residues=True, QUIET=False\n ):\n \"\"\"Create a FastMMCIFParser object.\n\n The mmCIF parser calls a number of standard methods in an aggregated\n StructureBuilder object. Normally this object is instantiated by the\n parser object itself, but if the user provides his/her own\n StructureBuilder object, the latter is used instead.\n\n The main difference between this class and the regular MMCIFParser is\n that only 'ATOM' and 'HETATM' lines are parsed here. Use if you are\n interested only in coordinate information.\n\n Arguments:\n - structure_builder - an optional user implemented StructureBuilder class.\n - auth_chains - True by default. If true, use the author chain IDs.\n If false, use the re-assigned mmCIF chain IDs.\n - auth_residues - True by default. If true, use the author residue numbering.\n If false, use the mmCIF \"label\" residue numbering, which has no insertion\n codes, and strictly increments residue numbers.\n NOTE: Non-polymers such as water don't have a \"label\" residue number,\n and will be skipped.\n\n - QUIET - Evaluated as a Boolean. If true, warnings issued in constructing\n the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.\n These warnings might be indicative of problems in the mmCIF file!\n\n \"\"\"\n if structure_builder is not None:\n self._structure_builder = structure_builder\n else:\n self._structure_builder = StructureBuilder()\n\n self.line_counter = 0\n self.build_structure = None\n self.auth_chains = bool(auth_chains)\n self.auth_residues = bool(auth_residues)\n self.QUIET = bool(QUIET)\n\n # Public methods\n\n def get_structure(self, structure_id, filename):\n \"\"\"Return the structure.\n\n Arguments:\n - structure_id - string, the id that will be used for the structure\n - filename - name of the mmCIF file OR an open filehandle\n\n \"\"\"\n with warnings.catch_warnings():\n if self.QUIET:\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n with as_handle(filename) as handle:\n self._build_structure(structure_id, handle)\n\n return self._structure_builder.get_structure()\n\n # Private methods\n\n def _build_structure(self, structure_id, filehandle):\n # two special chars as placeholders in the mmCIF format\n # for item values that cannot be explicitly assigned\n # see: pdbx/mmcif syntax web page\n _unassigned = {\".\", \"?\"}\n\n # Read only _atom_site. 
and atom_site_anisotrop entries\n read_atom, read_aniso = False, False\n _fields, _records = [], []\n _anisof, _anisors = [], []\n for line in filehandle:\n if line.startswith(\"_atom_site.\"):\n read_atom = True\n _fields.append(line.strip())\n elif line.startswith(\"_atom_site_anisotrop.\"):\n read_aniso = True\n _anisof.append(line.strip())\n elif read_atom and line.startswith(\"#\"):\n read_atom = False\n elif read_aniso and line.startswith(\"#\"):\n read_aniso = False\n elif read_atom:\n _records.append(line.strip())\n elif read_aniso:\n _anisors.append(line.strip())\n\n # Dumping the shlex module here since this particular\n # category should be rather straightforward.\n # Quite a performance boost..\n _record_tbl = zip(*map(str.split, _records))\n _anisob_tbl = zip(*map(str.split, _anisors))\n\n mmcif_dict = dict(zip(_fields, _record_tbl))\n mmcif_dict.update(dict(zip(_anisof, _anisob_tbl)))\n\n # Build structure object\n atom_serial_list = mmcif_dict[\"_atom_site.id\"]\n atom_id_list = mmcif_dict[\"_atom_site.label_atom_id\"]\n residue_id_list = mmcif_dict[\"_atom_site.label_comp_id\"]\n\n try:\n element_list = mmcif_dict[\"_atom_site.type_symbol\"]\n except KeyError:\n element_list = None\n\n if self.auth_chains:\n chain_id_list = mmcif_dict[\"_atom_site.auth_asym_id\"]\n else:\n chain_id_list = mmcif_dict[\"_atom_site.label_asym_id\"]\n\n x_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_x\"]]\n y_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_y\"]]\n z_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_z\"]]\n alt_list = mmcif_dict[\"_atom_site.label_alt_id\"]\n icode_list = mmcif_dict[\"_atom_site.pdbx_PDB_ins_code\"]\n b_factor_list = mmcif_dict[\"_atom_site.B_iso_or_equiv\"]\n occupancy_list = mmcif_dict[\"_atom_site.occupancy\"]\n fieldname_list = mmcif_dict[\"_atom_site.group_PDB\"]\n\n try:\n serial_list = [int(n) for n in mmcif_dict[\"_atom_site.pdbx_PDB_model_num\"]]\n except KeyError:\n # No model number column\n serial_list = None\n except ValueError:\n # Invalid model number (malformed file)\n raise PDBConstructionException(\"Invalid model number\") from None\n\n try:\n aniso_u11 = mmcif_dict[\"_atom_site_anisotrop.U[1][1]\"]\n aniso_u12 = mmcif_dict[\"_atom_site_anisotrop.U[1][2]\"]\n aniso_u13 = mmcif_dict[\"_atom_site_anisotrop.U[1][3]\"]\n aniso_u22 = mmcif_dict[\"_atom_site_anisotrop.U[2][2]\"]\n aniso_u23 = mmcif_dict[\"_atom_site_anisotrop.U[2][3]\"]\n aniso_u33 = mmcif_dict[\"_atom_site_anisotrop.U[3][3]\"]\n aniso_flag = 1\n except KeyError:\n # no anisotropic B factors\n aniso_flag = 0\n\n if self.auth_residues:\n # if auth_seq_id is present, we use this.\n # Otherwise label_seq_id is used.\n if \"_atom_site.auth_seq_id\" in mmcif_dict:\n seq_id_list = mmcif_dict[\"_atom_site.auth_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n\n # Now loop over atoms and build the structure\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n structure_builder = self._structure_builder\n structure_builder.init_structure(structure_id)\n structure_builder.init_seg(\" \")\n\n # Historically, Biopython PDB parser uses model_id to mean array index\n # so serial_id means the Model ID specified in the file\n current_model_id = -1\n current_serial_id = -1\n for i in range(len(atom_id_list)):\n # set the line_counter for 'ATOM' lines only and not\n # as a global line counter found in the PDBParser()\n structure_builder.set_line_counter(i)\n\n 
serial = atom_serial_list[i]\n\n x = x_list[i]\n y = y_list[i]\n z = z_list[i]\n resname = residue_id_list[i]\n chainid = chain_id_list[i]\n altloc = alt_list[i]\n if altloc in _unassigned:\n altloc = \" \"\n resseq = seq_id_list[i]\n if resseq == \".\":\n # Non-existing residue ID\n try:\n msg_resseq = mmcif_dict[\"_atom_site.auth_seq_id\"][i]\n msg = \"Non-existing residue ID in chain '{}', residue '{}'\".format(\n chainid, msg_resseq\n )\n except (KeyError, IndexError):\n msg = f\"Non-existing residue ID in chain '{chainid}'\"\n warnings.warn(\n \"PDBConstructionWarning: \" + msg,\n PDBConstructionWarning,\n )\n continue\n int_resseq = int(resseq)\n icode = icode_list[i]\n if icode in _unassigned:\n icode = \" \"\n # Remove occasional \" from quoted atom names (e.g. xNA)\n name = atom_id_list[i].strip('\"')\n\n # occupancy & B factor\n try:\n tempfactor = float(b_factor_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing B factor\") from None\n\n try:\n occupancy = float(occupancy_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing occupancy\") from None\n\n fieldname = fieldname_list[i]\n if fieldname == \"HETATM\":\n hetatm_flag = \"H\"\n else:\n hetatm_flag = \" \"\n\n resseq = (hetatm_flag, int_resseq, icode)\n\n if serial_list is not None:\n # model column exists; use it\n serial_id = serial_list[i]\n if current_serial_id != serial_id:\n # if serial changes, update it and start new model\n current_serial_id = serial_id\n current_model_id += 1\n structure_builder.init_model(current_model_id, current_serial_id)\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n else:\n # no explicit model column; initialize single model\n structure_builder.init_model(current_model_id)\n\n if current_chain_id != chainid:\n current_chain_id = chainid\n structure_builder.init_chain(current_chain_id)\n current_residue_id = None\n current_resname = None\n\n if current_residue_id != resseq or current_resname != resname:\n current_residue_id = resseq\n current_resname = resname\n structure_builder.init_residue(resname, hetatm_flag, int_resseq, icode)\n\n coord = np.array((x, y, z), \"f\")\n element = element_list[i] if element_list else None\n structure_builder.init_atom(\n name,\n coord,\n tempfactor,\n occupancy,\n altloc,\n name,\n serial_number=serial,\n element=element,\n )\n if aniso_flag == 1 and i < len(aniso_u11):\n u = (\n aniso_u11[i],\n aniso_u12[i],\n aniso_u13[i],\n aniso_u22[i],\n aniso_u23[i],\n aniso_u33[i],\n )\n mapped_anisou = [float(_) for _ in u]\n anisou_array = np.array(mapped_anisou, \"f\")\n structure_builder.set_anisou(anisou_array)\n", "path": "Bio/PDB/MMCIFParser.py" } ]
[ { "content": "# Copyright (C) 2002, Thomas Hamelryck ([email protected])\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"mmCIF parsers.\"\"\"\n\n\nimport numpy as np\nimport warnings\n\nfrom Bio.File import as_handle\n\nfrom Bio.PDB.MMCIF2Dict import MMCIF2Dict\nfrom Bio.PDB.StructureBuilder import StructureBuilder\nfrom Bio.PDB.PDBExceptions import PDBConstructionException\nfrom Bio.PDB.PDBExceptions import PDBConstructionWarning\n\n\nclass MMCIFParser:\n \"\"\"Parse a mmCIF file and return a Structure object.\"\"\"\n\n def __init__(\n self, structure_builder=None, auth_chains=True, auth_residues=True, QUIET=False\n ):\n \"\"\"Create a PDBParser object.\n\n The mmCIF parser calls a number of standard methods in an aggregated\n StructureBuilder object. Normally this object is instantiated by the\n MMCIParser object itself, but if the user provides his/her own\n StructureBuilder object, the latter is used instead.\n\n Arguments:\n - structure_builder - an optional user implemented StructureBuilder class.\n - auth_chains - True by default. If true, use the author chain IDs.\n If false, use the re-assigned mmCIF chain IDs.\n - auth_residues - True by default. If true, use the author residue numbering.\n If false, use the mmCIF \"label\" residue numbering, which has no insertion\n codes, and strictly increments residue numbers.\n NOTE: Non-polymers such as water don't have a \"label\" residue number,\n and will be skipped.\n\n - QUIET - Evaluated as a Boolean. If true, warnings issued in constructing\n the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.\n These warnings might be indicative of problems in the mmCIF file!\n\n \"\"\"\n if structure_builder is not None:\n self._structure_builder = structure_builder\n else:\n self._structure_builder = StructureBuilder()\n self.header = None\n # self.trailer = None\n self.line_counter = 0\n self.build_structure = None\n self.auth_chains = bool(auth_chains)\n self.auth_residues = bool(auth_residues)\n self.QUIET = bool(QUIET)\n\n # Public methods\n\n def get_structure(self, structure_id, filename):\n \"\"\"Return the structure.\n\n Arguments:\n - structure_id - string, the id that will be used for the structure\n - filename - name of mmCIF file, OR an open text mode file handle\n\n \"\"\"\n with warnings.catch_warnings():\n if self.QUIET:\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n self._mmcif_dict = MMCIF2Dict(filename)\n self._build_structure(structure_id)\n self._structure_builder.set_header(self._get_header())\n\n return self._structure_builder.get_structure()\n\n # Private methods\n\n def _mmcif_get(self, key, dict, deflt):\n if key in dict:\n rslt = dict[key][0]\n if \"?\" != rslt:\n return rslt\n return deflt\n\n def _update_header_entry(self, target_key, keys):\n md = self._mmcif_dict\n for key in keys:\n val = md.get(key)\n try:\n item = val[0]\n except (TypeError, IndexError):\n continue\n if item != \"?\" and item != \".\":\n self.header[target_key] = item\n break\n\n def _get_header(self):\n self.header = {\n \"name\": \"\",\n \"head\": \"\",\n \"idcode\": \"\",\n \"deposition_date\": \"\",\n \"structure_method\": \"\",\n \"resolution\": None,\n }\n\n self._update_header_entry(\n \"idcode\", [\"_entry_id\", \"_exptl.entry_id\", \"_struct.entry_id\"]\n )\n self._update_header_entry(\"name\", [\"_struct.title\"])\n self._update_header_entry(\n \"head\", 
[\"_struct_keywords.pdbx_keywords\", \"_struct_keywords.text\"]\n )\n self._update_header_entry(\n \"deposition_date\", [\"_pdbx_database_status.recvd_initial_deposition_date\"]\n )\n self._update_header_entry(\"structure_method\", [\"_exptl.method\"])\n self._update_header_entry(\n \"resolution\",\n [\n \"_refine.ls_d_res_high\",\n \"_refine_hist.d_res_high\",\n \"_em_3d_reconstruction.resolution\",\n ],\n )\n if self.header[\"resolution\"] is not None:\n try:\n self.header[\"resolution\"] = float(self.header[\"resolution\"])\n except ValueError:\n self.header[\"resolution\"] = None\n\n return self.header\n\n def _build_structure(self, structure_id):\n # two special chars as placeholders in the mmCIF format\n # for item values that cannot be explicitly assigned\n # see: pdbx/mmcif syntax web page\n _unassigned = {\".\", \"?\"}\n\n mmcif_dict = self._mmcif_dict\n\n atom_serial_list = mmcif_dict[\"_atom_site.id\"]\n atom_id_list = mmcif_dict[\"_atom_site.label_atom_id\"]\n residue_id_list = mmcif_dict[\"_atom_site.label_comp_id\"]\n try:\n element_list = mmcif_dict[\"_atom_site.type_symbol\"]\n except KeyError:\n element_list = None\n if self.auth_chains:\n chain_id_list = mmcif_dict[\"_atom_site.auth_asym_id\"]\n else:\n chain_id_list = mmcif_dict[\"_atom_site.label_asym_id\"]\n x_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_x\"]]\n y_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_y\"]]\n z_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_z\"]]\n alt_list = mmcif_dict[\"_atom_site.label_alt_id\"]\n icode_list = mmcif_dict[\"_atom_site.pdbx_PDB_ins_code\"]\n b_factor_list = mmcif_dict[\"_atom_site.B_iso_or_equiv\"]\n occupancy_list = mmcif_dict[\"_atom_site.occupancy\"]\n fieldname_list = mmcif_dict[\"_atom_site.group_PDB\"]\n try:\n serial_list = [int(n) for n in mmcif_dict[\"_atom_site.pdbx_PDB_model_num\"]]\n except KeyError:\n # No model number column\n serial_list = None\n except ValueError:\n # Invalid model number (malformed file)\n raise PDBConstructionException(\"Invalid model number\") from None\n try:\n aniso_u11 = mmcif_dict[\"_atom_site_anisotrop.U[1][1]\"]\n aniso_u12 = mmcif_dict[\"_atom_site_anisotrop.U[1][2]\"]\n aniso_u13 = mmcif_dict[\"_atom_site_anisotrop.U[1][3]\"]\n aniso_u22 = mmcif_dict[\"_atom_site_anisotrop.U[2][2]\"]\n aniso_u23 = mmcif_dict[\"_atom_site_anisotrop.U[2][3]\"]\n aniso_u33 = mmcif_dict[\"_atom_site_anisotrop.U[3][3]\"]\n aniso_flag = 1\n except KeyError:\n # no anisotropic B factors\n aniso_flag = 0\n\n if self.auth_residues:\n # if auth_seq_id is present, we use this.\n # Otherwise label_seq_id is used.\n if \"_atom_site.auth_seq_id\" in mmcif_dict:\n seq_id_list = mmcif_dict[\"_atom_site.auth_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n # Now loop over atoms and build the structure\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n structure_builder = self._structure_builder\n structure_builder.init_structure(structure_id)\n structure_builder.init_seg(\" \")\n # Historically, Biopython PDB parser uses model_id to mean array index\n # so serial_id means the Model ID specified in the file\n current_model_id = -1\n current_serial_id = -1\n for i in range(len(atom_id_list)):\n # set the line_counter for 'ATOM' lines only and not\n # as a global line counter found in the PDBParser()\n structure_builder.set_line_counter(i)\n\n # Try coercing serial to int, for compatibility with PDBParser\n # But do not 
quit if it fails. mmCIF format specs allow strings.\n try:\n serial = int(atom_serial_list[i])\n except ValueError:\n serial = atom_serial_list[i]\n warnings.warn(\n \"PDBConstructionWarning: Some atom serial numbers are not numerical\",\n PDBConstructionWarning,\n )\n\n x = x_list[i]\n y = y_list[i]\n z = z_list[i]\n resname = residue_id_list[i]\n chainid = chain_id_list[i]\n altloc = alt_list[i]\n if altloc in _unassigned:\n altloc = \" \"\n resseq = seq_id_list[i]\n if resseq == \".\":\n # Non-existing residue ID\n try:\n msg_resseq = mmcif_dict[\"_atom_site.auth_seq_id\"][i]\n msg = \"Non-existing residue ID in chain '{}', residue '{}'\".format(\n chainid, msg_resseq\n )\n except (KeyError, IndexError):\n msg = f\"Non-existing residue ID in chain '{chainid}'\"\n warnings.warn(\n \"PDBConstructionWarning: \" + msg,\n PDBConstructionWarning,\n )\n continue\n int_resseq = int(resseq)\n icode = icode_list[i]\n if icode in _unassigned:\n icode = \" \"\n name = atom_id_list[i]\n # occupancy & B factor\n try:\n tempfactor = float(b_factor_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing B factor\") from None\n try:\n occupancy = float(occupancy_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing occupancy\") from None\n fieldname = fieldname_list[i]\n if fieldname == \"HETATM\":\n if resname == \"HOH\" or resname == \"WAT\":\n hetatm_flag = \"W\"\n else:\n hetatm_flag = \"H\"\n else:\n hetatm_flag = \" \"\n\n resseq = (hetatm_flag, int_resseq, icode)\n\n if serial_list is not None:\n # model column exists; use it\n serial_id = serial_list[i]\n if current_serial_id != serial_id:\n # if serial changes, update it and start new model\n current_serial_id = serial_id\n current_model_id += 1\n structure_builder.init_model(current_model_id, current_serial_id)\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n else:\n # no explicit model column; initialize single model\n structure_builder.init_model(current_model_id)\n\n if current_chain_id != chainid:\n current_chain_id = chainid\n structure_builder.init_chain(current_chain_id)\n current_residue_id = None\n current_resname = None\n\n if current_residue_id != resseq or current_resname != resname:\n current_residue_id = resseq\n current_resname = resname\n structure_builder.init_residue(resname, hetatm_flag, int_resseq, icode)\n\n coord = np.array((x, y, z), \"f\")\n element = element_list[i].upper() if element_list else None\n structure_builder.init_atom(\n name,\n coord,\n tempfactor,\n occupancy,\n altloc,\n name,\n serial_number=serial,\n element=element,\n )\n if aniso_flag == 1 and i < len(aniso_u11):\n u = (\n aniso_u11[i],\n aniso_u12[i],\n aniso_u13[i],\n aniso_u22[i],\n aniso_u23[i],\n aniso_u33[i],\n )\n mapped_anisou = [float(_) for _ in u]\n anisou_array = np.array(mapped_anisou, \"f\")\n structure_builder.set_anisou(anisou_array)\n # Now try to set the cell\n try:\n a = float(mmcif_dict[\"_cell.length_a\"][0])\n b = float(mmcif_dict[\"_cell.length_b\"][0])\n c = float(mmcif_dict[\"_cell.length_c\"][0])\n alpha = float(mmcif_dict[\"_cell.angle_alpha\"][0])\n beta = float(mmcif_dict[\"_cell.angle_beta\"][0])\n gamma = float(mmcif_dict[\"_cell.angle_gamma\"][0])\n cell = np.array((a, b, c, alpha, beta, gamma), \"f\")\n spacegroup = mmcif_dict[\"_symmetry.space_group_name_H-M\"][0]\n spacegroup = spacegroup[1:-1] # get rid of quotes!!\n if spacegroup is None:\n raise Exception\n structure_builder.set_symmetry(spacegroup, cell)\n except 
Exception:\n pass # no cell found, so just ignore\n\n\nclass FastMMCIFParser:\n \"\"\"Parse an MMCIF file and return a Structure object.\"\"\"\n\n def __init__(\n self, structure_builder=None, auth_chains=True, auth_residues=True, QUIET=False\n ):\n \"\"\"Create a FastMMCIFParser object.\n\n The mmCIF parser calls a number of standard methods in an aggregated\n StructureBuilder object. Normally this object is instantiated by the\n parser object itself, but if the user provides his/her own\n StructureBuilder object, the latter is used instead.\n\n The main difference between this class and the regular MMCIFParser is\n that only 'ATOM' and 'HETATM' lines are parsed here. Use if you are\n interested only in coordinate information.\n\n Arguments:\n - structure_builder - an optional user implemented StructureBuilder class.\n - auth_chains - True by default. If true, use the author chain IDs.\n If false, use the re-assigned mmCIF chain IDs.\n - auth_residues - True by default. If true, use the author residue numbering.\n If false, use the mmCIF \"label\" residue numbering, which has no insertion\n codes, and strictly increments residue numbers.\n NOTE: Non-polymers such as water don't have a \"label\" residue number,\n and will be skipped.\n\n - QUIET - Evaluated as a Boolean. If true, warnings issued in constructing\n the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.\n These warnings might be indicative of problems in the mmCIF file!\n\n \"\"\"\n if structure_builder is not None:\n self._structure_builder = structure_builder\n else:\n self._structure_builder = StructureBuilder()\n\n self.line_counter = 0\n self.build_structure = None\n self.auth_chains = bool(auth_chains)\n self.auth_residues = bool(auth_residues)\n self.QUIET = bool(QUIET)\n\n # Public methods\n\n def get_structure(self, structure_id, filename):\n \"\"\"Return the structure.\n\n Arguments:\n - structure_id - string, the id that will be used for the structure\n - filename - name of the mmCIF file OR an open filehandle\n\n \"\"\"\n with warnings.catch_warnings():\n if self.QUIET:\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n with as_handle(filename) as handle:\n self._build_structure(structure_id, handle)\n\n return self._structure_builder.get_structure()\n\n # Private methods\n\n def _build_structure(self, structure_id, filehandle):\n # two special chars as placeholders in the mmCIF format\n # for item values that cannot be explicitly assigned\n # see: pdbx/mmcif syntax web page\n _unassigned = {\".\", \"?\"}\n\n # Read only _atom_site. 
and atom_site_anisotrop entries\n read_atom, read_aniso = False, False\n _fields, _records = [], []\n _anisof, _anisors = [], []\n for line in filehandle:\n if line.startswith(\"_atom_site.\"):\n read_atom = True\n _fields.append(line.strip())\n elif line.startswith(\"_atom_site_anisotrop.\"):\n read_aniso = True\n _anisof.append(line.strip())\n elif read_atom and line.startswith(\"#\"):\n read_atom = False\n elif read_aniso and line.startswith(\"#\"):\n read_aniso = False\n elif read_atom:\n _records.append(line.strip())\n elif read_aniso:\n _anisors.append(line.strip())\n\n # Dumping the shlex module here since this particular\n # category should be rather straightforward.\n # Quite a performance boost..\n _record_tbl = zip(*map(str.split, _records))\n _anisob_tbl = zip(*map(str.split, _anisors))\n\n mmcif_dict = dict(zip(_fields, _record_tbl))\n mmcif_dict.update(dict(zip(_anisof, _anisob_tbl)))\n\n # Build structure object\n atom_serial_list = mmcif_dict[\"_atom_site.id\"]\n atom_id_list = mmcif_dict[\"_atom_site.label_atom_id\"]\n residue_id_list = mmcif_dict[\"_atom_site.label_comp_id\"]\n\n try:\n element_list = mmcif_dict[\"_atom_site.type_symbol\"]\n except KeyError:\n element_list = None\n\n if self.auth_chains:\n chain_id_list = mmcif_dict[\"_atom_site.auth_asym_id\"]\n else:\n chain_id_list = mmcif_dict[\"_atom_site.label_asym_id\"]\n\n x_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_x\"]]\n y_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_y\"]]\n z_list = [float(x) for x in mmcif_dict[\"_atom_site.Cartn_z\"]]\n alt_list = mmcif_dict[\"_atom_site.label_alt_id\"]\n icode_list = mmcif_dict[\"_atom_site.pdbx_PDB_ins_code\"]\n b_factor_list = mmcif_dict[\"_atom_site.B_iso_or_equiv\"]\n occupancy_list = mmcif_dict[\"_atom_site.occupancy\"]\n fieldname_list = mmcif_dict[\"_atom_site.group_PDB\"]\n\n try:\n serial_list = [int(n) for n in mmcif_dict[\"_atom_site.pdbx_PDB_model_num\"]]\n except KeyError:\n # No model number column\n serial_list = None\n except ValueError:\n # Invalid model number (malformed file)\n raise PDBConstructionException(\"Invalid model number\") from None\n\n try:\n aniso_u11 = mmcif_dict[\"_atom_site_anisotrop.U[1][1]\"]\n aniso_u12 = mmcif_dict[\"_atom_site_anisotrop.U[1][2]\"]\n aniso_u13 = mmcif_dict[\"_atom_site_anisotrop.U[1][3]\"]\n aniso_u22 = mmcif_dict[\"_atom_site_anisotrop.U[2][2]\"]\n aniso_u23 = mmcif_dict[\"_atom_site_anisotrop.U[2][3]\"]\n aniso_u33 = mmcif_dict[\"_atom_site_anisotrop.U[3][3]\"]\n aniso_flag = 1\n except KeyError:\n # no anisotropic B factors\n aniso_flag = 0\n\n if self.auth_residues:\n # if auth_seq_id is present, we use this.\n # Otherwise label_seq_id is used.\n if \"_atom_site.auth_seq_id\" in mmcif_dict:\n seq_id_list = mmcif_dict[\"_atom_site.auth_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n else:\n seq_id_list = mmcif_dict[\"_atom_site.label_seq_id\"]\n\n # Now loop over atoms and build the structure\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n structure_builder = self._structure_builder\n structure_builder.init_structure(structure_id)\n structure_builder.init_seg(\" \")\n\n # Historically, Biopython PDB parser uses model_id to mean array index\n # so serial_id means the Model ID specified in the file\n current_model_id = -1\n current_serial_id = -1\n for i in range(len(atom_id_list)):\n # set the line_counter for 'ATOM' lines only and not\n # as a global line counter found in the PDBParser()\n structure_builder.set_line_counter(i)\n\n 
serial = atom_serial_list[i]\n\n x = x_list[i]\n y = y_list[i]\n z = z_list[i]\n resname = residue_id_list[i]\n chainid = chain_id_list[i]\n altloc = alt_list[i]\n if altloc in _unassigned:\n altloc = \" \"\n resseq = seq_id_list[i]\n if resseq == \".\":\n # Non-existing residue ID\n try:\n msg_resseq = mmcif_dict[\"_atom_site.auth_seq_id\"][i]\n msg = \"Non-existing residue ID in chain '{}', residue '{}'\".format(\n chainid, msg_resseq\n )\n except (KeyError, IndexError):\n msg = f\"Non-existing residue ID in chain '{chainid}'\"\n warnings.warn(\n \"PDBConstructionWarning: \" + msg,\n PDBConstructionWarning,\n )\n continue\n int_resseq = int(resseq)\n icode = icode_list[i]\n if icode in _unassigned:\n icode = \" \"\n # Remove occasional \" from quoted atom names (e.g. xNA)\n name = atom_id_list[i].strip('\"')\n\n # occupancy & B factor\n try:\n tempfactor = float(b_factor_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing B factor\") from None\n\n try:\n occupancy = float(occupancy_list[i])\n except ValueError:\n raise PDBConstructionException(\"Invalid or missing occupancy\") from None\n\n fieldname = fieldname_list[i]\n if fieldname == \"HETATM\":\n hetatm_flag = \"H\"\n else:\n hetatm_flag = \" \"\n\n resseq = (hetatm_flag, int_resseq, icode)\n\n if serial_list is not None:\n # model column exists; use it\n serial_id = serial_list[i]\n if current_serial_id != serial_id:\n # if serial changes, update it and start new model\n current_serial_id = serial_id\n current_model_id += 1\n structure_builder.init_model(current_model_id, current_serial_id)\n current_chain_id = None\n current_residue_id = None\n current_resname = None\n else:\n # no explicit model column; initialize single model\n structure_builder.init_model(current_model_id)\n\n if current_chain_id != chainid:\n current_chain_id = chainid\n structure_builder.init_chain(current_chain_id)\n current_residue_id = None\n current_resname = None\n\n if current_residue_id != resseq or current_resname != resname:\n current_residue_id = resseq\n current_resname = resname\n structure_builder.init_residue(resname, hetatm_flag, int_resseq, icode)\n\n coord = np.array((x, y, z), \"f\")\n element = element_list[i] if element_list else None\n structure_builder.init_atom(\n name,\n coord,\n tempfactor,\n occupancy,\n altloc,\n name,\n serial_number=serial,\n element=element,\n )\n if aniso_flag == 1 and i < len(aniso_u11):\n u = (\n aniso_u11[i],\n aniso_u12[i],\n aniso_u13[i],\n aniso_u22[i],\n aniso_u23[i],\n aniso_u33[i],\n )\n mapped_anisou = [float(_) for _ in u]\n anisou_array = np.array(mapped_anisou, \"f\")\n structure_builder.set_anisou(anisou_array)\n", "path": "Bio/PDB/MMCIFParser.py" } ]
diff --git a/Bio/PDB/MMCIFParser.py b/Bio/PDB/MMCIFParser.py index c98388119fb..2cab625057a 100644 --- a/Bio/PDB/MMCIFParser.py +++ b/Bio/PDB/MMCIFParser.py @@ -93,7 +93,7 @@ def _update_header_entry(self, target_key, keys): item = val[0] except (TypeError, IndexError): continue - if item != "?": + if item != "?" and item != ".": self.header[target_key] = item break diff --git a/NEWS.rst b/NEWS.rst index 78e8fe851da..05b2b836fad 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -57,6 +57,8 @@ methods matching those added to strings in Python 3.9. They also now have a ``search`` method to ``Seq`` and ``MutableSeq`` object to search for multiple subsequences at the same time. +The MMCIFParser now ignores '.' header values. + Calling ``set_angle()`` on a residue dihedral angle previously set only the specified angle, now the default behavior is to update overlapping angles as well. For example, setting Psi (N-CA-CN) now updates the diff --git a/Tests/PDB/1SSU_mod.cif b/Tests/PDB/1SSU_mod.cif index c2f82e1a211..f6c7b86766a 100644 --- a/Tests/PDB/1SSU_mod.cif +++ b/Tests/PDB/1SSU_mod.cif @@ -2,6 +2,7 @@ data_1SSU # _refine.ls_d_res_high . _refine.ls_d_res_low ? +_em_3d_reconstruction.resolution 4.1 # loop_ _atom_site.group_PDB diff --git a/Tests/test_PDB_MMCIFParser.py b/Tests/test_PDB_MMCIFParser.py index 417262aa768..a1d5f6ee4d8 100644 --- a/Tests/test_PDB_MMCIFParser.py +++ b/Tests/test_PDB_MMCIFParser.py @@ -349,9 +349,10 @@ def test_header(self): self.assertEqual("X-RAY DIFFRACTION", structure.header["structure_method"]) self.assertEqual(1.7, structure.header["resolution"]) - # test not confused by '.' + # test not confused by '.' or '?' structure = parser.get_structure("example", "PDB/1SSU_mod.cif") - self.assertIsNone(structure.header["resolution"]) + # self.assertIsNone(structure.header["resolution"]) + self.assertEqual(4.1, structure.header["resolution"]) class CIFtoPDB(unittest.TestCase):
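To make the effect of the `'.'` handling concrete, here is a small self-contained approximation of the header-selection loop touched by the diff above; the function name and structure are simplified stand-ins, not Biopython's exact code. Both `'?'` and `'.'` are mmCIF placeholders for unassigned values, so the parser now falls through to the next candidate key.

```python
# Simplified stand-in for the header-entry loop changed in the diff above:
# values of "?" or "." are mmCIF placeholders and are skipped, so the next
# candidate key (here the 4.1 A EM resolution) is used instead.
def pick_header_value(mmcif_dict, keys):
    for key in keys:
        try:
            item = mmcif_dict[key][0]
        except (KeyError, TypeError, IndexError):
            continue
        if item != "?" and item != ".":
            return item
    return None


header = {
    "_refine.ls_d_res_high": ["."],              # unassigned placeholder
    "_em_3d_reconstruction.resolution": ["4.1"],
}
assert pick_header_value(
    header, ["_refine.ls_d_res_high", "_em_3d_reconstruction.resolution"]
) == "4.1"
```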
huggingface__transformers-13493
Handling tag with no prefix for aggregation_strategy in TokenClassificationPipeline
# 🚀 Feature request
Previously, the parameter grouped_entities would handle entities with no prefix (such as "PER" instead of "B-PER") and would correctly group similar adjacent entities. With the new parameter aggregation_strategy, this is no longer the case.
## Motivation
In some simple models, the prefix adds complexity that is not always required. Because of this, we are forced to add a prefix to make aggregation work even when the model does not require it.
## Your contribution
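As a rough illustration of the requested behaviour (and of the change applied in the patch further down), the sketch below shows a standalone `get_tag` helper that falls back to treating an unprefixed tag as a continuation; it is a simplified stand-in, not the pipeline's real method.

```python
# Minimal standalone sketch (not the pipeline's actual code): a get_tag helper
# that treats a bare tag such as "PER" as a continuation ("I"), so adjacent
# identical tags are merged into one group, mirroring the fix in the diff below.
from typing import Tuple


def get_tag(entity_name: str) -> Tuple[str, str]:
    if entity_name.startswith("B-"):
        return "B", entity_name[2:]
    if entity_name.startswith("I-"):
        return "I", entity_name[2:]
    # No B-/I- prefix: default to "I" so consecutive identical tags group together.
    return "I", entity_name


assert get_tag("B-PER") == ("B", "PER")
assert get_tag("PER") == ("I", "PER")  # bare tag treated as continuation
```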
[ { "content": "import warnings\nfrom typing import TYPE_CHECKING, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom ..file_utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available\nfrom ..modelcard import ModelCard\nfrom ..models.bert.tokenization_bert import BasicTokenizer\nfrom ..tokenization_utils import PreTrainedTokenizer\nfrom .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline\n\n\nif TYPE_CHECKING:\n from ..modeling_tf_utils import TFPreTrainedModel\n from ..modeling_utils import PreTrainedModel\n\nif is_tf_available():\n\n from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n\nif is_torch_available():\n import torch\n\n from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n\n\nclass TokenClassificationArgumentHandler(ArgumentHandler):\n \"\"\"\n Handles arguments for token classification.\n \"\"\"\n\n def __call__(self, inputs: Union[str, List[str]], **kwargs):\n\n if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:\n inputs = list(inputs)\n batch_size = len(inputs)\n elif isinstance(inputs, str):\n inputs = [inputs]\n batch_size = 1\n else:\n raise ValueError(\"At least one input is required.\")\n\n offset_mapping = kwargs.get(\"offset_mapping\")\n if offset_mapping:\n if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):\n offset_mapping = [offset_mapping]\n if len(offset_mapping) != batch_size:\n raise ValueError(\"offset_mapping should have the same batch size as the input\")\n return inputs, offset_mapping\n\n\nclass AggregationStrategy(ExplicitEnum):\n \"\"\"All the valid aggregation strategies for TokenClassificationPipeline\"\"\"\n\n NONE = \"none\"\n SIMPLE = \"simple\"\n FIRST = \"first\"\n AVERAGE = \"average\"\n MAX = \"max\"\n\n\n@add_end_docstrings(\n PIPELINE_INIT_ARGS,\n r\"\"\"\n ignore_labels (:obj:`List[str]`, defaults to :obj:`[\"O\"]`):\n A list of labels to ignore.\n grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):\n DEPRECATED, use :obj:`aggregation_strategy` instead. Whether or not to group the tokens corresponding to\n the same entity together in the predictions or not.\n aggregation_strategy (:obj:`str`, `optional`, defaults to :obj:`\"none\"`): The strategy to fuse (or not) tokens based on the model prediction.\n\n - \"none\" : Will simply not do any aggregation and simply return raw results from the model\n - \"simple\" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,\n I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{\"word\": ABC, \"entity\": \"TAG\"}, {\"word\": \"D\",\n \"entity\": \"TAG2\"}, {\"word\": \"E\", \"entity\": \"TAG2\"}] Notice that two consecutive B tags will end up as\n different entities. On word based languages, we might end up splitting words undesirably : Imagine\n Microsoft being tagged as [{\"word\": \"Micro\", \"entity\": \"ENTERPRISE\"}, {\"word\": \"soft\", \"entity\":\n \"NAME\"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages\n that support that meaning, which is basically tokens separated by a space). These mitigations will\n only work on real words, \"New york\" might still be tagged with two different entities.\n - \"first\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. 
Words will simply use the tag of the first token of the word when\n there is ambiguity.\n - \"average\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. scores will be averaged first across tokens, and then the maximum\n label is applied.\n - \"max\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. Word entity will simply be the token with the maximum score.\n \"\"\",\n)\nclass TokenClassificationPipeline(Pipeline):\n \"\"\"\n Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the `named entity recognition\n examples <../task_summary.html#named-entity-recognition>`__ for more information.\n\n This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following\n task identifier: :obj:`\"ner\"` (for predicting the classes of tokens in a sequence: person, organisation, location\n or miscellaneous).\n\n The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the\n up-to-date list of available models on `huggingface.co/models\n <https://huggingface.co/models?filter=token-classification>`__.\n \"\"\"\n\n default_input_names = \"sequences\"\n\n def __init__(\n self,\n model: Union[\"PreTrainedModel\", \"TFPreTrainedModel\"],\n tokenizer: PreTrainedTokenizer,\n modelcard: Optional[ModelCard] = None,\n framework: Optional[str] = None,\n args_parser: ArgumentHandler = TokenClassificationArgumentHandler(),\n device: int = -1,\n binary_output: bool = False,\n ignore_labels=[\"O\"],\n task: str = \"\",\n grouped_entities: Optional[bool] = None,\n ignore_subwords: Optional[bool] = None,\n aggregation_strategy: Optional[AggregationStrategy] = None,\n ):\n super().__init__(\n model=model,\n tokenizer=tokenizer,\n modelcard=modelcard,\n framework=framework,\n device=device,\n binary_output=binary_output,\n task=task,\n )\n\n self.check_model_type(\n TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n if self.framework == \"tf\"\n else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n )\n\n self._basic_tokenizer = BasicTokenizer(do_lower_case=False)\n self._args_parser = args_parser\n self.ignore_labels = ignore_labels\n\n if aggregation_strategy is None:\n aggregation_strategy = AggregationStrategy.NONE\n if grouped_entities is not None or ignore_subwords is not None:\n\n if grouped_entities and ignore_subwords:\n aggregation_strategy = AggregationStrategy.FIRST\n elif grouped_entities and not ignore_subwords:\n aggregation_strategy = AggregationStrategy.SIMPLE\n else:\n aggregation_strategy = AggregationStrategy.NONE\n\n if grouped_entities is not None:\n warnings.warn(\n f'`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"{aggregation_strategy}\"` instead.'\n )\n if ignore_subwords is not None:\n warnings.warn(\n f'`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"{aggregation_strategy}\"` instead.'\n )\n if isinstance(aggregation_strategy, str):\n aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]\n\n if (\n aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}\n and not self.tokenizer.is_fast\n ):\n raise ValueError(\n \"Slow tokenizers cannot handle subwords. 
Please set the `aggregation_strategy` option\"\n 'to `\"simple\"` or use a fast tokenizer.'\n )\n\n self.aggregation_strategy = aggregation_strategy\n\n def __call__(self, inputs: Union[str, List[str]], **kwargs):\n \"\"\"\n Classify each token of the text(s) given as inputs.\n\n Args:\n inputs (:obj:`str` or :obj:`List[str]`):\n One or several texts (or one list of texts) for token classification.\n\n Return:\n A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in\n the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy)\n with the following keys:\n\n - **word** (:obj:`str`) -- The token/word classified.\n - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.\n - **entity** (:obj:`str`) -- The entity predicted for that token/word (it is named `entity_group` when\n `aggregation_strategy` is not :obj:`\"none\"`.\n - **index** (:obj:`int`, only present when ``aggregation_strategy=\"none\"``) -- The index of the\n corresponding token in the sentence.\n - **start** (:obj:`int`, `optional`) -- The index of the start of the corresponding entity in the sentence.\n Only exists if the offsets are available within the tokenizer\n - **end** (:obj:`int`, `optional`) -- The index of the end of the corresponding entity in the sentence.\n Only exists if the offsets are available within the tokenizer\n \"\"\"\n\n _inputs, offset_mappings = self._args_parser(inputs, **kwargs)\n\n answers = []\n\n for i, sentence in enumerate(_inputs):\n\n # Manage correct placement of the tensors\n with self.device_placement():\n\n tokens = self.tokenizer(\n sentence,\n return_attention_mask=False,\n return_tensors=self.framework,\n truncation=True,\n return_special_tokens_mask=True,\n return_offsets_mapping=self.tokenizer.is_fast,\n )\n if self.tokenizer.is_fast:\n offset_mapping = tokens.pop(\"offset_mapping\").cpu().numpy()[0]\n elif offset_mappings:\n offset_mapping = offset_mappings[i]\n else:\n offset_mapping = None\n\n special_tokens_mask = tokens.pop(\"special_tokens_mask\").cpu().numpy()[0]\n\n # Forward\n if self.framework == \"tf\":\n entities = self.model(tokens.data)[0][0].numpy()\n input_ids = tokens[\"input_ids\"].numpy()[0]\n else:\n with torch.no_grad():\n tokens = self.ensure_tensor_on_device(**tokens)\n entities = self.model(**tokens)[0][0].cpu().numpy()\n input_ids = tokens[\"input_ids\"].cpu().numpy()[0]\n\n scores = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)\n pre_entities = self.gather_pre_entities(sentence, input_ids, scores, offset_mapping, special_tokens_mask)\n grouped_entities = self.aggregate(pre_entities, self.aggregation_strategy)\n # Filter anything that is in self.ignore_labels\n entities = [\n entity\n for entity in grouped_entities\n if entity.get(\"entity\", None) not in self.ignore_labels\n and entity.get(\"entity_group\", None) not in self.ignore_labels\n ]\n answers.append(entities)\n\n if len(answers) == 1:\n return answers[0]\n return answers\n\n def gather_pre_entities(\n self,\n sentence: str,\n input_ids: np.ndarray,\n scores: np.ndarray,\n offset_mapping: Optional[List[Tuple[int, int]]],\n special_tokens_mask: np.ndarray,\n ) -> List[dict]:\n \"\"\"Fuse various numpy arrays into dicts with all the information needed for aggregation\"\"\"\n pre_entities = []\n for idx, token_scores in enumerate(scores):\n # Filter special_tokens, they should only occur\n # at the sentence boundaries since we're not encoding pairs of\n # sentences so we 
don't have to keep track of those.\n if special_tokens_mask[idx]:\n continue\n\n word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))\n if offset_mapping is not None:\n start_ind, end_ind = offset_mapping[idx]\n word_ref = sentence[start_ind:end_ind]\n if getattr(self.tokenizer._tokenizer.model, \"continuing_subword_prefix\", None):\n # This is a BPE, word aware tokenizer, there is a correct way\n # to fuse tokens\n is_subword = len(word) != len(word_ref)\n else:\n # This is a fallback heuristic. This will fail most likely on any kind of text + punctuation mixtures that will be considered \"words\". Non word aware models cannot do better than this unfortunately.\n if self.aggregation_strategy in {\n AggregationStrategy.FIRST,\n AggregationStrategy.AVERAGE,\n AggregationStrategy.MAX,\n }:\n warnings.warn(\"Tokenizer does not support real words, using fallback heuristic\", UserWarning)\n is_subword = sentence[start_ind - 1 : start_ind] != \" \" if start_ind > 0 else False\n\n if int(input_ids[idx]) == self.tokenizer.unk_token_id:\n word = word_ref\n is_subword = False\n else:\n start_ind = None\n end_ind = None\n is_subword = False\n\n pre_entity = {\n \"word\": word,\n \"scores\": token_scores,\n \"start\": start_ind,\n \"end\": end_ind,\n \"index\": idx,\n \"is_subword\": is_subword,\n }\n pre_entities.append(pre_entity)\n return pre_entities\n\n def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:\n if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:\n entities = []\n for pre_entity in pre_entities:\n entity_idx = pre_entity[\"scores\"].argmax()\n score = pre_entity[\"scores\"][entity_idx]\n entity = {\n \"entity\": self.model.config.id2label[entity_idx],\n \"score\": score,\n \"index\": pre_entity[\"index\"],\n \"word\": pre_entity[\"word\"],\n \"start\": pre_entity[\"start\"],\n \"end\": pre_entity[\"end\"],\n }\n entities.append(entity)\n else:\n entities = self.aggregate_words(pre_entities, aggregation_strategy)\n\n if aggregation_strategy == AggregationStrategy.NONE:\n return entities\n return self.group_entities(entities)\n\n def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict:\n word = self.tokenizer.convert_tokens_to_string([entity[\"word\"] for entity in entities])\n if aggregation_strategy == AggregationStrategy.FIRST:\n scores = entities[0][\"scores\"]\n idx = scores.argmax()\n score = scores[idx]\n entity = self.model.config.id2label[idx]\n elif aggregation_strategy == AggregationStrategy.MAX:\n max_entity = max(entities, key=lambda entity: entity[\"scores\"].max())\n scores = max_entity[\"scores\"]\n idx = scores.argmax()\n score = scores[idx]\n entity = self.model.config.id2label[idx]\n elif aggregation_strategy == AggregationStrategy.AVERAGE:\n scores = np.stack([entity[\"scores\"] for entity in entities])\n average_scores = np.nanmean(scores, axis=0)\n entity_idx = average_scores.argmax()\n entity = self.model.config.id2label[entity_idx]\n score = average_scores[entity_idx]\n else:\n raise ValueError(\"Invalid aggregation_strategy\")\n new_entity = {\n \"entity\": entity,\n \"score\": score,\n \"word\": word,\n \"start\": entities[0][\"start\"],\n \"end\": entities[-1][\"end\"],\n }\n return new_entity\n\n def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:\n \"\"\"\n Override tokens from a given word that disagree to force agreement on word boundaries.\n\n Example: micro|soft| 
com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|\n company| B-ENT I-ENT\n \"\"\"\n assert aggregation_strategy not in {\n AggregationStrategy.NONE,\n AggregationStrategy.SIMPLE,\n }, \"NONE and SIMPLE strategies are invalid\"\n\n word_entities = []\n word_group = None\n for entity in entities:\n if word_group is None:\n word_group = [entity]\n elif entity[\"is_subword\"]:\n word_group.append(entity)\n else:\n word_entities.append(self.aggregate_word(word_group, aggregation_strategy))\n word_group = [entity]\n # Last item\n word_entities.append(self.aggregate_word(word_group, aggregation_strategy))\n return word_entities\n\n def group_sub_entities(self, entities: List[dict]) -> dict:\n \"\"\"\n Group together the adjacent tokens with the same entity predicted.\n\n Args:\n entities (:obj:`dict`): The entities predicted by the pipeline.\n \"\"\"\n # Get the first entity in the entity group\n entity = entities[0][\"entity\"].split(\"-\")[-1]\n scores = np.nanmean([entity[\"score\"] for entity in entities])\n tokens = [entity[\"word\"] for entity in entities]\n\n entity_group = {\n \"entity_group\": entity,\n \"score\": np.mean(scores),\n \"word\": self.tokenizer.convert_tokens_to_string(tokens),\n \"start\": entities[0][\"start\"],\n \"end\": entities[-1][\"end\"],\n }\n return entity_group\n\n def get_tag(self, entity_name: str) -> Tuple[str, str]:\n if entity_name.startswith(\"B-\"):\n bi = \"B\"\n tag = entity_name[2:]\n elif entity_name.startswith(\"I-\"):\n bi = \"I\"\n tag = entity_name[2:]\n else:\n # It's not in B-, I- format\n bi = \"B\"\n tag = entity_name\n return bi, tag\n\n def group_entities(self, entities: List[dict]) -> List[dict]:\n \"\"\"\n Find and group together the adjacent tokens with the same entity predicted.\n\n Args:\n entities (:obj:`dict`): The entities predicted by the pipeline.\n \"\"\"\n\n entity_groups = []\n entity_group_disagg = []\n\n for entity in entities:\n if not entity_group_disagg:\n entity_group_disagg.append(entity)\n continue\n\n # If the current entity is similar and adjacent to the previous entity,\n # append it to the disaggregated entity group\n # The split is meant to account for the \"B\" and \"I\" prefixes\n # Shouldn't merge if both entities are B-type\n bi, tag = self.get_tag(entity[\"entity\"])\n last_bi, last_tag = self.get_tag(entity_group_disagg[-1][\"entity\"])\n\n if tag == last_tag and bi != \"B\":\n # Modify subword type to be previous_type\n entity_group_disagg.append(entity)\n else:\n # If the current entity is different from the previous entity\n # aggregate the disaggregated entity group\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n entity_group_disagg = [entity]\n if entity_group_disagg:\n # it's the last entity, add it to the entity groups\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n\n return entity_groups\n\n\nNerPipeline = TokenClassificationPipeline\n", "path": "src/transformers/pipelines/token_classification.py" } ]
[ { "content": "import warnings\nfrom typing import TYPE_CHECKING, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom ..file_utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available\nfrom ..modelcard import ModelCard\nfrom ..models.bert.tokenization_bert import BasicTokenizer\nfrom ..tokenization_utils import PreTrainedTokenizer\nfrom .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline\n\n\nif TYPE_CHECKING:\n from ..modeling_tf_utils import TFPreTrainedModel\n from ..modeling_utils import PreTrainedModel\n\nif is_tf_available():\n\n from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n\nif is_torch_available():\n import torch\n\n from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n\n\nclass TokenClassificationArgumentHandler(ArgumentHandler):\n \"\"\"\n Handles arguments for token classification.\n \"\"\"\n\n def __call__(self, inputs: Union[str, List[str]], **kwargs):\n\n if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:\n inputs = list(inputs)\n batch_size = len(inputs)\n elif isinstance(inputs, str):\n inputs = [inputs]\n batch_size = 1\n else:\n raise ValueError(\"At least one input is required.\")\n\n offset_mapping = kwargs.get(\"offset_mapping\")\n if offset_mapping:\n if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):\n offset_mapping = [offset_mapping]\n if len(offset_mapping) != batch_size:\n raise ValueError(\"offset_mapping should have the same batch size as the input\")\n return inputs, offset_mapping\n\n\nclass AggregationStrategy(ExplicitEnum):\n \"\"\"All the valid aggregation strategies for TokenClassificationPipeline\"\"\"\n\n NONE = \"none\"\n SIMPLE = \"simple\"\n FIRST = \"first\"\n AVERAGE = \"average\"\n MAX = \"max\"\n\n\n@add_end_docstrings(\n PIPELINE_INIT_ARGS,\n r\"\"\"\n ignore_labels (:obj:`List[str]`, defaults to :obj:`[\"O\"]`):\n A list of labels to ignore.\n grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):\n DEPRECATED, use :obj:`aggregation_strategy` instead. Whether or not to group the tokens corresponding to\n the same entity together in the predictions or not.\n aggregation_strategy (:obj:`str`, `optional`, defaults to :obj:`\"none\"`): The strategy to fuse (or not) tokens based on the model prediction.\n\n - \"none\" : Will simply not do any aggregation and simply return raw results from the model\n - \"simple\" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,\n I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{\"word\": ABC, \"entity\": \"TAG\"}, {\"word\": \"D\",\n \"entity\": \"TAG2\"}, {\"word\": \"E\", \"entity\": \"TAG2\"}] Notice that two consecutive B tags will end up as\n different entities. On word based languages, we might end up splitting words undesirably : Imagine\n Microsoft being tagged as [{\"word\": \"Micro\", \"entity\": \"ENTERPRISE\"}, {\"word\": \"soft\", \"entity\":\n \"NAME\"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages\n that support that meaning, which is basically tokens separated by a space). These mitigations will\n only work on real words, \"New york\" might still be tagged with two different entities.\n - \"first\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. 
Words will simply use the tag of the first token of the word when\n there is ambiguity.\n - \"average\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. scores will be averaged first across tokens, and then the maximum\n label is applied.\n - \"max\" : (works only on word based models) Will use the :obj:`SIMPLE` strategy except that words,\n cannot end up with different tags. Word entity will simply be the token with the maximum score.\n \"\"\",\n)\nclass TokenClassificationPipeline(Pipeline):\n \"\"\"\n Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the `named entity recognition\n examples <../task_summary.html#named-entity-recognition>`__ for more information.\n\n This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following\n task identifier: :obj:`\"ner\"` (for predicting the classes of tokens in a sequence: person, organisation, location\n or miscellaneous).\n\n The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the\n up-to-date list of available models on `huggingface.co/models\n <https://huggingface.co/models?filter=token-classification>`__.\n \"\"\"\n\n default_input_names = \"sequences\"\n\n def __init__(\n self,\n model: Union[\"PreTrainedModel\", \"TFPreTrainedModel\"],\n tokenizer: PreTrainedTokenizer,\n modelcard: Optional[ModelCard] = None,\n framework: Optional[str] = None,\n args_parser: ArgumentHandler = TokenClassificationArgumentHandler(),\n device: int = -1,\n binary_output: bool = False,\n ignore_labels=[\"O\"],\n task: str = \"\",\n grouped_entities: Optional[bool] = None,\n ignore_subwords: Optional[bool] = None,\n aggregation_strategy: Optional[AggregationStrategy] = None,\n ):\n super().__init__(\n model=model,\n tokenizer=tokenizer,\n modelcard=modelcard,\n framework=framework,\n device=device,\n binary_output=binary_output,\n task=task,\n )\n\n self.check_model_type(\n TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n if self.framework == \"tf\"\n else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING\n )\n\n self._basic_tokenizer = BasicTokenizer(do_lower_case=False)\n self._args_parser = args_parser\n self.ignore_labels = ignore_labels\n\n if aggregation_strategy is None:\n aggregation_strategy = AggregationStrategy.NONE\n if grouped_entities is not None or ignore_subwords is not None:\n\n if grouped_entities and ignore_subwords:\n aggregation_strategy = AggregationStrategy.FIRST\n elif grouped_entities and not ignore_subwords:\n aggregation_strategy = AggregationStrategy.SIMPLE\n else:\n aggregation_strategy = AggregationStrategy.NONE\n\n if grouped_entities is not None:\n warnings.warn(\n f'`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"{aggregation_strategy}\"` instead.'\n )\n if ignore_subwords is not None:\n warnings.warn(\n f'`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"{aggregation_strategy}\"` instead.'\n )\n if isinstance(aggregation_strategy, str):\n aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]\n\n if (\n aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}\n and not self.tokenizer.is_fast\n ):\n raise ValueError(\n \"Slow tokenizers cannot handle subwords. 
Please set the `aggregation_strategy` option\"\n 'to `\"simple\"` or use a fast tokenizer.'\n )\n\n self.aggregation_strategy = aggregation_strategy\n\n def __call__(self, inputs: Union[str, List[str]], **kwargs):\n \"\"\"\n Classify each token of the text(s) given as inputs.\n\n Args:\n inputs (:obj:`str` or :obj:`List[str]`):\n One or several texts (or one list of texts) for token classification.\n\n Return:\n A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in\n the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy)\n with the following keys:\n\n - **word** (:obj:`str`) -- The token/word classified.\n - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.\n - **entity** (:obj:`str`) -- The entity predicted for that token/word (it is named `entity_group` when\n `aggregation_strategy` is not :obj:`\"none\"`.\n - **index** (:obj:`int`, only present when ``aggregation_strategy=\"none\"``) -- The index of the\n corresponding token in the sentence.\n - **start** (:obj:`int`, `optional`) -- The index of the start of the corresponding entity in the sentence.\n Only exists if the offsets are available within the tokenizer\n - **end** (:obj:`int`, `optional`) -- The index of the end of the corresponding entity in the sentence.\n Only exists if the offsets are available within the tokenizer\n \"\"\"\n\n _inputs, offset_mappings = self._args_parser(inputs, **kwargs)\n\n answers = []\n\n for i, sentence in enumerate(_inputs):\n\n # Manage correct placement of the tensors\n with self.device_placement():\n\n tokens = self.tokenizer(\n sentence,\n return_attention_mask=False,\n return_tensors=self.framework,\n truncation=True,\n return_special_tokens_mask=True,\n return_offsets_mapping=self.tokenizer.is_fast,\n )\n if self.tokenizer.is_fast:\n offset_mapping = tokens.pop(\"offset_mapping\").cpu().numpy()[0]\n elif offset_mappings:\n offset_mapping = offset_mappings[i]\n else:\n offset_mapping = None\n\n special_tokens_mask = tokens.pop(\"special_tokens_mask\").cpu().numpy()[0]\n\n # Forward\n if self.framework == \"tf\":\n entities = self.model(tokens.data)[0][0].numpy()\n input_ids = tokens[\"input_ids\"].numpy()[0]\n else:\n with torch.no_grad():\n tokens = self.ensure_tensor_on_device(**tokens)\n entities = self.model(**tokens)[0][0].cpu().numpy()\n input_ids = tokens[\"input_ids\"].cpu().numpy()[0]\n\n scores = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)\n pre_entities = self.gather_pre_entities(sentence, input_ids, scores, offset_mapping, special_tokens_mask)\n grouped_entities = self.aggregate(pre_entities, self.aggregation_strategy)\n # Filter anything that is in self.ignore_labels\n entities = [\n entity\n for entity in grouped_entities\n if entity.get(\"entity\", None) not in self.ignore_labels\n and entity.get(\"entity_group\", None) not in self.ignore_labels\n ]\n answers.append(entities)\n\n if len(answers) == 1:\n return answers[0]\n return answers\n\n def gather_pre_entities(\n self,\n sentence: str,\n input_ids: np.ndarray,\n scores: np.ndarray,\n offset_mapping: Optional[List[Tuple[int, int]]],\n special_tokens_mask: np.ndarray,\n ) -> List[dict]:\n \"\"\"Fuse various numpy arrays into dicts with all the information needed for aggregation\"\"\"\n pre_entities = []\n for idx, token_scores in enumerate(scores):\n # Filter special_tokens, they should only occur\n # at the sentence boundaries since we're not encoding pairs of\n # sentences so we 
don't have to keep track of those.\n if special_tokens_mask[idx]:\n continue\n\n word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))\n if offset_mapping is not None:\n start_ind, end_ind = offset_mapping[idx]\n word_ref = sentence[start_ind:end_ind]\n if getattr(self.tokenizer._tokenizer.model, \"continuing_subword_prefix\", None):\n # This is a BPE, word aware tokenizer, there is a correct way\n # to fuse tokens\n is_subword = len(word) != len(word_ref)\n else:\n # This is a fallback heuristic. This will fail most likely on any kind of text + punctuation mixtures that will be considered \"words\". Non word aware models cannot do better than this unfortunately.\n if self.aggregation_strategy in {\n AggregationStrategy.FIRST,\n AggregationStrategy.AVERAGE,\n AggregationStrategy.MAX,\n }:\n warnings.warn(\"Tokenizer does not support real words, using fallback heuristic\", UserWarning)\n is_subword = sentence[start_ind - 1 : start_ind] != \" \" if start_ind > 0 else False\n\n if int(input_ids[idx]) == self.tokenizer.unk_token_id:\n word = word_ref\n is_subword = False\n else:\n start_ind = None\n end_ind = None\n is_subword = False\n\n pre_entity = {\n \"word\": word,\n \"scores\": token_scores,\n \"start\": start_ind,\n \"end\": end_ind,\n \"index\": idx,\n \"is_subword\": is_subword,\n }\n pre_entities.append(pre_entity)\n return pre_entities\n\n def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:\n if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:\n entities = []\n for pre_entity in pre_entities:\n entity_idx = pre_entity[\"scores\"].argmax()\n score = pre_entity[\"scores\"][entity_idx]\n entity = {\n \"entity\": self.model.config.id2label[entity_idx],\n \"score\": score,\n \"index\": pre_entity[\"index\"],\n \"word\": pre_entity[\"word\"],\n \"start\": pre_entity[\"start\"],\n \"end\": pre_entity[\"end\"],\n }\n entities.append(entity)\n else:\n entities = self.aggregate_words(pre_entities, aggregation_strategy)\n\n if aggregation_strategy == AggregationStrategy.NONE:\n return entities\n return self.group_entities(entities)\n\n def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict:\n word = self.tokenizer.convert_tokens_to_string([entity[\"word\"] for entity in entities])\n if aggregation_strategy == AggregationStrategy.FIRST:\n scores = entities[0][\"scores\"]\n idx = scores.argmax()\n score = scores[idx]\n entity = self.model.config.id2label[idx]\n elif aggregation_strategy == AggregationStrategy.MAX:\n max_entity = max(entities, key=lambda entity: entity[\"scores\"].max())\n scores = max_entity[\"scores\"]\n idx = scores.argmax()\n score = scores[idx]\n entity = self.model.config.id2label[idx]\n elif aggregation_strategy == AggregationStrategy.AVERAGE:\n scores = np.stack([entity[\"scores\"] for entity in entities])\n average_scores = np.nanmean(scores, axis=0)\n entity_idx = average_scores.argmax()\n entity = self.model.config.id2label[entity_idx]\n score = average_scores[entity_idx]\n else:\n raise ValueError(\"Invalid aggregation_strategy\")\n new_entity = {\n \"entity\": entity,\n \"score\": score,\n \"word\": word,\n \"start\": entities[0][\"start\"],\n \"end\": entities[-1][\"end\"],\n }\n return new_entity\n\n def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:\n \"\"\"\n Override tokens from a given word that disagree to force agreement on word boundaries.\n\n Example: micro|soft| 
com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|\n company| B-ENT I-ENT\n \"\"\"\n assert aggregation_strategy not in {\n AggregationStrategy.NONE,\n AggregationStrategy.SIMPLE,\n }, \"NONE and SIMPLE strategies are invalid\"\n\n word_entities = []\n word_group = None\n for entity in entities:\n if word_group is None:\n word_group = [entity]\n elif entity[\"is_subword\"]:\n word_group.append(entity)\n else:\n word_entities.append(self.aggregate_word(word_group, aggregation_strategy))\n word_group = [entity]\n # Last item\n word_entities.append(self.aggregate_word(word_group, aggregation_strategy))\n return word_entities\n\n def group_sub_entities(self, entities: List[dict]) -> dict:\n \"\"\"\n Group together the adjacent tokens with the same entity predicted.\n\n Args:\n entities (:obj:`dict`): The entities predicted by the pipeline.\n \"\"\"\n # Get the first entity in the entity group\n entity = entities[0][\"entity\"].split(\"-\")[-1]\n scores = np.nanmean([entity[\"score\"] for entity in entities])\n tokens = [entity[\"word\"] for entity in entities]\n\n entity_group = {\n \"entity_group\": entity,\n \"score\": np.mean(scores),\n \"word\": self.tokenizer.convert_tokens_to_string(tokens),\n \"start\": entities[0][\"start\"],\n \"end\": entities[-1][\"end\"],\n }\n return entity_group\n\n def get_tag(self, entity_name: str) -> Tuple[str, str]:\n if entity_name.startswith(\"B-\"):\n bi = \"B\"\n tag = entity_name[2:]\n elif entity_name.startswith(\"I-\"):\n bi = \"I\"\n tag = entity_name[2:]\n else:\n # It's not in B-, I- format\n # Default to I- for continuation.\n bi = \"I\"\n tag = entity_name\n return bi, tag\n\n def group_entities(self, entities: List[dict]) -> List[dict]:\n \"\"\"\n Find and group together the adjacent tokens with the same entity predicted.\n\n Args:\n entities (:obj:`dict`): The entities predicted by the pipeline.\n \"\"\"\n\n entity_groups = []\n entity_group_disagg = []\n\n for entity in entities:\n if not entity_group_disagg:\n entity_group_disagg.append(entity)\n continue\n\n # If the current entity is similar and adjacent to the previous entity,\n # append it to the disaggregated entity group\n # The split is meant to account for the \"B\" and \"I\" prefixes\n # Shouldn't merge if both entities are B-type\n bi, tag = self.get_tag(entity[\"entity\"])\n last_bi, last_tag = self.get_tag(entity_group_disagg[-1][\"entity\"])\n\n if tag == last_tag and bi != \"B\":\n # Modify subword type to be previous_type\n entity_group_disagg.append(entity)\n else:\n # If the current entity is different from the previous entity\n # aggregate the disaggregated entity group\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n entity_group_disagg = [entity]\n if entity_group_disagg:\n # it's the last entity, add it to the entity groups\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n\n return entity_groups\n\n\nNerPipeline = TokenClassificationPipeline\n", "path": "src/transformers/pipelines/token_classification.py" } ]
diff --git a/src/transformers/pipelines/token_classification.py b/src/transformers/pipelines/token_classification.py index 6fc1de1dcb38..4a7bbeb77f7a 100644 --- a/src/transformers/pipelines/token_classification.py +++ b/src/transformers/pipelines/token_classification.py @@ -411,7 +411,8 @@ def get_tag(self, entity_name: str) -> Tuple[str, str]: tag = entity_name[2:] else: # It's not in B-, I- format - bi = "B" + # Default to I- for continuation. + bi = "I" tag = entity_name return bi, tag diff --git a/tests/test_pipelines_token_classification.py b/tests/test_pipelines_token_classification.py index b8b572e51782..dcb4a2e53573 100644 --- a/tests/test_pipelines_token_classification.py +++ b/tests/test_pipelines_token_classification.py @@ -318,6 +318,59 @@ def test_aggregation_strategy_byte_level_tokenizer(self): ], ) + @require_torch + def test_aggregation_strategy_no_b_i_prefix(self): + model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" + tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) + token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") + # Just to understand scores indexes in this test + token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"} + example = [ + { + # fmt : off + "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), + "index": 1, + "is_subword": False, + "word": "En", + "start": 0, + "end": 2, + }, + { + # fmt : off + "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), + "index": 2, + "is_subword": True, + "word": "##zo", + "start": 2, + "end": 4, + }, + { + # fmt: off + "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), + # fmt: on + "index": 7, + "word": "UN", + "is_subword": False, + "start": 11, + "end": 13, + }, + ] + self.assertEqual( + nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), + [ + {"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1}, + {"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, + {"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, + ], + ) + self.assertEqual( + nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), + [ + {"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, + {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, + ], + ) + @require_torch def test_aggregation_strategy(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
pyca__cryptography-2436
Add __repr__ for x509.Extensions
x509.Extensions should have a __repr__
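The eventual patch is not shown in full here, but one plausible shape for the requested `__repr__`, following the `<ClassName(...)>` convention already used by the other classes in `cryptography/x509/extensions.py` below, is sketched here; the class body is reduced to the minimum needed to run the example and is not the library's actual implementation.

```python
# Hedged sketch of a possible Extensions.__repr__, reusing the
# "<ClassName(...)>" style seen elsewhere in the module (e.g.
# "<ExtendedKeyUsage({0})>"). Reduced to a minimal runnable stand-in.
class Extensions(object):
    def __init__(self, extensions):
        self._extensions = extensions

    def __repr__(self):
        return "<Extensions({0})>".format(self._extensions)


print(Extensions(["ext1", "ext2"]))  # <Extensions(['ext1', 'ext2'])>
```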
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport hashlib\nimport ipaddress\nfrom enum import Enum\n\nfrom pyasn1.codec.der import decoder\nfrom pyasn1.type import namedtype, univ\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import constant_time, serialization\nfrom cryptography.x509.general_name import GeneralName, IPAddress, OtherName\nfrom cryptography.x509.name import Name\nfrom cryptography.x509.oid import (\n AuthorityInformationAccessOID, ExtensionOID, ObjectIdentifier\n)\n\n\nclass _SubjectPublicKeyInfo(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('algorithm', univ.Sequence()),\n namedtype.NamedType('subjectPublicKey', univ.BitString())\n )\n\n\ndef _key_identifier_from_public_key(public_key):\n # This is a very slow way to do this.\n serialized = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.SubjectPublicKeyInfo\n )\n spki, remaining = decoder.decode(\n serialized, asn1Spec=_SubjectPublicKeyInfo()\n )\n assert not remaining\n # the univ.BitString object is a tuple of bits. We need bytes and\n # pyasn1 really doesn't want to give them to us. To get it we'll\n # build an integer and convert that to bytes.\n bits = 0\n for bit in spki.getComponentByName(\"subjectPublicKey\"):\n bits = bits << 1 | bit\n\n data = utils.int_to_bytes(bits)\n return hashlib.sha1(data).digest()\n\n\nclass DuplicateExtension(Exception):\n def __init__(self, msg, oid):\n super(DuplicateExtension, self).__init__(msg)\n self.oid = oid\n\n\nclass UnsupportedExtension(Exception):\n def __init__(self, msg, oid):\n super(UnsupportedExtension, self).__init__(msg)\n self.oid = oid\n\n\nclass ExtensionNotFound(Exception):\n def __init__(self, msg, oid):\n super(ExtensionNotFound, self).__init__(msg)\n self.oid = oid\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass ExtensionType(object):\n @abc.abstractproperty\n def oid(self):\n \"\"\"\n Returns the oid associated with the given extension type.\n \"\"\"\n\n\nclass Extensions(object):\n def __init__(self, extensions):\n self._extensions = extensions\n\n def get_extension_for_oid(self, oid):\n for ext in self:\n if ext.oid == oid:\n return ext\n\n raise ExtensionNotFound(\"No {0} extension was found\".format(oid), oid)\n\n def get_extension_for_class(self, extclass):\n for ext in self:\n if isinstance(ext.value, extclass):\n return ext\n\n raise ExtensionNotFound(\n \"No {0} extension was found\".format(extclass), extclass.oid\n )\n\n def __iter__(self):\n return iter(self._extensions)\n\n def __len__(self):\n return len(self._extensions)\n\n\[email protected]_interface(ExtensionType)\nclass AuthorityKeyIdentifier(object):\n oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER\n\n def __init__(self, key_identifier, authority_cert_issuer,\n authority_cert_serial_number):\n if authority_cert_issuer or authority_cert_serial_number:\n if not authority_cert_issuer or not authority_cert_serial_number:\n raise ValueError(\n \"authority_cert_issuer and authority_cert_serial_number \"\n \"must both be present or both None\"\n )\n\n if not all(\n isinstance(x, GeneralName) for x in authority_cert_issuer\n ):\n raise TypeError(\n \"authority_cert_issuer must be a list of GeneralName \"\n \"objects\"\n )\n\n if not 
isinstance(authority_cert_serial_number, six.integer_types):\n raise TypeError(\n \"authority_cert_serial_number must be an integer\"\n )\n\n self._key_identifier = key_identifier\n self._authority_cert_issuer = authority_cert_issuer\n self._authority_cert_serial_number = authority_cert_serial_number\n\n @classmethod\n def from_issuer_public_key(cls, public_key):\n digest = _key_identifier_from_public_key(public_key)\n return cls(\n key_identifier=digest,\n authority_cert_issuer=None,\n authority_cert_serial_number=None\n )\n\n def __repr__(self):\n return (\n \"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, \"\n \"authority_cert_issuer={0.authority_cert_issuer}, \"\n \"authority_cert_serial_number={0.authority_cert_serial_number}\"\n \")>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, AuthorityKeyIdentifier):\n return NotImplemented\n\n return (\n self.key_identifier == other.key_identifier and\n self.authority_cert_issuer == other.authority_cert_issuer and\n self.authority_cert_serial_number ==\n other.authority_cert_serial_number\n )\n\n def __ne__(self, other):\n return not self == other\n\n key_identifier = utils.read_only_property(\"_key_identifier\")\n authority_cert_issuer = utils.read_only_property(\"_authority_cert_issuer\")\n authority_cert_serial_number = utils.read_only_property(\n \"_authority_cert_serial_number\"\n )\n\n\[email protected]_interface(ExtensionType)\nclass SubjectKeyIdentifier(object):\n oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER\n\n def __init__(self, digest):\n self._digest = digest\n\n @classmethod\n def from_public_key(cls, public_key):\n return cls(_key_identifier_from_public_key(public_key))\n\n digest = utils.read_only_property(\"_digest\")\n\n def __repr__(self):\n return \"<SubjectKeyIdentifier(digest={0!r})>\".format(self.digest)\n\n def __eq__(self, other):\n if not isinstance(other, SubjectKeyIdentifier):\n return NotImplemented\n\n return constant_time.bytes_eq(self.digest, other.digest)\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass AuthorityInformationAccess(object):\n oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS\n\n def __init__(self, descriptions):\n if not all(isinstance(x, AccessDescription) for x in descriptions):\n raise TypeError(\n \"Every item in the descriptions list must be an \"\n \"AccessDescription\"\n )\n\n self._descriptions = descriptions\n\n def __iter__(self):\n return iter(self._descriptions)\n\n def __len__(self):\n return len(self._descriptions)\n\n def __repr__(self):\n return \"<AuthorityInformationAccess({0})>\".format(self._descriptions)\n\n def __eq__(self, other):\n if not isinstance(other, AuthorityInformationAccess):\n return NotImplemented\n\n return self._descriptions == other._descriptions\n\n def __ne__(self, other):\n return not self == other\n\n\nclass AccessDescription(object):\n def __init__(self, access_method, access_location):\n if not (access_method == AuthorityInformationAccessOID.OCSP or\n access_method == AuthorityInformationAccessOID.CA_ISSUERS):\n raise ValueError(\n \"access_method must be OID_OCSP or OID_CA_ISSUERS\"\n )\n\n if not isinstance(access_location, GeneralName):\n raise TypeError(\"access_location must be a GeneralName\")\n\n self._access_method = access_method\n self._access_location = access_location\n\n def __repr__(self):\n return (\n \"<AccessDescription(access_method={0.access_method}, access_locati\"\n \"on={0.access_location})>\".format(self)\n )\n\n def __eq__(self, 
other):\n if not isinstance(other, AccessDescription):\n return NotImplemented\n\n return (\n self.access_method == other.access_method and\n self.access_location == other.access_location\n )\n\n def __ne__(self, other):\n return not self == other\n\n access_method = utils.read_only_property(\"_access_method\")\n access_location = utils.read_only_property(\"_access_location\")\n\n\[email protected]_interface(ExtensionType)\nclass BasicConstraints(object):\n oid = ExtensionOID.BASIC_CONSTRAINTS\n\n def __init__(self, ca, path_length):\n if not isinstance(ca, bool):\n raise TypeError(\"ca must be a boolean value\")\n\n if path_length is not None and not ca:\n raise ValueError(\"path_length must be None when ca is False\")\n\n if (\n path_length is not None and\n (not isinstance(path_length, six.integer_types) or path_length < 0)\n ):\n raise TypeError(\n \"path_length must be a non-negative integer or None\"\n )\n\n self._ca = ca\n self._path_length = path_length\n\n ca = utils.read_only_property(\"_ca\")\n path_length = utils.read_only_property(\"_path_length\")\n\n def __repr__(self):\n return (\"<BasicConstraints(ca={0.ca}, \"\n \"path_length={0.path_length})>\").format(self)\n\n def __eq__(self, other):\n if not isinstance(other, BasicConstraints):\n return NotImplemented\n\n return self.ca == other.ca and self.path_length == other.path_length\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass CRLDistributionPoints(object):\n oid = ExtensionOID.CRL_DISTRIBUTION_POINTS\n\n def __init__(self, distribution_points):\n if not all(\n isinstance(x, DistributionPoint) for x in distribution_points\n ):\n raise TypeError(\n \"distribution_points must be a list of DistributionPoint \"\n \"objects\"\n )\n\n self._distribution_points = distribution_points\n\n def __iter__(self):\n return iter(self._distribution_points)\n\n def __len__(self):\n return len(self._distribution_points)\n\n def __repr__(self):\n return \"<CRLDistributionPoints({0})>\".format(self._distribution_points)\n\n def __eq__(self, other):\n if not isinstance(other, CRLDistributionPoints):\n return NotImplemented\n\n return self._distribution_points == other._distribution_points\n\n def __ne__(self, other):\n return not self == other\n\n\nclass DistributionPoint(object):\n def __init__(self, full_name, relative_name, reasons, crl_issuer):\n if full_name and relative_name:\n raise ValueError(\n \"You cannot provide both full_name and relative_name, at \"\n \"least one must be None.\"\n )\n\n if full_name and not all(\n isinstance(x, GeneralName) for x in full_name\n ):\n raise TypeError(\n \"full_name must be a list of GeneralName objects\"\n )\n\n if relative_name and not isinstance(relative_name, Name):\n raise TypeError(\"relative_name must be a Name\")\n\n if crl_issuer and not all(\n isinstance(x, GeneralName) for x in crl_issuer\n ):\n raise TypeError(\n \"crl_issuer must be None or a list of general names\"\n )\n\n if reasons and (not isinstance(reasons, frozenset) or not all(\n isinstance(x, ReasonFlags) for x in reasons\n )):\n raise TypeError(\"reasons must be None or frozenset of ReasonFlags\")\n\n if reasons and (\n ReasonFlags.unspecified in reasons or\n ReasonFlags.remove_from_crl in reasons\n ):\n raise ValueError(\n \"unspecified and remove_from_crl are not valid reasons in a \"\n \"DistributionPoint\"\n )\n\n if reasons and not crl_issuer and not (full_name or relative_name):\n raise ValueError(\n \"You must supply crl_issuer, full_name, or relative_name when 
\"\n \"reasons is not None\"\n )\n\n self._full_name = full_name\n self._relative_name = relative_name\n self._reasons = reasons\n self._crl_issuer = crl_issuer\n\n def __repr__(self):\n return (\n \"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela\"\n \"tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is\"\n \"suer})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, DistributionPoint):\n return NotImplemented\n\n return (\n self.full_name == other.full_name and\n self.relative_name == other.relative_name and\n self.reasons == other.reasons and\n self.crl_issuer == other.crl_issuer\n )\n\n def __ne__(self, other):\n return not self == other\n\n full_name = utils.read_only_property(\"_full_name\")\n relative_name = utils.read_only_property(\"_relative_name\")\n reasons = utils.read_only_property(\"_reasons\")\n crl_issuer = utils.read_only_property(\"_crl_issuer\")\n\n\nclass ReasonFlags(Enum):\n unspecified = \"unspecified\"\n key_compromise = \"keyCompromise\"\n ca_compromise = \"cACompromise\"\n affiliation_changed = \"affiliationChanged\"\n superseded = \"superseded\"\n cessation_of_operation = \"cessationOfOperation\"\n certificate_hold = \"certificateHold\"\n privilege_withdrawn = \"privilegeWithdrawn\"\n aa_compromise = \"aACompromise\"\n remove_from_crl = \"removeFromCRL\"\n\n\[email protected]_interface(ExtensionType)\nclass CertificatePolicies(object):\n oid = ExtensionOID.CERTIFICATE_POLICIES\n\n def __init__(self, policies):\n if not all(isinstance(x, PolicyInformation) for x in policies):\n raise TypeError(\n \"Every item in the policies list must be a \"\n \"PolicyInformation\"\n )\n\n self._policies = policies\n\n def __iter__(self):\n return iter(self._policies)\n\n def __len__(self):\n return len(self._policies)\n\n def __repr__(self):\n return \"<CertificatePolicies({0})>\".format(self._policies)\n\n def __eq__(self, other):\n if not isinstance(other, CertificatePolicies):\n return NotImplemented\n\n return self._policies == other._policies\n\n def __ne__(self, other):\n return not self == other\n\n\nclass PolicyInformation(object):\n def __init__(self, policy_identifier, policy_qualifiers):\n if not isinstance(policy_identifier, ObjectIdentifier):\n raise TypeError(\"policy_identifier must be an ObjectIdentifier\")\n\n self._policy_identifier = policy_identifier\n if policy_qualifiers and not all(\n isinstance(\n x, (six.text_type, UserNotice)\n ) for x in policy_qualifiers\n ):\n raise TypeError(\n \"policy_qualifiers must be a list of strings and/or UserNotice\"\n \" objects or None\"\n )\n\n self._policy_qualifiers = policy_qualifiers\n\n def __repr__(self):\n return (\n \"<PolicyInformation(policy_identifier={0.policy_identifier}, polic\"\n \"y_qualifiers={0.policy_qualifiers})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, PolicyInformation):\n return NotImplemented\n\n return (\n self.policy_identifier == other.policy_identifier and\n self.policy_qualifiers == other.policy_qualifiers\n )\n\n def __ne__(self, other):\n return not self == other\n\n policy_identifier = utils.read_only_property(\"_policy_identifier\")\n policy_qualifiers = utils.read_only_property(\"_policy_qualifiers\")\n\n\nclass UserNotice(object):\n def __init__(self, notice_reference, explicit_text):\n if notice_reference and not isinstance(\n notice_reference, NoticeReference\n ):\n raise TypeError(\n \"notice_reference must be None or a NoticeReference\"\n )\n\n self._notice_reference = notice_reference\n 
self._explicit_text = explicit_text\n\n def __repr__(self):\n return (\n \"<UserNotice(notice_reference={0.notice_reference}, explicit_text=\"\n \"{0.explicit_text!r})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, UserNotice):\n return NotImplemented\n\n return (\n self.notice_reference == other.notice_reference and\n self.explicit_text == other.explicit_text\n )\n\n def __ne__(self, other):\n return not self == other\n\n notice_reference = utils.read_only_property(\"_notice_reference\")\n explicit_text = utils.read_only_property(\"_explicit_text\")\n\n\nclass NoticeReference(object):\n def __init__(self, organization, notice_numbers):\n self._organization = organization\n if not isinstance(notice_numbers, list) or not all(\n isinstance(x, int) for x in notice_numbers\n ):\n raise TypeError(\n \"notice_numbers must be a list of integers\"\n )\n\n self._notice_numbers = notice_numbers\n\n def __repr__(self):\n return (\n \"<NoticeReference(organization={0.organization!r}, notice_numbers=\"\n \"{0.notice_numbers})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, NoticeReference):\n return NotImplemented\n\n return (\n self.organization == other.organization and\n self.notice_numbers == other.notice_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n organization = utils.read_only_property(\"_organization\")\n notice_numbers = utils.read_only_property(\"_notice_numbers\")\n\n\[email protected]_interface(ExtensionType)\nclass ExtendedKeyUsage(object):\n oid = ExtensionOID.EXTENDED_KEY_USAGE\n\n def __init__(self, usages):\n if not all(isinstance(x, ObjectIdentifier) for x in usages):\n raise TypeError(\n \"Every item in the usages list must be an ObjectIdentifier\"\n )\n\n self._usages = usages\n\n def __iter__(self):\n return iter(self._usages)\n\n def __len__(self):\n return len(self._usages)\n\n def __repr__(self):\n return \"<ExtendedKeyUsage({0})>\".format(self._usages)\n\n def __eq__(self, other):\n if not isinstance(other, ExtendedKeyUsage):\n return NotImplemented\n\n return self._usages == other._usages\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass OCSPNoCheck(object):\n oid = ExtensionOID.OCSP_NO_CHECK\n\n\[email protected]_interface(ExtensionType)\nclass InhibitAnyPolicy(object):\n oid = ExtensionOID.INHIBIT_ANY_POLICY\n\n def __init__(self, skip_certs):\n if not isinstance(skip_certs, six.integer_types):\n raise TypeError(\"skip_certs must be an integer\")\n\n if skip_certs < 0:\n raise ValueError(\"skip_certs must be a non-negative integer\")\n\n self._skip_certs = skip_certs\n\n def __repr__(self):\n return \"<InhibitAnyPolicy(skip_certs={0.skip_certs})>\".format(self)\n\n def __eq__(self, other):\n if not isinstance(other, InhibitAnyPolicy):\n return NotImplemented\n\n return self.skip_certs == other.skip_certs\n\n def __ne__(self, other):\n return not self == other\n\n skip_certs = utils.read_only_property(\"_skip_certs\")\n\n\[email protected]_interface(ExtensionType)\nclass KeyUsage(object):\n oid = ExtensionOID.KEY_USAGE\n\n def __init__(self, digital_signature, content_commitment, key_encipherment,\n data_encipherment, key_agreement, key_cert_sign, crl_sign,\n encipher_only, decipher_only):\n if not key_agreement and (encipher_only or decipher_only):\n raise ValueError(\n \"encipher_only and decipher_only can only be true when \"\n \"key_agreement is true\"\n )\n\n self._digital_signature = digital_signature\n self._content_commitment 
= content_commitment\n self._key_encipherment = key_encipherment\n self._data_encipherment = data_encipherment\n self._key_agreement = key_agreement\n self._key_cert_sign = key_cert_sign\n self._crl_sign = crl_sign\n self._encipher_only = encipher_only\n self._decipher_only = decipher_only\n\n digital_signature = utils.read_only_property(\"_digital_signature\")\n content_commitment = utils.read_only_property(\"_content_commitment\")\n key_encipherment = utils.read_only_property(\"_key_encipherment\")\n data_encipherment = utils.read_only_property(\"_data_encipherment\")\n key_agreement = utils.read_only_property(\"_key_agreement\")\n key_cert_sign = utils.read_only_property(\"_key_cert_sign\")\n crl_sign = utils.read_only_property(\"_crl_sign\")\n\n @property\n def encipher_only(self):\n if not self.key_agreement:\n raise ValueError(\n \"encipher_only is undefined unless key_agreement is true\"\n )\n else:\n return self._encipher_only\n\n @property\n def decipher_only(self):\n if not self.key_agreement:\n raise ValueError(\n \"decipher_only is undefined unless key_agreement is true\"\n )\n else:\n return self._decipher_only\n\n def __repr__(self):\n try:\n encipher_only = self.encipher_only\n decipher_only = self.decipher_only\n except ValueError:\n encipher_only = None\n decipher_only = None\n\n return (\"<KeyUsage(digital_signature={0.digital_signature}, \"\n \"content_commitment={0.content_commitment}, \"\n \"key_encipherment={0.key_encipherment}, \"\n \"data_encipherment={0.data_encipherment}, \"\n \"key_agreement={0.key_agreement}, \"\n \"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, \"\n \"encipher_only={1}, decipher_only={2})>\").format(\n self, encipher_only, decipher_only)\n\n def __eq__(self, other):\n if not isinstance(other, KeyUsage):\n return NotImplemented\n\n return (\n self.digital_signature == other.digital_signature and\n self.content_commitment == other.content_commitment and\n self.key_encipherment == other.key_encipherment and\n self.data_encipherment == other.data_encipherment and\n self.key_agreement == other.key_agreement and\n self.key_cert_sign == other.key_cert_sign and\n self.crl_sign == other.crl_sign and\n self._encipher_only == other._encipher_only and\n self._decipher_only == other._decipher_only\n )\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass NameConstraints(object):\n oid = ExtensionOID.NAME_CONSTRAINTS\n\n def __init__(self, permitted_subtrees, excluded_subtrees):\n if permitted_subtrees is not None:\n if not all(\n isinstance(x, GeneralName) for x in permitted_subtrees\n ):\n raise TypeError(\n \"permitted_subtrees must be a list of GeneralName objects \"\n \"or None\"\n )\n\n self._validate_ip_name(permitted_subtrees)\n\n if excluded_subtrees is not None:\n if not all(\n isinstance(x, GeneralName) for x in excluded_subtrees\n ):\n raise TypeError(\n \"excluded_subtrees must be a list of GeneralName objects \"\n \"or None\"\n )\n\n self._validate_ip_name(excluded_subtrees)\n\n if permitted_subtrees is None and excluded_subtrees is None:\n raise ValueError(\n \"At least one of permitted_subtrees and excluded_subtrees \"\n \"must not be None\"\n )\n\n self._permitted_subtrees = permitted_subtrees\n self._excluded_subtrees = excluded_subtrees\n\n def __eq__(self, other):\n if not isinstance(other, NameConstraints):\n return NotImplemented\n\n return (\n self.excluded_subtrees == other.excluded_subtrees and\n self.permitted_subtrees == other.permitted_subtrees\n )\n\n def 
__ne__(self, other):\n return not self == other\n\n def _validate_ip_name(self, tree):\n if any(isinstance(name, IPAddress) and not isinstance(\n name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)\n ) for name in tree):\n raise TypeError(\n \"IPAddress name constraints must be an IPv4Network or\"\n \" IPv6Network object\"\n )\n\n def __repr__(self):\n return (\n u\"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, \"\n u\"excluded_subtrees={0.excluded_subtrees})>\".format(self)\n )\n\n permitted_subtrees = utils.read_only_property(\"_permitted_subtrees\")\n excluded_subtrees = utils.read_only_property(\"_excluded_subtrees\")\n\n\nclass Extension(object):\n def __init__(self, oid, critical, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(critical, bool):\n raise TypeError(\"critical must be a boolean value\")\n\n self._oid = oid\n self._critical = critical\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n critical = utils.read_only_property(\"_critical\")\n value = utils.read_only_property(\"_value\")\n\n def __repr__(self):\n return (\"<Extension(oid={0.oid}, critical={0.critical}, \"\n \"value={0.value})>\").format(self)\n\n def __eq__(self, other):\n if not isinstance(other, Extension):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.critical == other.critical and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n\nclass GeneralNames(object):\n def __init__(self, general_names):\n if not all(isinstance(x, GeneralName) for x in general_names):\n raise TypeError(\n \"Every item in the general_names list must be an \"\n \"object conforming to the GeneralName interface\"\n )\n\n self._general_names = general_names\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n # Return the value of each GeneralName, except for OtherName instances\n # which we return directly because it has two important properties not\n # just one value.\n objs = (i for i in self if isinstance(i, type))\n if type != OtherName:\n objs = (i.value for i in objs)\n return list(objs)\n\n def __repr__(self):\n return \"<GeneralNames({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, GeneralNames):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass SubjectAlternativeName(object):\n oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME\n\n def __init__(self, general_names):\n self._general_names = GeneralNames(general_names)\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n return self._general_names.get_values_for_type(type)\n\n def __repr__(self):\n return \"<SubjectAlternativeName({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, SubjectAlternativeName):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass IssuerAlternativeName(object):\n oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME\n\n def __init__(self, general_names):\n self._general_names = 
GeneralNames(general_names)\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n return self._general_names.get_values_for_type(type)\n\n def __repr__(self):\n return \"<IssuerAlternativeName({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, IssuerAlternativeName):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n", "path": "src/cryptography/x509/extensions.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport hashlib\nimport ipaddress\nfrom enum import Enum\n\nfrom pyasn1.codec.der import decoder\nfrom pyasn1.type import namedtype, univ\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import constant_time, serialization\nfrom cryptography.x509.general_name import GeneralName, IPAddress, OtherName\nfrom cryptography.x509.name import Name\nfrom cryptography.x509.oid import (\n AuthorityInformationAccessOID, ExtensionOID, ObjectIdentifier\n)\n\n\nclass _SubjectPublicKeyInfo(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('algorithm', univ.Sequence()),\n namedtype.NamedType('subjectPublicKey', univ.BitString())\n )\n\n\ndef _key_identifier_from_public_key(public_key):\n # This is a very slow way to do this.\n serialized = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.SubjectPublicKeyInfo\n )\n spki, remaining = decoder.decode(\n serialized, asn1Spec=_SubjectPublicKeyInfo()\n )\n assert not remaining\n # the univ.BitString object is a tuple of bits. We need bytes and\n # pyasn1 really doesn't want to give them to us. To get it we'll\n # build an integer and convert that to bytes.\n bits = 0\n for bit in spki.getComponentByName(\"subjectPublicKey\"):\n bits = bits << 1 | bit\n\n data = utils.int_to_bytes(bits)\n return hashlib.sha1(data).digest()\n\n\nclass DuplicateExtension(Exception):\n def __init__(self, msg, oid):\n super(DuplicateExtension, self).__init__(msg)\n self.oid = oid\n\n\nclass UnsupportedExtension(Exception):\n def __init__(self, msg, oid):\n super(UnsupportedExtension, self).__init__(msg)\n self.oid = oid\n\n\nclass ExtensionNotFound(Exception):\n def __init__(self, msg, oid):\n super(ExtensionNotFound, self).__init__(msg)\n self.oid = oid\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass ExtensionType(object):\n @abc.abstractproperty\n def oid(self):\n \"\"\"\n Returns the oid associated with the given extension type.\n \"\"\"\n\n\nclass Extensions(object):\n def __init__(self, extensions):\n self._extensions = extensions\n\n def get_extension_for_oid(self, oid):\n for ext in self:\n if ext.oid == oid:\n return ext\n\n raise ExtensionNotFound(\"No {0} extension was found\".format(oid), oid)\n\n def get_extension_for_class(self, extclass):\n for ext in self:\n if isinstance(ext.value, extclass):\n return ext\n\n raise ExtensionNotFound(\n \"No {0} extension was found\".format(extclass), extclass.oid\n )\n\n def __iter__(self):\n return iter(self._extensions)\n\n def __len__(self):\n return len(self._extensions)\n\n def __repr__(self):\n return (\n \"<Extensions({0})>\".format(self._extensions)\n )\n\n\[email protected]_interface(ExtensionType)\nclass AuthorityKeyIdentifier(object):\n oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER\n\n def __init__(self, key_identifier, authority_cert_issuer,\n authority_cert_serial_number):\n if authority_cert_issuer or authority_cert_serial_number:\n if not authority_cert_issuer or not authority_cert_serial_number:\n raise ValueError(\n \"authority_cert_issuer and authority_cert_serial_number \"\n \"must both be present or both None\"\n )\n\n if not all(\n isinstance(x, GeneralName) for x in authority_cert_issuer\n ):\n raise TypeError(\n \"authority_cert_issuer 
must be a list of GeneralName \"\n \"objects\"\n )\n\n if not isinstance(authority_cert_serial_number, six.integer_types):\n raise TypeError(\n \"authority_cert_serial_number must be an integer\"\n )\n\n self._key_identifier = key_identifier\n self._authority_cert_issuer = authority_cert_issuer\n self._authority_cert_serial_number = authority_cert_serial_number\n\n @classmethod\n def from_issuer_public_key(cls, public_key):\n digest = _key_identifier_from_public_key(public_key)\n return cls(\n key_identifier=digest,\n authority_cert_issuer=None,\n authority_cert_serial_number=None\n )\n\n def __repr__(self):\n return (\n \"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, \"\n \"authority_cert_issuer={0.authority_cert_issuer}, \"\n \"authority_cert_serial_number={0.authority_cert_serial_number}\"\n \")>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, AuthorityKeyIdentifier):\n return NotImplemented\n\n return (\n self.key_identifier == other.key_identifier and\n self.authority_cert_issuer == other.authority_cert_issuer and\n self.authority_cert_serial_number ==\n other.authority_cert_serial_number\n )\n\n def __ne__(self, other):\n return not self == other\n\n key_identifier = utils.read_only_property(\"_key_identifier\")\n authority_cert_issuer = utils.read_only_property(\"_authority_cert_issuer\")\n authority_cert_serial_number = utils.read_only_property(\n \"_authority_cert_serial_number\"\n )\n\n\[email protected]_interface(ExtensionType)\nclass SubjectKeyIdentifier(object):\n oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER\n\n def __init__(self, digest):\n self._digest = digest\n\n @classmethod\n def from_public_key(cls, public_key):\n return cls(_key_identifier_from_public_key(public_key))\n\n digest = utils.read_only_property(\"_digest\")\n\n def __repr__(self):\n return \"<SubjectKeyIdentifier(digest={0!r})>\".format(self.digest)\n\n def __eq__(self, other):\n if not isinstance(other, SubjectKeyIdentifier):\n return NotImplemented\n\n return constant_time.bytes_eq(self.digest, other.digest)\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass AuthorityInformationAccess(object):\n oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS\n\n def __init__(self, descriptions):\n if not all(isinstance(x, AccessDescription) for x in descriptions):\n raise TypeError(\n \"Every item in the descriptions list must be an \"\n \"AccessDescription\"\n )\n\n self._descriptions = descriptions\n\n def __iter__(self):\n return iter(self._descriptions)\n\n def __len__(self):\n return len(self._descriptions)\n\n def __repr__(self):\n return \"<AuthorityInformationAccess({0})>\".format(self._descriptions)\n\n def __eq__(self, other):\n if not isinstance(other, AuthorityInformationAccess):\n return NotImplemented\n\n return self._descriptions == other._descriptions\n\n def __ne__(self, other):\n return not self == other\n\n\nclass AccessDescription(object):\n def __init__(self, access_method, access_location):\n if not (access_method == AuthorityInformationAccessOID.OCSP or\n access_method == AuthorityInformationAccessOID.CA_ISSUERS):\n raise ValueError(\n \"access_method must be OID_OCSP or OID_CA_ISSUERS\"\n )\n\n if not isinstance(access_location, GeneralName):\n raise TypeError(\"access_location must be a GeneralName\")\n\n self._access_method = access_method\n self._access_location = access_location\n\n def __repr__(self):\n return (\n \"<AccessDescription(access_method={0.access_method}, access_locati\"\n 
\"on={0.access_location})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, AccessDescription):\n return NotImplemented\n\n return (\n self.access_method == other.access_method and\n self.access_location == other.access_location\n )\n\n def __ne__(self, other):\n return not self == other\n\n access_method = utils.read_only_property(\"_access_method\")\n access_location = utils.read_only_property(\"_access_location\")\n\n\[email protected]_interface(ExtensionType)\nclass BasicConstraints(object):\n oid = ExtensionOID.BASIC_CONSTRAINTS\n\n def __init__(self, ca, path_length):\n if not isinstance(ca, bool):\n raise TypeError(\"ca must be a boolean value\")\n\n if path_length is not None and not ca:\n raise ValueError(\"path_length must be None when ca is False\")\n\n if (\n path_length is not None and\n (not isinstance(path_length, six.integer_types) or path_length < 0)\n ):\n raise TypeError(\n \"path_length must be a non-negative integer or None\"\n )\n\n self._ca = ca\n self._path_length = path_length\n\n ca = utils.read_only_property(\"_ca\")\n path_length = utils.read_only_property(\"_path_length\")\n\n def __repr__(self):\n return (\"<BasicConstraints(ca={0.ca}, \"\n \"path_length={0.path_length})>\").format(self)\n\n def __eq__(self, other):\n if not isinstance(other, BasicConstraints):\n return NotImplemented\n\n return self.ca == other.ca and self.path_length == other.path_length\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass CRLDistributionPoints(object):\n oid = ExtensionOID.CRL_DISTRIBUTION_POINTS\n\n def __init__(self, distribution_points):\n if not all(\n isinstance(x, DistributionPoint) for x in distribution_points\n ):\n raise TypeError(\n \"distribution_points must be a list of DistributionPoint \"\n \"objects\"\n )\n\n self._distribution_points = distribution_points\n\n def __iter__(self):\n return iter(self._distribution_points)\n\n def __len__(self):\n return len(self._distribution_points)\n\n def __repr__(self):\n return \"<CRLDistributionPoints({0})>\".format(self._distribution_points)\n\n def __eq__(self, other):\n if not isinstance(other, CRLDistributionPoints):\n return NotImplemented\n\n return self._distribution_points == other._distribution_points\n\n def __ne__(self, other):\n return not self == other\n\n\nclass DistributionPoint(object):\n def __init__(self, full_name, relative_name, reasons, crl_issuer):\n if full_name and relative_name:\n raise ValueError(\n \"You cannot provide both full_name and relative_name, at \"\n \"least one must be None.\"\n )\n\n if full_name and not all(\n isinstance(x, GeneralName) for x in full_name\n ):\n raise TypeError(\n \"full_name must be a list of GeneralName objects\"\n )\n\n if relative_name and not isinstance(relative_name, Name):\n raise TypeError(\"relative_name must be a Name\")\n\n if crl_issuer and not all(\n isinstance(x, GeneralName) for x in crl_issuer\n ):\n raise TypeError(\n \"crl_issuer must be None or a list of general names\"\n )\n\n if reasons and (not isinstance(reasons, frozenset) or not all(\n isinstance(x, ReasonFlags) for x in reasons\n )):\n raise TypeError(\"reasons must be None or frozenset of ReasonFlags\")\n\n if reasons and (\n ReasonFlags.unspecified in reasons or\n ReasonFlags.remove_from_crl in reasons\n ):\n raise ValueError(\n \"unspecified and remove_from_crl are not valid reasons in a \"\n \"DistributionPoint\"\n )\n\n if reasons and not crl_issuer and not (full_name or relative_name):\n raise 
ValueError(\n \"You must supply crl_issuer, full_name, or relative_name when \"\n \"reasons is not None\"\n )\n\n self._full_name = full_name\n self._relative_name = relative_name\n self._reasons = reasons\n self._crl_issuer = crl_issuer\n\n def __repr__(self):\n return (\n \"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela\"\n \"tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is\"\n \"suer})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, DistributionPoint):\n return NotImplemented\n\n return (\n self.full_name == other.full_name and\n self.relative_name == other.relative_name and\n self.reasons == other.reasons and\n self.crl_issuer == other.crl_issuer\n )\n\n def __ne__(self, other):\n return not self == other\n\n full_name = utils.read_only_property(\"_full_name\")\n relative_name = utils.read_only_property(\"_relative_name\")\n reasons = utils.read_only_property(\"_reasons\")\n crl_issuer = utils.read_only_property(\"_crl_issuer\")\n\n\nclass ReasonFlags(Enum):\n unspecified = \"unspecified\"\n key_compromise = \"keyCompromise\"\n ca_compromise = \"cACompromise\"\n affiliation_changed = \"affiliationChanged\"\n superseded = \"superseded\"\n cessation_of_operation = \"cessationOfOperation\"\n certificate_hold = \"certificateHold\"\n privilege_withdrawn = \"privilegeWithdrawn\"\n aa_compromise = \"aACompromise\"\n remove_from_crl = \"removeFromCRL\"\n\n\[email protected]_interface(ExtensionType)\nclass CertificatePolicies(object):\n oid = ExtensionOID.CERTIFICATE_POLICIES\n\n def __init__(self, policies):\n if not all(isinstance(x, PolicyInformation) for x in policies):\n raise TypeError(\n \"Every item in the policies list must be a \"\n \"PolicyInformation\"\n )\n\n self._policies = policies\n\n def __iter__(self):\n return iter(self._policies)\n\n def __len__(self):\n return len(self._policies)\n\n def __repr__(self):\n return \"<CertificatePolicies({0})>\".format(self._policies)\n\n def __eq__(self, other):\n if not isinstance(other, CertificatePolicies):\n return NotImplemented\n\n return self._policies == other._policies\n\n def __ne__(self, other):\n return not self == other\n\n\nclass PolicyInformation(object):\n def __init__(self, policy_identifier, policy_qualifiers):\n if not isinstance(policy_identifier, ObjectIdentifier):\n raise TypeError(\"policy_identifier must be an ObjectIdentifier\")\n\n self._policy_identifier = policy_identifier\n if policy_qualifiers and not all(\n isinstance(\n x, (six.text_type, UserNotice)\n ) for x in policy_qualifiers\n ):\n raise TypeError(\n \"policy_qualifiers must be a list of strings and/or UserNotice\"\n \" objects or None\"\n )\n\n self._policy_qualifiers = policy_qualifiers\n\n def __repr__(self):\n return (\n \"<PolicyInformation(policy_identifier={0.policy_identifier}, polic\"\n \"y_qualifiers={0.policy_qualifiers})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, PolicyInformation):\n return NotImplemented\n\n return (\n self.policy_identifier == other.policy_identifier and\n self.policy_qualifiers == other.policy_qualifiers\n )\n\n def __ne__(self, other):\n return not self == other\n\n policy_identifier = utils.read_only_property(\"_policy_identifier\")\n policy_qualifiers = utils.read_only_property(\"_policy_qualifiers\")\n\n\nclass UserNotice(object):\n def __init__(self, notice_reference, explicit_text):\n if notice_reference and not isinstance(\n notice_reference, NoticeReference\n ):\n raise TypeError(\n \"notice_reference must be None or a 
NoticeReference\"\n )\n\n self._notice_reference = notice_reference\n self._explicit_text = explicit_text\n\n def __repr__(self):\n return (\n \"<UserNotice(notice_reference={0.notice_reference}, explicit_text=\"\n \"{0.explicit_text!r})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, UserNotice):\n return NotImplemented\n\n return (\n self.notice_reference == other.notice_reference and\n self.explicit_text == other.explicit_text\n )\n\n def __ne__(self, other):\n return not self == other\n\n notice_reference = utils.read_only_property(\"_notice_reference\")\n explicit_text = utils.read_only_property(\"_explicit_text\")\n\n\nclass NoticeReference(object):\n def __init__(self, organization, notice_numbers):\n self._organization = organization\n if not isinstance(notice_numbers, list) or not all(\n isinstance(x, int) for x in notice_numbers\n ):\n raise TypeError(\n \"notice_numbers must be a list of integers\"\n )\n\n self._notice_numbers = notice_numbers\n\n def __repr__(self):\n return (\n \"<NoticeReference(organization={0.organization!r}, notice_numbers=\"\n \"{0.notice_numbers})>\".format(self)\n )\n\n def __eq__(self, other):\n if not isinstance(other, NoticeReference):\n return NotImplemented\n\n return (\n self.organization == other.organization and\n self.notice_numbers == other.notice_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n organization = utils.read_only_property(\"_organization\")\n notice_numbers = utils.read_only_property(\"_notice_numbers\")\n\n\[email protected]_interface(ExtensionType)\nclass ExtendedKeyUsage(object):\n oid = ExtensionOID.EXTENDED_KEY_USAGE\n\n def __init__(self, usages):\n if not all(isinstance(x, ObjectIdentifier) for x in usages):\n raise TypeError(\n \"Every item in the usages list must be an ObjectIdentifier\"\n )\n\n self._usages = usages\n\n def __iter__(self):\n return iter(self._usages)\n\n def __len__(self):\n return len(self._usages)\n\n def __repr__(self):\n return \"<ExtendedKeyUsage({0})>\".format(self._usages)\n\n def __eq__(self, other):\n if not isinstance(other, ExtendedKeyUsage):\n return NotImplemented\n\n return self._usages == other._usages\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass OCSPNoCheck(object):\n oid = ExtensionOID.OCSP_NO_CHECK\n\n\[email protected]_interface(ExtensionType)\nclass InhibitAnyPolicy(object):\n oid = ExtensionOID.INHIBIT_ANY_POLICY\n\n def __init__(self, skip_certs):\n if not isinstance(skip_certs, six.integer_types):\n raise TypeError(\"skip_certs must be an integer\")\n\n if skip_certs < 0:\n raise ValueError(\"skip_certs must be a non-negative integer\")\n\n self._skip_certs = skip_certs\n\n def __repr__(self):\n return \"<InhibitAnyPolicy(skip_certs={0.skip_certs})>\".format(self)\n\n def __eq__(self, other):\n if not isinstance(other, InhibitAnyPolicy):\n return NotImplemented\n\n return self.skip_certs == other.skip_certs\n\n def __ne__(self, other):\n return not self == other\n\n skip_certs = utils.read_only_property(\"_skip_certs\")\n\n\[email protected]_interface(ExtensionType)\nclass KeyUsage(object):\n oid = ExtensionOID.KEY_USAGE\n\n def __init__(self, digital_signature, content_commitment, key_encipherment,\n data_encipherment, key_agreement, key_cert_sign, crl_sign,\n encipher_only, decipher_only):\n if not key_agreement and (encipher_only or decipher_only):\n raise ValueError(\n \"encipher_only and decipher_only can only be true when \"\n \"key_agreement is true\"\n )\n\n 
self._digital_signature = digital_signature\n self._content_commitment = content_commitment\n self._key_encipherment = key_encipherment\n self._data_encipherment = data_encipherment\n self._key_agreement = key_agreement\n self._key_cert_sign = key_cert_sign\n self._crl_sign = crl_sign\n self._encipher_only = encipher_only\n self._decipher_only = decipher_only\n\n digital_signature = utils.read_only_property(\"_digital_signature\")\n content_commitment = utils.read_only_property(\"_content_commitment\")\n key_encipherment = utils.read_only_property(\"_key_encipherment\")\n data_encipherment = utils.read_only_property(\"_data_encipherment\")\n key_agreement = utils.read_only_property(\"_key_agreement\")\n key_cert_sign = utils.read_only_property(\"_key_cert_sign\")\n crl_sign = utils.read_only_property(\"_crl_sign\")\n\n @property\n def encipher_only(self):\n if not self.key_agreement:\n raise ValueError(\n \"encipher_only is undefined unless key_agreement is true\"\n )\n else:\n return self._encipher_only\n\n @property\n def decipher_only(self):\n if not self.key_agreement:\n raise ValueError(\n \"decipher_only is undefined unless key_agreement is true\"\n )\n else:\n return self._decipher_only\n\n def __repr__(self):\n try:\n encipher_only = self.encipher_only\n decipher_only = self.decipher_only\n except ValueError:\n encipher_only = None\n decipher_only = None\n\n return (\"<KeyUsage(digital_signature={0.digital_signature}, \"\n \"content_commitment={0.content_commitment}, \"\n \"key_encipherment={0.key_encipherment}, \"\n \"data_encipherment={0.data_encipherment}, \"\n \"key_agreement={0.key_agreement}, \"\n \"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, \"\n \"encipher_only={1}, decipher_only={2})>\").format(\n self, encipher_only, decipher_only)\n\n def __eq__(self, other):\n if not isinstance(other, KeyUsage):\n return NotImplemented\n\n return (\n self.digital_signature == other.digital_signature and\n self.content_commitment == other.content_commitment and\n self.key_encipherment == other.key_encipherment and\n self.data_encipherment == other.data_encipherment and\n self.key_agreement == other.key_agreement and\n self.key_cert_sign == other.key_cert_sign and\n self.crl_sign == other.crl_sign and\n self._encipher_only == other._encipher_only and\n self._decipher_only == other._decipher_only\n )\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass NameConstraints(object):\n oid = ExtensionOID.NAME_CONSTRAINTS\n\n def __init__(self, permitted_subtrees, excluded_subtrees):\n if permitted_subtrees is not None:\n if not all(\n isinstance(x, GeneralName) for x in permitted_subtrees\n ):\n raise TypeError(\n \"permitted_subtrees must be a list of GeneralName objects \"\n \"or None\"\n )\n\n self._validate_ip_name(permitted_subtrees)\n\n if excluded_subtrees is not None:\n if not all(\n isinstance(x, GeneralName) for x in excluded_subtrees\n ):\n raise TypeError(\n \"excluded_subtrees must be a list of GeneralName objects \"\n \"or None\"\n )\n\n self._validate_ip_name(excluded_subtrees)\n\n if permitted_subtrees is None and excluded_subtrees is None:\n raise ValueError(\n \"At least one of permitted_subtrees and excluded_subtrees \"\n \"must not be None\"\n )\n\n self._permitted_subtrees = permitted_subtrees\n self._excluded_subtrees = excluded_subtrees\n\n def __eq__(self, other):\n if not isinstance(other, NameConstraints):\n return NotImplemented\n\n return (\n self.excluded_subtrees == other.excluded_subtrees and\n 
self.permitted_subtrees == other.permitted_subtrees\n )\n\n def __ne__(self, other):\n return not self == other\n\n def _validate_ip_name(self, tree):\n if any(isinstance(name, IPAddress) and not isinstance(\n name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)\n ) for name in tree):\n raise TypeError(\n \"IPAddress name constraints must be an IPv4Network or\"\n \" IPv6Network object\"\n )\n\n def __repr__(self):\n return (\n u\"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, \"\n u\"excluded_subtrees={0.excluded_subtrees})>\".format(self)\n )\n\n permitted_subtrees = utils.read_only_property(\"_permitted_subtrees\")\n excluded_subtrees = utils.read_only_property(\"_excluded_subtrees\")\n\n\nclass Extension(object):\n def __init__(self, oid, critical, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(critical, bool):\n raise TypeError(\"critical must be a boolean value\")\n\n self._oid = oid\n self._critical = critical\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n critical = utils.read_only_property(\"_critical\")\n value = utils.read_only_property(\"_value\")\n\n def __repr__(self):\n return (\"<Extension(oid={0.oid}, critical={0.critical}, \"\n \"value={0.value})>\").format(self)\n\n def __eq__(self, other):\n if not isinstance(other, Extension):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.critical == other.critical and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n\nclass GeneralNames(object):\n def __init__(self, general_names):\n if not all(isinstance(x, GeneralName) for x in general_names):\n raise TypeError(\n \"Every item in the general_names list must be an \"\n \"object conforming to the GeneralName interface\"\n )\n\n self._general_names = general_names\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n # Return the value of each GeneralName, except for OtherName instances\n # which we return directly because it has two important properties not\n # just one value.\n objs = (i for i in self if isinstance(i, type))\n if type != OtherName:\n objs = (i.value for i in objs)\n return list(objs)\n\n def __repr__(self):\n return \"<GeneralNames({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, GeneralNames):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass SubjectAlternativeName(object):\n oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME\n\n def __init__(self, general_names):\n self._general_names = GeneralNames(general_names)\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n return self._general_names.get_values_for_type(type)\n\n def __repr__(self):\n return \"<SubjectAlternativeName({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, SubjectAlternativeName):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n\n\[email protected]_interface(ExtensionType)\nclass IssuerAlternativeName(object):\n oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME\n\n def __init__(self, 
general_names):\n self._general_names = GeneralNames(general_names)\n\n def __iter__(self):\n return iter(self._general_names)\n\n def __len__(self):\n return len(self._general_names)\n\n def get_values_for_type(self, type):\n return self._general_names.get_values_for_type(type)\n\n def __repr__(self):\n return \"<IssuerAlternativeName({0})>\".format(self._general_names)\n\n def __eq__(self, other):\n if not isinstance(other, IssuerAlternativeName):\n return NotImplemented\n\n return self._general_names == other._general_names\n\n def __ne__(self, other):\n return not self == other\n", "path": "src/cryptography/x509/extensions.py" } ]
diff --git a/src/cryptography/x509/extensions.py b/src/cryptography/x509/extensions.py
index cd75ecdc255e..46ba5a28dd56 100644
--- a/src/cryptography/x509/extensions.py
+++ b/src/cryptography/x509/extensions.py
@@ -104,6 +104,11 @@ def __iter__(self):
     def __len__(self):
         return len(self._extensions)
 
+    def __repr__(self):
+        return (
+            "<Extensions({0})>".format(self._extensions)
+        )
+
 
 @utils.register_interface(ExtensionType)
 class AuthorityKeyIdentifier(object):
diff --git a/tests/test_x509_ext.py b/tests/test_x509_ext.py
index 1bc14620bcde..8f4693666561 100644
--- a/tests/test_x509_ext.py
+++ b/tests/test_x509_ext.py
@@ -857,6 +857,20 @@ def test_one_extension_get_for_class(self, backend):
         assert ext is not None
         assert isinstance(ext.value, x509.BasicConstraints)
 
+    def test_repr(self, backend):
+        cert = _load_cert(
+            os.path.join(
+                "x509", "custom", "basic_constraints_not_critical.pem"
+            ),
+            x509.load_pem_x509_certificate,
+            backend
+        )
+        assert repr(cert.extensions) == (
+            "<Extensions([<Extension(oid=<ObjectIdentifier(oid=2.5.29.19, name"
+            "=basicConstraints)>, critical=False, value=<BasicConstraints(ca=F"
+            "alse, path_length=None)>)>])>"
+        )
+
 
 @pytest.mark.requires_backend_interface(interface=RSABackend)
 @pytest.mark.requires_backend_interface(interface=X509Backend)
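For context, a minimal sketch of how the `Extensions.__repr__` added in the diff above surfaces to a library user. The certificate path below is a placeholder, and the expected output shape is taken from the `test_repr` assertion in the diff; this is an illustrative sketch, not code from the PR itself.

```python
from cryptography import x509
from cryptography.hazmat.backends import default_backend

# Load any PEM-encoded certificate ("cert.pem" is a hypothetical path).
with open("cert.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

# With the new __repr__, the Extensions collection prints each contained
# Extension, e.g. for a cert whose only extension is a non-critical
# basicConstraints:
# <Extensions([<Extension(oid=<ObjectIdentifier(oid=2.5.29.19,
#   name=basicConstraints)>, critical=False,
#   value=<BasicConstraints(ca=False, path_length=None)>)>])>
print(repr(cert.extensions))
```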
ckan__ckan-7808
[Snyk] Security upgrade certifi from 2021.10.8 to 2023.7.22

This PR was automatically created by Snyk using the credentials of a real user.

### Snyk has created this PR to fix one or more vulnerable packages in the `pip` dependencies of this project.

#### Changes included in this PR

- Changes to the following files to upgrade the vulnerable dependencies to a fixed version:
  - requirements.txt

#### Vulnerabilities that will be fixed

##### By pinning:

Severity | Priority Score (*) | Issue | Upgrade | Breaking Change | Exploit Maturity
:-------------------------:|-------------------------|:-------------------------|:-------------------------|:-------------------------|:-------------------------
![low severity](https://res.cloudinary.com/snyk/image/upload/w_20,h_20/v1561977819/icon/l.png "low severity") | **471/1000** <br/> **Why?** Recently disclosed, Has a fix available, CVSS 3.7 | Improper Following of a Certificate's Chain of Trust <br/>[SNYK-PYTHON-CERTIFI-5805047](https://snyk.io/vuln/SNYK-PYTHON-CERTIFI-5805047) | `certifi:` <br> `2021.10.8 -> 2023.7.22` <br> | No | No Known Exploit

(*) Note that the real score may have changed since the PR was raised.

Some vulnerabilities couldn't be fully fixed and so Snyk will still find them when the project is tested again. This may be because the vulnerability existed within more than one direct dependency, but not all of the affected dependencies could be upgraded. Check the changes in this PR to ensure they won't cause issues with your project.

------------

**Note:** *You are seeing this because you or someone else with access to this repository has authorized Snyk to open fix PRs.*

For more information:

🧐 [View latest project report](https://app.snyk.io/org/wardi/project/7696de9f-5904-4fe5-8767-91ee8e4d2b04?utm_source=github&utm_medium=referral&page=fix-pr)

🛠 [Adjust project settings](https://app.snyk.io/org/wardi/project/7696de9f-5904-4fe5-8767-91ee8e4d2b04?utm_source=github&utm_medium=referral&page=fix-pr/settings)

📚 [Read more about Snyk's upgrade and patch logic](https://support.snyk.io/hc/en-us/articles/360003891078-Snyk-patches-to-fix-vulnerabilities)

---

**Learn how to fix vulnerabilities with free interactive lessons:**

🦉 [Learn about vulnerability in an interactive lesson of Snyk Learn.](https://learn.snyk.io/?loc=fix-pr)
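The fix described above is a one-line dependency pin. A minimal sketch of the change to `requirements.txt`, assuming certifi is pinned there with an exact `==` specifier (the surrounding lines of that file are not shown in this record):

```diff
-certifi==2021.10.8
+certifi==2023.7.22
```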
[ { "content": "#!/usr/bin/env python\n# encoding: utf-8\n\nu'''\nAsynchronous background jobs.\n\nNote that most job management functions are not available from this\nmodule but via the various ``job_*`` API functions.\n\nInternally, RQ queue names are prefixed with a string containing the\nCKAN site ID to avoid key collisions when the same Redis database is\nused for multiple CKAN instances. The functions of this module expect\nunprefixed queue names (e.g. ``'default'``) unless noted otherwise. The\nraw RQ objects (e.g. a queue returned by ``get_queue``) use the full,\nprefixed names. Use the functions ``add_queue_name_prefix`` and\n``remove_queue_name_prefix`` to manage queue name prefixes.\n\n.. versionadded:: 2.7\n'''\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any, Union, Callable, Iterable, Optional, cast\nfrom redis import Redis\n\nimport rq\nfrom rq.connections import push_connection\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq.utils import ensure_list\n\nfrom ckan.lib.redis import connect_to_redis\nfrom ckan.common import config\nfrom ckan.config.environment import load_environment\nfrom ckan.model import meta\nimport ckan.plugins as plugins\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_QUEUE_NAME = u'default'\n\n# RQ job queues. Do not use this directly, use ``get_queue`` instead.\n_queues: dict[str, rq.Queue] = {}\n\n\ndef _connect() -> Redis: # type: ignore\n u'''\n Connect to Redis and tell RQ about it.\n\n Workaround for https://github.com/nvie/rq/issues/479.\n '''\n conn = connect_to_redis()\n push_connection(conn)\n return conn\n\n\ndef _get_queue_name_prefix() -> str:\n u'''\n Get the queue name prefix.\n '''\n # This must be done at runtime since we need a loaded config\n return u'ckan:{}:'.format(config[u'ckan.site_id'])\n\n\ndef add_queue_name_prefix(name: str) -> str:\n u'''\n Prefix a queue name.\n\n .. seealso:: :py:func:`remove_queue_name_prefix`\n '''\n return _get_queue_name_prefix() + name\n\n\ndef remove_queue_name_prefix(name: str) -> str:\n u'''\n Remove a queue name's prefix.\n\n :raises ValueError: if the given name is not prefixed.\n\n .. seealso:: :py:func:`add_queue_name_prefix`\n '''\n prefix = _get_queue_name_prefix()\n if not name.startswith(prefix):\n raise ValueError(u'Queue name \"{}\" is not prefixed.'.format(name))\n return name[len(prefix):]\n\n\ndef get_all_queues() -> list[rq.Queue]:\n u'''\n Return all job queues currently in use.\n\n :returns: The queues.\n :rtype: List of ``rq.queue.Queue`` instances\n\n .. seealso:: :py:func:`get_queue`\n '''\n redis_conn = _connect()\n prefix = _get_queue_name_prefix()\n return [q for q in rq.Queue.all(connection=redis_conn) if\n q.name.startswith(prefix)]\n\n\ndef get_queue(name: str = DEFAULT_QUEUE_NAME) -> rq.Queue:\n u'''\n Get a job queue.\n\n The job queue is initialized if that hasn't happened before.\n\n :param string name: The name of the queue. If not given then the\n default queue is returned.\n\n :returns: The job queue.\n :rtype: ``rq.queue.Queue``\n\n .. 
seealso:: :py:func:`get_all_queues`\n '''\n global _queues\n fullname = add_queue_name_prefix(name)\n try:\n return _queues[fullname]\n except KeyError:\n log.debug(u'Initializing background job queue \"{}\"'.format(name))\n redis_conn = _connect()\n queue = _queues[fullname] = rq.Queue(fullname, connection=redis_conn)\n return queue\n\n\ndef enqueue(fn: Callable[..., Any],\n args: Optional[Union[tuple[Any], list[Any], None]] = None,\n kwargs: Optional[dict[str, Any]] = None,\n title: Optional[str] = None,\n queue: str = DEFAULT_QUEUE_NAME,\n rq_kwargs: Optional[dict[str, Any]] = None) -> Job:\n u'''\n Enqueue a job to be run in the background.\n\n :param function fn: Function to be executed in the background\n\n :param list args: List of arguments to be passed to the function.\n Pass an empty list if there are no arguments (default).\n\n :param dict kwargs: Dict of keyword arguments to be passed to the\n function. Pass an empty dict if there are no keyword arguments\n (default).\n\n :param string title: Optional human-readable title of the job.\n\n :param string queue: Name of the queue. If not given then the\n default queue is used.\n\n :param dict rq_kwargs: Dict of keyword arguments that will get passed\n to the RQ ``enqueue_call`` invocation (eg ``timeout``, ``depends_on``,\n ``ttl`` etc).\n\n :returns: The enqueued job.\n :rtype: ``rq.job.Job``\n '''\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n if rq_kwargs is None:\n rq_kwargs = {}\n timeout = config.get(u'ckan.jobs.timeout')\n rq_kwargs[u'timeout'] = rq_kwargs.get(u'timeout', timeout)\n\n job = get_queue(queue).enqueue_call(\n func=fn, args=args, kwargs=kwargs, **rq_kwargs)\n if not job.meta:\n job.meta = {}\n job.meta[\"title\"] = title\n job.save()\n msg = u'Added background job {}'.format(job.id)\n if title:\n msg = u'{} (\"{}\")'.format(msg, title)\n msg = u'{} to queue \"{}\"'.format(msg, queue)\n log.info(msg)\n return job\n\n\ndef job_from_id(id: str) -> Job:\n u'''\n Look up an enqueued job by its ID.\n\n :param string id: The ID of the job.\n\n :returns: The job.\n :rtype: ``rq.job.Job``\n\n :raises KeyError: if no job with that ID exists.\n '''\n try:\n return Job.fetch(id, connection=_connect())\n except NoSuchJobError:\n raise KeyError(u'There is no job with ID \"{}\".'.format(id))\n\n\ndef dictize_job(job: Job) -> dict[str, Any]:\n u'''Convert a job to a dict.\n\n In contrast to ``rq.job.Job.to_dict`` this function includes only\n the attributes that are relevant to our use case and promotes the\n meta attributes that we use (e.g. ``title``).\n\n :param rq.job.Job job: The job to dictize.\n\n :returns: The dictized job.\n :rtype: dict\n '''\n assert job.created_at\n assert job.origin is not None\n if not job.meta:\n job.meta = {}\n return {\n \"id\": job.id,\n \"title\": job.meta.get(\"title\"),\n \"created\": job.created_at.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"queue\": remove_queue_name_prefix(job.origin),\n }\n\n\ndef test_job(*args: Any) -> None:\n u'''Test job.\n\n A test job for debugging purposes. Prints out any arguments it\n receives. Can be scheduled via ``paster jobs test``.\n '''\n print(args)\n\n\nclass Worker(rq.Worker):\n u'''\n CKAN-specific worker.\n\n Note that starting an instance of this class (via the ``work``\n method) disposes the currently active database engine and the\n associated session. This is necessary to prevent their corruption by\n the forked worker process. Both the engine and the session\n automatically re-initialize afterwards once they are used. 
However,\n non-committed changes are rolled back and instance variables bound\n to the old session have to be re-fetched from the database.\n '''\n def __init__(self,\n queues: Optional[Iterable[str]] = None,\n *args: Any,\n **kwargs: Any) -> None:\n u'''\n Constructor.\n\n Accepts the same arguments as the constructor of\n ``rq.worker.Worker``. However, the behavior of the ``queues``\n parameter is different.\n\n :param queues: The job queue(s) to listen on. Can be a string\n with the name of a single queue or a list of queue names.\n If not given then the default queue is used.\n '''\n queue_names = cast(Iterable[str], ensure_list(\n queues or [DEFAULT_QUEUE_NAME]\n ))\n\n qs = [get_queue(q) for q in queue_names]\n rq.worker.logger.setLevel(logging.INFO)\n super(Worker, self).__init__(qs, *args, **kwargs)\n\n def register_birth(self, *args: Any, **kwargs: Any) -> None:\n result = super(Worker, self).register_birth(*args, **kwargs)\n names_list = [remove_queue_name_prefix(n) for n in self.queue_names()]\n names = u', '.join(u'\"{}\"'.format(n) for n in names_list)\n log.info(u'Worker {} (PID {}) has started on queue(s) {} '.format(\n self.key, self.pid, names))\n return result\n\n def execute_job(self, job: Job, *args: Any, **kwargs: Any) -> None:\n # We shut down all database connections and the engine to make sure\n # that they are not shared with the child process and closed there\n # while still being in use in the main process, see\n #\n # https://github.com/ckan/ckan/issues/3365\n #\n # Note that this rolls back any non-committed changes in the session.\n # Both `Session` and `engine` automatically re-initialize themselve\n # when they are used the next time.\n log.debug(u'Disposing database engine before fork')\n meta.Session.remove()\n assert meta.engine\n meta.engine.dispose()\n\n # The original implementation performs the actual fork\n queue = remove_queue_name_prefix(cast(str, job.origin))\n\n if not job.meta:\n job.meta = {}\n if job.meta.get('title'):\n job_id = '{} ({})'.format(job.id, job.meta['title'])\n else:\n job_id = job.id\n\n log.info(u'Worker {} starts job {} from queue \"{}\"'.format(\n self.key, job_id, queue))\n for plugin in plugins.PluginImplementations(plugins.IForkObserver):\n plugin.before_fork()\n result = super(Worker, self).execute_job(job, *args, **kwargs)\n log.info(u'Worker {} has finished job {} from queue \"{}\"'.format(\n self.key, job_id, queue))\n\n return result\n\n def register_death(self, *args: Any, **kwargs: Any) -> None:\n result = super(Worker, self).register_death(*args, **kwargs)\n log.info(u'Worker {} (PID {}) has stopped'.format(self.key, self.pid))\n return result\n\n def handle_exception(self, job: Job, *exc_info: Any) -> None:\n log.exception(u'Job {} on worker {} raised an exception: {}'.format(\n job.id, self.key, exc_info[1]))\n return super(Worker, self).handle_exception(job, *exc_info)\n\n def main_work_horse(self, job: Job, queue: rq.Queue):\n # This method is called in a worker's work horse process right\n # after forking.\n load_environment(config)\n return super(Worker, self).main_work_horse(job, queue)\n\n def perform_job(self, *args: Any, **kwargs: Any) -> bool:\n result = super(Worker, self).perform_job(*args, **kwargs)\n # rq.Worker.main_work_horse does a hard exit via os._exit directly\n # after its call to perform_job returns. 
Hence here is the correct\n # location to clean up.\n try:\n meta.Session.remove()\n except Exception:\n log.exception(u'Error while closing database session')\n try:\n assert meta.engine\n meta.engine.dispose()\n except Exception:\n log.exception(u'Error while disposing database engine')\n return result\n", "path": "ckan/lib/jobs.py" } ]
[ { "content": "#!/usr/bin/env python\n# encoding: utf-8\n\nu'''\nAsynchronous background jobs.\n\nNote that most job management functions are not available from this\nmodule but via the various ``job_*`` API functions.\n\nInternally, RQ queue names are prefixed with a string containing the\nCKAN site ID to avoid key collisions when the same Redis database is\nused for multiple CKAN instances. The functions of this module expect\nunprefixed queue names (e.g. ``'default'``) unless noted otherwise. The\nraw RQ objects (e.g. a queue returned by ``get_queue``) use the full,\nprefixed names. Use the functions ``add_queue_name_prefix`` and\n``remove_queue_name_prefix`` to manage queue name prefixes.\n\n.. versionadded:: 2.7\n'''\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any, Union, Callable, Iterable, Optional, cast\nfrom redis import Redis\n\nimport rq\nfrom rq.connections import push_connection\nfrom rq.exceptions import NoSuchJobError\nfrom rq.job import Job\nfrom rq.utils import ensure_list\n\nfrom ckan.lib.redis import connect_to_redis\nfrom ckan.common import config\nfrom ckan.config.environment import load_environment\nfrom ckan.model import meta\nimport ckan.plugins as plugins\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_QUEUE_NAME = u'default'\n\n# RQ job queues. Do not use this directly, use ``get_queue`` instead.\n_queues: dict[str, rq.Queue] = {}\n\n\ndef _connect() -> Redis: # type: ignore\n u'''\n Connect to Redis and tell RQ about it.\n\n Workaround for https://github.com/nvie/rq/issues/479.\n '''\n conn = connect_to_redis()\n push_connection(conn)\n return conn\n\n\ndef _get_queue_name_prefix() -> str:\n u'''\n Get the queue name prefix.\n '''\n # This must be done at runtime since we need a loaded config\n return u'ckan:{}:'.format(config[u'ckan.site_id'])\n\n\ndef add_queue_name_prefix(name: str) -> str:\n u'''\n Prefix a queue name.\n\n .. seealso:: :py:func:`remove_queue_name_prefix`\n '''\n return _get_queue_name_prefix() + name\n\n\ndef remove_queue_name_prefix(name: str) -> str:\n u'''\n Remove a queue name's prefix.\n\n :raises ValueError: if the given name is not prefixed.\n\n .. seealso:: :py:func:`add_queue_name_prefix`\n '''\n prefix = _get_queue_name_prefix()\n if not name.startswith(prefix):\n raise ValueError(u'Queue name \"{}\" is not prefixed.'.format(name))\n return name[len(prefix):]\n\n\ndef get_all_queues() -> list[rq.Queue]:\n u'''\n Return all job queues currently in use.\n\n :returns: The queues.\n :rtype: List of ``rq.queue.Queue`` instances\n\n .. seealso:: :py:func:`get_queue`\n '''\n redis_conn = _connect()\n prefix = _get_queue_name_prefix()\n return [q for q in rq.Queue.all(connection=redis_conn) if\n q.name.startswith(prefix)]\n\n\ndef get_queue(name: str = DEFAULT_QUEUE_NAME) -> rq.Queue:\n u'''\n Get a job queue.\n\n The job queue is initialized if that hasn't happened before.\n\n :param string name: The name of the queue. If not given then the\n default queue is returned.\n\n :returns: The job queue.\n :rtype: ``rq.queue.Queue``\n\n .. 
seealso:: :py:func:`get_all_queues`\n '''\n global _queues\n fullname = add_queue_name_prefix(name)\n try:\n return _queues[fullname]\n except KeyError:\n log.debug(u'Initializing background job queue \"{}\"'.format(name))\n redis_conn = _connect()\n queue = _queues[fullname] = rq.Queue(fullname, connection=redis_conn)\n return queue\n\n\ndef enqueue(fn: Callable[..., Any],\n args: Optional[Union[tuple[Any], list[Any], None]] = None,\n kwargs: Optional[dict[str, Any]] = None,\n title: Optional[str] = None,\n queue: str = DEFAULT_QUEUE_NAME,\n rq_kwargs: Optional[dict[str, Any]] = None) -> Job:\n u'''\n Enqueue a job to be run in the background.\n\n :param function fn: Function to be executed in the background\n\n :param list args: List of arguments to be passed to the function.\n Pass an empty list if there are no arguments (default).\n\n :param dict kwargs: Dict of keyword arguments to be passed to the\n function. Pass an empty dict if there are no keyword arguments\n (default).\n\n :param string title: Optional human-readable title of the job.\n\n :param string queue: Name of the queue. If not given then the\n default queue is used.\n\n :param dict rq_kwargs: Dict of keyword arguments that will get passed\n to the RQ ``enqueue_call`` invocation (eg ``timeout``, ``depends_on``,\n ``ttl`` etc).\n\n :returns: The enqueued job.\n :rtype: ``rq.job.Job``\n '''\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n if rq_kwargs is None:\n rq_kwargs = {}\n timeout = config.get(u'ckan.jobs.timeout')\n rq_kwargs[u'timeout'] = rq_kwargs.get(u'timeout', timeout)\n\n job = get_queue(queue).enqueue_call(\n func=fn, args=args, kwargs=kwargs, **rq_kwargs)\n if not job.meta:\n job.meta = {}\n job.meta[\"title\"] = title\n job.save()\n msg = u'Added background job {}'.format(job.id)\n if title:\n msg = u'{} (\"{}\")'.format(msg, title)\n msg = u'{} to queue \"{}\"'.format(msg, queue)\n log.info(msg)\n return job\n\n\ndef job_from_id(id: str) -> Job:\n u'''\n Look up an enqueued job by its ID.\n\n :param string id: The ID of the job.\n\n :returns: The job.\n :rtype: ``rq.job.Job``\n\n :raises KeyError: if no job with that ID exists.\n '''\n try:\n return Job.fetch(id, connection=_connect())\n except NoSuchJobError:\n raise KeyError(u'There is no job with ID \"{}\".'.format(id))\n\n\ndef dictize_job(job: Job) -> dict[str, Any]:\n u'''Convert a job to a dict.\n\n In contrast to ``rq.job.Job.to_dict`` this function includes only\n the attributes that are relevant to our use case and promotes the\n meta attributes that we use (e.g. ``title``).\n\n :param rq.job.Job job: The job to dictize.\n\n :returns: The dictized job.\n :rtype: dict\n '''\n assert job.created_at\n assert job.origin is not None\n if not job.meta:\n job.meta = {}\n return {\n \"id\": job.id,\n \"title\": job.meta.get(\"title\"),\n \"created\": job.created_at.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"queue\": remove_queue_name_prefix(job.origin),\n }\n\n\ndef test_job(*args: Any) -> None:\n u'''Test job.\n\n A test job for debugging purposes. Prints out any arguments it\n receives. Can be scheduled via ``paster jobs test``.\n '''\n print(args)\n\n\nclass Worker(rq.Worker):\n u'''\n CKAN-specific worker.\n\n Note that starting an instance of this class (via the ``work``\n method) disposes the currently active database engine and the\n associated session. This is necessary to prevent their corruption by\n the forked worker process. Both the engine and the session\n automatically re-initialize afterwards once they are used. 
However,\n non-committed changes are rolled back and instance variables bound\n to the old session have to be re-fetched from the database.\n '''\n def __init__(self,\n queues: Optional[Iterable[str]] = None,\n *args: Any,\n **kwargs: Any) -> None:\n u'''\n Constructor.\n\n Accepts the same arguments as the constructor of\n ``rq.worker.Worker``. However, the behavior of the ``queues``\n parameter is different.\n\n :param queues: The job queue(s) to listen on. Can be a string\n with the name of a single queue or a list of queue names.\n If not given then the default queue is used.\n '''\n queue_names = cast(Iterable[str], ensure_list(\n queues or [DEFAULT_QUEUE_NAME]\n ))\n\n qs = [get_queue(q) for q in queue_names]\n rq.worker.logger.setLevel(logging.INFO)\n super(Worker, self).__init__(qs, *args, **kwargs)\n\n def register_birth(self, *args: Any, **kwargs: Any) -> None:\n result = super(Worker, self).register_birth(*args, **kwargs)\n names_list = [remove_queue_name_prefix(n) for n in self.queue_names()]\n names = u', '.join(u'\"{}\"'.format(n) for n in names_list)\n log.info(u'Worker {} (PID {}) has started on queue(s) {} '.format(\n self.key, self.pid, names))\n return result\n\n def execute_job(self, job: Job, *args: Any, **kwargs: Any) -> None:\n # We shut down all database connections and the engine to make sure\n # that they are not shared with the child process and closed there\n # while still being in use in the main process, see\n #\n # https://github.com/ckan/ckan/issues/3365\n #\n # Note that this rolls back any non-committed changes in the session.\n # Both `Session` and `engine` automatically re-initialize themselve\n # when they are used the next time.\n log.debug(u'Disposing database engine before fork')\n meta.Session.remove()\n assert meta.engine\n meta.engine.dispose()\n\n # The original implementation performs the actual fork\n queue = remove_queue_name_prefix(job.origin)\n\n if not job.meta:\n job.meta = {}\n if job.meta.get('title'):\n job_id = '{} ({})'.format(job.id, job.meta['title'])\n else:\n job_id = job.id\n\n log.info(u'Worker {} starts job {} from queue \"{}\"'.format(\n self.key, job_id, queue))\n for plugin in plugins.PluginImplementations(plugins.IForkObserver):\n plugin.before_fork()\n result = super(Worker, self).execute_job(job, *args, **kwargs)\n log.info(u'Worker {} has finished job {} from queue \"{}\"'.format(\n self.key, job_id, queue))\n\n return result\n\n def register_death(self, *args: Any, **kwargs: Any) -> None:\n result = super(Worker, self).register_death(*args, **kwargs)\n log.info(u'Worker {} (PID {}) has stopped'.format(self.key, self.pid))\n return result\n\n def handle_exception(self, job: Job, *exc_info: Any) -> None:\n log.exception(u'Job {} on worker {} raised an exception: {}'.format(\n job.id, self.key, exc_info[1]))\n return super(Worker, self).handle_exception(job, *exc_info)\n\n def main_work_horse(self, job: Job, queue: rq.Queue):\n # This method is called in a worker's work horse process right\n # after forking.\n load_environment(config)\n return super(Worker, self).main_work_horse(job, queue)\n\n def perform_job(self, *args: Any, **kwargs: Any) -> bool:\n result = super(Worker, self).perform_job(*args, **kwargs)\n # rq.Worker.main_work_horse does a hard exit via os._exit directly\n # after its call to perform_job returns. 
Hence here is the correct\n # location to clean up.\n try:\n meta.Session.remove()\n except Exception:\n log.exception(u'Error while closing database session')\n try:\n assert meta.engine\n meta.engine.dispose()\n except Exception:\n log.exception(u'Error while disposing database engine')\n return result\n", "path": "ckan/lib/jobs.py" } ]
diff --git a/changes/7808.misc b/changes/7808.misc new file mode 100644 index 00000000000..07793606004 --- /dev/null +++ b/changes/7808.misc @@ -0,0 +1 @@ +Because of a new version of Sphinx, the command to rebuild the documentation is now ``sphinx-build doc build/sphinx`` diff --git a/ckan/lib/jobs.py b/ckan/lib/jobs.py index 8d9c0945fb0..641df60a470 100644 --- a/ckan/lib/jobs.py +++ b/ckan/lib/jobs.py @@ -287,7 +287,7 @@ def execute_job(self, job: Job, *args: Any, **kwargs: Any) -> None: meta.engine.dispose() # The original implementation performs the actual fork - queue = remove_queue_name_prefix(cast(str, job.origin)) + queue = remove_queue_name_prefix(job.origin) if not job.meta: job.meta = {} diff --git a/ckan/tests/test_coding_standards.py b/ckan/tests/test_coding_standards.py index 2b7a70ff1ed..b7f6f0e58d0 100644 --- a/ckan/tests/test_coding_standards.py +++ b/ckan/tests/test_coding_standards.py @@ -233,7 +233,7 @@ def test_building_the_docs(): """ try: output = subprocess.check_output( - [b"python", b"setup.py", b"build_sphinx"], stderr=subprocess.STDOUT + [b"sphinx-build", b"doc", b"build/sphinx"], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as err: assert ( diff --git a/dev-requirements.txt b/dev-requirements.txt index 5d9b83ce5e7..1ea7c6e7b73 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -3,24 +3,24 @@ beautifulsoup4==4.12.2 cookiecutter==2.1.1 coveralls #Let Unpinned - Requires latest coveralls -docutils==0.18 -Faker==18.5.1 -factory-boy==3.2.1 +docutils==0.18.1 # Needed until sphinx-rtd-theme 2.0 is out +Faker==19.6.2 +factory-boy==3.3.0 flask-debugtoolbar==0.13.1 freezegun==1.2.2 ipdb==0.13.13 -pip-tools==6.13.0 -Pillow==9.5.0 -responses==0.23.1 -sphinx-rtd-theme==1.2.0 +pip-tools==7.3.0 +Pillow==10.0.1 +responses==0.23.3 +sphinx-rtd-theme==1.3.0 sqlalchemy-stubs==0.4 -sphinx==5.3.0 +sphinx==7.1.2 toml==0.10.2 towncrier==22.12.0 -pytest==7.3.1 -pytest-cov==4.0.0 +pytest==7.4.2 +pytest-cov==4.1.0 pytest-factoryboy==2.5.1 pytest-freezegun==0.4.2 -pytest-rerunfailures==11.1.2 +pytest-rerunfailures==12.0 pytest-split==0.8.1 diff --git a/doc/contributing/documentation.rst b/doc/contributing/documentation.rst index 16da3f6b714..03bc4992eec 100644 --- a/doc/contributing/documentation.rst +++ b/doc/contributing/documentation.rst @@ -83,7 +83,7 @@ Build the docs You should now be able to build the CKAN documentation locally. Make sure your virtual environment is activated, and then run this command:: - python setup.py build_sphinx + sphinx-build doc build/sphinx Now you can open the built HTML files in ``build/sphinx/html``, e.g.:: @@ -96,7 +96,7 @@ Edit the reStructuredText files To make changes to the documentation, use a text editor to edit the ``.rst`` files in ``doc/``. Save your changes and then build the docs -again (``python setup.py build_sphinx``) and open the HTML files in a web +again (``sphinx-build doc build/sphinx``) and open the HTML files in a web browser to preview your changes. Once your docs are ready to submit to the CKAN project, follow the steps in @@ -216,7 +216,7 @@ any new ones. 
It's best to delete the ``build`` directory and completely rebuild the docs, to check for any warnings:: - rm -rf build; python setup.py build_sphinx + rm -rf build; sphinx-build doc build/sphinx Maximum line length diff --git a/doc/contributing/release-process.rst b/doc/contributing/release-process.rst index ded71dd69f1..9a65228f6bd 100644 --- a/doc/contributing/release-process.rst +++ b/doc/contributing/release-process.rst @@ -349,7 +349,7 @@ a release. #. Check that the docs compile correctly:: rm build/sphinx -rf - python setup.py build_sphinx + sphinx-build doc build/sphinx #. Remove the beta letter in the version number. diff --git a/requirements.in b/requirements.in index cdd0fb69a0f..7cf91a1370b 100644 --- a/requirements.in +++ b/requirements.in @@ -1,38 +1,39 @@ # The file contains the direct ckan requirements (python3). # Use pip-compile to create a requirements.txt file from this -alembic==1.10.4 +alembic==1.12.0 Babel==2.12.1 Beaker==1.12.1 bleach==6.0.0 blinker==1.6.2 -click==8.1.3 -dominate==2.7.0 +certifi>=2023.7.22 +click==8.1.7 +dominate==2.8.0 feedgen==0.9.0 -Flask==2.3.1 +Flask==2.3.3 Flask-Babel==3.1.0 Flask-Login==0.6.2 Flask-WTF==1.1.1 # For Python 3.11 support greenlet==2.0.2 Jinja2==3.1.2 -Markdown==3.4.3 +Markdown==3.4.4 passlib==1.7.4 polib==1.2.0 -psycopg2==2.9.6 -PyJWT==2.6.0 +psycopg2==2.9.7 +PyJWT==2.8.0 python-magic==0.4.27 pysolr==3.9.0 python-dateutil==2.8.2 pytz PyUtilib==6.0.0 -pyyaml==6.0 -requests==2.29.0 -rq==1.13.0 -simplejson==3.17.6 -SQLAlchemy[mypy]==1.4.47 +pyyaml==6.0.1 +requests==2.31.0 +rq==1.15.1 +simplejson==3.19.1 +SQLAlchemy[mypy]==1.4.49 sqlparse==0.4.4 -typing_extensions==4.5.0 -tzlocal==4.3 +typing_extensions==4.8.0 +tzlocal==5.0.1 webassets==2.0 -Werkzeug[watchdog]==2.3.1 +Werkzeug[watchdog]==2.3.7 zope.interface==6.0 diff --git a/requirements.txt b/requirements.txt index 95515ecde42..d913217cf6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,17 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # pip-compile requirements.in # -alembic==1.10.4 +alembic==1.12.0 # via -r requirements.in babel==2.12.1 # via # -r requirements.in # flask-babel +backports-zoneinfo==0.2.1 + # via tzlocal beaker==1.12.1 # via -r requirements.in bleach==6.0.0 @@ -18,22 +20,24 @@ blinker==1.6.2 # via # -r requirements.in # flask -certifi==2021.10.8 - # via requests +certifi==2023.7.22 + # via + # -r requirements.in + # requests charset-normalizer==2.0.12 # via requests -click==8.1.3 +click==8.1.7 # via # -r requirements.in # flask # rq deprecated==1.2.13 # via redis -dominate==2.7.0 +dominate==2.8.0 # via -r requirements.in feedgen==0.9.0 # via -r requirements.in -flask==2.3.1 +flask==2.3.3 # via # -r requirements.in # flask-babel @@ -53,8 +57,11 @@ idna==3.3 # via requests importlib-metadata==6.3.0 # via + # alembic # flask # markdown +importlib-resources==6.1.0 + # via alembic itsdangerous==2.1.2 # via # flask @@ -68,7 +75,7 @@ lxml==4.9.1 # via feedgen mako==1.2.2 # via alembic -markdown==3.4.3 +markdown==3.4.4 # via -r requirements.in markupsafe==2.1.1 # via @@ -88,9 +95,9 @@ passlib==1.7.4 # via -r requirements.in polib==1.2.0 # via -r requirements.in -psycopg2==2.9.6 +psycopg2==2.9.7 # via -r requirements.in -pyjwt==2.6.0 +pyjwt==2.8.0 # via -r requirements.in pyparsing==3.0.7 # via packaging @@ -105,29 +112,28 @@ python-magic==0.4.27 pytz==2022.7.1 # via # -r requirements.in + # babel # flask-babel -pytz-deprecation-shim==0.1.0.post0 
- # via tzlocal pyutilib==6.0.0 # via -r requirements.in -pyyaml==6.0 +pyyaml==6.0.1 # via -r requirements.in redis==4.1.4 # via rq -requests==2.29.0 +requests==2.31.0 # via # -r requirements.in # pysolr -rq==1.13.0 +rq==1.15.1 # via -r requirements.in -simplejson==3.17.6 +simplejson==3.19.1 # via -r requirements.in six==1.16.0 # via # bleach # python-dateutil # pyutilib -sqlalchemy[mypy]==1.4.47 +sqlalchemy[mypy]==1.4.49 # via # -r requirements.in # alembic @@ -137,15 +143,13 @@ sqlparse==0.4.4 # via -r requirements.in tomli==2.0.1 # via mypy -typing-extensions==4.5.0 +typing-extensions==4.8.0 # via # -r requirements.in # alembic # mypy # sqlalchemy2-stubs -tzdata==2022.1 - # via pytz-deprecation-shim -tzlocal==4.3 +tzlocal==5.0.1 # via -r requirements.in urllib3==1.26.9 # via requests @@ -155,7 +159,7 @@ webassets==2.0 # via -r requirements.in webencodings==0.5.1 # via bleach -werkzeug[watchdog]==2.3.1 +werkzeug[watchdog]==2.3.7 # via # -r requirements.in # flask @@ -165,7 +169,9 @@ wrapt==1.14.0 wtforms==3.0.1 # via flask-wtf zipp==3.15.0 - # via importlib-metadata + # via + # importlib-metadata + # importlib-resources zope-interface==6.0 # via -r requirements.in
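The code portion of this diff simply replaces `remove_queue_name_prefix(cast(str, job.origin))` with `remove_queue_name_prefix(job.origin)`. As a minimal, standalone sketch of the prefixing convention that helper relies on (the prefix value below is a placeholder for the real `ckan:<ckan.site_id>:` prefix, which is built from the loaded config at runtime):

```python
# Minimal standalone sketch of the queue-name prefixing used in ckan/lib/jobs.py.
# The prefix is a placeholder; the real one is "ckan:<ckan.site_id>:".
PREFIX = "ckan:default:"


def add_queue_name_prefix(name):
    # Turn an unprefixed queue name into the full RQ queue name.
    return PREFIX + name


def remove_queue_name_prefix(name):
    # Strip the site prefix from a full RQ queue name such as job.origin.
    if not name.startswith(PREFIX):
        raise ValueError('Queue name "{}" is not prefixed.'.format(name))
    return name[len(PREFIX):]


full_name = add_queue_name_prefix("default")           # "ckan:default:default"
assert remove_queue_name_prefix(full_name) == "default"
```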
python-poetry__poetry-1577
poetry v1.0.0b4 breaks on zip packages <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). - **OS version and name**: Windows 10 with a virtual environment for Python v3.7.4 - **Poetry version**: 1.0.0b4 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: (empty project) ## Issue Summary The newly refactored distribution loading mechanism from [PR 1549](https://github.com/sdispater/poetry/pull/1549/) fails when a zip-based package is present. The presenting problem is that the `zipp.Path` class is not compatible with the `__fspath__` protocol. ## Issue Details After updating to Poetry v1.0.0b4, I get this traceback ``` % poetry update -v Using virtualenv: C:\Users\garyd\devel\video-storage\venv-new [TypeError] expected str, bytes or os.PathLike object, not Path Traceback (most recent call last): File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\console_application.py", line 131, in run status_code = command.handle(parsed_args, io) File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 120, in handle status_code = self._do_handle(args, io) File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 171, in _do_handle return getattr(handler, handler_method)(args, io, self) File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\cleo\commands\command.py", line 92, in wrap_handle return self.handle() File "C:\Users\garyd\.poetry\lib\poetry\console\commands\update.py", line 36, in handle self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 55, in __init__ installed = self._get_installed() File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 507, in _get_installed return InstalledRepository.load(self._env) File "C:\Users\garyd\.poetry\lib\poetry\repositories\installed_repository.py", line 30, in load path = Path(distribution._path) File "C:\Users\garyd\AppData\Local\Programs\Python\Python37\lib\pathlib.py", line 1010, in __new__ self = cls._from_parts(args, init=False) ``` When I run the broken part of the code in my console, I find that the broken distribution is: * type == <class 'importlib_metadata.PathDistribution'> * type(dist._path) == <class 'zipp.Path'> * dist._path == C:\Users\garyd\devel\video-storage\venv\lib\site-packages\setuptools-40.8.0-py3.7.egg/EGG-INFO/
[ { "content": "from importlib_metadata import distributions\nfrom poetry.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n for distribution in sorted(\n distributions(path=env.sys_path), key=lambda d: str(d._path),\n ):\n metadata = distribution.metadata\n name = metadata[\"name\"]\n version = metadata[\"version\"]\n package = Package(name, version, version)\n package.description = metadata.get(\"summary\", \"\")\n\n repo.add_package(package)\n\n path = Path(distribution._path)\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py" } ]
[ { "content": "from importlib_metadata import distributions\nfrom poetry.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n for distribution in sorted(\n distributions(path=env.sys_path), key=lambda d: str(d._path),\n ):\n metadata = distribution.metadata\n name = metadata[\"name\"]\n version = metadata[\"version\"]\n package = Package(name, version, version)\n package.description = metadata.get(\"summary\", \"\")\n\n repo.add_package(package)\n\n path = Path(str(distribution._path))\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py" } ]
diff --git a/.gitignore b/.gitignore index b0c49d1474d..c2d2a72090e 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # Packages *.egg +!/tests/**/*.egg /*.egg-info /tests/fixtures/**/*.egg-info /dist/* diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py index b0f5330d097..cb8cdf3f27c 100644 --- a/poetry/repositories/installed_repository.py +++ b/poetry/repositories/installed_repository.py @@ -27,7 +27,7 @@ def load(cls, env): # type: (Env) -> InstalledRepository repo.add_package(package) - path = Path(distribution._path) + path = Path(str(distribution._path)) is_standard_package = True try: path.relative_to(env.site_packages) diff --git a/tests/repositories/fixtures/installed/lib/python3.7/site-packages/foo-0.1.0-py3.8.egg b/tests/repositories/fixtures/installed/lib/python3.7/site-packages/foo-0.1.0-py3.8.egg new file mode 100644 index 00000000000..e5bf8f5683c Binary files /dev/null and b/tests/repositories/fixtures/installed/lib/python3.7/site-packages/foo-0.1.0-py3.8.egg differ diff --git a/tests/repositories/test_installed_repository.py b/tests/repositories/test_installed_repository.py index 1ba76c733c5..19d6c65cc98 100644 --- a/tests/repositories/test_installed_repository.py +++ b/tests/repositories/test_installed_repository.py @@ -1,3 +1,5 @@ +import zipp + from importlib_metadata import PathDistribution from poetry.repositories.installed_repository import InstalledRepository from poetry.utils._compat import Path @@ -11,6 +13,7 @@ INSTALLED_RESULTS = [ PathDistribution(SITE_PACKAGES / "cleo-0.7.6.dist-info"), PathDistribution(SRC / "pendulum" / "pendulum.egg-info"), + PathDistribution(zipp.Path(str(SITE_PACKAGES / "foo-0.1.0-py3.8.egg"), "EGG-INFO")), ] @@ -37,7 +40,7 @@ def test_load(mocker): ) repository = InstalledRepository.load(MockEnv(path=ENV_DIR)) - assert len(repository.packages) == 2 + assert len(repository.packages) == 3 cleo = repository.packages[0] assert cleo.name == "cleo" @@ -47,7 +50,11 @@ def test_load(mocker): == "Cleo allows you to create beautiful and testable command-line interfaces." ) - pendulum = repository.packages[1] + foo = repository.packages[1] + assert foo.name == "foo" + assert foo.version.text == "0.1.0" + + pendulum = repository.packages[2] assert pendulum.name == "pendulum" assert pendulum.version.text == "2.0.5" assert pendulum.description == "Python datetimes made easy"
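The one-line fix above can be reproduced outside Poetry. The helper below is a hedged sketch (the function name is invented for illustration) of why `str()` is the lowest-common-denominator conversion: it works both for on-disk `dist-info` paths and for `zipp.Path` objects that predate `__fspath__` support.

```python
from pathlib import Path


def distribution_path(dist_path):
    # dist_path is a pathlib.Path for on-disk *.dist-info directories, but a
    # zipp.Path for EGG-INFO inside a zipped .egg.  Older zipp.Path objects do
    # not implement __fspath__, so Path(dist_path) raises
    # "TypeError: expected str, bytes or os.PathLike object, not Path";
    # both types yield a usable location via str().
    return Path(str(dist_path))


# Works for a plain path; a zipp.Path such as
# ".../setuptools-40.8.0-py3.7.egg/EGG-INFO" takes the same route.
print(distribution_path(Path("venv/lib/site-packages/cleo-0.7.6.dist-info")))
```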
streamlink__streamlink-5023
plugins.vtvgo: '403 Client Error: Forbidden for url: ...' ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest build from the master branch ### Description Last month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403. ### Debug log ```text streamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug [cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35 [cli][debug] Python: 3.10.6 [cli][debug] Streamlink: 5.1.2+4.g68dad105 [cli][debug] Dependencies: [cli][debug] certifi: 2022.9.24 [cli][debug] isodate: 0.6.1 [cli][debug] lxml: 4.9.1 [cli][debug] pycountry: 22.3.5 [cli][debug] pycryptodome: 3.15.0 [cli][debug] PySocks: 1.7.1 [cli][debug] requests: 2.28.1 [cli][debug] urllib3: 1.26.12 [cli][debug] websocket-client: 1.4.1 [cli][debug] importlib-metadata: 4.6.4 [cli][debug] Arguments: [cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html [cli][debug] --loglevel=debug [cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html error: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream) ```
[ { "content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py" } ]
[ { "content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py" } ]
diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py index 160a82eadb4..143295d544c 100644 --- a/src/streamlink/plugins/vtvgo.py +++ b/src/streamlink/plugins/vtvgo.py @@ -27,6 +27,7 @@ def _get_streams(self): self.session.http.headers.update({ "Origin": "https://vtvgo.vn", "Referer": self.url, + "Sec-Fetch-Site": "same-origin", "X-Requested-With": "XMLHttpRequest", })
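The fix above is a single extra header. The snippet below sketches the same request shape with plain `requests` rather than Streamlink's session wrapper; the `params` values are placeholders, since the real `id`/`type_id`/`time`/`token` are scraped from the channel page's `setplayer()` script.

```python
import requests

AJAX_URL = "https://vtvgo.vn/ajax-get-stream"
CHANNEL_URL = "https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html"

session = requests.Session()
session.get("https://vtvgo.vn/")  # obtain the cookies the endpoint checks
session.headers.update({
    "Origin": "https://vtvgo.vn",
    "Referer": CHANNEL_URL,
    "Sec-Fetch-Site": "same-origin",      # the header added by this fix
    "X-Requested-With": "XMLHttpRequest",
})

# Placeholder values; the plugin extracts the real ones from the page source.
params = {"id": 3, "type_id": "0", "time": "0", "token": "0"}
response = session.post(AJAX_URL, data=params)
print(response.status_code)  # a valid request returns JSON containing "stream_url"
```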
opendatacube__datacube-core-969
"datacube system init" fails when postgres user includes hyphen ### Expected behaviour Running `datacube system init` should init the postgres database, even if the postgres username includes a hyphen. ### Actual behaviour The init fails, causing issues with the sqlalchemy/psycopg2 driver. It looks like it's passing the username string with `"` marks included? Here is a trace: ``` Initialising database... Traceback (most recent call last): File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context cursor, statement, parameters, context File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute cursor.execute(statement, parameters) psycopg2.errors.SyntaxError: zero-length delimited identifier at or near """" LINE 1: set role ""ssdl-admin"" ^ The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/opt/conda/bin/datacube", line 10, in <module> sys.exit(cli()) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/datacube/ui/click.py", line 197, in new_func return f(parsed_config, *args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/datacube/ui/click.py", line 229, in with_index return f(index, *args, **kwargs) File "/opt/conda/lib/python3.6/site-packages/datacube/scripts/system.py", line 48, in database_init with_permissions=init_users) File "/opt/conda/lib/python3.6/site-packages/datacube/index/index.py", line 60, in init_db is_new = self._db.init(with_permissions=with_permissions) File "/opt/conda/lib/python3.6/site-packages/datacube/drivers/postgres/_connections.py", line 180, in init is_new = _core.ensure_db(self._engine, with_permissions=with_permissions) File "/opt/conda/lib/python3.6/site-packages/datacube/drivers/postgres/_core.py", line 88, in ensure_db c.execute('set role "{}"'.format(quoted_user)) File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1012, in execute return self._execute_text(object_, multiparams, params) File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1187, in _execute_text parameters, File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context e, statement, parameters, cursor, context File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception sqlalchemy_exception, with_traceback=exc_info[2], from_=e File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_ raise exception File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context cursor, statement, parameters, context File "/opt/conda/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute 
cursor.execute(statement, parameters) sqlalchemy.exc.ProgrammingError: (psycopg2.errors.SyntaxError) zero-length delimited identifier at or near """" LINE 1: set role ""ssdl-admin"" ^ [SQL: set role ""ssdl-admin""] (Background on this error at: http://sqlalche.me/e/f405) ``` ### Steps to reproduce the behaviour Run `datacube system init` with a postgres user including a hyphen, such as `ssdl-admin`. ### Environment information * Which ``datacube --version`` are you using? Open Data Cube core, version 1.8.0 * What datacube deployment/enviornment are you running against? Self-hosted
[ { "content": "# coding=utf-8\n\"\"\"\nCore SQL schema settings.\n\"\"\"\n\nimport logging\n\nfrom sqlalchemy import MetaData\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.schema import CreateSchema\n\nfrom datacube.drivers.postgres.sql import TYPES_INIT_SQL, pg_exists, pg_column_exists, escape_pg_identifier\n\nUSER_ROLES = ('agdc_user', 'agdc_ingest', 'agdc_manage', 'agdc_admin')\n\nSQL_NAMING_CONVENTIONS = {\n \"ix\": 'ix_%(column_0_label)s',\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\",\n # Other prefixes handled outside of sqlalchemy:\n # dix: dynamic-index, those indexes created automatically based on search field configuration.\n # tix: test-index, created by hand for testing, particularly in dev.\n}\nSCHEMA_NAME = 'agdc'\n\nMETADATA = MetaData(naming_convention=SQL_NAMING_CONVENTIONS, schema=SCHEMA_NAME)\n\n_LOG = logging.getLogger(__name__)\n\n\ndef schema_qualified(name):\n \"\"\"\n >>> schema_qualified('dataset')\n 'agdc.dataset'\n \"\"\"\n return '{}.{}'.format(SCHEMA_NAME, name)\n\n\ndef _get_quoted_connection_info(connection):\n db, user = connection.execute(\"select quote_ident(current_database()), quote_ident(current_user)\").fetchone()\n return db, user\n\n\ndef ensure_db(engine, with_permissions=True):\n \"\"\"\n Initialise the db if needed.\n\n Ensures standard users exist.\n\n Create the schema if it doesn't exist.\n \"\"\"\n is_new = False\n c = engine.connect()\n\n quoted_db_name, quoted_user = _get_quoted_connection_info(c)\n\n if with_permissions:\n _LOG.info('Ensuring user roles.')\n _ensure_role(c, 'agdc_user')\n _ensure_role(c, 'agdc_ingest', inherits_from='agdc_user')\n _ensure_role(c, 'agdc_manage', inherits_from='agdc_ingest')\n _ensure_role(c, 'agdc_admin', inherits_from='agdc_manage', add_user=True)\n\n c.execute(\"\"\"\n grant all on database {db} to agdc_admin;\n \"\"\".format(db=quoted_db_name))\n\n if not has_schema(engine, c):\n is_new = True\n try:\n c.execute('begin')\n if with_permissions:\n # Switch to 'agdc_admin', so that all items are owned by them.\n c.execute('set role agdc_admin')\n _LOG.info('Creating schema.')\n c.execute(CreateSchema(SCHEMA_NAME))\n _LOG.info('Creating tables.')\n c.execute(TYPES_INIT_SQL)\n METADATA.create_all(c)\n c.execute('commit')\n except:\n c.execute('rollback')\n raise\n finally:\n if with_permissions:\n c.execute('set role \"{}\"'.format(quoted_user))\n\n if with_permissions:\n _LOG.info('Adding role grants.')\n c.execute(\"\"\"\n grant usage on schema {schema} to agdc_user;\n grant select on all tables in schema {schema} to agdc_user;\n grant execute on function {schema}.common_timestamp(text) to agdc_user;\n\n grant insert on {schema}.dataset,\n {schema}.dataset_location,\n {schema}.dataset_source to agdc_ingest;\n grant usage, select on all sequences in schema {schema} to agdc_ingest;\n\n -- (We're only granting deletion of types that have nothing written yet: they can't delete the data itself)\n grant insert, delete on {schema}.dataset_type,\n {schema}.metadata_type to agdc_manage;\n -- Allow creation of indexes, views\n grant create on schema {schema} to agdc_manage;\n \"\"\".format(schema=SCHEMA_NAME))\n\n c.close()\n\n return is_new\n\n\ndef database_exists(engine):\n \"\"\"\n Have they init'd this database?\n \"\"\"\n return has_schema(engine, engine)\n\n\ndef schema_is_latest(engine: Engine) -> bool:\n \"\"\"\n Is the current schema 
up-to-date?\n\n This is run when a new connection is established to see if it's compatible.\n\n It should be runnable by unprivileged users. If it returns false, their\n connection will be rejected and they will be told to get an administrator\n to apply updates.\n\n See the ``update_schema()`` function below for actually applying the updates.\n \"\"\"\n # In lieu of a versioned schema, we typically check by seeing if one of the objects\n # from the change exists.\n #\n # Eg.\n # return pg_column_exists(engine, schema_qualified('dataset_location'), 'archived')\n #\n # ie. Does the 'archived' column exist? If so, we know the related schema was applied.\n\n # No schema changes recently. Everything is perfect.\n return True\n\n\ndef update_schema(engine: Engine):\n \"\"\"\n Check and apply any missing schema changes to the database.\n\n This is run by an administrator.\n\n See the `schema_is_latest()` function above: this should apply updates\n that it requires.\n \"\"\"\n # This will typically check if something exists (like a newly added column), and\n # run the SQL of the change inside a single transaction.\n\n # Empty, as no schema changes have been made recently.\n # -> If you need to write one, look at the Git history of this\n # function for some examples.\n \n # Post 1.8 DB Federation triggers\n from datacube.drivers.postgres._triggers import install_timestamp_trigger\n _LOG.info(\"Adding Update Triggers\")\n c = engine.connect()\n c.execute('begin')\n install_timestamp_trigger(c)\n c.execute('commit')\n c.close()\n\n\ndef _ensure_role(engine, name, inherits_from=None, add_user=False, create_db=False):\n if has_role(engine, name):\n _LOG.debug('Role exists: %s', name)\n return\n\n sql = [\n 'create role %s nologin inherit' % name,\n 'createrole' if add_user else 'nocreaterole',\n 'createdb' if create_db else 'nocreatedb'\n ]\n if inherits_from:\n sql.append('in role ' + inherits_from)\n engine.execute(' '.join(sql))\n\n\ndef grant_role(engine, role, users):\n if role not in USER_ROLES:\n raise ValueError('Unknown role %r. Expected one of %r' % (role, USER_ROLES))\n\n users = [escape_pg_identifier(engine, user) for user in users]\n with engine.begin():\n engine.execute('revoke {roles} from {users}'.format(users=', '.join(users), roles=', '.join(USER_ROLES)))\n engine.execute('grant {role} to {users}'.format(users=', '.join(users), role=role))\n\n\ndef has_role(conn, role_name):\n return bool(conn.execute('SELECT rolname FROM pg_roles WHERE rolname=%s', role_name).fetchall())\n\n\ndef has_schema(engine, connection):\n return engine.dialect.has_schema(connection, SCHEMA_NAME)\n\n\ndef drop_db(connection):\n connection.execute('drop schema if exists %s cascade;' % SCHEMA_NAME)\n\n\ndef to_pg_role(role):\n \"\"\"\n Convert a role name to a name for use in PostgreSQL\n\n There is a short list of valid ODC role names, and they are given\n a prefix inside of PostgreSQL.\n\n Why are we even doing this? Can't we use the same names internally and externally?\n\n >>> to_pg_role('ingest')\n 'agdc_ingest'\n >>> to_pg_role('fake')\n Traceback (most recent call last):\n ...\n ValueError: Unknown role 'fake'. Expected one of ...\n \"\"\"\n pg_role = 'agdc_' + role.lower()\n if pg_role not in USER_ROLES:\n raise ValueError(\n 'Unknown role %r. 
Expected one of %r' %\n (role, [r.split('_')[1] for r in USER_ROLES])\n )\n return pg_role\n\n\ndef from_pg_role(pg_role):\n \"\"\"\n Convert a PostgreSQL role name back to an ODC name.\n\n >>> from_pg_role('agdc_admin')\n 'admin'\n >>> from_pg_role('fake')\n Traceback (most recent call last):\n ...\n ValueError: Not a pg role: 'fake'. Expected one of ...\n \"\"\"\n if pg_role not in USER_ROLES:\n raise ValueError('Not a pg role: %r. Expected one of %r' % (pg_role, USER_ROLES))\n\n return pg_role.split('_')[1]\n", "path": "datacube/drivers/postgres/_core.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"\nCore SQL schema settings.\n\"\"\"\n\nimport logging\n\nfrom sqlalchemy import MetaData\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.schema import CreateSchema\n\nfrom datacube.drivers.postgres.sql import TYPES_INIT_SQL, pg_exists, pg_column_exists, escape_pg_identifier\n\nUSER_ROLES = ('agdc_user', 'agdc_ingest', 'agdc_manage', 'agdc_admin')\n\nSQL_NAMING_CONVENTIONS = {\n \"ix\": 'ix_%(column_0_label)s',\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\",\n # Other prefixes handled outside of sqlalchemy:\n # dix: dynamic-index, those indexes created automatically based on search field configuration.\n # tix: test-index, created by hand for testing, particularly in dev.\n}\nSCHEMA_NAME = 'agdc'\n\nMETADATA = MetaData(naming_convention=SQL_NAMING_CONVENTIONS, schema=SCHEMA_NAME)\n\n_LOG = logging.getLogger(__name__)\n\n\ndef schema_qualified(name):\n \"\"\"\n >>> schema_qualified('dataset')\n 'agdc.dataset'\n \"\"\"\n return '{}.{}'.format(SCHEMA_NAME, name)\n\n\ndef _get_quoted_connection_info(connection):\n db, user = connection.execute(\"select quote_ident(current_database()), quote_ident(current_user)\").fetchone()\n return db, user\n\n\ndef ensure_db(engine, with_permissions=True):\n \"\"\"\n Initialise the db if needed.\n\n Ensures standard users exist.\n\n Create the schema if it doesn't exist.\n \"\"\"\n is_new = False\n c = engine.connect()\n\n quoted_db_name, quoted_user = _get_quoted_connection_info(c)\n\n if with_permissions:\n _LOG.info('Ensuring user roles.')\n _ensure_role(c, 'agdc_user')\n _ensure_role(c, 'agdc_ingest', inherits_from='agdc_user')\n _ensure_role(c, 'agdc_manage', inherits_from='agdc_ingest')\n _ensure_role(c, 'agdc_admin', inherits_from='agdc_manage', add_user=True)\n\n c.execute(\"\"\"\n grant all on database {db} to agdc_admin;\n \"\"\".format(db=quoted_db_name))\n\n if not has_schema(engine, c):\n is_new = True\n try:\n c.execute('begin')\n if with_permissions:\n # Switch to 'agdc_admin', so that all items are owned by them.\n c.execute('set role agdc_admin')\n _LOG.info('Creating schema.')\n c.execute(CreateSchema(SCHEMA_NAME))\n _LOG.info('Creating tables.')\n c.execute(TYPES_INIT_SQL)\n METADATA.create_all(c)\n c.execute('commit')\n except:\n c.execute('rollback')\n raise\n finally:\n if with_permissions:\n c.execute('set role {}'.format(quoted_user))\n\n if with_permissions:\n _LOG.info('Adding role grants.')\n c.execute(\"\"\"\n grant usage on schema {schema} to agdc_user;\n grant select on all tables in schema {schema} to agdc_user;\n grant execute on function {schema}.common_timestamp(text) to agdc_user;\n\n grant insert on {schema}.dataset,\n {schema}.dataset_location,\n {schema}.dataset_source to agdc_ingest;\n grant usage, select on all sequences in schema {schema} to agdc_ingest;\n\n -- (We're only granting deletion of types that have nothing written yet: they can't delete the data itself)\n grant insert, delete on {schema}.dataset_type,\n {schema}.metadata_type to agdc_manage;\n -- Allow creation of indexes, views\n grant create on schema {schema} to agdc_manage;\n \"\"\".format(schema=SCHEMA_NAME))\n\n c.close()\n\n return is_new\n\n\ndef database_exists(engine):\n \"\"\"\n Have they init'd this database?\n \"\"\"\n return has_schema(engine, engine)\n\n\ndef schema_is_latest(engine: Engine) -> bool:\n \"\"\"\n Is the current schema 
up-to-date?\n\n This is run when a new connection is established to see if it's compatible.\n\n It should be runnable by unprivileged users. If it returns false, their\n connection will be rejected and they will be told to get an administrator\n to apply updates.\n\n See the ``update_schema()`` function below for actually applying the updates.\n \"\"\"\n # In lieu of a versioned schema, we typically check by seeing if one of the objects\n # from the change exists.\n #\n # Eg.\n # return pg_column_exists(engine, schema_qualified('dataset_location'), 'archived')\n #\n # ie. Does the 'archived' column exist? If so, we know the related schema was applied.\n\n # No schema changes recently. Everything is perfect.\n return True\n\n\ndef update_schema(engine: Engine):\n \"\"\"\n Check and apply any missing schema changes to the database.\n\n This is run by an administrator.\n\n See the `schema_is_latest()` function above: this should apply updates\n that it requires.\n \"\"\"\n # This will typically check if something exists (like a newly added column), and\n # run the SQL of the change inside a single transaction.\n\n # Empty, as no schema changes have been made recently.\n # -> If you need to write one, look at the Git history of this\n # function for some examples.\n \n # Post 1.8 DB Federation triggers\n from datacube.drivers.postgres._triggers import install_timestamp_trigger\n _LOG.info(\"Adding Update Triggers\")\n c = engine.connect()\n c.execute('begin')\n install_timestamp_trigger(c)\n c.execute('commit')\n c.close()\n\n\ndef _ensure_role(engine, name, inherits_from=None, add_user=False, create_db=False):\n if has_role(engine, name):\n _LOG.debug('Role exists: %s', name)\n return\n\n sql = [\n 'create role %s nologin inherit' % name,\n 'createrole' if add_user else 'nocreaterole',\n 'createdb' if create_db else 'nocreatedb'\n ]\n if inherits_from:\n sql.append('in role ' + inherits_from)\n engine.execute(' '.join(sql))\n\n\ndef grant_role(engine, role, users):\n if role not in USER_ROLES:\n raise ValueError('Unknown role %r. Expected one of %r' % (role, USER_ROLES))\n\n users = [escape_pg_identifier(engine, user) for user in users]\n with engine.begin():\n engine.execute('revoke {roles} from {users}'.format(users=', '.join(users), roles=', '.join(USER_ROLES)))\n engine.execute('grant {role} to {users}'.format(users=', '.join(users), role=role))\n\n\ndef has_role(conn, role_name):\n return bool(conn.execute('SELECT rolname FROM pg_roles WHERE rolname=%s', role_name).fetchall())\n\n\ndef has_schema(engine, connection):\n return engine.dialect.has_schema(connection, SCHEMA_NAME)\n\n\ndef drop_db(connection):\n connection.execute('drop schema if exists %s cascade;' % SCHEMA_NAME)\n\n\ndef to_pg_role(role):\n \"\"\"\n Convert a role name to a name for use in PostgreSQL\n\n There is a short list of valid ODC role names, and they are given\n a prefix inside of PostgreSQL.\n\n Why are we even doing this? Can't we use the same names internally and externally?\n\n >>> to_pg_role('ingest')\n 'agdc_ingest'\n >>> to_pg_role('fake')\n Traceback (most recent call last):\n ...\n ValueError: Unknown role 'fake'. Expected one of ...\n \"\"\"\n pg_role = 'agdc_' + role.lower()\n if pg_role not in USER_ROLES:\n raise ValueError(\n 'Unknown role %r. 
Expected one of %r' %\n (role, [r.split('_')[1] for r in USER_ROLES])\n )\n return pg_role\n\n\ndef from_pg_role(pg_role):\n \"\"\"\n Convert a PostgreSQL role name back to an ODC name.\n\n >>> from_pg_role('agdc_admin')\n 'admin'\n >>> from_pg_role('fake')\n Traceback (most recent call last):\n ...\n ValueError: Not a pg role: 'fake'. Expected one of ...\n \"\"\"\n if pg_role not in USER_ROLES:\n raise ValueError('Not a pg role: %r. Expected one of %r' % (pg_role, USER_ROLES))\n\n return pg_role.split('_')[1]\n", "path": "datacube/drivers/postgres/_core.py" } ]
diff --git a/datacube/drivers/postgres/_core.py b/datacube/drivers/postgres/_core.py index bef0164447..7594409149 100644 --- a/datacube/drivers/postgres/_core.py +++ b/datacube/drivers/postgres/_core.py @@ -85,7 +85,7 @@ def ensure_db(engine, with_permissions=True): raise finally: if with_permissions: - c.execute('set role "{}"'.format(quoted_user)) + c.execute('set role {}'.format(quoted_user)) if with_permissions: _LOG.info('Adding role grants.')
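The diff above is easiest to see as a plain string-formatting exercise: PostgreSQL's `quote_ident()` already returns a safely quoted identifier, so wrapping it in a second pair of double quotes produces the invalid `""ssdl-admin""` seen in the traceback. A minimal sketch, with the `quote_ident()` result hard-coded:

```python
# What SELECT quote_ident(current_user) returns for a role containing a hyphen.
quoted_user = '"ssdl-admin"'

broken = 'set role "{}"'.format(quoted_user)  # pre-fix: adds a second layer of quotes
fixed = 'set role {}'.format(quoted_user)     # post-fix: trusts quote_ident()

print(broken)  # set role ""ssdl-admin""  -> zero-length delimited identifier error
print(fixed)   # set role "ssdl-admin"    -> valid SQL
```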
pypi__warehouse-3928
Missing legacy redirection from pypi.python.org/pypi/ **Describe the bug** Redirections from `https://pypi.python.org/pypi/` are not handled (only redirected to `https://pypi.org/pypi/` by varnish (fastly)). As https://pypi.org/pypi/ does not exist, it creates some broken links. **Expected behavior** Simply a 301 to `https://pypi.org/`. **To Reproduce** ```$ curl -sI https://pypi.python.org/pypi/ HTTP/2 301 server: Varnish retry-after: 0 location: https://pypi.org/pypi/ [...redacted for readability...] ```
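The resolving change is not visible in the truncated record below, so the following is a purely hypothetical sketch (not necessarily the change that was merged) of how the missing redirect could be declared with the `config.add_redirect` helper that the shown `warehouse/routes.py` already uses for other legacy URLs.

```python
# Hypothetical sketch only -- not necessarily the change that was merged.
def includeme(config):
    warehouse = config.get_settings().get("warehouse.domain")

    # Send the bare legacy index at /pypi/ to the new front page, mirroring the
    # existing project-level redirects such as /pypi/{name}/ -> /project/{name}/.
    config.add_redirect("/pypi/", "/", domain=warehouse)
```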
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n # We need to get the value of the Warehouse and Forklift domains, we'll use\n # these to segregate the Warehouse routes from the Forklift routes until\n # Forklift is properly split out into it's own project.\n warehouse = config.get_settings().get(\"warehouse.domain\")\n files_url = config.get_settings()[\"files.url\"]\n\n # Simple Route for health checks.\n config.add_route(\"health\", \"/_health/\")\n\n # Internal route to make it easier to force a particular status for\n # debugging HTTPException templates.\n config.add_route(\"force-status\", \"/_force-status/{status:[45]\\d\\d}/\")\n\n # Basic global routes\n config.add_route(\"index\", \"/\", domain=warehouse)\n config.add_route(\"robots.txt\", \"/robots.txt\", domain=warehouse)\n config.add_route(\"opensearch.xml\", \"/opensearch.xml\", domain=warehouse)\n config.add_route(\"index.sitemap.xml\", \"/sitemap.xml\", domain=warehouse)\n config.add_route(\n \"bucket.sitemap.xml\",\n \"/{bucket}.sitemap.xml\",\n domain=warehouse,\n )\n\n # Some static, template driven pages\n config.add_template_view(\"help\", \"/help/\", \"pages/help.html\")\n config.add_template_view(\"security\", \"/security/\", \"pages/security.html\")\n config.add_template_view(\n \"sponsors\",\n \"/sponsors/\",\n # Use the full resource path here to make it able to be overridden by\n # pypi-theme.\n \"warehouse:templates/pages/sponsors.html\",\n )\n\n # Our legal policies\n config.add_policy(\"terms-of-use\", \"terms.md\")\n\n # HTML Snippets for including into other pages.\n config.add_route(\n \"includes.current-user-indicator\",\n \"/_includes/current-user-indicator/\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.flash-messages\",\n \"/_includes/flash-messages/\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.current-user-profile-callout\",\n \"/_includes/current-user-profile-callout/{username}\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.edit-project-button\",\n \"/_includes/edit-project-button/{project_name}\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.profile-actions\",\n \"/_includes/profile-actions/{username}\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n\n # Classifier Routes\n config.add_route(\"classifiers\", \"/classifiers/\", domain=warehouse)\n\n # Search Routes\n config.add_route(\"search\", \"/search/\", domain=warehouse)\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n config.add_route(\"accounts.login\", \"/account/login/\", domain=warehouse)\n config.add_route(\"accounts.logout\", \"/account/logout/\", domain=warehouse)\n 
config.add_route(\n \"accounts.register\",\n \"/account/register/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.request-password-reset\",\n \"/account/request-password-reset/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.reset-password\",\n \"/account/reset-password/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.verify-email\",\n \"/account/verify-email/\",\n domain=warehouse,\n )\n\n # Management (views for logged-in users)\n config.add_route(\"manage.account\", \"/manage/account/\", domain=warehouse)\n config.add_route(\"manage.projects\", \"/manage/projects/\", domain=warehouse)\n config.add_route(\n \"manage.project.settings\",\n \"/manage/project/{project_name}/settings/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.delete_project\",\n \"/manage/project/{project_name}/delete_project/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.destroy_docs\",\n \"/manage/project/{project_name}/delete_project_docs/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.releases\",\n \"/manage/project/{project_name}/releases/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.release\",\n \"/manage/project/{project_name}/release/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}/{version}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.roles\",\n \"/manage/project/{project_name}/collaboration/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.change_role\",\n \"/manage/project/{project_name}/collaboration/change/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.delete_role\",\n \"/manage/project/{project_name}/collaboration/delete/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.documentation\",\n \"/manage/project/{project_name}/documentation/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.history\",\n \"/manage/project/{project_name}/history/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n\n # Packaging\n config.add_redirect('/p/{name}/', '/project/{name}/', domain=warehouse)\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n domain=warehouse,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n domain=warehouse,\n )\n config.add_route(\"packaging.file\", files_url)\n\n # SES Webhooks\n config.add_route(\"ses.hook\", \"/_/ses-hook/\", domain=warehouse)\n\n # RSS\n config.add_route(\"rss.updates\", \"/rss/updates.xml\", domain=warehouse)\n config.add_route(\"rss.packages\", 
\"/rss/packages.xml\", domain=warehouse)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", domain=warehouse)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n domain=warehouse,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n domain=warehouse,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n domain=warehouse,\n )\n\n # Legacy Action URLs\n # TODO: We should probably add Warehouse routes for these that just error\n # and direct people to use upload.pypi.io\n config.add_pypi_action_route(\n \"legacy.api.pypi.file_upload\",\n \"file_upload\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit\",\n \"submit\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.doc_upload\",\n \"doc_upload\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.doap\",\n \"doap\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.list_classifiers\",\n \"list_classifiers\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.search',\n 'search',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.browse',\n 'browse',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.files',\n 'files',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.display',\n 'display',\n domain=warehouse,\n )\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n config.add_xmlrpc_endpoint(\n \"pypi_slash\",\n pattern=\"/pypi/\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n config.add_xmlrpc_endpoint(\n \"RPC2\",\n pattern=\"/RPC2\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\", domain=warehouse)\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n domain=warehouse,\n )\n config.add_redirect(\"/packages/{path:.*}\", files_url, domain=warehouse)\n\n # Legacy Action Redirects\n config.add_pypi_action_redirect(\n \"rss\",\n \"/rss/updates.xml\",\n domain=warehouse,\n )\n config.add_pypi_action_redirect(\n \"packages_rss\",\n \"/rss/packages.xml\",\n domain=warehouse,\n )\n", "path": "warehouse/routes.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n # We need to get the value of the Warehouse and Forklift domains, we'll use\n # these to segregate the Warehouse routes from the Forklift routes until\n # Forklift is properly split out into it's own project.\n warehouse = config.get_settings().get(\"warehouse.domain\")\n files_url = config.get_settings()[\"files.url\"]\n\n # Simple Route for health checks.\n config.add_route(\"health\", \"/_health/\")\n\n # Internal route to make it easier to force a particular status for\n # debugging HTTPException templates.\n config.add_route(\"force-status\", \"/_force-status/{status:[45]\\d\\d}/\")\n\n # Basic global routes\n config.add_route(\"index\", \"/\", domain=warehouse)\n config.add_route(\"robots.txt\", \"/robots.txt\", domain=warehouse)\n config.add_route(\"opensearch.xml\", \"/opensearch.xml\", domain=warehouse)\n config.add_route(\"index.sitemap.xml\", \"/sitemap.xml\", domain=warehouse)\n config.add_route(\n \"bucket.sitemap.xml\",\n \"/{bucket}.sitemap.xml\",\n domain=warehouse,\n )\n\n # Some static, template driven pages\n config.add_template_view(\"help\", \"/help/\", \"pages/help.html\")\n config.add_template_view(\"security\", \"/security/\", \"pages/security.html\")\n config.add_template_view(\n \"sponsors\",\n \"/sponsors/\",\n # Use the full resource path here to make it able to be overridden by\n # pypi-theme.\n \"warehouse:templates/pages/sponsors.html\",\n )\n\n # Our legal policies\n config.add_policy(\"terms-of-use\", \"terms.md\")\n\n # HTML Snippets for including into other pages.\n config.add_route(\n \"includes.current-user-indicator\",\n \"/_includes/current-user-indicator/\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.flash-messages\",\n \"/_includes/flash-messages/\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.current-user-profile-callout\",\n \"/_includes/current-user-profile-callout/{username}\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.edit-project-button\",\n \"/_includes/edit-project-button/{project_name}\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"includes.profile-actions\",\n \"/_includes/profile-actions/{username}\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n\n # Classifier Routes\n config.add_route(\"classifiers\", \"/classifiers/\", domain=warehouse)\n\n # Search Routes\n config.add_route(\"search\", \"/search/\", domain=warehouse)\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n domain=warehouse,\n )\n config.add_route(\"accounts.login\", \"/account/login/\", domain=warehouse)\n config.add_route(\"accounts.logout\", \"/account/logout/\", domain=warehouse)\n 
config.add_route(\n \"accounts.register\",\n \"/account/register/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.request-password-reset\",\n \"/account/request-password-reset/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.reset-password\",\n \"/account/reset-password/\",\n domain=warehouse,\n )\n config.add_route(\n \"accounts.verify-email\",\n \"/account/verify-email/\",\n domain=warehouse,\n )\n\n # Management (views for logged-in users)\n config.add_route(\"manage.account\", \"/manage/account/\", domain=warehouse)\n config.add_route(\"manage.projects\", \"/manage/projects/\", domain=warehouse)\n config.add_route(\n \"manage.project.settings\",\n \"/manage/project/{project_name}/settings/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.delete_project\",\n \"/manage/project/{project_name}/delete_project/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.destroy_docs\",\n \"/manage/project/{project_name}/delete_project_docs/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.releases\",\n \"/manage/project/{project_name}/releases/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.release\",\n \"/manage/project/{project_name}/release/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}/{version}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.roles\",\n \"/manage/project/{project_name}/collaboration/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.change_role\",\n \"/manage/project/{project_name}/collaboration/change/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.delete_role\",\n \"/manage/project/{project_name}/collaboration/delete/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.documentation\",\n \"/manage/project/{project_name}/documentation/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n config.add_route(\n \"manage.project.history\",\n \"/manage/project/{project_name}/history/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{project_name}\",\n domain=warehouse,\n )\n\n # Packaging\n config.add_redirect('/p/{name}/', '/project/{name}/', domain=warehouse)\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n domain=warehouse,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n domain=warehouse,\n )\n config.add_route(\"packaging.file\", files_url)\n\n # SES Webhooks\n config.add_route(\"ses.hook\", \"/_/ses-hook/\", domain=warehouse)\n\n # RSS\n config.add_route(\"rss.updates\", \"/rss/updates.xml\", domain=warehouse)\n config.add_route(\"rss.packages\", 
\"/rss/packages.xml\", domain=warehouse)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", domain=warehouse)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n domain=warehouse,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n domain=warehouse,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n domain=warehouse,\n )\n\n # Legacy Action URLs\n # TODO: We should probably add Warehouse routes for these that just error\n # and direct people to use upload.pypi.io\n config.add_pypi_action_route(\n \"legacy.api.pypi.file_upload\",\n \"file_upload\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit\",\n \"submit\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.doc_upload\",\n \"doc_upload\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.doap\",\n \"doap\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n \"legacy.api.pypi.list_classifiers\",\n \"list_classifiers\",\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.search',\n 'search',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.browse',\n 'browse',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.files',\n 'files',\n domain=warehouse,\n )\n config.add_pypi_action_route(\n 'legacy.api.pypi.display',\n 'display',\n domain=warehouse,\n )\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n config.add_xmlrpc_endpoint(\n \"pypi_slash\",\n pattern=\"/pypi/\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n config.add_xmlrpc_endpoint(\n \"RPC2\",\n pattern=\"/RPC2\",\n header=\"Content-Type:text/xml\",\n domain=warehouse,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\", domain=warehouse)\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n domain=warehouse,\n )\n config.add_redirect(\"/pypi/\", \"/\", domain=warehouse)\n config.add_redirect(\"/packages/{path:.*}\", files_url, domain=warehouse)\n\n # Legacy Action Redirects\n config.add_pypi_action_redirect(\n \"rss\",\n \"/rss/updates.xml\",\n domain=warehouse,\n )\n config.add_pypi_action_redirect(\n \"packages_rss\",\n \"/rss/packages.xml\",\n domain=warehouse,\n )\n", "path": "warehouse/routes.py" } ]
diff --git a/tests/unit/test_routes.py b/tests/unit/test_routes.py index c7cac6878c54..14dfc2223236 100644 --- a/tests/unit/test_routes.py +++ b/tests/unit/test_routes.py @@ -292,6 +292,7 @@ def add_policy(name, filename): "/project/{name}/{version}/", domain=warehouse, ), + pretend.call("/pypi/", "/", domain=warehouse), pretend.call( "/packages/{path:.*}", "https://files.example.com/packages/{path}", diff --git a/warehouse/routes.py b/warehouse/routes.py index 3fb33293af20..0f9434750108 100644 --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -330,6 +330,7 @@ def includeme(config): "/project/{name}/{version}/", domain=warehouse, ) + config.add_redirect("/pypi/", "/", domain=warehouse) config.add_redirect("/packages/{path:.*}", files_url, domain=warehouse) # Legacy Action Redirects
StackStorm__st2-3408
st2kv does not resolve in Jinja cast expression. In the rule: `{{ "2" | int }} -> 2` OK. `{{ trigger.count | int }} -> 2` OK. `{{ st2kv.system.count | int }} -> 0` Wrong.
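The root cause can be illustrated with a small, self-contained sketch: Jinja2's `int` filter falls back to its default (0) when the value is an object that defines neither `__int__` nor `__float__`, which is exactly the situation of a lazy datastore lookup whose value is only reachable through `str()`. The `Lookup` class and template below are hypothetical stand-ins (not the actual st2 `KeyValueLookup`), and the sketch assumes jinja2 is installed.

```python
from jinja2 import Environment


class Lookup:
    """Hypothetical stand-in for a lazy datastore lookup object."""

    def __init__(self, value):
        self._value = value

    def __str__(self):
        # The stored value is only reachable as a string.
        return self._value

    # Without these two methods, jinja2's `int` filter hits TypeError on
    # int(value) and float(value) and silently returns its default of 0.
    def __int__(self):
        return int(float(self))

    def __float__(self):
        return float(str(self))


env = Environment()
template = env.from_string("{{ count | int }}")
# Prints 2; with __int__/__float__ removed it would print 0.
print(template.render(count=Lookup("2")))
```

This mirrors the shape of the accompanying patch, which adds `__int__` and `__float__` to `KeyValueLookup`.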
[ { "content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common import log as logging\n\nfrom st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE\nfrom st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE\nfrom st2common.constants.keyvalue import ALLOWED_SCOPES\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR\nfrom st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException\nfrom st2common.models.system.keyvalue import UserKeyReference\nfrom st2common.persistence.keyvalue import KeyValuePair\n\n__all__ = [\n 'get_kvp_for_name',\n 'get_values_for_names',\n\n 'KeyValueLookup',\n 'UserKeyValueLookup'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_kvp_for_name(name):\n try:\n kvp_db = KeyValuePair.get_by_name(name)\n except ValueError:\n kvp_db = None\n\n return kvp_db\n\n\ndef get_values_for_names(names, default_value=None):\n \"\"\"\n Retrieve values for the provided key names (multi get).\n\n If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain\n default_value for that name.\n\n :rtype: ``dict``\n \"\"\"\n result = {}\n kvp_dbs = KeyValuePair.get_by_names(names=names)\n\n name_to_kvp_db_map = {}\n for kvp_db in kvp_dbs:\n name_to_kvp_db_map[kvp_db.name] = kvp_db.value\n\n for name in names:\n result[name] = name_to_kvp_db_map.get(name, default_value)\n\n return result\n\n\nclass KeyValueLookup(object):\n\n def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):\n if not scope:\n scope = FULL_SYSTEM_SCOPE\n\n if scope == SYSTEM_SCOPE:\n scope = FULL_SYSTEM_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = name\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. 
Saving the value in cache avoids extra DB calls.\n return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,\n scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n if kvp:\n LOG.debug('Got value %s from datastore.', kvp.value)\n return kvp.value if kvp else ''\n\n\nclass UserKeyValueLookup(object):\n\n def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):\n if not scope:\n scope = FULL_USER_SCOPE\n\n if scope == USER_SCOPE:\n scope = FULL_USER_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._user = user\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = UserKeyReference(name=name, user=self._user).ref\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,\n cache=self._value_cache, scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n return kvp.value if kvp else ''\n\n\ndef get_key_reference(scope, name, user=None):\n \"\"\"\n Given a key name and user this method returns a new name (string ref)\n to address the key value pair in the context of that user.\n\n :param user: User to whom key belongs.\n :type name: ``str``\n\n :param name: Original name of the key.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):\n return name\n elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):\n if not user:\n raise InvalidUserException('A valid user must be specified for user key ref.')\n return UserKeyReference(name=name, user=user).ref\n else:\n raise InvalidScopeException('Scope \"%s\" is not valid. Allowed scopes are %s.' %\n (scope, ALLOWED_SCOPES))\n", "path": "st2common/st2common/services/keyvalues.py" } ]
[ { "content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common import log as logging\n\nfrom st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE\nfrom st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE\nfrom st2common.constants.keyvalue import ALLOWED_SCOPES\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR\nfrom st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException\nfrom st2common.models.system.keyvalue import UserKeyReference\nfrom st2common.persistence.keyvalue import KeyValuePair\n\n__all__ = [\n 'get_kvp_for_name',\n 'get_values_for_names',\n\n 'KeyValueLookup',\n 'UserKeyValueLookup'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_kvp_for_name(name):\n try:\n kvp_db = KeyValuePair.get_by_name(name)\n except ValueError:\n kvp_db = None\n\n return kvp_db\n\n\ndef get_values_for_names(names, default_value=None):\n \"\"\"\n Retrieve values for the provided key names (multi get).\n\n If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain\n default_value for that name.\n\n :rtype: ``dict``\n \"\"\"\n result = {}\n kvp_dbs = KeyValuePair.get_by_names(names=names)\n\n name_to_kvp_db_map = {}\n for kvp_db in kvp_dbs:\n name_to_kvp_db_map[kvp_db.name] = kvp_db.value\n\n for name in names:\n result[name] = name_to_kvp_db_map.get(name, default_value)\n\n return result\n\n\nclass KeyValueLookup(object):\n\n def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):\n if not scope:\n scope = FULL_SYSTEM_SCOPE\n\n if scope == SYSTEM_SCOPE:\n scope = FULL_SYSTEM_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __int__(self):\n return int(float(self))\n\n def __float__(self):\n return float(str(self))\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = name\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. 
Saving the value in cache avoids extra DB calls.\n return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,\n scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n if kvp:\n LOG.debug('Got value %s from datastore.', kvp.value)\n return kvp.value if kvp else ''\n\n\nclass UserKeyValueLookup(object):\n\n def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):\n if not scope:\n scope = FULL_USER_SCOPE\n\n if scope == USER_SCOPE:\n scope = FULL_USER_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._user = user\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = UserKeyReference(name=name, user=self._user).ref\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,\n cache=self._value_cache, scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n return kvp.value if kvp else ''\n\n\ndef get_key_reference(scope, name, user=None):\n \"\"\"\n Given a key name and user this method returns a new name (string ref)\n to address the key value pair in the context of that user.\n\n :param user: User to whom key belongs.\n :type name: ``str``\n\n :param name: Original name of the key.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):\n return name\n elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):\n if not user:\n raise InvalidUserException('A valid user must be specified for user key ref.')\n return UserKeyReference(name=name, user=user).ref\n else:\n raise InvalidScopeException('Scope \"%s\" is not valid. Allowed scopes are %s.' %\n (scope, ALLOWED_SCOPES))\n", "path": "st2common/st2common/services/keyvalues.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0a0c16f8bd..43cc9922dd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -61,6 +61,7 @@ in development * ``st2 role-assignment get <role assignment id>`` * Update ``/v1/rbac/roles`` API endpoint so it includes corresponding permission grant objects. Previously it only included permission grant ids. (improvement) +* Fix a bug where keyvalue objects weren't properly cast to numeric types. (bug fix) 2.2.1 - April 3, 2017 --------------------- diff --git a/st2common/st2common/services/keyvalues.py b/st2common/st2common/services/keyvalues.py index be443eda56..09d56d375b 100644 --- a/st2common/st2common/services/keyvalues.py +++ b/st2common/st2common/services/keyvalues.py @@ -82,6 +82,12 @@ def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_S def __str__(self): return self._value_cache[self._key_prefix] + def __int__(self): + return int(float(self)) + + def __float__(self): + return float(str(self)) + def __getitem__(self, key): return self._get(key) diff --git a/st2common/tests/unit/test_keyvalue_lookup.py b/st2common/tests/unit/test_keyvalue_lookup.py index 7629ea33aa..fc7627466e 100644 --- a/st2common/tests/unit/test_keyvalue_lookup.py +++ b/st2common/tests/unit/test_keyvalue_lookup.py @@ -145,3 +145,11 @@ def test_secret_lookup(self): user_lookup = UserKeyValueLookup(scope=FULL_USER_SCOPE, user='stanley') self.assertEquals(str(user_lookup.k3), k3.value) + + def test_lookup_cast(self): + KeyValuePair.add_or_update(KeyValuePairDB(name='count', value='5.5')) + + lookup = KeyValueLookup(scope=FULL_SYSTEM_SCOPE) + self.assertEqual(str(lookup.count), '5.5') + self.assertEqual(float(lookup.count), 5.5) + self.assertEqual(int(lookup.count), 5)
PaddlePaddle__PaddleDetection-8421
Training produces a long warning ### Issue confirmation (Search before asking) - [X] I have searched the [issues](https://github.com/PaddlePaddle/PaddleDetection/issues) and found no similar bug report. ### Bug Component _No response_ ### Describe the Bug Training produces a long warning: ``` I0706 13:09:13.075042 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6. I0706 13:09:13.382442 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6. ``` ### Environment PaddleDetection 2.6, PaddlePaddle 2.5.0. After investigation, changing line 77 of `ppdet/utils/stats.py` as follows resolves it: `v.update(stats[k].numpy())` → `v.update(float(stats[k]))` ### Bug description confirmation - [X] I confirm that the bug reproduction steps, the code change description, and the environment information have been provided, and that the problem can be reproduced. ### Are you willing to submit a PR? - [ ] I'd like to help by submitting a PR!
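As a quick illustration of the change the reporter suggests (and which the warning message itself recommends), converting a scalar tensor with `float()` avoids going through `.numpy()`. This is only a hedged sketch: the `stats` dict is a hypothetical stand-in for the values passed to `TrainingStats.update()`, and it assumes a PaddlePaddle 2.5 install.

```python
import paddle

# Hypothetical stand-in for the loss dict handed to TrainingStats.update().
stats = {"loss": paddle.to_tensor(0.5)}

# Old pattern from ppdet/utils/stats.py, which emits the warning when the
# tensor is zero-dimensional:
#     value = stats["loss"].numpy()
# Replacement recommended by the warning itself:
value = float(stats["loss"])
print(value)  # 0.5
```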
[ { "content": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\n\n__all__ = ['SmoothedValue', 'TrainingStats']\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({avg:.4f})\"\n self.deque = collections.deque(maxlen=window_size)\n self.fmt = fmt\n self.total = 0.\n self.count = 0\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n @property\n def median(self):\n return np.median(self.deque)\n\n @property\n def avg(self):\n return np.mean(self.deque)\n\n @property\n def max(self):\n return np.max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, max=self.max, value=self.value)\n\n\nclass TrainingStats(object):\n def __init__(self, window_size, delimiter=' '):\n self.meters = None\n self.window_size = window_size\n self.delimiter = delimiter\n\n def update(self, stats):\n if self.meters is None:\n self.meters = {\n k: SmoothedValue(self.window_size)\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n v.update(stats[k].numpy())\n\n def get(self, extras=None):\n stats = collections.OrderedDict()\n if extras:\n for k, v in extras.items():\n stats[k] = v\n for k, v in self.meters.items():\n stats[k] = format(v.median, '.6f')\n\n return stats\n\n def log(self, extras=None):\n d = self.get(extras)\n strs = []\n for k, v in d.items():\n strs.append(\"{}: {}\".format(k, str(v)))\n return self.delimiter.join(strs)\n", "path": "ppdet/utils/stats.py" } ]
[ { "content": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\n\n__all__ = ['SmoothedValue', 'TrainingStats']\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({avg:.4f})\"\n self.deque = collections.deque(maxlen=window_size)\n self.fmt = fmt\n self.total = 0.\n self.count = 0\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n @property\n def median(self):\n return np.median(self.deque)\n\n @property\n def avg(self):\n return np.mean(self.deque)\n\n @property\n def max(self):\n return np.max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, max=self.max, value=self.value)\n\n\nclass TrainingStats(object):\n def __init__(self, window_size, delimiter=' '):\n self.meters = None\n self.window_size = window_size\n self.delimiter = delimiter\n\n def update(self, stats):\n if self.meters is None:\n self.meters = {\n k: SmoothedValue(self.window_size)\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n v.update(float(stats[k]))\n\n def get(self, extras=None):\n stats = collections.OrderedDict()\n if extras:\n for k, v in extras.items():\n stats[k] = v\n for k, v in self.meters.items():\n stats[k] = format(v.median, '.6f')\n\n return stats\n\n def log(self, extras=None):\n d = self.get(extras)\n strs = []\n for k, v in d.items():\n strs.append(\"{}: {}\".format(k, str(v)))\n return self.delimiter.join(strs)\n", "path": "ppdet/utils/stats.py" } ]
diff --git a/ppdet/utils/stats.py b/ppdet/utils/stats.py index 4cd36d91cf8..c070e6544ed 100644 --- a/ppdet/utils/stats.py +++ b/ppdet/utils/stats.py @@ -74,7 +74,7 @@ def update(self, stats): for k in stats.keys() } for k, v in self.meters.items(): - v.update(stats[k].numpy()) + v.update(float(stats[k])) def get(self, extras=None): stats = collections.OrderedDict()
googleapis__python-bigquery-942
chore: update system tests and samples to use an `@google.com` email address Re: https://github.com/googleapis/python-bigquery/pull/935#issuecomment-911791623 It may be some time before we can reconfigure our test project(s) to allow permissions for non-google.com folks. I propose we use `[email protected]`, which is intended for interacting with the public; most Googlers who support this client are members of it but don't have email enabled, so it should be less of a spam risk.
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef update_dataset_access(dataset_id):\n\n # [START bigquery_update_dataset_access]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set dataset_id to the ID of the dataset to fetch.\n # dataset_id = 'your-project.your_dataset'\n\n dataset = client.get_dataset(dataset_id) # Make an API request.\n\n entry = bigquery.AccessEntry(\n role=\"READER\",\n entity_type=\"userByEmail\",\n entity_id=\"[email protected]\",\n )\n\n entries = list(dataset.access_entries)\n entries.append(entry)\n dataset.access_entries = entries\n\n dataset = client.update_dataset(dataset, [\"access_entries\"]) # Make an API request.\n\n full_dataset_id = \"{}.{}\".format(dataset.project, dataset.dataset_id)\n print(\n \"Updated dataset '{}' with modified user permissions.\".format(full_dataset_id)\n )\n # [END bigquery_update_dataset_access]\n", "path": "samples/update_dataset_access.py" } ]
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef update_dataset_access(dataset_id):\n\n # [START bigquery_update_dataset_access]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set dataset_id to the ID of the dataset to fetch.\n # dataset_id = 'your-project.your_dataset'\n\n dataset = client.get_dataset(dataset_id) # Make an API request.\n\n entry = bigquery.AccessEntry(\n role=\"READER\",\n entity_type=\"groupByEmail\",\n entity_id=\"[email protected]\",\n )\n\n entries = list(dataset.access_entries)\n entries.append(entry)\n dataset.access_entries = entries\n\n dataset = client.update_dataset(dataset, [\"access_entries\"]) # Make an API request.\n\n full_dataset_id = \"{}.{}\".format(dataset.project, dataset.dataset_id)\n print(\n \"Updated dataset '{}' with modified user permissions.\".format(full_dataset_id)\n )\n # [END bigquery_update_dataset_access]\n", "path": "samples/update_dataset_access.py" } ]
diff --git a/samples/update_dataset_access.py b/samples/update_dataset_access.py index 6e844cc90..a5c2670e7 100644 --- a/samples/update_dataset_access.py +++ b/samples/update_dataset_access.py @@ -28,8 +28,8 @@ def update_dataset_access(dataset_id): entry = bigquery.AccessEntry( role="READER", - entity_type="userByEmail", - entity_id="[email protected]", + entity_type="groupByEmail", + entity_id="[email protected]", ) entries = list(dataset.access_entries)
django-json-api__django-rest-framework-json-api-908
Django 3.2 compatibility Django 3.2 is now out, but we are currently blocked from upgrading to it because djangorestframework-jsonapi specifically disallows it and newer versions as dependencies: https://github.com/django-json-api/django-rest-framework-json-api/blob/0892e3a8a4dbad9630d70e2b78e18b242a8b057d/setup.py#L101 I couldn't find any other issues related to this, so I thought I would create one. Are there any specific concerns with loosening the version constraint so that users of this library can also upgrade to Django 3.2?
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {\"bdist_wheel\"}.intersection(sys.argv)\nwheel = [\"wheel\"] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), \"r\") as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, \"__init__.py\")).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [\n (dirpath.replace(package + os.sep, \"\", 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename) for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\n \" git tag -a {0} -m 'version {0}'\".format(\n get_version(\"rest_framework_json_api\")\n )\n )\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name=\"djangorestframework-jsonapi\",\n version=get_version(\"rest_framework_json_api\"),\n url=\"https://github.com/django-json-api/django-rest-framework-json-api\",\n license=\"BSD\",\n description=\"A Django REST framework API adapter for the JSON API spec.\",\n long_description=read(\"README.rst\"),\n author=\"Jerel Unruh\",\n author_email=\"\",\n packages=get_packages(\"rest_framework_json_api\"),\n package_data=get_package_data(\"rest_framework_json_api\"),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=[\n \"inflection>=0.3.0\",\n \"djangorestframework>=3.12,<3.13\",\n \"django>=2.2,<3.2\",\n ],\n extras_require={\n \"django-polymorphic\": [\"django-polymorphic>=2.0\"],\n \"django-filter\": [\"django-filter>=2.0\"],\n \"openapi\": [\"pyyaml>=5.3\", \"uritemplate>=3.0.1\"],\n },\n setup_requires=wheel,\n python_requires=\">=3.6\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {\"bdist_wheel\"}.intersection(sys.argv)\nwheel = [\"wheel\"] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), \"r\") as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, \"__init__.py\")).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [\n (dirpath.replace(package + os.sep, \"\", 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename) for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\n \" git tag -a {0} -m 'version {0}'\".format(\n get_version(\"rest_framework_json_api\")\n )\n )\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name=\"djangorestframework-jsonapi\",\n version=get_version(\"rest_framework_json_api\"),\n url=\"https://github.com/django-json-api/django-rest-framework-json-api\",\n license=\"BSD\",\n description=\"A Django REST framework API adapter for the JSON API spec.\",\n long_description=read(\"README.rst\"),\n author=\"Jerel Unruh\",\n author_email=\"\",\n packages=get_packages(\"rest_framework_json_api\"),\n package_data=get_package_data(\"rest_framework_json_api\"),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=[\n \"inflection>=0.3.0\",\n \"djangorestframework>=3.12,<3.13\",\n \"django>=2.2,<3.3\",\n ],\n extras_require={\n \"django-polymorphic\": [\"django-polymorphic>=2.0\"],\n \"django-filter\": [\"django-filter>=2.0\"],\n \"openapi\": [\"pyyaml>=5.3\", \"uritemplate>=3.0.1\"],\n },\n setup_requires=wheel,\n python_requires=\">=3.6\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e3e786ae..a48a7915 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -10,7 +10,7 @@ jobs: fail-fast: false matrix: python-version: ["3.6", "3.7", "3.8", "3.9"] - django: ["2.2", "3.0", "3.1"] + django: ["2.2", "3.0", "3.1", "3.2"] django-rest-framework: ["3.12", "master"] env: PYTHON: ${{ matrix.python-version }} diff --git a/AUTHORS b/AUTHORS index c1a273c4..543274a8 100644 --- a/AUTHORS +++ b/AUTHORS @@ -12,6 +12,7 @@ Felix Viernickel <[email protected]> Greg Aker <[email protected]> Jamie Bliss <[email protected]> Jason Housley <[email protected]> +Jeppe Fihl-Pearson <[email protected]> Jerel Unruh <[email protected]> Jonathan Senecal <[email protected]> Joseba Mendivil <[email protected]> diff --git a/CHANGELOG.md b/CHANGELOG.md index c64e4a4c..b816f47d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 Note that in line with [Django REST Framework policy](http://www.django-rest-framework.org/topics/release-notes/), any parts of the framework not mentioned in the documentation should generally be considered private API, and may be subject to change. +## [Unreleased] + +### Added + +* Added support for Django 3.2. + ## [4.1.0] - 2021-03-08 ### Added diff --git a/README.rst b/README.rst index 2ab48598..03f9d52a 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ Requirements ------------ 1. Python (3.6, 3.7, 3.8, 3.9) -2. Django (2.2, 3.0, 3.1) +2. Django (2.2, 3.0, 3.1, 3.2) 3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/docs/getting-started.md b/docs/getting-started.md index c305c801..5e374b30 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -52,7 +52,7 @@ like the following: ## Requirements 1. Python (3.6, 3.7, 3.8, 3.9) -2. Django (2.2, 3.0, 3.1) +2. Django (2.2, 3.0, 3.1, 3.2) 3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/setup.cfg b/setup.cfg index 527ddd6b..83f6fa37 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,6 +58,8 @@ DJANGO_SETTINGS_MODULE=example.settings.test filterwarnings = error::DeprecationWarning error::PendingDeprecationWarning + # Django Debug Toolbar currently (2021-04-07) specifies default_app_config which is deprecated in Django 3.2: + ignore:'debug_toolbar' defines default_app_config = 'debug_toolbar.apps.DebugToolbarConfig'. Django now detects this configuration automatically. 
You can remove default_app_config.:PendingDeprecationWarning testpaths = example tests diff --git a/setup.py b/setup.py index a076a7a5..8cce9a5d 100755 --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ def get_package_data(package): install_requires=[ "inflection>=0.3.0", "djangorestframework>=3.12,<3.13", - "django>=2.2,<3.2", + "django>=2.2,<3.3", ], extras_require={ "django-polymorphic": ["django-polymorphic>=2.0"], diff --git a/tox.ini b/tox.ini index dab1676c..f4160e5a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = - py{36,37,38,39}-django{22,30,31}-drf{312,master}, + py{36,37,38,39}-django{22,30,31,32}-drf{312,master}, lint,docs [gh-actions] @@ -15,6 +15,7 @@ DJANGO = 2.2: django22 3.0: django30 3.1: django31 + 3.2: django32 DJANGO_REST_FRAMEWORK = 3.12: drf312 @@ -25,6 +26,7 @@ deps = django22: Django>=2.2,<2.3 django30: Django>=3.0,<3.1 django31: Django>=3.1,<3.2 + django32: Django>=3.2,<3.3 drf312: djangorestframework>=3.12,<3.13 drfmaster: https://github.com/encode/django-rest-framework/archive/master.zip -rrequirements/requirements-testing.txt
rasterio__rasterio-598
rio warp null transformer error with bad proj4 Currently, if you pass a bad projection, you get the following behavior: ``` $ rio warp --dst-crs "+proj=foobar" tests/data/warp_test.tif /tmp/foo.tif ERROR:GDAL:CPLE_NotSupported in Failed to initialize PROJ.4 with `+proj=foobar +wktext'. Traceback (most recent call last): ... File "/Users/mperry/work/rasterio/rasterio/rio/warp.py", line 198, in warp resolution=res) File "/Users/mperry/work/rasterio/rasterio/warp.py", line 296, in calculate_default_transform left, bottom, right, top) File "rasterio/_warp.pyx", line 535, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9551) with InMemoryRaster( File "rasterio/_warp.pyx", line 542, in rasterio._warp._calculate_default_transform (rasterio/_warp.cpp:9261) raise ValueError("NULL transformer") ValueError: NULL transformer ``` The transformer fails to initialize, which is reasonable considering the invalid proj string. Is there any way to catch that error and report back something more meaningful than "NULL transformer"?
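One way a caller (or the CLI) could surface a clearer message is to catch the `ValueError` around the transform calculation and re-raise it with the offending CRS attached. This is only an illustrative sketch of user-side error handling, not the fix rasterio adopted; the file path and CRS string are taken from the report above.

```python
import rasterio
from rasterio.warp import calculate_default_transform

dst_crs = "+proj=foobar"

with rasterio.open("tests/data/warp_test.tif") as src:
    try:
        transform, width, height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds
        )
    except ValueError as err:
        # Attach the CRS that failed instead of the bare "NULL transformer".
        raise ValueError(
            "Could not create a transformer for destination CRS %r; "
            "check that both CRS definitions are valid" % dst_crs
        ) from err
```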
[ { "content": "\"\"\"A module of errors.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioIOError(IOError):\n \"\"\"A failure to open a dataset using the presently registered drivers.\"\"\"\n\n\nclass RasterioDriverRegistrationError(ValueError):\n \"\"\"To be raised when, eg, _gdal.GDALGetDriverByName(\"MEM\") returns NULL.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Rasterio's CLI refuses to implicitly clobber output files.\"\"\"\n\n def __init__(self, message):\n super(FileOverwriteError, self).__init__('', hint=message)\n", "path": "rasterio/errors.py" } ]
[ { "content": "\"\"\"A module of errors.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioIOError(IOError):\n \"\"\"A failure to open a dataset using the presently registered drivers.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"To be raised when, eg, _gdal.GDALGetDriverByName(\"MEM\") returns NULL.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Rasterio's CLI refuses to implicitly clobber output files.\"\"\"\n\n def __init__(self, message):\n super(FileOverwriteError, self).__init__('', hint=message)\n", "path": "rasterio/errors.py" } ]
diff --git a/rasterio/_io.pyx b/rasterio/_io.pyx index d4ada0fbc..1b5b2e7d8 100644 --- a/rasterio/_io.pyx +++ b/rasterio/_io.pyx @@ -20,7 +20,7 @@ from rasterio._drivers import driver_count, GDALEnv from rasterio._err import cpl_errs, GDALError from rasterio import dtypes from rasterio.coords import BoundingBox -from rasterio.errors import RasterioDriverRegistrationError +from rasterio.errors import DriverRegistrationError from rasterio.five import text_type, string_types from rasterio.transform import Affine from rasterio.enums import ColorInterp, MaskFlags, Resampling @@ -1851,7 +1851,7 @@ cdef class InMemoryRaster: cdef void *memdriver = _gdal.GDALGetDriverByName("MEM") if memdriver == NULL: - raise RasterioDriverRegistrationError( + raise DriverRegistrationError( "MEM driver is not registered.") self.dataset = _gdal.GDALCreate( diff --git a/rasterio/_warp.pyx b/rasterio/_warp.pyx index 8965dbd39..e2ad94020 100644 --- a/rasterio/_warp.pyx +++ b/rasterio/_warp.pyx @@ -10,7 +10,7 @@ from rasterio cimport _base, _gdal, _ogr, _io, _features from rasterio import dtypes from rasterio._err import cpl_errs, GDALError from rasterio._io cimport InMemoryRaster -from rasterio.errors import RasterioDriverRegistrationError +from rasterio.errors import DriverRegistrationError from rasterio.transform import Affine, from_bounds @@ -269,7 +269,7 @@ def _reproject( hrdriver = _gdal.GDALGetDriverByName("MEM") if hrdriver == NULL: - raise RasterioDriverRegistrationError( + raise DriverRegistrationError( "'MEM' driver not found. Check that this call is contained " "in a `with rasterio.drivers()` or `with rasterio.open()` " "block.") @@ -317,7 +317,7 @@ def _reproject( hrdriver = _gdal.GDALGetDriverByName("MEM") if hrdriver == NULL: - raise RasterioDriverRegistrationError( + raise DriverRegistrationError( "'MEM' driver not found. Check that this call is contained " "in a `with rasterio.drivers()` or `with rasterio.open()` " "block.") diff --git a/rasterio/errors.py b/rasterio/errors.py index 4cfbddc3c..3a4f34e7f 100644 --- a/rasterio/errors.py +++ b/rasterio/errors.py @@ -7,7 +7,7 @@ class RasterioIOError(IOError): """A failure to open a dataset using the presently registered drivers.""" -class RasterioDriverRegistrationError(ValueError): +class DriverRegistrationError(ValueError): """To be raised when, eg, _gdal.GDALGetDriverByName("MEM") returns NULL.""" diff --git a/tests/test_warp_transform.py b/tests/test_warp_transform.py index c1c3396f5..8019df640 100644 --- a/tests/test_warp_transform.py +++ b/tests/test_warp_transform.py @@ -3,7 +3,7 @@ from rasterio.transform import Affine, from_bounds -def test_indentity(): +def test_identity(): """Get the same transform and dimensions back for same crs.""" # Tile: [53, 96, 8] # [-11740727.544603072, 4852834.0517692715, -11584184.510675032, 5009377.085697309] @@ -36,3 +36,28 @@ def test_gdal_transform_notnull(): right=-80, top=70) assert True + + +def test_gdal_transform_fail_dst_crs(): + with rasterio.drivers(): + dt, dw, dh = _calculate_default_transform( + {'init': 'EPSG:4326'}, + '+proj=foobar', + width=80, + height=80, + left=-120, + bottom=30, + right=-80, + top=70) + +def test_gdal_transform_fail_src_crs(): + with rasterio.drivers(): + dt, dw, dh = _calculate_default_transform( + '+proj=foobar', + {'init': 'EPSG:32610'}, + width=80, + height=80, + left=-120, + bottom=30, + right=-80, + top=70)
mozilla__bugbug-3850
Optimize the `bug_to_types()` function to avoid redundant execution of string lowering The `bug["whiteboard"].lower()` statement is performed multiple times in the function. Instead, we could store the lowercase value in a variable (e.g., `bug_whiteboard`) and reuse it where needed. Examples: https://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L46 https://github.com/mozilla/bugbug/blob/0586c6a49732f03781daf43d8726b53e2049e3db/bugbug/models/bugtype.py#L50
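A minimal sketch of the hoisting this issue asks for is below: lower the whiteboard once and reuse the result in every membership test. The `classify()` helper and its checks are a simplified, hypothetical reduction of `bug_to_types()`, not the bugbug code itself.

```python
def classify(bug: dict) -> set:
    types = set()
    bug_whiteboard = bug["whiteboard"].lower()  # lowered once, reused below

    if "overhead" in bug_whiteboard or "memshrink" in bug_whiteboard:
        types.add("memory")
    if "[client-bounty-form" in bug_whiteboard or "[sec-survey" in bug_whiteboard:
        types.add("security")
    return types


print(classify({"whiteboard": "[MemShrink][sec-survey]"}))  # {'memory', 'security'} (order may vary)
```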
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable, Optional\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\nKEYWORD_DICT = {\n \"sec-\": \"security\",\n \"csectype-\": \"security\",\n \"memory-\": \"memory\",\n \"crash\": \"crash\",\n \"crashreportid\": \"crash\",\n \"perf\": \"performance\",\n \"topperf\": \"performance\",\n \"main-thread-io\": \"performance\",\n \"power\": \"power\",\n}\nTYPE_LIST = sorted(set(KEYWORD_DICT.values()))\n\n\ndef bug_to_types(\n bug: bugzilla.BugDict, bug_map: Optional[dict[int, bugzilla.BugDict]] = None\n) -> list[str]:\n types = set()\n\n bug_whiteboard = bug[\"whiteboard\"].lower()\n\n if any(\n f\"{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"overhead\", \"memshrink\")\n ):\n types.add(\"memory\")\n\n if \"[power\" in bug_whiteboard:\n types.add(\"power\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\n \"fxperf\",\n \"fxperfsize\",\n \"snappy\",\n \"pdfjs-c-performance\",\n \"pdfjs-performance\",\n \"sp3\",\n )\n ):\n types.add(\"performance\")\n\n if any(\n f\"[{whiteboard_text}\" in bug[\"whiteboard\"].lower()\n for whiteboard_text in (\"client-bounty-form\", \"sec-survey\")\n ):\n types.add(\"security\")\n\n if \"cf_performance\" in bug and bug[\"cf_performance\"] not in (\"---\", \"?\"):\n types.add(\"performance\")\n\n if \"cf_crash_signature\" in bug and bug[\"cf_crash_signature\"] not in (\"\", \"---\"):\n types.add(\"crash\")\n\n if bug_map is not None:\n for bug_id in bug[\"blocks\"]:\n if bug_id not in bug_map:\n continue\n\n alias = bug_map[bug_id][\"alias\"]\n if alias and alias.startswith(\"memshrink\"):\n types.add(\"memory\")\n\n for keyword_start, type in KEYWORD_DICT.items():\n if any(keyword.startswith(keyword_start) for keyword in bug[\"keywords\"]):\n types.add(type)\n\n return list(types)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(set(KEYWORD_DICT.keys())),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", 
DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.hyperparameter = {\"n_jobs\": utils.get_physical_cpu_count()}\n self.clf = OneVsRestClassifier(xgboost.XGBClassifier(**self.hyperparameter))\n\n def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(TYPE_LIST))\n for type_ in bug_to_types(bug_data, bug_map):\n target[TYPE_LIST.index(type_)] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n for type_ in TYPE_LIST:\n logger.info(\n \"%d %s bugs\",\n sum(\n 1\n for target in classes.values()\n if target[TYPE_LIST.index(type_)] == 1\n ),\n type_,\n )\n\n return classes, TYPE_LIST\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n for i, bug in enumerate(bugs):\n for type_ in bug_to_types(bug):\n if probabilities:\n classes[i][TYPE_LIST.index(type_)] = 1.0\n else:\n classes[i][TYPE_LIST.index(type_)] = 1\n\n return classes\n", "path": "bugbug/models/bugtype.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable, Optional\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\nKEYWORD_DICT = {\n \"sec-\": \"security\",\n \"csectype-\": \"security\",\n \"memory-\": \"memory\",\n \"crash\": \"crash\",\n \"crashreportid\": \"crash\",\n \"perf\": \"performance\",\n \"topperf\": \"performance\",\n \"main-thread-io\": \"performance\",\n \"power\": \"power\",\n}\nTYPE_LIST = sorted(set(KEYWORD_DICT.values()))\n\n\ndef bug_to_types(\n bug: bugzilla.BugDict, bug_map: Optional[dict[int, bugzilla.BugDict]] = None\n) -> list[str]:\n types = set()\n\n bug_whiteboard = bug[\"whiteboard\"].lower()\n\n if any(\n f\"{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"overhead\", \"memshrink\")\n ):\n types.add(\"memory\")\n\n if \"[power\" in bug_whiteboard:\n types.add(\"power\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\n \"fxperf\",\n \"fxperfsize\",\n \"snappy\",\n \"pdfjs-c-performance\",\n \"pdfjs-performance\",\n \"sp3\",\n )\n ):\n types.add(\"performance\")\n\n if any(\n f\"[{whiteboard_text}\" in bug_whiteboard\n for whiteboard_text in (\"client-bounty-form\", \"sec-survey\")\n ):\n types.add(\"security\")\n\n if \"cf_performance\" in bug and bug[\"cf_performance\"] not in (\"---\", \"?\"):\n types.add(\"performance\")\n\n if \"cf_crash_signature\" in bug and bug[\"cf_crash_signature\"] not in (\"\", \"---\"):\n types.add(\"crash\")\n\n if bug_map is not None:\n for bug_id in bug[\"blocks\"]:\n if bug_id not in bug_map:\n continue\n\n alias = bug_map[bug_id][\"alias\"]\n if alias and alias.startswith(\"memshrink\"):\n types.add(\"memory\")\n\n for keyword_start, type in KEYWORD_DICT.items():\n if any(keyword.startswith(keyword_start) for keyword in bug[\"keywords\"]):\n types.add(type)\n\n return list(types)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(set(KEYWORD_DICT.keys())),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", 
DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.hyperparameter = {\"n_jobs\": utils.get_physical_cpu_count()}\n self.clf = OneVsRestClassifier(xgboost.XGBClassifier(**self.hyperparameter))\n\n def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(TYPE_LIST))\n for type_ in bug_to_types(bug_data, bug_map):\n target[TYPE_LIST.index(type_)] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n for type_ in TYPE_LIST:\n logger.info(\n \"%d %s bugs\",\n sum(\n 1\n for target in classes.values()\n if target[TYPE_LIST.index(type_)] == 1\n ),\n type_,\n )\n\n return classes, TYPE_LIST\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n for i, bug in enumerate(bugs):\n for type_ in bug_to_types(bug):\n if probabilities:\n classes[i][TYPE_LIST.index(type_)] = 1.0\n else:\n classes[i][TYPE_LIST.index(type_)] = 1\n\n return classes\n", "path": "bugbug/models/bugtype.py" } ]
diff --git a/bugbug/models/bugtype.py b/bugbug/models/bugtype.py index 858ad7d785..71fc1a6afb 100644 --- a/bugbug/models/bugtype.py +++ b/bugbug/models/bugtype.py @@ -62,7 +62,7 @@ def bug_to_types( types.add("performance") if any( - f"[{whiteboard_text}" in bug["whiteboard"].lower() + f"[{whiteboard_text}" in bug_whiteboard for whiteboard_text in ("client-bounty-form", "sec-survey") ): types.add("security")
mirumee__ariadne-232
Update GraphQL Core Next & Starlette. An issue for me to remember to update our core dependencies to their latest versions before release.
[ { "content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.5.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next>=1.0.4\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.5.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next<3.0.0\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/requirements-dev.txt b/requirements-dev.txt index eb710e433..0b76cfa33 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,40 +7,48 @@ appdirs==1.4.3 # via black astroid==2.2.5 # via pylint atomicwrites==1.3.0 # via pytest -attrs==19.1.0 # via black, pytest +attrs==19.1.0 # via black, packaging, pytest black==19.3b0 -certifi==2019.3.9 # via requests +certifi==2019.6.16 # via requests chardet==3.0.4 # via requests click==7.0 # via black, pip-tools codecov==2.0.15 -coverage==4.5.3 # via codecov, pytest-cov +coverage==4.5.4 # via codecov, pytest-cov django==2.2.4 +fastdiff==0.2.0 # via snapshottest freezegun==0.3.12 idna==2.8 # via requests -isort==4.3.18 # via pylint -lazy-object-proxy==1.3.1 # via astroid +importlib-metadata==0.19 # via pluggy, pytest +isort==4.3.21 # via pylint +lazy-object-proxy==1.4.1 # via astroid mccabe==0.6.1 # via pylint -more-itertools==7.0.0 # via pytest +more-itertools==7.2.0 # via pytest mypy-extensions==0.4.1 # via mypy -mypy==0.701 +mypy==0.720 opentracing==2.2.0 -pip-tools==3.6.1 -pluggy==0.9.0 # via pytest +packaging==19.1 # via pytest +pip-tools==4.0.0 +pluggy==0.12.0 # via pytest py==1.8.0 # via pytest pylint==2.3.1 +pyparsing==2.4.2 # via packaging pytest-asyncio==0.10.0 pytest-cov==2.7.1 -pytest-django==3.4.8 +pytest-django==3.5.1 pytest-mock==1.10.4 -pytest==4.4.1 +pytest==5.0.1 python-dateutil==2.8.0 -pytz==2019.1 # via django -requests==2.21.0 -six==1.12.0 # via astroid, freezegun, pip-tools, pytest, python-dateutil, snapshottest -snapshottest==0.5.0 +pytz==2019.2 # via django +requests==2.22.0 +six==1.12.0 # via astroid, freezegun, packaging, pip-tools, python-dateutil, snapshottest +snapshottest==0.5.1 sqlparse==0.3.0 # via django termcolor==1.1.0 # via snapshottest toml==0.10.0 # via black -typed-ast==1.3.5 # via astroid, mypy -urllib3==1.24.3 # via requests -wrapt==1.11.1 # via astroid +typed-ast==1.4.0 # via astroid, mypy +typing-extensions==3.7.4 # via mypy +urllib3==1.25.3 # via requests +wasmer==0.3.0 # via fastdiff +wcwidth==0.1.7 # via pytest +wrapt==1.11.2 # via astroid +zipp==0.5.2 # via importlib-metadata diff --git a/requirements.txt b/requirements.txt index 4b90eb5fd..345cdea2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,5 +7,5 @@ graphql-core-next==1.1.1 python-multipart==0.0.5 six==1.12.0 # via python-multipart -starlette==0.12.0 -typing-extensions==3.6.6 +starlette==0.12.7 +typing-extensions==3.7.4 diff --git a/setup.py b/setup.py index f2d03d640..2ece8c7c7 100755 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ packages=["ariadne"], include_package_data=True, install_requires=[ - "graphql-core-next>=1.0.4", + "graphql-core-next<3.0.0", "python-multipart>=0.0.5", "starlette<0.13", "typing_extensions>=3.6.0", diff --git a/tests/test_schema_file_load.py b/tests/test_schema_file_load.py index 4ff09d1b0..622dc85c8 100644 --- a/tests/test_schema_file_load.py +++ b/tests/test_schema_file_load.py @@ -45,7 +45,7 @@ def incorrect_schema_file(tmpdir_factory): def test_loading_schema_fails_on_bad_syntax(incorrect_schema_file): with pytest.raises(exceptions.GraphQLFileSyntaxError) as e: load_schema_from_path(str(incorrect_schema_file)) - assert str(incorrect_schema_file) in str(e) + assert str(incorrect_schema_file) in str(e.value) SECOND_SCHEMA = """ diff --git a/tests/tracing/snapshots/snap_test_apollotracing.py b/tests/tracing/snapshots/snap_test_apollotracing.py index c4e94a12f..d844dc325 100644 --- a/tests/tracing/snapshots/snap_test_apollotracing.py +++ 
b/tests/tracing/snapshots/snap_test_apollotracing.py @@ -36,43 +36,5 @@ } snapshots['test_apollotracing_extension_handles_exceptions_in_resolvers 1'] = { - 'data': { - 'testError': None - }, - 'errors': [ - { - 'locations': [ - ( - 1, - 3 - ) - ], - 'message': 'Test exception', - 'path': [ - 'testError' - ] - } - ], - 'extensions': { - 'tracing': { - 'duration': 0, - 'endTime': '2012-01-14T03:21:34.000000Z', - 'execution': { - 'resolvers': [ - { - 'duration': 0, - 'fieldName': 'testError', - 'parentType': 'Query', - 'path': [ - 'testError' - ], - 'returnType': 'Boolean', - 'startOffset': 0 - } - ] - }, - 'startTime': '2012-01-14T03:21:34.000000Z', - 'version': 1 - } - } + 'testError': None } diff --git a/tests/tracing/test_apollotracing.py b/tests/tracing/test_apollotracing.py index 1984f52df..1dd4faa57 100644 --- a/tests/tracing/test_apollotracing.py +++ b/tests/tracing/test_apollotracing.py @@ -40,7 +40,7 @@ async def test_apollotracing_extension_handles_exceptions_in_resolvers( _, result = await graphql( schema, {"query": "{ testError }"}, extensions=[ApolloTracingExtension] ) - snapshot.assert_match(result) + snapshot.assert_match(result["data"]) @pytest.mark.asyncio
fonttools__fonttools-337
I find the subset font's line height is bigger than in the original font. I have tried pyftsubset with the command-line option --no-recalc-bounds, but the generated subset font's line height is still bigger than the original's. I render the font in HTML via @font-face: @font-face { font-family: 'freetype'; src: url('font.ttf') format('truetype'); } The font file is Microsoft's Chinese liti.ttf.
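For context, a hedged sketch of how `--no-recalc-bounds` maps onto the fontTools API: `TTFont(..., recalcBBoxes=False)` asks fontTools to keep the stored bounds, and the diff below is what makes the `vhea` table honor that flag on save. The file paths are placeholders, not files from the issue.

```python
from fontTools.ttLib import TTFont

# Open without recalculating glyph bounding boxes (what --no-recalc-bounds requests).
font = TTFont("font.ttf", recalcBBoxes=False)

if "vhea" in font:
    vhea = font["vhea"]
    # Vertical metrics involved in line layout; compare these before and after saving.
    print("ascent:", vhea.ascent, "descent:", vhea.descent, "lineGap:", vhea.lineGap)
    print("yMaxExtent:", vhea.yMaxExtent)

font.save("font.roundtrip.ttf")
```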
[ { "content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\n\nvheaFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion:\t\t16.16F\n\t\tascent:\t\t\th\n\t\tdescent:\t\th\n\t\tlineGap:\t\th\n\t\tadvanceHeightMax:\tH\n\t\tminTopSideBearing:\th\n\t\tminBottomSideBearing:\th\n\t\tyMaxExtent:\t\th\n\t\tcaretSlopeRise:\t\th\n\t\tcaretSlopeRun:\t\th\n\t\treserved0:\t\th\n\t\treserved1:\t\th\n\t\treserved2:\t\th\n\t\treserved3:\t\th\n\t\treserved4:\t\th\n\t\tmetricDataFormat:\th\n\t\tnumberOfVMetrics:\tH\n\"\"\"\n\nclass table__v_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__h_h_e_a\n\n\tdependencies = ['vmtx', 'glyf']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(vheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tself.recalc(ttFont)\n\t\treturn sstruct.pack(vheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tvtmxTable = ttFont['vmtx']\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tINFINITY = 100000\n\t\t\tadvanceHeightMax = 0\n\t\t\tminTopSideBearing = +INFINITY # arbitrary big number\n\t\t\tminBottomSideBearing = +INFINITY # arbitrary big number\n\t\t\tyMaxExtent = -INFINITY # arbitrary big negative number\n\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\theight, tsb = vtmxTable[name]\n\t\t\t\tadvanceHeightMax = max(advanceHeightMax, height)\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"yMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tminTopSideBearing = min(minTopSideBearing, tsb)\n\t\t\t\tbsb = height - tsb - (g.yMax - g.yMin)\n\t\t\t\tminBottomSideBearing = min(minBottomSideBearing, bsb)\n\t\t\t\textent = tsb + (g.yMax - g.yMin)\n\t\t\t\tyMaxExtent = max(yMaxExtent, extent)\n\n\t\t\tif yMaxExtent == -INFINITY:\n\t\t\t\t# No glyph has outlines.\n\t\t\t\tminTopSideBearing = 0\n\t\t\t\tminBottomSideBearing = 0\n\t\t\t\tyMaxExtent = 0\n\n\t\t\tself.advanceHeightMax = advanceHeightMax\n\t\t\tself.minTopSideBearing = minTopSideBearing\n\t\t\tself.minBottomSideBearing = minBottomSideBearing\n\t\t\tself.yMaxExtent = yMaxExtent\n\t\telse:\n\t\t\t# XXX CFF recalc...\n\t\t\tpass\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(vheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_v_h_e_a.py" } ]
[ { "content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\n\nvheaFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion:\t\t16.16F\n\t\tascent:\t\t\th\n\t\tdescent:\t\th\n\t\tlineGap:\t\th\n\t\tadvanceHeightMax:\tH\n\t\tminTopSideBearing:\th\n\t\tminBottomSideBearing:\th\n\t\tyMaxExtent:\t\th\n\t\tcaretSlopeRise:\t\th\n\t\tcaretSlopeRun:\t\th\n\t\treserved0:\t\th\n\t\treserved1:\t\th\n\t\treserved2:\t\th\n\t\treserved3:\t\th\n\t\treserved4:\t\th\n\t\tmetricDataFormat:\th\n\t\tnumberOfVMetrics:\tH\n\"\"\"\n\nclass table__v_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__h_h_e_a\n\n\tdependencies = ['vmtx', 'glyf']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(vheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:\n\t\t\tself.recalc(ttFont)\n\t\treturn sstruct.pack(vheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tvtmxTable = ttFont['vmtx']\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tINFINITY = 100000\n\t\t\tadvanceHeightMax = 0\n\t\t\tminTopSideBearing = +INFINITY # arbitrary big number\n\t\t\tminBottomSideBearing = +INFINITY # arbitrary big number\n\t\t\tyMaxExtent = -INFINITY # arbitrary big negative number\n\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\theight, tsb = vtmxTable[name]\n\t\t\t\tadvanceHeightMax = max(advanceHeightMax, height)\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"yMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tminTopSideBearing = min(minTopSideBearing, tsb)\n\t\t\t\tbsb = height - tsb - (g.yMax - g.yMin)\n\t\t\t\tminBottomSideBearing = min(minBottomSideBearing, bsb)\n\t\t\t\textent = tsb + (g.yMax - g.yMin)\n\t\t\t\tyMaxExtent = max(yMaxExtent, extent)\n\n\t\t\tif yMaxExtent == -INFINITY:\n\t\t\t\t# No glyph has outlines.\n\t\t\t\tminTopSideBearing = 0\n\t\t\t\tminBottomSideBearing = 0\n\t\t\t\tyMaxExtent = 0\n\n\t\t\tself.advanceHeightMax = advanceHeightMax\n\t\t\tself.minTopSideBearing = minTopSideBearing\n\t\t\tself.minBottomSideBearing = minBottomSideBearing\n\t\t\tself.yMaxExtent = yMaxExtent\n\t\telse:\n\t\t\t# XXX CFF recalc...\n\t\t\tpass\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(vheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_v_h_e_a.py" } ]
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py index 79f4d7637e..d8dfc0a6af 100644 --- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py +++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py @@ -35,7 +35,8 @@ def decompile(self, data, ttFont): sstruct.unpack(vheaFormat, data, self) def compile(self, ttFont): - self.recalc(ttFont) + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) return sstruct.pack(vheaFormat, self) def recalc(self, ttFont):
coala__coala-bears-1082
GofmtBear: Add advanced asciinema. The coala bear GofmtBear does not have a proper asciinema. `gofmt` is a command-line tool that automatically fixes formatting and styling issues, rewriting code to Go's canonical coding style. I'm planning to use working code as the sample, filled with mixed indentation (spaces and tabs) and stray semicolons, and to demonstrate how gofmt automatically and correctly reformats it to the canonical Go style.
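A hedged illustration of what the asciinema could demonstrate, driven from Python the same way the bear drives gofmt (via stdin, matching `use_stdin=True` in the code below). The Go snippet is invented for the demo — mixed spaces/tabs and a stray semicolon — and `gofmt` is assumed to be on PATH.

```python
import subprocess

messy_go = (
    'package main\n'
    'import "fmt"\n'
    'func main() {\n'
    '        fmt.Println("spaces and a stray semicolon");\n'
    '\tfmt.Println("tabs here")\n'
    '}\n'
)

# gofmt reads source from stdin and writes the canonically formatted source to stdout.
result = subprocess.run(["gofmt"], input=messy_go, capture_output=True, text=True, check=True)
print(result.stdout)  # consistent tab indentation, redundant semicolon removed
```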
[ { "content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py" } ]
[ { "content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py" } ]
diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py index bdf3b589dc..f385d562ac 100644 --- a/bears/go/GofmtBear.py +++ b/bears/go/GofmtBear.py @@ -20,6 +20,7 @@ class GofmtBear: AUTHORS_EMAILS = {'[email protected]'} LICENSE = 'AGPL-3.0' CAN_FIX = {'Formatting'} + ASCIINEMA_URL = 'https://asciinema.org/a/94812' @staticmethod def create_arguments(filename, file, config_file):
getsentry__sentry-1340
Do we really need 'redis>=2.7.0,<2.9.0'? Hi, recently I was trying to use Sentry with [django-redis](https://github.com/niwibe/django-redis) as a cache backend, and this can't be (easily) done with current versions of both django-redis and Sentry, since django-redis requires [redis>=2.10.0](https://github.com/niwibe/django-redis/blob/fcfd73d85d4fc3350d9cdacdb08546a5f4c9a66d/setup.py#L21). A simple installation shows that Sentry works fine with `redis==2.10.3`, but I guess this would need more thorough tests. I briefly checked the redis-py changelog, and it seems the only potentially backwards-incompatible change is [this](https://github.com/andymccurdy/redis-py/blob/54e1040b576afb4155bf839483428c5edac14df0/CHANGES#L9-L15). Also, I noticed that the current version of Sentry has a built-in Redis cache backend, but it doesn't seem to cover other apps installed within the project. I also posted a similar issue as niwibe/django-redis#113.
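A hedged illustration of the pin conflict, using the `packaging` library as a stand-in for pip's resolution; the specifier strings are taken from the two setup.py requirements referenced above and from the relaxed pin in the diff below.

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

sentry_pin = SpecifierSet(">=2.7.0,<2.9.0")   # sentry's current redis requirement
django_redis_pin = SpecifierSet(">=2.10.0")   # django-redis's redis requirement
candidate = Version("2.10.3")                 # the version reported to work

print(candidate in sentry_pin)                        # False -> the conflict
print(candidate in django_redis_pin)                  # True
print(candidate in SpecifierSet(">=2.7.0,<2.11.0"))   # True under the relaxed pin
```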
[ { "content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport sys\n\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\nfor m in ('multiprocessing', 'billiard'):\n try:\n __import__(m)\n except ImportError:\n pass\n\nsetup_requires = []\n\nif 'test' in sys.argv:\n setup_requires.append('pytest')\n\ndev_requires = [\n 'flake8>=2.0,<2.1',\n]\n\ntests_require = [\n 'casscache',\n 'cqlsh',\n 'elasticsearch',\n 'exam>=0.5.1',\n 'eventlet',\n 'httpretty',\n 'pytest',\n 'pytest-cov>=1.4',\n 'pytest-django',\n 'pytest-timeout',\n 'python-coveralls',\n 'mock>=0.8.0',\n 'riak',\n 'unittest2',\n]\n\n\ninstall_requires = [\n 'BeautifulSoup>=3.2.1,<3.3.0',\n 'celery>=3.0.15,<3.1.0',\n 'cssutils>=0.9.9,<0.10.0',\n 'Django>=1.5.8,<1.6',\n 'django-bitfield>=1.7.0,<1.8.0',\n 'django-celery>=3.0.11,<3.1.0',\n 'django-crispy-forms>=1.2.3,<1.3.0',\n 'django-paging>=0.2.5,<0.3.0',\n 'django-picklefield>=0.3.0,<0.4.0',\n 'django-recaptcha>=1.0.0,<1.1.0',\n 'django-social-auth>=0.7.28,<0.8.0',\n 'django-static-compiler>=0.3.0,<0.4.0',\n 'django-statsd-mozilla>=0.3.8.0,<0.3.9.0',\n 'django-sudo>=1.1.0,<1.2.0',\n 'django-templatetag-sugar>=0.1.0',\n 'djangorestframework>=2.3.8,<2.4.0',\n 'email-reply-parser>=0.2.0,<0.3.0',\n 'enum34>=0.9.18,<0.10.0',\n 'gunicorn>=0.17.2,<0.18.0',\n 'ipaddr>=2.1.11,<2.2.0',\n 'logan>=0.5.8.2,<0.6.0',\n 'nydus>=0.10.7,<0.11.0',\n 'progressbar>=2.2,<2.4',\n 'Pygments>=1.6.0,<1.7.0',\n 'python-dateutil>=1.5.0,<2.0.0',\n 'python-memcached>=1.53,<2.0.0',\n 'raven>=5.0.0',\n 'redis>=2.7.0,<2.9.0',\n 'simplejson>=3.1.0,<3.4.0',\n 'six>=1.6.0,<2.0.0',\n 'setproctitle>=1.1.7,<1.2.0',\n 'South==1.0.1',\n 'toronado>=0.0.4,<0.1.0',\n 'ua-parser>=0.3.5',\n 'urllib3>=1.7.1,<1.8.0',\n]\n\npostgres_requires = [\n 'psycopg2>=2.5.0,<2.6.0',\n]\n\npostgres_pypy_requires = [\n 'psycopg2cffi',\n]\n\nmysql_requires = [\n 'MySQL-python>=1.2.0,<1.3.0',\n]\n\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nsetup(\n name='sentry',\n version='7.0.0-DEV',\n author='David Cramer',\n author_email='[email protected]',\n url='https://www.getsentry.com',\n description='A realtime logging and aggregation server.',\n long_description=open('README.rst').read(),\n 
package_dir={'': 'src'},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n 'tests': tests_require,\n 'dev': dev_requires,\n 'postgres': install_requires + postgres_requires,\n 'postgres_pypy': install_requires + postgres_pypy_requires,\n 'mysql': install_requires + mysql_requires,\n },\n tests_require=tests_require,\n cmdclass={'test': PyTest},\n license='BSD',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'sentry = sentry.utils.runner:main',\n ],\n },\n classifiers=[\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development'\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport sys\n\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\nfor m in ('multiprocessing', 'billiard'):\n try:\n __import__(m)\n except ImportError:\n pass\n\nsetup_requires = []\n\nif 'test' in sys.argv:\n setup_requires.append('pytest')\n\ndev_requires = [\n 'flake8>=2.0,<2.1',\n]\n\ntests_require = [\n 'casscache',\n 'cqlsh',\n 'elasticsearch',\n 'exam>=0.5.1',\n 'eventlet',\n 'httpretty',\n 'pytest',\n 'pytest-cov>=1.4',\n 'pytest-django',\n 'pytest-timeout',\n 'python-coveralls',\n 'mock>=0.8.0',\n 'riak',\n 'unittest2',\n]\n\n\ninstall_requires = [\n 'BeautifulSoup>=3.2.1,<3.3.0',\n 'celery>=3.0.15,<3.1.0',\n 'cssutils>=0.9.9,<0.10.0',\n 'Django>=1.5.8,<1.6',\n 'django-bitfield>=1.7.0,<1.8.0',\n 'django-celery>=3.0.11,<3.1.0',\n 'django-crispy-forms>=1.2.3,<1.3.0',\n 'django-paging>=0.2.5,<0.3.0',\n 'django-picklefield>=0.3.0,<0.4.0',\n 'django-recaptcha>=1.0.0,<1.1.0',\n 'django-social-auth>=0.7.28,<0.8.0',\n 'django-static-compiler>=0.3.0,<0.4.0',\n 'django-statsd-mozilla>=0.3.8.0,<0.3.9.0',\n 'django-sudo>=1.1.0,<1.2.0',\n 'django-templatetag-sugar>=0.1.0',\n 'djangorestframework>=2.3.8,<2.4.0',\n 'email-reply-parser>=0.2.0,<0.3.0',\n 'enum34>=0.9.18,<0.10.0',\n 'gunicorn>=0.17.2,<0.18.0',\n 'ipaddr>=2.1.11,<2.2.0',\n 'logan>=0.5.8.2,<0.6.0',\n 'nydus>=0.10.7,<0.11.0',\n 'progressbar>=2.2,<2.4',\n 'Pygments>=1.6.0,<1.7.0',\n 'python-dateutil>=1.5.0,<2.0.0',\n 'python-memcached>=1.53,<2.0.0',\n 'raven>=5.0.0',\n 'redis>=2.7.0,<2.11.0',\n 'simplejson>=3.1.0,<3.4.0',\n 'six>=1.6.0,<2.0.0',\n 'setproctitle>=1.1.7,<1.2.0',\n 'South==1.0.1',\n 'toronado>=0.0.4,<0.1.0',\n 'ua-parser>=0.3.5',\n 'urllib3>=1.7.1,<1.8.0',\n]\n\npostgres_requires = [\n 'psycopg2>=2.5.0,<2.6.0',\n]\n\npostgres_pypy_requires = [\n 'psycopg2cffi',\n]\n\nmysql_requires = [\n 'MySQL-python>=1.2.0,<1.3.0',\n]\n\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nsetup(\n name='sentry',\n version='7.0.0-DEV',\n author='David Cramer',\n author_email='[email protected]',\n url='https://www.getsentry.com',\n description='A realtime logging and aggregation server.',\n long_description=open('README.rst').read(),\n 
package_dir={'': 'src'},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n 'tests': tests_require,\n 'dev': dev_requires,\n 'postgres': install_requires + postgres_requires,\n 'postgres_pypy': install_requires + postgres_pypy_requires,\n 'mysql': install_requires + mysql_requires,\n },\n tests_require=tests_require,\n cmdclass={'test': PyTest},\n license='BSD',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'sentry = sentry.utils.runner:main',\n ],\n },\n classifiers=[\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development'\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index cddf1334767824..90584c634a635e 100755 --- a/setup.py +++ b/setup.py @@ -92,7 +92,7 @@ 'python-dateutil>=1.5.0,<2.0.0', 'python-memcached>=1.53,<2.0.0', 'raven>=5.0.0', - 'redis>=2.7.0,<2.9.0', + 'redis>=2.7.0,<2.11.0', 'simplejson>=3.1.0,<3.4.0', 'six>=1.6.0,<2.0.0', 'setproctitle>=1.1.7,<1.2.0',
getnikola__nikola-3762
nikola auto: "AttributeError: 'FileOpenedEvent' object has no attribute '_src_path'. Did you mean: 'src_path'?" <!-- Before creating an issue: * make sure you are using an up-to-date version of Nikola * search for existing issues that might be related Make sure to: * provide information about your environment (below) * include all the output you get, and any other information related to your problem Nikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED. If you are using this version, you should upgrade: https://getnikola.com/getting-started.html --> ### Environment **Python Version:** 3.12.2 **Nikola Version:** SHA #b46b1211128 on my branch **Operating System:** Debian Bullseye ### Description: `nikola auto -b` produces an error ``` Exception in thread Thread-1: Traceback (most recent call last): File "/usr/local/python-3.12.2/lib/python3.12/threading.py", line 1073, in _bootstrap_inner self.run() File "/home/andreas/workspace-famsik/dj3ei/venv/lib/python3.12/site-packages/watchdog/observers/api.py", line 223, in run self.dispatch_events(self.event_queue) File "/home/andreas/workspace-famsik/dj3ei/venv/lib/python3.12/site-packages/watchdog/observers/api.py", line 402, in dispatch_events handler.dispatch(event) File "/home/andreas/comp/nikola/nikola/plugins/command/auto/__init__.py", line 623, in dispatch if event._src_path == self.configuration_filename: ^^^^^^^^^^^^^^^ AttributeError: 'FileOpenedEvent' object has no attribute '_src_path'. Did you mean: 'src_path'? ``` ## Initial analysis: Our dependency `watchdog` came out with a new major release 4.0.0 and 4.0.1. The private attribute `_src_path` has been promoted official to `src_path`. Fix should be easy, forthcoming.
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2024 Chris Warrick, Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Automatic rebuilds for Nikola.\"\"\"\n\nimport asyncio\nimport datetime\nimport mimetypes\nimport os\nimport re\nimport stat\nimport subprocess\nimport sys\nimport typing\nimport urllib.parse\nimport webbrowser\nfrom pathlib import Path\n\nimport blinker\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import dns_sd, req_missing, get_theme_path, makedirs, pkg_resources_path\n\ntry:\n import aiohttp\n from aiohttp import web\n from aiohttp.web_urldispatcher import StaticResource\n from aiohttp.web_exceptions import HTTPNotFound, HTTPForbidden, HTTPMovedPermanently\n from aiohttp.web_response import Response\n from aiohttp.web_fileresponse import FileResponse\nexcept ImportError:\n aiohttp = web = None\n StaticResource = HTTPNotFound = HTTPForbidden = Response = FileResponse = object\n\ntry:\n from watchdog.observers import Observer\n from watchdog.observers.polling import PollingObserver\nexcept ImportError:\n Observer = None\n PollingObserver = None\n\nLRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')\nREBUILDING_REFRESH_DELAY = 0.35\nIDLE_REFRESH_DELAY = 0.05\n\n\ndef base_path_from_siteuri(siteuri: str) -> str:\n \"\"\"Extract the path part from a URI such as site['SITE_URL'].\n\n The path never ends with a \"/\". 
(If only \"/\" is intended, it is empty.)\n \"\"\"\n path = urllib.parse.urlsplit(siteuri).path\n if path.endswith(\"/\"):\n path = path[:-1]\n return path\n\n\nclass CommandAuto(Command):\n \"\"\"Automatic rebuilds for Nikola.\"\"\"\n\n name = \"auto\"\n has_server = True\n doc_purpose = \"builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser\"\n dns_sd = None\n delta_last_rebuild = datetime.timedelta(milliseconds=100)\n web_runner = None # type: web.AppRunner\n\n cmd_options = [\n {\n 'name': 'port',\n 'short': 'p',\n 'long': 'port',\n 'default': 8000,\n 'type': int,\n 'help': 'Port number',\n },\n {\n 'name': 'address',\n 'short': 'a',\n 'long': 'address',\n 'type': str,\n 'default': '127.0.0.1',\n 'help': 'Address to bind',\n },\n {\n 'name': 'browser',\n 'short': 'b',\n 'long': 'browser',\n 'type': bool,\n 'help': 'Start a web browser',\n 'default': False,\n },\n {\n 'name': 'ipv6',\n 'short': '6',\n 'long': 'ipv6',\n 'default': False,\n 'type': bool,\n 'help': 'Use IPv6',\n },\n {\n 'name': 'no-server',\n 'long': 'no-server',\n 'default': False,\n 'type': bool,\n 'help': 'Disable the server, automate rebuilds only'\n },\n {\n 'name': 'process',\n 'short': 'n',\n 'long': 'process',\n 'default': 0,\n 'type': int,\n 'help': 'Number of subprocesses',\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'parallel-type',\n 'short': 'P',\n 'long': 'parallel-type',\n 'default': 'process',\n 'type': str,\n 'help': \"Parallelization mode ('process' or 'thread')\",\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'db-file',\n 'long': 'db-file',\n 'default': '.doit.db',\n 'type': str,\n 'help': 'Database file',\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'backend',\n 'long': 'backend',\n 'default': 'dbm',\n 'type': str,\n 'help': \"Database backend ('dbm', 'json', 'sqlite3')\",\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n # We might be able to improve on this\n # if and when https://github.com/gorakhargosh/watchdog/issues/365\n # is ever fixed.\n 'name': 'poll',\n 'long': 'poll',\n 'default': False,\n 'type': bool,\n 'help': 'Use polling to notice changes behind symbolic links. 
This may reduce performance.'\n }\n ]\n\n def _execute(self, options, args):\n \"\"\"Start the watcher.\"\"\"\n self.sockets = []\n self.rebuild_queue = asyncio.Queue()\n self.reload_queue = asyncio.Queue()\n self.last_rebuild = datetime.datetime.now()\n self.is_rebuilding = False\n\n if aiohttp is None and Observer is None:\n req_missing(['aiohttp', 'watchdog'], 'use the \"auto\" command')\n elif aiohttp is None:\n req_missing(['aiohttp'], 'use the \"auto\" command')\n elif Observer is None:\n req_missing(['watchdog'], 'use the \"auto\" command')\n\n blinker.signal('auto_command_starting').send(self.site)\n\n if sys.argv[0].endswith('__main__.py'):\n self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']\n else:\n self.nikola_cmd = [sys.argv[0], 'build']\n\n if self.site.configuration_filename != 'conf.py':\n self.nikola_cmd.append('--conf=' + self.site.configuration_filename)\n\n if options and options.get('process'):\n self.nikola_cmd += ['--process={}'.format(options['process']),\n '--parallel-type={}'.format(options['parallel-type'])]\n\n if options:\n self.nikola_cmd += ['--db-file={}'.format(options['db-file']),\n '--backend={}'.format(options['backend'])]\n\n port = options and options.get('port')\n self.snippet = '''<script>document.write('<script src=\"http://'\n + (location.host || 'localhost').split(':')[0]\n + ':{0}/livereload.js?snipver=1\"></'\n + 'script>')</script>\n </head>'''.format(port)\n\n # Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered\n watched = set([\n 'templates/'\n ] + [get_theme_path(name) for name in self.site.THEMES])\n for item in self.site.config['post_pages']:\n watched.add(os.path.dirname(item[0]))\n for item in self.site.config['FILES_FOLDERS']:\n watched.add(item)\n for item in self.site.config['GALLERY_FOLDERS']:\n watched.add(item)\n for item in self.site.config['LISTINGS_FOLDERS']:\n watched.add(item)\n for item in self.site.config['IMAGE_FOLDERS']:\n watched.add(item)\n for item in self.site._plugin_places:\n watched.add(item)\n watched |= self.site.registered_auto_watched_folders\n # Nikola itself (useful for developers)\n watched.add(pkg_resources_path('nikola', ''))\n\n out_folder = self.site.config['OUTPUT_FOLDER']\n if not os.path.exists(out_folder):\n makedirs(out_folder)\n\n if options and options.get('browser'):\n browser = True\n else:\n browser = False\n\n if options['ipv6']:\n dhost = '::'\n else:\n dhost = '0.0.0.0'\n\n host = options['address'].strip('[').strip(']') or dhost\n\n # Prepare asyncio event loop\n # Required for subprocessing to work\n loop = asyncio.get_event_loop()\n\n # Set debug setting\n loop.set_debug(self.site.debug)\n\n # Server can be disabled (Issue #1883)\n self.has_server = not options['no-server']\n\n base_path = base_path_from_siteuri(self.site.config['SITE_URL'])\n\n if self.has_server:\n loop.run_until_complete(self.set_up_server(host, port, base_path, out_folder))\n\n # Run an initial build so we are up-to-date. 
The server is running, but we are not watching yet.\n loop.run_until_complete(self.run_initial_rebuild())\n\n self.wd_observer = Observer() if not options['poll'] else PollingObserver()\n # Watch output folders and trigger reloads\n if self.has_server:\n self.wd_observer.schedule(NikolaEventHandler(self.reload_page, loop), out_folder, recursive=True)\n\n # Watch input folders and trigger rebuilds\n for p in watched:\n if os.path.exists(p):\n self.wd_observer.schedule(NikolaEventHandler(self.queue_rebuild, loop), p, recursive=True)\n\n # Watch config file (a bit of a hack, but we need a directory)\n _conf_fn = os.path.abspath(self.site.configuration_filename or 'conf.py')\n _conf_dn = os.path.dirname(_conf_fn)\n self.wd_observer.schedule(ConfigEventHandler(_conf_fn, self.queue_rebuild, loop), _conf_dn, recursive=False)\n self.wd_observer.start()\n\n if not self.has_server:\n self.logger.info(\"Watching for changes...\")\n # Run the event loop forever (no server mode).\n try:\n # Run rebuild queue\n loop.run_until_complete(self.run_rebuild_queue())\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n self.wd_observer.stop()\n self.wd_observer.join()\n return\n\n if options['ipv6'] or '::' in host:\n server_url = \"http://[{0}]:{1}/\".format(host, port)\n else:\n server_url = \"http://{0}:{1}/\".format(host, port)\n self.logger.info(\"Serving on {0} ...\".format(server_url))\n\n if browser:\n # Some browsers fail to load 0.0.0.0 (Issue #2755)\n if host == '0.0.0.0':\n browser_url = \"http://127.0.0.1:{0}/{1}\".format(port, base_path.lstrip(\"/\"))\n else:\n # server_url always ends with a \"/\":\n browser_url = \"{0}{1}\".format(server_url, base_path.lstrip(\"/\"))\n self.logger.info(\"Opening {0} in the default web browser...\".format(browser_url))\n webbrowser.open(browser_url)\n\n # Run the event loop forever and handle shutdowns.\n try:\n # Run rebuild queue\n rebuild_queue_fut = asyncio.ensure_future(self.run_rebuild_queue())\n reload_queue_fut = asyncio.ensure_future(self.run_reload_queue())\n\n self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n self.logger.info(\"Server is shutting down.\")\n if self.dns_sd:\n self.dns_sd.Reset()\n rebuild_queue_fut.cancel()\n reload_queue_fut.cancel()\n\n # Not sure why this isn't done by the web_runner.cleanup() code:\n loop.run_until_complete(self.remove_websockets(None))\n\n loop.run_until_complete(self.web_runner.cleanup())\n self.wd_observer.stop()\n self.wd_observer.join()\n\n async def set_up_server(self, host: str, port: int, base_path: str, out_folder: str) -> None:\n \"\"\"Set up aiohttp server and start it.\"\"\"\n webapp = web.Application()\n webapp.router.add_get('/livereload.js', self.serve_livereload_js)\n webapp.router.add_get('/robots.txt', self.serve_robots_txt)\n webapp.router.add_route('*', '/livereload', self.websocket_handler)\n resource = IndexHtmlStaticResource(True, self.snippet, base_path, out_folder)\n webapp.router.register_resource(resource)\n webapp.on_shutdown.append(self.remove_websockets)\n\n self.web_runner = web.AppRunner(webapp)\n await self.web_runner.setup()\n website = web.TCPSite(self.web_runner, host, port)\n await website.start()\n\n async def run_initial_rebuild(self) -> None:\n \"\"\"Run an initial rebuild.\"\"\"\n await self._rebuild_site()\n # If there are any clients, have them reload the root.\n await self._send_reload_command(self.site.config['INDEX_FILE'])\n\n async def queue_rebuild(self, event) -> None:\n 
\"\"\"Rebuild the site.\"\"\"\n # Move events have a dest_path, some editors like gedit use a\n # move on larger save operations for write protection\n event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path\n if sys.platform == 'win32':\n # Windows hidden files support\n is_hidden = os.stat(event_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN\n else:\n is_hidden = False\n has_hidden_component = any(p.startswith('.') for p in event_path.split(os.sep))\n if (is_hidden or has_hidden_component or\n '__pycache__' in event_path or\n event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak', '~')) or\n event.is_directory): # Skip on folders, these are usually duplicates\n return\n\n self.logger.debug('Queuing rebuild from {0}'.format(event_path))\n await self.rebuild_queue.put((datetime.datetime.now(), event_path))\n\n async def run_rebuild_queue(self) -> None:\n \"\"\"Run rebuilds from a queue (Nikola can only build in a single instance).\"\"\"\n while True:\n date, event_path = await self.rebuild_queue.get()\n if date < (self.last_rebuild + self.delta_last_rebuild):\n self.logger.debug(\"Skipping rebuild from {0} (within delta)\".format(event_path))\n continue\n await self._rebuild_site(event_path)\n\n async def _rebuild_site(self, event_path: typing.Optional[str] = None) -> None:\n \"\"\"Rebuild the site.\"\"\"\n self.is_rebuilding = True\n self.last_rebuild = datetime.datetime.now()\n if event_path:\n self.logger.info('REBUILDING SITE (from {0})'.format(event_path))\n else:\n self.logger.info('REBUILDING SITE')\n\n p = await asyncio.create_subprocess_exec(*self.nikola_cmd, stderr=subprocess.PIPE)\n exit_code = await p.wait()\n out = (await p.stderr.read()).decode('utf-8')\n\n if exit_code != 0:\n self.logger.error(\"Rebuild failed\\n\" + out)\n await self.send_to_websockets({'command': 'alert', 'message': out})\n else:\n self.logger.info(\"Rebuild successful\\n\" + out)\n\n self.is_rebuilding = False\n\n async def run_reload_queue(self) -> None:\n \"\"\"Send reloads from a queue to limit CPU usage.\"\"\"\n while True:\n p = await self.reload_queue.get()\n self.logger.info('REFRESHING: {0}'.format(p))\n await self._send_reload_command(p)\n if self.is_rebuilding:\n await asyncio.sleep(REBUILDING_REFRESH_DELAY)\n else:\n await asyncio.sleep(IDLE_REFRESH_DELAY)\n\n async def _send_reload_command(self, path: str) -> None:\n \"\"\"Send a reload command.\"\"\"\n await self.send_to_websockets({'command': 'reload', 'path': path, 'liveCSS': True})\n\n async def reload_page(self, event) -> None:\n \"\"\"Reload the page.\"\"\"\n # Move events have a dest_path, some editors like gedit use a\n # move on larger save operations for write protection\n if event:\n event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path\n else:\n event_path = self.site.config['OUTPUT_FOLDER']\n p = os.path.relpath(event_path, os.path.abspath(self.site.config['OUTPUT_FOLDER'])).replace(os.sep, '/')\n await self.reload_queue.put(p)\n\n async def serve_livereload_js(self, request):\n \"\"\"Handle requests to /livereload.js and serve the JS file.\"\"\"\n return FileResponse(LRJS_PATH)\n\n async def serve_robots_txt(self, request):\n \"\"\"Handle requests to /robots.txt.\"\"\"\n return Response(body=b'User-Agent: *\\nDisallow: /\\n', content_type='text/plain', charset='utf-8')\n\n async def websocket_handler(self, request):\n \"\"\"Handle requests to /livereload and initiate WebSocket communication.\"\"\"\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n 
self.sockets.append(ws)\n\n while True:\n msg = await ws.receive()\n\n self.logger.debug(\"Received message: {0}\".format(msg))\n if msg.type == aiohttp.WSMsgType.TEXT:\n message = msg.json()\n if message['command'] == 'hello':\n response = {\n 'command': 'hello',\n 'protocols': [\n 'http://livereload.com/protocols/official-7',\n ],\n 'serverName': 'Nikola Auto (livereload)',\n }\n await ws.send_json(response)\n elif message['command'] != 'info':\n self.logger.warning(\"Unknown command in message: {0}\".format(message))\n elif msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING):\n break\n elif msg.type == aiohttp.WSMsgType.CLOSE:\n self.logger.debug(\"Closing WebSocket\")\n await ws.close()\n break\n elif msg.type == aiohttp.WSMsgType.ERROR:\n self.logger.error('WebSocket connection closed with exception {0}'.format(ws.exception()))\n break\n else:\n self.logger.warning(\"Received unknown message: {0}\".format(msg))\n\n self.sockets.remove(ws)\n self.logger.debug(\"WebSocket connection closed: {0}\".format(ws))\n\n return ws\n\n async def remove_websockets(self, _app) -> None:\n \"\"\"Remove all websockets.\"\"\"\n for ws in self.sockets:\n await ws.close()\n self.sockets.clear()\n\n async def send_to_websockets(self, message: dict) -> None:\n \"\"\"Send a message to all open WebSockets.\"\"\"\n to_delete = []\n for ws in self.sockets:\n if ws.closed:\n to_delete.append(ws)\n continue\n\n try:\n await ws.send_json(message)\n if ws._close_code:\n await ws.close()\n to_delete.append(ws)\n except RuntimeError as e:\n if 'closed' in e.args[0]:\n self.logger.warning(\"WebSocket {0} closed uncleanly\".format(ws))\n to_delete.append(ws)\n else:\n raise\n\n for ws in to_delete:\n self.sockets.remove(ws)\n\n\nclass IndexHtmlStaticResource(StaticResource):\n \"\"\"A StaticResource implementation that serves /index.html in directory roots.\"\"\"\n\n modify_html = True\n snippet = \"</head>\"\n\n def __init__(self, modify_html=True, snippet=\"</head>\", *args, **kwargs):\n \"\"\"Initialize a resource.\"\"\"\n self.modify_html = modify_html\n self.snippet = snippet\n super().__init__(*args, **kwargs)\n\n async def _handle(self, request: 'web.Request') -> 'web.Response':\n \"\"\"Handle incoming requests (pass to handle_file).\"\"\"\n filename = request.match_info['filename']\n return await self.handle_file(request, filename)\n\n async def handle_file(self, request: 'web.Request', filename: str, from_index=None) -> 'web.Response':\n \"\"\"Handle file requests.\"\"\"\n try:\n unresolved_path = self._directory.joinpath(filename)\n if self._follow_symlinks:\n normalized_path = Path(os.path.normpath(unresolved_path))\n normalized_path.relative_to(self._directory)\n filepath = normalized_path.resolve()\n else:\n filepath = unresolved_path.resolve()\n filepath.relative_to(self._directory)\n except (ValueError, FileNotFoundError) as error:\n # relatively safe\n raise HTTPNotFound() from error\n except Exception as error:\n # perm error or other kind!\n request.app.logger.exception(error)\n raise HTTPNotFound() from error\n\n # on opening a dir, load it's contents if allowed\n if filepath.is_dir():\n if filename.endswith('/') or not filename:\n ret = await self.handle_file(request, filename + 'index.html', from_index=filename)\n else:\n # Redirect and add trailing slash so relative links work (Issue #3140)\n new_url = request.rel_url.path + '/'\n if request.rel_url.query_string:\n new_url += '?' 
+ request.rel_url.query_string\n raise HTTPMovedPermanently(new_url)\n elif filepath.is_file():\n ct, encoding = mimetypes.guess_type(str(filepath))\n encoding = encoding or 'utf-8'\n if ct == 'text/html' and self.modify_html:\n if sys.version_info[0] == 3 and sys.version_info[1] <= 5:\n # Python 3.4 and 3.5 do not accept pathlib.Path objects in calls to open()\n filepath = str(filepath)\n with open(filepath, 'r', encoding=encoding) as fh:\n text = fh.read()\n text = self.transform_html(text)\n ret = Response(text=text, content_type=ct, charset=encoding)\n else:\n ret = FileResponse(filepath, chunk_size=self._chunk_size)\n elif from_index:\n filepath = self._directory.joinpath(from_index).resolve()\n try:\n return Response(text=self._directory_as_html(filepath),\n content_type=\"text/html\")\n except PermissionError:\n raise HTTPForbidden\n else:\n raise HTTPNotFound\n\n return ret\n\n def transform_html(self, text: str) -> str:\n \"\"\"Apply some transforms to HTML content.\"\"\"\n # Inject livereload.js\n text = text.replace('</head>', self.snippet, 1)\n # Disable <base> tag\n text = re.sub(r'<base\\s([^>]*)>', r'<!--base \\g<1>-->', text, flags=re.IGNORECASE)\n return text\n\n\n# Based on code from the 'hachiko' library by John Biesnecker — thanks!\n# https://github.com/biesnecker/hachiko\nclass NikolaEventHandler:\n \"\"\"A Nikola-specific event handler for Watchdog. Based on code from hachiko.\"\"\"\n\n def __init__(self, function, loop):\n \"\"\"Initialize the handler.\"\"\"\n self.function = function\n self.loop = loop\n\n def dispatch(self, event):\n \"\"\"Dispatch events to handler.\"\"\"\n if event.event_type in {\"opened\", \"closed\"}:\n return\n self.loop.call_soon_threadsafe(asyncio.ensure_future, self.function(event))\n\n\nclass ConfigEventHandler(NikolaEventHandler):\n \"\"\"A Nikola-specific handler for Watchdog that handles the config file (as a workaround).\"\"\"\n\n def __init__(self, configuration_filename, function, loop):\n \"\"\"Initialize the handler.\"\"\"\n super().__init__(function, loop)\n self.configuration_filename = configuration_filename\n\n def dispatch(self, event):\n \"\"\"Handle file events if they concern the configuration file.\"\"\"\n if event._src_path == self.configuration_filename:\n super().dispatch(event)\n", "path": "nikola/plugins/command/auto/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2024 Chris Warrick, Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Automatic rebuilds for Nikola.\"\"\"\n\nimport asyncio\nimport datetime\nimport mimetypes\nimport os\nimport re\nimport stat\nimport subprocess\nimport sys\nimport typing\nimport urllib.parse\nimport webbrowser\nfrom pathlib import Path\n\nimport blinker\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import dns_sd, req_missing, get_theme_path, makedirs, pkg_resources_path\n\ntry:\n import aiohttp\n from aiohttp import web\n from aiohttp.web_urldispatcher import StaticResource\n from aiohttp.web_exceptions import HTTPNotFound, HTTPForbidden, HTTPMovedPermanently\n from aiohttp.web_response import Response\n from aiohttp.web_fileresponse import FileResponse\nexcept ImportError:\n aiohttp = web = None\n StaticResource = HTTPNotFound = HTTPForbidden = Response = FileResponse = object\n\ntry:\n from watchdog.observers import Observer\n from watchdog.observers.polling import PollingObserver\nexcept ImportError:\n Observer = None\n PollingObserver = None\n\nLRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')\nREBUILDING_REFRESH_DELAY = 0.35\nIDLE_REFRESH_DELAY = 0.05\n\n\ndef base_path_from_siteuri(siteuri: str) -> str:\n \"\"\"Extract the path part from a URI such as site['SITE_URL'].\n\n The path never ends with a \"/\". 
(If only \"/\" is intended, it is empty.)\n \"\"\"\n path = urllib.parse.urlsplit(siteuri).path\n if path.endswith(\"/\"):\n path = path[:-1]\n return path\n\n\nclass CommandAuto(Command):\n \"\"\"Automatic rebuilds for Nikola.\"\"\"\n\n name = \"auto\"\n has_server = True\n doc_purpose = \"builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser\"\n dns_sd = None\n delta_last_rebuild = datetime.timedelta(milliseconds=100)\n web_runner = None # type: web.AppRunner\n\n cmd_options = [\n {\n 'name': 'port',\n 'short': 'p',\n 'long': 'port',\n 'default': 8000,\n 'type': int,\n 'help': 'Port number',\n },\n {\n 'name': 'address',\n 'short': 'a',\n 'long': 'address',\n 'type': str,\n 'default': '127.0.0.1',\n 'help': 'Address to bind',\n },\n {\n 'name': 'browser',\n 'short': 'b',\n 'long': 'browser',\n 'type': bool,\n 'help': 'Start a web browser',\n 'default': False,\n },\n {\n 'name': 'ipv6',\n 'short': '6',\n 'long': 'ipv6',\n 'default': False,\n 'type': bool,\n 'help': 'Use IPv6',\n },\n {\n 'name': 'no-server',\n 'long': 'no-server',\n 'default': False,\n 'type': bool,\n 'help': 'Disable the server, automate rebuilds only'\n },\n {\n 'name': 'process',\n 'short': 'n',\n 'long': 'process',\n 'default': 0,\n 'type': int,\n 'help': 'Number of subprocesses',\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'parallel-type',\n 'short': 'P',\n 'long': 'parallel-type',\n 'default': 'process',\n 'type': str,\n 'help': \"Parallelization mode ('process' or 'thread')\",\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'db-file',\n 'long': 'db-file',\n 'default': '.doit.db',\n 'type': str,\n 'help': 'Database file',\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n 'name': 'backend',\n 'long': 'backend',\n 'default': 'dbm',\n 'type': str,\n 'help': \"Database backend ('dbm', 'json', 'sqlite3')\",\n 'section': 'Arguments passed to `nikola build`'\n },\n {\n # We might be able to improve on this\n # if and when https://github.com/gorakhargosh/watchdog/issues/365\n # is ever fixed.\n 'name': 'poll',\n 'long': 'poll',\n 'default': False,\n 'type': bool,\n 'help': 'Use polling to notice changes behind symbolic links. 
This may reduce performance.'\n }\n ]\n\n def _execute(self, options, args):\n \"\"\"Start the watcher.\"\"\"\n self.sockets = []\n self.rebuild_queue = asyncio.Queue()\n self.reload_queue = asyncio.Queue()\n self.last_rebuild = datetime.datetime.now()\n self.is_rebuilding = False\n\n if aiohttp is None and Observer is None:\n req_missing(['aiohttp', 'watchdog'], 'use the \"auto\" command')\n elif aiohttp is None:\n req_missing(['aiohttp'], 'use the \"auto\" command')\n elif Observer is None:\n req_missing(['watchdog'], 'use the \"auto\" command')\n\n blinker.signal('auto_command_starting').send(self.site)\n\n if sys.argv[0].endswith('__main__.py'):\n self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']\n else:\n self.nikola_cmd = [sys.argv[0], 'build']\n\n if self.site.configuration_filename != 'conf.py':\n self.nikola_cmd.append('--conf=' + self.site.configuration_filename)\n\n if options and options.get('process'):\n self.nikola_cmd += ['--process={}'.format(options['process']),\n '--parallel-type={}'.format(options['parallel-type'])]\n\n if options:\n self.nikola_cmd += ['--db-file={}'.format(options['db-file']),\n '--backend={}'.format(options['backend'])]\n\n port = options and options.get('port')\n self.snippet = '''<script>document.write('<script src=\"http://'\n + (location.host || 'localhost').split(':')[0]\n + ':{0}/livereload.js?snipver=1\"></'\n + 'script>')</script>\n </head>'''.format(port)\n\n # Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered\n watched = set([\n 'templates/'\n ] + [get_theme_path(name) for name in self.site.THEMES])\n for item in self.site.config['post_pages']:\n watched.add(os.path.dirname(item[0]))\n for item in self.site.config['FILES_FOLDERS']:\n watched.add(item)\n for item in self.site.config['GALLERY_FOLDERS']:\n watched.add(item)\n for item in self.site.config['LISTINGS_FOLDERS']:\n watched.add(item)\n for item in self.site.config['IMAGE_FOLDERS']:\n watched.add(item)\n for item in self.site._plugin_places:\n watched.add(item)\n watched |= self.site.registered_auto_watched_folders\n # Nikola itself (useful for developers)\n watched.add(pkg_resources_path('nikola', ''))\n\n out_folder = self.site.config['OUTPUT_FOLDER']\n if not os.path.exists(out_folder):\n makedirs(out_folder)\n\n if options and options.get('browser'):\n browser = True\n else:\n browser = False\n\n if options['ipv6']:\n dhost = '::'\n else:\n dhost = '0.0.0.0'\n\n host = options['address'].strip('[').strip(']') or dhost\n\n # Prepare asyncio event loop\n # Required for subprocessing to work\n loop = asyncio.get_event_loop()\n\n # Set debug setting\n loop.set_debug(self.site.debug)\n\n # Server can be disabled (Issue #1883)\n self.has_server = not options['no-server']\n\n base_path = base_path_from_siteuri(self.site.config['SITE_URL'])\n\n if self.has_server:\n loop.run_until_complete(self.set_up_server(host, port, base_path, out_folder))\n\n # Run an initial build so we are up-to-date. 
The server is running, but we are not watching yet.\n loop.run_until_complete(self.run_initial_rebuild())\n\n self.wd_observer = Observer() if not options['poll'] else PollingObserver()\n # Watch output folders and trigger reloads\n if self.has_server:\n self.wd_observer.schedule(NikolaEventHandler(self.reload_page, loop), out_folder, recursive=True)\n\n # Watch input folders and trigger rebuilds\n for p in watched:\n if os.path.exists(p):\n self.wd_observer.schedule(NikolaEventHandler(self.queue_rebuild, loop), p, recursive=True)\n\n # Watch config file (a bit of a hack, but we need a directory)\n _conf_fn = os.path.abspath(self.site.configuration_filename or 'conf.py')\n _conf_dn = os.path.dirname(_conf_fn)\n self.wd_observer.schedule(ConfigEventHandler(_conf_fn, self.queue_rebuild, loop), _conf_dn, recursive=False)\n self.wd_observer.start()\n\n if not self.has_server:\n self.logger.info(\"Watching for changes...\")\n # Run the event loop forever (no server mode).\n try:\n # Run rebuild queue\n loop.run_until_complete(self.run_rebuild_queue())\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n self.wd_observer.stop()\n self.wd_observer.join()\n return\n\n if options['ipv6'] or '::' in host:\n server_url = \"http://[{0}]:{1}/\".format(host, port)\n else:\n server_url = \"http://{0}:{1}/\".format(host, port)\n self.logger.info(\"Serving on {0} ...\".format(server_url))\n\n if browser:\n # Some browsers fail to load 0.0.0.0 (Issue #2755)\n if host == '0.0.0.0':\n browser_url = \"http://127.0.0.1:{0}/{1}\".format(port, base_path.lstrip(\"/\"))\n else:\n # server_url always ends with a \"/\":\n browser_url = \"{0}{1}\".format(server_url, base_path.lstrip(\"/\"))\n self.logger.info(\"Opening {0} in the default web browser...\".format(browser_url))\n webbrowser.open(browser_url)\n\n # Run the event loop forever and handle shutdowns.\n try:\n # Run rebuild queue\n rebuild_queue_fut = asyncio.ensure_future(self.run_rebuild_queue())\n reload_queue_fut = asyncio.ensure_future(self.run_reload_queue())\n\n self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n self.logger.info(\"Server is shutting down.\")\n if self.dns_sd:\n self.dns_sd.Reset()\n rebuild_queue_fut.cancel()\n reload_queue_fut.cancel()\n\n # Not sure why this isn't done by the web_runner.cleanup() code:\n loop.run_until_complete(self.remove_websockets(None))\n\n loop.run_until_complete(self.web_runner.cleanup())\n self.wd_observer.stop()\n self.wd_observer.join()\n\n async def set_up_server(self, host: str, port: int, base_path: str, out_folder: str) -> None:\n \"\"\"Set up aiohttp server and start it.\"\"\"\n webapp = web.Application()\n webapp.router.add_get('/livereload.js', self.serve_livereload_js)\n webapp.router.add_get('/robots.txt', self.serve_robots_txt)\n webapp.router.add_route('*', '/livereload', self.websocket_handler)\n resource = IndexHtmlStaticResource(True, self.snippet, base_path, out_folder)\n webapp.router.register_resource(resource)\n webapp.on_shutdown.append(self.remove_websockets)\n\n self.web_runner = web.AppRunner(webapp)\n await self.web_runner.setup()\n website = web.TCPSite(self.web_runner, host, port)\n await website.start()\n\n async def run_initial_rebuild(self) -> None:\n \"\"\"Run an initial rebuild.\"\"\"\n await self._rebuild_site()\n # If there are any clients, have them reload the root.\n await self._send_reload_command(self.site.config['INDEX_FILE'])\n\n async def queue_rebuild(self, event) -> None:\n 
\"\"\"Rebuild the site.\"\"\"\n # Move events have a dest_path, some editors like gedit use a\n # move on larger save operations for write protection\n event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path\n if sys.platform == 'win32':\n # Windows hidden files support\n is_hidden = os.stat(event_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN\n else:\n is_hidden = False\n has_hidden_component = any(p.startswith('.') for p in event_path.split(os.sep))\n if (is_hidden or has_hidden_component or\n '__pycache__' in event_path or\n event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak', '~')) or\n event.is_directory): # Skip on folders, these are usually duplicates\n return\n\n self.logger.debug('Queuing rebuild from {0}'.format(event_path))\n await self.rebuild_queue.put((datetime.datetime.now(), event_path))\n\n async def run_rebuild_queue(self) -> None:\n \"\"\"Run rebuilds from a queue (Nikola can only build in a single instance).\"\"\"\n while True:\n date, event_path = await self.rebuild_queue.get()\n if date < (self.last_rebuild + self.delta_last_rebuild):\n self.logger.debug(\"Skipping rebuild from {0} (within delta)\".format(event_path))\n continue\n await self._rebuild_site(event_path)\n\n async def _rebuild_site(self, event_path: typing.Optional[str] = None) -> None:\n \"\"\"Rebuild the site.\"\"\"\n self.is_rebuilding = True\n self.last_rebuild = datetime.datetime.now()\n if event_path:\n self.logger.info('REBUILDING SITE (from {0})'.format(event_path))\n else:\n self.logger.info('REBUILDING SITE')\n\n p = await asyncio.create_subprocess_exec(*self.nikola_cmd, stderr=subprocess.PIPE)\n exit_code = await p.wait()\n out = (await p.stderr.read()).decode('utf-8')\n\n if exit_code != 0:\n self.logger.error(\"Rebuild failed\\n\" + out)\n await self.send_to_websockets({'command': 'alert', 'message': out})\n else:\n self.logger.info(\"Rebuild successful\\n\" + out)\n\n self.is_rebuilding = False\n\n async def run_reload_queue(self) -> None:\n \"\"\"Send reloads from a queue to limit CPU usage.\"\"\"\n while True:\n p = await self.reload_queue.get()\n self.logger.info('REFRESHING: {0}'.format(p))\n await self._send_reload_command(p)\n if self.is_rebuilding:\n await asyncio.sleep(REBUILDING_REFRESH_DELAY)\n else:\n await asyncio.sleep(IDLE_REFRESH_DELAY)\n\n async def _send_reload_command(self, path: str) -> None:\n \"\"\"Send a reload command.\"\"\"\n await self.send_to_websockets({'command': 'reload', 'path': path, 'liveCSS': True})\n\n async def reload_page(self, event) -> None:\n \"\"\"Reload the page.\"\"\"\n # Move events have a dest_path, some editors like gedit use a\n # move on larger save operations for write protection\n if event:\n event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path\n else:\n event_path = self.site.config['OUTPUT_FOLDER']\n p = os.path.relpath(event_path, os.path.abspath(self.site.config['OUTPUT_FOLDER'])).replace(os.sep, '/')\n await self.reload_queue.put(p)\n\n async def serve_livereload_js(self, request):\n \"\"\"Handle requests to /livereload.js and serve the JS file.\"\"\"\n return FileResponse(LRJS_PATH)\n\n async def serve_robots_txt(self, request):\n \"\"\"Handle requests to /robots.txt.\"\"\"\n return Response(body=b'User-Agent: *\\nDisallow: /\\n', content_type='text/plain', charset='utf-8')\n\n async def websocket_handler(self, request):\n \"\"\"Handle requests to /livereload and initiate WebSocket communication.\"\"\"\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n 
self.sockets.append(ws)\n\n while True:\n msg = await ws.receive()\n\n self.logger.debug(\"Received message: {0}\".format(msg))\n if msg.type == aiohttp.WSMsgType.TEXT:\n message = msg.json()\n if message['command'] == 'hello':\n response = {\n 'command': 'hello',\n 'protocols': [\n 'http://livereload.com/protocols/official-7',\n ],\n 'serverName': 'Nikola Auto (livereload)',\n }\n await ws.send_json(response)\n elif message['command'] != 'info':\n self.logger.warning(\"Unknown command in message: {0}\".format(message))\n elif msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING):\n break\n elif msg.type == aiohttp.WSMsgType.CLOSE:\n self.logger.debug(\"Closing WebSocket\")\n await ws.close()\n break\n elif msg.type == aiohttp.WSMsgType.ERROR:\n self.logger.error('WebSocket connection closed with exception {0}'.format(ws.exception()))\n break\n else:\n self.logger.warning(\"Received unknown message: {0}\".format(msg))\n\n self.sockets.remove(ws)\n self.logger.debug(\"WebSocket connection closed: {0}\".format(ws))\n\n return ws\n\n async def remove_websockets(self, _app) -> None:\n \"\"\"Remove all websockets.\"\"\"\n for ws in self.sockets:\n await ws.close()\n self.sockets.clear()\n\n async def send_to_websockets(self, message: dict) -> None:\n \"\"\"Send a message to all open WebSockets.\"\"\"\n to_delete = []\n for ws in self.sockets:\n if ws.closed:\n to_delete.append(ws)\n continue\n\n try:\n await ws.send_json(message)\n if ws._close_code:\n await ws.close()\n to_delete.append(ws)\n except RuntimeError as e:\n if 'closed' in e.args[0]:\n self.logger.warning(\"WebSocket {0} closed uncleanly\".format(ws))\n to_delete.append(ws)\n else:\n raise\n\n for ws in to_delete:\n self.sockets.remove(ws)\n\n\nclass IndexHtmlStaticResource(StaticResource):\n \"\"\"A StaticResource implementation that serves /index.html in directory roots.\"\"\"\n\n modify_html = True\n snippet = \"</head>\"\n\n def __init__(self, modify_html=True, snippet=\"</head>\", *args, **kwargs):\n \"\"\"Initialize a resource.\"\"\"\n self.modify_html = modify_html\n self.snippet = snippet\n super().__init__(*args, **kwargs)\n\n async def _handle(self, request: 'web.Request') -> 'web.Response':\n \"\"\"Handle incoming requests (pass to handle_file).\"\"\"\n filename = request.match_info['filename']\n return await self.handle_file(request, filename)\n\n async def handle_file(self, request: 'web.Request', filename: str, from_index=None) -> 'web.Response':\n \"\"\"Handle file requests.\"\"\"\n try:\n unresolved_path = self._directory.joinpath(filename)\n if self._follow_symlinks:\n normalized_path = Path(os.path.normpath(unresolved_path))\n normalized_path.relative_to(self._directory)\n filepath = normalized_path.resolve()\n else:\n filepath = unresolved_path.resolve()\n filepath.relative_to(self._directory)\n except (ValueError, FileNotFoundError) as error:\n # relatively safe\n raise HTTPNotFound() from error\n except Exception as error:\n # perm error or other kind!\n request.app.logger.exception(error)\n raise HTTPNotFound() from error\n\n # on opening a dir, load it's contents if allowed\n if filepath.is_dir():\n if filename.endswith('/') or not filename:\n ret = await self.handle_file(request, filename + 'index.html', from_index=filename)\n else:\n # Redirect and add trailing slash so relative links work (Issue #3140)\n new_url = request.rel_url.path + '/'\n if request.rel_url.query_string:\n new_url += '?' 
+ request.rel_url.query_string\n raise HTTPMovedPermanently(new_url)\n elif filepath.is_file():\n ct, encoding = mimetypes.guess_type(str(filepath))\n encoding = encoding or 'utf-8'\n if ct == 'text/html' and self.modify_html:\n if sys.version_info[0] == 3 and sys.version_info[1] <= 5:\n # Python 3.4 and 3.5 do not accept pathlib.Path objects in calls to open()\n filepath = str(filepath)\n with open(filepath, 'r', encoding=encoding) as fh:\n text = fh.read()\n text = self.transform_html(text)\n ret = Response(text=text, content_type=ct, charset=encoding)\n else:\n ret = FileResponse(filepath, chunk_size=self._chunk_size)\n elif from_index:\n filepath = self._directory.joinpath(from_index).resolve()\n try:\n return Response(text=self._directory_as_html(filepath),\n content_type=\"text/html\")\n except PermissionError:\n raise HTTPForbidden\n else:\n raise HTTPNotFound\n\n return ret\n\n def transform_html(self, text: str) -> str:\n \"\"\"Apply some transforms to HTML content.\"\"\"\n # Inject livereload.js\n text = text.replace('</head>', self.snippet, 1)\n # Disable <base> tag\n text = re.sub(r'<base\\s([^>]*)>', r'<!--base \\g<1>-->', text, flags=re.IGNORECASE)\n return text\n\n\n# Based on code from the 'hachiko' library by John Biesnecker — thanks!\n# https://github.com/biesnecker/hachiko\nclass NikolaEventHandler:\n \"\"\"A Nikola-specific event handler for Watchdog. Based on code from hachiko.\"\"\"\n\n def __init__(self, function, loop):\n \"\"\"Initialize the handler.\"\"\"\n self.function = function\n self.loop = loop\n\n def dispatch(self, event):\n \"\"\"Dispatch events to handler.\"\"\"\n if event.event_type in {\"opened\", \"closed\"}:\n return\n self.loop.call_soon_threadsafe(asyncio.ensure_future, self.function(event))\n\n\nclass ConfigEventHandler(NikolaEventHandler):\n \"\"\"A Nikola-specific handler for Watchdog that handles the config file (as a workaround).\"\"\"\n\n def __init__(self, configuration_filename, function, loop):\n \"\"\"Initialize the handler.\"\"\"\n super().__init__(function, loop)\n self.configuration_filename = configuration_filename\n\n def dispatch(self, event):\n \"\"\"Handle file events if they concern the configuration file.\"\"\"\n if event.src_path == self.configuration_filename:\n super().dispatch(event)\n", "path": "nikola/plugins/command/auto/__init__.py" } ]
diff --git a/nikola/plugins/command/auto/__init__.py b/nikola/plugins/command/auto/__init__.py
index a3e0d1bb34..f05dbcf19e 100644
--- a/nikola/plugins/command/auto/__init__.py
+++ b/nikola/plugins/command/auto/__init__.py
@@ -620,5 +620,5 @@ def __init__(self, configuration_filename, function, loop):
 
     def dispatch(self, event):
         """Handle file events if they concern the configuration file."""
-        if event._src_path == self.configuration_filename:
+        if event.src_path == self.configuration_filename:
             super().dispatch(event)
diff --git a/requirements-extras.txt b/requirements-extras.txt
index 1ad129de3d..d8c2259c89 100644
--- a/requirements-extras.txt
+++ b/requirements-extras.txt
@@ -10,7 +10,7 @@ notebook>=6.0.0
 ipykernel>=6.21.2
 ghp-import>=1.0.0
 aiohttp>=3.8.6
-watchdog>=0.8.3
+watchdog>=2.3.0
 ruamel.yaml>=0.15.98
 toml>=0.9.2
 html5lib>=1.0.1
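For context on the fix above: the diff swaps the private `_src_path` attribute for the public `src_path` accessor, which is the documented watchdog event API and so remains available under the `watchdog>=2.3.0` floor the diff introduces. Below is a minimal, hypothetical sketch of the same pattern outside Nikola; the paths and the five-second run time are placeholders, not taken from the record.

```
# Sketch only: filter watchdog events down to a single config file using the
# public ``event.src_path`` accessor rather than the private ``_src_path``.
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ConfigOnlyHandler(FileSystemEventHandler):
    """Forward events only when they touch the watched configuration file."""

    def __init__(self, config_path: str) -> None:
        self.config_path = config_path

    def dispatch(self, event) -> None:
        # ``src_path`` is the documented accessor and exists on all event types.
        if event.src_path == self.config_path:
            super().dispatch(event)

    def on_modified(self, event) -> None:
        print(f"config changed: {event.src_path}")


if __name__ == "__main__":
    observer = Observer()
    # Watchdog watches directories, so schedule the parent and filter by file.
    observer.schedule(ConfigOnlyHandler("/tmp/conf.py"), "/tmp", recursive=False)
    observer.start()
    try:
        time.sleep(5)
    finally:
        observer.stop()
        observer.join()
```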
nilearn__nilearn-3201
get_clusters_table returns inconsistent results depending on `cluster_threshold`

Nilearn version: 0.9.0

### Expected behavior
The example below should return identical information for the large clusters (> 10 voxels).

### Actual behavior
It does not.

### Steps and code to reproduce bug
```
from nilearn import datasets
motor_images = datasets.fetch_neurovault_motor_task()
stat_img = motor_images.images[0]
from nilearn.reporting import get_clusters_table
threshold = 3.0
table1 = get_clusters_table(stat_img, threshold, cluster_threshold=10)
table2 = get_clusters_table(stat_img, threshold, cluster_threshold=0)
print(table1)
print(table2)
```
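One way to make the expected invariant concrete is to compare the peaks of the size-thresholded table against the unthresholded one: every cluster that survives `cluster_threshold=10` should also appear, with the same peak coordinates, when `cluster_threshold=0`. The sketch below builds on the reproduction above; the `X`/`Y`/`Z` column names are assumed from `get_clusters_table`'s output and should be checked against the installed nilearn version.

```
# Consistency check (sketch): peaks of large clusters should match across tables.
# Assumes ``table1``/``table2`` from the reproduction above and that both tables
# expose peak coordinates in columns "X", "Y", "Z" (an assumption, not verified).
def peak_set(table):
    return set(map(tuple, table[["X", "Y", "Z"]].to_numpy()))

missing = peak_set(table1) - peak_set(table2)
print(f"{len(missing)} peak(s) from the thresholded table are absent without thresholding")
```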
[ { "content": "\"\"\"\nPreprocessing functions for images.\n\nSee also nilearn.signal.\n\"\"\"\n# Authors: Philippe Gervais, Alexandre Abraham\n# License: simplified BSD\n\nimport collections.abc\nimport copy\nimport warnings\n\nimport nibabel\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom .. import signal\nfrom .._utils import (_repr_niimgs,\n as_ndarray,\n check_niimg,\n check_niimg_3d,\n check_niimg_4d,\n fill_doc)\nfrom .._utils.niimg import _get_data, _safe_get_data\nfrom .._utils.niimg_conversions import _check_same_fov, _index_img\nfrom .._utils.param_validation import check_threshold\nfrom .._utils.helpers import rename_parameters\n\n\ndef get_data(img):\n \"\"\"Get the image data as a :class:`numpy.ndarray`.\n\n Parameters\n ----------\n img : Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n :class:`numpy.ndarray`\n 3D or 4D numpy array depending on the shape of `img`. This function\n preserves the type of the image data. If `img` is an in-memory Nifti image\n it returns the image data array itself -- not a copy.\n\n \"\"\"\n img = check_niimg(img)\n return _get_data(img)\n\n\ndef high_variance_confounds(imgs, n_confounds=5, percentile=2.,\n detrend=True, mask_img=None):\n \"\"\" Return confounds signals extracted from input signals with highest\n variance.\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n mask_img : Niimg-like object\n If not provided, all voxels are used.\n If provided, confounds are extracted from voxels inside the mask.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n n_confounds : :obj:`int`, optional\n Number of confounds to return. Default=5.\n\n percentile : :obj:`float`, optional\n Highest-variance signals percentile to keep before computing the\n singular value decomposition, 0. <= `percentile` <= 100.\n `mask_img.sum() * percentile / 100` must be greater than `n_confounds`.\n Default=2.\n\n detrend : :obj:`bool`, optional\n If True, detrend signals before processing. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Highest variance confounds. Shape: *(number_of_scans, n_confounds)*.\n\n Notes\n ------\n This method is related to what has been published in the literature\n as 'CompCor' (Behzadi NeuroImage 2007).\n\n The implemented algorithm does the following:\n\n - Computes the sum of squares for each signal (no mean removal).\n - Keeps a given percentile of signals with highest variance (percentile).\n - Computes an SVD of the extracted signals.\n - Returns a given number (n_confounds) of signals from the SVD with\n highest singular values.\n\n See also\n --------\n nilearn.signal.high_variance_confounds\n\n \"\"\"\n from .. 
import masking\n\n if mask_img is not None:\n sigs = masking.apply_mask(imgs, mask_img)\n else:\n # Load the data only if it doesn't need to be masked\n imgs = check_niimg_4d(imgs)\n sigs = as_ndarray(get_data(imgs))\n # Not using apply_mask here saves memory in most cases.\n del imgs # help reduce memory consumption\n sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T\n\n return signal.high_variance_confounds(sigs, n_confounds=n_confounds,\n percentile=percentile,\n detrend=detrend)\n\n\ndef _fast_smooth_array(arr):\n \"\"\"Simple smoothing which is less computationally expensive than\n applying a Gaussian filter.\n\n Only the first three dimensions of the array will be smoothed. The\n filter uses [0.2, 1, 0.2] weights in each direction and use a\n normalisation to preserve the local average value.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are\n also accepted.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Smoothed array.\n\n Notes\n -----\n Rather than calling this function directly, users are encouraged\n to call the high-level function :func:`smooth_img` with\n `fwhm='fast'`.\n\n \"\"\"\n neighbor_weight = 0.2\n # 6 neighbors in 3D if not on an edge\n nb_neighbors = 6\n # This scale ensures that a uniform array stays uniform\n # except on the array edges\n scale = 1 + nb_neighbors * neighbor_weight\n\n # Need to copy because the smoothing is done in multiple statements\n # and there does not seem to be an easy way to do it in place\n smoothed_arr = arr.copy()\n weighted_arr = neighbor_weight * arr\n\n smoothed_arr[:-1] += weighted_arr[1:]\n smoothed_arr[1:] += weighted_arr[:-1]\n smoothed_arr[:, :-1] += weighted_arr[:, 1:]\n smoothed_arr[:, 1:] += weighted_arr[:, :-1]\n smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:]\n smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1]\n smoothed_arr /= scale\n\n return smoothed_arr\n\n\n@fill_doc\ndef _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n\n affine : :class:`numpy.ndarray`\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If `fwhm='fast'`, the affine is not used and can be None.\n %(fwhm)s\n ensure_finite : :obj:`bool`, optional\n If True, replace every non-finite values (like NaNs) by zero before\n filtering. Default=True.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Filtered `arr`.\n\n Notes\n -----\n This function is most efficient with arr in C order.\n\n \"\"\"\n # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0.\n # See issue #1537\n if isinstance(fwhm, (int, float)) and (fwhm == 0.0):\n warnings.warn(\"The parameter 'fwhm' for smoothing is specified \"\n \"as {0}. 
Setting it to None \"\n \"(no smoothing will be performed)\"\n .format(fwhm))\n fwhm = None\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n arr = arr.astype(np.float32) # We don't need crazy precision.\n if copy:\n arr = arr.copy()\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n arr[np.logical_not(np.isfinite(arr))] = 0\n if isinstance(fwhm, str) and (fwhm == 'fast'):\n arr = _fast_smooth_array(arr)\n elif fwhm is not None:\n fwhm = np.asarray([fwhm]).ravel()\n fwhm = np.asarray([0. if elem is None else elem for elem in fwhm])\n affine = affine[:3, :3] # Keep only the scale part.\n fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma.\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)\n for n, s in enumerate(sigma):\n if s > 0.0:\n ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)\n return arr\n\n\n@fill_doc\ndef smooth_img(imgs, fwhm):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n In all cases, non-finite values in input image are replaced by zeros.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Image(s) to smooth (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n %(fwhm)s\n\n Returns\n -------\n :class:`nibabel.nifti1.Nifti1Image` or list of\n Filtered input image. If `imgs` is an iterable, then `filtered_img` is a\n list.\n\n \"\"\"\n\n # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug\n # See http://bugs.python.org/issue7624\n if hasattr(imgs, \"__iter__\") \\\n and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg(img)\n affine = img.affine\n filtered = _smooth_array(get_data(img), affine, fwhm=fwhm,\n ensure_finite=True, copy=True)\n ret.append(new_img_like(img, filtered, affine, copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n\n\ndef _crop_img_to(img, slices, copy=True):\n \"\"\"Crops an image to a smaller size.\n\n Crop `img` to size indicated by slices and adjust affine accordingly.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped. If slices has less entries than `img` has dimensions,\n the slices will be applied to the first `len(slices)` dimensions (See\n http://nilearn.github.io/manipulating_images/input_output.html).\n\n slices : list of slices\n Defines the range of the crop.\n E.g. [slice(20, 200), slice(40, 150), slice(0, 100)] defines a cube.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is to be copied or not. 
Default=True.\n\n Returns\n -------\n Niimg-like object\n Cropped version of the input image.\n\n offset : :obj:`list`, optional\n List of tuples representing the number of voxels removed (before, after)\n the cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n\n data = get_data(img)\n affine = img.affine\n\n cropped_data = data[tuple(slices)]\n if copy:\n cropped_data = cropped_data.copy()\n\n linear_part = affine[:3, :3]\n old_origin = affine[:3, 3]\n new_origin_voxel = np.array([s.start for s in slices])\n new_origin = old_origin + linear_part.dot(new_origin_voxel)\n\n new_affine = np.eye(4)\n new_affine[:3, :3] = linear_part\n new_affine[:3, 3] = new_origin\n\n return new_img_like(img, cropped_data, new_affine)\n\n\ndef crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):\n \"\"\"Crops an image as much as possible.\n\n Will crop `img`, removing as many zero entries as possible without\n touching non-zero entries. Will leave one voxel of zero padding\n around the obtained non-zero area in order to avoid sampling issues\n later on.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n rtol : :obj:`float`, optional\n relative tolerance (with respect to maximal absolute value of the\n image), under which values are considered negligeable and thus\n croppable. Default=1e-8.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is copied or not. Default=True.\n\n pad : :obj:`bool`, optional\n Toggles adding 1-voxel of 0s around the border. Default=True.\n\n return_offset : :obj:`bool`, optional\n Specifies whether to return a tuple of the removed padding.\n Default=False.\n\n Returns\n -------\n Niimg-like object or :obj:`tuple`\n Cropped version of the input image and, if `return_offset=True`, a tuple\n of tuples representing the number of voxels removed (before, after) the\n cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n data = get_data(img)\n infinity_norm = max(-data.min(), data.max())\n passes_threshold = np.logical_or(data < -rtol * infinity_norm,\n data > rtol * infinity_norm)\n\n if data.ndim == 4:\n passes_threshold = np.any(passes_threshold, axis=-1)\n coords = np.array(np.where(passes_threshold))\n\n # Sets full range if no data are found along the axis\n if coords.shape[1] == 0:\n start, end = [0, 0, 0], list(data.shape)\n else:\n start = coords.min(axis=1)\n end = coords.max(axis=1) + 1\n\n # pad with one voxel to avoid resampling problems\n if pad:\n start = np.maximum(start - 1, 0)\n end = np.minimum(end + 1, data.shape[:3])\n\n slices = [slice(s, e) for s, e in zip(start, end)][:3]\n cropped_im = _crop_img_to(img, slices, copy=copy)\n return cropped_im if not return_offset else (cropped_im, tuple(slices))\n\n\ndef _pad_array(array, pad_sizes):\n \"\"\"Pad an array with zeros.\n\n Pads an array with zeros as specified in `pad_sizes`.\n\n Parameters\n ----------\n array : :class:`numpy.ndarray`\n Array to pad.\n\n pad_sizes : :obj:`list`\n Padding quantity specified as\n *[x1minpad, x1maxpad, x2minpad,x2maxpad, x3minpad, ...]*.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Padded array.\n\n Raises\n ------\n ValueError\n Inconsistent min/max padding quantities.\n\n \"\"\"\n if len(pad_sizes) % 2 != 0:\n raise ValueError(\"Please specify as many 
max paddings as min\"\n \" paddings. You have specified %d arguments\" %\n len(pad_sizes))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[tuple(target_slices)] = array[source_slices].copy()\n return padded\n\n\ndef _compute_mean(imgs, target_affine=None,\n target_shape=None, smooth=False):\n from . import resampling\n input_repr = _repr_niimgs(imgs, shorten=True)\n\n imgs = check_niimg(imgs)\n mean_data = _safe_get_data(imgs)\n affine = imgs.affine\n # Free memory ASAP\n del imgs\n if mean_data.ndim not in (3, 4):\n raise ValueError('Computation expects 3D or 4D '\n 'images, but %i dimensions were given (%s)'\n % (mean_data.ndim, input_repr))\n if mean_data.ndim == 4:\n mean_data = mean_data.mean(axis=-1)\n else:\n mean_data = mean_data.copy()\n mean_data = resampling.resample_img(\n nibabel.Nifti1Image(mean_data, affine),\n target_affine=target_affine, target_shape=target_shape,\n copy=False)\n affine = mean_data.affine\n mean_data = get_data(mean_data)\n\n if smooth:\n nan_mask = np.isnan(mean_data)\n mean_data = _smooth_array(mean_data, affine=np.eye(4), fwhm=smooth,\n ensure_finite=True, copy=False)\n mean_data[nan_mask] = np.nan\n\n return mean_data, affine\n\n\ndef mean_img(imgs, target_affine=None, target_shape=None,\n verbose=0, n_jobs=1):\n \"\"\"Compute the mean of the images over time or the 4th dimension.\n\n Note that if list of 4D images are given, the mean of each 4D image is\n computed separately, and the resulting mean is computed after.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Images to be averaged over time (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n target_affine : :class:`numpy.ndarray`, optional\n If specified, the image is resampled corresponding to this new affine.\n target_affine can be a 3x3 or a 4x4 matrix.\n\n target_shape : :obj:`tuple` or :obj:`list`, optional\n If specified, the image will be resized to match this new shape.\n len(target_shape) must be equal to 3.\n A target_affine has to be specified jointly with target_shape.\n\n verbose : :obj:`int`, optional\n Controls the amount of verbosity: higher numbers give more messages\n (0 means no messages). Default=0.\n\n n_jobs : :obj:`int`, optional\n The number of CPUs to use to do the computation (-1 means\n 'all CPUs'). 
Default=1.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Mean image.\n\n See Also\n --------\n nilearn.image.math_img : For more general operations on images.\n\n \"\"\"\n is_str = isinstance(imgs, str)\n is_iterable = isinstance(imgs, collections.abc.Iterable)\n if is_str or not is_iterable:\n imgs = [imgs, ]\n\n imgs_iter = iter(imgs)\n first_img = check_niimg(next(imgs_iter))\n\n # Compute the first mean to retrieve the reference\n # target_affine and target_shape if_needed\n n_imgs = 1\n running_mean, first_affine = _compute_mean(first_img,\n target_affine=target_affine,\n target_shape=target_shape)\n\n if target_affine is None or target_shape is None:\n target_affine = first_affine\n target_shape = running_mean.shape[:3]\n\n for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_compute_mean)(n, target_affine=target_affine,\n target_shape=target_shape)\n for n in imgs_iter):\n n_imgs += 1\n # _compute_mean returns (mean_img, affine)\n this_mean = this_mean[0]\n running_mean += this_mean\n\n running_mean = running_mean / float(n_imgs)\n return new_img_like(first_img, running_mean, target_affine)\n\n\ndef swap_img_hemispheres(img):\n \"\"\"Performs swapping of hemispheres in the indicated NIfTI image.\n\n Use case: synchronizing ROIs across hemispheres.\n\n Parameters\n ----------\n img : Niimg-like object\n Images to swap (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Hemispherically swapped image.\n\n Notes\n -----\n Assumes that the image is sagitally aligned.\n\n Should be used with caution (confusion might be caused with\n radio/neuro conventions)\n\n Note that this does not require a change of the affine matrix.\n\n \"\"\"\n from .resampling import reorder_img\n\n # Check input is really a path to a nifti file or a nifti object\n img = check_niimg_3d(img)\n\n # get nifti in x-y-z order\n img = reorder_img(img)\n\n # create swapped nifti object\n out_img = new_img_like(img, get_data(img)[::-1], img.affine,\n copy_header=True)\n\n return out_img\n\n\ndef index_img(imgs, index):\n \"\"\"Indexes into a 4D Niimg-like object in the fourth dimension.\n\n Common use cases include extracting a 3D image out of `img` or\n creating a 4D image whose data is a subset of `img` data.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n index : Any type compatible with numpy array indexing\n Used for indexing the 4D data array in the fourth dimension.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Indexed image.\n\n See Also\n --------\n nilearn.image.concat_imgs\n nilearn.image.iter_img\n\n Examples\n --------\n First we concatenate two MNI152 images to create a 4D-image::\n\n >>> from nilearn import datasets\n >>> from nilearn.image import concat_imgs, index_img\n >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(),\n ... 
datasets.load_mni152_template()])\n >>> print(joint_mni_image.shape)\n (99, 117, 95, 2)\n\n We can now select one slice from the last dimension of this 4D-image::\n\n >>> single_mni_image = index_img(joint_mni_image, 1)\n >>> print(single_mni_image.shape)\n (99, 117, 95)\n\n We can also select multiple frames using the `slice` constructor::\n\n >>> five_mni_images = concat_imgs([datasets.load_mni152_template()] * 5)\n >>> print(five_mni_images.shape)\n (99, 117, 95, 5)\n\n >>> first_three_images = index_img(five_mni_images,\n ... slice(0, 3))\n >>> print(first_three_images.shape)\n (99, 117, 95, 3)\n\n \"\"\"\n imgs = check_niimg_4d(imgs)\n # duck-type for pandas arrays, and select the 'values' attr\n if hasattr(index, 'values') and hasattr(index, 'iloc'):\n index = index.values.flatten()\n return _index_img(imgs, index)\n\n\ndef iter_img(imgs):\n \"\"\"Iterates over a 4D Niimg-like object in the fourth dimension.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Iterator of 3D :class:`~nibabel.nifti1.Nifti1Image`\n\n See Also\n --------\n nilearn.image.index_img\n\n \"\"\"\n return check_niimg_4d(imgs, return_iterator=True)\n\n\ndef new_img_like(ref_niimg, data, affine=None, copy_header=False):\n \"\"\"Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg : Niimg-like object\n Reference image. The new image will be of the same type.\n\n data : :class:`numpy.ndarray`\n Data to be stored in the image.\n\n affine : 4x4 :class:`numpy.ndarray`, optional\n Transformation matrix.\n\n copy_header : :obj:`bool`, optional\n Indicated if the header of the reference image should be used to\n create the new image. Default=False.\n\n Returns\n -------\n Niimg-like object\n A loaded image with the same file type (and, optionally, header)\n as the reference image.\n\n \"\"\"\n # Hand-written loading code to avoid too much memory consumption\n orig_ref_niimg = ref_niimg\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_iter = hasattr(ref_niimg, '__iter__')\n has_affine = hasattr(ref_niimg, 'affine')\n if has_iter and not any([is_str, has_get_data, has_get_fdata]):\n ref_niimg = ref_niimg[0]\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_affine = hasattr(ref_niimg, 'affine')\n if not ((has_get_data or has_get_fdata) and has_affine):\n if is_str:\n ref_niimg = nibabel.load(ref_niimg)\n else:\n raise TypeError(('The reference image should be a niimg, %r '\n 'was passed') % orig_ref_niimg)\n\n if affine is None:\n affine = ref_niimg.affine\n if data.dtype == bool:\n default_dtype = np.int8\n if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage):\n default_dtype = np.uint8\n data = as_ndarray(data, dtype=default_dtype)\n header = None\n if copy_header:\n header = copy.deepcopy(ref_niimg.header)\n try:\n 'something' in header\n except TypeError:\n pass\n else:\n if 'scl_slope' in header:\n header['scl_slope'] = 0.\n if 'scl_inter' in header:\n header['scl_inter'] = 0.\n # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is\n # available in header. 
See issue #1611\n if 'glmax' in header:\n header['glmax'] = 0.\n if 'cal_max' in header:\n header['cal_max'] = np.max(data) if data.size > 0 else 0.\n if 'cal_min' in header:\n header['cal_min'] = np.min(data) if data.size > 0 else 0.\n klass = ref_niimg.__class__\n if klass is nibabel.Nifti1Pair:\n # Nifti1Pair is an internal class, without a to_filename,\n # we shouldn't return it\n klass = nibabel.Nifti1Image\n return klass(data, affine, header=header)\n\n\ndef _apply_cluster_size_threshold(arr, cluster_threshold, copy=True):\n \"\"\"Apply cluster-extent thresholding to an array that has already been\n voxel-wise thresholded.\n\n Parameters\n ----------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n 3D array that has been thresholded at the voxel level.\n cluster_threshold : :obj:`float`\n Cluster-size threshold, in voxels, to apply to ``arr``.\n copy : :obj:`bool`, optional\n Whether to copy the array before modifying it or not.\n Default is True.\n\n Returns\n -------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n Cluster-extent thresholded array.\n\n Notes\n -----\n Clusters are defined in a bi-sided manner;\n both negative and positive clusters are evaluated,\n but this is done separately for each sign.\n\n Clusters are defined using 6-connectivity, also known as NN1 (in AFNI) or\n \"faces\" connectivity.\n \"\"\"\n assert arr.ndim == 3\n\n if copy:\n arr = arr.copy()\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[:, 1, 1] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[1, 1, :] = 1\n\n for sign in np.sign(arr):\n # Binarize using one-sided cluster-defining threshold\n binarized = ((arr * sign) > 0).astype(int)\n\n # Apply cluster threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if np.sum(label_map == c_val) < cluster_threshold:\n arr[label_map == c_val] = 0\n\n return arr\n\n\ndef threshold_img(\n img,\n threshold,\n cluster_threshold=0,\n two_sided=True,\n mask_img=None,\n copy=True,\n):\n \"\"\"Threshold the given input image, mostly statistical or atlas images.\n\n Thresholding can be done based on direct image intensities or selection\n threshold with given percentile.\n\n .. versionchanged:: 0.9.0\n New ``cluster_threshold`` and ``two_sided`` parameters added.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image containing statistical or atlas maps which should be thresholded.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we threshold\n based on the score obtained using this percentile on the image data. The\n voxels which have intensities greater than this score will be kept.\n The given string should be within the range of \"0%\" to \"100%\".\n\n cluster_threshold : :obj:`float`, optional\n Cluster size threshold, in voxels. In the returned thresholded map,\n sets of connected voxels (``clusters``) with size smaller\n than this number will be removed. Default=0.\n\n .. versionadded:: 0.9.0\n\n two_sided : :obj:`bool`, optional\n Whether the thresholding should yield both positive and negative\n part of the maps.\n Default=True.\n\n .. 
versionadded:: 0.9.0\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Thresholded image of the given input image.\n\n See also\n --------\n nilearn.glm.threshold_stats_img :\n Threshold a statistical image using the alpha value, optionally with\n false positive control.\n\n \"\"\"\n from . import resampling\n from .. import masking\n\n img = check_niimg(img)\n img_data = _safe_get_data(img, ensure_finite=True, copy_data=copy)\n affine = img.affine\n\n if mask_img is not None:\n mask_img = check_niimg_3d(mask_img)\n if not _check_same_fov(img, mask_img):\n mask_img = resampling.resample_img(mask_img, target_affine=affine,\n target_shape=img.shape[:3],\n interpolation=\"nearest\")\n\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 for the values which are outside of the mask\n img_data[mask_data == 0.] = 0.\n\n cutoff_threshold = check_threshold(\n threshold,\n img_data,\n percentile_func=scoreatpercentile,\n name='threshold',\n )\n\n # Apply threshold\n if two_sided:\n img_data[np.abs(img_data) < cutoff_threshold] = 0.\n else:\n img_data[img_data < cutoff_threshold] = 0.\n\n # Expand to 4D to support both 3D and 4D\n expand_to_4d = img_data.ndim == 3\n if expand_to_4d:\n img_data = img_data[:, :, :, None]\n\n # Perform cluster thresholding, if requested\n if cluster_threshold > 0:\n for i_vol in range(img_data.shape[3]):\n img_data[..., i_vol] = _apply_cluster_size_threshold(\n img_data[..., i_vol],\n cluster_threshold,\n )\n\n if expand_to_4d:\n # Reduce back to 3D\n img_data = img_data[:, :, :, 0]\n\n # Reconstitute img object\n thresholded_img = new_img_like(img, img_data, affine)\n\n return thresholded_img\n\n\ndef math_img(formula, **imgs):\n \"\"\"Interpret a numpy based string formula using niimg in named parameters.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n formula : :obj:`str`\n The mathematical formula to apply to image internal data. It can use\n numpy imported as 'np'.\n\n imgs : images (:class:`~nibabel.nifti1.Nifti1Image` or file names)\n Keyword arguments corresponding to the variables in the formula as\n Nifti images. All input images should have the same geometry (shape,\n affine).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Result of the formula as a Nifti image. Note that the dimension of the\n result image can be smaller than the input image. The affine is the\n same as the input image.\n\n See Also\n --------\n nilearn.image.mean_img : To simply compute the mean of multiple images\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we can use any numpy function on this image::\n\n >>> from nilearn.image import math_img\n >>> log_img = math_img(\"np.log(img)\", img=anatomical_image)\n\n We can also apply mathematical operations on several images::\n\n >>> result_img = math_img(\"img1 + img2\",\n ... 
img1=anatomical_image, img2=log_img)\n\n Notes\n -----\n This function is the Python equivalent of ImCal in SPM or fslmaths\n in FSL.\n\n \"\"\"\n try:\n # Check that input images are valid niimg and have a compatible shape\n # and affine.\n niimgs = []\n for image in imgs.values():\n niimgs.append(check_niimg(image))\n _check_same_fov(*niimgs, raise_error=True)\n except Exception as exc:\n exc.args = ((\"Input images cannot be compared, you provided '{0}',\"\n .format(imgs.values()),) + exc.args)\n raise\n\n # Computing input data as a dictionary of numpy arrays. Keep a reference\n # niimg for building the result as a new niimg.\n niimg = None\n data_dict = {}\n for key, img in imgs.items():\n niimg = check_niimg(img)\n data_dict[key] = _safe_get_data(niimg)\n\n # Add a reference to numpy in the kwargs of eval so that numpy functions\n # can be called from there.\n data_dict['np'] = np\n try:\n result = eval(formula, data_dict)\n except Exception as exc:\n exc.args = ((\"Input formula couldn't be processed, you provided '{0}',\"\n .format(formula),) + exc.args)\n raise\n\n return new_img_like(niimg, result, niimg.affine)\n\n\ndef binarize_img(img, threshold=0, mask_img=None):\n \"\"\"Binarize an image such that its values are either 0 or 1.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image which should be binarized.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we\n threshold based on the score obtained using this percentile on\n the image data. The voxels which have intensities greater than\n this score will be kept. The given string should be\n within the range of \"0%\" to \"100%\".\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Binarized version of the given input image. Output dtype is int.\n\n See Also\n --------\n nilearn.image.threshold_img : To simply threshold but not binarize images.\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we binarize it, generating a pseudo brainmask::\n\n >>> from nilearn.image import binarize_img\n >>> img = binarize_img(anatomical_image)\n\n \"\"\"\n return math_img(\n \"img.astype(bool).astype(int)\",\n img=threshold_img(img, threshold, mask_img=mask_img)\n )\n\n\n@rename_parameters({'sessions': 'runs'}, '0.10.0')\ndef clean_img(imgs, runs=None, detrend=True, standardize=True,\n confounds=None, low_pass=None, high_pass=None, t_r=None,\n ensure_finite=False, mask_img=None):\n \"\"\"Improve SNR on masked fMRI signals.\n\n This function can do several things on the input signals, in\n the following order:\n\n - detrend\n - low- and high-pass filter\n - remove confounds\n - standardize\n\n Low-pass filtering improves specificity.\n\n High-pass filtering should be kept small, to keep some sensitivity.\n\n Filtering is only meaningful on evenly-sampled signals.\n\n According to Lindquist et al. 
(2018), removal of confounds will be done\n orthogonally to temporal filters (low- and/or high-pass filters), if both\n are specified.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image. The signals in the last dimension are filtered (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n runs : :class:`numpy.ndarray`, optional\n Add a run level to the cleaning process. Each run will be\n cleaned independently. Must be a 1D array of n_samples elements.\n\n .. warning::\n\n 'runs' replaces 'sessions' after release 0.10.0.\n Using 'session' will result in an error after release 0.10.0.\n\n Default=``None``.\n\n detrend : :obj:`bool`, optional\n If detrending should be applied on timeseries (before confound removal).\n Default=True.\n\n standardize : :obj:`bool`, optional\n If True, returned signals are set to unit variance. Default=True.\n\n confounds : :class:`numpy.ndarray`, :obj:`str` or :obj:`list` of\n Confounds timeseries. optional\n Shape must be (instant number, confound number), or just (instant number,)\n The number of time instants in signals and confounds must be\n identical (i.e. signals.shape[0] == confounds.shape[0]).\n If a string is provided, it is assumed to be the name of a csv file\n containing signals as columns, with an optional one-line header.\n If a list is provided, all confounds are removed from the input\n signal, as if all were in the same array.\n\n low_pass : :obj:`float`, optional\n Low cutoff frequencies, in Hertz.\n\n high_pass : :obj:`float`, optional\n High cutoff frequencies, in Hertz.\n\n t_r : :obj:`float`, optional\n Repetition time, in second (sampling period). Set to None if not\n specified. Mandatory if used together with `low_pass` or `high_pass`.\n\n ensure_finite : :obj:`bool`, optional\n If True, the non-finite values (NaNs and infs) found in the images\n will be replaced by zeros. Default=False.\n\n mask_img : Niimg-like object, optional\n If provided, signal is only cleaned from voxels inside the mask. If\n mask is provided, it should have same shape and affine as imgs.\n If not provided, all voxels are used.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Niimg-like object\n Input images, cleaned. Same shape as `imgs`.\n\n Notes\n -----\n Confounds removal is based on a projection on the orthogonal\n of the signal space [:footcite:`friston1994statistical`].\n\n Orthogonalization between temporal filters and confound removal is based on\n suggestions in [:footcite:`Lindquist407676`].\n\n References\n ----------\n .. footbibliography::\n\n See Also\n --------\n nilearn.signal.clean\n\n \"\"\"\n # Avoid circular import\n from .image import new_img_like\n from .. import masking\n\n imgs_ = check_niimg_4d(imgs)\n\n # Check if t_r is set, otherwise propose t_r from imgs header\n if low_pass is not None or high_pass is not None:\n if t_r is None:\n\n # We raise an error, instead of using the header's t_r as this\n # value is considered to be non-reliable\n raise ValueError(\n \"Repetition time (t_r) must be specified for filtering. You \"\n \"specified None. 
imgs header suggest it to be {0}\".format(\n imgs.header.get_zooms()[3]))\n\n # Prepare signal for cleaning\n if mask_img is not None:\n signals = masking.apply_mask(imgs_, mask_img)\n else:\n signals = get_data(imgs_).reshape(-1, imgs_.shape[-1]).T\n\n # Clean signal\n data = signal.clean(\n signals, runs=runs, detrend=detrend, standardize=standardize,\n confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r,\n ensure_finite=ensure_finite)\n\n # Put results back into Niimg-like object\n if mask_img is not None:\n imgs_ = masking.unmask(data, mask_img)\n else:\n imgs_ = new_img_like(\n imgs_, data.T.reshape(imgs_.shape), copy_header=True)\n\n return imgs_\n\n\ndef load_img(img, wildcards=True, dtype=None):\n \"\"\"Load a Niimg-like object from filenames or list of filenames.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n img : Niimg-like object\n If string, consider it as a path to NIfTI image and call `nibabel.load()`\n on it. The '~' symbol is expanded to the user home folder.\n If it is an object, check if affine attribute is present, raise\n `TypeError` otherwise.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n wildcards : :obj:`bool`, optional\n Use `img` as a regular expression to get a list of matching input\n filenames.\n If multiple files match, the returned list is sorted using an ascending\n order.\n If no file matches the regular expression, a `ValueError` exception is\n raised.\n Default=True.\n\n dtype : {dtype, \"auto\"}, optional\n Data type toward which the data should be converted. If \"auto\", the\n data will be converted to int32 if dtype is discrete and float32 if it\n is continuous.\n\n Returns\n -------\n 3D/4D Niimg-like object\n Result can be :class:`~nibabel.nifti1.Nifti1Image` or the input, as-is. It is guaranteed\n that the returned object has an affine attributes and that\n nilearn.image.get_data returns its data.\n\n \"\"\"\n return check_niimg(img, wildcards=wildcards, dtype=dtype)\n\n\ndef largest_connected_component_img(imgs):\n \"\"\"Return the largest connected component of an image or list of images.\n\n .. versionadded:: 0.3.1\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects (3D)\n Image(s) to extract the largest connected component from.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n 3D Niimg-like object or list of\n Image or list of images containing the largest connected component.\n\n Notes\n -----\n **Handling big-endian in given Nifti image**\n This function changes the existing byte-ordering information to new byte\n order, if the dtype in given Nifti image has non-native data type.\n This operation is done internally to avoid big-endian issues with\n scipy ndimage module.\n\n \"\"\"\n from .._utils.ndimage import largest_connected_component\n\n if hasattr(imgs, \"__iter__\") and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg_3d(img)\n affine = img.affine\n largest_component = largest_connected_component(_safe_get_data(img))\n ret.append(new_img_like(img, largest_component, affine,\n copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n", "path": "nilearn/image/image.py" } ]
[ { "content": "\"\"\"\nPreprocessing functions for images.\n\nSee also nilearn.signal.\n\"\"\"\n# Authors: Philippe Gervais, Alexandre Abraham\n# License: simplified BSD\n\nimport collections.abc\nimport copy\nimport warnings\n\nimport nibabel\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom .. import signal\nfrom .._utils import (_repr_niimgs,\n as_ndarray,\n check_niimg,\n check_niimg_3d,\n check_niimg_4d,\n fill_doc)\nfrom .._utils.niimg import _get_data, _safe_get_data\nfrom .._utils.niimg_conversions import _check_same_fov, _index_img\nfrom .._utils.param_validation import check_threshold\nfrom .._utils.helpers import rename_parameters\n\n\ndef get_data(img):\n \"\"\"Get the image data as a :class:`numpy.ndarray`.\n\n Parameters\n ----------\n img : Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n :class:`numpy.ndarray`\n 3D or 4D numpy array depending on the shape of `img`. This function\n preserves the type of the image data. If `img` is an in-memory Nifti image\n it returns the image data array itself -- not a copy.\n\n \"\"\"\n img = check_niimg(img)\n return _get_data(img)\n\n\ndef high_variance_confounds(imgs, n_confounds=5, percentile=2.,\n detrend=True, mask_img=None):\n \"\"\" Return confounds signals extracted from input signals with highest\n variance.\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n mask_img : Niimg-like object\n If not provided, all voxels are used.\n If provided, confounds are extracted from voxels inside the mask.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n n_confounds : :obj:`int`, optional\n Number of confounds to return. Default=5.\n\n percentile : :obj:`float`, optional\n Highest-variance signals percentile to keep before computing the\n singular value decomposition, 0. <= `percentile` <= 100.\n `mask_img.sum() * percentile / 100` must be greater than `n_confounds`.\n Default=2.\n\n detrend : :obj:`bool`, optional\n If True, detrend signals before processing. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Highest variance confounds. Shape: *(number_of_scans, n_confounds)*.\n\n Notes\n ------\n This method is related to what has been published in the literature\n as 'CompCor' (Behzadi NeuroImage 2007).\n\n The implemented algorithm does the following:\n\n - Computes the sum of squares for each signal (no mean removal).\n - Keeps a given percentile of signals with highest variance (percentile).\n - Computes an SVD of the extracted signals.\n - Returns a given number (n_confounds) of signals from the SVD with\n highest singular values.\n\n See also\n --------\n nilearn.signal.high_variance_confounds\n\n \"\"\"\n from .. 
import masking\n\n if mask_img is not None:\n sigs = masking.apply_mask(imgs, mask_img)\n else:\n # Load the data only if it doesn't need to be masked\n imgs = check_niimg_4d(imgs)\n sigs = as_ndarray(get_data(imgs))\n # Not using apply_mask here saves memory in most cases.\n del imgs # help reduce memory consumption\n sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T\n\n return signal.high_variance_confounds(sigs, n_confounds=n_confounds,\n percentile=percentile,\n detrend=detrend)\n\n\ndef _fast_smooth_array(arr):\n \"\"\"Simple smoothing which is less computationally expensive than\n applying a Gaussian filter.\n\n Only the first three dimensions of the array will be smoothed. The\n filter uses [0.2, 1, 0.2] weights in each direction and use a\n normalisation to preserve the local average value.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are\n also accepted.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Smoothed array.\n\n Notes\n -----\n Rather than calling this function directly, users are encouraged\n to call the high-level function :func:`smooth_img` with\n `fwhm='fast'`.\n\n \"\"\"\n neighbor_weight = 0.2\n # 6 neighbors in 3D if not on an edge\n nb_neighbors = 6\n # This scale ensures that a uniform array stays uniform\n # except on the array edges\n scale = 1 + nb_neighbors * neighbor_weight\n\n # Need to copy because the smoothing is done in multiple statements\n # and there does not seem to be an easy way to do it in place\n smoothed_arr = arr.copy()\n weighted_arr = neighbor_weight * arr\n\n smoothed_arr[:-1] += weighted_arr[1:]\n smoothed_arr[1:] += weighted_arr[:-1]\n smoothed_arr[:, :-1] += weighted_arr[:, 1:]\n smoothed_arr[:, 1:] += weighted_arr[:, :-1]\n smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:]\n smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1]\n smoothed_arr /= scale\n\n return smoothed_arr\n\n\n@fill_doc\ndef _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n\n affine : :class:`numpy.ndarray`\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If `fwhm='fast'`, the affine is not used and can be None.\n %(fwhm)s\n ensure_finite : :obj:`bool`, optional\n If True, replace every non-finite values (like NaNs) by zero before\n filtering. Default=True.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Filtered `arr`.\n\n Notes\n -----\n This function is most efficient with arr in C order.\n\n \"\"\"\n # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0.\n # See issue #1537\n if isinstance(fwhm, (int, float)) and (fwhm == 0.0):\n warnings.warn(\"The parameter 'fwhm' for smoothing is specified \"\n \"as {0}. 
Setting it to None \"\n \"(no smoothing will be performed)\"\n .format(fwhm))\n fwhm = None\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n arr = arr.astype(np.float32) # We don't need crazy precision.\n if copy:\n arr = arr.copy()\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n arr[np.logical_not(np.isfinite(arr))] = 0\n if isinstance(fwhm, str) and (fwhm == 'fast'):\n arr = _fast_smooth_array(arr)\n elif fwhm is not None:\n fwhm = np.asarray([fwhm]).ravel()\n fwhm = np.asarray([0. if elem is None else elem for elem in fwhm])\n affine = affine[:3, :3] # Keep only the scale part.\n fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma.\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)\n for n, s in enumerate(sigma):\n if s > 0.0:\n ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)\n return arr\n\n\n@fill_doc\ndef smooth_img(imgs, fwhm):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n In all cases, non-finite values in input image are replaced by zeros.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Image(s) to smooth (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n %(fwhm)s\n\n Returns\n -------\n :class:`nibabel.nifti1.Nifti1Image` or list of\n Filtered input image. If `imgs` is an iterable, then `filtered_img` is a\n list.\n\n \"\"\"\n\n # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug\n # See http://bugs.python.org/issue7624\n if hasattr(imgs, \"__iter__\") \\\n and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg(img)\n affine = img.affine\n filtered = _smooth_array(get_data(img), affine, fwhm=fwhm,\n ensure_finite=True, copy=True)\n ret.append(new_img_like(img, filtered, affine, copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n\n\ndef _crop_img_to(img, slices, copy=True):\n \"\"\"Crops an image to a smaller size.\n\n Crop `img` to size indicated by slices and adjust affine accordingly.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped. If slices has less entries than `img` has dimensions,\n the slices will be applied to the first `len(slices)` dimensions (See\n http://nilearn.github.io/manipulating_images/input_output.html).\n\n slices : list of slices\n Defines the range of the crop.\n E.g. [slice(20, 200), slice(40, 150), slice(0, 100)] defines a cube.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is to be copied or not. 
Default=True.\n\n Returns\n -------\n Niimg-like object\n Cropped version of the input image.\n\n offset : :obj:`list`, optional\n List of tuples representing the number of voxels removed (before, after)\n the cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n\n data = get_data(img)\n affine = img.affine\n\n cropped_data = data[tuple(slices)]\n if copy:\n cropped_data = cropped_data.copy()\n\n linear_part = affine[:3, :3]\n old_origin = affine[:3, 3]\n new_origin_voxel = np.array([s.start for s in slices])\n new_origin = old_origin + linear_part.dot(new_origin_voxel)\n\n new_affine = np.eye(4)\n new_affine[:3, :3] = linear_part\n new_affine[:3, 3] = new_origin\n\n return new_img_like(img, cropped_data, new_affine)\n\n\ndef crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):\n \"\"\"Crops an image as much as possible.\n\n Will crop `img`, removing as many zero entries as possible without\n touching non-zero entries. Will leave one voxel of zero padding\n around the obtained non-zero area in order to avoid sampling issues\n later on.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n rtol : :obj:`float`, optional\n relative tolerance (with respect to maximal absolute value of the\n image), under which values are considered negligeable and thus\n croppable. Default=1e-8.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is copied or not. Default=True.\n\n pad : :obj:`bool`, optional\n Toggles adding 1-voxel of 0s around the border. Default=True.\n\n return_offset : :obj:`bool`, optional\n Specifies whether to return a tuple of the removed padding.\n Default=False.\n\n Returns\n -------\n Niimg-like object or :obj:`tuple`\n Cropped version of the input image and, if `return_offset=True`, a tuple\n of tuples representing the number of voxels removed (before, after) the\n cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n data = get_data(img)\n infinity_norm = max(-data.min(), data.max())\n passes_threshold = np.logical_or(data < -rtol * infinity_norm,\n data > rtol * infinity_norm)\n\n if data.ndim == 4:\n passes_threshold = np.any(passes_threshold, axis=-1)\n coords = np.array(np.where(passes_threshold))\n\n # Sets full range if no data are found along the axis\n if coords.shape[1] == 0:\n start, end = [0, 0, 0], list(data.shape)\n else:\n start = coords.min(axis=1)\n end = coords.max(axis=1) + 1\n\n # pad with one voxel to avoid resampling problems\n if pad:\n start = np.maximum(start - 1, 0)\n end = np.minimum(end + 1, data.shape[:3])\n\n slices = [slice(s, e) for s, e in zip(start, end)][:3]\n cropped_im = _crop_img_to(img, slices, copy=copy)\n return cropped_im if not return_offset else (cropped_im, tuple(slices))\n\n\ndef _pad_array(array, pad_sizes):\n \"\"\"Pad an array with zeros.\n\n Pads an array with zeros as specified in `pad_sizes`.\n\n Parameters\n ----------\n array : :class:`numpy.ndarray`\n Array to pad.\n\n pad_sizes : :obj:`list`\n Padding quantity specified as\n *[x1minpad, x1maxpad, x2minpad,x2maxpad, x3minpad, ...]*.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Padded array.\n\n Raises\n ------\n ValueError\n Inconsistent min/max padding quantities.\n\n \"\"\"\n if len(pad_sizes) % 2 != 0:\n raise ValueError(\"Please specify as many 
max paddings as min\"\n \" paddings. You have specified %d arguments\" %\n len(pad_sizes))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[tuple(target_slices)] = array[source_slices].copy()\n return padded\n\n\ndef _compute_mean(imgs, target_affine=None,\n target_shape=None, smooth=False):\n from . import resampling\n input_repr = _repr_niimgs(imgs, shorten=True)\n\n imgs = check_niimg(imgs)\n mean_data = _safe_get_data(imgs)\n affine = imgs.affine\n # Free memory ASAP\n del imgs\n if mean_data.ndim not in (3, 4):\n raise ValueError('Computation expects 3D or 4D '\n 'images, but %i dimensions were given (%s)'\n % (mean_data.ndim, input_repr))\n if mean_data.ndim == 4:\n mean_data = mean_data.mean(axis=-1)\n else:\n mean_data = mean_data.copy()\n mean_data = resampling.resample_img(\n nibabel.Nifti1Image(mean_data, affine),\n target_affine=target_affine, target_shape=target_shape,\n copy=False)\n affine = mean_data.affine\n mean_data = get_data(mean_data)\n\n if smooth:\n nan_mask = np.isnan(mean_data)\n mean_data = _smooth_array(mean_data, affine=np.eye(4), fwhm=smooth,\n ensure_finite=True, copy=False)\n mean_data[nan_mask] = np.nan\n\n return mean_data, affine\n\n\ndef mean_img(imgs, target_affine=None, target_shape=None,\n verbose=0, n_jobs=1):\n \"\"\"Compute the mean of the images over time or the 4th dimension.\n\n Note that if list of 4D images are given, the mean of each 4D image is\n computed separately, and the resulting mean is computed after.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Images to be averaged over time (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n target_affine : :class:`numpy.ndarray`, optional\n If specified, the image is resampled corresponding to this new affine.\n target_affine can be a 3x3 or a 4x4 matrix.\n\n target_shape : :obj:`tuple` or :obj:`list`, optional\n If specified, the image will be resized to match this new shape.\n len(target_shape) must be equal to 3.\n A target_affine has to be specified jointly with target_shape.\n\n verbose : :obj:`int`, optional\n Controls the amount of verbosity: higher numbers give more messages\n (0 means no messages). Default=0.\n\n n_jobs : :obj:`int`, optional\n The number of CPUs to use to do the computation (-1 means\n 'all CPUs'). 
Default=1.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Mean image.\n\n See Also\n --------\n nilearn.image.math_img : For more general operations on images.\n\n \"\"\"\n is_str = isinstance(imgs, str)\n is_iterable = isinstance(imgs, collections.abc.Iterable)\n if is_str or not is_iterable:\n imgs = [imgs, ]\n\n imgs_iter = iter(imgs)\n first_img = check_niimg(next(imgs_iter))\n\n # Compute the first mean to retrieve the reference\n # target_affine and target_shape if_needed\n n_imgs = 1\n running_mean, first_affine = _compute_mean(first_img,\n target_affine=target_affine,\n target_shape=target_shape)\n\n if target_affine is None or target_shape is None:\n target_affine = first_affine\n target_shape = running_mean.shape[:3]\n\n for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_compute_mean)(n, target_affine=target_affine,\n target_shape=target_shape)\n for n in imgs_iter):\n n_imgs += 1\n # _compute_mean returns (mean_img, affine)\n this_mean = this_mean[0]\n running_mean += this_mean\n\n running_mean = running_mean / float(n_imgs)\n return new_img_like(first_img, running_mean, target_affine)\n\n\ndef swap_img_hemispheres(img):\n \"\"\"Performs swapping of hemispheres in the indicated NIfTI image.\n\n Use case: synchronizing ROIs across hemispheres.\n\n Parameters\n ----------\n img : Niimg-like object\n Images to swap (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Hemispherically swapped image.\n\n Notes\n -----\n Assumes that the image is sagitally aligned.\n\n Should be used with caution (confusion might be caused with\n radio/neuro conventions)\n\n Note that this does not require a change of the affine matrix.\n\n \"\"\"\n from .resampling import reorder_img\n\n # Check input is really a path to a nifti file or a nifti object\n img = check_niimg_3d(img)\n\n # get nifti in x-y-z order\n img = reorder_img(img)\n\n # create swapped nifti object\n out_img = new_img_like(img, get_data(img)[::-1], img.affine,\n copy_header=True)\n\n return out_img\n\n\ndef index_img(imgs, index):\n \"\"\"Indexes into a 4D Niimg-like object in the fourth dimension.\n\n Common use cases include extracting a 3D image out of `img` or\n creating a 4D image whose data is a subset of `img` data.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n index : Any type compatible with numpy array indexing\n Used for indexing the 4D data array in the fourth dimension.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Indexed image.\n\n See Also\n --------\n nilearn.image.concat_imgs\n nilearn.image.iter_img\n\n Examples\n --------\n First we concatenate two MNI152 images to create a 4D-image::\n\n >>> from nilearn import datasets\n >>> from nilearn.image import concat_imgs, index_img\n >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(),\n ... 
datasets.load_mni152_template()])\n >>> print(joint_mni_image.shape)\n (99, 117, 95, 2)\n\n We can now select one slice from the last dimension of this 4D-image::\n\n >>> single_mni_image = index_img(joint_mni_image, 1)\n >>> print(single_mni_image.shape)\n (99, 117, 95)\n\n We can also select multiple frames using the `slice` constructor::\n\n >>> five_mni_images = concat_imgs([datasets.load_mni152_template()] * 5)\n >>> print(five_mni_images.shape)\n (99, 117, 95, 5)\n\n >>> first_three_images = index_img(five_mni_images,\n ... slice(0, 3))\n >>> print(first_three_images.shape)\n (99, 117, 95, 3)\n\n \"\"\"\n imgs = check_niimg_4d(imgs)\n # duck-type for pandas arrays, and select the 'values' attr\n if hasattr(index, 'values') and hasattr(index, 'iloc'):\n index = index.values.flatten()\n return _index_img(imgs, index)\n\n\ndef iter_img(imgs):\n \"\"\"Iterates over a 4D Niimg-like object in the fourth dimension.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Iterator of 3D :class:`~nibabel.nifti1.Nifti1Image`\n\n See Also\n --------\n nilearn.image.index_img\n\n \"\"\"\n return check_niimg_4d(imgs, return_iterator=True)\n\n\ndef new_img_like(ref_niimg, data, affine=None, copy_header=False):\n \"\"\"Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg : Niimg-like object\n Reference image. The new image will be of the same type.\n\n data : :class:`numpy.ndarray`\n Data to be stored in the image.\n\n affine : 4x4 :class:`numpy.ndarray`, optional\n Transformation matrix.\n\n copy_header : :obj:`bool`, optional\n Indicated if the header of the reference image should be used to\n create the new image. Default=False.\n\n Returns\n -------\n Niimg-like object\n A loaded image with the same file type (and, optionally, header)\n as the reference image.\n\n \"\"\"\n # Hand-written loading code to avoid too much memory consumption\n orig_ref_niimg = ref_niimg\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_iter = hasattr(ref_niimg, '__iter__')\n has_affine = hasattr(ref_niimg, 'affine')\n if has_iter and not any([is_str, has_get_data, has_get_fdata]):\n ref_niimg = ref_niimg[0]\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_affine = hasattr(ref_niimg, 'affine')\n if not ((has_get_data or has_get_fdata) and has_affine):\n if is_str:\n ref_niimg = nibabel.load(ref_niimg)\n else:\n raise TypeError(('The reference image should be a niimg, %r '\n 'was passed') % orig_ref_niimg)\n\n if affine is None:\n affine = ref_niimg.affine\n if data.dtype == bool:\n default_dtype = np.int8\n if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage):\n default_dtype = np.uint8\n data = as_ndarray(data, dtype=default_dtype)\n header = None\n if copy_header:\n header = copy.deepcopy(ref_niimg.header)\n try:\n 'something' in header\n except TypeError:\n pass\n else:\n if 'scl_slope' in header:\n header['scl_slope'] = 0.\n if 'scl_inter' in header:\n header['scl_inter'] = 0.\n # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is\n # available in header. 
See issue #1611\n if 'glmax' in header:\n header['glmax'] = 0.\n if 'cal_max' in header:\n header['cal_max'] = np.max(data) if data.size > 0 else 0.\n if 'cal_min' in header:\n header['cal_min'] = np.min(data) if data.size > 0 else 0.\n klass = ref_niimg.__class__\n if klass is nibabel.Nifti1Pair:\n # Nifti1Pair is an internal class, without a to_filename,\n # we shouldn't return it\n klass = nibabel.Nifti1Image\n return klass(data, affine, header=header)\n\n\ndef _apply_cluster_size_threshold(arr, cluster_threshold, copy=True):\n \"\"\"Apply cluster-extent thresholding to an array that has already been\n voxel-wise thresholded.\n\n Parameters\n ----------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n 3D array that has been thresholded at the voxel level.\n cluster_threshold : :obj:`float`\n Cluster-size threshold, in voxels, to apply to ``arr``.\n copy : :obj:`bool`, optional\n Whether to copy the array before modifying it or not.\n Default is True.\n\n Returns\n -------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n Cluster-extent thresholded array.\n\n Notes\n -----\n Clusters are defined in a bi-sided manner;\n both negative and positive clusters are evaluated,\n but this is done separately for each sign.\n\n Clusters are defined using 6-connectivity, also known as NN1 (in AFNI) or\n \"faces\" connectivity.\n \"\"\"\n assert arr.ndim == 3\n\n if copy:\n arr = arr.copy()\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[:, 1, 1] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[1, 1, :] = 1\n\n for sign in np.unique(np.sign(arr)):\n # Binarize using one-sided cluster-defining threshold\n binarized = ((arr * sign) > 0).astype(int)\n\n # Apply cluster threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if np.sum(label_map == c_val) < cluster_threshold:\n arr[label_map == c_val] = 0\n\n return arr\n\n\ndef threshold_img(\n img,\n threshold,\n cluster_threshold=0,\n two_sided=True,\n mask_img=None,\n copy=True,\n):\n \"\"\"Threshold the given input image, mostly statistical or atlas images.\n\n Thresholding can be done based on direct image intensities or selection\n threshold with given percentile.\n\n .. versionchanged:: 0.9.0\n New ``cluster_threshold`` and ``two_sided`` parameters added.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image containing statistical or atlas maps which should be thresholded.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we threshold\n based on the score obtained using this percentile on the image data. The\n voxels which have intensities greater than this score will be kept.\n The given string should be within the range of \"0%\" to \"100%\".\n\n cluster_threshold : :obj:`float`, optional\n Cluster size threshold, in voxels. In the returned thresholded map,\n sets of connected voxels (``clusters``) with size smaller\n than this number will be removed. Default=0.\n\n .. versionadded:: 0.9.0\n\n two_sided : :obj:`bool`, optional\n Whether the thresholding should yield both positive and negative\n part of the maps.\n Default=True.\n\n .. 
versionadded:: 0.9.0\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Thresholded image of the given input image.\n\n See also\n --------\n nilearn.glm.threshold_stats_img :\n Threshold a statistical image using the alpha value, optionally with\n false positive control.\n\n \"\"\"\n from . import resampling\n from .. import masking\n\n img = check_niimg(img)\n img_data = _safe_get_data(img, ensure_finite=True, copy_data=copy)\n affine = img.affine\n\n if mask_img is not None:\n mask_img = check_niimg_3d(mask_img)\n if not _check_same_fov(img, mask_img):\n mask_img = resampling.resample_img(mask_img, target_affine=affine,\n target_shape=img.shape[:3],\n interpolation=\"nearest\")\n\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 for the values which are outside of the mask\n img_data[mask_data == 0.] = 0.\n\n cutoff_threshold = check_threshold(\n threshold,\n img_data,\n percentile_func=scoreatpercentile,\n name='threshold',\n )\n\n # Apply threshold\n if two_sided:\n img_data[np.abs(img_data) < cutoff_threshold] = 0.\n else:\n img_data[img_data < cutoff_threshold] = 0.\n\n # Expand to 4D to support both 3D and 4D\n expand_to_4d = img_data.ndim == 3\n if expand_to_4d:\n img_data = img_data[:, :, :, None]\n\n # Perform cluster thresholding, if requested\n if cluster_threshold > 0:\n for i_vol in range(img_data.shape[3]):\n img_data[..., i_vol] = _apply_cluster_size_threshold(\n img_data[..., i_vol],\n cluster_threshold,\n )\n\n if expand_to_4d:\n # Reduce back to 3D\n img_data = img_data[:, :, :, 0]\n\n # Reconstitute img object\n thresholded_img = new_img_like(img, img_data, affine)\n\n return thresholded_img\n\n\ndef math_img(formula, **imgs):\n \"\"\"Interpret a numpy based string formula using niimg in named parameters.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n formula : :obj:`str`\n The mathematical formula to apply to image internal data. It can use\n numpy imported as 'np'.\n\n imgs : images (:class:`~nibabel.nifti1.Nifti1Image` or file names)\n Keyword arguments corresponding to the variables in the formula as\n Nifti images. All input images should have the same geometry (shape,\n affine).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Result of the formula as a Nifti image. Note that the dimension of the\n result image can be smaller than the input image. The affine is the\n same as the input image.\n\n See Also\n --------\n nilearn.image.mean_img : To simply compute the mean of multiple images\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we can use any numpy function on this image::\n\n >>> from nilearn.image import math_img\n >>> log_img = math_img(\"np.log(img)\", img=anatomical_image)\n\n We can also apply mathematical operations on several images::\n\n >>> result_img = math_img(\"img1 + img2\",\n ... 
img1=anatomical_image, img2=log_img)\n\n Notes\n -----\n This function is the Python equivalent of ImCal in SPM or fslmaths\n in FSL.\n\n \"\"\"\n try:\n # Check that input images are valid niimg and have a compatible shape\n # and affine.\n niimgs = []\n for image in imgs.values():\n niimgs.append(check_niimg(image))\n _check_same_fov(*niimgs, raise_error=True)\n except Exception as exc:\n exc.args = ((\"Input images cannot be compared, you provided '{0}',\"\n .format(imgs.values()),) + exc.args)\n raise\n\n # Computing input data as a dictionary of numpy arrays. Keep a reference\n # niimg for building the result as a new niimg.\n niimg = None\n data_dict = {}\n for key, img in imgs.items():\n niimg = check_niimg(img)\n data_dict[key] = _safe_get_data(niimg)\n\n # Add a reference to numpy in the kwargs of eval so that numpy functions\n # can be called from there.\n data_dict['np'] = np\n try:\n result = eval(formula, data_dict)\n except Exception as exc:\n exc.args = ((\"Input formula couldn't be processed, you provided '{0}',\"\n .format(formula),) + exc.args)\n raise\n\n return new_img_like(niimg, result, niimg.affine)\n\n\ndef binarize_img(img, threshold=0, mask_img=None):\n \"\"\"Binarize an image such that its values are either 0 or 1.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image which should be binarized.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we\n threshold based on the score obtained using this percentile on\n the image data. The voxels which have intensities greater than\n this score will be kept. The given string should be\n within the range of \"0%\" to \"100%\".\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Binarized version of the given input image. Output dtype is int.\n\n See Also\n --------\n nilearn.image.threshold_img : To simply threshold but not binarize images.\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we binarize it, generating a pseudo brainmask::\n\n >>> from nilearn.image import binarize_img\n >>> img = binarize_img(anatomical_image)\n\n \"\"\"\n return math_img(\n \"img.astype(bool).astype(int)\",\n img=threshold_img(img, threshold, mask_img=mask_img)\n )\n\n\n@rename_parameters({'sessions': 'runs'}, '0.10.0')\ndef clean_img(imgs, runs=None, detrend=True, standardize=True,\n confounds=None, low_pass=None, high_pass=None, t_r=None,\n ensure_finite=False, mask_img=None):\n \"\"\"Improve SNR on masked fMRI signals.\n\n This function can do several things on the input signals, in\n the following order:\n\n - detrend\n - low- and high-pass filter\n - remove confounds\n - standardize\n\n Low-pass filtering improves specificity.\n\n High-pass filtering should be kept small, to keep some sensitivity.\n\n Filtering is only meaningful on evenly-sampled signals.\n\n According to Lindquist et al. 
(2018), removal of confounds will be done\n orthogonally to temporal filters (low- and/or high-pass filters), if both\n are specified.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image. The signals in the last dimension are filtered (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n runs : :class:`numpy.ndarray`, optional\n Add a run level to the cleaning process. Each run will be\n cleaned independently. Must be a 1D array of n_samples elements.\n\n .. warning::\n\n 'runs' replaces 'sessions' after release 0.10.0.\n Using 'session' will result in an error after release 0.10.0.\n\n Default=``None``.\n\n detrend : :obj:`bool`, optional\n If detrending should be applied on timeseries (before confound removal).\n Default=True.\n\n standardize : :obj:`bool`, optional\n If True, returned signals are set to unit variance. Default=True.\n\n confounds : :class:`numpy.ndarray`, :obj:`str` or :obj:`list` of\n Confounds timeseries. optional\n Shape must be (instant number, confound number), or just (instant number,)\n The number of time instants in signals and confounds must be\n identical (i.e. signals.shape[0] == confounds.shape[0]).\n If a string is provided, it is assumed to be the name of a csv file\n containing signals as columns, with an optional one-line header.\n If a list is provided, all confounds are removed from the input\n signal, as if all were in the same array.\n\n low_pass : :obj:`float`, optional\n Low cutoff frequencies, in Hertz.\n\n high_pass : :obj:`float`, optional\n High cutoff frequencies, in Hertz.\n\n t_r : :obj:`float`, optional\n Repetition time, in second (sampling period). Set to None if not\n specified. Mandatory if used together with `low_pass` or `high_pass`.\n\n ensure_finite : :obj:`bool`, optional\n If True, the non-finite values (NaNs and infs) found in the images\n will be replaced by zeros. Default=False.\n\n mask_img : Niimg-like object, optional\n If provided, signal is only cleaned from voxels inside the mask. If\n mask is provided, it should have same shape and affine as imgs.\n If not provided, all voxels are used.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Niimg-like object\n Input images, cleaned. Same shape as `imgs`.\n\n Notes\n -----\n Confounds removal is based on a projection on the orthogonal\n of the signal space [:footcite:`friston1994statistical`].\n\n Orthogonalization between temporal filters and confound removal is based on\n suggestions in [:footcite:`Lindquist407676`].\n\n References\n ----------\n .. footbibliography::\n\n See Also\n --------\n nilearn.signal.clean\n\n \"\"\"\n # Avoid circular import\n from .image import new_img_like\n from .. import masking\n\n imgs_ = check_niimg_4d(imgs)\n\n # Check if t_r is set, otherwise propose t_r from imgs header\n if low_pass is not None or high_pass is not None:\n if t_r is None:\n\n # We raise an error, instead of using the header's t_r as this\n # value is considered to be non-reliable\n raise ValueError(\n \"Repetition time (t_r) must be specified for filtering. You \"\n \"specified None. 
imgs header suggest it to be {0}\".format(\n imgs.header.get_zooms()[3]))\n\n # Prepare signal for cleaning\n if mask_img is not None:\n signals = masking.apply_mask(imgs_, mask_img)\n else:\n signals = get_data(imgs_).reshape(-1, imgs_.shape[-1]).T\n\n # Clean signal\n data = signal.clean(\n signals, runs=runs, detrend=detrend, standardize=standardize,\n confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r,\n ensure_finite=ensure_finite)\n\n # Put results back into Niimg-like object\n if mask_img is not None:\n imgs_ = masking.unmask(data, mask_img)\n else:\n imgs_ = new_img_like(\n imgs_, data.T.reshape(imgs_.shape), copy_header=True)\n\n return imgs_\n\n\ndef load_img(img, wildcards=True, dtype=None):\n \"\"\"Load a Niimg-like object from filenames or list of filenames.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n img : Niimg-like object\n If string, consider it as a path to NIfTI image and call `nibabel.load()`\n on it. The '~' symbol is expanded to the user home folder.\n If it is an object, check if affine attribute is present, raise\n `TypeError` otherwise.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n wildcards : :obj:`bool`, optional\n Use `img` as a regular expression to get a list of matching input\n filenames.\n If multiple files match, the returned list is sorted using an ascending\n order.\n If no file matches the regular expression, a `ValueError` exception is\n raised.\n Default=True.\n\n dtype : {dtype, \"auto\"}, optional\n Data type toward which the data should be converted. If \"auto\", the\n data will be converted to int32 if dtype is discrete and float32 if it\n is continuous.\n\n Returns\n -------\n 3D/4D Niimg-like object\n Result can be :class:`~nibabel.nifti1.Nifti1Image` or the input, as-is. It is guaranteed\n that the returned object has an affine attributes and that\n nilearn.image.get_data returns its data.\n\n \"\"\"\n return check_niimg(img, wildcards=wildcards, dtype=dtype)\n\n\ndef largest_connected_component_img(imgs):\n \"\"\"Return the largest connected component of an image or list of images.\n\n .. versionadded:: 0.3.1\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects (3D)\n Image(s) to extract the largest connected component from.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n 3D Niimg-like object or list of\n Image or list of images containing the largest connected component.\n\n Notes\n -----\n **Handling big-endian in given Nifti image**\n This function changes the existing byte-ordering information to new byte\n order, if the dtype in given Nifti image has non-native data type.\n This operation is done internally to avoid big-endian issues with\n scipy ndimage module.\n\n \"\"\"\n from .._utils.ndimage import largest_connected_component\n\n if hasattr(imgs, \"__iter__\") and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg_3d(img)\n affine = img.affine\n largest_component = largest_connected_component(_safe_get_data(img))\n ret.append(new_img_like(img, largest_component, affine,\n copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n", "path": "nilearn/image/image.py" } ]
diff --git a/doc/changes/latest.rst b/doc/changes/latest.rst index 2be686b36c..8dbda80dcc 100644 --- a/doc/changes/latest.rst +++ b/doc/changes/latest.rst @@ -16,6 +16,7 @@ Fixes - Fix function :func:`~mass_univariate.permuted_ols`, which was only returning the null distribution (``h0_fmax``) for the first regressor (:gh:`3184` by `Taylor Salo`_). - Fix function :func:`~datasets.fetch_abide_pcp` which was returning empty phenotypes and ``func_preproc`` after release ``0.9.0`` due to supporting pandas dataframes in fetchers (:gh:`3174` by `Nicolas Gensollen`_). - Fix function :func:`~datasets.fetch_atlas_harvard_oxford` and :func:`~datasets.fetch_atlas_juelich` which were returning the image in the `filename` attribute instead of the path to the image (:gh:`3179` by `Raphael Meudec`_). +- Fix function ``nilearn.image._apply_cluster_size_threshold``, which resulted in wrong clusters extraction when cluster_size was non-zero (:gh:`3201` by `Bertrand Thirion`_). - Fix colorbars in :func:`~plotting.plot_stat_map`, :func:`~plotting.plot_glass_brain` and :func:`~plotting.plot_surf_stat_map` which could extend beyond the figure for users with newest matplotlib version (>=3.5.1) (:gh:`3188` by `Raphael Meudec`_) Enhancements diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 05770b4ead..68ed191926 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -795,7 +795,7 @@ def _apply_cluster_size_threshold(arr, cluster_threshold, copy=True): conn_mat[1, :, 1] = 1 conn_mat[1, 1, :] = 1 - for sign in np.sign(arr): + for sign in np.unique(np.sign(arr)): # Binarize using one-sided cluster-defining threshold binarized = ((arr * sign) > 0).astype(int) diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py index e76f6a273c..3d7e146721 100644 --- a/nilearn/image/tests/test_image.py +++ b/nilearn/image/tests/test_image.py @@ -585,7 +585,7 @@ def test_threshold_img_with_cluster_threshold(): copy=True, ) assert np.array_equal(np.unique(thr_img.get_fdata()), np.array([-4, 0, 4])) - + assert np.sum(thr_img.get_fdata() == 4) == 8 # With a cluster threshold of 5 we get clusters with |values| > 0.5 and # cluster sizes > 5 thr_img = threshold_img(
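The functional change in the diff above is the single line in `_apply_cluster_size_threshold` that iterates over `np.unique(np.sign(arr))` instead of `np.sign(arr)`. As a minimal sketch of why the old loop extracted clusters incorrectly (the array values below are made up for illustration and are not taken from the nilearn tests):

```
import numpy as np

# Hypothetical voxel-thresholded map with one positive and one negative voxel.
arr = np.zeros((3, 3, 3))
arr[0, 0, 0] = 4.0
arr[2, 2, 2] = -4.0

# Old loop: np.sign(arr) is itself a (3, 3, 3) array, so iterating it yields
# whole (3, 3) slices, never the scalar -1/0/+1 the binarization step expects.
for sign in np.sign(arr):
    print(type(sign).__name__, sign.shape)   # ndarray (3, 3), printed three times

# New loop: the distinct scalar signs actually present in the data.
print(np.unique(np.sign(arr)))               # [-1.  0.  1.]
```

With scalar signs, `(arr * sign) > 0` binarizes one side of the map at a time before connected-component labelling, which is what the cluster-size threshold is meant to operate on.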
apluslms__a-plus-204
Numbers should be safe characters in file names If the attachment for the "Exercise with attachment" is named, for example, "2.zip", the name stored for that file in the database will be just ".zip", because numbers are not considered safe characters: https://github.com/Aalto-LeTech/a-plus/blob/a86bf/lib/helpers.py#L62
[ { "content": "from django.conf import settings\nfrom random import choice\nfrom PIL import Image\nimport string\nimport urllib\n\n\ndef extract_form_errors(form):\n \"\"\"\n Extracts Django form errors to a list of error messages.\n \"\"\"\n errors = []\n for field in form.errors:\n for err in form.errors[field]:\n errors.append(\"%s: %s\" % (field, err))\n return errors\n\n\ndef get_random_string(length=32):\n \"\"\"\n This function creates a random string with a given length.\n The strings consist of upper and lower case letters and numbers.\n\n @param length: the length of the randomized string, defaults to 32\n @return: a random string containing lower and upper case letters and digits\n \"\"\"\n\n # Use all letters and numbers in the identifier\n choices = string.ascii_letters + string.digits\n\n return ''.join([choice(choices) for _ in range(length)])\n\n\ndef query_dict_to_list_of_tuples(query_dict):\n \"\"\"\n This helper function creates a list of tuples with the values\n from a QueryDict object. In a QueryDict the same key can have\n several values, which is not possible with a typical dict nor a JSON\n object. The resulting list will be similar to [(key1, value1), (key2, value2)].\n\n @param query_dict: a QueryDict object\n @return: a list of tuples with the same keys and values as in the given QueryDict\n \"\"\"\n list_of_tuples = []\n for key in query_dict:\n for val in query_dict.getlist(key):\n list_of_tuples.append((key, val))\n return list_of_tuples\n\n\ndef update_url_params(url, params):\n delimiter = \"&\" if \"?\" in url else \"?\"\n return url + delimiter + urllib.parse.urlencode(params)\n\n\ndef has_same_domain(url1, url2):\n uri1 = urllib.parse.urlparse(url1)\n uri2 = urllib.parse.urlparse(url2)\n return uri1.netloc == uri2.netloc\n\n\nFILENAME_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-\"\n\ndef safe_file_name(name):\n safename = \"\".join(c for c in name if c in FILENAME_CHARS)\n if safename[0] == \"-\":\n return \"_\" + safename[1:80]\n return safename[:80]\n\n\ndef resize_image(path, max_size):\n image = Image.open(path)\n image.thumbnail(max_size, Image.ANTIALIAS)\n image.save(path)\n\n\ndef roman_numeral(number):\n numbers = [1000,900,500,400,100,90,50,40,10,9,5,4,1];\n letters = [\"M\",\"CM\",\"D\",\"CD\",\"C\",\"XC\",\"L\",\"XL\",\"X\",\"IX\",\"V\",\"IV\",\"I\"];\n roman = \"\"\n for i in range(len(numbers)):\n while number >= numbers[i]:\n roman += letters[i]\n number -= numbers[i]\n return roman\n\n\ndef settings_text(request, key):\n def get(name):\n if hasattr(settings, name):\n return getattr(settings, name)\n return None\n return get('{}_{}'.format(key, request.LANGUAGE_CODE.upper())) or get(key)\n", "path": "lib/helpers.py" } ]
[ { "content": "from django.conf import settings\nfrom random import choice\nfrom PIL import Image\nimport string\nimport urllib\n\n\ndef extract_form_errors(form):\n \"\"\"\n Extracts Django form errors to a list of error messages.\n \"\"\"\n errors = []\n for field in form.errors:\n for err in form.errors[field]:\n errors.append(\"%s: %s\" % (field, err))\n return errors\n\n\ndef get_random_string(length=32):\n \"\"\"\n This function creates a random string with a given length.\n The strings consist of upper and lower case letters and numbers.\n\n @param length: the length of the randomized string, defaults to 32\n @return: a random string containing lower and upper case letters and digits\n \"\"\"\n\n # Use all letters and numbers in the identifier\n choices = string.ascii_letters + string.digits\n\n return ''.join([choice(choices) for _ in range(length)])\n\n\ndef query_dict_to_list_of_tuples(query_dict):\n \"\"\"\n This helper function creates a list of tuples with the values\n from a QueryDict object. In a QueryDict the same key can have\n several values, which is not possible with a typical dict nor a JSON\n object. The resulting list will be similar to [(key1, value1), (key2, value2)].\n\n @param query_dict: a QueryDict object\n @return: a list of tuples with the same keys and values as in the given QueryDict\n \"\"\"\n list_of_tuples = []\n for key in query_dict:\n for val in query_dict.getlist(key):\n list_of_tuples.append((key, val))\n return list_of_tuples\n\n\ndef update_url_params(url, params):\n delimiter = \"&\" if \"?\" in url else \"?\"\n return url + delimiter + urllib.parse.urlencode(params)\n\n\ndef has_same_domain(url1, url2):\n uri1 = urllib.parse.urlparse(url1)\n uri2 = urllib.parse.urlparse(url2)\n return uri1.netloc == uri2.netloc\n\n\nFILENAME_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-0123456789\"\n\ndef safe_file_name(name):\n safename = \"\".join(c for c in name if c in FILENAME_CHARS)\n if safename[0] == \"-\":\n return \"_\" + safename[1:80]\n return safename[:80]\n\n\ndef resize_image(path, max_size):\n image = Image.open(path)\n image.thumbnail(max_size, Image.ANTIALIAS)\n image.save(path)\n\n\ndef roman_numeral(number):\n numbers = [1000,900,500,400,100,90,50,40,10,9,5,4,1];\n letters = [\"M\",\"CM\",\"D\",\"CD\",\"C\",\"XC\",\"L\",\"XL\",\"X\",\"IX\",\"V\",\"IV\",\"I\"];\n roman = \"\"\n for i in range(len(numbers)):\n while number >= numbers[i]:\n roman += letters[i]\n number -= numbers[i]\n return roman\n\n\ndef settings_text(request, key):\n def get(name):\n if hasattr(settings, name):\n return getattr(settings, name)\n return None\n return get('{}_{}'.format(key, request.LANGUAGE_CODE.upper())) or get(key)\n", "path": "lib/helpers.py" } ]
diff --git a/lib/helpers.py b/lib/helpers.py index 46744cf69..3384084ae 100644 --- a/lib/helpers.py +++ b/lib/helpers.py @@ -59,7 +59,7 @@ def has_same_domain(url1, url2): return uri1.netloc == uri2.netloc -FILENAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-" +FILENAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-0123456789" def safe_file_name(name): safename = "".join(c for c in name if c in FILENAME_CHARS)
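The fix above amounts to appending `0123456789` to `FILENAME_CHARS`. A short sketch of the behaviour reported in the issue; the allowed character set is passed in as an extra parameter here purely for illustration, whereas the real `safe_file_name` reads the module-level constant:

```
OLD_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-"
NEW_CHARS = OLD_CHARS + "0123456789"

def safe_file_name(name, allowed):
    # Keep only whitelisted characters, then cap the length at 80.
    safename = "".join(c for c in name if c in allowed)
    if safename[0] == "-":
        return "_" + safename[1:80]
    return safename[:80]

print(safe_file_name("2.zip", OLD_CHARS))   # ".zip"  -- the digit is dropped
print(safe_file_name("2.zip", NEW_CHARS))   # "2.zip" -- digits are now kept
```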
conda__conda-2896
conda throws error if allow_other_channels setting is used The feature to lock down what channels your users are allowed to use stopped working http://conda.pydata.org/docs/install/central.html#allow-other-channels-allow-other-channels Reproduced this error in Windows 10 and OS X 10.11.5, if you use this setting in the systemwide .condarc file. ``` $ cat /Users/jenns/anaconda/.condarc allow_other_channels: False channels: - defaults ``` ``` $ conda info Traceback (most recent call last): File "/Users/jenns/anaconda/bin/conda", line 6, in <module> sys.exit(main()) File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/cli/main.py", line 61, in main from conda.cli import conda_argparse File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/cli/conda_argparse.py", line 15, in <module> from .common import add_parser_help File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/cli/common.py", line 12, in <module> from ..config import (envs_dirs, default_prefix, platform, update_dependencies, File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/config.py", line 331, in <module> allowed_channels = get_allowed_channels() File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/config.py", line 329, in get_allowed_channels return normalize_urls(base_urls) File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/config.py", line 253, in normalize_urls urls = get_rc_urls() + urls File "/Users/jenns/anaconda/lib/python2.7/site-packages/conda/config.py", line 197, in get_rc_urls if rc.get('channels') is None: AttributeError: 'NoneType' object has no attribute 'get' ```
[ { "content": "# (c) 2012-2015 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom os.path import abspath, expanduser, isfile, isdir, join\nfrom platform import machine\n\nfrom .compat import urlparse, string_types\nfrom .utils import try_write, yaml_load\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndefault_python = '%d.%d' % sys.version_info[:2]\n# CONDA_FORCE_32BIT should only be used when running conda-build (in order\n# to build 32-bit packages on a 64-bit system). We don't want to mention it\n# in the documentation, because it can mess up a lot of things.\nforce_32bit = bool(int(os.getenv('CONDA_FORCE_32BIT', 0)))\n\n# ----- operating system and architecture -----\n\n_sys_map = {\n 'linux2': 'linux',\n 'linux': 'linux',\n 'darwin': 'osx',\n 'win32': 'win',\n 'openbsd5': 'openbsd',\n}\nnon_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}\nplatform = _sys_map.get(sys.platform, 'unknown')\nbits = 8 * tuple.__itemsize__\nif force_32bit:\n bits = 32\n\nif platform == 'linux' and machine() in non_x86_linux_machines:\n arch_name = machine()\n subdir = 'linux-%s' % arch_name\nelse:\n arch_name = {64: 'x86_64', 32: 'x86'}[bits]\n subdir = '%s-%d' % (platform, bits)\n\n# ----- rc file -----\n\n# This is used by conda config to check which keys are allowed in the config\n# file. Be sure to update it when new keys are added.\n\n#################################################################\n# Also update the example condarc file when you add a key here! 
#\n#################################################################\n\nrc_list_keys = [\n 'channels',\n 'disallow',\n 'create_default_packages',\n 'track_features',\n 'envs_dirs',\n 'default_channels',\n]\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'\n\nADD_BINSTAR_TOKEN = True\n\nrc_bool_keys = [\n 'add_binstar_token',\n 'add_anaconda_token',\n 'add_pip_as_python_dependency',\n 'always_yes',\n 'always_copy',\n 'allow_softlinks',\n 'auto_update_conda',\n 'changeps1',\n 'use_pip',\n 'offline',\n 'binstar_upload',\n 'anaconda_upload',\n 'show_channel_urls',\n 'allow_other_channels',\n 'update_dependencies',\n 'channel_priority',\n]\n\nrc_string_keys = [\n 'ssl_verify',\n 'channel_alias',\n 'root_dir',\n]\n\n# Not supported by conda config yet\nrc_other = [\n 'proxy_servers',\n]\n\nuser_rc_path = abspath(expanduser('~/.condarc'))\nsys_rc_path = join(sys.prefix, '.condarc')\nlocal_channel = []\nrc = root_dir = root_writable = BINSTAR_TOKEN_PAT = channel_alias = None\n\ndef get_rc_path():\n path = os.getenv('CONDARC')\n if path == ' ':\n return None\n if path:\n return path\n for path in user_rc_path, sys_rc_path:\n if isfile(path):\n return path\n return None\n\nrc_path = get_rc_path()\n\ndef load_condarc_(path):\n if not path or not isfile(path):\n return {}\n with open(path) as f:\n return yaml_load(f) or {}\n\nsys_rc = load_condarc_(sys_rc_path) if isfile(sys_rc_path) else {}\n\n# ----- local directories -----\n\n# root_dir should only be used for testing, which is why don't mention it in\n# the documentation, to avoid confusion (it can really mess up a lot of\n# things)\nroot_env_name = 'root'\n\ndef _default_envs_dirs():\n lst = [join(root_dir, 'envs')]\n if not root_writable:\n # ~/envs for backwards compatibility\n lst = ['~/.conda/envs', '~/envs'] + lst\n return lst\n\ndef _pathsep_env(name):\n x = os.getenv(name)\n if x is None:\n return []\n res = []\n for path in x.split(os.pathsep):\n if path == 'DEFAULTS':\n for p in rc.get('envs_dirs') or _default_envs_dirs():\n res.append(p)\n else:\n res.append(path)\n return res\n\ndef pkgs_dir_from_envs_dir(envs_dir):\n if abspath(envs_dir) == abspath(join(root_dir, 'envs')):\n return join(root_dir, 'pkgs32' if force_32bit else 'pkgs')\n else:\n return join(envs_dir, '.pkgs')\n\n# ----- channels -----\n\n# Note, get_*_urls() return unnormalized urls.\n\ndef get_local_urls(clear_cache=True):\n # remove the cache such that a refetch is made,\n # this is necessary because we add the local build repo URL\n if clear_cache:\n from .fetch import fetch_index\n fetch_index.cache = {}\n if local_channel:\n return local_channel\n from os.path import exists\n from .utils import url_path\n try:\n from conda_build.config import croot\n if exists(croot):\n local_channel.append(url_path(croot))\n except ImportError:\n pass\n return local_channel\n\ndefaults_ = ['https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro']\n\ndef get_default_urls(merged=False):\n if 'default_channels' in sys_rc:\n res = sys_rc['default_channels']\n if merged:\n res = list(res)\n res.extend(c for c in defaults_ if c not in res)\n return res\n return defaults_\n\ndef get_rc_urls():\n if rc.get('channels') is None:\n return []\n if 'system' in rc['channels']:\n raise RuntimeError(\"system cannot be used in .condarc\")\n return rc['channels']\n\ndef is_url(url):\n if url:\n p = urlparse.urlparse(url)\n return p.netloc != \"\" or p.scheme == \"file\"\n\ndef binstar_channel_alias(channel_alias):\n if channel_alias.startswith('file:/'):\n return 
channel_alias\n if rc.get('add_anaconda_token',\n rc.get('add_binstar_token', ADD_BINSTAR_TOKEN)):\n try:\n from binstar_client.utils import get_binstar\n bs = get_binstar()\n bs_domain = bs.domain.replace(\"api\", \"conda\").rstrip('/') + '/'\n if channel_alias.startswith(bs_domain) and bs.token:\n channel_alias += 't/%s/' % bs.token\n except ImportError:\n log.debug(\"Could not import binstar\")\n pass\n except Exception as e:\n stderrlog.info(\"Warning: could not import binstar_client (%s)\" % e)\n return channel_alias\n\ndef hide_binstar_tokens(url):\n return BINSTAR_TOKEN_PAT.sub(r'\\1t/<TOKEN>/', url)\n\ndef remove_binstar_tokens(url):\n return BINSTAR_TOKEN_PAT.sub(r'\\1', url)\n\ndef prioritize_channels(channels):\n newchans = OrderedDict()\n lastchan = None\n priority = 0\n for channel in channels:\n channel = channel.rstrip('/') + '/'\n if channel not in newchans:\n channel_s = canonical_channel_name(channel.rsplit('/', 2)[0])\n priority += channel_s != lastchan\n newchans[channel] = (channel_s, priority)\n lastchan = channel_s\n return newchans\n\ndef normalize_urls(urls, platform=None, offline_only=False):\n defaults = tuple(x.rstrip('/') + '/' for x in get_default_urls(False))\n alias = None\n newurls = []\n while urls:\n url = urls[0]\n urls = urls[1:]\n if url == \"system\" and rc_path:\n urls = get_rc_urls() + urls\n continue\n elif url in (\"defaults\", \"system\"):\n t_urls = defaults\n elif url == \"local\":\n t_urls = get_local_urls()\n else:\n t_urls = [url]\n for url0 in t_urls:\n url0 = url0.rstrip('/')\n if not is_url(url0):\n if alias is None:\n alias = binstar_channel_alias(channel_alias)\n url0 = alias + url0\n if offline_only and not url0.startswith('file:'):\n continue\n for plat in (platform or subdir, 'noarch'):\n newurls.append('%s/%s/' % (url0, plat))\n return newurls\n\ndef get_channel_urls(platform=None, offline=False):\n if os.getenv('CIO_TEST'):\n import cio_test\n base_urls = cio_test.base_urls\n elif 'channels' in rc:\n base_urls = ['system']\n else:\n base_urls = ['defaults']\n res = normalize_urls(base_urls, platform, offline)\n return res\n\ndef canonical_channel_name(channel):\n if channel is None:\n return '<unknown>'\n channel = remove_binstar_tokens(channel).rstrip('/')\n if any(channel.startswith(i) for i in get_default_urls(True)):\n return 'defaults'\n elif any(channel.startswith(i) for i in get_local_urls(clear_cache=False)):\n return 'local'\n elif channel.startswith('http://filer/'):\n return 'filer'\n elif channel.startswith(channel_alias):\n return channel.split(channel_alias, 1)[1]\n elif channel.startswith('http:/'):\n channel2 = 'https' + channel[4:]\n channel3 = canonical_channel_name(channel2)\n return channel3 if channel3 != channel2 else channel\n else:\n return channel\n\ndef url_channel(url):\n parts = (url or '').rsplit('/', 2)\n if len(parts) == 1:\n return '<unknown>', '<unknown>'\n if len(parts) == 2:\n return parts[0], parts[0]\n if url.startswith('file://') and parts[1] not in ('noarch', subdir):\n # Explicit file-based URLs are denoted with a '/' in the schannel\n channel = parts[0] + '/' + parts[1]\n schannel = channel + '/'\n else:\n channel = parts[0]\n schannel = canonical_channel_name(channel)\n return channel, schannel\n\n# ----- allowed channels -----\n\ndef get_allowed_channels():\n if not isfile(sys_rc_path):\n return None\n if sys_rc.get('allow_other_channels', True):\n return None\n if 'channels' in sys_rc:\n base_urls = ['system']\n else:\n base_urls = ['default']\n return 
normalize_urls(base_urls)\n\nallowed_channels = get_allowed_channels()\n\n# ----- proxy -----\n\ndef get_proxy_servers():\n res = rc.get('proxy_servers') or {}\n if isinstance(res, dict):\n return res\n sys.exit(\"Error: proxy_servers setting not a mapping\")\n\n\ndef load_condarc(path):\n rc = load_condarc_(path)\n\n root_dir = abspath(expanduser(os.getenv('CONDA_ROOT', rc.get('root_dir', sys.prefix))))\n root_writable = try_write(root_dir)\n\n globals().update(locals())\n\n envs_dirs = [abspath(expanduser(p)) for p in (\n _pathsep_env('CONDA_ENVS_PATH') or\n rc.get('envs_dirs') or\n _default_envs_dirs()\n )]\n\n pkgs_dirs = [pkgs_dir_from_envs_dir(envs_dir) for envs_dir in envs_dirs]\n\n _default_env = os.getenv('CONDA_DEFAULT_ENV')\n if _default_env in (None, root_env_name):\n default_prefix = root_dir\n elif os.sep in _default_env:\n default_prefix = abspath(_default_env)\n else:\n for envs_dir in envs_dirs:\n default_prefix = join(envs_dir, _default_env)\n if isdir(default_prefix):\n break\n else:\n default_prefix = join(envs_dirs[0], _default_env)\n\n # ----- foreign -----\n\n try:\n with open(join(root_dir, 'conda-meta', 'foreign')) as fi:\n foreign = fi.read().split()\n except IOError:\n foreign = [] if isdir(join(root_dir, 'conda-meta')) else ['python']\n\n channel_alias = rc.get('channel_alias', DEFAULT_CHANNEL_ALIAS)\n if not sys_rc.get('allow_other_channels', True) and 'channel_alias' in sys_rc:\n channel_alias = sys_rc['channel_alias']\n\n channel_alias = channel_alias.rstrip('/')\n _binstar = r'((:?%s|binstar\\.org|anaconda\\.org)/?)(t/[0-9a-zA-Z\\-<>]{4,})/'\n BINSTAR_TOKEN_PAT = re.compile(_binstar % re.escape(channel_alias))\n channel_alias = BINSTAR_TOKEN_PAT.sub(r'\\1', channel_alias + '/')\n\n offline = bool(rc.get('offline', False))\n\n add_pip_as_python_dependency = bool(rc.get('add_pip_as_python_dependency', True))\n always_yes = bool(rc.get('always_yes', False))\n always_copy = bool(rc.get('always_copy', False))\n changeps1 = bool(rc.get('changeps1', True))\n use_pip = bool(rc.get('use_pip', True))\n binstar_upload = rc.get('anaconda_upload',\n rc.get('binstar_upload', None)) # None means ask\n allow_softlinks = bool(rc.get('allow_softlinks', True))\n auto_update_conda = bool(rc.get('auto_update_conda', rc.get('self_update', True)))\n # show channel URLs when displaying what is going to be downloaded\n show_channel_urls = rc.get('show_channel_urls', None) # None means letting conda decide\n # set packages disallowed to be installed\n disallow = set(rc.get('disallow', []))\n # packages which are added to a newly created environment by default\n create_default_packages = list(rc.get('create_default_packages', []))\n update_dependencies = bool(rc.get('update_dependencies', True))\n channel_priority = bool(rc.get('channel_priority', True))\n\n # ssl_verify can be a boolean value or a filename string\n ssl_verify = rc.get('ssl_verify', True)\n\n try:\n track_features = rc.get('track_features', [])\n if isinstance(track_features, string_types):\n track_features = track_features.split()\n track_features = set(track_features)\n except KeyError:\n track_features = None\n\n globals().update(locals())\n return rc\n\nload_condarc(rc_path)\n", "path": "conda/config.py" } ]
[ { "content": "# (c) 2012-2015 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom os.path import abspath, expanduser, isfile, isdir, join\nfrom platform import machine\n\nfrom .compat import urlparse, string_types\nfrom .utils import try_write, yaml_load\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndefault_python = '%d.%d' % sys.version_info[:2]\n# CONDA_FORCE_32BIT should only be used when running conda-build (in order\n# to build 32-bit packages on a 64-bit system). We don't want to mention it\n# in the documentation, because it can mess up a lot of things.\nforce_32bit = bool(int(os.getenv('CONDA_FORCE_32BIT', 0)))\n\n# ----- operating system and architecture -----\n\n_sys_map = {\n 'linux2': 'linux',\n 'linux': 'linux',\n 'darwin': 'osx',\n 'win32': 'win',\n 'openbsd5': 'openbsd',\n}\nnon_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}\nplatform = _sys_map.get(sys.platform, 'unknown')\nbits = 8 * tuple.__itemsize__\nif force_32bit:\n bits = 32\n\nif platform == 'linux' and machine() in non_x86_linux_machines:\n arch_name = machine()\n subdir = 'linux-%s' % arch_name\nelse:\n arch_name = {64: 'x86_64', 32: 'x86'}[bits]\n subdir = '%s-%d' % (platform, bits)\n\n# ----- rc file -----\n\n# This is used by conda config to check which keys are allowed in the config\n# file. Be sure to update it when new keys are added.\n\n#################################################################\n# Also update the example condarc file when you add a key here! 
#\n#################################################################\n\nrc_list_keys = [\n 'channels',\n 'disallow',\n 'create_default_packages',\n 'track_features',\n 'envs_dirs',\n 'default_channels',\n]\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'\n\nADD_BINSTAR_TOKEN = True\n\nrc_bool_keys = [\n 'add_binstar_token',\n 'add_anaconda_token',\n 'add_pip_as_python_dependency',\n 'always_yes',\n 'always_copy',\n 'allow_softlinks',\n 'auto_update_conda',\n 'changeps1',\n 'use_pip',\n 'offline',\n 'binstar_upload',\n 'anaconda_upload',\n 'show_channel_urls',\n 'allow_other_channels',\n 'update_dependencies',\n 'channel_priority',\n]\n\nrc_string_keys = [\n 'ssl_verify',\n 'channel_alias',\n 'root_dir',\n]\n\n# Not supported by conda config yet\nrc_other = [\n 'proxy_servers',\n]\n\nuser_rc_path = abspath(expanduser('~/.condarc'))\nsys_rc_path = join(sys.prefix, '.condarc')\nlocal_channel = []\nrc = root_dir = root_writable = BINSTAR_TOKEN_PAT = channel_alias = None\n\ndef get_rc_path():\n path = os.getenv('CONDARC')\n if path == ' ':\n return None\n if path:\n return path\n for path in user_rc_path, sys_rc_path:\n if isfile(path):\n return path\n return None\n\nrc_path = get_rc_path()\n\ndef load_condarc_(path):\n if not path or not isfile(path):\n return {}\n with open(path) as f:\n return yaml_load(f) or {}\n\nsys_rc = load_condarc_(sys_rc_path) if isfile(sys_rc_path) else {}\n\n# ----- local directories -----\n\n# root_dir should only be used for testing, which is why don't mention it in\n# the documentation, to avoid confusion (it can really mess up a lot of\n# things)\nroot_env_name = 'root'\n\ndef _default_envs_dirs():\n lst = [join(root_dir, 'envs')]\n if not root_writable:\n # ~/envs for backwards compatibility\n lst = ['~/.conda/envs', '~/envs'] + lst\n return lst\n\ndef _pathsep_env(name):\n x = os.getenv(name)\n if x is None:\n return []\n res = []\n for path in x.split(os.pathsep):\n if path == 'DEFAULTS':\n for p in rc.get('envs_dirs') or _default_envs_dirs():\n res.append(p)\n else:\n res.append(path)\n return res\n\ndef pkgs_dir_from_envs_dir(envs_dir):\n if abspath(envs_dir) == abspath(join(root_dir, 'envs')):\n return join(root_dir, 'pkgs32' if force_32bit else 'pkgs')\n else:\n return join(envs_dir, '.pkgs')\n\n# ----- channels -----\n\n# Note, get_*_urls() return unnormalized urls.\n\ndef get_local_urls(clear_cache=True):\n # remove the cache such that a refetch is made,\n # this is necessary because we add the local build repo URL\n if clear_cache:\n from .fetch import fetch_index\n fetch_index.cache = {}\n if local_channel:\n return local_channel\n from os.path import exists\n from .utils import url_path\n try:\n from conda_build.config import croot\n if exists(croot):\n local_channel.append(url_path(croot))\n except ImportError:\n pass\n return local_channel\n\ndefaults_ = ['https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro']\n\ndef get_default_urls(merged=False):\n if 'default_channels' in sys_rc:\n res = sys_rc['default_channels']\n if merged:\n res = list(res)\n res.extend(c for c in defaults_ if c not in res)\n return res\n return defaults_\n\ndef get_rc_urls():\n if rc is None or rc.get('channels') is None:\n return []\n if 'system' in rc['channels']:\n raise RuntimeError(\"system cannot be used in .condarc\")\n return rc['channels']\n\ndef is_url(url):\n if url:\n p = urlparse.urlparse(url)\n return p.netloc != \"\" or p.scheme == \"file\"\n\ndef binstar_channel_alias(channel_alias):\n if channel_alias.startswith('file:/'):\n 
return channel_alias\n if rc.get('add_anaconda_token',\n rc.get('add_binstar_token', ADD_BINSTAR_TOKEN)):\n try:\n from binstar_client.utils import get_binstar\n bs = get_binstar()\n bs_domain = bs.domain.replace(\"api\", \"conda\").rstrip('/') + '/'\n if channel_alias.startswith(bs_domain) and bs.token:\n channel_alias += 't/%s/' % bs.token\n except ImportError:\n log.debug(\"Could not import binstar\")\n pass\n except Exception as e:\n stderrlog.info(\"Warning: could not import binstar_client (%s)\" % e)\n return channel_alias\n\ndef hide_binstar_tokens(url):\n return BINSTAR_TOKEN_PAT.sub(r'\\1t/<TOKEN>/', url)\n\ndef remove_binstar_tokens(url):\n return BINSTAR_TOKEN_PAT.sub(r'\\1', url)\n\ndef prioritize_channels(channels):\n newchans = OrderedDict()\n lastchan = None\n priority = 0\n for channel in channels:\n channel = channel.rstrip('/') + '/'\n if channel not in newchans:\n channel_s = canonical_channel_name(channel.rsplit('/', 2)[0])\n priority += channel_s != lastchan\n newchans[channel] = (channel_s, priority)\n lastchan = channel_s\n return newchans\n\ndef normalize_urls(urls, platform=None, offline_only=False):\n defaults = tuple(x.rstrip('/') + '/' for x in get_default_urls(False))\n alias = None\n newurls = []\n while urls:\n url = urls[0]\n urls = urls[1:]\n if url == \"system\" and rc_path:\n urls = get_rc_urls() + urls\n continue\n elif url in (\"defaults\", \"system\"):\n t_urls = defaults\n elif url == \"local\":\n t_urls = get_local_urls()\n else:\n t_urls = [url]\n for url0 in t_urls:\n url0 = url0.rstrip('/')\n if not is_url(url0):\n if alias is None:\n alias = binstar_channel_alias(channel_alias)\n url0 = alias + url0\n if offline_only and not url0.startswith('file:'):\n continue\n for plat in (platform or subdir, 'noarch'):\n newurls.append('%s/%s/' % (url0, plat))\n return newurls\n\ndef get_channel_urls(platform=None, offline=False):\n if os.getenv('CIO_TEST'):\n import cio_test\n base_urls = cio_test.base_urls\n elif 'channels' in rc:\n base_urls = ['system']\n else:\n base_urls = ['defaults']\n res = normalize_urls(base_urls, platform, offline)\n return res\n\ndef canonical_channel_name(channel):\n if channel is None:\n return '<unknown>'\n channel = remove_binstar_tokens(channel).rstrip('/')\n if any(channel.startswith(i) for i in get_default_urls(True)):\n return 'defaults'\n elif any(channel.startswith(i) for i in get_local_urls(clear_cache=False)):\n return 'local'\n elif channel.startswith('http://filer/'):\n return 'filer'\n elif channel.startswith(channel_alias):\n return channel.split(channel_alias, 1)[1]\n elif channel.startswith('http:/'):\n channel2 = 'https' + channel[4:]\n channel3 = canonical_channel_name(channel2)\n return channel3 if channel3 != channel2 else channel\n else:\n return channel\n\ndef url_channel(url):\n parts = (url or '').rsplit('/', 2)\n if len(parts) == 1:\n return '<unknown>', '<unknown>'\n if len(parts) == 2:\n return parts[0], parts[0]\n if url.startswith('file://') and parts[1] not in ('noarch', subdir):\n # Explicit file-based URLs are denoted with a '/' in the schannel\n channel = parts[0] + '/' + parts[1]\n schannel = channel + '/'\n else:\n channel = parts[0]\n schannel = canonical_channel_name(channel)\n return channel, schannel\n\n# ----- allowed channels -----\n\ndef get_allowed_channels():\n if not isfile(sys_rc_path):\n return None\n if sys_rc.get('allow_other_channels', True):\n return None\n if 'channels' in sys_rc:\n base_urls = ['system']\n else:\n base_urls = ['default']\n return 
normalize_urls(base_urls)\n\nallowed_channels = get_allowed_channels()\n\n# ----- proxy -----\n\ndef get_proxy_servers():\n res = rc.get('proxy_servers') or {}\n if isinstance(res, dict):\n return res\n sys.exit(\"Error: proxy_servers setting not a mapping\")\n\n\ndef load_condarc(path):\n rc = load_condarc_(path)\n\n root_dir = abspath(expanduser(os.getenv('CONDA_ROOT', rc.get('root_dir', sys.prefix))))\n root_writable = try_write(root_dir)\n\n globals().update(locals())\n\n envs_dirs = [abspath(expanduser(p)) for p in (\n _pathsep_env('CONDA_ENVS_PATH') or\n rc.get('envs_dirs') or\n _default_envs_dirs()\n )]\n\n pkgs_dirs = [pkgs_dir_from_envs_dir(envs_dir) for envs_dir in envs_dirs]\n\n _default_env = os.getenv('CONDA_DEFAULT_ENV')\n if _default_env in (None, root_env_name):\n default_prefix = root_dir\n elif os.sep in _default_env:\n default_prefix = abspath(_default_env)\n else:\n for envs_dir in envs_dirs:\n default_prefix = join(envs_dir, _default_env)\n if isdir(default_prefix):\n break\n else:\n default_prefix = join(envs_dirs[0], _default_env)\n\n # ----- foreign -----\n\n try:\n with open(join(root_dir, 'conda-meta', 'foreign')) as fi:\n foreign = fi.read().split()\n except IOError:\n foreign = [] if isdir(join(root_dir, 'conda-meta')) else ['python']\n\n channel_alias = rc.get('channel_alias', DEFAULT_CHANNEL_ALIAS)\n if not sys_rc.get('allow_other_channels', True) and 'channel_alias' in sys_rc:\n channel_alias = sys_rc['channel_alias']\n\n channel_alias = channel_alias.rstrip('/')\n _binstar = r'((:?%s|binstar\\.org|anaconda\\.org)/?)(t/[0-9a-zA-Z\\-<>]{4,})/'\n BINSTAR_TOKEN_PAT = re.compile(_binstar % re.escape(channel_alias))\n channel_alias = BINSTAR_TOKEN_PAT.sub(r'\\1', channel_alias + '/')\n\n offline = bool(rc.get('offline', False))\n\n add_pip_as_python_dependency = bool(rc.get('add_pip_as_python_dependency', True))\n always_yes = bool(rc.get('always_yes', False))\n always_copy = bool(rc.get('always_copy', False))\n changeps1 = bool(rc.get('changeps1', True))\n use_pip = bool(rc.get('use_pip', True))\n binstar_upload = rc.get('anaconda_upload',\n rc.get('binstar_upload', None)) # None means ask\n allow_softlinks = bool(rc.get('allow_softlinks', True))\n auto_update_conda = bool(rc.get('auto_update_conda', rc.get('self_update', True)))\n # show channel URLs when displaying what is going to be downloaded\n show_channel_urls = rc.get('show_channel_urls', None) # None means letting conda decide\n # set packages disallowed to be installed\n disallow = set(rc.get('disallow', []))\n # packages which are added to a newly created environment by default\n create_default_packages = list(rc.get('create_default_packages', []))\n update_dependencies = bool(rc.get('update_dependencies', True))\n channel_priority = bool(rc.get('channel_priority', True))\n\n # ssl_verify can be a boolean value or a filename string\n ssl_verify = rc.get('ssl_verify', True)\n\n try:\n track_features = rc.get('track_features', [])\n if isinstance(track_features, string_types):\n track_features = track_features.split()\n track_features = set(track_features)\n except KeyError:\n track_features = None\n\n globals().update(locals())\n return rc\n\nload_condarc(rc_path)\n", "path": "conda/config.py" } ]
diff --git a/conda/config.py b/conda/config.py index 038f2972e16..9e1405b036d 100644 --- a/conda/config.py +++ b/conda/config.py @@ -194,7 +194,7 @@ def get_default_urls(merged=False): return defaults_ def get_rc_urls(): - if rc.get('channels') is None: + if rc is None or rc.get('channels') is None: return [] if 'system' in rc['channels']: raise RuntimeError("system cannot be used in .condarc")
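The one-line diff above closes a crash window rather than changing behaviour: `conda/config.py` initializes `rc = root_dir = ... = None` at module scope and only swaps a dict in once a `.condarc` has been loaded, so `rc.get('channels')` can be reached while `rc` is still `None` and fail with `AttributeError: 'NoneType' object has no attribute 'get'`. A standalone sketch of the guarded lookup (module state simulated here, not the real conda import order):

```python
# Minimal sketch of the guard added in the diff above; `rc` mirrors the
# module-level default in conda/config.py and is swapped for a dict only
# after a .condarc is loaded.
rc = None  # as in: rc = root_dir = root_writable = ... = None

def get_rc_urls():
    # Tolerate both "no condarc loaded yet" and "condarc without channels".
    if rc is None or rc.get('channels') is None:
        return []
    if 'system' in rc['channels']:
        raise RuntimeError("system cannot be used in .condarc")
    return rc['channels']

print(get_rc_urls())             # [] while rc is still None, no AttributeError
rc = {'channels': ['defaults']}  # simulate load_condarc() having populated rc
print(get_rc_urls())             # ['defaults']
```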
zigpy__zha-device-handlers-184
Philips Remote DIM_DOWN typo? https://github.com/dmulcahey/zha-device-handlers/blob/833ee24710496d317a03b0f0b9f61df31291d75b/zhaquirks/philips/rwl021.py#L137 It seems that it should be: `ARGS: [1, 30, 9],`
[ { "content": "\"\"\"Phillips RWL021 device.\"\"\"\nfrom zigpy.profiles import zha, zll\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PowerConfiguration,\n Scenes,\n)\n\nfrom ..const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_OFF_WITH_EFFECT,\n COMMAND_ON,\n COMMAND_STEP,\n DEVICE_TYPE,\n DIM_DOWN,\n DIM_UP,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_OFF,\n TURN_ON,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\n\n\nclass BasicCluster(CustomCluster, Basic):\n \"\"\"Centralite acceleration cluster.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n super().__init__(*args, **kwargs)\n self.attributes = super().attributes.copy()\n self.attributes.update({0x0031: (\"phillips\", t.bitmap16)})\n\n\nclass PhilipsRWL021(CustomDevice):\n \"\"\"Phillips RWL021 device.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=49246 device_type=2096\n # device_version=2\n # input_clusters=[0]\n # output_clusters=[0, 3, 4, 6, 8, 5]>\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zll.PROFILE_ID,\n DEVICE_TYPE: zll.DeviceType.SCENE_CONTROLLER,\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n # <SimpleDescriptor endpoint=2 profile=260 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 15, 64512]\n # output_clusters=[25]>\n 2: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n 2: {\n INPUT_CLUSTERS: [\n BasicCluster,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON},\n (LONG_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF_WITH_EFFECT},\n (SHORT_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 30, 9],\n },\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 56, 9],\n },\n (SHORT_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 56, 9],\n },\n (LONG_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 56, 9],\n },\n }\n", "path": "zhaquirks/philips/rwl021.py" } ]
[ { "content": "\"\"\"Phillips RWL021 device.\"\"\"\nfrom zigpy.profiles import zha, zll\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PowerConfiguration,\n Scenes,\n)\n\nfrom ..const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_OFF_WITH_EFFECT,\n COMMAND_ON,\n COMMAND_STEP,\n DEVICE_TYPE,\n DIM_DOWN,\n DIM_UP,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_OFF,\n TURN_ON,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\n\n\nclass BasicCluster(CustomCluster, Basic):\n \"\"\"Centralite acceleration cluster.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n super().__init__(*args, **kwargs)\n self.attributes = super().attributes.copy()\n self.attributes.update({0x0031: (\"phillips\", t.bitmap16)})\n\n\nclass PhilipsRWL021(CustomDevice):\n \"\"\"Phillips RWL021 device.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=49246 device_type=2096\n # device_version=2\n # input_clusters=[0]\n # output_clusters=[0, 3, 4, 6, 8, 5]>\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zll.PROFILE_ID,\n DEVICE_TYPE: zll.DeviceType.SCENE_CONTROLLER,\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n # <SimpleDescriptor endpoint=2 profile=260 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 15, 64512]\n # output_clusters=[25]>\n 2: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n 2: {\n INPUT_CLUSTERS: [\n BasicCluster,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON},\n (LONG_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF_WITH_EFFECT},\n (SHORT_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 30, 9],\n },\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 56, 9],\n },\n (SHORT_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 30, 9],\n },\n (LONG_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 56, 9],\n },\n }\n", "path": "zhaquirks/philips/rwl021.py" } ]
diff --git a/zhaquirks/philips/rwl021.py b/zhaquirks/philips/rwl021.py index 32297f7133..2c18700483 100644 --- a/zhaquirks/philips/rwl021.py +++ b/zhaquirks/philips/rwl021.py @@ -134,7 +134,7 @@ class PhilipsRWL021(CustomDevice): COMMAND: COMMAND_STEP, CLUSTER_ID: 8, ENDPOINT_ID: 1, - ARGS: [1, 56, 9], + ARGS: [1, 30, 9], }, (LONG_PRESS, DIM_DOWN): { COMMAND: COMMAND_STEP,
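The triples changed by this fix are the positional arguments of the ZCL LevelControl `Step` command: step mode (0 = up, 1 = down), step size, and transition time. A short press should therefore step by the same amount (30) in both directions, with 56 reserved for long presses; only the short-press DIM_DOWN entry had drifted. A small illustrative helper (names invented here, not part of zha-device-handlers) makes the intended symmetry explicit:

```python
# Illustrative only: rebuild the four ARGS triples from the quirk above to
# show the symmetry the fix restores (step_mode, step_size, transition_time).
UP, DOWN = 0, 1                 # LevelControl Step: step_mode
SHORT_STEP, LONG_STEP = 30, 56  # step_size values used by the quirk
TRANSITION = 9                  # transition time used by the quirk

def step_args(direction, long_press):
    """Return [step_mode, step_size, transition_time] for one trigger."""
    return [direction, LONG_STEP if long_press else SHORT_STEP, TRANSITION]

assert step_args(UP, False) == [0, 30, 9]    # (SHORT_PRESS, DIM_UP)
assert step_args(UP, True) == [0, 56, 9]     # (LONG_PRESS, DIM_UP)
assert step_args(DOWN, False) == [1, 30, 9]  # (SHORT_PRESS, DIM_DOWN), fixed value
assert step_args(DOWN, True) == [1, 56, 9]   # (LONG_PRESS, DIM_DOWN)
```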
googleapis__python-bigquery-859
Increase default timeout of retry objects to 10 minutes Per internal issue 195337762, the general timeout for jobs.insert API is 4 minutes. We should increase our default deadline to 10 minutes to allow for at least 1 retry if the first request fails.
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py" } ]
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py index 2df4de08b..bab28aacb 100644 --- a/google/cloud/bigquery/retry.py +++ b/google/cloud/bigquery/retry.py @@ -47,7 +47,7 @@ def _should_retry(exc): return reason in _RETRYABLE_REASONS -DEFAULT_RETRY = retry.Retry(predicate=_should_retry) +DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0) """The default retry object. Any method with a ``retry`` parameter will be retried automatically,
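With the deadline raised to 600 s, DEFAULT_RETRY can outlast a full ~4 minute server-side jobs.insert timeout and still make at least one more attempt. Callers that need a different overall budget can derive their own retry object, as the module docstring already describes; a usage sketch (assumes GCP credentials are configured, the query text is a placeholder):

```python
# Usage sketch for overriding the default retry deadline per call.
# Assumes google-cloud-bigquery is installed and credentials are available.
from google.cloud import bigquery
from google.cloud.bigquery.retry import DEFAULT_RETRY

client = bigquery.Client()

# Give up after 120 s of retrying transient errors for this one request.
short_retry = DEFAULT_RETRY.with_deadline(120)
job = client.query("SELECT 1", retry=short_retry)
print(list(job.result()))
```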
mitmproxy__mitmproxy-4179
TypeError: Subscripted generics cannot be used with class and instance checks under python 3.9.0b1 #### Problem Description Running mitmproxy 5.1.1 under python 3.9.0b1 fails with `TypeError: Subscripted generics cannot be used with class and instance checks`. The test suite fails as well with hundreds of ERROR and FAILED tests. #### Steps to reproduce the behavior: 1. install mitmproxy 5.1.1 on Fedora rawhide 2. mitmproxy 3. pytest -v There are: ``` =================== 303 failed, 994 passed, 2 xfailed, 115 warnings, 182 errors in 72.86s (0:01:12) ==================== ``` Most of them throw a `TypeError: Subscripted generics cannot be used with class and instance checks` and have a stack trace similar to: ``` ___________________________________ ERROR at setup of TestHTTPS.test_clientcert_dir ____________________________________ cls = <class 'test.mitmproxy.proxy.test_server.TestHTTPS'> @classmethod def setup_class(cls): cls.server = pathod.test.Daemon( ssl=cls.ssl, ssloptions=cls.ssloptions) cls.server2 = pathod.test.Daemon( ssl=cls.ssl, ssloptions=cls.ssloptions) > cls.options = cls.get_options() test/mitmproxy/tservers.py:146: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test/mitmproxy/tservers.py:179: in get_options return options.Options( mitmproxy/options.py:50: in __init__ self.add_option( mitmproxy/optmanager.py:109: in add_option self._options[name] = _Option(name, typespec, default, help, choices) mitmproxy/optmanager.py:34: in __init__ typecheck.check_option_type(name, default, typespec) mitmproxy/utils/typecheck.py:73: in check_option_type elif not isinstance(value, typeinfo): /usr/lib64/python3.9/typing.py:649: in __instancecheck__ return self.__subclasscheck__(type(obj)) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typing.Optional[str], cls = <class 'NoneType'> def __subclasscheck__(self, cls): > raise TypeError("Subscripted generics cannot be used with" " class and instance checks") E TypeError: Subscripted generics cannot be used with class and instance checks /usr/lib64/python3.9/typing.py:652: TypeError ``` #### System Information ``` Traceback (most recent call last): File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/bin/./mitmproxy", line 11, in <module> load_entry_point('mitmproxy==5.1.1', 'console_scripts', 'mitmproxy')() File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/tools/_main.py", line 147, in mitmproxy run(console.master.ConsoleMaster, cmdline.mitmproxy, args) File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/tools/_main.py", line 71, in run opts = options.Options() File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/options.py", line 50, in __init__ self.add_option( File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/optmanager.py", line 109, in add_option self._options[name] = _Option(name, typespec, default, help, choices) File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/optmanager.py", line 34, in __init__ typecheck.check_option_type(name, default, typespec) File "/builddir/build/BUILDROOT/mitmproxy-5.1.1-1.fc33.x86_64/usr/lib/python3.9/site-packages/mitmproxy/utils/typecheck.py", line 73, in check_option_type elif not 
isinstance(value, typeinfo): File "/usr/lib64/python3.9/typing.py", line 649, in __instancecheck__ return self.__subclasscheck__(type(obj)) File "/usr/lib64/python3.9/typing.py", line 652, in __subclasscheck__ raise TypeError("Subscripted generics cannot be used with" TypeError: Subscripted generics cannot be used with class and instance checks ```
[ { "content": "import typing\n\nType = typing.Union[\n typing.Any # anything more elaborate really fails with mypy at the moment.\n]\n\n\ndef sequence_type(typeinfo: typing.Type[typing.List]) -> Type:\n \"\"\"Return the type of a sequence, e.g. typing.List\"\"\"\n return typeinfo.__args__[0] # type: ignore\n\n\ndef tuple_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"Return the types of a typing.Tuple\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef union_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"return the types of a typing.Union\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef mapping_types(typeinfo: typing.Type[typing.Mapping]) -> typing.Tuple[Type, Type]:\n \"\"\"return the types of a mapping, e.g. typing.Dict\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None:\n \"\"\"\n Check if the provided value is an instance of typeinfo and raises a\n TypeError otherwise. This function supports only those types required for\n options.\n \"\"\"\n e = TypeError(\"Expected {} for {}, but got {}.\".format(\n typeinfo,\n name,\n type(value)\n ))\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.Union\"):\n for T in union_types(typeinfo):\n try:\n check_option_type(name, value, T)\n except TypeError:\n pass\n else:\n return\n raise e\n elif typename.startswith(\"typing.Tuple\"):\n types = tuple_types(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n if len(types) != len(value):\n raise e\n for i, (x, T) in enumerate(zip(value, types)):\n check_option_type(\"{}[{}]\".format(name, i), x, T)\n return\n elif typename.startswith(\"typing.Sequence\"):\n T = sequence_type(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n for v in value:\n check_option_type(name, v, T)\n elif typename.startswith(\"typing.IO\"):\n if hasattr(value, \"read\"):\n return\n else:\n raise e\n elif typename.startswith(\"typing.Any\"):\n return\n elif not isinstance(value, typeinfo):\n if typeinfo is float and isinstance(value, int):\n return\n raise e\n\n\ndef typespec_to_str(typespec: typing.Any) -> str:\n if typespec in (str, int, bool):\n t = typespec.__name__\n elif typespec == typing.Optional[str]:\n t = 'optional str'\n elif typespec == typing.Sequence[str]:\n t = 'sequence of str'\n elif typespec == typing.Optional[int]:\n t = 'optional int'\n else:\n raise NotImplementedError\n return t\n", "path": "mitmproxy/utils/typecheck.py" } ]
[ { "content": "import typing\n\nType = typing.Union[\n typing.Any # anything more elaborate really fails with mypy at the moment.\n]\n\n\ndef sequence_type(typeinfo: typing.Type[typing.List]) -> Type:\n \"\"\"Return the type of a sequence, e.g. typing.List\"\"\"\n return typeinfo.__args__[0] # type: ignore\n\n\ndef tuple_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"Return the types of a typing.Tuple\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef union_types(typeinfo: typing.Type[typing.Tuple]) -> typing.Sequence[Type]:\n \"\"\"return the types of a typing.Union\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef mapping_types(typeinfo: typing.Type[typing.Mapping]) -> typing.Tuple[Type, Type]:\n \"\"\"return the types of a mapping, e.g. typing.Dict\"\"\"\n return typeinfo.__args__ # type: ignore\n\n\ndef check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None:\n \"\"\"\n Check if the provided value is an instance of typeinfo and raises a\n TypeError otherwise. This function supports only those types required for\n options.\n \"\"\"\n e = TypeError(\"Expected {} for {}, but got {}.\".format(\n typeinfo,\n name,\n type(value)\n ))\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.Union\") or typename.startswith(\"typing.Optional\"):\n for T in union_types(typeinfo):\n try:\n check_option_type(name, value, T)\n except TypeError:\n pass\n else:\n return\n raise e\n elif typename.startswith(\"typing.Tuple\"):\n types = tuple_types(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n if len(types) != len(value):\n raise e\n for i, (x, T) in enumerate(zip(value, types)):\n check_option_type(\"{}[{}]\".format(name, i), x, T)\n return\n elif typename.startswith(\"typing.Sequence\"):\n T = sequence_type(typeinfo)\n if not isinstance(value, (tuple, list)):\n raise e\n for v in value:\n check_option_type(name, v, T)\n elif typename.startswith(\"typing.IO\"):\n if hasattr(value, \"read\"):\n return\n else:\n raise e\n elif typename.startswith(\"typing.Any\"):\n return\n elif not isinstance(value, typeinfo):\n if typeinfo is float and isinstance(value, int):\n return\n raise e\n\n\ndef typespec_to_str(typespec: typing.Any) -> str:\n if typespec in (str, int, bool):\n t = typespec.__name__\n elif typespec == typing.Optional[str]:\n t = 'optional str'\n elif typespec == typing.Sequence[str]:\n t = 'sequence of str'\n elif typespec == typing.Optional[int]:\n t = 'optional int'\n else:\n raise NotImplementedError\n return t\n", "path": "mitmproxy/utils/typecheck.py" } ]
diff --git a/mitmproxy/utils/typecheck.py b/mitmproxy/utils/typecheck.py index b2793e4708..78537cc29b 100644 --- a/mitmproxy/utils/typecheck.py +++ b/mitmproxy/utils/typecheck.py @@ -39,7 +39,7 @@ def check_option_type(name: str, value: typing.Any, typeinfo: Type) -> None: typename = str(typeinfo) - if typename.startswith("typing.Union"): + if typename.startswith("typing.Union") or typename.startswith("typing.Optional"): for T in union_types(typeinfo): try: check_option_type(name, value, T)
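The root cause is that `check_option_type()` dispatches on `str(typeinfo)`: on Python 3.9 the string form of `typing.Optional[str]` no longer starts with `typing.Union` (the traceback shows `self = typing.Optional[str]`), so such options fall through to the plain `isinstance()` check, and subscripted generics reject that with exactly the TypeError in the report. A short demonstration, with the version-dependent output noted in comments:

```python
# Why the extra startswith("typing.Optional") branch is needed.
import typing

t = typing.Optional[str]
print(str(t))
# Observed on Python 3.8 and earlier: "typing.Union[str, NoneType]" -> Union branch taken
# Observed on Python 3.9:             "typing.Optional[str]"        -> fell through

# The fall-through ends at `isinstance(value, typeinfo)`, which is what raised
# the error in the report:
try:
    isinstance(None, t)
except TypeError as exc:
    print(exc)  # Subscripted generics cannot be used with class and instance checks
```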
cython__cython-5505
[BUG] `cython` command prints stdout-type information to stderr ### Describe the bug As reported on the mailing list, the `cython` command prints things to stderr that should go to stdout. ### Code to reproduce the behaviour: ```python >>> from subprocess import run >>> res = run("cython --version", capture_output=True, shell=True) >>> err = res.stderr.splitlines() >>> print(err) [b'Cython version 0.29.35'] >>> out = res.stdout.splitlines() >>> print(out) >>> [] ``` ### Expected behaviour _No response_ ### OS _No response_ ### Python version _No response_ ### Cython version 0.29.35, probably also master ### Additional context _No response_
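Until informational output moves to stdout, anything that shells out to `cython --version` has to read stderr as well. A small sketch of a check that works with both the current and the intended behaviour (adapted from the repro above; the `Cython version` prefix is only what that repro showed):

```python
# Version probe tolerant of either stream; adapted from the issue's repro.
import subprocess

res = subprocess.run(["cython", "--version"], capture_output=True, text=True)
print("stdout:", repr(res.stdout))
print("stderr:", repr(res.stderr))

# Read stdout first (post-fix behaviour), fall back to stderr (current behaviour).
version_line = (res.stdout or res.stderr).strip()
assert version_line.startswith("Cython version"), version_line
```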
[ { "content": "#\n# Cython Top Level\n#\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport io\n\nif sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 3):\n sys.stderr.write(\"Sorry, Cython requires Python 2.7 or 3.3+, found %d.%d\\n\" % tuple(sys.version_info[:2]))\n sys.exit(1)\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\n# Do not import Parsing here, import it when needed, because Parsing imports\n# Nodes, which globally needs debug command line options initialized to set a\n# conditional metaclass. These options are processed by CmdLine called from\n# main() in this file.\n# import Parsing\nfrom . import Errors\nfrom .StringEncoding import EncodedString\nfrom .Scanning import PyrexScanner, FileSourceDescriptor\nfrom .Errors import PyrexError, CompileError, error, warning\nfrom .Symtab import ModuleScope\nfrom .. import Utils\nfrom . import Options\nfrom .Options import CompilationOptions, default_options\nfrom .CmdLine import parse_command_line\nfrom .Lexicon import (unicode_start_ch_any, unicode_continuation_ch_any,\n unicode_start_ch_range, unicode_continuation_ch_range)\n\n\ndef _make_range_re(chrs):\n out = []\n for i in range(0, len(chrs), 2):\n out.append(u\"{0}-{1}\".format(chrs[i], chrs[i+1]))\n return u\"\".join(out)\n\n# py2 version looked like r\"[A-Za-z_][A-Za-z0-9_]*(\\.[A-Za-z_][A-Za-z0-9_]*)*$\"\nmodule_name_pattern = u\"[{0}{1}][{0}{2}{1}{3}]*\".format(\n unicode_start_ch_any, _make_range_re(unicode_start_ch_range),\n unicode_continuation_ch_any,\n _make_range_re(unicode_continuation_ch_range))\nmodule_name_pattern = re.compile(u\"{0}(\\\\.{0})*$\".format(module_name_pattern))\n\n\nstandard_include_path = os.path.abspath(\n os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Includes'))\n\n\nclass Context(object):\n # This class encapsulates the context needed for compiling\n # one or more Cython implementation files along with their\n # associated and imported declaration files. It includes\n # the root of the module import namespace and the list\n # of directories to search for include files.\n #\n # modules {string : ModuleScope}\n # include_directories [string]\n # future_directives [object]\n # language_level int currently 2 or 3 for Python 2/3\n\n cython_scope = None\n language_level = None # warn when not set but default to Py2\n\n def __init__(self, include_directories, compiler_directives, cpp=False,\n language_level=None, options=None):\n # cython_scope is a hack, set to False by subclasses, in order to break\n # an infinite loop.\n # Better code organization would fix it.\n\n from . 
import Builtin, CythonScope\n self.modules = {\"__builtin__\" : Builtin.builtin_scope}\n self.cython_scope = CythonScope.create_cython_scope(self)\n self.modules[\"cython\"] = self.cython_scope\n self.include_directories = include_directories\n self.future_directives = set()\n self.compiler_directives = compiler_directives\n self.cpp = cpp\n self.options = options\n\n self.pxds = {} # full name -> node tree\n self._interned = {} # (type(value), value, *key_args) -> interned_value\n\n if language_level is not None:\n self.set_language_level(language_level)\n\n self.legacy_implicit_noexcept = self.compiler_directives.get('legacy_implicit_noexcept', False)\n\n self.gdb_debug_outputwriter = None\n\n @classmethod\n def from_options(cls, options):\n return cls(options.include_path, options.compiler_directives,\n options.cplus, options.language_level, options=options)\n\n def set_language_level(self, level):\n from .Future import print_function, unicode_literals, absolute_import, division, generator_stop\n future_directives = set()\n if level == '3str':\n level = 3\n else:\n level = int(level)\n if level >= 3:\n future_directives.add(unicode_literals)\n if level >= 3:\n future_directives.update([print_function, absolute_import, division, generator_stop])\n self.language_level = level\n self.future_directives = future_directives\n if level >= 3:\n self.modules['builtins'] = self.modules['__builtin__']\n\n def intern_ustring(self, value, encoding=None):\n key = (EncodedString, value, encoding)\n try:\n return self._interned[key]\n except KeyError:\n pass\n value = EncodedString(value)\n if encoding:\n value.encoding = encoding\n self._interned[key] = value\n return value\n\n # pipeline creation functions can now be found in Pipeline.py\n\n def process_pxd(self, source_desc, scope, module_name):\n from . import Pipeline\n if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':\n source = CompilationSource(source_desc, module_name, os.getcwd())\n result_sink = create_default_resultobj(source, self.options)\n pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)\n result = Pipeline.run_pipeline(pipeline, source)\n else:\n pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)\n result = Pipeline.run_pipeline(pipeline, source_desc)\n return result\n\n def nonfatal_error(self, exc):\n return Errors.report_error(exc)\n\n def _split_qualified_name(self, qualified_name):\n # Splits qualified_name into parts in form of 2-tuples: (PART_NAME, IS_PACKAGE).\n qualified_name_parts = qualified_name.split('.')\n last_part = qualified_name_parts.pop()\n qualified_name_parts = [(p, True) for p in qualified_name_parts]\n if last_part != '__init__':\n # If Last part is __init__, then it is omitted. Otherwise, we need to check whether we can find\n # __init__.pyx/__init__.py file to determine if last part is package or not.\n is_package = False\n for suffix in ('.py', '.pyx'):\n path = self.search_include_directories(\n qualified_name, suffix=suffix, source_pos=None, source_file_path=None)\n if path:\n is_package = self._is_init_file(path)\n break\n\n qualified_name_parts.append((last_part, is_package))\n return qualified_name_parts\n\n @staticmethod\n def _is_init_file(path):\n return os.path.basename(path) in ('__init__.pyx', '__init__.py', '__init__.pxd') if path else False\n\n @staticmethod\n def _check_pxd_filename(pos, pxd_pathname, qualified_name):\n if not pxd_pathname:\n return\n pxd_filename = os.path.basename(pxd_pathname)\n if '.' 
in qualified_name and qualified_name == os.path.splitext(pxd_filename)[0]:\n warning(pos, \"Dotted filenames ('%s') are deprecated.\"\n \" Please use the normal Python package directory layout.\" % pxd_filename, level=1)\n\n def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,\n absolute_fallback=True):\n # Finds and returns the module scope corresponding to\n # the given relative or absolute module name. If this\n # is the first time the module has been requested, finds\n # the corresponding .pxd file and process it.\n # If relative_to is not None, it must be a module scope,\n # and the module will first be searched for relative to\n # that module, provided its name is not a dotted name.\n debug_find_module = 0\n if debug_find_module:\n print(\"Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s\" % (\n module_name, relative_to, pos, need_pxd))\n\n scope = None\n pxd_pathname = None\n if relative_to:\n if module_name:\n # from .module import ...\n qualified_name = relative_to.qualify_name(module_name)\n else:\n # from . import ...\n qualified_name = relative_to.qualified_name\n scope = relative_to\n relative_to = None\n else:\n qualified_name = module_name\n\n if not module_name_pattern.match(qualified_name):\n raise CompileError(pos or (module_name, 0, 0),\n u\"'%s' is not a valid module name\" % module_name)\n\n if relative_to:\n if debug_find_module:\n print(\"...trying relative import\")\n scope = relative_to.lookup_submodule(module_name)\n if not scope:\n pxd_pathname = self.find_pxd_file(qualified_name, pos)\n self._check_pxd_filename(pos, pxd_pathname, qualified_name)\n if pxd_pathname:\n is_package = self._is_init_file(pxd_pathname)\n scope = relative_to.find_submodule(module_name, as_package=is_package)\n if not scope:\n if debug_find_module:\n print(\"...trying absolute import\")\n if absolute_fallback:\n qualified_name = module_name\n scope = self\n for name, is_package in self._split_qualified_name(qualified_name):\n scope = scope.find_submodule(name, as_package=is_package)\n if debug_find_module:\n print(\"...scope = %s\" % scope)\n if not scope.pxd_file_loaded:\n if debug_find_module:\n print(\"...pxd not loaded\")\n if not pxd_pathname:\n if debug_find_module:\n print(\"...looking for pxd file\")\n # Only look in sys.path if we are explicitly looking\n # for a .pxd file.\n pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd)\n self._check_pxd_filename(pos, pxd_pathname, qualified_name)\n if debug_find_module:\n print(\"......found %s\" % pxd_pathname)\n if not pxd_pathname and need_pxd:\n # Set pxd_file_loaded such that we don't need to\n # look for the non-existing pxd file next time.\n scope.pxd_file_loaded = True\n package_pathname = self.search_include_directories(\n qualified_name, suffix=\".py\", source_pos=pos)\n if package_pathname and package_pathname.endswith(Utils.PACKAGE_FILES):\n pass\n else:\n error(pos, \"'%s.pxd' not found\" % qualified_name.replace('.', os.sep))\n if pxd_pathname:\n scope.pxd_file_loaded = True\n try:\n if debug_find_module:\n print(\"Context.find_module: Parsing %s\" % pxd_pathname)\n rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]\n if not pxd_pathname.endswith(rel_path):\n rel_path = pxd_pathname # safety measure to prevent printing incorrect paths\n source_desc = FileSourceDescriptor(pxd_pathname, rel_path)\n err, result = self.process_pxd(source_desc, scope, qualified_name)\n if err:\n raise err\n (pxd_codenodes, pxd_scope) = 
result\n self.pxds[module_name] = (pxd_codenodes, pxd_scope)\n except CompileError:\n pass\n return scope\n\n def find_pxd_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None):\n # Search include path (and sys.path if sys_path is True) for\n # the .pxd file corresponding to the given fully-qualified\n # module name.\n # Will find either a dotted filename or a file in a\n # package directory. If a source file position is given,\n # the directory containing the source file is searched first\n # for a dotted filename, and its containing package root\n # directory is searched first for a non-dotted filename.\n pxd = self.search_include_directories(\n qualified_name, suffix=\".pxd\", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path)\n if pxd is None and Options.cimport_from_pyx:\n return self.find_pyx_file(qualified_name, pos)\n return pxd\n\n def find_pyx_file(self, qualified_name, pos=None, source_file_path=None):\n # Search include path for the .pyx file corresponding to the\n # given fully-qualified module name, as for find_pxd_file().\n return self.search_include_directories(\n qualified_name, suffix=\".pyx\", source_pos=pos, source_file_path=source_file_path)\n\n def find_include_file(self, filename, pos=None, source_file_path=None):\n # Search list of include directories for filename.\n # Reports an error and returns None if not found.\n path = self.search_include_directories(\n filename, source_pos=pos, include=True, source_file_path=source_file_path)\n if not path:\n error(pos, \"'%s' not found\" % filename)\n return path\n\n def search_include_directories(self, qualified_name,\n suffix=None, source_pos=None, include=False, sys_path=False, source_file_path=None):\n include_dirs = self.include_directories\n if sys_path:\n include_dirs = include_dirs + sys.path\n # include_dirs must be hashable for caching in @cached_function\n include_dirs = tuple(include_dirs + [standard_include_path])\n return search_include_directories(\n include_dirs, qualified_name, suffix or \"\", source_pos, include, source_file_path)\n\n def find_root_package_dir(self, file_path):\n return Utils.find_root_package_dir(file_path)\n\n def check_package_dir(self, dir, package_names):\n return Utils.check_package_dir(dir, tuple(package_names))\n\n def c_file_out_of_date(self, source_path, output_path):\n if not os.path.exists(output_path):\n return 1\n c_time = Utils.modification_time(output_path)\n if Utils.file_newer_than(source_path, c_time):\n return 1\n pxd_path = Utils.replace_suffix(source_path, \".pxd\")\n if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):\n return 1\n for kind, name in self.read_dependency_file(source_path):\n if kind == \"cimport\":\n dep_path = self.find_pxd_file(name, source_file_path=source_path)\n elif kind == \"include\":\n dep_path = self.search_include_directories(name, source_file_path=source_path)\n else:\n continue\n if dep_path and Utils.file_newer_than(dep_path, c_time):\n return 1\n return 0\n\n def find_cimported_module_names(self, source_path):\n return [ name for kind, name in self.read_dependency_file(source_path)\n if kind == \"cimport\" ]\n\n def is_package_dir(self, dir_path):\n return Utils.is_package_dir(dir_path)\n\n def read_dependency_file(self, source_path):\n dep_path = Utils.replace_suffix(source_path, \".dep\")\n if os.path.exists(dep_path):\n with open(dep_path, \"rU\") as f:\n chunks = [ line.split(\" \", 1)\n for line in (l.strip() for l in f)\n if \" \" in line ]\n return chunks\n else:\n return 
()\n\n def lookup_submodule(self, name):\n # Look up a top-level module. Returns None if not found.\n return self.modules.get(name, None)\n\n def find_submodule(self, name, as_package=False):\n # Find a top-level module, creating a new one if needed.\n scope = self.lookup_submodule(name)\n if not scope:\n scope = ModuleScope(name,\n parent_module = None, context = self, is_package=as_package)\n self.modules[name] = scope\n return scope\n\n def parse(self, source_desc, scope, pxd, full_module_name):\n if not isinstance(source_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n source_filename = source_desc.filename\n scope.cpp = self.cpp\n # Parse the given source file and return a parse tree.\n num_errors = Errors.get_errors_count()\n try:\n with Utils.open_source_file(source_filename) as f:\n from . import Parsing\n s = PyrexScanner(f, source_desc, source_encoding = f.encoding,\n scope = scope, context = self)\n tree = Parsing.p_module(s, pxd, full_module_name)\n if self.options.formal_grammar:\n try:\n from ..Parser import ConcreteSyntaxTree\n except ImportError:\n raise RuntimeError(\n \"Formal grammar can only be used with compiled Cython with an available pgen.\")\n ConcreteSyntaxTree.p_module(source_filename)\n except UnicodeDecodeError as e:\n #import traceback\n #traceback.print_exc()\n raise self._report_decode_error(source_desc, e)\n\n if Errors.get_errors_count() > num_errors:\n raise CompileError()\n return tree\n\n def _report_decode_error(self, source_desc, exc):\n msg = exc.args[-1]\n position = exc.args[2]\n encoding = exc.args[0]\n\n line = 1\n column = idx = 0\n with io.open(source_desc.filename, \"r\", encoding='iso8859-1', newline='') as f:\n for line, data in enumerate(f, 1):\n idx += len(data)\n if idx >= position:\n column = position - (idx - len(data)) + 1\n break\n\n return error((source_desc, line, column),\n \"Decoding error, missing or incorrect coding=<encoding-name> \"\n \"at top of source (cannot decode with encoding %r: %s)\" % (encoding, msg))\n\n def extract_module_name(self, path, options):\n # Find fully_qualified module name from the full pathname\n # of a source file.\n dir, filename = os.path.split(path)\n module_name, _ = os.path.splitext(filename)\n if \".\" in module_name:\n return module_name\n names = [module_name]\n while self.is_package_dir(dir):\n parent, package_name = os.path.split(dir)\n if parent == dir:\n break\n names.append(package_name)\n dir = parent\n names.reverse()\n return \".\".join(names)\n\n def setup_errors(self, options, result):\n Errors.init_thread()\n if options.use_listing_file:\n path = result.listing_file = Utils.replace_suffix(result.main_source_file, \".lis\")\n else:\n path = None\n Errors.open_listing_file(path=path, echo_to_stderr=options.errors_to_stderr)\n\n def teardown_errors(self, err, options, result):\n source_desc = result.compilation_source.source_desc\n if not isinstance(source_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n Errors.close_listing_file()\n result.num_errors = Errors.get_errors_count()\n if result.num_errors > 0:\n err = True\n if err and result.c_file:\n try:\n Utils.castrate_file(result.c_file, os.stat(source_desc.filename))\n except EnvironmentError:\n pass\n result.c_file = None\n\n\ndef get_output_filename(source_filename, cwd, options):\n if options.cplus:\n c_suffix = \".cpp\"\n else:\n c_suffix = \".c\"\n suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)\n if 
options.output_file:\n out_path = os.path.join(cwd, options.output_file)\n if os.path.isdir(out_path):\n return os.path.join(out_path, os.path.basename(suggested_file_name))\n else:\n return out_path\n else:\n return suggested_file_name\n\n\ndef create_default_resultobj(compilation_source, options):\n result = CompilationResult()\n result.main_source_file = compilation_source.source_desc.filename\n result.compilation_source = compilation_source\n source_desc = compilation_source.source_desc\n result.c_file = get_output_filename(source_desc.filename,\n compilation_source.cwd, options)\n result.embedded_metadata = options.embedded_metadata\n return result\n\n\ndef run_pipeline(source, options, full_module_name=None, context=None):\n from . import Pipeline\n\n # ensure that the inputs are unicode (for Python 2)\n if sys.version_info[0] == 2:\n source = Utils.decode_filename(source)\n if full_module_name:\n full_module_name = Utils.decode_filename(full_module_name)\n\n source_ext = os.path.splitext(source)[1]\n options.configure_language_defaults(source_ext[1:]) # py/pyx\n if context is None:\n context = Context.from_options(options)\n\n # Set up source object\n cwd = os.getcwd()\n abs_path = os.path.abspath(source)\n full_module_name = full_module_name or context.extract_module_name(source, options)\n full_module_name = EncodedString(full_module_name)\n\n Utils.raise_error_if_module_name_forbidden(full_module_name)\n\n if options.relative_path_in_code_position_comments:\n rel_path = full_module_name.replace('.', os.sep) + source_ext\n if not abs_path.endswith(rel_path):\n rel_path = source # safety measure to prevent printing incorrect paths\n else:\n rel_path = abs_path\n source_desc = FileSourceDescriptor(abs_path, rel_path)\n source = CompilationSource(source_desc, full_module_name, cwd)\n\n # Set up result object\n result = create_default_resultobj(source, options)\n\n if options.annotate is None:\n # By default, decide based on whether an html file already exists.\n html_filename = os.path.splitext(result.c_file)[0] + \".html\"\n if os.path.exists(html_filename):\n with io.open(html_filename, \"r\", encoding=\"UTF-8\") as html_file:\n if u'<!-- Generated by Cython' in html_file.read(100):\n options.annotate = True\n\n # Get pipeline\n if source_ext.lower() == '.py' or not source_ext:\n pipeline = Pipeline.create_py_pipeline(context, options, result)\n else:\n pipeline = Pipeline.create_pyx_pipeline(context, options, result)\n\n context.setup_errors(options, result)\n\n if '.' in full_module_name and '.' 
in os.path.splitext(os.path.basename(abs_path))[0]:\n warning((source_desc, 1, 0),\n \"Dotted filenames ('%s') are deprecated.\"\n \" Please use the normal Python package directory layout.\" % os.path.basename(abs_path), level=1)\n\n err, enddata = Pipeline.run_pipeline(pipeline, source)\n context.teardown_errors(err, options, result)\n if err is None and options.depfile:\n from ..Build.Dependencies import create_dependency_tree\n dependencies = create_dependency_tree(context).all_dependencies(result.main_source_file)\n Utils.write_depfile(result.c_file, result.main_source_file, dependencies)\n return result\n\n\n# ------------------------------------------------------------------------\n#\n# Main Python entry points\n#\n# ------------------------------------------------------------------------\n\nclass CompilationSource(object):\n \"\"\"\n Contains the data necessary to start up a compilation pipeline for\n a single compilation unit.\n \"\"\"\n def __init__(self, source_desc, full_module_name, cwd):\n self.source_desc = source_desc\n self.full_module_name = full_module_name\n self.cwd = cwd\n\n\nclass CompilationResult(object):\n \"\"\"\n Results from the Cython compiler:\n\n c_file string or None The generated C source file\n h_file string or None The generated C header file\n i_file string or None The generated .pxi file\n api_file string or None The generated C API .h file\n listing_file string or None File of error messages\n object_file string or None Result of compiling the C file\n extension_file string or None Result of linking the object file\n num_errors integer Number of compilation errors\n compilation_source CompilationSource\n \"\"\"\n\n def __init__(self):\n self.c_file = None\n self.h_file = None\n self.i_file = None\n self.api_file = None\n self.listing_file = None\n self.object_file = None\n self.extension_file = None\n self.main_source_file = None\n\n\nclass CompilationResultSet(dict):\n \"\"\"\n Results from compiling multiple Pyrex source files. A mapping\n from source file paths to CompilationResult instances. Also\n has the following attributes:\n\n num_errors integer Total number of compilation errors\n \"\"\"\n\n num_errors = 0\n\n def add(self, source, result):\n self[source] = result\n self.num_errors += result.num_errors\n\n\ndef compile_single(source, options, full_module_name = None):\n \"\"\"\n compile_single(source, options, full_module_name)\n\n Compile the given Pyrex implementation file and return a CompilationResult.\n Always compiles a single file; does not perform timestamp checking or\n recursion.\n \"\"\"\n return run_pipeline(source, options, full_module_name)\n\n\ndef compile_multiple(sources, options):\n \"\"\"\n compile_multiple(sources, options)\n\n Compiles the given sequence of Pyrex implementation files and returns\n a CompilationResultSet. 
Performs timestamp checking and/or recursion\n if these are specified in the options.\n \"\"\"\n if len(sources) > 1 and options.module_name:\n raise RuntimeError('Full module name can only be set '\n 'for single source compilation')\n # run_pipeline creates the context\n # context = Context.from_options(options)\n sources = [os.path.abspath(source) for source in sources]\n processed = set()\n results = CompilationResultSet()\n timestamps = options.timestamps\n verbose = options.verbose\n context = None\n cwd = os.getcwd()\n for source in sources:\n if source not in processed:\n if context is None:\n context = Context.from_options(options)\n output_filename = get_output_filename(source, cwd, options)\n out_of_date = context.c_file_out_of_date(source, output_filename)\n if (not timestamps) or out_of_date:\n if verbose:\n sys.stderr.write(\"Compiling %s\\n\" % source)\n result = run_pipeline(source, options,\n full_module_name=options.module_name,\n context=context)\n results.add(source, result)\n # Compiling multiple sources in one context doesn't quite\n # work properly yet.\n context = None\n processed.add(source)\n return results\n\n\ndef compile(source, options = None, full_module_name = None, **kwds):\n \"\"\"\n compile(source [, options], [, <option> = <value>]...)\n\n Compile one or more Pyrex implementation files, with optional timestamp\n checking and recursing on dependencies. The source argument may be a string\n or a sequence of strings. If it is a string and no recursion or timestamp\n checking is requested, a CompilationResult is returned, otherwise a\n CompilationResultSet is returned.\n \"\"\"\n options = CompilationOptions(defaults = options, **kwds)\n if isinstance(source, basestring) and not options.timestamps:\n return compile_single(source, options, full_module_name)\n else:\n return compile_multiple(source, options)\n\n\[email protected]_function\ndef search_include_directories(dirs, qualified_name, suffix=\"\", pos=None, include=False, source_file_path=None):\n \"\"\"\n Search the list of include directories for the given file name.\n\n If a source file path or position is given, first searches the directory\n containing that file. Returns None if not found, but does not report an error.\n\n The 'include' option will disable package dereferencing.\n \"\"\"\n if pos and not source_file_path:\n file_desc = pos[0]\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n source_file_path = file_desc.filename\n if source_file_path:\n if include:\n dirs = (os.path.dirname(source_file_path),) + dirs\n else:\n dirs = (Utils.find_root_package_dir(source_file_path),) + dirs\n\n # search for dotted filename e.g. <dir>/foo.bar.pxd\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n\n for dirname in dirs:\n path = os.path.join(dirname, dotted_filename)\n if os.path.exists(path):\n return path\n\n # search for filename in package structure e.g. 
<dir>/foo/bar.pxd or <dir>/foo/bar/__init__.pxd\n if not include:\n\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n\n # search for standard packages first - PEP420\n namespace_dirs = []\n for dirname in dirs:\n package_dir, is_namespace = Utils.check_package_dir(dirname, package_names)\n if package_dir is not None:\n if is_namespace:\n namespace_dirs.append(package_dir)\n continue\n path = search_module_in_dir(package_dir, module_name, suffix)\n if path:\n return path\n\n # search for namespaces second - PEP420\n for package_dir in namespace_dirs:\n path = search_module_in_dir(package_dir, module_name, suffix)\n if path:\n return path\n\n return None\n\n\[email protected]_function\ndef search_module_in_dir(package_dir, module_name, suffix):\n # matches modules of the form: <dir>/foo/bar.pxd\n path = Utils.find_versioned_file(package_dir, module_name, suffix)\n\n # matches modules of the form: <dir>/foo/bar/__init__.pxd\n if not path and suffix:\n path = Utils.find_versioned_file(os.path.join(package_dir, module_name), \"__init__\", suffix)\n\n return path\n\n\n# ------------------------------------------------------------------------\n#\n# Main command-line entry point\n#\n# ------------------------------------------------------------------------\n\ndef setuptools_main():\n return main(command_line = 1)\n\n\ndef main(command_line = 0):\n args = sys.argv[1:]\n any_failures = 0\n if command_line:\n try:\n options, sources = parse_command_line(args)\n except IOError as e:\n # TODO: IOError can be replaced with FileNotFoundError in Cython 3.1\n import errno\n if errno.ENOENT != e.errno:\n # Raised IOError is not caused by missing file.\n raise\n print(\"{}: No such file or directory: '{}'\".format(sys.argv[0], e.filename), file=sys.stderr)\n sys.exit(1)\n else:\n options = CompilationOptions(default_options)\n sources = args\n\n if options.show_version:\n from .. import __version__\n sys.stderr.write(\"Cython version %s\\n\" % __version__)\n if options.working_path!=\"\":\n os.chdir(options.working_path)\n try:\n result = compile(sources, options)\n if result.num_errors > 0:\n any_failures = 1\n except (EnvironmentError, PyrexError) as e:\n sys.stderr.write(str(e) + '\\n')\n any_failures = 1\n if any_failures:\n sys.exit(1)\n", "path": "Cython/Compiler/Main.py" } ]
[ { "content": "#\n# Cython Top Level\n#\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport io\n\nif sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 3):\n sys.stderr.write(\"Sorry, Cython requires Python 2.7 or 3.3+, found %d.%d\\n\" % tuple(sys.version_info[:2]))\n sys.exit(1)\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\n# Do not import Parsing here, import it when needed, because Parsing imports\n# Nodes, which globally needs debug command line options initialized to set a\n# conditional metaclass. These options are processed by CmdLine called from\n# main() in this file.\n# import Parsing\nfrom . import Errors\nfrom .StringEncoding import EncodedString\nfrom .Scanning import PyrexScanner, FileSourceDescriptor\nfrom .Errors import PyrexError, CompileError, error, warning\nfrom .Symtab import ModuleScope\nfrom .. import Utils\nfrom . import Options\nfrom .Options import CompilationOptions, default_options\nfrom .CmdLine import parse_command_line\nfrom .Lexicon import (unicode_start_ch_any, unicode_continuation_ch_any,\n unicode_start_ch_range, unicode_continuation_ch_range)\n\n\ndef _make_range_re(chrs):\n out = []\n for i in range(0, len(chrs), 2):\n out.append(u\"{0}-{1}\".format(chrs[i], chrs[i+1]))\n return u\"\".join(out)\n\n# py2 version looked like r\"[A-Za-z_][A-Za-z0-9_]*(\\.[A-Za-z_][A-Za-z0-9_]*)*$\"\nmodule_name_pattern = u\"[{0}{1}][{0}{2}{1}{3}]*\".format(\n unicode_start_ch_any, _make_range_re(unicode_start_ch_range),\n unicode_continuation_ch_any,\n _make_range_re(unicode_continuation_ch_range))\nmodule_name_pattern = re.compile(u\"{0}(\\\\.{0})*$\".format(module_name_pattern))\n\n\nstandard_include_path = os.path.abspath(\n os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Includes'))\n\n\nclass Context(object):\n # This class encapsulates the context needed for compiling\n # one or more Cython implementation files along with their\n # associated and imported declaration files. It includes\n # the root of the module import namespace and the list\n # of directories to search for include files.\n #\n # modules {string : ModuleScope}\n # include_directories [string]\n # future_directives [object]\n # language_level int currently 2 or 3 for Python 2/3\n\n cython_scope = None\n language_level = None # warn when not set but default to Py2\n\n def __init__(self, include_directories, compiler_directives, cpp=False,\n language_level=None, options=None):\n # cython_scope is a hack, set to False by subclasses, in order to break\n # an infinite loop.\n # Better code organization would fix it.\n\n from . 
import Builtin, CythonScope\n self.modules = {\"__builtin__\" : Builtin.builtin_scope}\n self.cython_scope = CythonScope.create_cython_scope(self)\n self.modules[\"cython\"] = self.cython_scope\n self.include_directories = include_directories\n self.future_directives = set()\n self.compiler_directives = compiler_directives\n self.cpp = cpp\n self.options = options\n\n self.pxds = {} # full name -> node tree\n self._interned = {} # (type(value), value, *key_args) -> interned_value\n\n if language_level is not None:\n self.set_language_level(language_level)\n\n self.legacy_implicit_noexcept = self.compiler_directives.get('legacy_implicit_noexcept', False)\n\n self.gdb_debug_outputwriter = None\n\n @classmethod\n def from_options(cls, options):\n return cls(options.include_path, options.compiler_directives,\n options.cplus, options.language_level, options=options)\n\n def set_language_level(self, level):\n from .Future import print_function, unicode_literals, absolute_import, division, generator_stop\n future_directives = set()\n if level == '3str':\n level = 3\n else:\n level = int(level)\n if level >= 3:\n future_directives.add(unicode_literals)\n if level >= 3:\n future_directives.update([print_function, absolute_import, division, generator_stop])\n self.language_level = level\n self.future_directives = future_directives\n if level >= 3:\n self.modules['builtins'] = self.modules['__builtin__']\n\n def intern_ustring(self, value, encoding=None):\n key = (EncodedString, value, encoding)\n try:\n return self._interned[key]\n except KeyError:\n pass\n value = EncodedString(value)\n if encoding:\n value.encoding = encoding\n self._interned[key] = value\n return value\n\n # pipeline creation functions can now be found in Pipeline.py\n\n def process_pxd(self, source_desc, scope, module_name):\n from . import Pipeline\n if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':\n source = CompilationSource(source_desc, module_name, os.getcwd())\n result_sink = create_default_resultobj(source, self.options)\n pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)\n result = Pipeline.run_pipeline(pipeline, source)\n else:\n pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)\n result = Pipeline.run_pipeline(pipeline, source_desc)\n return result\n\n def nonfatal_error(self, exc):\n return Errors.report_error(exc)\n\n def _split_qualified_name(self, qualified_name):\n # Splits qualified_name into parts in form of 2-tuples: (PART_NAME, IS_PACKAGE).\n qualified_name_parts = qualified_name.split('.')\n last_part = qualified_name_parts.pop()\n qualified_name_parts = [(p, True) for p in qualified_name_parts]\n if last_part != '__init__':\n # If Last part is __init__, then it is omitted. Otherwise, we need to check whether we can find\n # __init__.pyx/__init__.py file to determine if last part is package or not.\n is_package = False\n for suffix in ('.py', '.pyx'):\n path = self.search_include_directories(\n qualified_name, suffix=suffix, source_pos=None, source_file_path=None)\n if path:\n is_package = self._is_init_file(path)\n break\n\n qualified_name_parts.append((last_part, is_package))\n return qualified_name_parts\n\n @staticmethod\n def _is_init_file(path):\n return os.path.basename(path) in ('__init__.pyx', '__init__.py', '__init__.pxd') if path else False\n\n @staticmethod\n def _check_pxd_filename(pos, pxd_pathname, qualified_name):\n if not pxd_pathname:\n return\n pxd_filename = os.path.basename(pxd_pathname)\n if '.' 
in qualified_name and qualified_name == os.path.splitext(pxd_filename)[0]:\n warning(pos, \"Dotted filenames ('%s') are deprecated.\"\n \" Please use the normal Python package directory layout.\" % pxd_filename, level=1)\n\n def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,\n absolute_fallback=True):\n # Finds and returns the module scope corresponding to\n # the given relative or absolute module name. If this\n # is the first time the module has been requested, finds\n # the corresponding .pxd file and process it.\n # If relative_to is not None, it must be a module scope,\n # and the module will first be searched for relative to\n # that module, provided its name is not a dotted name.\n debug_find_module = 0\n if debug_find_module:\n print(\"Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s\" % (\n module_name, relative_to, pos, need_pxd))\n\n scope = None\n pxd_pathname = None\n if relative_to:\n if module_name:\n # from .module import ...\n qualified_name = relative_to.qualify_name(module_name)\n else:\n # from . import ...\n qualified_name = relative_to.qualified_name\n scope = relative_to\n relative_to = None\n else:\n qualified_name = module_name\n\n if not module_name_pattern.match(qualified_name):\n raise CompileError(pos or (module_name, 0, 0),\n u\"'%s' is not a valid module name\" % module_name)\n\n if relative_to:\n if debug_find_module:\n print(\"...trying relative import\")\n scope = relative_to.lookup_submodule(module_name)\n if not scope:\n pxd_pathname = self.find_pxd_file(qualified_name, pos)\n self._check_pxd_filename(pos, pxd_pathname, qualified_name)\n if pxd_pathname:\n is_package = self._is_init_file(pxd_pathname)\n scope = relative_to.find_submodule(module_name, as_package=is_package)\n if not scope:\n if debug_find_module:\n print(\"...trying absolute import\")\n if absolute_fallback:\n qualified_name = module_name\n scope = self\n for name, is_package in self._split_qualified_name(qualified_name):\n scope = scope.find_submodule(name, as_package=is_package)\n if debug_find_module:\n print(\"...scope = %s\" % scope)\n if not scope.pxd_file_loaded:\n if debug_find_module:\n print(\"...pxd not loaded\")\n if not pxd_pathname:\n if debug_find_module:\n print(\"...looking for pxd file\")\n # Only look in sys.path if we are explicitly looking\n # for a .pxd file.\n pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd)\n self._check_pxd_filename(pos, pxd_pathname, qualified_name)\n if debug_find_module:\n print(\"......found %s\" % pxd_pathname)\n if not pxd_pathname and need_pxd:\n # Set pxd_file_loaded such that we don't need to\n # look for the non-existing pxd file next time.\n scope.pxd_file_loaded = True\n package_pathname = self.search_include_directories(\n qualified_name, suffix=\".py\", source_pos=pos)\n if package_pathname and package_pathname.endswith(Utils.PACKAGE_FILES):\n pass\n else:\n error(pos, \"'%s.pxd' not found\" % qualified_name.replace('.', os.sep))\n if pxd_pathname:\n scope.pxd_file_loaded = True\n try:\n if debug_find_module:\n print(\"Context.find_module: Parsing %s\" % pxd_pathname)\n rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]\n if not pxd_pathname.endswith(rel_path):\n rel_path = pxd_pathname # safety measure to prevent printing incorrect paths\n source_desc = FileSourceDescriptor(pxd_pathname, rel_path)\n err, result = self.process_pxd(source_desc, scope, qualified_name)\n if err:\n raise err\n (pxd_codenodes, pxd_scope) = 
result\n self.pxds[module_name] = (pxd_codenodes, pxd_scope)\n except CompileError:\n pass\n return scope\n\n def find_pxd_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None):\n # Search include path (and sys.path if sys_path is True) for\n # the .pxd file corresponding to the given fully-qualified\n # module name.\n # Will find either a dotted filename or a file in a\n # package directory. If a source file position is given,\n # the directory containing the source file is searched first\n # for a dotted filename, and its containing package root\n # directory is searched first for a non-dotted filename.\n pxd = self.search_include_directories(\n qualified_name, suffix=\".pxd\", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path)\n if pxd is None and Options.cimport_from_pyx:\n return self.find_pyx_file(qualified_name, pos)\n return pxd\n\n def find_pyx_file(self, qualified_name, pos=None, source_file_path=None):\n # Search include path for the .pyx file corresponding to the\n # given fully-qualified module name, as for find_pxd_file().\n return self.search_include_directories(\n qualified_name, suffix=\".pyx\", source_pos=pos, source_file_path=source_file_path)\n\n def find_include_file(self, filename, pos=None, source_file_path=None):\n # Search list of include directories for filename.\n # Reports an error and returns None if not found.\n path = self.search_include_directories(\n filename, source_pos=pos, include=True, source_file_path=source_file_path)\n if not path:\n error(pos, \"'%s' not found\" % filename)\n return path\n\n def search_include_directories(self, qualified_name,\n suffix=None, source_pos=None, include=False, sys_path=False, source_file_path=None):\n include_dirs = self.include_directories\n if sys_path:\n include_dirs = include_dirs + sys.path\n # include_dirs must be hashable for caching in @cached_function\n include_dirs = tuple(include_dirs + [standard_include_path])\n return search_include_directories(\n include_dirs, qualified_name, suffix or \"\", source_pos, include, source_file_path)\n\n def find_root_package_dir(self, file_path):\n return Utils.find_root_package_dir(file_path)\n\n def check_package_dir(self, dir, package_names):\n return Utils.check_package_dir(dir, tuple(package_names))\n\n def c_file_out_of_date(self, source_path, output_path):\n if not os.path.exists(output_path):\n return 1\n c_time = Utils.modification_time(output_path)\n if Utils.file_newer_than(source_path, c_time):\n return 1\n pxd_path = Utils.replace_suffix(source_path, \".pxd\")\n if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):\n return 1\n for kind, name in self.read_dependency_file(source_path):\n if kind == \"cimport\":\n dep_path = self.find_pxd_file(name, source_file_path=source_path)\n elif kind == \"include\":\n dep_path = self.search_include_directories(name, source_file_path=source_path)\n else:\n continue\n if dep_path and Utils.file_newer_than(dep_path, c_time):\n return 1\n return 0\n\n def find_cimported_module_names(self, source_path):\n return [ name for kind, name in self.read_dependency_file(source_path)\n if kind == \"cimport\" ]\n\n def is_package_dir(self, dir_path):\n return Utils.is_package_dir(dir_path)\n\n def read_dependency_file(self, source_path):\n dep_path = Utils.replace_suffix(source_path, \".dep\")\n if os.path.exists(dep_path):\n with open(dep_path, \"rU\") as f:\n chunks = [ line.split(\" \", 1)\n for line in (l.strip() for l in f)\n if \" \" in line ]\n return chunks\n else:\n return 
()\n\n def lookup_submodule(self, name):\n # Look up a top-level module. Returns None if not found.\n return self.modules.get(name, None)\n\n def find_submodule(self, name, as_package=False):\n # Find a top-level module, creating a new one if needed.\n scope = self.lookup_submodule(name)\n if not scope:\n scope = ModuleScope(name,\n parent_module = None, context = self, is_package=as_package)\n self.modules[name] = scope\n return scope\n\n def parse(self, source_desc, scope, pxd, full_module_name):\n if not isinstance(source_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n source_filename = source_desc.filename\n scope.cpp = self.cpp\n # Parse the given source file and return a parse tree.\n num_errors = Errors.get_errors_count()\n try:\n with Utils.open_source_file(source_filename) as f:\n from . import Parsing\n s = PyrexScanner(f, source_desc, source_encoding = f.encoding,\n scope = scope, context = self)\n tree = Parsing.p_module(s, pxd, full_module_name)\n if self.options.formal_grammar:\n try:\n from ..Parser import ConcreteSyntaxTree\n except ImportError:\n raise RuntimeError(\n \"Formal grammar can only be used with compiled Cython with an available pgen.\")\n ConcreteSyntaxTree.p_module(source_filename)\n except UnicodeDecodeError as e:\n #import traceback\n #traceback.print_exc()\n raise self._report_decode_error(source_desc, e)\n\n if Errors.get_errors_count() > num_errors:\n raise CompileError()\n return tree\n\n def _report_decode_error(self, source_desc, exc):\n msg = exc.args[-1]\n position = exc.args[2]\n encoding = exc.args[0]\n\n line = 1\n column = idx = 0\n with io.open(source_desc.filename, \"r\", encoding='iso8859-1', newline='') as f:\n for line, data in enumerate(f, 1):\n idx += len(data)\n if idx >= position:\n column = position - (idx - len(data)) + 1\n break\n\n return error((source_desc, line, column),\n \"Decoding error, missing or incorrect coding=<encoding-name> \"\n \"at top of source (cannot decode with encoding %r: %s)\" % (encoding, msg))\n\n def extract_module_name(self, path, options):\n # Find fully_qualified module name from the full pathname\n # of a source file.\n dir, filename = os.path.split(path)\n module_name, _ = os.path.splitext(filename)\n if \".\" in module_name:\n return module_name\n names = [module_name]\n while self.is_package_dir(dir):\n parent, package_name = os.path.split(dir)\n if parent == dir:\n break\n names.append(package_name)\n dir = parent\n names.reverse()\n return \".\".join(names)\n\n def setup_errors(self, options, result):\n Errors.init_thread()\n if options.use_listing_file:\n path = result.listing_file = Utils.replace_suffix(result.main_source_file, \".lis\")\n else:\n path = None\n Errors.open_listing_file(path=path, echo_to_stderr=options.errors_to_stderr)\n\n def teardown_errors(self, err, options, result):\n source_desc = result.compilation_source.source_desc\n if not isinstance(source_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n Errors.close_listing_file()\n result.num_errors = Errors.get_errors_count()\n if result.num_errors > 0:\n err = True\n if err and result.c_file:\n try:\n Utils.castrate_file(result.c_file, os.stat(source_desc.filename))\n except EnvironmentError:\n pass\n result.c_file = None\n\n\ndef get_output_filename(source_filename, cwd, options):\n if options.cplus:\n c_suffix = \".cpp\"\n else:\n c_suffix = \".c\"\n suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)\n if 
options.output_file:\n out_path = os.path.join(cwd, options.output_file)\n if os.path.isdir(out_path):\n return os.path.join(out_path, os.path.basename(suggested_file_name))\n else:\n return out_path\n else:\n return suggested_file_name\n\n\ndef create_default_resultobj(compilation_source, options):\n result = CompilationResult()\n result.main_source_file = compilation_source.source_desc.filename\n result.compilation_source = compilation_source\n source_desc = compilation_source.source_desc\n result.c_file = get_output_filename(source_desc.filename,\n compilation_source.cwd, options)\n result.embedded_metadata = options.embedded_metadata\n return result\n\n\ndef run_pipeline(source, options, full_module_name=None, context=None):\n from . import Pipeline\n\n # ensure that the inputs are unicode (for Python 2)\n if sys.version_info[0] == 2:\n source = Utils.decode_filename(source)\n if full_module_name:\n full_module_name = Utils.decode_filename(full_module_name)\n\n source_ext = os.path.splitext(source)[1]\n options.configure_language_defaults(source_ext[1:]) # py/pyx\n if context is None:\n context = Context.from_options(options)\n\n # Set up source object\n cwd = os.getcwd()\n abs_path = os.path.abspath(source)\n full_module_name = full_module_name or context.extract_module_name(source, options)\n full_module_name = EncodedString(full_module_name)\n\n Utils.raise_error_if_module_name_forbidden(full_module_name)\n\n if options.relative_path_in_code_position_comments:\n rel_path = full_module_name.replace('.', os.sep) + source_ext\n if not abs_path.endswith(rel_path):\n rel_path = source # safety measure to prevent printing incorrect paths\n else:\n rel_path = abs_path\n source_desc = FileSourceDescriptor(abs_path, rel_path)\n source = CompilationSource(source_desc, full_module_name, cwd)\n\n # Set up result object\n result = create_default_resultobj(source, options)\n\n if options.annotate is None:\n # By default, decide based on whether an html file already exists.\n html_filename = os.path.splitext(result.c_file)[0] + \".html\"\n if os.path.exists(html_filename):\n with io.open(html_filename, \"r\", encoding=\"UTF-8\") as html_file:\n if u'<!-- Generated by Cython' in html_file.read(100):\n options.annotate = True\n\n # Get pipeline\n if source_ext.lower() == '.py' or not source_ext:\n pipeline = Pipeline.create_py_pipeline(context, options, result)\n else:\n pipeline = Pipeline.create_pyx_pipeline(context, options, result)\n\n context.setup_errors(options, result)\n\n if '.' in full_module_name and '.' 
in os.path.splitext(os.path.basename(abs_path))[0]:\n warning((source_desc, 1, 0),\n \"Dotted filenames ('%s') are deprecated.\"\n \" Please use the normal Python package directory layout.\" % os.path.basename(abs_path), level=1)\n\n err, enddata = Pipeline.run_pipeline(pipeline, source)\n context.teardown_errors(err, options, result)\n if err is None and options.depfile:\n from ..Build.Dependencies import create_dependency_tree\n dependencies = create_dependency_tree(context).all_dependencies(result.main_source_file)\n Utils.write_depfile(result.c_file, result.main_source_file, dependencies)\n return result\n\n\n# ------------------------------------------------------------------------\n#\n# Main Python entry points\n#\n# ------------------------------------------------------------------------\n\nclass CompilationSource(object):\n \"\"\"\n Contains the data necessary to start up a compilation pipeline for\n a single compilation unit.\n \"\"\"\n def __init__(self, source_desc, full_module_name, cwd):\n self.source_desc = source_desc\n self.full_module_name = full_module_name\n self.cwd = cwd\n\n\nclass CompilationResult(object):\n \"\"\"\n Results from the Cython compiler:\n\n c_file string or None The generated C source file\n h_file string or None The generated C header file\n i_file string or None The generated .pxi file\n api_file string or None The generated C API .h file\n listing_file string or None File of error messages\n object_file string or None Result of compiling the C file\n extension_file string or None Result of linking the object file\n num_errors integer Number of compilation errors\n compilation_source CompilationSource\n \"\"\"\n\n def __init__(self):\n self.c_file = None\n self.h_file = None\n self.i_file = None\n self.api_file = None\n self.listing_file = None\n self.object_file = None\n self.extension_file = None\n self.main_source_file = None\n\n\nclass CompilationResultSet(dict):\n \"\"\"\n Results from compiling multiple Pyrex source files. A mapping\n from source file paths to CompilationResult instances. Also\n has the following attributes:\n\n num_errors integer Total number of compilation errors\n \"\"\"\n\n num_errors = 0\n\n def add(self, source, result):\n self[source] = result\n self.num_errors += result.num_errors\n\n\ndef compile_single(source, options, full_module_name = None):\n \"\"\"\n compile_single(source, options, full_module_name)\n\n Compile the given Pyrex implementation file and return a CompilationResult.\n Always compiles a single file; does not perform timestamp checking or\n recursion.\n \"\"\"\n return run_pipeline(source, options, full_module_name)\n\n\ndef compile_multiple(sources, options):\n \"\"\"\n compile_multiple(sources, options)\n\n Compiles the given sequence of Pyrex implementation files and returns\n a CompilationResultSet. 
Performs timestamp checking and/or recursion\n if these are specified in the options.\n \"\"\"\n if len(sources) > 1 and options.module_name:\n raise RuntimeError('Full module name can only be set '\n 'for single source compilation')\n # run_pipeline creates the context\n # context = Context.from_options(options)\n sources = [os.path.abspath(source) for source in sources]\n processed = set()\n results = CompilationResultSet()\n timestamps = options.timestamps\n verbose = options.verbose\n context = None\n cwd = os.getcwd()\n for source in sources:\n if source not in processed:\n if context is None:\n context = Context.from_options(options)\n output_filename = get_output_filename(source, cwd, options)\n out_of_date = context.c_file_out_of_date(source, output_filename)\n if (not timestamps) or out_of_date:\n if verbose:\n sys.stderr.write(\"Compiling %s\\n\" % source)\n result = run_pipeline(source, options,\n full_module_name=options.module_name,\n context=context)\n results.add(source, result)\n # Compiling multiple sources in one context doesn't quite\n # work properly yet.\n context = None\n processed.add(source)\n return results\n\n\ndef compile(source, options = None, full_module_name = None, **kwds):\n \"\"\"\n compile(source [, options], [, <option> = <value>]...)\n\n Compile one or more Pyrex implementation files, with optional timestamp\n checking and recursing on dependencies. The source argument may be a string\n or a sequence of strings. If it is a string and no recursion or timestamp\n checking is requested, a CompilationResult is returned, otherwise a\n CompilationResultSet is returned.\n \"\"\"\n options = CompilationOptions(defaults = options, **kwds)\n if isinstance(source, basestring) and not options.timestamps:\n return compile_single(source, options, full_module_name)\n else:\n return compile_multiple(source, options)\n\n\[email protected]_function\ndef search_include_directories(dirs, qualified_name, suffix=\"\", pos=None, include=False, source_file_path=None):\n \"\"\"\n Search the list of include directories for the given file name.\n\n If a source file path or position is given, first searches the directory\n containing that file. Returns None if not found, but does not report an error.\n\n The 'include' option will disable package dereferencing.\n \"\"\"\n if pos and not source_file_path:\n file_desc = pos[0]\n if not isinstance(file_desc, FileSourceDescriptor):\n raise RuntimeError(\"Only file sources for code supported\")\n source_file_path = file_desc.filename\n if source_file_path:\n if include:\n dirs = (os.path.dirname(source_file_path),) + dirs\n else:\n dirs = (Utils.find_root_package_dir(source_file_path),) + dirs\n\n # search for dotted filename e.g. <dir>/foo.bar.pxd\n dotted_filename = qualified_name\n if suffix:\n dotted_filename += suffix\n\n for dirname in dirs:\n path = os.path.join(dirname, dotted_filename)\n if os.path.exists(path):\n return path\n\n # search for filename in package structure e.g. 
<dir>/foo/bar.pxd or <dir>/foo/bar/__init__.pxd\n if not include:\n\n names = qualified_name.split('.')\n package_names = tuple(names[:-1])\n module_name = names[-1]\n\n # search for standard packages first - PEP420\n namespace_dirs = []\n for dirname in dirs:\n package_dir, is_namespace = Utils.check_package_dir(dirname, package_names)\n if package_dir is not None:\n if is_namespace:\n namespace_dirs.append(package_dir)\n continue\n path = search_module_in_dir(package_dir, module_name, suffix)\n if path:\n return path\n\n # search for namespaces second - PEP420\n for package_dir in namespace_dirs:\n path = search_module_in_dir(package_dir, module_name, suffix)\n if path:\n return path\n\n return None\n\n\[email protected]_function\ndef search_module_in_dir(package_dir, module_name, suffix):\n # matches modules of the form: <dir>/foo/bar.pxd\n path = Utils.find_versioned_file(package_dir, module_name, suffix)\n\n # matches modules of the form: <dir>/foo/bar/__init__.pxd\n if not path and suffix:\n path = Utils.find_versioned_file(os.path.join(package_dir, module_name), \"__init__\", suffix)\n\n return path\n\n\n# ------------------------------------------------------------------------\n#\n# Main command-line entry point\n#\n# ------------------------------------------------------------------------\n\ndef setuptools_main():\n return main(command_line = 1)\n\n\ndef main(command_line = 0):\n args = sys.argv[1:]\n any_failures = 0\n if command_line:\n try:\n options, sources = parse_command_line(args)\n except IOError as e:\n # TODO: IOError can be replaced with FileNotFoundError in Cython 3.1\n import errno\n if errno.ENOENT != e.errno:\n # Raised IOError is not caused by missing file.\n raise\n print(\"{}: No such file or directory: '{}'\".format(sys.argv[0], e.filename), file=sys.stderr)\n sys.exit(1)\n else:\n options = CompilationOptions(default_options)\n sources = args\n\n if options.show_version:\n from .. import __version__\n print(\"Cython version %s\" % __version__)\n if options.working_path!=\"\":\n os.chdir(options.working_path)\n try:\n result = compile(sources, options)\n if result.num_errors > 0:\n any_failures = 1\n except (EnvironmentError, PyrexError) as e:\n sys.stderr.write(str(e) + '\\n')\n any_failures = 1\n if any_failures:\n sys.exit(1)\n", "path": "Cython/Compiler/Main.py" } ]
diff --git a/Cython/Compiler/Main.py b/Cython/Compiler/Main.py index a03f16c48e3..a5d1c7d5728 100644 --- a/Cython/Compiler/Main.py +++ b/Cython/Compiler/Main.py @@ -773,7 +773,7 @@ def main(command_line = 0): if options.show_version: from .. import __version__ - sys.stderr.write("Cython version %s\n" % __version__) + print("Cython version %s" % __version__) if options.working_path!="": os.chdir(options.working_path) try:
hpcaitech__ColossalAI-5321
[tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests
[ { "content": "#!/usr/bin/env python\n\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.distributed as dist\n\nfrom .base_accelerator import BaseAccelerator\n\ntry:\n import torch_npu # noqa\nexcept ImportError:\n pass\n\n\n__all__ = [\"NpuAccelerator\"]\n\n\nclass NpuAccelerator(BaseAccelerator):\n \"\"\"\n Accelerator class for Huawei NPU devices.\n \"\"\"\n\n def __init__(self):\n super().__init__(name=\"npu\", communication_backend=\"hccl\", is_synchronous=False)\n\n # =======================\n # device APIs\n # =======================\n def get_version(self) -> str:\n \"\"\"\n Return the version of the accelerator which torch is built against.\n \"\"\"\n return torch.version.npu\n\n def get_current_device(self) -> torch.device:\n \"\"\"\n Return the current device.\n \"\"\"\n return torch.device(f\"npu:{torch.npu.current_device()}\")\n\n def current_device(self) -> int:\n \"\"\"\n Return the current device index.\n \"\"\"\n return torch.npu.current_device()\n\n def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:\n \"\"\"\n Bind the current process to a device.\n \"\"\"\n if device is None:\n if not dist.is_initialized():\n raise RuntimeError(\"Cannot get current device when distributed is not initialized.\")\n device = dist.get_rank() % self.device_count()\n torch.npu.set_device(device)\n\n def get_device_name(self, device: Union[torch.device, int]) -> str:\n \"\"\"\n Return the name of the device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def synchronize(self, device: Union[torch.device, int] = None):\n \"\"\"\n Synchronize the current process.\n \"\"\"\n torch.npu.synchronize(device)\n\n def is_available(self):\n \"\"\"\n Check if the accelerator is available.\n \"\"\"\n return torch.npu.is_available()\n\n def device_count(self):\n \"\"\"\n Return the number of devices on the machine.\n \"\"\"\n return torch.npu.device_count()\n\n def get_device_capability(self, device=None) -> Tuple[int, int]:\n \"\"\"\n Gets the npu capability of a device.\n \"\"\"\n return torch.npu.get_device_capability(device)\n\n def get_device_name(self, device=None) -> str:\n \"\"\"\n Gets the name of a device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def get_device_properties(self, device):\n \"\"\"\n Gets the properties of a device.\n \"\"\"\n return torch.npu.get_device_properties(device)\n\n def utilization(self, device=None) -> int:\n \"\"\"\n Returns the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by nvidia-smi\n \"\"\"\n return torch.npu.utilization(device)\n\n # =======================\n # random number generator APIs\n # =======================\n def get_rng_state(self, device=\"npu\") -> torch.Tensor:\n \"\"\"\n Returns the random number generator state of the specified GPU as a ByteTensor.\n \"\"\"\n return torch.npu.get_rng_state(device)\n\n def get_rng_state_all(self) -> List[torch.Tensor]:\n \"\"\"\n Returns a list of ByteTensor representing the random number states of all devices.\n \"\"\"\n return torch.npu.get_rng_state_all()\n\n def set_rng_state(self, new_state: torch.ByteTensor, device: str = \"npu\") -> None:\n \"\"\"\n Sets the random number generator state of the specified GPU.\n \"\"\"\n torch.npu.set_rng_state(new_state, device)\n\n def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:\n \"\"\"\n Sets the random number generator state of all devices.\n \"\"\"\n 
torch.npu.set_rng_state_all(new_states)\n\n def manual_seed(self, seed: int) -> None:\n \"\"\"\n Sets the seed for generating random numbers for the current GPU.\n \"\"\"\n torch.npu.manual_seed(seed)\n\n def manual_seed_all(self, seed: int) -> None:\n \"\"\"\n Set the random seed for the all processes.\n \"\"\"\n torch.npu.manual_seed_all(seed)\n\n def seed(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number for the current GPU.\n \"\"\"\n torch.npu.seed()\n\n def seed_all(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number on all GPUs.\n \"\"\"\n torch.npu.seed_all()\n\n def initial_seed(self) -> int:\n \"\"\"\n Returns the current random seed of the current GPU.\n \"\"\"\n return torch.npu.initial_seed()\n\n # =======================\n # memory management APIs\n # =======================\n\n def empty_cache(self) -> None:\n \"\"\"\n Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in nvidia-smi.\n \"\"\"\n torch.npu.empty_cache()\n\n def memory_stats(self, device=None) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of npu memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_stats(device=device)\n\n def memory_summary(self, device=None, abbreviated=False) -> str:\n \"\"\"\n Returns a human-readable printout of the current memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_summary(device=device, abbreviated=abbreviated)\n\n def memory_snapshot(self):\n \"\"\"\n Returns a snapshot of the npu memory allocator state across all devices.\n \"\"\"\n return torch.npu.memory_snapshot()\n\n def memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.memory_allocated(device=device)\n\n def max_memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.max_memory_allocated(device=device)\n\n def reset_max_memory_allocated(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.\n \"\"\"\n torch.npu.reset_max_memory_allocated(device=device)\n\n def reset_max_memory_cached(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.\n \"\"\"\n torch.npu.reset_max_memory_cached(device=device)\n\n def memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return torch.npu.memory_reserved(device=device)\n\n def max_memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return torch.npu.max_memory_reserved(device=device)\n\n def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:\n \"\"\"\n Set memory fraction for a process.\n \"\"\"\n torch.npu.set_per_process_memory_fraction(fraction, device=device)\n\n def reset_peak_memory_stats(self, device=None) -> None:\n \"\"\"\n Resets the \"peak\" stats tracked by the npu memory allocator.\n \"\"\"\n torch.npu.reset_peak_memory_stats(device=device)\n\n # =======================\n # streams and events APIs\n # =======================\n\n 
def Stream(self, device=None, priority=0, **kwargs):\n \"\"\"\n A npu stream is a linear sequence of execution that belongs to a specific device, independent from other streams. See npu-semantics for details.\n \"\"\"\n return torch.npu.Stream(device, priority, **kwargs)\n\n def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):\n \"\"\"\n npu events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize npu streams.\n \"\"\"\n return torch.npu.Event(enable_timing, blocking, interprocess)\n\n def current_stream(self, device=None):\n \"\"\"\n Returns the currently selected Stream for a given device.\n \"\"\"\n return torch.npu.current_stream(device)\n\n def default_stream(self, device=None):\n \"\"\"\n Returns the default Stream for a given device.\n \"\"\"\n return torch.npu.default_stream(device)\n\n def set_stream(self, stream_):\n \"\"\"\n Sets the current stream.This is a wrapper API to set the stream.\n \"\"\"\n torch.npu.set_stream(stream_)\n\n def stream(self, stream_):\n \"\"\"\n Wrapper around the Context-manager StreamContext that selects a given stream.\n \"\"\"\n return torch.npu.stream(stream_)\n\n # =======================\n # amp APIs\n # =======================\n def autocast(\n self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True\n ) -> Callable:\n \"\"\"\n Return autocast function\n \"\"\"\n return torch.npu.amp.autocast(enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n", "path": "colossalai/accelerator/npu_accelerator.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.distributed as dist\n\nfrom .base_accelerator import BaseAccelerator\n\ntry:\n import torch_npu # noqa\nexcept ImportError:\n pass\n\n\n__all__ = [\"NpuAccelerator\"]\n\n\nclass NpuAccelerator(BaseAccelerator):\n \"\"\"\n Accelerator class for Huawei NPU devices.\n \"\"\"\n\n def __init__(self):\n super().__init__(name=\"npu\", communication_backend=\"hccl\", is_synchronous=False)\n\n # =======================\n # device APIs\n # =======================\n def get_version(self) -> str:\n \"\"\"\n Return the version of the accelerator which torch is built against.\n \"\"\"\n return torch.version.cann\n\n def get_current_device(self) -> torch.device:\n \"\"\"\n Return the current device.\n \"\"\"\n return torch.device(f\"npu:{torch.npu.current_device()}\")\n\n def current_device(self) -> int:\n \"\"\"\n Return the current device index.\n \"\"\"\n return torch.npu.current_device()\n\n def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:\n \"\"\"\n Bind the current process to a device.\n \"\"\"\n if device is None:\n if not dist.is_initialized():\n raise RuntimeError(\"Cannot get current device when distributed is not initialized.\")\n device = dist.get_rank() % self.device_count()\n torch.npu.set_device(device)\n\n def get_device_name(self, device: Union[torch.device, int]) -> str:\n \"\"\"\n Return the name of the device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def synchronize(self, device: Union[torch.device, int] = None):\n \"\"\"\n Synchronize the current process.\n \"\"\"\n torch.npu.synchronize(device)\n\n def is_available(self):\n \"\"\"\n Check if the accelerator is available.\n \"\"\"\n return torch.npu.is_available()\n\n def device_count(self):\n \"\"\"\n Return the number of devices on the machine.\n \"\"\"\n return torch.npu.device_count()\n\n def get_device_capability(self, device=None) -> Tuple[int, int]:\n \"\"\"\n Gets the npu capability of a device.\n \"\"\"\n return torch.npu.get_device_capability(device)\n\n def get_device_name(self, device=None) -> str:\n \"\"\"\n Gets the name of a device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def get_device_properties(self, device):\n \"\"\"\n Gets the properties of a device.\n \"\"\"\n return torch.npu.get_device_properties(device)\n\n def utilization(self, device=None) -> int:\n \"\"\"\n Returns the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by nvidia-smi\n \"\"\"\n return torch.npu.utilization(device)\n\n # =======================\n # random number generator APIs\n # =======================\n def get_rng_state(self, device=\"npu\") -> torch.Tensor:\n \"\"\"\n Returns the random number generator state of the specified GPU as a ByteTensor.\n \"\"\"\n return torch.npu.get_rng_state(device)\n\n def get_rng_state_all(self) -> List[torch.Tensor]:\n \"\"\"\n Returns a list of ByteTensor representing the random number states of all devices.\n \"\"\"\n return torch.npu.get_rng_state_all()\n\n def set_rng_state(self, new_state: torch.ByteTensor, device: str = \"npu\") -> None:\n \"\"\"\n Sets the random number generator state of the specified GPU.\n \"\"\"\n torch.npu.set_rng_state(new_state, device)\n\n def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:\n \"\"\"\n Sets the random number generator state of all devices.\n \"\"\"\n 
torch.npu.set_rng_state_all(new_states)\n\n def manual_seed(self, seed: int) -> None:\n \"\"\"\n Sets the seed for generating random numbers for the current GPU.\n \"\"\"\n torch.npu.manual_seed(seed)\n\n def manual_seed_all(self, seed: int) -> None:\n \"\"\"\n Set the random seed for the all processes.\n \"\"\"\n torch.npu.manual_seed_all(seed)\n\n def seed(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number for the current GPU.\n \"\"\"\n torch.npu.seed()\n\n def seed_all(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number on all GPUs.\n \"\"\"\n torch.npu.seed_all()\n\n def initial_seed(self) -> int:\n \"\"\"\n Returns the current random seed of the current GPU.\n \"\"\"\n return torch.npu.initial_seed()\n\n # =======================\n # memory management APIs\n # =======================\n\n def empty_cache(self) -> None:\n \"\"\"\n Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in nvidia-smi.\n \"\"\"\n torch.npu.empty_cache()\n\n def memory_stats(self, device=None) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of npu memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_stats(device=device)\n\n def memory_summary(self, device=None, abbreviated=False) -> str:\n \"\"\"\n Returns a human-readable printout of the current memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_summary(device=device, abbreviated=abbreviated)\n\n def memory_snapshot(self):\n \"\"\"\n Returns a snapshot of the npu memory allocator state across all devices.\n \"\"\"\n return torch.npu.memory_snapshot()\n\n def memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.memory_allocated(device=device)\n\n def max_memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.max_memory_allocated(device=device)\n\n def reset_max_memory_allocated(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.\n \"\"\"\n torch.npu.reset_max_memory_allocated(device=device)\n\n def reset_max_memory_cached(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.\n \"\"\"\n torch.npu.reset_max_memory_cached(device=device)\n\n def memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return torch.npu.memory_reserved(device=device)\n\n def max_memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return torch.npu.max_memory_reserved(device=device)\n\n def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:\n \"\"\"\n Set memory fraction for a process.\n \"\"\"\n torch.npu.set_per_process_memory_fraction(fraction, device=device)\n\n def reset_peak_memory_stats(self, device=None) -> None:\n \"\"\"\n Resets the \"peak\" stats tracked by the npu memory allocator.\n \"\"\"\n torch.npu.reset_peak_memory_stats(device=device)\n\n # =======================\n # streams and events APIs\n # =======================\n\n 
def Stream(self, device=None, priority=0, **kwargs):\n \"\"\"\n A npu stream is a linear sequence of execution that belongs to a specific device, independent from other streams. See npu-semantics for details.\n \"\"\"\n return torch.npu.Stream(device, priority, **kwargs)\n\n def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):\n \"\"\"\n npu events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize npu streams.\n \"\"\"\n return torch.npu.Event(enable_timing, blocking, interprocess)\n\n def current_stream(self, device=None):\n \"\"\"\n Returns the currently selected Stream for a given device.\n \"\"\"\n return torch.npu.current_stream(device)\n\n def default_stream(self, device=None):\n \"\"\"\n Returns the default Stream for a given device.\n \"\"\"\n return torch.npu.default_stream(device)\n\n def set_stream(self, stream_):\n \"\"\"\n Sets the current stream.This is a wrapper API to set the stream.\n \"\"\"\n torch.npu.set_stream(stream_)\n\n def stream(self, stream_):\n \"\"\"\n Wrapper around the Context-manager StreamContext that selects a given stream.\n \"\"\"\n return torch.npu.stream(stream_)\n\n # =======================\n # amp APIs\n # =======================\n def autocast(\n self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True\n ) -> Callable:\n \"\"\"\n Return autocast function\n \"\"\"\n return torch.npu.amp.autocast(enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n", "path": "colossalai/accelerator/npu_accelerator.py" } ]
diff --git a/colossalai/accelerator/npu_accelerator.py b/colossalai/accelerator/npu_accelerator.py index 1a86f84cb2d4..b28492968eeb 100644 --- a/colossalai/accelerator/npu_accelerator.py +++ b/colossalai/accelerator/npu_accelerator.py @@ -31,7 +31,7 @@ def get_version(self) -> str: """ Return the version of the accelerator which torch is built against. """ - return torch.version.npu + return torch.version.cann def get_current_device(self) -> torch.device: """
bookwyrm-social__bookwyrm-1410
Unable to post review in Safari on iPadOS **Describe the bug** When trying to post a long-ish review in Safari on iPadOS (desktop mode, content blockers disabled), the post button shows the spinner introduced with #1388, but the posting never concludes. **To Reproduce** Steps to reproduce the behavior: 1. Get an iPad pro 13.9” running iPadOS 14.7.1 2. Open your bookwyrm.social account in Safari, ensuring it runs in desktop mode and content blockers are disabled 3. Write a review of at least 2700 chars 4. Try to post it **Expected behavior** After some spinning, the review appears on your feed. **Instance** bookwyrm.social **Extra context** [Book I’m trying to review](https://bookwyrm.social/book/214201). --- **Device Info:** - Device: iPad pro 2nd gen 13.9” - OS: iPadOS 14.7.1 - Browser: Safari - Version N/A
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"19447742\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 
6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n 
MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py" } ]
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"e5832a26\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 
6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n 
MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py" } ]
diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
index 420d840a6a..452b8d9403 100644
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -13,7 +13,7 @@
 PAGE_LENGTH = env("PAGE_LENGTH", 15)
 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
 
-JS_CACHE = "19447742"
+JS_CACHE = "e5832a26"
 
 # email
 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
diff --git a/bookwyrm/static/js/status_cache.js b/bookwyrm/static/js/status_cache.js
index a6eaf03419..b3e345b192 100644
--- a/bookwyrm/static/js/status_cache.js
+++ b/bookwyrm/static/js/status_cache.js
@@ -64,9 +64,21 @@ let StatusCache = new class {
      * @return {undefined}
      */
     submitStatus(event) {
-        event.preventDefault();
         const form = event.currentTarget;
-        const trigger = event.submitter;
+        let trigger = event.submitter;
+
+        // Safari doesn't understand "submitter"
+        if (!trigger) {
+            trigger = event.currentTarget.querySelector("button[type=submit]");
+        }
+
+        // This allows the form to submit in the old fashioned way if there's a problem
+
+        if (!trigger || !form) {
+            return;
+        }
+
+        event.preventDefault();
 
         BookWyrm.addRemoveClass(form, 'is-processing', true);
         trigger.setAttribute('disabled', null);
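Reading the diff above, the root cause appears to be that the affected Safari/iPadOS builds do not populate `event.submitter`. The handler therefore falls back to querying the form for `button[type=submit]`, and it now calls `event.preventDefault()` only after both the form and a trigger button have been resolved; if either lookup fails, the form is left to submit "in the old fashioned way" rather than hanging on the spinner.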
scverse__scanpy-1002
Plotting triggering copy of view
Example using scanpy 9dd2e94846aa and anndata `762fdb924e757cdd758231`

```python
import scanpy as sc

pbmc = sc.datasets.pbmc3k_processed()
sc.pl.umap(pbmc, color="louvain")  # To make sure that "louvain_colors" has been made
bcells = pbmc[pbmc.obs["louvain"] == "B cells"]

# This line triggers a copy being made:
sc.pl.umap(bcells)
# /Users/isaac/github/anndata/anndata/_core/anndata.py:1120: ImplicitModificationWarning:
#     Initializing view as actual.
#   "Initializing view as actual.", ImplicitModificationWarning,

assert not bcells.is_view
```

Pretty sure that shouldn't be making a copy, since nothing should be modified in the view. To make sure:

```python
from anndata.tests.helpers import assert_equal

bcells_view = pbmc[pbmc.obs["louvain"] == "B cells"]
assert_equal(bcells, bcells_view, exact=True)
```

This also seems to be happening with some of the other plotting functions, like `sc.pl.rank_genes_groups_dotplot`.

Elaborating a bit: To me this is an issue since it will use quite a lot of memory for cases where it isn't needed. Why copy a large number of arrays when you don't need to?
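The fix in the after_files below ("Don't modify if nothing changed" in `_validate_palette`) amounts to a write guard: only assign into `adata.uns` when the palette actually differs, so plotting a view whose colors are already valid never writes to the object. A minimal sketch of that idea, assuming scanpy is installed and the pbmc3k download is available; the `set_colors_if_changed` helper is hypothetical, not scanpy API:

```python
import scanpy as sc

pbmc = sc.datasets.pbmc3k_processed()
sc.pl.umap(pbmc, color="louvain")  # populates pbmc.uns["louvain_colors"]
bcells = pbmc[pbmc.obs["louvain"] == "B cells"]


def set_colors_if_changed(adata, key, new_colors):
    """Hypothetical helper: write adata.uns[f"{key}_colors"] only if it differs."""
    color_key = f"{key}_colors"
    if color_key not in adata.uns or list(adata.uns[color_key]) != list(new_colors):
        # This assignment is the write that turns a view into an actual copy.
        adata.uns[color_key] = new_colors


# Re-assigning an identical palette is a no-op, so the view survives:
set_colors_if_changed(bcells, "louvain", list(bcells.uns["louvain_colors"]))
assert bcells.is_view
```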
[ { "content": "import warnings\nimport collections.abc as cabc\nfrom typing import Union, List, Sequence, Tuple, Collection, Optional\n\nimport numpy as np\nfrom matplotlib import pyplot as pl\nfrom matplotlib import rcParams, ticker\nfrom matplotlib.axes import Axes\nfrom matplotlib.colors import is_color_like\nfrom matplotlib.figure import SubplotParams as sppars, Figure\nfrom cycler import Cycler, cycler\n\nfrom .. import logging as logg\nfrom .._settings import settings\nfrom .._compat import Literal\nfrom . import palettes\n\n\n_tmp_cluster_pos = None # just a hacky solution for storing a tmp global variable\n\nColorLike = Union[str, Tuple[float, ...]]\n_IGraphLayout = Literal['fa', 'fr', 'rt', 'rt_circular', 'drl', 'eq_tree', ...]\n_FontWeight = Literal[\n 'light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black'\n]\n_FontSize = Literal[\n 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'\n]\n\n\n# -------------------------------------------------------------------------------\n# Simple plotting functions\n# -------------------------------------------------------------------------------\n\n\ndef matrix(matrix, xlabel=None, ylabel=None, xticks=None, yticks=None,\n title=None, colorbar_shrink=0.5, color_map=None, show=None,\n save=None, ax=None):\n \"\"\"Plot a matrix.\"\"\"\n if ax is None: ax = pl.gca()\n img = ax.imshow(matrix, cmap=color_map)\n if xlabel is not None: ax.set_xlabel(xlabel)\n if ylabel is not None: ax.set_ylabel(ylabel)\n if title is not None: ax.set_title(title)\n if xticks is not None:\n ax.set_xticks(range(len(xticks)), xticks, rotation='vertical')\n if yticks is not None:\n ax.set_yticks(range(len(yticks)), yticks)\n pl.colorbar(img, shrink=colorbar_shrink, ax=ax) # need a figure instance for colorbar\n savefig_or_show('matrix', show=show, save=save)\n\n\ndef timeseries(X, **kwargs):\n \"\"\"Plot X. 
See timeseries_subplot.\"\"\"\n pl.figure(\n figsize=tuple(2*s for s in rcParams['figure.figsize']),\n subplotpars=sppars(left=0.12, right=0.98, bottom=0.13),\n )\n timeseries_subplot(X, **kwargs)\n\n\ndef timeseries_subplot(\n X: np.ndarray,\n time=None,\n color=None,\n var_names=(),\n highlights_x=(),\n xlabel='',\n ylabel='gene expression',\n yticks=None,\n xlim=None,\n legend=True,\n palette: Union[Sequence[str], Cycler, None] = None,\n color_map='viridis',\n ax: Optional[Axes] = None,\n):\n \"\"\"\\\n Plot X.\n\n Parameters\n ----------\n X\n Call this with:\n X with one column, color categorical.\n X with one column, color continuous.\n X with n columns, color is of length n.\n \"\"\"\n\n if color is not None:\n use_color_map = isinstance(color[0], (float, np.floating))\n palette = default_palette(palette)\n x_range = np.arange(X.shape[0]) if time is None else time\n if X.ndim == 1: X = X[:, None]\n if X.shape[1] > 1:\n colors = palette[:X.shape[1]].by_key()['color']\n subsets = [(x_range, X[:, i]) for i in range(X.shape[1])]\n elif use_color_map:\n colors = [color]\n subsets = [(x_range, X[:, 0])]\n else:\n levels, _ = np.unique(color, return_inverse=True)\n colors = np.array(palette[:len(levels)].by_key()['color'])\n subsets = [(x_range[color == l], X[color == l, :]) for l in levels]\n\n if ax is None:\n ax = pl.subplot()\n for i, (x, y) in enumerate(subsets):\n ax.scatter(\n x, y,\n marker='.',\n edgecolor='face',\n s=rcParams['lines.markersize'],\n c=colors[i],\n label=var_names[i] if len(var_names) > 0 else '',\n cmap=color_map,\n rasterized=settings._vector_friendly,\n )\n ylim = ax.get_ylim()\n for h in highlights_x:\n ax.plot([h, h], [ylim[0], ylim[1]], '--', color='black')\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n if yticks is not None:\n ax.set_yticks(yticks)\n if len(var_names) > 0 and legend:\n ax.legend(frameon=False)\n\n\ndef timeseries_as_heatmap(\n X: np.ndarray,\n var_names: Collection[str] = (),\n highlights_x=(),\n color_map=None,\n):\n \"\"\"\\\n Plot timeseries as heatmap.\n\n Parameters\n ----------\n X\n Data array.\n var_names\n Array of strings naming variables stored in columns of X.\n \"\"\"\n if len(var_names) == 0:\n var_names = np.arange(X.shape[1])\n if var_names.ndim == 2:\n var_names = var_names[:, 0]\n\n # transpose X\n X = X.T\n min_x = np.min(X)\n\n # insert space into X\n if False:\n # generate new array with highlights_x\n space = 10 # integer\n x_new = np.zeros((X.shape[0], X.shape[1] + space*len(highlights_x)))\n hold = 0\n _hold = 0\n space_sum = 0\n for ih, h in enumerate(highlights_x):\n _h = h + space_sum\n x_new[:, _hold:_h] = X[:, hold:h]\n x_new[:, _h:_h+space] = min_x * np.ones((X.shape[0], space))\n # update variables\n space_sum += space\n _hold = _h + space\n hold = h\n x_new[:, _hold:] = X[:, hold:]\n\n _, ax = pl.subplots(figsize=(1.5*4, 2*4))\n ax.imshow(\n np.array(X, dtype=np.float_),\n aspect='auto',\n interpolation='nearest',\n cmap=color_map,\n )\n pl.colorbar(shrink=0.5)\n pl.yticks(range(X.shape[0]), var_names)\n for h in highlights_x:\n pl.plot([h, h], [0, X.shape[0]], '--', color='black')\n pl.xlim([0, X.shape[1]-1])\n pl.ylim([0, X.shape[0]-1])\n\n\n# -------------------------------------------------------------------------------\n# Colors in addition to matplotlib's colors\n# -------------------------------------------------------------------------------\n\n\nadditional_colors = {\n 'gold2': '#eec900',\n 'firebrick3': '#cd2626',\n 'khaki2': 
'#eee685',\n 'slategray3': '#9fb6cd',\n 'palegreen3': '#7ccd7c',\n 'tomato2': '#ee5c42',\n 'grey80': '#cccccc',\n 'grey90': '#e5e5e5',\n 'wheat4': '#8b7e66',\n 'grey65': '#a6a6a6',\n 'grey10': '#1a1a1a',\n 'grey20': '#333333',\n 'grey50': '#7f7f7f',\n 'grey30': '#4d4d4d',\n 'grey40': '#666666',\n 'antiquewhite2': '#eedfcc',\n 'grey77': '#c4c4c4',\n 'snow4': '#8b8989',\n 'chartreuse3': '#66cd00',\n 'yellow4': '#8b8b00',\n 'darkolivegreen2': '#bcee68',\n 'olivedrab3': '#9acd32',\n 'azure3': '#c1cdcd',\n 'violetred': '#d02090',\n 'mediumpurple3': '#8968cd',\n 'purple4': '#551a8b',\n 'seagreen4': '#2e8b57',\n 'lightblue3': '#9ac0cd',\n 'orchid3': '#b452cd',\n 'indianred 3': '#cd5555',\n 'grey60': '#999999',\n 'mediumorchid1': '#e066ff',\n 'plum3': '#cd96cd',\n 'palevioletred3': '#cd6889'}\n\n# -------------------------------------------------------------------------------\n# Helper functions\n# -------------------------------------------------------------------------------\n\n\ndef savefig(writekey, dpi=None, ext=None):\n \"\"\"Save current figure to file.\n\n The `filename` is generated as follows:\n\n filename = settings.figdir / (writekey + settings.plot_suffix + '.' + settings.file_format_figs)\n \"\"\"\n if dpi is None:\n # we need this as in notebooks, the internal figures are also influenced by 'savefig.dpi' this...\n if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:\n if settings._low_resolution_warning:\n logg.warning(\n 'You are using a low resolution (dpi<150) for saving figures.\\n'\n 'Consider running `set_figure_params(dpi_save=...)`, which will '\n \"adjust `matplotlib.rcParams['savefig.dpi']`\"\n )\n settings._low_resolution_warning = False\n else:\n dpi = rcParams['savefig.dpi']\n settings.figdir.mkdir(parents=True, exist_ok=True)\n if ext is None: ext = settings.file_format_figs\n filename = settings.figdir / f'{writekey}{settings.plot_suffix}.{ext}'\n # output the following msg at warning level; it's really important for the user\n logg.warning(f'saving figure to file {filename}')\n pl.savefig(filename, dpi=dpi, bbox_inches='tight')\n\n\ndef savefig_or_show(\n writekey: str,\n show: Optional[bool] = None,\n dpi: Optional[int] = None,\n ext: str = None,\n save: Union[bool, str, None] = None,\n):\n if isinstance(save, str):\n # check whether `save` contains a figure extension\n if ext is None:\n for try_ext in ['.svg', '.pdf', '.png']:\n if save.endswith(try_ext):\n ext = try_ext[1:]\n save = save.replace(try_ext, '')\n break\n # append it\n writekey += save\n save = True\n save = settings.autosave if save is None else save\n show = settings.autoshow if show is None else show\n if save: savefig(writekey, dpi=dpi, ext=ext)\n if show: pl.show()\n if save: pl.close() # clear figure\n\n\ndef default_palette(\n palette: Union[Sequence[str], Cycler, None] = None\n) -> Cycler:\n if palette is None: return rcParams['axes.prop_cycle']\n elif not isinstance(palette, Cycler): return cycler(color=palette)\n else: return palette\n\n\ndef _validate_palette(adata, key):\n \"\"\"\n checks if the list of colors in adata.uns[f'{key}_colors'] is valid\n and updates the color list in adata.uns[f'{key}_colors'] if needed.\n\n Not only valid matplotlib colors are checked but also if the color name\n is a valid R color name, in which case it will be translated to a valid name\n \"\"\"\n\n _palette = []\n color_key = f\"{key}_colors\"\n\n for color in adata.uns[color_key]:\n if not is_color_like(color):\n # check if the color is a valid R color and translate it\n # to 
a valid hex color value\n if color in additional_colors:\n color = additional_colors[color]\n else:\n logg.warning(\n f\"The following color value found in adata.uns['{key}_colors'] \"\n f\"is not valid: '{color}'. Default colors will be used instead.\"\n )\n _set_default_colors_for_categorical_obs(adata, key)\n _palette = None\n break\n _palette.append(color)\n if _palette is not None:\n adata.uns[color_key] = _palette\n\n\ndef _set_colors_for_categorical_obs(\n adata,\n value_to_plot,\n palette: Union[str, Sequence[str], Cycler],\n):\n \"\"\"\n Sets the adata.uns[value_to_plot + '_colors'] according to the given palette\n\n Parameters\n ----------\n adata\n annData object\n value_to_plot\n name of a valid categorical observation\n palette\n Palette should be either a valid :func:`~matplotlib.pyplot.colormaps` string,\n a sequence of colors (in a format that can be understood by matplotlib,\n eg. RGB, RGBS, hex, or a cycler object with key='color'\n\n Returns\n -------\n None\n \"\"\"\n from matplotlib.colors import to_hex\n\n categories = adata.obs[value_to_plot].cat.categories\n # check is palette is a valid matplotlib colormap\n if isinstance(palette, str) and palette in pl.colormaps():\n # this creates a palette from a colormap. E.g. 'Accent, Dark2, tab20'\n cmap = pl.get_cmap(palette)\n colors_list = [to_hex(x) for x in cmap(np.linspace(0, 1, len(categories)))]\n\n else:\n # check if palette is a list and convert it to a cycler, thus\n # it doesnt matter if the list is shorter than the categories length:\n if isinstance(palette, cabc.Sequence):\n if len(palette) < len(categories):\n logg.warning(\n \"Length of palette colors is smaller than the number of \"\n f\"categories (palette length: {len(palette)}, \"\n f\"categories length: {len(categories)}. \"\n \"Some categories will have the same color.\"\n )\n # check that colors are valid\n _color_list = []\n for color in palette:\n if not is_color_like(color):\n # check if the color is a valid R color and translate it\n # to a valid hex color value\n if color in additional_colors:\n color = additional_colors[color]\n else:\n raise ValueError(\n \"The following color value of the given palette \"\n f\"is not valid: {color}\"\n )\n _color_list.append(color)\n\n palette = cycler(color=_color_list)\n if not isinstance(palette, Cycler):\n raise ValueError(\n \"Please check that the value of 'palette' is a valid \"\n \"matplotlib colormap string (eg. 
Set2), a list of color names \"\n \"or a cycler with a 'color' key.\"\n )\n if 'color' not in palette.keys:\n raise ValueError(\"Please set the palette key 'color'.\")\n\n cc = palette()\n colors_list = [to_hex(next(cc)['color']) for x in range(len(categories))]\n\n adata.uns[value_to_plot + '_colors'] = colors_list\n\n\ndef _set_default_colors_for_categorical_obs(adata, value_to_plot):\n \"\"\"\n Sets the adata.uns[value_to_plot + '_colors'] using default color palettes\n\n Parameters\n ----------\n adata\n AnnData object\n value_to_plot\n Name of a valid categorical observation\n\n Returns\n -------\n None\n \"\"\"\n\n categories = adata.obs[value_to_plot].cat.categories\n length = len(categories)\n\n # check if default matplotlib palette has enough colors\n if len(rcParams['axes.prop_cycle'].by_key()['color']) >= length:\n cc = rcParams['axes.prop_cycle']()\n palette = [next(cc)['color'] for _ in range(length)]\n\n else:\n if length <= 20:\n palette = palettes.default_20\n elif length <= 28:\n palette = palettes.default_28\n elif length <= len(palettes.default_102): # 103 colors\n palette = palettes.default_102\n else:\n palette = ['grey' for _ in range(length)]\n logg.info(\n f'the obs value {value_to_plot!r} has more than 103 categories. Uniform '\n \"'grey' color will be used for all categories.\"\n )\n\n adata.uns[value_to_plot + '_colors'] = palette[:length]\n\n\ndef add_colors_for_categorical_sample_annotation(adata, key, palette=None,\n force_update_colors=False):\n\n color_key = f\"{key}_colors\"\n colors_needed = len(adata.obs[key].cat.categories)\n if palette and force_update_colors:\n _set_colors_for_categorical_obs(adata, key, palette)\n elif color_key in adata.uns and len(adata.uns[color_key]) <= colors_needed:\n _validate_palette(adata, key)\n else:\n _set_default_colors_for_categorical_obs(adata, key)\n\n\ndef plot_edges(axs, adata, basis, edges_width, edges_color):\n import networkx as nx\n\n if not isinstance(axs, cabc.Sequence): axs = [axs]\n if 'neighbors' not in adata.uns:\n raise ValueError('`edges=True` requires `pp.neighbors` to be run before.')\n g = nx.Graph(adata.uns['neighbors']['connectivities'])\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n for ax in axs:\n edge_collection = nx.draw_networkx_edges(\n g, adata.obsm['X_' + basis],\n ax=ax, width=edges_width, edge_color=edges_color)\n edge_collection.set_zorder(-2)\n edge_collection.set_rasterized(settings._vector_friendly)\n\n\ndef plot_arrows(axs, adata, basis, arrows_kwds=None):\n if not isinstance(axs, cabc.Sequence): axs = [axs]\n v_prefix = next((\n p for p in ['velocity', 'Delta']\n if f'{p}_{basis}' in adata.obsm\n ), None)\n if v_prefix is None:\n raise ValueError(\n \"`arrows=True` requires \"\n f\"`'velocity_{basis}'` from scvelo or \"\n f\"`'Delta_{basis}'` from velocyto.\"\n )\n if v_prefix == 'velocity':\n logg.warning(\n 'The module `scvelo` has improved plotting facilities. 
'\n 'Prefer using `scv.pl.velocity_embedding` to `arrows=True`.'\n )\n X = adata.obsm[f'X_{basis}']\n V = adata.obsm[f'{v_prefix}_{basis}']\n for ax in axs:\n quiver_kwds = arrows_kwds if arrows_kwds is not None else {}\n ax.quiver(\n X[:, 0], X[:, 1],\n V[:, 0], V[:, 1],\n **quiver_kwds,\n rasterized=settings._vector_friendly,\n )\n\n\ndef scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):\n \"\"\"Scatter of group using representation of data Y.\n \"\"\"\n mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values\n color = adata.uns[key + '_colors'][imask]\n if not isinstance(color[0], str):\n from matplotlib.colors import rgb2hex\n color = rgb2hex(adata.uns[key + '_colors'][imask])\n if not is_color_like(color):\n raise ValueError('\"{}\" is not a valid matplotlib color.'.format(color))\n data = [Y[mask, 0], Y[mask, 1]]\n if projection == '3d': data.append(Y[mask, 2])\n ax.scatter(*data,\n marker='.',\n alpha=alpha,\n c=color,\n edgecolors='none',\n s=size,\n label=adata.obs[key].cat.categories[imask],\n rasterized=settings._vector_friendly)\n return mask\n\n\ndef setup_axes(\n ax: Union[Axes, Sequence[Axes]] = None,\n panels='blue',\n colorbars=(False,),\n right_margin=None,\n left_margin=None,\n projection: Literal['2d', '3d'] = '2d',\n show_ticks=False,\n):\n \"\"\"Grid of axes for plotting, legends and colorbars.\n \"\"\"\n if '3d' in projection: from mpl_toolkits.mplot3d import Axes3D\n avail_projections = {'2d', '3d'}\n if projection not in avail_projections:\n raise ValueError('choose projection from', avail_projections)\n if left_margin is not None:\n raise NotImplementedError('We currently don’t support `left_margin`.')\n if np.any(colorbars) and right_margin is None:\n right_margin = 1 - rcParams['figure.subplot.right'] + 0.21 # 0.25\n elif right_margin is None:\n right_margin = 1 - rcParams['figure.subplot.right'] + 0.06 # 0.10\n # make a list of right margins for each panel\n if not isinstance(right_margin, list):\n right_margin_list = [right_margin for i in range(len(panels))]\n else:\n right_margin_list = right_margin\n\n # make a figure with len(panels) panels in a row side by side\n top_offset = 1 - rcParams['figure.subplot.top']\n bottom_offset = 0.15 if show_ticks else 0.08\n left_offset = 1 if show_ticks else 0.3 # in units of base_height\n base_height = rcParams['figure.figsize'][1]\n height = base_height\n base_width = rcParams['figure.figsize'][0]\n if show_ticks: base_width *= 1.1\n\n draw_region_width = base_width - left_offset - top_offset - 0.5 # this is kept constant throughout\n\n right_margin_factor = sum([1 + right_margin for right_margin in right_margin_list])\n width_without_offsets = right_margin_factor * draw_region_width # this is the total width that keeps draw_region_width\n\n right_offset = (len(panels) - 1) * left_offset\n figure_width = width_without_offsets + left_offset + right_offset\n draw_region_width_frac = draw_region_width / figure_width\n left_offset_frac = left_offset / figure_width\n right_offset_frac = 1 - (len(panels) - 1) * left_offset_frac\n\n if ax is None:\n pl.figure(figsize=(figure_width, height),\n subplotpars=sppars(left=0, right=1, bottom=bottom_offset))\n left_positions = [left_offset_frac, left_offset_frac + draw_region_width_frac]\n for i in range(1, len(panels)):\n right_margin = right_margin_list[i-1]\n left_positions.append(left_positions[-1] + right_margin * draw_region_width_frac)\n left_positions.append(left_positions[-1] + draw_region_width_frac)\n panel_pos = [[bottom_offset], 
[1-top_offset], left_positions]\n\n axs = []\n if ax is None:\n for icolor, color in enumerate(panels):\n left = panel_pos[2][2*icolor]\n bottom = panel_pos[0][0]\n width = draw_region_width / figure_width\n height = panel_pos[1][0] - bottom\n if projection == '2d': ax = pl.axes([left, bottom, width, height])\n elif projection == '3d': ax = pl.axes([left, bottom, width, height], projection='3d')\n axs.append(ax)\n else:\n axs = ax if isinstance(ax, cabc.Sequence) else [ax]\n\n return axs, panel_pos, draw_region_width, figure_width\n\n\ndef scatter_base(\n Y: np.ndarray,\n colors='blue',\n sort_order=True,\n alpha=None,\n highlights=(),\n right_margin=None,\n left_margin=None,\n projection: Literal['2d', '3d'] = '2d',\n title=None,\n component_name='DC',\n component_indexnames=(1, 2, 3),\n axis_labels=None,\n colorbars=(False,),\n sizes=(1,),\n color_map='viridis',\n show_ticks=True,\n ax=None,\n) -> Union[Axes, List[Axes]]:\n \"\"\"Plot scatter plot of data.\n\n Parameters\n ----------\n Y\n Data array.\n projection\n\n Returns\n -------\n Depending on whether supplying a single array or a list of arrays,\n return a single axis or a list of axes.\n \"\"\"\n if isinstance(highlights, cabc.Mapping):\n highlights_indices = sorted(highlights)\n highlights_labels = [highlights[i] for i in highlights_indices]\n else:\n highlights_indices = highlights\n highlights_labels = []\n # if we have a single array, transform it into a list with a single array\n if type(colors) == str: colors = [colors]\n if len(sizes) != len(colors) and len(sizes) == 1:\n sizes = [sizes[0] for _ in range(len(colors))]\n axs, panel_pos, draw_region_width, figure_width = setup_axes(\n ax=ax, panels=colors, colorbars=colorbars, projection=projection,\n right_margin=right_margin, left_margin=left_margin,\n show_ticks=show_ticks,\n )\n for icolor, color in enumerate(colors):\n ax = axs[icolor]\n left = panel_pos[2][2*icolor]\n bottom = panel_pos[0][0]\n width = draw_region_width / figure_width\n height = panel_pos[1][0] - bottom\n Y_sort = Y\n if not is_color_like(color) and sort_order:\n sort = np.argsort(color)\n color = color[sort]\n Y_sort = Y[sort]\n if projection == '2d': data = Y_sort[:, 0], Y_sort[:, 1]\n elif projection == '3d': data = Y_sort[:, 0], Y_sort[:, 1], Y_sort[:, 2]\n else: raise ValueError(\n f\"Unknown projection {projection!r} not in '2d', '3d'\"\n )\n if not isinstance(color, str) or color != 'white':\n sct = ax.scatter(\n *data,\n marker='.',\n c=color,\n alpha=alpha,\n edgecolors='none', # 'face',\n s=sizes[icolor],\n cmap=color_map,\n rasterized=settings._vector_friendly,\n )\n if colorbars[icolor]:\n width = 0.006 * draw_region_width / len(colors)\n left = panel_pos[2][2*icolor+1] + (1.2 if projection == '3d' else 0.2) * width\n rectangle = [left, bottom, width, height]\n fig = pl.gcf()\n ax_cb = fig.add_axes(rectangle)\n cb = pl.colorbar(\n sct,\n format=ticker.FuncFormatter(ticks_formatter),\n cax=ax_cb,\n )\n # set the title\n if title is not None: ax.set_title(title[icolor])\n # output highlighted data points\n for iihighlight, ihighlight in enumerate(highlights_indices):\n ihighlight = ihighlight if isinstance(ihighlight, int) else int(ihighlight)\n data = [Y[ihighlight, 0]], [Y[ihighlight, 1]]\n if '3d' in projection:\n data = [Y[ihighlight, 0]], [Y[ihighlight, 1]], [Y[ihighlight, 2]]\n ax.scatter(\n *data, c='black',\n facecolors='black', edgecolors='black',\n marker='x', s=10, zorder=20,\n )\n highlight_text = (\n highlights_labels[iihighlight]\n if len(highlights_labels) > 0 else\n 
str(ihighlight)\n )\n # the following is a Python 2 compatibility hack\n ax.text(\n *([d[0] for d in data] + [highlight_text]),\n zorder=20,\n fontsize=10,\n color='black',\n )\n if not show_ticks:\n ax.set_xticks([])\n ax.set_yticks([])\n if '3d' in projection: ax.set_zticks([])\n # set default axis_labels\n if axis_labels is None:\n axis_labels = [\n [component_name + str(i) for i in component_indexnames]\n for _ in range(len(axs))\n ]\n else:\n axis_labels = [axis_labels for _ in range(len(axs))]\n for iax, ax in enumerate(axs):\n ax.set_xlabel(axis_labels[iax][0])\n ax.set_ylabel(axis_labels[iax][1])\n if '3d' in projection:\n # shift the label closer to the axis\n ax.set_zlabel(axis_labels[iax][2], labelpad=-7)\n for ax in axs:\n # scale limits to match data\n ax.autoscale_view()\n return axs\n\n\ndef scatter_single(ax: Axes, Y: np.ndarray, *args, **kwargs):\n \"\"\"Plot scatter plot of data.\n\n Parameters\n ----------\n ax\n Axis to plot on.\n Y\n Data array, data to be plotted needs to be in the first two columns.\n \"\"\"\n if 's' not in kwargs:\n kwargs['s'] = 2 if Y.shape[0] > 500 else 10\n if 'edgecolors' not in kwargs:\n kwargs['edgecolors'] = 'face'\n ax.scatter(Y[:, 0], Y[:, 1], **kwargs, rasterized=settings._vector_friendly)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef arrows_transitions(\n ax: Axes,\n X: np.ndarray,\n indices: Sequence[int],\n weight=None,\n):\n \"\"\"\n Plot arrows of transitions in data matrix.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n X\n Data array, any representation wished (X, psi, phi, etc).\n indices\n Indices storing the transitions.\n \"\"\"\n step = 1\n width = axis_to_data(ax, 0.001)\n if X.shape[0] > 300:\n step = 5\n width = axis_to_data(ax, 0.0005)\n if X.shape[0] > 500:\n step = 30\n width = axis_to_data(ax, 0.0001)\n head_width = 10*width\n for ix, x in enumerate(X):\n if ix % step != 0:\n continue\n X_step = X[indices[ix]] - x\n # don't plot arrow of length 0\n for itrans in range(X_step.shape[0]):\n alphai = 1\n widthi = width\n head_widthi = head_width\n if weight is not None:\n alphai *= weight[ix, itrans]\n widthi *= weight[ix, itrans]\n if not np.any(X_step[itrans, :1]):\n continue\n ax.arrow(\n x[0],\n x[1],\n X_step[itrans, 0],\n X_step[itrans, 1],\n length_includes_head=True,\n width=widthi,\n head_width=head_widthi,\n alpha=alphai,\n color='grey',\n )\n\n\ndef ticks_formatter(x, pos):\n # pretty scientific notation\n if False:\n a, b = f'{x:.2e}'.split('e')\n b = int(b)\n return fr'${a} \\times 10^{{{b}}}$'\n else:\n return f'{x:.3f}'.rstrip('0').rstrip('.')\n\n\ndef pimp_axis(x_or_y_ax):\n \"\"\"Remove trailing zeros.\n \"\"\"\n x_or_y_ax.set_major_formatter(ticker.FuncFormatter(ticks_formatter))\n\n\ndef scale_to_zero_one(x):\n \"\"\"Take some 1d data and scale it so that min matches 0 and max 1.\n \"\"\"\n xscaled = x - np.min(x)\n xscaled /= np.max(xscaled)\n return xscaled\n\n\ndef hierarchy_pos(G, root, levels=None, width=1., height=1.):\n \"\"\"Tree layout for networkx graph.\n\n See https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3\n answer by burubum.\n\n If there is a cycle that is reachable from root, then this will see\n infinite recursion.\n\n Parameters\n ----------\n G: the graph\n root: the root node\n levels: a dictionary\n key: level number (starting from 0)\n value: number of nodes in this level\n width: horizontal space allocated for drawing\n height: vertical space allocated for drawing\n \"\"\"\n TOTAL = \"total\"\n CURRENT = 
\"current\"\n\n def make_levels(levels, node=root, currentLevel=0, parent=None):\n \"\"\"Compute the number of nodes for each level\n \"\"\"\n if currentLevel not in levels:\n levels[currentLevel] = {TOTAL: 0, CURRENT: 0}\n levels[currentLevel][TOTAL] += 1\n neighbors = list(G.neighbors(node))\n if parent is not None:\n neighbors.remove(parent)\n for neighbor in neighbors:\n levels = make_levels(levels, neighbor, currentLevel + 1, node)\n return levels\n\n def make_pos(pos, node=root, currentLevel=0, parent=None, vert_loc=0):\n dx = 1/levels[currentLevel][TOTAL]\n left = dx/2\n pos[node] = ((left + dx*levels[currentLevel][CURRENT])*width,\n vert_loc)\n levels[currentLevel][CURRENT] += 1\n neighbors = list(G.neighbors(node))\n if parent is not None:\n neighbors.remove(parent)\n for neighbor in neighbors:\n pos = make_pos(pos, neighbor, currentLevel + 1, node, vert_loc-vert_gap)\n return pos\n\n if levels is None:\n levels = make_levels({})\n else:\n levels = {l: {TOTAL: levels[l], CURRENT: 0} for l in levels}\n vert_gap = height / (max([l for l in levels])+1)\n return make_pos({})\n\n\ndef hierarchy_sc(G, root, node_sets):\n import networkx as nx\n\n def make_sc_tree(sc_G, node=root, parent=None):\n sc_G.add_node(node)\n neighbors = G.neighbors(node)\n if parent is not None:\n sc_G.add_edge(parent, node)\n neighbors.remove(parent)\n old_node = node\n for n in node_sets[int(node)]:\n new_node = str(node) + '_' + str(n)\n sc_G.add_node(new_node)\n sc_G.add_edge(old_node, new_node)\n old_node = new_node\n for neighbor in neighbors:\n sc_G = make_sc_tree(sc_G, neighbor, node)\n return sc_G\n return make_sc_tree(nx.Graph())\n\n\ndef zoom(ax, xy='x', factor=1):\n \"\"\"Zoom into axis.\n\n Parameters\n ----------\n \"\"\"\n limits = ax.get_xlim() if xy == 'x' else ax.get_ylim()\n new_limits = (0.5*(limits[0] + limits[1])\n + 1./factor * np.array((-0.5, 0.5)) * (limits[1] - limits[0]))\n if xy == 'x':\n ax.set_xlim(new_limits)\n else:\n ax.set_ylim(new_limits)\n\n\ndef get_ax_size(ax: Axes, fig: Figure):\n \"\"\"Get axis size\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n fig\n Figure.\n \"\"\"\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n width *= fig.dpi\n height *= fig.dpi\n\n\ndef axis_to_data(ax: Axes, width: float):\n \"\"\"For a width in axis coordinates, return the corresponding in data\n coordinates.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n width\n Width in xaxis coordinates.\n \"\"\"\n xlim = ax.get_xlim()\n widthx = width*(xlim[1] - xlim[0])\n ylim = ax.get_ylim()\n widthy = width*(ylim[1] - ylim[0])\n return 0.5*(widthx + widthy)\n\n\ndef axis_to_data_points(ax: Axes, points_axis: np.ndarray):\n \"\"\"Map points in axis coordinates to data coordinates.\n\n Uses matplotlib.transform.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n points_axis\n Points in axis coordinates.\n \"\"\"\n axis_to_data = ax.transAxes + ax.transData.inverted()\n return axis_to_data.transform(points_axis)\n\n\ndef data_to_axis_points(ax: Axes, points_data: np.ndarray):\n \"\"\"Map points in data coordinates to axis coordinates.\n\n Uses matplotlib.transform.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n points_data\n Points in data coordinates.\n \"\"\"\n data_to_axis = axis_to_data.inverted()\n return data_to_axis(points_data)\n", "path": "scanpy/plotting/_utils.py" } ]
[ { "content": "import warnings\nimport collections.abc as cabc\nfrom typing import Union, List, Sequence, Tuple, Collection, Optional\n\nimport numpy as np\nfrom matplotlib import pyplot as pl\nfrom matplotlib import rcParams, ticker\nfrom matplotlib.axes import Axes\nfrom matplotlib.colors import is_color_like\nfrom matplotlib.figure import SubplotParams as sppars, Figure\nfrom cycler import Cycler, cycler\n\nfrom .. import logging as logg\nfrom .._settings import settings\nfrom .._compat import Literal\nfrom . import palettes\n\n\n_tmp_cluster_pos = None # just a hacky solution for storing a tmp global variable\n\nColorLike = Union[str, Tuple[float, ...]]\n_IGraphLayout = Literal['fa', 'fr', 'rt', 'rt_circular', 'drl', 'eq_tree', ...]\n_FontWeight = Literal[\n 'light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black'\n]\n_FontSize = Literal[\n 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'\n]\n\n\n# -------------------------------------------------------------------------------\n# Simple plotting functions\n# -------------------------------------------------------------------------------\n\n\ndef matrix(matrix, xlabel=None, ylabel=None, xticks=None, yticks=None,\n title=None, colorbar_shrink=0.5, color_map=None, show=None,\n save=None, ax=None):\n \"\"\"Plot a matrix.\"\"\"\n if ax is None: ax = pl.gca()\n img = ax.imshow(matrix, cmap=color_map)\n if xlabel is not None: ax.set_xlabel(xlabel)\n if ylabel is not None: ax.set_ylabel(ylabel)\n if title is not None: ax.set_title(title)\n if xticks is not None:\n ax.set_xticks(range(len(xticks)), xticks, rotation='vertical')\n if yticks is not None:\n ax.set_yticks(range(len(yticks)), yticks)\n pl.colorbar(img, shrink=colorbar_shrink, ax=ax) # need a figure instance for colorbar\n savefig_or_show('matrix', show=show, save=save)\n\n\ndef timeseries(X, **kwargs):\n \"\"\"Plot X. 
See timeseries_subplot.\"\"\"\n pl.figure(\n figsize=tuple(2*s for s in rcParams['figure.figsize']),\n subplotpars=sppars(left=0.12, right=0.98, bottom=0.13),\n )\n timeseries_subplot(X, **kwargs)\n\n\ndef timeseries_subplot(\n X: np.ndarray,\n time=None,\n color=None,\n var_names=(),\n highlights_x=(),\n xlabel='',\n ylabel='gene expression',\n yticks=None,\n xlim=None,\n legend=True,\n palette: Union[Sequence[str], Cycler, None] = None,\n color_map='viridis',\n ax: Optional[Axes] = None,\n):\n \"\"\"\\\n Plot X.\n\n Parameters\n ----------\n X\n Call this with:\n X with one column, color categorical.\n X with one column, color continuous.\n X with n columns, color is of length n.\n \"\"\"\n\n if color is not None:\n use_color_map = isinstance(color[0], (float, np.floating))\n palette = default_palette(palette)\n x_range = np.arange(X.shape[0]) if time is None else time\n if X.ndim == 1: X = X[:, None]\n if X.shape[1] > 1:\n colors = palette[:X.shape[1]].by_key()['color']\n subsets = [(x_range, X[:, i]) for i in range(X.shape[1])]\n elif use_color_map:\n colors = [color]\n subsets = [(x_range, X[:, 0])]\n else:\n levels, _ = np.unique(color, return_inverse=True)\n colors = np.array(palette[:len(levels)].by_key()['color'])\n subsets = [(x_range[color == l], X[color == l, :]) for l in levels]\n\n if ax is None:\n ax = pl.subplot()\n for i, (x, y) in enumerate(subsets):\n ax.scatter(\n x, y,\n marker='.',\n edgecolor='face',\n s=rcParams['lines.markersize'],\n c=colors[i],\n label=var_names[i] if len(var_names) > 0 else '',\n cmap=color_map,\n rasterized=settings._vector_friendly,\n )\n ylim = ax.get_ylim()\n for h in highlights_x:\n ax.plot([h, h], [ylim[0], ylim[1]], '--', color='black')\n ax.set_ylim(ylim)\n if xlim is not None:\n ax.set_xlim(xlim)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n if yticks is not None:\n ax.set_yticks(yticks)\n if len(var_names) > 0 and legend:\n ax.legend(frameon=False)\n\n\ndef timeseries_as_heatmap(\n X: np.ndarray,\n var_names: Collection[str] = (),\n highlights_x=(),\n color_map=None,\n):\n \"\"\"\\\n Plot timeseries as heatmap.\n\n Parameters\n ----------\n X\n Data array.\n var_names\n Array of strings naming variables stored in columns of X.\n \"\"\"\n if len(var_names) == 0:\n var_names = np.arange(X.shape[1])\n if var_names.ndim == 2:\n var_names = var_names[:, 0]\n\n # transpose X\n X = X.T\n min_x = np.min(X)\n\n # insert space into X\n if False:\n # generate new array with highlights_x\n space = 10 # integer\n x_new = np.zeros((X.shape[0], X.shape[1] + space*len(highlights_x)))\n hold = 0\n _hold = 0\n space_sum = 0\n for ih, h in enumerate(highlights_x):\n _h = h + space_sum\n x_new[:, _hold:_h] = X[:, hold:h]\n x_new[:, _h:_h+space] = min_x * np.ones((X.shape[0], space))\n # update variables\n space_sum += space\n _hold = _h + space\n hold = h\n x_new[:, _hold:] = X[:, hold:]\n\n _, ax = pl.subplots(figsize=(1.5*4, 2*4))\n ax.imshow(\n np.array(X, dtype=np.float_),\n aspect='auto',\n interpolation='nearest',\n cmap=color_map,\n )\n pl.colorbar(shrink=0.5)\n pl.yticks(range(X.shape[0]), var_names)\n for h in highlights_x:\n pl.plot([h, h], [0, X.shape[0]], '--', color='black')\n pl.xlim([0, X.shape[1]-1])\n pl.ylim([0, X.shape[0]-1])\n\n\n# -------------------------------------------------------------------------------\n# Colors in addition to matplotlib's colors\n# -------------------------------------------------------------------------------\n\n\nadditional_colors = {\n 'gold2': '#eec900',\n 'firebrick3': '#cd2626',\n 'khaki2': 
'#eee685',\n 'slategray3': '#9fb6cd',\n 'palegreen3': '#7ccd7c',\n 'tomato2': '#ee5c42',\n 'grey80': '#cccccc',\n 'grey90': '#e5e5e5',\n 'wheat4': '#8b7e66',\n 'grey65': '#a6a6a6',\n 'grey10': '#1a1a1a',\n 'grey20': '#333333',\n 'grey50': '#7f7f7f',\n 'grey30': '#4d4d4d',\n 'grey40': '#666666',\n 'antiquewhite2': '#eedfcc',\n 'grey77': '#c4c4c4',\n 'snow4': '#8b8989',\n 'chartreuse3': '#66cd00',\n 'yellow4': '#8b8b00',\n 'darkolivegreen2': '#bcee68',\n 'olivedrab3': '#9acd32',\n 'azure3': '#c1cdcd',\n 'violetred': '#d02090',\n 'mediumpurple3': '#8968cd',\n 'purple4': '#551a8b',\n 'seagreen4': '#2e8b57',\n 'lightblue3': '#9ac0cd',\n 'orchid3': '#b452cd',\n 'indianred 3': '#cd5555',\n 'grey60': '#999999',\n 'mediumorchid1': '#e066ff',\n 'plum3': '#cd96cd',\n 'palevioletred3': '#cd6889'}\n\n# -------------------------------------------------------------------------------\n# Helper functions\n# -------------------------------------------------------------------------------\n\n\ndef savefig(writekey, dpi=None, ext=None):\n \"\"\"Save current figure to file.\n\n The `filename` is generated as follows:\n\n filename = settings.figdir / (writekey + settings.plot_suffix + '.' + settings.file_format_figs)\n \"\"\"\n if dpi is None:\n # we need this as in notebooks, the internal figures are also influenced by 'savefig.dpi' this...\n if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:\n if settings._low_resolution_warning:\n logg.warning(\n 'You are using a low resolution (dpi<150) for saving figures.\\n'\n 'Consider running `set_figure_params(dpi_save=...)`, which will '\n \"adjust `matplotlib.rcParams['savefig.dpi']`\"\n )\n settings._low_resolution_warning = False\n else:\n dpi = rcParams['savefig.dpi']\n settings.figdir.mkdir(parents=True, exist_ok=True)\n if ext is None: ext = settings.file_format_figs\n filename = settings.figdir / f'{writekey}{settings.plot_suffix}.{ext}'\n # output the following msg at warning level; it's really important for the user\n logg.warning(f'saving figure to file {filename}')\n pl.savefig(filename, dpi=dpi, bbox_inches='tight')\n\n\ndef savefig_or_show(\n writekey: str,\n show: Optional[bool] = None,\n dpi: Optional[int] = None,\n ext: str = None,\n save: Union[bool, str, None] = None,\n):\n if isinstance(save, str):\n # check whether `save` contains a figure extension\n if ext is None:\n for try_ext in ['.svg', '.pdf', '.png']:\n if save.endswith(try_ext):\n ext = try_ext[1:]\n save = save.replace(try_ext, '')\n break\n # append it\n writekey += save\n save = True\n save = settings.autosave if save is None else save\n show = settings.autoshow if show is None else show\n if save: savefig(writekey, dpi=dpi, ext=ext)\n if show: pl.show()\n if save: pl.close() # clear figure\n\n\ndef default_palette(\n palette: Union[Sequence[str], Cycler, None] = None\n) -> Cycler:\n if palette is None: return rcParams['axes.prop_cycle']\n elif not isinstance(palette, Cycler): return cycler(color=palette)\n else: return palette\n\n\ndef _validate_palette(adata, key):\n \"\"\"\n checks if the list of colors in adata.uns[f'{key}_colors'] is valid\n and updates the color list in adata.uns[f'{key}_colors'] if needed.\n\n Not only valid matplotlib colors are checked but also if the color name\n is a valid R color name, in which case it will be translated to a valid name\n \"\"\"\n\n _palette = []\n color_key = f\"{key}_colors\"\n\n for color in adata.uns[color_key]:\n if not is_color_like(color):\n # check if the color is a valid R color and translate it\n # to 
a valid hex color value\n if color in additional_colors:\n color = additional_colors[color]\n else:\n logg.warning(\n f\"The following color value found in adata.uns['{key}_colors'] \"\n f\"is not valid: '{color}'. Default colors will be used instead.\"\n )\n _set_default_colors_for_categorical_obs(adata, key)\n _palette = None\n break\n _palette.append(color)\n # Don't modify if nothing changed\n if (_palette is not None and list(_palette) != list(adata.uns[color_key])):\n adata.uns[color_key] = _palette\n\n\ndef _set_colors_for_categorical_obs(\n adata,\n value_to_plot,\n palette: Union[str, Sequence[str], Cycler],\n):\n \"\"\"\n Sets the adata.uns[value_to_plot + '_colors'] according to the given palette\n\n Parameters\n ----------\n adata\n annData object\n value_to_plot\n name of a valid categorical observation\n palette\n Palette should be either a valid :func:`~matplotlib.pyplot.colormaps` string,\n a sequence of colors (in a format that can be understood by matplotlib,\n eg. RGB, RGBS, hex, or a cycler object with key='color'\n\n Returns\n -------\n None\n \"\"\"\n from matplotlib.colors import to_hex\n\n categories = adata.obs[value_to_plot].cat.categories\n # check is palette is a valid matplotlib colormap\n if isinstance(palette, str) and palette in pl.colormaps():\n # this creates a palette from a colormap. E.g. 'Accent, Dark2, tab20'\n cmap = pl.get_cmap(palette)\n colors_list = [to_hex(x) for x in cmap(np.linspace(0, 1, len(categories)))]\n\n else:\n # check if palette is a list and convert it to a cycler, thus\n # it doesnt matter if the list is shorter than the categories length:\n if isinstance(palette, cabc.Sequence):\n if len(palette) < len(categories):\n logg.warning(\n \"Length of palette colors is smaller than the number of \"\n f\"categories (palette length: {len(palette)}, \"\n f\"categories length: {len(categories)}. \"\n \"Some categories will have the same color.\"\n )\n # check that colors are valid\n _color_list = []\n for color in palette:\n if not is_color_like(color):\n # check if the color is a valid R color and translate it\n # to a valid hex color value\n if color in additional_colors:\n color = additional_colors[color]\n else:\n raise ValueError(\n \"The following color value of the given palette \"\n f\"is not valid: {color}\"\n )\n _color_list.append(color)\n\n palette = cycler(color=_color_list)\n if not isinstance(palette, Cycler):\n raise ValueError(\n \"Please check that the value of 'palette' is a valid \"\n \"matplotlib colormap string (eg. 
Set2), a list of color names \"\n \"or a cycler with a 'color' key.\"\n )\n if 'color' not in palette.keys:\n raise ValueError(\"Please set the palette key 'color'.\")\n\n cc = palette()\n colors_list = [to_hex(next(cc)['color']) for x in range(len(categories))]\n\n adata.uns[value_to_plot + '_colors'] = colors_list\n\n\ndef _set_default_colors_for_categorical_obs(adata, value_to_plot):\n \"\"\"\n Sets the adata.uns[value_to_plot + '_colors'] using default color palettes\n\n Parameters\n ----------\n adata\n AnnData object\n value_to_plot\n Name of a valid categorical observation\n\n Returns\n -------\n None\n \"\"\"\n\n categories = adata.obs[value_to_plot].cat.categories\n length = len(categories)\n\n # check if default matplotlib palette has enough colors\n if len(rcParams['axes.prop_cycle'].by_key()['color']) >= length:\n cc = rcParams['axes.prop_cycle']()\n palette = [next(cc)['color'] for _ in range(length)]\n\n else:\n if length <= 20:\n palette = palettes.default_20\n elif length <= 28:\n palette = palettes.default_28\n elif length <= len(palettes.default_102): # 103 colors\n palette = palettes.default_102\n else:\n palette = ['grey' for _ in range(length)]\n logg.info(\n f'the obs value {value_to_plot!r} has more than 103 categories. Uniform '\n \"'grey' color will be used for all categories.\"\n )\n\n adata.uns[value_to_plot + '_colors'] = palette[:length]\n\n\ndef add_colors_for_categorical_sample_annotation(adata, key, palette=None,\n force_update_colors=False):\n\n color_key = f\"{key}_colors\"\n colors_needed = len(adata.obs[key].cat.categories)\n if palette and force_update_colors:\n _set_colors_for_categorical_obs(adata, key, palette)\n elif color_key in adata.uns and len(adata.uns[color_key]) <= colors_needed:\n _validate_palette(adata, key)\n else:\n _set_default_colors_for_categorical_obs(adata, key)\n\n\ndef plot_edges(axs, adata, basis, edges_width, edges_color):\n import networkx as nx\n\n if not isinstance(axs, cabc.Sequence): axs = [axs]\n if 'neighbors' not in adata.uns:\n raise ValueError('`edges=True` requires `pp.neighbors` to be run before.')\n g = nx.Graph(adata.uns['neighbors']['connectivities'])\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n for ax in axs:\n edge_collection = nx.draw_networkx_edges(\n g, adata.obsm['X_' + basis],\n ax=ax, width=edges_width, edge_color=edges_color)\n edge_collection.set_zorder(-2)\n edge_collection.set_rasterized(settings._vector_friendly)\n\n\ndef plot_arrows(axs, adata, basis, arrows_kwds=None):\n if not isinstance(axs, cabc.Sequence): axs = [axs]\n v_prefix = next((\n p for p in ['velocity', 'Delta']\n if f'{p}_{basis}' in adata.obsm\n ), None)\n if v_prefix is None:\n raise ValueError(\n \"`arrows=True` requires \"\n f\"`'velocity_{basis}'` from scvelo or \"\n f\"`'Delta_{basis}'` from velocyto.\"\n )\n if v_prefix == 'velocity':\n logg.warning(\n 'The module `scvelo` has improved plotting facilities. 
'\n 'Prefer using `scv.pl.velocity_embedding` to `arrows=True`.'\n )\n X = adata.obsm[f'X_{basis}']\n V = adata.obsm[f'{v_prefix}_{basis}']\n for ax in axs:\n quiver_kwds = arrows_kwds if arrows_kwds is not None else {}\n ax.quiver(\n X[:, 0], X[:, 1],\n V[:, 0], V[:, 1],\n **quiver_kwds,\n rasterized=settings._vector_friendly,\n )\n\n\ndef scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):\n \"\"\"Scatter of group using representation of data Y.\n \"\"\"\n mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values\n color = adata.uns[key + '_colors'][imask]\n if not isinstance(color[0], str):\n from matplotlib.colors import rgb2hex\n color = rgb2hex(adata.uns[key + '_colors'][imask])\n if not is_color_like(color):\n raise ValueError('\"{}\" is not a valid matplotlib color.'.format(color))\n data = [Y[mask, 0], Y[mask, 1]]\n if projection == '3d': data.append(Y[mask, 2])\n ax.scatter(*data,\n marker='.',\n alpha=alpha,\n c=color,\n edgecolors='none',\n s=size,\n label=adata.obs[key].cat.categories[imask],\n rasterized=settings._vector_friendly)\n return mask\n\n\ndef setup_axes(\n ax: Union[Axes, Sequence[Axes]] = None,\n panels='blue',\n colorbars=(False,),\n right_margin=None,\n left_margin=None,\n projection: Literal['2d', '3d'] = '2d',\n show_ticks=False,\n):\n \"\"\"Grid of axes for plotting, legends and colorbars.\n \"\"\"\n if '3d' in projection: from mpl_toolkits.mplot3d import Axes3D\n avail_projections = {'2d', '3d'}\n if projection not in avail_projections:\n raise ValueError('choose projection from', avail_projections)\n if left_margin is not None:\n raise NotImplementedError('We currently don’t support `left_margin`.')\n if np.any(colorbars) and right_margin is None:\n right_margin = 1 - rcParams['figure.subplot.right'] + 0.21 # 0.25\n elif right_margin is None:\n right_margin = 1 - rcParams['figure.subplot.right'] + 0.06 # 0.10\n # make a list of right margins for each panel\n if not isinstance(right_margin, list):\n right_margin_list = [right_margin for i in range(len(panels))]\n else:\n right_margin_list = right_margin\n\n # make a figure with len(panels) panels in a row side by side\n top_offset = 1 - rcParams['figure.subplot.top']\n bottom_offset = 0.15 if show_ticks else 0.08\n left_offset = 1 if show_ticks else 0.3 # in units of base_height\n base_height = rcParams['figure.figsize'][1]\n height = base_height\n base_width = rcParams['figure.figsize'][0]\n if show_ticks: base_width *= 1.1\n\n draw_region_width = base_width - left_offset - top_offset - 0.5 # this is kept constant throughout\n\n right_margin_factor = sum([1 + right_margin for right_margin in right_margin_list])\n width_without_offsets = right_margin_factor * draw_region_width # this is the total width that keeps draw_region_width\n\n right_offset = (len(panels) - 1) * left_offset\n figure_width = width_without_offsets + left_offset + right_offset\n draw_region_width_frac = draw_region_width / figure_width\n left_offset_frac = left_offset / figure_width\n right_offset_frac = 1 - (len(panels) - 1) * left_offset_frac\n\n if ax is None:\n pl.figure(figsize=(figure_width, height),\n subplotpars=sppars(left=0, right=1, bottom=bottom_offset))\n left_positions = [left_offset_frac, left_offset_frac + draw_region_width_frac]\n for i in range(1, len(panels)):\n right_margin = right_margin_list[i-1]\n left_positions.append(left_positions[-1] + right_margin * draw_region_width_frac)\n left_positions.append(left_positions[-1] + draw_region_width_frac)\n panel_pos = [[bottom_offset], 
[1-top_offset], left_positions]\n\n axs = []\n if ax is None:\n for icolor, color in enumerate(panels):\n left = panel_pos[2][2*icolor]\n bottom = panel_pos[0][0]\n width = draw_region_width / figure_width\n height = panel_pos[1][0] - bottom\n if projection == '2d': ax = pl.axes([left, bottom, width, height])\n elif projection == '3d': ax = pl.axes([left, bottom, width, height], projection='3d')\n axs.append(ax)\n else:\n axs = ax if isinstance(ax, cabc.Sequence) else [ax]\n\n return axs, panel_pos, draw_region_width, figure_width\n\n\ndef scatter_base(\n Y: np.ndarray,\n colors='blue',\n sort_order=True,\n alpha=None,\n highlights=(),\n right_margin=None,\n left_margin=None,\n projection: Literal['2d', '3d'] = '2d',\n title=None,\n component_name='DC',\n component_indexnames=(1, 2, 3),\n axis_labels=None,\n colorbars=(False,),\n sizes=(1,),\n color_map='viridis',\n show_ticks=True,\n ax=None,\n) -> Union[Axes, List[Axes]]:\n \"\"\"Plot scatter plot of data.\n\n Parameters\n ----------\n Y\n Data array.\n projection\n\n Returns\n -------\n Depending on whether supplying a single array or a list of arrays,\n return a single axis or a list of axes.\n \"\"\"\n if isinstance(highlights, cabc.Mapping):\n highlights_indices = sorted(highlights)\n highlights_labels = [highlights[i] for i in highlights_indices]\n else:\n highlights_indices = highlights\n highlights_labels = []\n # if we have a single array, transform it into a list with a single array\n if type(colors) == str: colors = [colors]\n if len(sizes) != len(colors) and len(sizes) == 1:\n sizes = [sizes[0] for _ in range(len(colors))]\n axs, panel_pos, draw_region_width, figure_width = setup_axes(\n ax=ax, panels=colors, colorbars=colorbars, projection=projection,\n right_margin=right_margin, left_margin=left_margin,\n show_ticks=show_ticks,\n )\n for icolor, color in enumerate(colors):\n ax = axs[icolor]\n left = panel_pos[2][2*icolor]\n bottom = panel_pos[0][0]\n width = draw_region_width / figure_width\n height = panel_pos[1][0] - bottom\n Y_sort = Y\n if not is_color_like(color) and sort_order:\n sort = np.argsort(color)\n color = color[sort]\n Y_sort = Y[sort]\n if projection == '2d': data = Y_sort[:, 0], Y_sort[:, 1]\n elif projection == '3d': data = Y_sort[:, 0], Y_sort[:, 1], Y_sort[:, 2]\n else: raise ValueError(\n f\"Unknown projection {projection!r} not in '2d', '3d'\"\n )\n if not isinstance(color, str) or color != 'white':\n sct = ax.scatter(\n *data,\n marker='.',\n c=color,\n alpha=alpha,\n edgecolors='none', # 'face',\n s=sizes[icolor],\n cmap=color_map,\n rasterized=settings._vector_friendly,\n )\n if colorbars[icolor]:\n width = 0.006 * draw_region_width / len(colors)\n left = panel_pos[2][2*icolor+1] + (1.2 if projection == '3d' else 0.2) * width\n rectangle = [left, bottom, width, height]\n fig = pl.gcf()\n ax_cb = fig.add_axes(rectangle)\n cb = pl.colorbar(\n sct,\n format=ticker.FuncFormatter(ticks_formatter),\n cax=ax_cb,\n )\n # set the title\n if title is not None: ax.set_title(title[icolor])\n # output highlighted data points\n for iihighlight, ihighlight in enumerate(highlights_indices):\n ihighlight = ihighlight if isinstance(ihighlight, int) else int(ihighlight)\n data = [Y[ihighlight, 0]], [Y[ihighlight, 1]]\n if '3d' in projection:\n data = [Y[ihighlight, 0]], [Y[ihighlight, 1]], [Y[ihighlight, 2]]\n ax.scatter(\n *data, c='black',\n facecolors='black', edgecolors='black',\n marker='x', s=10, zorder=20,\n )\n highlight_text = (\n highlights_labels[iihighlight]\n if len(highlights_labels) > 0 else\n 
str(ihighlight)\n )\n # the following is a Python 2 compatibility hack\n ax.text(\n *([d[0] for d in data] + [highlight_text]),\n zorder=20,\n fontsize=10,\n color='black',\n )\n if not show_ticks:\n ax.set_xticks([])\n ax.set_yticks([])\n if '3d' in projection: ax.set_zticks([])\n # set default axis_labels\n if axis_labels is None:\n axis_labels = [\n [component_name + str(i) for i in component_indexnames]\n for _ in range(len(axs))\n ]\n else:\n axis_labels = [axis_labels for _ in range(len(axs))]\n for iax, ax in enumerate(axs):\n ax.set_xlabel(axis_labels[iax][0])\n ax.set_ylabel(axis_labels[iax][1])\n if '3d' in projection:\n # shift the label closer to the axis\n ax.set_zlabel(axis_labels[iax][2], labelpad=-7)\n for ax in axs:\n # scale limits to match data\n ax.autoscale_view()\n return axs\n\n\ndef scatter_single(ax: Axes, Y: np.ndarray, *args, **kwargs):\n \"\"\"Plot scatter plot of data.\n\n Parameters\n ----------\n ax\n Axis to plot on.\n Y\n Data array, data to be plotted needs to be in the first two columns.\n \"\"\"\n if 's' not in kwargs:\n kwargs['s'] = 2 if Y.shape[0] > 500 else 10\n if 'edgecolors' not in kwargs:\n kwargs['edgecolors'] = 'face'\n ax.scatter(Y[:, 0], Y[:, 1], **kwargs, rasterized=settings._vector_friendly)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef arrows_transitions(\n ax: Axes,\n X: np.ndarray,\n indices: Sequence[int],\n weight=None,\n):\n \"\"\"\n Plot arrows of transitions in data matrix.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n X\n Data array, any representation wished (X, psi, phi, etc).\n indices\n Indices storing the transitions.\n \"\"\"\n step = 1\n width = axis_to_data(ax, 0.001)\n if X.shape[0] > 300:\n step = 5\n width = axis_to_data(ax, 0.0005)\n if X.shape[0] > 500:\n step = 30\n width = axis_to_data(ax, 0.0001)\n head_width = 10*width\n for ix, x in enumerate(X):\n if ix % step != 0:\n continue\n X_step = X[indices[ix]] - x\n # don't plot arrow of length 0\n for itrans in range(X_step.shape[0]):\n alphai = 1\n widthi = width\n head_widthi = head_width\n if weight is not None:\n alphai *= weight[ix, itrans]\n widthi *= weight[ix, itrans]\n if not np.any(X_step[itrans, :1]):\n continue\n ax.arrow(\n x[0],\n x[1],\n X_step[itrans, 0],\n X_step[itrans, 1],\n length_includes_head=True,\n width=widthi,\n head_width=head_widthi,\n alpha=alphai,\n color='grey',\n )\n\n\ndef ticks_formatter(x, pos):\n # pretty scientific notation\n if False:\n a, b = f'{x:.2e}'.split('e')\n b = int(b)\n return fr'${a} \\times 10^{{{b}}}$'\n else:\n return f'{x:.3f}'.rstrip('0').rstrip('.')\n\n\ndef pimp_axis(x_or_y_ax):\n \"\"\"Remove trailing zeros.\n \"\"\"\n x_or_y_ax.set_major_formatter(ticker.FuncFormatter(ticks_formatter))\n\n\ndef scale_to_zero_one(x):\n \"\"\"Take some 1d data and scale it so that min matches 0 and max 1.\n \"\"\"\n xscaled = x - np.min(x)\n xscaled /= np.max(xscaled)\n return xscaled\n\n\ndef hierarchy_pos(G, root, levels=None, width=1., height=1.):\n \"\"\"Tree layout for networkx graph.\n\n See https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3\n answer by burubum.\n\n If there is a cycle that is reachable from root, then this will see\n infinite recursion.\n\n Parameters\n ----------\n G: the graph\n root: the root node\n levels: a dictionary\n key: level number (starting from 0)\n value: number of nodes in this level\n width: horizontal space allocated for drawing\n height: vertical space allocated for drawing\n \"\"\"\n TOTAL = \"total\"\n CURRENT = 
\"current\"\n\n def make_levels(levels, node=root, currentLevel=0, parent=None):\n \"\"\"Compute the number of nodes for each level\n \"\"\"\n if currentLevel not in levels:\n levels[currentLevel] = {TOTAL: 0, CURRENT: 0}\n levels[currentLevel][TOTAL] += 1\n neighbors = list(G.neighbors(node))\n if parent is not None:\n neighbors.remove(parent)\n for neighbor in neighbors:\n levels = make_levels(levels, neighbor, currentLevel + 1, node)\n return levels\n\n def make_pos(pos, node=root, currentLevel=0, parent=None, vert_loc=0):\n dx = 1/levels[currentLevel][TOTAL]\n left = dx/2\n pos[node] = ((left + dx*levels[currentLevel][CURRENT])*width,\n vert_loc)\n levels[currentLevel][CURRENT] += 1\n neighbors = list(G.neighbors(node))\n if parent is not None:\n neighbors.remove(parent)\n for neighbor in neighbors:\n pos = make_pos(pos, neighbor, currentLevel + 1, node, vert_loc-vert_gap)\n return pos\n\n if levels is None:\n levels = make_levels({})\n else:\n levels = {l: {TOTAL: levels[l], CURRENT: 0} for l in levels}\n vert_gap = height / (max([l for l in levels])+1)\n return make_pos({})\n\n\ndef hierarchy_sc(G, root, node_sets):\n import networkx as nx\n\n def make_sc_tree(sc_G, node=root, parent=None):\n sc_G.add_node(node)\n neighbors = G.neighbors(node)\n if parent is not None:\n sc_G.add_edge(parent, node)\n neighbors.remove(parent)\n old_node = node\n for n in node_sets[int(node)]:\n new_node = str(node) + '_' + str(n)\n sc_G.add_node(new_node)\n sc_G.add_edge(old_node, new_node)\n old_node = new_node\n for neighbor in neighbors:\n sc_G = make_sc_tree(sc_G, neighbor, node)\n return sc_G\n return make_sc_tree(nx.Graph())\n\n\ndef zoom(ax, xy='x', factor=1):\n \"\"\"Zoom into axis.\n\n Parameters\n ----------\n \"\"\"\n limits = ax.get_xlim() if xy == 'x' else ax.get_ylim()\n new_limits = (0.5*(limits[0] + limits[1])\n + 1./factor * np.array((-0.5, 0.5)) * (limits[1] - limits[0]))\n if xy == 'x':\n ax.set_xlim(new_limits)\n else:\n ax.set_ylim(new_limits)\n\n\ndef get_ax_size(ax: Axes, fig: Figure):\n \"\"\"Get axis size\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n fig\n Figure.\n \"\"\"\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n width *= fig.dpi\n height *= fig.dpi\n\n\ndef axis_to_data(ax: Axes, width: float):\n \"\"\"For a width in axis coordinates, return the corresponding in data\n coordinates.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n width\n Width in xaxis coordinates.\n \"\"\"\n xlim = ax.get_xlim()\n widthx = width*(xlim[1] - xlim[0])\n ylim = ax.get_ylim()\n widthy = width*(ylim[1] - ylim[0])\n return 0.5*(widthx + widthy)\n\n\ndef axis_to_data_points(ax: Axes, points_axis: np.ndarray):\n \"\"\"Map points in axis coordinates to data coordinates.\n\n Uses matplotlib.transform.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n points_axis\n Points in axis coordinates.\n \"\"\"\n axis_to_data = ax.transAxes + ax.transData.inverted()\n return axis_to_data.transform(points_axis)\n\n\ndef data_to_axis_points(ax: Axes, points_data: np.ndarray):\n \"\"\"Map points in data coordinates to axis coordinates.\n\n Uses matplotlib.transform.\n\n Parameters\n ----------\n ax\n Axis object from matplotlib.\n points_data\n Points in data coordinates.\n \"\"\"\n data_to_axis = axis_to_data.inverted()\n return data_to_axis(points_data)\n", "path": "scanpy/plotting/_utils.py" } ]
diff --git a/scanpy/plotting/_utils.py b/scanpy/plotting/_utils.py
index e33e05f0f7..3712c7ef92 100644
--- a/scanpy/plotting/_utils.py
+++ b/scanpy/plotting/_utils.py
@@ -320,7 +320,8 @@ def _validate_palette(adata, key):
                 _palette = None
                 break
         _palette.append(color)
-    if _palette is not None:
+    # Don't modify if nothing changed
+    if (_palette is not None and list(_palette) != list(adata.uns[color_key])):
         adata.uns[color_key] = _palette
 
 
diff --git a/scanpy/tests/test_plotting.py b/scanpy/tests/test_plotting.py
index e2b76196e1..975ee6808f 100644
--- a/scanpy/tests/test_plotting.py
+++ b/scanpy/tests/test_plotting.py
@@ -770,3 +770,35 @@ def test_paga(image_comparer):
         pbmc, basis='X_pca', legend_fontweight='normal', threshold=0.5, show=False,
     )
     save_and_compare_images('master_paga_compare_pca')
+
+
+def test_no_copy():
+    # https://github.com/theislab/scanpy/issues/1000
+    # Tests that plotting functions don't make a copy from a view unless they
+    # actually have to
+    actual = sc.datasets.pbmc68k_reduced()
+    sc.pl.umap(actual, color=["bulk_labels", "louvain"], show=False)  # Set colors
+
+    view = actual[np.random.choice(actual.obs_names, size=actual.shape[0] // 5), :]
+
+    sc.pl.umap(view, color=["bulk_labels", "louvain"], show=False)
+    assert view.is_view
+
+    rank_genes_groups_plotting_funcs = [
+        sc.pl.rank_genes_groups,
+        sc.pl.rank_genes_groups_dotplot,
+        sc.pl.rank_genes_groups_heatmap,
+        sc.pl.rank_genes_groups_matrixplot,
+        sc.pl.rank_genes_groups_stacked_violin,
+        # TODO: raises ValueError about empty distance matrix – investigate
+        # sc.pl.rank_genes_groups_tracksplot,
+        sc.pl.rank_genes_groups_violin,
+    ]
+
+    # Only plotting one group at a time to avoid generating dendrogram
+    # TODO: Generating a dendrogram modifies the object, this should be
+    # optional and also maybe not modify the object.
+    for plotfunc in rank_genes_groups_plotting_funcs:
+        view = actual[actual.obs["bulk_labels"] == "Dendritic"]
+        plotfunc(view, ["Dendritic"], show=False)
+        assert view.is_view
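For context, the hunk above makes `_validate_palette` write `adata.uns[f'{key}_colors']` back only when the validated palette actually differs from what is stored, which is what lets the new `test_no_copy` test plot from a view without triggering a copy. Below is a rough, self-contained sketch of that guard; the helper name `validate_palette_sketch` and the plain-dict `store` are stand-ins for illustration, not scanpy's API.

```python
# Minimal sketch (not scanpy code): only write the palette back when it changed.
from matplotlib.colors import is_color_like


def validate_palette_sketch(store: dict, color_key: str) -> None:
    _palette = []
    for color in store[color_key]:
        if not is_color_like(color):
            _palette = None  # invalid color found; defaults would be set elsewhere
            break
        _palette.append(color)
    # Don't modify if nothing changed -- avoids a needless write that would
    # force a copy when the mapping is backed by an AnnData view.
    if _palette is not None and list(_palette) != list(store[color_key]):
        store[color_key] = _palette


uns = {"bulk_labels_colors": ["#1f77b4", "red"]}
validate_palette_sketch(uns, "bulk_labels_colors")
print(uns["bulk_labels_colors"])  # left untouched: nothing changed
```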
encode__django-rest-framework-4273
Serializing "complex" field returns None instead of the value since 3.4

(Not a great title — I hope the description will clarify.)

## Steps to reproduce

First: `pip install django-phonenumber-field`

With the following model and serializer:

``` python
from django.db.models import Model
from phonenumber_field.modelfields import PhoneNumberField
from rest_framework.serializers import ModelSerializer


class PhoneModel(Model):
    number = PhoneNumberField()


class PhoneSerializer(ModelSerializer):
    class Meta:
        model = PhoneModel
        fields = ['number']
```

## Expected behavior

This test used to pass (until version 3.3.3):

``` python
def test_phone_serializer():
    phone = models.PhoneModel(number='+33610293847')
    data = models.PhoneSerializer(phone).data
    assert data['number'] == '+33610293847'
```

## Actual behavior

The test fails (since version 3.4) with:

```
    def test_phone_serializer():
        phone = models.PhoneModel(number='+33610293847')
        data = models.PhoneSerializer(phone).data
>       assert data['number'] == '+33610293847'
E       assert None == '+33610293847'
```

## Analysis

I bisected this regression to 9c996d7d2aa4137c8ba29afa2253efec8d6db74f. As far as I can tell, DRF used to get the string representation of the field and no longer does.

```
(Pdb) phone.number
PhoneNumber(country_code=33, national_number=610293847, extension=None, italian_leading_zero=None, number_of_leading_zeros=None, country_code_source=1, preferred_domestic_carrier_code='')
(Pdb) str(phone.number)
'+33610293847'
```
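The before/after `compat.py` and the diff below narrow this down: since that commit, the Django 1.9+ branch of the compat shim computes the field's value but never returns it, so callers receive `None`, which would explain the `None` seen in the failing test above. A minimal sketch of the two variants follows; the `_buggy`/`_fixed` suffixes are mine, added only for side-by-side comparison.

```python
# Sketch of the change visible in rest_framework/compat.py below: on Django 1.9+
# the value is computed but never returned, so the caller gets None.
import django


def value_from_object_buggy(field, obj):
    if django.VERSION < (1, 9):
        return field._get_val_from_obj(obj)
    field.value_from_object(obj)  # result silently discarded -> implicit None


def value_from_object_fixed(field, obj):
    if django.VERSION < (1, 9):
        return field._get_val_from_obj(obj)
    return field.value_from_object(obj)  # propagate the value (e.g. a PhoneNumber)
```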
[ { "content": "\"\"\"\nThe `compat` module provides support for backwards compatibility with older\nversions of Django/Python, and compatibility wrappers around optional packages.\n\"\"\"\n\n# flake8: noqa\nfrom __future__ import unicode_literals\n\nimport inspect\n\nimport django\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection, models, transaction\nfrom django.template import Context, RequestContext, Template\nfrom django.utils import six\nfrom django.views.generic import View\n\ntry:\n import importlib # Available in Python 3.1+\nexcept ImportError:\n from django.utils import importlib # Will be removed in Django 1.9\n\n\ndef unicode_repr(instance):\n # Get the repr of an instance, but ensure it is a unicode string\n # on both python 3 (already the case) and 2 (not the case).\n if six.PY2:\n return repr(instance).decode('utf-8')\n return repr(instance)\n\n\ndef unicode_to_repr(value):\n # Coerce a unicode string to the correct repr return type, depending on\n # the Python version. We wrap all our `__repr__` implementations with\n # this and then use unicode throughout internally.\n if six.PY2:\n return value.encode('utf-8')\n return value\n\n\ndef unicode_http_header(value):\n # Coerce HTTP header value to unicode.\n if isinstance(value, six.binary_type):\n return value.decode('iso-8859-1')\n return value\n\n\ndef total_seconds(timedelta):\n # TimeDelta.total_seconds() is only available in Python 2.7\n if hasattr(timedelta, 'total_seconds'):\n return timedelta.total_seconds()\n else:\n return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)\n\n\ndef distinct(queryset, base):\n if settings.DATABASES[queryset.db][\"ENGINE\"] == \"django.db.backends.oracle\":\n # distinct analogue for Oracle users\n return base.filter(pk__in=set(queryset.values_list('pk', flat=True)))\n return queryset.distinct()\n\n\n# Obtaining manager instances and names from model options differs after 1.10.\ndef get_names_and_managers(options):\n if django.VERSION >= (1, 10):\n # Django 1.10 onwards provides a `.managers` property on the Options.\n return [\n (manager.name, manager)\n for manager\n in options.managers\n ]\n # For Django 1.8 and 1.9, use the three-tuple information provided\n # by .concrete_managers and .abstract_managers\n return [\n (manager_info[1], manager_info[2])\n for manager_info\n in (options.concrete_managers + options.abstract_managers)\n ]\n\n\n# field.rel is deprecated from 1.9 onwards\ndef get_remote_field(field, **kwargs):\n if 'default' in kwargs:\n if django.VERSION < (1, 9):\n return getattr(field, 'rel', kwargs['default'])\n return getattr(field, 'remote_field', kwargs['default'])\n\n if django.VERSION < (1, 9):\n return field.rel\n return field.remote_field\n\n\ndef _resolve_model(obj):\n \"\"\"\n Resolve supplied `obj` to a Django model class.\n\n `obj` must be a Django model class itself, or a string\n representation of one. 
Useful in situations like GH #1225 where\n Django may not have resolved a string-based reference to a model in\n another model's foreign key definition.\n\n String representations should have the format:\n 'appname.ModelName'\n \"\"\"\n if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:\n app_name, model_name = obj.split('.')\n resolved_model = apps.get_model(app_name, model_name)\n if resolved_model is None:\n msg = \"Django did not return a model for {0}.{1}\"\n raise ImproperlyConfigured(msg.format(app_name, model_name))\n return resolved_model\n elif inspect.isclass(obj) and issubclass(obj, models.Model):\n return obj\n raise ValueError(\"{0} is not a Django model\".format(obj))\n\n\ndef get_related_model(field):\n if django.VERSION < (1, 9):\n return _resolve_model(field.rel.to)\n return field.remote_field.model\n\n\ndef value_from_object(field, obj):\n if django.VERSION < (1, 9):\n return field._get_val_from_obj(obj)\n field.value_from_object(obj)\n\n\n# contrib.postgres only supported from 1.8 onwards.\ntry:\n from django.contrib.postgres import fields as postgres_fields\nexcept ImportError:\n postgres_fields = None\n\n\n# JSONField is only supported from 1.9 onwards\ntry:\n from django.contrib.postgres.fields import JSONField\nexcept ImportError:\n JSONField = None\n\n\n# django-filter is optional\ntry:\n import django_filters\nexcept ImportError:\n django_filters = None\n\n\n# django-crispy-forms is optional\ntry:\n import crispy_forms\nexcept ImportError:\n crispy_forms = None\n\n\n# coreapi is optional (Note that uritemplate is a dependancy of coreapi)\ntry:\n import coreapi\n import uritemplate\nexcept (ImportError, SyntaxError):\n # SyntaxError is possible under python 3.2\n coreapi = None\n uritemplate = None\n\n\n# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS\n# Fixes (#1712). 
We keep the try/except for the test suite.\nguardian = None\ntry:\n if 'guardian' in settings.INSTALLED_APPS:\n import guardian\n import guardian.shortcuts # Fixes #1624\nexcept ImportError:\n pass\n\n\n# PATCH method is not implemented by Django\nif 'patch' not in View.http_method_names:\n View.http_method_names = View.http_method_names + ['patch']\n\n\n# Markdown is optional\ntry:\n import markdown\n\n if markdown.version <= '2.2':\n HEADERID_EXT_PATH = 'headerid'\n else:\n HEADERID_EXT_PATH = 'markdown.extensions.headerid'\n\n def apply_markdown(text):\n \"\"\"\n Simple wrapper around :func:`markdown.markdown` to set the base level\n of '#' style headers to <h2>.\n \"\"\"\n extensions = [HEADERID_EXT_PATH]\n extension_configs = {\n HEADERID_EXT_PATH: {\n 'level': '2'\n }\n }\n md = markdown.Markdown(\n extensions=extensions, extension_configs=extension_configs\n )\n return md.convert(text)\nexcept ImportError:\n apply_markdown = None\n\n\n# `separators` argument to `json.dumps()` differs between 2.x and 3.x\n# See: http://bugs.python.org/issue22767\nif six.PY3:\n SHORT_SEPARATORS = (',', ':')\n LONG_SEPARATORS = (', ', ': ')\n INDENT_SEPARATORS = (',', ': ')\nelse:\n SHORT_SEPARATORS = (b',', b':')\n LONG_SEPARATORS = (b', ', b': ')\n INDENT_SEPARATORS = (b',', b': ')\n\ntry:\n # DecimalValidator is unavailable in Django < 1.9\n from django.core.validators import DecimalValidator\nexcept ImportError:\n DecimalValidator = None\n\n\ndef set_rollback():\n if hasattr(transaction, 'set_rollback'):\n if connection.settings_dict.get('ATOMIC_REQUESTS', False):\n # If running in >=1.6 then mark a rollback as required,\n # and allow it to be handled by Django.\n if connection.in_atomic_block:\n transaction.set_rollback(True)\n elif transaction.is_managed():\n # Otherwise handle it explicitly if in managed mode.\n if transaction.is_dirty():\n transaction.rollback()\n transaction.leave_transaction_management()\n else:\n # transaction not managed\n pass\n\n\ndef template_render(template, context=None, request=None):\n \"\"\"\n Passing Context or RequestContext to Template.render is deprecated in 1.9+,\n see https://github.com/django/django/pull/3883 and\n https://github.com/django/django/blob/1.9/django/template/backends/django.py#L82-L84\n\n :param template: Template instance\n :param context: dict\n :param request: Request instance\n :return: rendered template as SafeText instance\n \"\"\"\n if isinstance(template, Template):\n if request:\n context = RequestContext(request, context)\n else:\n context = Context(context)\n return template.render(context)\n # backends template, e.g. django.template.backends.django.Template\n else:\n return template.render(context, request=request)\n", "path": "rest_framework/compat.py" } ]
[ { "content": "\"\"\"\nThe `compat` module provides support for backwards compatibility with older\nversions of Django/Python, and compatibility wrappers around optional packages.\n\"\"\"\n\n# flake8: noqa\nfrom __future__ import unicode_literals\n\nimport inspect\n\nimport django\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import connection, models, transaction\nfrom django.template import Context, RequestContext, Template\nfrom django.utils import six\nfrom django.views.generic import View\n\ntry:\n import importlib # Available in Python 3.1+\nexcept ImportError:\n from django.utils import importlib # Will be removed in Django 1.9\n\n\ndef unicode_repr(instance):\n # Get the repr of an instance, but ensure it is a unicode string\n # on both python 3 (already the case) and 2 (not the case).\n if six.PY2:\n return repr(instance).decode('utf-8')\n return repr(instance)\n\n\ndef unicode_to_repr(value):\n # Coerce a unicode string to the correct repr return type, depending on\n # the Python version. We wrap all our `__repr__` implementations with\n # this and then use unicode throughout internally.\n if six.PY2:\n return value.encode('utf-8')\n return value\n\n\ndef unicode_http_header(value):\n # Coerce HTTP header value to unicode.\n if isinstance(value, six.binary_type):\n return value.decode('iso-8859-1')\n return value\n\n\ndef total_seconds(timedelta):\n # TimeDelta.total_seconds() is only available in Python 2.7\n if hasattr(timedelta, 'total_seconds'):\n return timedelta.total_seconds()\n else:\n return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)\n\n\ndef distinct(queryset, base):\n if settings.DATABASES[queryset.db][\"ENGINE\"] == \"django.db.backends.oracle\":\n # distinct analogue for Oracle users\n return base.filter(pk__in=set(queryset.values_list('pk', flat=True)))\n return queryset.distinct()\n\n\n# Obtaining manager instances and names from model options differs after 1.10.\ndef get_names_and_managers(options):\n if django.VERSION >= (1, 10):\n # Django 1.10 onwards provides a `.managers` property on the Options.\n return [\n (manager.name, manager)\n for manager\n in options.managers\n ]\n # For Django 1.8 and 1.9, use the three-tuple information provided\n # by .concrete_managers and .abstract_managers\n return [\n (manager_info[1], manager_info[2])\n for manager_info\n in (options.concrete_managers + options.abstract_managers)\n ]\n\n\n# field.rel is deprecated from 1.9 onwards\ndef get_remote_field(field, **kwargs):\n if 'default' in kwargs:\n if django.VERSION < (1, 9):\n return getattr(field, 'rel', kwargs['default'])\n return getattr(field, 'remote_field', kwargs['default'])\n\n if django.VERSION < (1, 9):\n return field.rel\n return field.remote_field\n\n\ndef _resolve_model(obj):\n \"\"\"\n Resolve supplied `obj` to a Django model class.\n\n `obj` must be a Django model class itself, or a string\n representation of one. 
Useful in situations like GH #1225 where\n Django may not have resolved a string-based reference to a model in\n another model's foreign key definition.\n\n String representations should have the format:\n 'appname.ModelName'\n \"\"\"\n if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:\n app_name, model_name = obj.split('.')\n resolved_model = apps.get_model(app_name, model_name)\n if resolved_model is None:\n msg = \"Django did not return a model for {0}.{1}\"\n raise ImproperlyConfigured(msg.format(app_name, model_name))\n return resolved_model\n elif inspect.isclass(obj) and issubclass(obj, models.Model):\n return obj\n raise ValueError(\"{0} is not a Django model\".format(obj))\n\n\ndef get_related_model(field):\n if django.VERSION < (1, 9):\n return _resolve_model(field.rel.to)\n return field.remote_field.model\n\n\ndef value_from_object(field, obj):\n if django.VERSION < (1, 9):\n return field._get_val_from_obj(obj)\n return field.value_from_object(obj)\n\n\n# contrib.postgres only supported from 1.8 onwards.\ntry:\n from django.contrib.postgres import fields as postgres_fields\nexcept ImportError:\n postgres_fields = None\n\n\n# JSONField is only supported from 1.9 onwards\ntry:\n from django.contrib.postgres.fields import JSONField\nexcept ImportError:\n JSONField = None\n\n\n# django-filter is optional\ntry:\n import django_filters\nexcept ImportError:\n django_filters = None\n\n\n# django-crispy-forms is optional\ntry:\n import crispy_forms\nexcept ImportError:\n crispy_forms = None\n\n\n# coreapi is optional (Note that uritemplate is a dependancy of coreapi)\ntry:\n import coreapi\n import uritemplate\nexcept (ImportError, SyntaxError):\n # SyntaxError is possible under python 3.2\n coreapi = None\n uritemplate = None\n\n\n# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS\n# Fixes (#1712). 
We keep the try/except for the test suite.\nguardian = None\ntry:\n if 'guardian' in settings.INSTALLED_APPS:\n import guardian\n import guardian.shortcuts # Fixes #1624\nexcept ImportError:\n pass\n\n\n# PATCH method is not implemented by Django\nif 'patch' not in View.http_method_names:\n View.http_method_names = View.http_method_names + ['patch']\n\n\n# Markdown is optional\ntry:\n import markdown\n\n if markdown.version <= '2.2':\n HEADERID_EXT_PATH = 'headerid'\n else:\n HEADERID_EXT_PATH = 'markdown.extensions.headerid'\n\n def apply_markdown(text):\n \"\"\"\n Simple wrapper around :func:`markdown.markdown` to set the base level\n of '#' style headers to <h2>.\n \"\"\"\n extensions = [HEADERID_EXT_PATH]\n extension_configs = {\n HEADERID_EXT_PATH: {\n 'level': '2'\n }\n }\n md = markdown.Markdown(\n extensions=extensions, extension_configs=extension_configs\n )\n return md.convert(text)\nexcept ImportError:\n apply_markdown = None\n\n\n# `separators` argument to `json.dumps()` differs between 2.x and 3.x\n# See: http://bugs.python.org/issue22767\nif six.PY3:\n SHORT_SEPARATORS = (',', ':')\n LONG_SEPARATORS = (', ', ': ')\n INDENT_SEPARATORS = (',', ': ')\nelse:\n SHORT_SEPARATORS = (b',', b':')\n LONG_SEPARATORS = (b', ', b': ')\n INDENT_SEPARATORS = (b',', b': ')\n\ntry:\n # DecimalValidator is unavailable in Django < 1.9\n from django.core.validators import DecimalValidator\nexcept ImportError:\n DecimalValidator = None\n\n\ndef set_rollback():\n if hasattr(transaction, 'set_rollback'):\n if connection.settings_dict.get('ATOMIC_REQUESTS', False):\n # If running in >=1.6 then mark a rollback as required,\n # and allow it to be handled by Django.\n if connection.in_atomic_block:\n transaction.set_rollback(True)\n elif transaction.is_managed():\n # Otherwise handle it explicitly if in managed mode.\n if transaction.is_dirty():\n transaction.rollback()\n transaction.leave_transaction_management()\n else:\n # transaction not managed\n pass\n\n\ndef template_render(template, context=None, request=None):\n \"\"\"\n Passing Context or RequestContext to Template.render is deprecated in 1.9+,\n see https://github.com/django/django/pull/3883 and\n https://github.com/django/django/blob/1.9/django/template/backends/django.py#L82-L84\n\n :param template: Template instance\n :param context: dict\n :param request: Request instance\n :return: rendered template as SafeText instance\n \"\"\"\n if isinstance(template, Template):\n if request:\n context = RequestContext(request, context)\n else:\n context = Context(context)\n return template.render(context)\n # backends template, e.g. django.template.backends.django.Template\n else:\n return template.render(context, request=request)\n", "path": "rest_framework/compat.py" } ]
diff --git a/rest_framework/compat.py b/rest_framework/compat.py
index 9c69eaa032..94f64265aa 100644
--- a/rest_framework/compat.py
+++ b/rest_framework/compat.py
@@ -125,7 +125,7 @@ def get_related_model(field):
 def value_from_object(field, obj):
     if django.VERSION < (1, 9):
         return field._get_val_from_obj(obj)
-    field.value_from_object(obj)
+    return field.value_from_object(obj)
 
 
 # contrib.postgres only supported from 1.8 onwards.
redis__redis-py-3265
Discrepancy in `INFO` response parsing

**Version**: `5.0.0`

**Description**:

The code for parsing the `INFO` response creates a discrepancy in the new RediSearch index field types field, e.g., `search_fields_text`. If there is more than one section to this field, its corresponding value (e.g., `TEXT=1, SORTABLE=1`) is returned as a dictionary (as expected), while if there is only one value for it (e.g., `TEXT=1`) it is returned as a string! This is bad since we later use this value assuming that it is a dictionary, and thus face failures in the case that it is not.

The wanted behavior: we always get a dictionary back, whether we have only one section for this value or more.

Example: we have one section for the `TEXT` field and two for the `TAG` field, and this is what we get:

![image](https://github.com/redis/redis-py/assets/74051729/51282024-7e1c-4d2a-bae8-04dcaf0fc53e)

The source of the problem is in the `parse_info()` function, [here](https://github.com/redis/redis-py/blob/19b55c62389c890a96dd611e28aaaedba7506720/redis/_parsers/helpers.py#L35-L36).

Thanks!
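To make the discrepancy concrete, here is a small, self-contained sketch of the `get_value()` helper nested inside `parse_info()` (see the full before/after files below). The `strict` flag is an addition of mine so both behaviours can be compared side by side; it is not part of redis-py. With the original `or` test, `TEXT=1` counts as a scalar because it contains no comma and is returned as a raw string; requiring both no comma and no `=` sends it down the dict branch.

```python
def get_value(value, strict=True):
    # strict=True models the wanted check: "," not in value AND "=" not in value.
    # strict=False models the current check with `or`, which misclassifies "TEXT=1".
    if strict:
        is_scalar = "," not in value and "=" not in value
    else:
        is_scalar = "," not in value or "=" not in value
    if is_scalar:
        try:
            return float(value) if "." in value else int(value)
        except ValueError:
            return value
    sub_dict = {}
    for item in value.split(","):
        k, v = item.rsplit("=", 1)
        sub_dict[k] = get_value(v, strict)
    return sub_dict


print(get_value("TEXT=1", strict=False))  # 'TEXT=1'  -- plain string
print(get_value("TEXT=1", strict=True))   # {'TEXT': 1}
print(get_value("TEXT=1,SORTABLE=1"))     # {'TEXT': 1, 'SORTABLE': 1} in both cases
```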
[ { "content": "import datetime\n\nfrom redis.utils import str_if_bytes\n\n\ndef timestamp_to_datetime(response):\n \"Converts a unix timestamp to a Python datetime object\"\n if not response:\n return None\n try:\n response = int(response)\n except ValueError:\n return None\n return datetime.datetime.fromtimestamp(response)\n\n\ndef parse_debug_object(response):\n \"Parse the results of Redis's DEBUG OBJECT command into a Python dict\"\n # The 'type' of the object is the first item in the response, but isn't\n # prefixed with a name\n response = str_if_bytes(response)\n response = \"type:\" + response\n response = dict(kv.split(\":\") for kv in response.split())\n\n # parse some expected int values from the string response\n # note: this cmd isn't spec'd so these may not appear in all redis versions\n int_fields = (\"refcount\", \"serializedlength\", \"lru\", \"lru_seconds_idle\")\n for field in int_fields:\n if field in response:\n response[field] = int(response[field])\n\n return response\n\n\ndef parse_info(response):\n \"\"\"Parse the result of Redis's INFO command into a Python dict\"\"\"\n info = {}\n response = str_if_bytes(response)\n\n def get_value(value):\n if \",\" not in value or \"=\" not in value:\n try:\n if \".\" in value:\n return float(value)\n else:\n return int(value)\n except ValueError:\n return value\n else:\n sub_dict = {}\n for item in value.split(\",\"):\n k, v = item.rsplit(\"=\", 1)\n sub_dict[k] = get_value(v)\n return sub_dict\n\n for line in response.splitlines():\n if line and not line.startswith(\"#\"):\n if line.find(\":\") != -1:\n # Split, the info fields keys and values.\n # Note that the value may contain ':'. but the 'host:'\n # pseudo-command is the only case where the key contains ':'\n key, value = line.split(\":\", 1)\n if key == \"cmdstat_host\":\n key, value = line.rsplit(\":\", 1)\n\n if key == \"module\":\n # Hardcode a list for key 'modules' since there could be\n # multiple lines that started with 'module'\n info.setdefault(\"modules\", []).append(get_value(value))\n else:\n info[key] = get_value(value)\n else:\n # if the line isn't splittable, append it to the \"__raw__\" key\n info.setdefault(\"__raw__\", []).append(line)\n\n return info\n\n\ndef parse_memory_stats(response, **kwargs):\n \"\"\"Parse the results of MEMORY STATS\"\"\"\n stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)\n for key, value in stats.items():\n if key.startswith(\"db.\") and isinstance(value, list):\n stats[key] = pairs_to_dict(\n value, decode_keys=True, decode_string_values=True\n )\n return stats\n\n\nSENTINEL_STATE_TYPES = {\n \"can-failover-its-master\": int,\n \"config-epoch\": int,\n \"down-after-milliseconds\": int,\n \"failover-timeout\": int,\n \"info-refresh\": int,\n \"last-hello-message\": int,\n \"last-ok-ping-reply\": int,\n \"last-ping-reply\": int,\n \"last-ping-sent\": int,\n \"master-link-down-time\": int,\n \"master-port\": int,\n \"num-other-sentinels\": int,\n \"num-slaves\": int,\n \"o-down-time\": int,\n \"pending-commands\": int,\n \"parallel-syncs\": int,\n \"port\": int,\n \"quorum\": int,\n \"role-reported-time\": int,\n \"s-down-time\": int,\n \"slave-priority\": int,\n \"slave-repl-offset\": int,\n \"voted-leader-epoch\": int,\n}\n\n\ndef parse_sentinel_state(item):\n result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)\n flags = set(result[\"flags\"].split(\",\"))\n for name, flag in (\n (\"is_master\", \"master\"),\n (\"is_slave\", \"slave\"),\n (\"is_sdown\", \"s_down\"),\n (\"is_odown\", 
\"o_down\"),\n (\"is_sentinel\", \"sentinel\"),\n (\"is_disconnected\", \"disconnected\"),\n (\"is_master_down\", \"master_down\"),\n ):\n result[name] = flag in flags\n return result\n\n\ndef parse_sentinel_master(response):\n return parse_sentinel_state(map(str_if_bytes, response))\n\n\ndef parse_sentinel_state_resp3(response):\n result = {}\n for key in response:\n try:\n value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))\n result[str_if_bytes(key)] = value\n except Exception:\n result[str_if_bytes(key)] = response[str_if_bytes(key)]\n flags = set(result[\"flags\"].split(\",\"))\n result[\"flags\"] = flags\n return result\n\n\ndef parse_sentinel_masters(response):\n result = {}\n for item in response:\n state = parse_sentinel_state(map(str_if_bytes, item))\n result[state[\"name\"]] = state\n return result\n\n\ndef parse_sentinel_masters_resp3(response):\n return [parse_sentinel_state(master) for master in response]\n\n\ndef parse_sentinel_slaves_and_sentinels(response):\n return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]\n\n\ndef parse_sentinel_slaves_and_sentinels_resp3(response):\n return [parse_sentinel_state_resp3(item) for item in response]\n\n\ndef parse_sentinel_get_master(response):\n return response and (response[0], int(response[1])) or None\n\n\ndef pairs_to_dict(response, decode_keys=False, decode_string_values=False):\n \"\"\"Create a dict given a list of key/value pairs\"\"\"\n if response is None:\n return {}\n if decode_keys or decode_string_values:\n # the iter form is faster, but I don't know how to make that work\n # with a str_if_bytes() map\n keys = response[::2]\n if decode_keys:\n keys = map(str_if_bytes, keys)\n values = response[1::2]\n if decode_string_values:\n values = map(str_if_bytes, values)\n return dict(zip(keys, values))\n else:\n it = iter(response)\n return dict(zip(it, it))\n\n\ndef pairs_to_dict_typed(response, type_info):\n it = iter(response)\n result = {}\n for key, value in zip(it, it):\n if key in type_info:\n try:\n value = type_info[key](value)\n except Exception:\n # if for some reason the value can't be coerced, just use\n # the string value\n pass\n result[key] = value\n return result\n\n\ndef zset_score_pairs(response, **options):\n \"\"\"\n If ``withscores`` is specified in the options, return the response as\n a list of (value, score) pairs\n \"\"\"\n if not response or not options.get(\"withscores\"):\n return response\n score_cast_func = options.get(\"score_cast_func\", float)\n it = iter(response)\n return list(zip(it, map(score_cast_func, it)))\n\n\ndef sort_return_tuples(response, **options):\n \"\"\"\n If ``groups`` is specified, return the response as a list of\n n-element tuples with n being the value found in options['groups']\n \"\"\"\n if not response or not options.get(\"groups\"):\n return response\n n = options[\"groups\"]\n return list(zip(*[response[i::n] for i in range(n)]))\n\n\ndef parse_stream_list(response):\n if response is None:\n return None\n data = []\n for r in response:\n if r is not None:\n data.append((r[0], pairs_to_dict(r[1])))\n else:\n data.append((None, None))\n return data\n\n\ndef pairs_to_dict_with_str_keys(response):\n return pairs_to_dict(response, decode_keys=True)\n\n\ndef parse_list_of_dicts(response):\n return list(map(pairs_to_dict_with_str_keys, response))\n\n\ndef parse_xclaim(response, **options):\n if options.get(\"parse_justid\", False):\n return response\n return parse_stream_list(response)\n\n\ndef parse_xautoclaim(response, **options):\n if 
options.get(\"parse_justid\", False):\n return response[1]\n response[1] = parse_stream_list(response[1])\n return response\n\n\ndef parse_xinfo_stream(response, **options):\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(k): v for k, v in response.items()}\n if not options.get(\"full\", False):\n first = data.get(\"first-entry\")\n if first is not None:\n data[\"first-entry\"] = (first[0], pairs_to_dict(first[1]))\n last = data[\"last-entry\"]\n if last is not None:\n data[\"last-entry\"] = (last[0], pairs_to_dict(last[1]))\n else:\n data[\"entries\"] = {_id: pairs_to_dict(entry) for _id, entry in data[\"entries\"]}\n if isinstance(data[\"groups\"][0], list):\n data[\"groups\"] = [\n pairs_to_dict(group, decode_keys=True) for group in data[\"groups\"]\n ]\n else:\n data[\"groups\"] = [\n {str_if_bytes(k): v for k, v in group.items()}\n for group in data[\"groups\"]\n ]\n return data\n\n\ndef parse_xread(response):\n if response is None:\n return []\n return [[r[0], parse_stream_list(r[1])] for r in response]\n\n\ndef parse_xread_resp3(response):\n if response is None:\n return {}\n return {key: [parse_stream_list(value)] for key, value in response.items()}\n\n\ndef parse_xpending(response, **options):\n if options.get(\"parse_detail\", False):\n return parse_xpending_range(response)\n consumers = [{\"name\": n, \"pending\": int(p)} for n, p in response[3] or []]\n return {\n \"pending\": response[0],\n \"min\": response[1],\n \"max\": response[2],\n \"consumers\": consumers,\n }\n\n\ndef parse_xpending_range(response):\n k = (\"message_id\", \"consumer\", \"time_since_delivered\", \"times_delivered\")\n return [dict(zip(k, r)) for r in response]\n\n\ndef float_or_none(response):\n if response is None:\n return None\n return float(response)\n\n\ndef bool_ok(response, **options):\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_zadd(response, **options):\n if response is None:\n return None\n if options.get(\"as_score\"):\n return float(response)\n return int(response)\n\n\ndef parse_client_list(response, **options):\n clients = []\n for c in str_if_bytes(response).splitlines():\n # Values might contain '='\n clients.append(dict(pair.split(\"=\", 1) for pair in c.split(\" \")))\n return clients\n\n\ndef parse_config_get(response, **options):\n response = [str_if_bytes(i) if i is not None else None for i in response]\n return response and pairs_to_dict(response) or {}\n\n\ndef parse_scan(response, **options):\n cursor, r = response\n return int(cursor), r\n\n\ndef parse_hscan(response, **options):\n cursor, r = response\n no_values = options.get(\"no_values\", False)\n if no_values:\n payload = r or []\n else:\n payload = r and pairs_to_dict(r) or {}\n return int(cursor), payload\n\n\ndef parse_zscan(response, **options):\n score_cast_func = options.get(\"score_cast_func\", float)\n cursor, r = response\n it = iter(r)\n return int(cursor), list(zip(it, map(score_cast_func, it)))\n\n\ndef parse_zmscore(response, **options):\n # zmscore: list of scores (double precision floating point number) or nil\n return [float(score) if score is not None else None for score in response]\n\n\ndef parse_slowlog_get(response, **options):\n space = \" \" if options.get(\"decode_responses\", False) else b\" \"\n\n def parse_item(item):\n result = {\"id\": item[0], \"start_time\": int(item[1]), \"duration\": int(item[2])}\n # Redis Enterprise injects another entry at index [3], which has\n # the complexity info (i.e. 
the value N in case the command has\n # an O(N) complexity) instead of the command.\n if isinstance(item[3], list):\n result[\"command\"] = space.join(item[3])\n result[\"client_address\"] = item[4]\n result[\"client_name\"] = item[5]\n else:\n result[\"complexity\"] = item[3]\n result[\"command\"] = space.join(item[4])\n result[\"client_address\"] = item[5]\n result[\"client_name\"] = item[6]\n return result\n\n return [parse_item(item) for item in response]\n\n\ndef parse_stralgo(response, **options):\n \"\"\"\n Parse the response from `STRALGO` command.\n Without modifiers the returned value is string.\n When LEN is given the command returns the length of the result\n (i.e integer).\n When IDX is given the command returns a dictionary with the LCS\n length and all the ranges in both the strings, start and end\n offset for each string, where there are matches.\n When WITHMATCHLEN is given, each array representing a match will\n also have the length of the match at the beginning of the array.\n \"\"\"\n if options.get(\"len\", False):\n return int(response)\n if options.get(\"idx\", False):\n if options.get(\"withmatchlen\", False):\n matches = [\n [(int(match[-1]))] + list(map(tuple, match[:-1]))\n for match in response[1]\n ]\n else:\n matches = [list(map(tuple, match)) for match in response[1]]\n return {\n str_if_bytes(response[0]): matches,\n str_if_bytes(response[2]): int(response[3]),\n }\n return str_if_bytes(response)\n\n\ndef parse_cluster_info(response, **options):\n response = str_if_bytes(response)\n return dict(line.split(\":\") for line in response.splitlines() if line)\n\n\ndef _parse_node_line(line):\n line_items = line.split(\" \")\n node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(\" \")[:8]\n addr = addr.split(\"@\")[0]\n node_dict = {\n \"node_id\": node_id,\n \"flags\": flags,\n \"master_id\": master_id,\n \"last_ping_sent\": ping,\n \"last_pong_rcvd\": pong,\n \"epoch\": epoch,\n \"slots\": [],\n \"migrations\": [],\n \"connected\": True if connected == \"connected\" else False,\n }\n if len(line_items) >= 9:\n slots, migrations = _parse_slots(line_items[8:])\n node_dict[\"slots\"], node_dict[\"migrations\"] = slots, migrations\n return addr, node_dict\n\n\ndef _parse_slots(slot_ranges):\n slots, migrations = [], []\n for s_range in slot_ranges:\n if \"->-\" in s_range:\n slot_id, dst_node_id = s_range[1:-1].split(\"->-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": dst_node_id, \"state\": \"migrating\"}\n )\n elif \"-<-\" in s_range:\n slot_id, src_node_id = s_range[1:-1].split(\"-<-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": src_node_id, \"state\": \"importing\"}\n )\n else:\n s_range = [sl for sl in s_range.split(\"-\")]\n slots.append(s_range)\n\n return slots, migrations\n\n\ndef parse_cluster_nodes(response, **options):\n \"\"\"\n @see: https://redis.io/commands/cluster-nodes # string / bytes\n @see: https://redis.io/commands/cluster-replicas # list of string / bytes\n \"\"\"\n if isinstance(response, (str, bytes)):\n response = response.splitlines()\n return dict(_parse_node_line(str_if_bytes(node)) for node in response)\n\n\ndef parse_geosearch_generic(response, **options):\n \"\"\"\n Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'\n commands according to 'withdist', 'withhash' and 'withcoord' labels.\n \"\"\"\n try:\n if options[\"store\"] or options[\"store_dist\"]:\n # `store` and `store_dist` cant be combined\n # with other command arguments.\n # relevant to 'GEORADIUS' and 
'GEORADIUSBYMEMBER'\n return response\n except KeyError: # it means the command was sent via execute_command\n return response\n\n if type(response) != list:\n response_list = [response]\n else:\n response_list = response\n\n if not options[\"withdist\"] and not options[\"withcoord\"] and not options[\"withhash\"]:\n # just a bunch of places\n return response_list\n\n cast = {\n \"withdist\": float,\n \"withcoord\": lambda ll: (float(ll[0]), float(ll[1])),\n \"withhash\": int,\n }\n\n # zip all output results with each casting function to get\n # the properly native Python value.\n f = [lambda x: x]\n f += [cast[o] for o in [\"withdist\", \"withhash\", \"withcoord\"] if options[o]]\n return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]\n\n\ndef parse_command(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = int(command[1])\n cmd_dict[\"flags\"] = [str_if_bytes(flag) for flag in command[2]]\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_command_resp3(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = command[1]\n cmd_dict[\"flags\"] = {str_if_bytes(flag) for flag in command[2]}\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n cmd_dict[\"acl_categories\"] = command[6]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_pubsub_numsub(response, **options):\n return list(zip(response[0::2], response[1::2]))\n\n\ndef parse_client_kill(response, **options):\n if isinstance(response, int):\n return response\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_acl_getuser(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(key): value for key, value in response.items()}\n\n # convert everything but user-defined data in 'keys' to native strings\n data[\"flags\"] = list(map(str_if_bytes, data[\"flags\"]))\n data[\"passwords\"] = list(map(str_if_bytes, data[\"passwords\"]))\n data[\"commands\"] = str_if_bytes(data[\"commands\"])\n if isinstance(data[\"keys\"], str) or isinstance(data[\"keys\"], bytes):\n data[\"keys\"] = list(str_if_bytes(data[\"keys\"]).split(\" \"))\n if data[\"keys\"] == [\"\"]:\n data[\"keys\"] = []\n if \"channels\" in data:\n if isinstance(data[\"channels\"], str) or isinstance(data[\"channels\"], bytes):\n data[\"channels\"] = list(str_if_bytes(data[\"channels\"]).split(\" \"))\n if data[\"channels\"] == [\"\"]:\n data[\"channels\"] = []\n if \"selectors\" in data:\n if data[\"selectors\"] != [] and isinstance(data[\"selectors\"][0], list):\n data[\"selectors\"] = [\n list(map(str_if_bytes, selector)) for selector in data[\"selectors\"]\n ]\n elif data[\"selectors\"] != []:\n data[\"selectors\"] = [\n {str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}\n 
for selector in data[\"selectors\"]\n ]\n\n # split 'commands' into separate 'categories' and 'commands' lists\n commands, categories = [], []\n for command in data[\"commands\"].split(\" \"):\n categories.append(command) if \"@\" in command else commands.append(command)\n\n data[\"commands\"] = commands\n data[\"categories\"] = categories\n data[\"enabled\"] = \"on\" in data[\"flags\"]\n return data\n\n\ndef parse_acl_log(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = []\n for log in response:\n log_data = pairs_to_dict(log, True, True)\n client_info = log_data.get(\"client-info\", \"\")\n log_data[\"client-info\"] = parse_client_info(client_info)\n\n # float() is lossy comparing to the \"double\" in C\n log_data[\"age-seconds\"] = float(log_data[\"age-seconds\"])\n data.append(log_data)\n else:\n data = bool_ok(response)\n return data\n\n\ndef parse_client_info(value):\n \"\"\"\n Parsing client-info in ACL Log in following format.\n \"key1=value1 key2=value2 key3=value3\"\n \"\"\"\n client_info = {}\n for info in str_if_bytes(value).strip().split():\n key, value = info.split(\"=\")\n client_info[key] = value\n\n # Those fields are defined as int in networking.c\n for int_key in {\n \"id\",\n \"age\",\n \"idle\",\n \"db\",\n \"sub\",\n \"psub\",\n \"multi\",\n \"qbuf\",\n \"qbuf-free\",\n \"obl\",\n \"argv-mem\",\n \"oll\",\n \"omem\",\n \"tot-mem\",\n }:\n client_info[int_key] = int(client_info[int_key])\n return client_info\n\n\ndef parse_set_result(response, **options):\n \"\"\"\n Handle SET result since GET argument is available since Redis 6.2.\n Parsing SET result into:\n - BOOL\n - String when GET argument is used\n \"\"\"\n if options.get(\"get\"):\n # Redis will return a getCommand result.\n # See `setGenericCommand` in t_string.c\n return response\n return response and str_if_bytes(response) == \"OK\"\n\n\ndef string_keys_to_dict(key_string, callback):\n return dict.fromkeys(key_string.split(), callback)\n\n\n_RedisCallbacks = {\n **string_keys_to_dict(\n \"AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX \"\n \"PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE\",\n bool,\n ),\n **string_keys_to_dict(\"HINCRBYFLOAT INCRBYFLOAT\", float),\n **string_keys_to_dict(\n \"ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE \"\n \"RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH\",\n bool_ok,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread),\n **string_keys_to_dict(\n \"GEORADIUS GEORADIUSBYMEMBER GEOSEARCH\",\n parse_geosearch_generic,\n ),\n **string_keys_to_dict(\"XRANGE XREVRANGE\", parse_stream_list),\n \"ACL GETUSER\": parse_acl_getuser,\n \"ACL LOAD\": bool_ok,\n \"ACL LOG\": parse_acl_log,\n \"ACL SETUSER\": bool_ok,\n \"ACL SAVE\": bool_ok,\n \"CLIENT INFO\": parse_client_info,\n \"CLIENT KILL\": parse_client_kill,\n \"CLIENT LIST\": parse_client_list,\n \"CLIENT PAUSE\": bool_ok,\n \"CLIENT SETINFO\": bool_ok,\n \"CLIENT SETNAME\": bool_ok,\n \"CLIENT UNBLOCK\": bool,\n \"CLUSTER ADDSLOTS\": bool_ok,\n \"CLUSTER ADDSLOTSRANGE\": bool_ok,\n \"CLUSTER DELSLOTS\": bool_ok,\n \"CLUSTER DELSLOTSRANGE\": bool_ok,\n \"CLUSTER FAILOVER\": bool_ok,\n \"CLUSTER FORGET\": bool_ok,\n \"CLUSTER INFO\": parse_cluster_info,\n \"CLUSTER MEET\": bool_ok,\n \"CLUSTER NODES\": parse_cluster_nodes,\n \"CLUSTER REPLICAS\": parse_cluster_nodes,\n \"CLUSTER REPLICATE\": bool_ok,\n \"CLUSTER RESET\": bool_ok,\n \"CLUSTER SAVECONFIG\": bool_ok,\n \"CLUSTER SET-CONFIG-EPOCH\": bool_ok,\n \"CLUSTER 
SETSLOT\": bool_ok,\n \"CLUSTER SLAVES\": parse_cluster_nodes,\n \"COMMAND\": parse_command,\n \"CONFIG RESETSTAT\": bool_ok,\n \"CONFIG SET\": bool_ok,\n \"FUNCTION DELETE\": bool_ok,\n \"FUNCTION FLUSH\": bool_ok,\n \"FUNCTION RESTORE\": bool_ok,\n \"GEODIST\": float_or_none,\n \"HSCAN\": parse_hscan,\n \"INFO\": parse_info,\n \"LASTSAVE\": timestamp_to_datetime,\n \"MEMORY PURGE\": bool_ok,\n \"MODULE LOAD\": bool,\n \"MODULE UNLOAD\": bool,\n \"PING\": lambda r: str_if_bytes(r) == \"PONG\",\n \"PUBSUB NUMSUB\": parse_pubsub_numsub,\n \"PUBSUB SHARDNUMSUB\": parse_pubsub_numsub,\n \"QUIT\": bool_ok,\n \"SET\": parse_set_result,\n \"SCAN\": parse_scan,\n \"SCRIPT EXISTS\": lambda r: list(map(bool, r)),\n \"SCRIPT FLUSH\": bool_ok,\n \"SCRIPT KILL\": bool_ok,\n \"SCRIPT LOAD\": str_if_bytes,\n \"SENTINEL CKQUORUM\": bool_ok,\n \"SENTINEL FAILOVER\": bool_ok,\n \"SENTINEL FLUSHCONFIG\": bool_ok,\n \"SENTINEL GET-MASTER-ADDR-BY-NAME\": parse_sentinel_get_master,\n \"SENTINEL MONITOR\": bool_ok,\n \"SENTINEL RESET\": bool_ok,\n \"SENTINEL REMOVE\": bool_ok,\n \"SENTINEL SET\": bool_ok,\n \"SLOWLOG GET\": parse_slowlog_get,\n \"SLOWLOG RESET\": bool_ok,\n \"SORT\": sort_return_tuples,\n \"SSCAN\": parse_scan,\n \"TIME\": lambda x: (int(x[0]), int(x[1])),\n \"XAUTOCLAIM\": parse_xautoclaim,\n \"XCLAIM\": parse_xclaim,\n \"XGROUP CREATE\": bool_ok,\n \"XGROUP DESTROY\": bool,\n \"XGROUP SETID\": bool_ok,\n \"XINFO STREAM\": parse_xinfo_stream,\n \"XPENDING\": parse_xpending,\n \"ZSCAN\": parse_zscan,\n}\n\n\n_RedisCallbacksRESP2 = {\n **string_keys_to_dict(\n \"SDIFF SINTER SMEMBERS SUNION\", lambda r: r and set(r) or set()\n ),\n **string_keys_to_dict(\n \"ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZRANK ZREVRANGE \"\n \"ZREVRANGEBYSCORE ZREVRANK ZUNION\",\n zset_score_pairs,\n ),\n **string_keys_to_dict(\"ZINCRBY ZSCORE\", float_or_none),\n **string_keys_to_dict(\"BGREWRITEAOF BGSAVE\", lambda r: True),\n **string_keys_to_dict(\"BLPOP BRPOP\", lambda r: r and tuple(r) or None),\n **string_keys_to_dict(\n \"BZPOPMAX BZPOPMIN\", lambda r: r and (r[0], r[1], float(r[2])) or None\n ),\n \"ACL CAT\": lambda r: list(map(str_if_bytes, r)),\n \"ACL GENPASS\": str_if_bytes,\n \"ACL HELP\": lambda r: list(map(str_if_bytes, r)),\n \"ACL LIST\": lambda r: list(map(str_if_bytes, r)),\n \"ACL USERS\": lambda r: list(map(str_if_bytes, r)),\n \"ACL WHOAMI\": str_if_bytes,\n \"CLIENT GETNAME\": str_if_bytes,\n \"CLIENT TRACKINGINFO\": lambda r: list(map(str_if_bytes, r)),\n \"CLUSTER GETKEYSINSLOT\": lambda r: list(map(str_if_bytes, r)),\n \"COMMAND GETKEYS\": lambda r: list(map(str_if_bytes, r)),\n \"CONFIG GET\": parse_config_get,\n \"DEBUG OBJECT\": parse_debug_object,\n \"GEOHASH\": lambda r: list(map(str_if_bytes, r)),\n \"GEOPOS\": lambda r: list(\n map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)\n ),\n \"HGETALL\": lambda r: r and pairs_to_dict(r) or {},\n \"MEMORY STATS\": parse_memory_stats,\n \"MODULE LIST\": lambda r: [pairs_to_dict(m) for m in r],\n \"RESET\": str_if_bytes,\n \"SENTINEL MASTER\": parse_sentinel_master,\n \"SENTINEL MASTERS\": parse_sentinel_masters,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels,\n \"STRALGO\": parse_stralgo,\n \"XINFO CONSUMERS\": parse_list_of_dicts,\n \"XINFO GROUPS\": parse_list_of_dicts,\n \"ZADD\": parse_zadd,\n \"ZMSCORE\": parse_zmscore,\n}\n\n\n_RedisCallbacksRESP3 = {\n **string_keys_to_dict(\n \"ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE 
ZREVRANGE ZREVRANGEBYSCORE \"\n \"ZUNION HGETALL XREADGROUP\",\n lambda r, **kwargs: r,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread_resp3),\n \"ACL LOG\": lambda r: (\n [\n {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()}\n for x in r\n ]\n if isinstance(r, list)\n else bool_ok(r)\n ),\n \"COMMAND\": parse_command_resp3,\n \"CONFIG GET\": lambda r: {\n str_if_bytes(key) if key is not None else None: (\n str_if_bytes(value) if value is not None else None\n )\n for key, value in r.items()\n },\n \"MEMORY STATS\": lambda r: {str_if_bytes(key): value for key, value in r.items()},\n \"SENTINEL MASTER\": parse_sentinel_state_resp3,\n \"SENTINEL MASTERS\": parse_sentinel_masters_resp3,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels_resp3,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels_resp3,\n \"STRALGO\": lambda r, **options: (\n {str_if_bytes(key): str_if_bytes(value) for key, value in r.items()}\n if isinstance(r, dict)\n else str_if_bytes(r)\n ),\n \"XINFO CONSUMERS\": lambda r: [\n {str_if_bytes(key): value for key, value in x.items()} for x in r\n ],\n \"XINFO GROUPS\": lambda r: [\n {str_if_bytes(key): value for key, value in d.items()} for d in r\n ],\n}\n", "path": "redis/_parsers/helpers.py" } ]
[ { "content": "import datetime\n\nfrom redis.utils import str_if_bytes\n\n\ndef timestamp_to_datetime(response):\n \"Converts a unix timestamp to a Python datetime object\"\n if not response:\n return None\n try:\n response = int(response)\n except ValueError:\n return None\n return datetime.datetime.fromtimestamp(response)\n\n\ndef parse_debug_object(response):\n \"Parse the results of Redis's DEBUG OBJECT command into a Python dict\"\n # The 'type' of the object is the first item in the response, but isn't\n # prefixed with a name\n response = str_if_bytes(response)\n response = \"type:\" + response\n response = dict(kv.split(\":\") for kv in response.split())\n\n # parse some expected int values from the string response\n # note: this cmd isn't spec'd so these may not appear in all redis versions\n int_fields = (\"refcount\", \"serializedlength\", \"lru\", \"lru_seconds_idle\")\n for field in int_fields:\n if field in response:\n response[field] = int(response[field])\n\n return response\n\n\ndef parse_info(response):\n \"\"\"Parse the result of Redis's INFO command into a Python dict\"\"\"\n info = {}\n response = str_if_bytes(response)\n\n def get_value(value):\n if \",\" not in value and \"=\" not in value:\n try:\n if \".\" in value:\n return float(value)\n else:\n return int(value)\n except ValueError:\n return value\n else:\n sub_dict = {}\n for item in value.split(\",\"):\n k, v = item.rsplit(\"=\", 1)\n sub_dict[k] = get_value(v)\n return sub_dict\n\n for line in response.splitlines():\n if line and not line.startswith(\"#\"):\n if line.find(\":\") != -1:\n # Split, the info fields keys and values.\n # Note that the value may contain ':'. but the 'host:'\n # pseudo-command is the only case where the key contains ':'\n key, value = line.split(\":\", 1)\n if key == \"cmdstat_host\":\n key, value = line.rsplit(\":\", 1)\n\n if key == \"module\":\n # Hardcode a list for key 'modules' since there could be\n # multiple lines that started with 'module'\n info.setdefault(\"modules\", []).append(get_value(value))\n else:\n info[key] = get_value(value)\n else:\n # if the line isn't splittable, append it to the \"__raw__\" key\n info.setdefault(\"__raw__\", []).append(line)\n\n return info\n\n\ndef parse_memory_stats(response, **kwargs):\n \"\"\"Parse the results of MEMORY STATS\"\"\"\n stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)\n for key, value in stats.items():\n if key.startswith(\"db.\") and isinstance(value, list):\n stats[key] = pairs_to_dict(\n value, decode_keys=True, decode_string_values=True\n )\n return stats\n\n\nSENTINEL_STATE_TYPES = {\n \"can-failover-its-master\": int,\n \"config-epoch\": int,\n \"down-after-milliseconds\": int,\n \"failover-timeout\": int,\n \"info-refresh\": int,\n \"last-hello-message\": int,\n \"last-ok-ping-reply\": int,\n \"last-ping-reply\": int,\n \"last-ping-sent\": int,\n \"master-link-down-time\": int,\n \"master-port\": int,\n \"num-other-sentinels\": int,\n \"num-slaves\": int,\n \"o-down-time\": int,\n \"pending-commands\": int,\n \"parallel-syncs\": int,\n \"port\": int,\n \"quorum\": int,\n \"role-reported-time\": int,\n \"s-down-time\": int,\n \"slave-priority\": int,\n \"slave-repl-offset\": int,\n \"voted-leader-epoch\": int,\n}\n\n\ndef parse_sentinel_state(item):\n result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)\n flags = set(result[\"flags\"].split(\",\"))\n for name, flag in (\n (\"is_master\", \"master\"),\n (\"is_slave\", \"slave\"),\n (\"is_sdown\", \"s_down\"),\n (\"is_odown\", 
\"o_down\"),\n (\"is_sentinel\", \"sentinel\"),\n (\"is_disconnected\", \"disconnected\"),\n (\"is_master_down\", \"master_down\"),\n ):\n result[name] = flag in flags\n return result\n\n\ndef parse_sentinel_master(response):\n return parse_sentinel_state(map(str_if_bytes, response))\n\n\ndef parse_sentinel_state_resp3(response):\n result = {}\n for key in response:\n try:\n value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))\n result[str_if_bytes(key)] = value\n except Exception:\n result[str_if_bytes(key)] = response[str_if_bytes(key)]\n flags = set(result[\"flags\"].split(\",\"))\n result[\"flags\"] = flags\n return result\n\n\ndef parse_sentinel_masters(response):\n result = {}\n for item in response:\n state = parse_sentinel_state(map(str_if_bytes, item))\n result[state[\"name\"]] = state\n return result\n\n\ndef parse_sentinel_masters_resp3(response):\n return [parse_sentinel_state(master) for master in response]\n\n\ndef parse_sentinel_slaves_and_sentinels(response):\n return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]\n\n\ndef parse_sentinel_slaves_and_sentinels_resp3(response):\n return [parse_sentinel_state_resp3(item) for item in response]\n\n\ndef parse_sentinel_get_master(response):\n return response and (response[0], int(response[1])) or None\n\n\ndef pairs_to_dict(response, decode_keys=False, decode_string_values=False):\n \"\"\"Create a dict given a list of key/value pairs\"\"\"\n if response is None:\n return {}\n if decode_keys or decode_string_values:\n # the iter form is faster, but I don't know how to make that work\n # with a str_if_bytes() map\n keys = response[::2]\n if decode_keys:\n keys = map(str_if_bytes, keys)\n values = response[1::2]\n if decode_string_values:\n values = map(str_if_bytes, values)\n return dict(zip(keys, values))\n else:\n it = iter(response)\n return dict(zip(it, it))\n\n\ndef pairs_to_dict_typed(response, type_info):\n it = iter(response)\n result = {}\n for key, value in zip(it, it):\n if key in type_info:\n try:\n value = type_info[key](value)\n except Exception:\n # if for some reason the value can't be coerced, just use\n # the string value\n pass\n result[key] = value\n return result\n\n\ndef zset_score_pairs(response, **options):\n \"\"\"\n If ``withscores`` is specified in the options, return the response as\n a list of (value, score) pairs\n \"\"\"\n if not response or not options.get(\"withscores\"):\n return response\n score_cast_func = options.get(\"score_cast_func\", float)\n it = iter(response)\n return list(zip(it, map(score_cast_func, it)))\n\n\ndef sort_return_tuples(response, **options):\n \"\"\"\n If ``groups`` is specified, return the response as a list of\n n-element tuples with n being the value found in options['groups']\n \"\"\"\n if not response or not options.get(\"groups\"):\n return response\n n = options[\"groups\"]\n return list(zip(*[response[i::n] for i in range(n)]))\n\n\ndef parse_stream_list(response):\n if response is None:\n return None\n data = []\n for r in response:\n if r is not None:\n data.append((r[0], pairs_to_dict(r[1])))\n else:\n data.append((None, None))\n return data\n\n\ndef pairs_to_dict_with_str_keys(response):\n return pairs_to_dict(response, decode_keys=True)\n\n\ndef parse_list_of_dicts(response):\n return list(map(pairs_to_dict_with_str_keys, response))\n\n\ndef parse_xclaim(response, **options):\n if options.get(\"parse_justid\", False):\n return response\n return parse_stream_list(response)\n\n\ndef parse_xautoclaim(response, **options):\n if 
options.get(\"parse_justid\", False):\n return response[1]\n response[1] = parse_stream_list(response[1])\n return response\n\n\ndef parse_xinfo_stream(response, **options):\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(k): v for k, v in response.items()}\n if not options.get(\"full\", False):\n first = data.get(\"first-entry\")\n if first is not None:\n data[\"first-entry\"] = (first[0], pairs_to_dict(first[1]))\n last = data[\"last-entry\"]\n if last is not None:\n data[\"last-entry\"] = (last[0], pairs_to_dict(last[1]))\n else:\n data[\"entries\"] = {_id: pairs_to_dict(entry) for _id, entry in data[\"entries\"]}\n if isinstance(data[\"groups\"][0], list):\n data[\"groups\"] = [\n pairs_to_dict(group, decode_keys=True) for group in data[\"groups\"]\n ]\n else:\n data[\"groups\"] = [\n {str_if_bytes(k): v for k, v in group.items()}\n for group in data[\"groups\"]\n ]\n return data\n\n\ndef parse_xread(response):\n if response is None:\n return []\n return [[r[0], parse_stream_list(r[1])] for r in response]\n\n\ndef parse_xread_resp3(response):\n if response is None:\n return {}\n return {key: [parse_stream_list(value)] for key, value in response.items()}\n\n\ndef parse_xpending(response, **options):\n if options.get(\"parse_detail\", False):\n return parse_xpending_range(response)\n consumers = [{\"name\": n, \"pending\": int(p)} for n, p in response[3] or []]\n return {\n \"pending\": response[0],\n \"min\": response[1],\n \"max\": response[2],\n \"consumers\": consumers,\n }\n\n\ndef parse_xpending_range(response):\n k = (\"message_id\", \"consumer\", \"time_since_delivered\", \"times_delivered\")\n return [dict(zip(k, r)) for r in response]\n\n\ndef float_or_none(response):\n if response is None:\n return None\n return float(response)\n\n\ndef bool_ok(response, **options):\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_zadd(response, **options):\n if response is None:\n return None\n if options.get(\"as_score\"):\n return float(response)\n return int(response)\n\n\ndef parse_client_list(response, **options):\n clients = []\n for c in str_if_bytes(response).splitlines():\n # Values might contain '='\n clients.append(dict(pair.split(\"=\", 1) for pair in c.split(\" \")))\n return clients\n\n\ndef parse_config_get(response, **options):\n response = [str_if_bytes(i) if i is not None else None for i in response]\n return response and pairs_to_dict(response) or {}\n\n\ndef parse_scan(response, **options):\n cursor, r = response\n return int(cursor), r\n\n\ndef parse_hscan(response, **options):\n cursor, r = response\n no_values = options.get(\"no_values\", False)\n if no_values:\n payload = r or []\n else:\n payload = r and pairs_to_dict(r) or {}\n return int(cursor), payload\n\n\ndef parse_zscan(response, **options):\n score_cast_func = options.get(\"score_cast_func\", float)\n cursor, r = response\n it = iter(r)\n return int(cursor), list(zip(it, map(score_cast_func, it)))\n\n\ndef parse_zmscore(response, **options):\n # zmscore: list of scores (double precision floating point number) or nil\n return [float(score) if score is not None else None for score in response]\n\n\ndef parse_slowlog_get(response, **options):\n space = \" \" if options.get(\"decode_responses\", False) else b\" \"\n\n def parse_item(item):\n result = {\"id\": item[0], \"start_time\": int(item[1]), \"duration\": int(item[2])}\n # Redis Enterprise injects another entry at index [3], which has\n # the complexity info (i.e. 
the value N in case the command has\n # an O(N) complexity) instead of the command.\n if isinstance(item[3], list):\n result[\"command\"] = space.join(item[3])\n result[\"client_address\"] = item[4]\n result[\"client_name\"] = item[5]\n else:\n result[\"complexity\"] = item[3]\n result[\"command\"] = space.join(item[4])\n result[\"client_address\"] = item[5]\n result[\"client_name\"] = item[6]\n return result\n\n return [parse_item(item) for item in response]\n\n\ndef parse_stralgo(response, **options):\n \"\"\"\n Parse the response from `STRALGO` command.\n Without modifiers the returned value is string.\n When LEN is given the command returns the length of the result\n (i.e integer).\n When IDX is given the command returns a dictionary with the LCS\n length and all the ranges in both the strings, start and end\n offset for each string, where there are matches.\n When WITHMATCHLEN is given, each array representing a match will\n also have the length of the match at the beginning of the array.\n \"\"\"\n if options.get(\"len\", False):\n return int(response)\n if options.get(\"idx\", False):\n if options.get(\"withmatchlen\", False):\n matches = [\n [(int(match[-1]))] + list(map(tuple, match[:-1]))\n for match in response[1]\n ]\n else:\n matches = [list(map(tuple, match)) for match in response[1]]\n return {\n str_if_bytes(response[0]): matches,\n str_if_bytes(response[2]): int(response[3]),\n }\n return str_if_bytes(response)\n\n\ndef parse_cluster_info(response, **options):\n response = str_if_bytes(response)\n return dict(line.split(\":\") for line in response.splitlines() if line)\n\n\ndef _parse_node_line(line):\n line_items = line.split(\" \")\n node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(\" \")[:8]\n addr = addr.split(\"@\")[0]\n node_dict = {\n \"node_id\": node_id,\n \"flags\": flags,\n \"master_id\": master_id,\n \"last_ping_sent\": ping,\n \"last_pong_rcvd\": pong,\n \"epoch\": epoch,\n \"slots\": [],\n \"migrations\": [],\n \"connected\": True if connected == \"connected\" else False,\n }\n if len(line_items) >= 9:\n slots, migrations = _parse_slots(line_items[8:])\n node_dict[\"slots\"], node_dict[\"migrations\"] = slots, migrations\n return addr, node_dict\n\n\ndef _parse_slots(slot_ranges):\n slots, migrations = [], []\n for s_range in slot_ranges:\n if \"->-\" in s_range:\n slot_id, dst_node_id = s_range[1:-1].split(\"->-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": dst_node_id, \"state\": \"migrating\"}\n )\n elif \"-<-\" in s_range:\n slot_id, src_node_id = s_range[1:-1].split(\"-<-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": src_node_id, \"state\": \"importing\"}\n )\n else:\n s_range = [sl for sl in s_range.split(\"-\")]\n slots.append(s_range)\n\n return slots, migrations\n\n\ndef parse_cluster_nodes(response, **options):\n \"\"\"\n @see: https://redis.io/commands/cluster-nodes # string / bytes\n @see: https://redis.io/commands/cluster-replicas # list of string / bytes\n \"\"\"\n if isinstance(response, (str, bytes)):\n response = response.splitlines()\n return dict(_parse_node_line(str_if_bytes(node)) for node in response)\n\n\ndef parse_geosearch_generic(response, **options):\n \"\"\"\n Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'\n commands according to 'withdist', 'withhash' and 'withcoord' labels.\n \"\"\"\n try:\n if options[\"store\"] or options[\"store_dist\"]:\n # `store` and `store_dist` cant be combined\n # with other command arguments.\n # relevant to 'GEORADIUS' and 
'GEORADIUSBYMEMBER'\n return response\n except KeyError: # it means the command was sent via execute_command\n return response\n\n if type(response) != list:\n response_list = [response]\n else:\n response_list = response\n\n if not options[\"withdist\"] and not options[\"withcoord\"] and not options[\"withhash\"]:\n # just a bunch of places\n return response_list\n\n cast = {\n \"withdist\": float,\n \"withcoord\": lambda ll: (float(ll[0]), float(ll[1])),\n \"withhash\": int,\n }\n\n # zip all output results with each casting function to get\n # the properly native Python value.\n f = [lambda x: x]\n f += [cast[o] for o in [\"withdist\", \"withhash\", \"withcoord\"] if options[o]]\n return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]\n\n\ndef parse_command(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = int(command[1])\n cmd_dict[\"flags\"] = [str_if_bytes(flag) for flag in command[2]]\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_command_resp3(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = command[1]\n cmd_dict[\"flags\"] = {str_if_bytes(flag) for flag in command[2]}\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n cmd_dict[\"acl_categories\"] = command[6]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_pubsub_numsub(response, **options):\n return list(zip(response[0::2], response[1::2]))\n\n\ndef parse_client_kill(response, **options):\n if isinstance(response, int):\n return response\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_acl_getuser(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(key): value for key, value in response.items()}\n\n # convert everything but user-defined data in 'keys' to native strings\n data[\"flags\"] = list(map(str_if_bytes, data[\"flags\"]))\n data[\"passwords\"] = list(map(str_if_bytes, data[\"passwords\"]))\n data[\"commands\"] = str_if_bytes(data[\"commands\"])\n if isinstance(data[\"keys\"], str) or isinstance(data[\"keys\"], bytes):\n data[\"keys\"] = list(str_if_bytes(data[\"keys\"]).split(\" \"))\n if data[\"keys\"] == [\"\"]:\n data[\"keys\"] = []\n if \"channels\" in data:\n if isinstance(data[\"channels\"], str) or isinstance(data[\"channels\"], bytes):\n data[\"channels\"] = list(str_if_bytes(data[\"channels\"]).split(\" \"))\n if data[\"channels\"] == [\"\"]:\n data[\"channels\"] = []\n if \"selectors\" in data:\n if data[\"selectors\"] != [] and isinstance(data[\"selectors\"][0], list):\n data[\"selectors\"] = [\n list(map(str_if_bytes, selector)) for selector in data[\"selectors\"]\n ]\n elif data[\"selectors\"] != []:\n data[\"selectors\"] = [\n {str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}\n 
for selector in data[\"selectors\"]\n ]\n\n # split 'commands' into separate 'categories' and 'commands' lists\n commands, categories = [], []\n for command in data[\"commands\"].split(\" \"):\n categories.append(command) if \"@\" in command else commands.append(command)\n\n data[\"commands\"] = commands\n data[\"categories\"] = categories\n data[\"enabled\"] = \"on\" in data[\"flags\"]\n return data\n\n\ndef parse_acl_log(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = []\n for log in response:\n log_data = pairs_to_dict(log, True, True)\n client_info = log_data.get(\"client-info\", \"\")\n log_data[\"client-info\"] = parse_client_info(client_info)\n\n # float() is lossy comparing to the \"double\" in C\n log_data[\"age-seconds\"] = float(log_data[\"age-seconds\"])\n data.append(log_data)\n else:\n data = bool_ok(response)\n return data\n\n\ndef parse_client_info(value):\n \"\"\"\n Parsing client-info in ACL Log in following format.\n \"key1=value1 key2=value2 key3=value3\"\n \"\"\"\n client_info = {}\n for info in str_if_bytes(value).strip().split():\n key, value = info.split(\"=\")\n client_info[key] = value\n\n # Those fields are defined as int in networking.c\n for int_key in {\n \"id\",\n \"age\",\n \"idle\",\n \"db\",\n \"sub\",\n \"psub\",\n \"multi\",\n \"qbuf\",\n \"qbuf-free\",\n \"obl\",\n \"argv-mem\",\n \"oll\",\n \"omem\",\n \"tot-mem\",\n }:\n client_info[int_key] = int(client_info[int_key])\n return client_info\n\n\ndef parse_set_result(response, **options):\n \"\"\"\n Handle SET result since GET argument is available since Redis 6.2.\n Parsing SET result into:\n - BOOL\n - String when GET argument is used\n \"\"\"\n if options.get(\"get\"):\n # Redis will return a getCommand result.\n # See `setGenericCommand` in t_string.c\n return response\n return response and str_if_bytes(response) == \"OK\"\n\n\ndef string_keys_to_dict(key_string, callback):\n return dict.fromkeys(key_string.split(), callback)\n\n\n_RedisCallbacks = {\n **string_keys_to_dict(\n \"AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX \"\n \"PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE\",\n bool,\n ),\n **string_keys_to_dict(\"HINCRBYFLOAT INCRBYFLOAT\", float),\n **string_keys_to_dict(\n \"ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE \"\n \"RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH\",\n bool_ok,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread),\n **string_keys_to_dict(\n \"GEORADIUS GEORADIUSBYMEMBER GEOSEARCH\",\n parse_geosearch_generic,\n ),\n **string_keys_to_dict(\"XRANGE XREVRANGE\", parse_stream_list),\n \"ACL GETUSER\": parse_acl_getuser,\n \"ACL LOAD\": bool_ok,\n \"ACL LOG\": parse_acl_log,\n \"ACL SETUSER\": bool_ok,\n \"ACL SAVE\": bool_ok,\n \"CLIENT INFO\": parse_client_info,\n \"CLIENT KILL\": parse_client_kill,\n \"CLIENT LIST\": parse_client_list,\n \"CLIENT PAUSE\": bool_ok,\n \"CLIENT SETINFO\": bool_ok,\n \"CLIENT SETNAME\": bool_ok,\n \"CLIENT UNBLOCK\": bool,\n \"CLUSTER ADDSLOTS\": bool_ok,\n \"CLUSTER ADDSLOTSRANGE\": bool_ok,\n \"CLUSTER DELSLOTS\": bool_ok,\n \"CLUSTER DELSLOTSRANGE\": bool_ok,\n \"CLUSTER FAILOVER\": bool_ok,\n \"CLUSTER FORGET\": bool_ok,\n \"CLUSTER INFO\": parse_cluster_info,\n \"CLUSTER MEET\": bool_ok,\n \"CLUSTER NODES\": parse_cluster_nodes,\n \"CLUSTER REPLICAS\": parse_cluster_nodes,\n \"CLUSTER REPLICATE\": bool_ok,\n \"CLUSTER RESET\": bool_ok,\n \"CLUSTER SAVECONFIG\": bool_ok,\n \"CLUSTER SET-CONFIG-EPOCH\": bool_ok,\n \"CLUSTER 
SETSLOT\": bool_ok,\n \"CLUSTER SLAVES\": parse_cluster_nodes,\n \"COMMAND\": parse_command,\n \"CONFIG RESETSTAT\": bool_ok,\n \"CONFIG SET\": bool_ok,\n \"FUNCTION DELETE\": bool_ok,\n \"FUNCTION FLUSH\": bool_ok,\n \"FUNCTION RESTORE\": bool_ok,\n \"GEODIST\": float_or_none,\n \"HSCAN\": parse_hscan,\n \"INFO\": parse_info,\n \"LASTSAVE\": timestamp_to_datetime,\n \"MEMORY PURGE\": bool_ok,\n \"MODULE LOAD\": bool,\n \"MODULE UNLOAD\": bool,\n \"PING\": lambda r: str_if_bytes(r) == \"PONG\",\n \"PUBSUB NUMSUB\": parse_pubsub_numsub,\n \"PUBSUB SHARDNUMSUB\": parse_pubsub_numsub,\n \"QUIT\": bool_ok,\n \"SET\": parse_set_result,\n \"SCAN\": parse_scan,\n \"SCRIPT EXISTS\": lambda r: list(map(bool, r)),\n \"SCRIPT FLUSH\": bool_ok,\n \"SCRIPT KILL\": bool_ok,\n \"SCRIPT LOAD\": str_if_bytes,\n \"SENTINEL CKQUORUM\": bool_ok,\n \"SENTINEL FAILOVER\": bool_ok,\n \"SENTINEL FLUSHCONFIG\": bool_ok,\n \"SENTINEL GET-MASTER-ADDR-BY-NAME\": parse_sentinel_get_master,\n \"SENTINEL MONITOR\": bool_ok,\n \"SENTINEL RESET\": bool_ok,\n \"SENTINEL REMOVE\": bool_ok,\n \"SENTINEL SET\": bool_ok,\n \"SLOWLOG GET\": parse_slowlog_get,\n \"SLOWLOG RESET\": bool_ok,\n \"SORT\": sort_return_tuples,\n \"SSCAN\": parse_scan,\n \"TIME\": lambda x: (int(x[0]), int(x[1])),\n \"XAUTOCLAIM\": parse_xautoclaim,\n \"XCLAIM\": parse_xclaim,\n \"XGROUP CREATE\": bool_ok,\n \"XGROUP DESTROY\": bool,\n \"XGROUP SETID\": bool_ok,\n \"XINFO STREAM\": parse_xinfo_stream,\n \"XPENDING\": parse_xpending,\n \"ZSCAN\": parse_zscan,\n}\n\n\n_RedisCallbacksRESP2 = {\n **string_keys_to_dict(\n \"SDIFF SINTER SMEMBERS SUNION\", lambda r: r and set(r) or set()\n ),\n **string_keys_to_dict(\n \"ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZRANK ZREVRANGE \"\n \"ZREVRANGEBYSCORE ZREVRANK ZUNION\",\n zset_score_pairs,\n ),\n **string_keys_to_dict(\"ZINCRBY ZSCORE\", float_or_none),\n **string_keys_to_dict(\"BGREWRITEAOF BGSAVE\", lambda r: True),\n **string_keys_to_dict(\"BLPOP BRPOP\", lambda r: r and tuple(r) or None),\n **string_keys_to_dict(\n \"BZPOPMAX BZPOPMIN\", lambda r: r and (r[0], r[1], float(r[2])) or None\n ),\n \"ACL CAT\": lambda r: list(map(str_if_bytes, r)),\n \"ACL GENPASS\": str_if_bytes,\n \"ACL HELP\": lambda r: list(map(str_if_bytes, r)),\n \"ACL LIST\": lambda r: list(map(str_if_bytes, r)),\n \"ACL USERS\": lambda r: list(map(str_if_bytes, r)),\n \"ACL WHOAMI\": str_if_bytes,\n \"CLIENT GETNAME\": str_if_bytes,\n \"CLIENT TRACKINGINFO\": lambda r: list(map(str_if_bytes, r)),\n \"CLUSTER GETKEYSINSLOT\": lambda r: list(map(str_if_bytes, r)),\n \"COMMAND GETKEYS\": lambda r: list(map(str_if_bytes, r)),\n \"CONFIG GET\": parse_config_get,\n \"DEBUG OBJECT\": parse_debug_object,\n \"GEOHASH\": lambda r: list(map(str_if_bytes, r)),\n \"GEOPOS\": lambda r: list(\n map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)\n ),\n \"HGETALL\": lambda r: r and pairs_to_dict(r) or {},\n \"MEMORY STATS\": parse_memory_stats,\n \"MODULE LIST\": lambda r: [pairs_to_dict(m) for m in r],\n \"RESET\": str_if_bytes,\n \"SENTINEL MASTER\": parse_sentinel_master,\n \"SENTINEL MASTERS\": parse_sentinel_masters,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels,\n \"STRALGO\": parse_stralgo,\n \"XINFO CONSUMERS\": parse_list_of_dicts,\n \"XINFO GROUPS\": parse_list_of_dicts,\n \"ZADD\": parse_zadd,\n \"ZMSCORE\": parse_zmscore,\n}\n\n\n_RedisCallbacksRESP3 = {\n **string_keys_to_dict(\n \"ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE 
ZREVRANGE ZREVRANGEBYSCORE \"\n \"ZUNION HGETALL XREADGROUP\",\n lambda r, **kwargs: r,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread_resp3),\n \"ACL LOG\": lambda r: (\n [\n {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()}\n for x in r\n ]\n if isinstance(r, list)\n else bool_ok(r)\n ),\n \"COMMAND\": parse_command_resp3,\n \"CONFIG GET\": lambda r: {\n str_if_bytes(key) if key is not None else None: (\n str_if_bytes(value) if value is not None else None\n )\n for key, value in r.items()\n },\n \"MEMORY STATS\": lambda r: {str_if_bytes(key): value for key, value in r.items()},\n \"SENTINEL MASTER\": parse_sentinel_state_resp3,\n \"SENTINEL MASTERS\": parse_sentinel_masters_resp3,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels_resp3,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels_resp3,\n \"STRALGO\": lambda r, **options: (\n {str_if_bytes(key): str_if_bytes(value) for key, value in r.items()}\n if isinstance(r, dict)\n else str_if_bytes(r)\n ),\n \"XINFO CONSUMERS\": lambda r: [\n {str_if_bytes(key): value for key, value in x.items()} for x in r\n ],\n \"XINFO GROUPS\": lambda r: [\n {str_if_bytes(key): value for key, value in d.items()} for d in r\n ],\n}\n", "path": "redis/_parsers/helpers.py" } ]
diff --git a/redis/_parsers/helpers.py b/redis/_parsers/helpers.py index aa115aa4a1..57b12ab89e 100644 --- a/redis/_parsers/helpers.py +++ b/redis/_parsers/helpers.py @@ -38,7 +38,7 @@ def parse_info(response): response = str_if_bytes(response) def get_value(value): - if "," not in value or "=" not in value: + if "," not in value and "=" not in value: try: if "." in value: return float(value) diff --git a/tests/test_parsers/test_helpers.py b/tests/test_parsers/test_helpers.py new file mode 100644 index 0000000000..6430a237f6 --- /dev/null +++ b/tests/test_parsers/test_helpers.py @@ -0,0 +1,35 @@ +from redis._parsers.helpers import parse_info + + +def test_parse_info(): + info_output = """ +# Modules +module:name=search,ver=999999,api=1,filters=0,usedby=[],using=[ReJSON],options=[handle-io-errors] + +# search_fields_statistics +search_fields_text:Text=3 +search_fields_tag:Tag=2,Sortable=1 + +# search_version +search_version:99.99.99 +search_redis_version:7.2.2 - oss + +# search_runtime_configurations +search_query_timeout_ms:500 + """ + info = parse_info(info_output) + + assert isinstance(info["modules"], list) + assert isinstance(info["modules"][0], dict) + assert info["modules"][0]["name"] == "search" + + assert isinstance(info["search_fields_text"], dict) + assert info["search_fields_text"]["Text"] == 3 + + assert isinstance(info["search_fields_tag"], dict) + assert info["search_fields_tag"]["Tag"] == 2 + assert info["search_fields_tag"]["Sortable"] == 1 + + assert info["search_version"] == "99.99.99" + assert info["search_redis_version"] == "7.2.2 - oss" + assert info["search_query_timeout_ms"] == 500
redis__redis-py-3264
Discrepancy in `INFO` response parsing

**Version**: `5.0.0`

**Description**: The code that parses the `INFO` response creates a discrepancy in the new RediSearch index field-type fields, e.g., `search_fields_text`. If the field's value contains more than one `key=value` pair (e.g., `TEXT=1, SORTABLE=1`), it is returned as a dictionary, as expected; if it contains only one pair (e.g., `TEXT=1`), it is returned as a string. This is a problem because downstream code uses the value assuming it is a dictionary, and fails when it is not.

The desired behavior: we always get a dictionary back, whether the value has one pair or several.

Example: with one pair for the `TEXT` field and two for the `TAG` field, this is what we get:

![image](https://github.com/redis/redis-py/assets/74051729/51282024-7e1c-4d2a-bae8-04dcaf0fc53e)

The source of the problem is in the `parse_info()` function, [here](https://github.com/redis/redis-py/blob/19b55c62389c890a96dd611e28aaaedba7506720/redis/_parsers/helpers.py#L35-L36).

Thanks!
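To make the discrepancy concrete, here is a minimal, self-contained sketch of the `get_value` helper inside `parse_info()`, with the scalar check factored behind a `use_and` flag so the old `or` behavior and the fixed `and` behavior (the change applied in the diff above) can be compared side by side. The `use_and` flag is an illustration device only, not part of redis-py.

```python
def get_value(value, use_and=True):
    # Condition lifted from redis/_parsers/helpers.py::parse_info.
    # With the old `or`, a single `key=value` pair (no comma) falls into the
    # scalar branch and comes back as a plain string; with `and`, it is
    # parsed into a dict just like multi-pair values.
    if use_and:
        is_scalar = "," not in value and "=" not in value
    else:
        is_scalar = "," not in value or "=" not in value

    if is_scalar:
        try:
            return float(value) if "." in value else int(value)
        except ValueError:
            return value

    sub_dict = {}
    for item in value.split(","):
        k, v = item.rsplit("=", 1)
        sub_dict[k] = get_value(v, use_and)
    return sub_dict


# Old behavior: one pair -> string, two pairs -> dict (the reported discrepancy)
assert get_value("Text=3", use_and=False) == "Text=3"
assert get_value("Tag=2,Sortable=1", use_and=False) == {"Tag": 2, "Sortable": 1}

# Fixed behavior: both come back as dicts
assert get_value("Text=3") == {"Text": 3}
assert get_value("Tag=2,Sortable=1") == {"Tag": 2, "Sortable": 1}

# Plain scalars are unaffected
assert get_value("500") == 500
assert get_value("99.99.99") == "99.99.99"
```

With `and`, a value is treated as a scalar only when it contains neither a comma nor an `=`, which is exactly the one-character change the patch makes; values such as `99.99.99` or `7.2.2 - oss` still fall through the `int`/`float` conversion and are returned as strings, as the new test in the diff verifies.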
[ { "content": "import datetime\n\nfrom redis.utils import str_if_bytes\n\n\ndef timestamp_to_datetime(response):\n \"Converts a unix timestamp to a Python datetime object\"\n if not response:\n return None\n try:\n response = int(response)\n except ValueError:\n return None\n return datetime.datetime.fromtimestamp(response)\n\n\ndef parse_debug_object(response):\n \"Parse the results of Redis's DEBUG OBJECT command into a Python dict\"\n # The 'type' of the object is the first item in the response, but isn't\n # prefixed with a name\n response = str_if_bytes(response)\n response = \"type:\" + response\n response = dict(kv.split(\":\") for kv in response.split())\n\n # parse some expected int values from the string response\n # note: this cmd isn't spec'd so these may not appear in all redis versions\n int_fields = (\"refcount\", \"serializedlength\", \"lru\", \"lru_seconds_idle\")\n for field in int_fields:\n if field in response:\n response[field] = int(response[field])\n\n return response\n\n\ndef parse_info(response):\n \"\"\"Parse the result of Redis's INFO command into a Python dict\"\"\"\n info = {}\n response = str_if_bytes(response)\n\n def get_value(value):\n if \",\" not in value or \"=\" not in value:\n try:\n if \".\" in value:\n return float(value)\n else:\n return int(value)\n except ValueError:\n return value\n else:\n sub_dict = {}\n for item in value.split(\",\"):\n k, v = item.rsplit(\"=\", 1)\n sub_dict[k] = get_value(v)\n return sub_dict\n\n for line in response.splitlines():\n if line and not line.startswith(\"#\"):\n if line.find(\":\") != -1:\n # Split, the info fields keys and values.\n # Note that the value may contain ':'. but the 'host:'\n # pseudo-command is the only case where the key contains ':'\n key, value = line.split(\":\", 1)\n if key == \"cmdstat_host\":\n key, value = line.rsplit(\":\", 1)\n\n if key == \"module\":\n # Hardcode a list for key 'modules' since there could be\n # multiple lines that started with 'module'\n info.setdefault(\"modules\", []).append(get_value(value))\n else:\n info[key] = get_value(value)\n else:\n # if the line isn't splittable, append it to the \"__raw__\" key\n info.setdefault(\"__raw__\", []).append(line)\n\n return info\n\n\ndef parse_memory_stats(response, **kwargs):\n \"\"\"Parse the results of MEMORY STATS\"\"\"\n stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)\n for key, value in stats.items():\n if key.startswith(\"db.\"):\n stats[key] = pairs_to_dict(\n value, decode_keys=True, decode_string_values=True\n )\n return stats\n\n\nSENTINEL_STATE_TYPES = {\n \"can-failover-its-master\": int,\n \"config-epoch\": int,\n \"down-after-milliseconds\": int,\n \"failover-timeout\": int,\n \"info-refresh\": int,\n \"last-hello-message\": int,\n \"last-ok-ping-reply\": int,\n \"last-ping-reply\": int,\n \"last-ping-sent\": int,\n \"master-link-down-time\": int,\n \"master-port\": int,\n \"num-other-sentinels\": int,\n \"num-slaves\": int,\n \"o-down-time\": int,\n \"pending-commands\": int,\n \"parallel-syncs\": int,\n \"port\": int,\n \"quorum\": int,\n \"role-reported-time\": int,\n \"s-down-time\": int,\n \"slave-priority\": int,\n \"slave-repl-offset\": int,\n \"voted-leader-epoch\": int,\n}\n\n\ndef parse_sentinel_state(item):\n result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)\n flags = set(result[\"flags\"].split(\",\"))\n for name, flag in (\n (\"is_master\", \"master\"),\n (\"is_slave\", \"slave\"),\n (\"is_sdown\", \"s_down\"),\n (\"is_odown\", \"o_down\"),\n (\"is_sentinel\", 
\"sentinel\"),\n (\"is_disconnected\", \"disconnected\"),\n (\"is_master_down\", \"master_down\"),\n ):\n result[name] = flag in flags\n return result\n\n\ndef parse_sentinel_master(response):\n return parse_sentinel_state(map(str_if_bytes, response))\n\n\ndef parse_sentinel_state_resp3(response):\n result = {}\n for key in response:\n try:\n value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))\n result[str_if_bytes(key)] = value\n except Exception:\n result[str_if_bytes(key)] = response[str_if_bytes(key)]\n flags = set(result[\"flags\"].split(\",\"))\n result[\"flags\"] = flags\n return result\n\n\ndef parse_sentinel_masters(response):\n result = {}\n for item in response:\n state = parse_sentinel_state(map(str_if_bytes, item))\n result[state[\"name\"]] = state\n return result\n\n\ndef parse_sentinel_masters_resp3(response):\n return [parse_sentinel_state(master) for master in response]\n\n\ndef parse_sentinel_slaves_and_sentinels(response):\n return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]\n\n\ndef parse_sentinel_slaves_and_sentinels_resp3(response):\n return [parse_sentinel_state_resp3(item) for item in response]\n\n\ndef parse_sentinel_get_master(response):\n return response and (response[0], int(response[1])) or None\n\n\ndef pairs_to_dict(response, decode_keys=False, decode_string_values=False):\n \"\"\"Create a dict given a list of key/value pairs\"\"\"\n if response is None:\n return {}\n if decode_keys or decode_string_values:\n # the iter form is faster, but I don't know how to make that work\n # with a str_if_bytes() map\n keys = response[::2]\n if decode_keys:\n keys = map(str_if_bytes, keys)\n values = response[1::2]\n if decode_string_values:\n values = map(str_if_bytes, values)\n return dict(zip(keys, values))\n else:\n it = iter(response)\n return dict(zip(it, it))\n\n\ndef pairs_to_dict_typed(response, type_info):\n it = iter(response)\n result = {}\n for key, value in zip(it, it):\n if key in type_info:\n try:\n value = type_info[key](value)\n except Exception:\n # if for some reason the value can't be coerced, just use\n # the string value\n pass\n result[key] = value\n return result\n\n\ndef zset_score_pairs(response, **options):\n \"\"\"\n If ``withscores`` is specified in the options, return the response as\n a list of (value, score) pairs\n \"\"\"\n if not response or not options.get(\"withscores\"):\n return response\n score_cast_func = options.get(\"score_cast_func\", float)\n it = iter(response)\n return list(zip(it, map(score_cast_func, it)))\n\n\ndef sort_return_tuples(response, **options):\n \"\"\"\n If ``groups`` is specified, return the response as a list of\n n-element tuples with n being the value found in options['groups']\n \"\"\"\n if not response or not options.get(\"groups\"):\n return response\n n = options[\"groups\"]\n return list(zip(*[response[i::n] for i in range(n)]))\n\n\ndef parse_stream_list(response):\n if response is None:\n return None\n data = []\n for r in response:\n if r is not None:\n data.append((r[0], pairs_to_dict(r[1])))\n else:\n data.append((None, None))\n return data\n\n\ndef pairs_to_dict_with_str_keys(response):\n return pairs_to_dict(response, decode_keys=True)\n\n\ndef parse_list_of_dicts(response):\n return list(map(pairs_to_dict_with_str_keys, response))\n\n\ndef parse_xclaim(response, **options):\n if options.get(\"parse_justid\", False):\n return response\n return parse_stream_list(response)\n\n\ndef parse_xautoclaim(response, **options):\n if options.get(\"parse_justid\", False):\n 
return response[1]\n response[1] = parse_stream_list(response[1])\n return response\n\n\ndef parse_xinfo_stream(response, **options):\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(k): v for k, v in response.items()}\n if not options.get(\"full\", False):\n first = data.get(\"first-entry\")\n if first is not None:\n data[\"first-entry\"] = (first[0], pairs_to_dict(first[1]))\n last = data[\"last-entry\"]\n if last is not None:\n data[\"last-entry\"] = (last[0], pairs_to_dict(last[1]))\n else:\n data[\"entries\"] = {_id: pairs_to_dict(entry) for _id, entry in data[\"entries\"]}\n if isinstance(data[\"groups\"][0], list):\n data[\"groups\"] = [\n pairs_to_dict(group, decode_keys=True) for group in data[\"groups\"]\n ]\n else:\n data[\"groups\"] = [\n {str_if_bytes(k): v for k, v in group.items()}\n for group in data[\"groups\"]\n ]\n return data\n\n\ndef parse_xread(response):\n if response is None:\n return []\n return [[r[0], parse_stream_list(r[1])] for r in response]\n\n\ndef parse_xread_resp3(response):\n if response is None:\n return {}\n return {key: [parse_stream_list(value)] for key, value in response.items()}\n\n\ndef parse_xpending(response, **options):\n if options.get(\"parse_detail\", False):\n return parse_xpending_range(response)\n consumers = [{\"name\": n, \"pending\": int(p)} for n, p in response[3] or []]\n return {\n \"pending\": response[0],\n \"min\": response[1],\n \"max\": response[2],\n \"consumers\": consumers,\n }\n\n\ndef parse_xpending_range(response):\n k = (\"message_id\", \"consumer\", \"time_since_delivered\", \"times_delivered\")\n return [dict(zip(k, r)) for r in response]\n\n\ndef float_or_none(response):\n if response is None:\n return None\n return float(response)\n\n\ndef bool_ok(response, **options):\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_zadd(response, **options):\n if response is None:\n return None\n if options.get(\"as_score\"):\n return float(response)\n return int(response)\n\n\ndef parse_client_list(response, **options):\n clients = []\n for c in str_if_bytes(response).splitlines():\n # Values might contain '='\n clients.append(dict(pair.split(\"=\", 1) for pair in c.split(\" \")))\n return clients\n\n\ndef parse_config_get(response, **options):\n response = [str_if_bytes(i) if i is not None else None for i in response]\n return response and pairs_to_dict(response) or {}\n\n\ndef parse_scan(response, **options):\n cursor, r = response\n return int(cursor), r\n\n\ndef parse_hscan(response, **options):\n cursor, r = response\n return int(cursor), r and pairs_to_dict(r) or {}\n\n\ndef parse_zscan(response, **options):\n score_cast_func = options.get(\"score_cast_func\", float)\n cursor, r = response\n it = iter(r)\n return int(cursor), list(zip(it, map(score_cast_func, it)))\n\n\ndef parse_zmscore(response, **options):\n # zmscore: list of scores (double precision floating point number) or nil\n return [float(score) if score is not None else None for score in response]\n\n\ndef parse_slowlog_get(response, **options):\n space = \" \" if options.get(\"decode_responses\", False) else b\" \"\n\n def parse_item(item):\n result = {\"id\": item[0], \"start_time\": int(item[1]), \"duration\": int(item[2])}\n # Redis Enterprise injects another entry at index [3], which has\n # the complexity info (i.e. 
the value N in case the command has\n # an O(N) complexity) instead of the command.\n if isinstance(item[3], list):\n result[\"command\"] = space.join(item[3])\n result[\"client_address\"] = item[4]\n result[\"client_name\"] = item[5]\n else:\n result[\"complexity\"] = item[3]\n result[\"command\"] = space.join(item[4])\n result[\"client_address\"] = item[5]\n result[\"client_name\"] = item[6]\n return result\n\n return [parse_item(item) for item in response]\n\n\ndef parse_stralgo(response, **options):\n \"\"\"\n Parse the response from `STRALGO` command.\n Without modifiers the returned value is string.\n When LEN is given the command returns the length of the result\n (i.e integer).\n When IDX is given the command returns a dictionary with the LCS\n length and all the ranges in both the strings, start and end\n offset for each string, where there are matches.\n When WITHMATCHLEN is given, each array representing a match will\n also have the length of the match at the beginning of the array.\n \"\"\"\n if options.get(\"len\", False):\n return int(response)\n if options.get(\"idx\", False):\n if options.get(\"withmatchlen\", False):\n matches = [\n [(int(match[-1]))] + list(map(tuple, match[:-1]))\n for match in response[1]\n ]\n else:\n matches = [list(map(tuple, match)) for match in response[1]]\n return {\n str_if_bytes(response[0]): matches,\n str_if_bytes(response[2]): int(response[3]),\n }\n return str_if_bytes(response)\n\n\ndef parse_cluster_info(response, **options):\n response = str_if_bytes(response)\n return dict(line.split(\":\") for line in response.splitlines() if line)\n\n\ndef _parse_node_line(line):\n line_items = line.split(\" \")\n node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(\" \")[:8]\n addr = addr.split(\"@\")[0]\n node_dict = {\n \"node_id\": node_id,\n \"flags\": flags,\n \"master_id\": master_id,\n \"last_ping_sent\": ping,\n \"last_pong_rcvd\": pong,\n \"epoch\": epoch,\n \"slots\": [],\n \"migrations\": [],\n \"connected\": True if connected == \"connected\" else False,\n }\n if len(line_items) >= 9:\n slots, migrations = _parse_slots(line_items[8:])\n node_dict[\"slots\"], node_dict[\"migrations\"] = slots, migrations\n return addr, node_dict\n\n\ndef _parse_slots(slot_ranges):\n slots, migrations = [], []\n for s_range in slot_ranges:\n if \"->-\" in s_range:\n slot_id, dst_node_id = s_range[1:-1].split(\"->-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": dst_node_id, \"state\": \"migrating\"}\n )\n elif \"-<-\" in s_range:\n slot_id, src_node_id = s_range[1:-1].split(\"-<-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": src_node_id, \"state\": \"importing\"}\n )\n else:\n s_range = [sl for sl in s_range.split(\"-\")]\n slots.append(s_range)\n\n return slots, migrations\n\n\ndef parse_cluster_nodes(response, **options):\n \"\"\"\n @see: https://redis.io/commands/cluster-nodes # string / bytes\n @see: https://redis.io/commands/cluster-replicas # list of string / bytes\n \"\"\"\n if isinstance(response, (str, bytes)):\n response = response.splitlines()\n return dict(_parse_node_line(str_if_bytes(node)) for node in response)\n\n\ndef parse_geosearch_generic(response, **options):\n \"\"\"\n Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'\n commands according to 'withdist', 'withhash' and 'withcoord' labels.\n \"\"\"\n try:\n if options[\"store\"] or options[\"store_dist\"]:\n # `store` and `store_dist` cant be combined\n # with other command arguments.\n # relevant to 'GEORADIUS' and 
'GEORADIUSBYMEMBER'\n return response\n except KeyError: # it means the command was sent via execute_command\n return response\n\n if type(response) != list:\n response_list = [response]\n else:\n response_list = response\n\n if not options[\"withdist\"] and not options[\"withcoord\"] and not options[\"withhash\"]:\n # just a bunch of places\n return response_list\n\n cast = {\n \"withdist\": float,\n \"withcoord\": lambda ll: (float(ll[0]), float(ll[1])),\n \"withhash\": int,\n }\n\n # zip all output results with each casting function to get\n # the properly native Python value.\n f = [lambda x: x]\n f += [cast[o] for o in [\"withdist\", \"withhash\", \"withcoord\"] if options[o]]\n return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]\n\n\ndef parse_command(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = int(command[1])\n cmd_dict[\"flags\"] = [str_if_bytes(flag) for flag in command[2]]\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_command_resp3(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = command[1]\n cmd_dict[\"flags\"] = {str_if_bytes(flag) for flag in command[2]}\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n cmd_dict[\"acl_categories\"] = command[6]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_pubsub_numsub(response, **options):\n return list(zip(response[0::2], response[1::2]))\n\n\ndef parse_client_kill(response, **options):\n if isinstance(response, int):\n return response\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_acl_getuser(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(key): value for key, value in response.items()}\n\n # convert everything but user-defined data in 'keys' to native strings\n data[\"flags\"] = list(map(str_if_bytes, data[\"flags\"]))\n data[\"passwords\"] = list(map(str_if_bytes, data[\"passwords\"]))\n data[\"commands\"] = str_if_bytes(data[\"commands\"])\n if isinstance(data[\"keys\"], str) or isinstance(data[\"keys\"], bytes):\n data[\"keys\"] = list(str_if_bytes(data[\"keys\"]).split(\" \"))\n if data[\"keys\"] == [\"\"]:\n data[\"keys\"] = []\n if \"channels\" in data:\n if isinstance(data[\"channels\"], str) or isinstance(data[\"channels\"], bytes):\n data[\"channels\"] = list(str_if_bytes(data[\"channels\"]).split(\" \"))\n if data[\"channels\"] == [\"\"]:\n data[\"channels\"] = []\n if \"selectors\" in data:\n if data[\"selectors\"] != [] and isinstance(data[\"selectors\"][0], list):\n data[\"selectors\"] = [\n list(map(str_if_bytes, selector)) for selector in data[\"selectors\"]\n ]\n elif data[\"selectors\"] != []:\n data[\"selectors\"] = [\n {str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}\n 
for selector in data[\"selectors\"]\n ]\n\n # split 'commands' into separate 'categories' and 'commands' lists\n commands, categories = [], []\n for command in data[\"commands\"].split(\" \"):\n categories.append(command) if \"@\" in command else commands.append(command)\n\n data[\"commands\"] = commands\n data[\"categories\"] = categories\n data[\"enabled\"] = \"on\" in data[\"flags\"]\n return data\n\n\ndef parse_acl_log(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = []\n for log in response:\n log_data = pairs_to_dict(log, True, True)\n client_info = log_data.get(\"client-info\", \"\")\n log_data[\"client-info\"] = parse_client_info(client_info)\n\n # float() is lossy comparing to the \"double\" in C\n log_data[\"age-seconds\"] = float(log_data[\"age-seconds\"])\n data.append(log_data)\n else:\n data = bool_ok(response)\n return data\n\n\ndef parse_client_info(value):\n \"\"\"\n Parsing client-info in ACL Log in following format.\n \"key1=value1 key2=value2 key3=value3\"\n \"\"\"\n client_info = {}\n for info in str_if_bytes(value).strip().split():\n key, value = info.split(\"=\")\n client_info[key] = value\n\n # Those fields are defined as int in networking.c\n for int_key in {\n \"id\",\n \"age\",\n \"idle\",\n \"db\",\n \"sub\",\n \"psub\",\n \"multi\",\n \"qbuf\",\n \"qbuf-free\",\n \"obl\",\n \"argv-mem\",\n \"oll\",\n \"omem\",\n \"tot-mem\",\n }:\n client_info[int_key] = int(client_info[int_key])\n return client_info\n\n\ndef parse_set_result(response, **options):\n \"\"\"\n Handle SET result since GET argument is available since Redis 6.2.\n Parsing SET result into:\n - BOOL\n - String when GET argument is used\n \"\"\"\n if options.get(\"get\"):\n # Redis will return a getCommand result.\n # See `setGenericCommand` in t_string.c\n return response\n return response and str_if_bytes(response) == \"OK\"\n\n\ndef string_keys_to_dict(key_string, callback):\n return dict.fromkeys(key_string.split(), callback)\n\n\n_RedisCallbacks = {\n **string_keys_to_dict(\n \"AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX \"\n \"PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE\",\n bool,\n ),\n **string_keys_to_dict(\"HINCRBYFLOAT INCRBYFLOAT\", float),\n **string_keys_to_dict(\n \"ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE \"\n \"RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH\",\n bool_ok,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread),\n **string_keys_to_dict(\n \"GEORADIUS GEORADIUSBYMEMBER GEOSEARCH\",\n parse_geosearch_generic,\n ),\n **string_keys_to_dict(\"XRANGE XREVRANGE\", parse_stream_list),\n \"ACL GETUSER\": parse_acl_getuser,\n \"ACL LOAD\": bool_ok,\n \"ACL LOG\": parse_acl_log,\n \"ACL SETUSER\": bool_ok,\n \"ACL SAVE\": bool_ok,\n \"CLIENT INFO\": parse_client_info,\n \"CLIENT KILL\": parse_client_kill,\n \"CLIENT LIST\": parse_client_list,\n \"CLIENT PAUSE\": bool_ok,\n \"CLIENT SETINFO\": bool_ok,\n \"CLIENT SETNAME\": bool_ok,\n \"CLIENT UNBLOCK\": bool,\n \"CLUSTER ADDSLOTS\": bool_ok,\n \"CLUSTER ADDSLOTSRANGE\": bool_ok,\n \"CLUSTER DELSLOTS\": bool_ok,\n \"CLUSTER DELSLOTSRANGE\": bool_ok,\n \"CLUSTER FAILOVER\": bool_ok,\n \"CLUSTER FORGET\": bool_ok,\n \"CLUSTER INFO\": parse_cluster_info,\n \"CLUSTER MEET\": bool_ok,\n \"CLUSTER NODES\": parse_cluster_nodes,\n \"CLUSTER REPLICAS\": parse_cluster_nodes,\n \"CLUSTER REPLICATE\": bool_ok,\n \"CLUSTER RESET\": bool_ok,\n \"CLUSTER SAVECONFIG\": bool_ok,\n \"CLUSTER SET-CONFIG-EPOCH\": bool_ok,\n \"CLUSTER 
SETSLOT\": bool_ok,\n \"CLUSTER SLAVES\": parse_cluster_nodes,\n \"COMMAND\": parse_command,\n \"CONFIG RESETSTAT\": bool_ok,\n \"CONFIG SET\": bool_ok,\n \"FUNCTION DELETE\": bool_ok,\n \"FUNCTION FLUSH\": bool_ok,\n \"FUNCTION RESTORE\": bool_ok,\n \"GEODIST\": float_or_none,\n \"HSCAN\": parse_hscan,\n \"INFO\": parse_info,\n \"LASTSAVE\": timestamp_to_datetime,\n \"MEMORY PURGE\": bool_ok,\n \"MODULE LOAD\": bool,\n \"MODULE UNLOAD\": bool,\n \"PING\": lambda r: str_if_bytes(r) == \"PONG\",\n \"PUBSUB NUMSUB\": parse_pubsub_numsub,\n \"PUBSUB SHARDNUMSUB\": parse_pubsub_numsub,\n \"QUIT\": bool_ok,\n \"SET\": parse_set_result,\n \"SCAN\": parse_scan,\n \"SCRIPT EXISTS\": lambda r: list(map(bool, r)),\n \"SCRIPT FLUSH\": bool_ok,\n \"SCRIPT KILL\": bool_ok,\n \"SCRIPT LOAD\": str_if_bytes,\n \"SENTINEL CKQUORUM\": bool_ok,\n \"SENTINEL FAILOVER\": bool_ok,\n \"SENTINEL FLUSHCONFIG\": bool_ok,\n \"SENTINEL GET-MASTER-ADDR-BY-NAME\": parse_sentinel_get_master,\n \"SENTINEL MONITOR\": bool_ok,\n \"SENTINEL RESET\": bool_ok,\n \"SENTINEL REMOVE\": bool_ok,\n \"SENTINEL SET\": bool_ok,\n \"SLOWLOG GET\": parse_slowlog_get,\n \"SLOWLOG RESET\": bool_ok,\n \"SORT\": sort_return_tuples,\n \"SSCAN\": parse_scan,\n \"TIME\": lambda x: (int(x[0]), int(x[1])),\n \"XAUTOCLAIM\": parse_xautoclaim,\n \"XCLAIM\": parse_xclaim,\n \"XGROUP CREATE\": bool_ok,\n \"XGROUP DESTROY\": bool,\n \"XGROUP SETID\": bool_ok,\n \"XINFO STREAM\": parse_xinfo_stream,\n \"XPENDING\": parse_xpending,\n \"ZSCAN\": parse_zscan,\n}\n\n\n_RedisCallbacksRESP2 = {\n **string_keys_to_dict(\n \"SDIFF SINTER SMEMBERS SUNION\", lambda r: r and set(r) or set()\n ),\n **string_keys_to_dict(\n \"ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZRANK ZREVRANGE \"\n \"ZREVRANGEBYSCORE ZREVRANK ZUNION\",\n zset_score_pairs,\n ),\n **string_keys_to_dict(\"ZINCRBY ZSCORE\", float_or_none),\n **string_keys_to_dict(\"BGREWRITEAOF BGSAVE\", lambda r: True),\n **string_keys_to_dict(\"BLPOP BRPOP\", lambda r: r and tuple(r) or None),\n **string_keys_to_dict(\n \"BZPOPMAX BZPOPMIN\", lambda r: r and (r[0], r[1], float(r[2])) or None\n ),\n \"ACL CAT\": lambda r: list(map(str_if_bytes, r)),\n \"ACL GENPASS\": str_if_bytes,\n \"ACL HELP\": lambda r: list(map(str_if_bytes, r)),\n \"ACL LIST\": lambda r: list(map(str_if_bytes, r)),\n \"ACL USERS\": lambda r: list(map(str_if_bytes, r)),\n \"ACL WHOAMI\": str_if_bytes,\n \"CLIENT GETNAME\": str_if_bytes,\n \"CLIENT TRACKINGINFO\": lambda r: list(map(str_if_bytes, r)),\n \"CLUSTER GETKEYSINSLOT\": lambda r: list(map(str_if_bytes, r)),\n \"COMMAND GETKEYS\": lambda r: list(map(str_if_bytes, r)),\n \"CONFIG GET\": parse_config_get,\n \"DEBUG OBJECT\": parse_debug_object,\n \"GEOHASH\": lambda r: list(map(str_if_bytes, r)),\n \"GEOPOS\": lambda r: list(\n map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)\n ),\n \"HGETALL\": lambda r: r and pairs_to_dict(r) or {},\n \"MEMORY STATS\": parse_memory_stats,\n \"MODULE LIST\": lambda r: [pairs_to_dict(m) for m in r],\n \"RESET\": str_if_bytes,\n \"SENTINEL MASTER\": parse_sentinel_master,\n \"SENTINEL MASTERS\": parse_sentinel_masters,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels,\n \"STRALGO\": parse_stralgo,\n \"XINFO CONSUMERS\": parse_list_of_dicts,\n \"XINFO GROUPS\": parse_list_of_dicts,\n \"ZADD\": parse_zadd,\n \"ZMSCORE\": parse_zmscore,\n}\n\n\n_RedisCallbacksRESP3 = {\n **string_keys_to_dict(\n \"ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE 
ZREVRANGE ZREVRANGEBYSCORE \"\n \"ZUNION HGETALL XREADGROUP\",\n lambda r, **kwargs: r,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread_resp3),\n \"ACL LOG\": lambda r: [\n {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()} for x in r\n ]\n if isinstance(r, list)\n else bool_ok(r),\n \"COMMAND\": parse_command_resp3,\n \"CONFIG GET\": lambda r: {\n str_if_bytes(key)\n if key is not None\n else None: str_if_bytes(value)\n if value is not None\n else None\n for key, value in r.items()\n },\n \"MEMORY STATS\": lambda r: {str_if_bytes(key): value for key, value in r.items()},\n \"SENTINEL MASTER\": parse_sentinel_state_resp3,\n \"SENTINEL MASTERS\": parse_sentinel_masters_resp3,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels_resp3,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels_resp3,\n \"STRALGO\": lambda r, **options: {\n str_if_bytes(key): str_if_bytes(value) for key, value in r.items()\n }\n if isinstance(r, dict)\n else str_if_bytes(r),\n \"XINFO CONSUMERS\": lambda r: [\n {str_if_bytes(key): value for key, value in x.items()} for x in r\n ],\n \"XINFO GROUPS\": lambda r: [\n {str_if_bytes(key): value for key, value in d.items()} for d in r\n ],\n}\n", "path": "redis/_parsers/helpers.py" } ]
[ { "content": "import datetime\n\nfrom redis.utils import str_if_bytes\n\n\ndef timestamp_to_datetime(response):\n \"Converts a unix timestamp to a Python datetime object\"\n if not response:\n return None\n try:\n response = int(response)\n except ValueError:\n return None\n return datetime.datetime.fromtimestamp(response)\n\n\ndef parse_debug_object(response):\n \"Parse the results of Redis's DEBUG OBJECT command into a Python dict\"\n # The 'type' of the object is the first item in the response, but isn't\n # prefixed with a name\n response = str_if_bytes(response)\n response = \"type:\" + response\n response = dict(kv.split(\":\") for kv in response.split())\n\n # parse some expected int values from the string response\n # note: this cmd isn't spec'd so these may not appear in all redis versions\n int_fields = (\"refcount\", \"serializedlength\", \"lru\", \"lru_seconds_idle\")\n for field in int_fields:\n if field in response:\n response[field] = int(response[field])\n\n return response\n\n\ndef parse_info(response):\n \"\"\"Parse the result of Redis's INFO command into a Python dict\"\"\"\n info = {}\n response = str_if_bytes(response)\n\n def get_value(value):\n if \",\" not in value and \"=\" not in value:\n try:\n if \".\" in value:\n return float(value)\n else:\n return int(value)\n except ValueError:\n return value\n else:\n sub_dict = {}\n for item in value.split(\",\"):\n k, v = item.rsplit(\"=\", 1)\n sub_dict[k] = get_value(v)\n return sub_dict\n\n for line in response.splitlines():\n if line and not line.startswith(\"#\"):\n if line.find(\":\") != -1:\n # Split, the info fields keys and values.\n # Note that the value may contain ':'. but the 'host:'\n # pseudo-command is the only case where the key contains ':'\n key, value = line.split(\":\", 1)\n if key == \"cmdstat_host\":\n key, value = line.rsplit(\":\", 1)\n\n if key == \"module\":\n # Hardcode a list for key 'modules' since there could be\n # multiple lines that started with 'module'\n info.setdefault(\"modules\", []).append(get_value(value))\n else:\n info[key] = get_value(value)\n else:\n # if the line isn't splittable, append it to the \"__raw__\" key\n info.setdefault(\"__raw__\", []).append(line)\n\n return info\n\n\ndef parse_memory_stats(response, **kwargs):\n \"\"\"Parse the results of MEMORY STATS\"\"\"\n stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)\n for key, value in stats.items():\n if key.startswith(\"db.\"):\n stats[key] = pairs_to_dict(\n value, decode_keys=True, decode_string_values=True\n )\n return stats\n\n\nSENTINEL_STATE_TYPES = {\n \"can-failover-its-master\": int,\n \"config-epoch\": int,\n \"down-after-milliseconds\": int,\n \"failover-timeout\": int,\n \"info-refresh\": int,\n \"last-hello-message\": int,\n \"last-ok-ping-reply\": int,\n \"last-ping-reply\": int,\n \"last-ping-sent\": int,\n \"master-link-down-time\": int,\n \"master-port\": int,\n \"num-other-sentinels\": int,\n \"num-slaves\": int,\n \"o-down-time\": int,\n \"pending-commands\": int,\n \"parallel-syncs\": int,\n \"port\": int,\n \"quorum\": int,\n \"role-reported-time\": int,\n \"s-down-time\": int,\n \"slave-priority\": int,\n \"slave-repl-offset\": int,\n \"voted-leader-epoch\": int,\n}\n\n\ndef parse_sentinel_state(item):\n result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)\n flags = set(result[\"flags\"].split(\",\"))\n for name, flag in (\n (\"is_master\", \"master\"),\n (\"is_slave\", \"slave\"),\n (\"is_sdown\", \"s_down\"),\n (\"is_odown\", \"o_down\"),\n (\"is_sentinel\", 
\"sentinel\"),\n (\"is_disconnected\", \"disconnected\"),\n (\"is_master_down\", \"master_down\"),\n ):\n result[name] = flag in flags\n return result\n\n\ndef parse_sentinel_master(response):\n return parse_sentinel_state(map(str_if_bytes, response))\n\n\ndef parse_sentinel_state_resp3(response):\n result = {}\n for key in response:\n try:\n value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))\n result[str_if_bytes(key)] = value\n except Exception:\n result[str_if_bytes(key)] = response[str_if_bytes(key)]\n flags = set(result[\"flags\"].split(\",\"))\n result[\"flags\"] = flags\n return result\n\n\ndef parse_sentinel_masters(response):\n result = {}\n for item in response:\n state = parse_sentinel_state(map(str_if_bytes, item))\n result[state[\"name\"]] = state\n return result\n\n\ndef parse_sentinel_masters_resp3(response):\n return [parse_sentinel_state(master) for master in response]\n\n\ndef parse_sentinel_slaves_and_sentinels(response):\n return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]\n\n\ndef parse_sentinel_slaves_and_sentinels_resp3(response):\n return [parse_sentinel_state_resp3(item) for item in response]\n\n\ndef parse_sentinel_get_master(response):\n return response and (response[0], int(response[1])) or None\n\n\ndef pairs_to_dict(response, decode_keys=False, decode_string_values=False):\n \"\"\"Create a dict given a list of key/value pairs\"\"\"\n if response is None:\n return {}\n if decode_keys or decode_string_values:\n # the iter form is faster, but I don't know how to make that work\n # with a str_if_bytes() map\n keys = response[::2]\n if decode_keys:\n keys = map(str_if_bytes, keys)\n values = response[1::2]\n if decode_string_values:\n values = map(str_if_bytes, values)\n return dict(zip(keys, values))\n else:\n it = iter(response)\n return dict(zip(it, it))\n\n\ndef pairs_to_dict_typed(response, type_info):\n it = iter(response)\n result = {}\n for key, value in zip(it, it):\n if key in type_info:\n try:\n value = type_info[key](value)\n except Exception:\n # if for some reason the value can't be coerced, just use\n # the string value\n pass\n result[key] = value\n return result\n\n\ndef zset_score_pairs(response, **options):\n \"\"\"\n If ``withscores`` is specified in the options, return the response as\n a list of (value, score) pairs\n \"\"\"\n if not response or not options.get(\"withscores\"):\n return response\n score_cast_func = options.get(\"score_cast_func\", float)\n it = iter(response)\n return list(zip(it, map(score_cast_func, it)))\n\n\ndef sort_return_tuples(response, **options):\n \"\"\"\n If ``groups`` is specified, return the response as a list of\n n-element tuples with n being the value found in options['groups']\n \"\"\"\n if not response or not options.get(\"groups\"):\n return response\n n = options[\"groups\"]\n return list(zip(*[response[i::n] for i in range(n)]))\n\n\ndef parse_stream_list(response):\n if response is None:\n return None\n data = []\n for r in response:\n if r is not None:\n data.append((r[0], pairs_to_dict(r[1])))\n else:\n data.append((None, None))\n return data\n\n\ndef pairs_to_dict_with_str_keys(response):\n return pairs_to_dict(response, decode_keys=True)\n\n\ndef parse_list_of_dicts(response):\n return list(map(pairs_to_dict_with_str_keys, response))\n\n\ndef parse_xclaim(response, **options):\n if options.get(\"parse_justid\", False):\n return response\n return parse_stream_list(response)\n\n\ndef parse_xautoclaim(response, **options):\n if options.get(\"parse_justid\", False):\n 
return response[1]\n response[1] = parse_stream_list(response[1])\n return response\n\n\ndef parse_xinfo_stream(response, **options):\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(k): v for k, v in response.items()}\n if not options.get(\"full\", False):\n first = data.get(\"first-entry\")\n if first is not None:\n data[\"first-entry\"] = (first[0], pairs_to_dict(first[1]))\n last = data[\"last-entry\"]\n if last is not None:\n data[\"last-entry\"] = (last[0], pairs_to_dict(last[1]))\n else:\n data[\"entries\"] = {_id: pairs_to_dict(entry) for _id, entry in data[\"entries\"]}\n if isinstance(data[\"groups\"][0], list):\n data[\"groups\"] = [\n pairs_to_dict(group, decode_keys=True) for group in data[\"groups\"]\n ]\n else:\n data[\"groups\"] = [\n {str_if_bytes(k): v for k, v in group.items()}\n for group in data[\"groups\"]\n ]\n return data\n\n\ndef parse_xread(response):\n if response is None:\n return []\n return [[r[0], parse_stream_list(r[1])] for r in response]\n\n\ndef parse_xread_resp3(response):\n if response is None:\n return {}\n return {key: [parse_stream_list(value)] for key, value in response.items()}\n\n\ndef parse_xpending(response, **options):\n if options.get(\"parse_detail\", False):\n return parse_xpending_range(response)\n consumers = [{\"name\": n, \"pending\": int(p)} for n, p in response[3] or []]\n return {\n \"pending\": response[0],\n \"min\": response[1],\n \"max\": response[2],\n \"consumers\": consumers,\n }\n\n\ndef parse_xpending_range(response):\n k = (\"message_id\", \"consumer\", \"time_since_delivered\", \"times_delivered\")\n return [dict(zip(k, r)) for r in response]\n\n\ndef float_or_none(response):\n if response is None:\n return None\n return float(response)\n\n\ndef bool_ok(response, **options):\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_zadd(response, **options):\n if response is None:\n return None\n if options.get(\"as_score\"):\n return float(response)\n return int(response)\n\n\ndef parse_client_list(response, **options):\n clients = []\n for c in str_if_bytes(response).splitlines():\n # Values might contain '='\n clients.append(dict(pair.split(\"=\", 1) for pair in c.split(\" \")))\n return clients\n\n\ndef parse_config_get(response, **options):\n response = [str_if_bytes(i) if i is not None else None for i in response]\n return response and pairs_to_dict(response) or {}\n\n\ndef parse_scan(response, **options):\n cursor, r = response\n return int(cursor), r\n\n\ndef parse_hscan(response, **options):\n cursor, r = response\n return int(cursor), r and pairs_to_dict(r) or {}\n\n\ndef parse_zscan(response, **options):\n score_cast_func = options.get(\"score_cast_func\", float)\n cursor, r = response\n it = iter(r)\n return int(cursor), list(zip(it, map(score_cast_func, it)))\n\n\ndef parse_zmscore(response, **options):\n # zmscore: list of scores (double precision floating point number) or nil\n return [float(score) if score is not None else None for score in response]\n\n\ndef parse_slowlog_get(response, **options):\n space = \" \" if options.get(\"decode_responses\", False) else b\" \"\n\n def parse_item(item):\n result = {\"id\": item[0], \"start_time\": int(item[1]), \"duration\": int(item[2])}\n # Redis Enterprise injects another entry at index [3], which has\n # the complexity info (i.e. 
the value N in case the command has\n # an O(N) complexity) instead of the command.\n if isinstance(item[3], list):\n result[\"command\"] = space.join(item[3])\n result[\"client_address\"] = item[4]\n result[\"client_name\"] = item[5]\n else:\n result[\"complexity\"] = item[3]\n result[\"command\"] = space.join(item[4])\n result[\"client_address\"] = item[5]\n result[\"client_name\"] = item[6]\n return result\n\n return [parse_item(item) for item in response]\n\n\ndef parse_stralgo(response, **options):\n \"\"\"\n Parse the response from `STRALGO` command.\n Without modifiers the returned value is string.\n When LEN is given the command returns the length of the result\n (i.e integer).\n When IDX is given the command returns a dictionary with the LCS\n length and all the ranges in both the strings, start and end\n offset for each string, where there are matches.\n When WITHMATCHLEN is given, each array representing a match will\n also have the length of the match at the beginning of the array.\n \"\"\"\n if options.get(\"len\", False):\n return int(response)\n if options.get(\"idx\", False):\n if options.get(\"withmatchlen\", False):\n matches = [\n [(int(match[-1]))] + list(map(tuple, match[:-1]))\n for match in response[1]\n ]\n else:\n matches = [list(map(tuple, match)) for match in response[1]]\n return {\n str_if_bytes(response[0]): matches,\n str_if_bytes(response[2]): int(response[3]),\n }\n return str_if_bytes(response)\n\n\ndef parse_cluster_info(response, **options):\n response = str_if_bytes(response)\n return dict(line.split(\":\") for line in response.splitlines() if line)\n\n\ndef _parse_node_line(line):\n line_items = line.split(\" \")\n node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(\" \")[:8]\n addr = addr.split(\"@\")[0]\n node_dict = {\n \"node_id\": node_id,\n \"flags\": flags,\n \"master_id\": master_id,\n \"last_ping_sent\": ping,\n \"last_pong_rcvd\": pong,\n \"epoch\": epoch,\n \"slots\": [],\n \"migrations\": [],\n \"connected\": True if connected == \"connected\" else False,\n }\n if len(line_items) >= 9:\n slots, migrations = _parse_slots(line_items[8:])\n node_dict[\"slots\"], node_dict[\"migrations\"] = slots, migrations\n return addr, node_dict\n\n\ndef _parse_slots(slot_ranges):\n slots, migrations = [], []\n for s_range in slot_ranges:\n if \"->-\" in s_range:\n slot_id, dst_node_id = s_range[1:-1].split(\"->-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": dst_node_id, \"state\": \"migrating\"}\n )\n elif \"-<-\" in s_range:\n slot_id, src_node_id = s_range[1:-1].split(\"-<-\", 1)\n migrations.append(\n {\"slot\": slot_id, \"node_id\": src_node_id, \"state\": \"importing\"}\n )\n else:\n s_range = [sl for sl in s_range.split(\"-\")]\n slots.append(s_range)\n\n return slots, migrations\n\n\ndef parse_cluster_nodes(response, **options):\n \"\"\"\n @see: https://redis.io/commands/cluster-nodes # string / bytes\n @see: https://redis.io/commands/cluster-replicas # list of string / bytes\n \"\"\"\n if isinstance(response, (str, bytes)):\n response = response.splitlines()\n return dict(_parse_node_line(str_if_bytes(node)) for node in response)\n\n\ndef parse_geosearch_generic(response, **options):\n \"\"\"\n Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'\n commands according to 'withdist', 'withhash' and 'withcoord' labels.\n \"\"\"\n try:\n if options[\"store\"] or options[\"store_dist\"]:\n # `store` and `store_dist` cant be combined\n # with other command arguments.\n # relevant to 'GEORADIUS' and 
'GEORADIUSBYMEMBER'\n return response\n except KeyError: # it means the command was sent via execute_command\n return response\n\n if type(response) != list:\n response_list = [response]\n else:\n response_list = response\n\n if not options[\"withdist\"] and not options[\"withcoord\"] and not options[\"withhash\"]:\n # just a bunch of places\n return response_list\n\n cast = {\n \"withdist\": float,\n \"withcoord\": lambda ll: (float(ll[0]), float(ll[1])),\n \"withhash\": int,\n }\n\n # zip all output results with each casting function to get\n # the properly native Python value.\n f = [lambda x: x]\n f += [cast[o] for o in [\"withdist\", \"withhash\", \"withcoord\"] if options[o]]\n return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]\n\n\ndef parse_command(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = int(command[1])\n cmd_dict[\"flags\"] = [str_if_bytes(flag) for flag in command[2]]\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_command_resp3(response, **options):\n commands = {}\n for command in response:\n cmd_dict = {}\n cmd_name = str_if_bytes(command[0])\n cmd_dict[\"name\"] = cmd_name\n cmd_dict[\"arity\"] = command[1]\n cmd_dict[\"flags\"] = {str_if_bytes(flag) for flag in command[2]}\n cmd_dict[\"first_key_pos\"] = command[3]\n cmd_dict[\"last_key_pos\"] = command[4]\n cmd_dict[\"step_count\"] = command[5]\n cmd_dict[\"acl_categories\"] = command[6]\n if len(command) > 7:\n cmd_dict[\"tips\"] = command[7]\n cmd_dict[\"key_specifications\"] = command[8]\n cmd_dict[\"subcommands\"] = command[9]\n\n commands[cmd_name] = cmd_dict\n return commands\n\n\ndef parse_pubsub_numsub(response, **options):\n return list(zip(response[0::2], response[1::2]))\n\n\ndef parse_client_kill(response, **options):\n if isinstance(response, int):\n return response\n return str_if_bytes(response) == \"OK\"\n\n\ndef parse_acl_getuser(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = pairs_to_dict(response, decode_keys=True)\n else:\n data = {str_if_bytes(key): value for key, value in response.items()}\n\n # convert everything but user-defined data in 'keys' to native strings\n data[\"flags\"] = list(map(str_if_bytes, data[\"flags\"]))\n data[\"passwords\"] = list(map(str_if_bytes, data[\"passwords\"]))\n data[\"commands\"] = str_if_bytes(data[\"commands\"])\n if isinstance(data[\"keys\"], str) or isinstance(data[\"keys\"], bytes):\n data[\"keys\"] = list(str_if_bytes(data[\"keys\"]).split(\" \"))\n if data[\"keys\"] == [\"\"]:\n data[\"keys\"] = []\n if \"channels\" in data:\n if isinstance(data[\"channels\"], str) or isinstance(data[\"channels\"], bytes):\n data[\"channels\"] = list(str_if_bytes(data[\"channels\"]).split(\" \"))\n if data[\"channels\"] == [\"\"]:\n data[\"channels\"] = []\n if \"selectors\" in data:\n if data[\"selectors\"] != [] and isinstance(data[\"selectors\"][0], list):\n data[\"selectors\"] = [\n list(map(str_if_bytes, selector)) for selector in data[\"selectors\"]\n ]\n elif data[\"selectors\"] != []:\n data[\"selectors\"] = [\n {str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}\n 
for selector in data[\"selectors\"]\n ]\n\n # split 'commands' into separate 'categories' and 'commands' lists\n commands, categories = [], []\n for command in data[\"commands\"].split(\" \"):\n categories.append(command) if \"@\" in command else commands.append(command)\n\n data[\"commands\"] = commands\n data[\"categories\"] = categories\n data[\"enabled\"] = \"on\" in data[\"flags\"]\n return data\n\n\ndef parse_acl_log(response, **options):\n if response is None:\n return None\n if isinstance(response, list):\n data = []\n for log in response:\n log_data = pairs_to_dict(log, True, True)\n client_info = log_data.get(\"client-info\", \"\")\n log_data[\"client-info\"] = parse_client_info(client_info)\n\n # float() is lossy comparing to the \"double\" in C\n log_data[\"age-seconds\"] = float(log_data[\"age-seconds\"])\n data.append(log_data)\n else:\n data = bool_ok(response)\n return data\n\n\ndef parse_client_info(value):\n \"\"\"\n Parsing client-info in ACL Log in following format.\n \"key1=value1 key2=value2 key3=value3\"\n \"\"\"\n client_info = {}\n for info in str_if_bytes(value).strip().split():\n key, value = info.split(\"=\")\n client_info[key] = value\n\n # Those fields are defined as int in networking.c\n for int_key in {\n \"id\",\n \"age\",\n \"idle\",\n \"db\",\n \"sub\",\n \"psub\",\n \"multi\",\n \"qbuf\",\n \"qbuf-free\",\n \"obl\",\n \"argv-mem\",\n \"oll\",\n \"omem\",\n \"tot-mem\",\n }:\n client_info[int_key] = int(client_info[int_key])\n return client_info\n\n\ndef parse_set_result(response, **options):\n \"\"\"\n Handle SET result since GET argument is available since Redis 6.2.\n Parsing SET result into:\n - BOOL\n - String when GET argument is used\n \"\"\"\n if options.get(\"get\"):\n # Redis will return a getCommand result.\n # See `setGenericCommand` in t_string.c\n return response\n return response and str_if_bytes(response) == \"OK\"\n\n\ndef string_keys_to_dict(key_string, callback):\n return dict.fromkeys(key_string.split(), callback)\n\n\n_RedisCallbacks = {\n **string_keys_to_dict(\n \"AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX \"\n \"PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE\",\n bool,\n ),\n **string_keys_to_dict(\"HINCRBYFLOAT INCRBYFLOAT\", float),\n **string_keys_to_dict(\n \"ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE \"\n \"RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH\",\n bool_ok,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread),\n **string_keys_to_dict(\n \"GEORADIUS GEORADIUSBYMEMBER GEOSEARCH\",\n parse_geosearch_generic,\n ),\n **string_keys_to_dict(\"XRANGE XREVRANGE\", parse_stream_list),\n \"ACL GETUSER\": parse_acl_getuser,\n \"ACL LOAD\": bool_ok,\n \"ACL LOG\": parse_acl_log,\n \"ACL SETUSER\": bool_ok,\n \"ACL SAVE\": bool_ok,\n \"CLIENT INFO\": parse_client_info,\n \"CLIENT KILL\": parse_client_kill,\n \"CLIENT LIST\": parse_client_list,\n \"CLIENT PAUSE\": bool_ok,\n \"CLIENT SETINFO\": bool_ok,\n \"CLIENT SETNAME\": bool_ok,\n \"CLIENT UNBLOCK\": bool,\n \"CLUSTER ADDSLOTS\": bool_ok,\n \"CLUSTER ADDSLOTSRANGE\": bool_ok,\n \"CLUSTER DELSLOTS\": bool_ok,\n \"CLUSTER DELSLOTSRANGE\": bool_ok,\n \"CLUSTER FAILOVER\": bool_ok,\n \"CLUSTER FORGET\": bool_ok,\n \"CLUSTER INFO\": parse_cluster_info,\n \"CLUSTER MEET\": bool_ok,\n \"CLUSTER NODES\": parse_cluster_nodes,\n \"CLUSTER REPLICAS\": parse_cluster_nodes,\n \"CLUSTER REPLICATE\": bool_ok,\n \"CLUSTER RESET\": bool_ok,\n \"CLUSTER SAVECONFIG\": bool_ok,\n \"CLUSTER SET-CONFIG-EPOCH\": bool_ok,\n \"CLUSTER 
SETSLOT\": bool_ok,\n \"CLUSTER SLAVES\": parse_cluster_nodes,\n \"COMMAND\": parse_command,\n \"CONFIG RESETSTAT\": bool_ok,\n \"CONFIG SET\": bool_ok,\n \"FUNCTION DELETE\": bool_ok,\n \"FUNCTION FLUSH\": bool_ok,\n \"FUNCTION RESTORE\": bool_ok,\n \"GEODIST\": float_or_none,\n \"HSCAN\": parse_hscan,\n \"INFO\": parse_info,\n \"LASTSAVE\": timestamp_to_datetime,\n \"MEMORY PURGE\": bool_ok,\n \"MODULE LOAD\": bool,\n \"MODULE UNLOAD\": bool,\n \"PING\": lambda r: str_if_bytes(r) == \"PONG\",\n \"PUBSUB NUMSUB\": parse_pubsub_numsub,\n \"PUBSUB SHARDNUMSUB\": parse_pubsub_numsub,\n \"QUIT\": bool_ok,\n \"SET\": parse_set_result,\n \"SCAN\": parse_scan,\n \"SCRIPT EXISTS\": lambda r: list(map(bool, r)),\n \"SCRIPT FLUSH\": bool_ok,\n \"SCRIPT KILL\": bool_ok,\n \"SCRIPT LOAD\": str_if_bytes,\n \"SENTINEL CKQUORUM\": bool_ok,\n \"SENTINEL FAILOVER\": bool_ok,\n \"SENTINEL FLUSHCONFIG\": bool_ok,\n \"SENTINEL GET-MASTER-ADDR-BY-NAME\": parse_sentinel_get_master,\n \"SENTINEL MONITOR\": bool_ok,\n \"SENTINEL RESET\": bool_ok,\n \"SENTINEL REMOVE\": bool_ok,\n \"SENTINEL SET\": bool_ok,\n \"SLOWLOG GET\": parse_slowlog_get,\n \"SLOWLOG RESET\": bool_ok,\n \"SORT\": sort_return_tuples,\n \"SSCAN\": parse_scan,\n \"TIME\": lambda x: (int(x[0]), int(x[1])),\n \"XAUTOCLAIM\": parse_xautoclaim,\n \"XCLAIM\": parse_xclaim,\n \"XGROUP CREATE\": bool_ok,\n \"XGROUP DESTROY\": bool,\n \"XGROUP SETID\": bool_ok,\n \"XINFO STREAM\": parse_xinfo_stream,\n \"XPENDING\": parse_xpending,\n \"ZSCAN\": parse_zscan,\n}\n\n\n_RedisCallbacksRESP2 = {\n **string_keys_to_dict(\n \"SDIFF SINTER SMEMBERS SUNION\", lambda r: r and set(r) or set()\n ),\n **string_keys_to_dict(\n \"ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZRANK ZREVRANGE \"\n \"ZREVRANGEBYSCORE ZREVRANK ZUNION\",\n zset_score_pairs,\n ),\n **string_keys_to_dict(\"ZINCRBY ZSCORE\", float_or_none),\n **string_keys_to_dict(\"BGREWRITEAOF BGSAVE\", lambda r: True),\n **string_keys_to_dict(\"BLPOP BRPOP\", lambda r: r and tuple(r) or None),\n **string_keys_to_dict(\n \"BZPOPMAX BZPOPMIN\", lambda r: r and (r[0], r[1], float(r[2])) or None\n ),\n \"ACL CAT\": lambda r: list(map(str_if_bytes, r)),\n \"ACL GENPASS\": str_if_bytes,\n \"ACL HELP\": lambda r: list(map(str_if_bytes, r)),\n \"ACL LIST\": lambda r: list(map(str_if_bytes, r)),\n \"ACL USERS\": lambda r: list(map(str_if_bytes, r)),\n \"ACL WHOAMI\": str_if_bytes,\n \"CLIENT GETNAME\": str_if_bytes,\n \"CLIENT TRACKINGINFO\": lambda r: list(map(str_if_bytes, r)),\n \"CLUSTER GETKEYSINSLOT\": lambda r: list(map(str_if_bytes, r)),\n \"COMMAND GETKEYS\": lambda r: list(map(str_if_bytes, r)),\n \"CONFIG GET\": parse_config_get,\n \"DEBUG OBJECT\": parse_debug_object,\n \"GEOHASH\": lambda r: list(map(str_if_bytes, r)),\n \"GEOPOS\": lambda r: list(\n map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)\n ),\n \"HGETALL\": lambda r: r and pairs_to_dict(r) or {},\n \"MEMORY STATS\": parse_memory_stats,\n \"MODULE LIST\": lambda r: [pairs_to_dict(m) for m in r],\n \"RESET\": str_if_bytes,\n \"SENTINEL MASTER\": parse_sentinel_master,\n \"SENTINEL MASTERS\": parse_sentinel_masters,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels,\n \"STRALGO\": parse_stralgo,\n \"XINFO CONSUMERS\": parse_list_of_dicts,\n \"XINFO GROUPS\": parse_list_of_dicts,\n \"ZADD\": parse_zadd,\n \"ZMSCORE\": parse_zmscore,\n}\n\n\n_RedisCallbacksRESP3 = {\n **string_keys_to_dict(\n \"ZRANGE ZINTER ZPOPMAX ZPOPMIN ZRANGEBYSCORE 
ZREVRANGE ZREVRANGEBYSCORE \"\n \"ZUNION HGETALL XREADGROUP\",\n lambda r, **kwargs: r,\n ),\n **string_keys_to_dict(\"XREAD XREADGROUP\", parse_xread_resp3),\n \"ACL LOG\": lambda r: [\n {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()} for x in r\n ]\n if isinstance(r, list)\n else bool_ok(r),\n \"COMMAND\": parse_command_resp3,\n \"CONFIG GET\": lambda r: {\n str_if_bytes(key)\n if key is not None\n else None: str_if_bytes(value)\n if value is not None\n else None\n for key, value in r.items()\n },\n \"MEMORY STATS\": lambda r: {str_if_bytes(key): value for key, value in r.items()},\n \"SENTINEL MASTER\": parse_sentinel_state_resp3,\n \"SENTINEL MASTERS\": parse_sentinel_masters_resp3,\n \"SENTINEL SENTINELS\": parse_sentinel_slaves_and_sentinels_resp3,\n \"SENTINEL SLAVES\": parse_sentinel_slaves_and_sentinels_resp3,\n \"STRALGO\": lambda r, **options: {\n str_if_bytes(key): str_if_bytes(value) for key, value in r.items()\n }\n if isinstance(r, dict)\n else str_if_bytes(r),\n \"XINFO CONSUMERS\": lambda r: [\n {str_if_bytes(key): value for key, value in x.items()} for x in r\n ],\n \"XINFO GROUPS\": lambda r: [\n {str_if_bytes(key): value for key, value in d.items()} for d in r\n ],\n}\n", "path": "redis/_parsers/helpers.py" } ]
diff --git a/redis/_parsers/helpers.py b/redis/_parsers/helpers.py index bdd749a5bc..7418cdca53 100644 --- a/redis/_parsers/helpers.py +++ b/redis/_parsers/helpers.py @@ -38,7 +38,7 @@ def parse_info(response): response = str_if_bytes(response) def get_value(value): - if "," not in value or "=" not in value: + if "," not in value and "=" not in value: try: if "." in value: return float(value) diff --git a/tests/test_parsers/test_helpers.py b/tests/test_parsers/test_helpers.py new file mode 100644 index 0000000000..6430a237f6 --- /dev/null +++ b/tests/test_parsers/test_helpers.py @@ -0,0 +1,35 @@ +from redis._parsers.helpers import parse_info + + +def test_parse_info(): + info_output = """ +# Modules +module:name=search,ver=999999,api=1,filters=0,usedby=[],using=[ReJSON],options=[handle-io-errors] + +# search_fields_statistics +search_fields_text:Text=3 +search_fields_tag:Tag=2,Sortable=1 + +# search_version +search_version:99.99.99 +search_redis_version:7.2.2 - oss + +# search_runtime_configurations +search_query_timeout_ms:500 + """ + info = parse_info(info_output) + + assert isinstance(info["modules"], list) + assert isinstance(info["modules"][0], dict) + assert info["modules"][0]["name"] == "search" + + assert isinstance(info["search_fields_text"], dict) + assert info["search_fields_text"]["Text"] == 3 + + assert isinstance(info["search_fields_tag"], dict) + assert info["search_fields_tag"]["Tag"] == 2 + assert info["search_fields_tag"]["Sortable"] == 1 + + assert info["search_version"] == "99.99.99" + assert info["search_redis_version"] == "7.2.2 - oss" + assert info["search_query_timeout_ms"] == 500
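For context, the one-character change in `get_value` above is what the new test exercises: with `or`, any INFO value lacking a comma fell into the scalar branch, so `key=value` fields such as `Text=3` came back as raw strings; with `and`, only values containing neither `,` nor `=` are treated as scalars. Below is a minimal standalone sketch of the corrected logic — an illustration, not the redis-py helper itself:

```python
# Standalone sketch mirroring the fixed branch of parse_info's get_value.
def get_value(value):
    # Scalar branch only when the value has neither ',' nor '=' (the `and` fix).
    if "," not in value and "=" not in value:
        try:
            return float(value) if "." in value else int(value)
        except ValueError:
            return value
    # Otherwise parse "k1=v1,k2=v2"-style values into a dict.
    sub_dict = {}
    for item in value.split(","):
        k, v = item.rsplit("=", 1)
        sub_dict[k] = get_value(v)
    return sub_dict


assert get_value("Text=3") == {"Text": 3}          # previously came back as the string "Text=3"
assert get_value("Tag=2,Sortable=1") == {"Tag": 2, "Sortable": 1}
assert get_value("7.2.2 - oss") == "7.2.2 - oss"   # plain strings are untouched either way
```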
WeblateOrg__weblate-8229
Maintainers not able to add Punjabi translations
### Describe the issue
The Punjabi language is used in both Pakistan and India, but with different writing systems. The locales available for this in Weblate are `pa_PK` and `pa` respectively. (This is the same as how the Android locales are set up for this.) I have been translating applications for both scripts, which is easier to do together. However, `pa_PK` is not on the list of languages to choose from in the menu for adding a new translation, which says only a basic list of languages is included.
For context, Punjabi is the 10th most spoken language in the world, and the majority of speakers live in Pakistan, where it is the most widely spoken language. The language selection menu even includes ancient languages but does not have this option; it is odd that it is omitted entirely.
I have tried asking maintainers to add it manually, but here the maintainer of F-Droid suggests this may be a bug in Weblate, since Android supports this locale but they were not able to add it on their hosted Weblate instance: https://gitlab.com/fdroid/admin/-/issues/342
In the meantime, I have been using the "und" (Undetermined) language code to store these translations, but this puts a burden on project maintainers to manually export these and include them under the `pa_PK` locale, when the Weblate application is supposed to be able to do this.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
1. Click "+" to add "Punjabi (Pakistan)"
2. An error appears saying the project maintainer needs to add support for this
3. The project maintainer is not able to do this
### Expected behavior
Ability to translate projects on Weblate into Punjabi.
### Screenshots
Screenshots in the linked GitLab issue re: F-Droid
### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
_No response_
[ { "content": "#\n# Copyright © 2012–2022 Michal Čihař <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n# pylint: disable=line-too-long\n\nfrom django.utils.translation import pgettext_lazy\nfrom weblate_language_data import languages\nfrom weblate_language_data.ambiguous import AMBIGUOUS\n\nNO_CODE_LANGUAGES = {lang[0] for lang in languages.LANGUAGES}\n\nUNDERSCORE_EXCEPTIONS = {\"nb_NO\", \"zh_Hant\", \"zh_Hans\", \"be_Latn\", \"ro_MD\", \"pt_BR\"}\nAT_EXCEPTIONS = {\"ca@valencia\"}\n\n\ndef is_basic(code):\n if code in AMBIGUOUS:\n return False\n if \"_\" in code:\n return code in UNDERSCORE_EXCEPTIONS\n return \"@\" not in code or code in AT_EXCEPTIONS\n\n\nBASIC_LANGUAGES = {lang for lang in NO_CODE_LANGUAGES if is_basic(lang)}\n\n# Following variables are used to map Gettext plural formulas\n# to one/few/may/other like rules\n\nONE_OTHER_PLURALS = (\n \"n==1 || n%10==1 ? 0 : 1\",\n \"n != 1\",\n \"(n != 1)\",\n \"n > 1\",\n \"(n > 1)\",\n \"n >= 2 && (n < 11 || n > 99)\",\n \"n % 10 != 1 || n % 100 == 11\",\n \"(n % 10 == 1 && n % 100 != 11) ? 0 : 1\",\n \"n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9)\",\n \"(n==0 || n==1)\",\n)\n\nTWO_OTHER_PLURALS = (\"(n==2) ? 1 : 0\",)\n\nONE_FEW_OTHER_PLURALS = (\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\",\n \"n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2\",\n \"(n == 1) ? 0 : ((n == 0 || n != 1 && n % 100 >= 1 && n % 100 <= 19) ? 1 : 2)\",\n \"(n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2)\",\n \"(n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 0 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 1 : 2)\",\n \"(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)\",\n \"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1 : 2)\",\n \"(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\",\n)\nZERO_ONE_FEW_OTHER_PLURALS = (\n \"n == 0 ? 0 : n == 1 ? 1 : ((n >= 2 && n <= 10) ? 2 : 3)\",\n \"n==0 ? 0 : n==1 ? 1 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 2 : 3\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 2 : 3)\",\n \"n==0 ? 0 : n==1 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"n==0 ? 0 : (n==1) ? 1 : (n>=2 && n<=4) ? 2 : 3\",\n \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"n==0 ? 0 : (n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 1 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 2 : 3)\",\n)\n\nZERO_ONE_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : 2\",\n \"(n == 0) ? 0 : ((n == 1) ? 
1 : 2)\",\n \"(n % 10 == 0 || n % 100 >= 11 && n % 100 <= 19) ? 0 : ((n % 10 == 1 && n % 100 != 11) ? 1 : 2)\",\n \"n==0 ? 0 : n>1 ? 1 : 2\",\n \"n==0 ? 0 : n!=1 ? 1 : 2\",\n \"n == 0 ? 0 : n==1 || n%10==1 ? 1 : 2\",\n \"n==0 ? 0 : n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9) ? 1: 2\",\n \"n==0 ? 0 : n % 10 != 1 || n % 100 == 11 ? 1 :2\",\n \"n==0 ? 0 : n >= 2 && (n < 11 || n > 99) ? 1 : 2\",\n \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : 2\",\n)\n\nONE_TWO_OTHER_PLURALS = (\n \"n==1 ? 0 : n==2 ? 1 : 2\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : 2)\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : 2\",\n)\nZERO_ONE_TWO_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : 3\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : 3)\",\n)\n\nONE_OTHER_TWO_PLURALS = (\"n==1 ? 0 : n==2 ? 2 : 1\",)\n\nONE_TWO_THREE_OTHER_PLURALS = (\"(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3\",)\n\nONE_TWO_FEW_OTHER_PLURALS = (\n \"(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3\",\n \"(n % 10 == 1) ? 0 : ((n % 10 == 2) ? 1 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 2 : 3))\",\n \"(n % 100 == 1) ? 0 : ((n % 100 == 2) ? 1 : ((n % 100 == 3 || n % 100 == 4) ? 2 : 3))\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : 3))\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n == 10) ? 2 : 3))\",\n \"(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != 11) ? 2 : 3\",\n)\nZERO_ONE_TWO_FEW_OTHER_PLURALS = (\n \"n==0 ? 0 : (n==1 || n==11) ? 1 : (n==2 || n==12) ? 2 : (n > 2 && n < 20) ? 3 : 4\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : ((n > 10 && n % 10 == 0) ? 3 : 4))\",\n \"n==0 ? 0 : (n % 100 == 1) ? 1 : ((n % 100 == 2) ? 2 : ((n % 100 == 3 || n % 100 == 4) ? 3 : 4))\",\n \"n==0 ? 0 : n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 4\",\n \"n==0 ? 0 : (n % 10 == 1) ? 1 : ((n % 10 == 2) ? 2 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 3 : 4))\",\n)\n\nOTHER_ONE_TWO_FEW_PLURALS = (\n \"(n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 0)\",\n)\n\nONE_TWO_FEW_MANY_OTHER_PLURALS = (\n \"n==1 ? 0 : n==2 ? 1 : n<7 ? 2 : n<11 ? 3 : 4\",\n \"n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4\",\n \"(n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 0 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 1 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 2 : ((n != 0 && n % 1000000 == 0) ? 3 : 4)))\",\n)\n\nONE_FEW_MANY_OTHER_PLURALS = (\n \"n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3\",\n \"n==1 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : n%10==0 || (n%100>10 && n%100<20) ? 2 : 3\",\n \"n==1 ? 3 : n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n)\nZERO_ONE_FEW_MANY_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : ( n%100>1 && n%100<11) ? 2 : (n%100>10 && n%100<20 ) ? 3 : 4\",\n)\n\nONE_OTHER_ZERO_PLURALS = (\"n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2\",)\n\nZERO_ONE_TWO_FEW_MANY_OTHER_PLURALS = (\n \"(n==0) ? 0 : (n==1) ? 1 : (n==2) ? 2 : (n==3) ? 3 :(n==6) ? 4 : 5\",\n \"(n == 0) ? 0 : ((n == 1) ? 1 : ((n == 2) ? 2 : ((n % 100 >= 3 && n % 100 <= 10) ? 3 : ((n % 100 >= 11 && n % 100 <= 99) ? 4 : 5))))\",\n \"(n == 0) ? 0 : ((n == 1) ? 
1 : (((n % 100 == 2 || n % 100 == 22 || n % 100 == 42 || n % 100 == 62 || n % 100 == 82) || n % 1000 == 0 && (n % 100000 >= 1000 && n % 100000 <= 20000 || n % 100000 == 40000 || n % 100000 == 60000 || n % 100000 == 80000) || n != 0 && n % 1000000 == 100000) ? 2 : ((n % 100 == 3 || n % 100 == 23 || n % 100 == 43 || n % 100 == 63 || n % 100 == 83) ? 3 : ((n != 1 && (n % 100 == 1 || n % 100 == 21 || n % 100 == 41 || n % 100 == 61 || n % 100 == 81)) ? 4 : 5))))\",\n \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : (n>2 && n<7) ? 3 :(n>6 && n<11) ? 4 : 5\",\n \"n==0 ? 0 : (n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 1 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 2 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 3 : ((n != 0 && n % 1000000 == 0) ? 4 : 5)))\",\n)\nONE_MANY_OTHER_PLURALS = (\n \"(n == 1) ? 0 : ((n != 0 && n % 1000000 == 0) ? 1 : 2)\",\n \"(n == 0 || n == 1) ? 0 : ((n != 0 && n % 1000000 == 0) ? 1 : 2)\",\n)\n\nZERO_OTHER_PLURALS = (\"n==0 ? 0 : 1\",)\n\n# Plural types definition\nPLURAL_NONE = 0\nPLURAL_ONE_OTHER = 1\nPLURAL_ONE_FEW_OTHER = 2\nPLURAL_ARABIC = 3\nPLURAL_ONE_TWO_OTHER = 4\nPLURAL_ONE_TWO_THREE_OTHER = 5\nPLURAL_ONE_TWO_FEW_OTHER = 6\nPLURAL_ONE_OTHER_ZERO = 7\nPLURAL_ONE_FEW_MANY_OTHER = 8\nPLURAL_TWO_OTHER = 9\nPLURAL_ONE_TWO_FEW_MANY_OTHER = 10\nPLURAL_ZERO_ONE_OTHER = 11\nPLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER = 12\nPLURAL_OTHER_ONE_TWO_FEW = 13\nPLURAL_ONE_OTHER_TWO = 14\nPLURAL_ZERO_OTHER = 15\nPLURAL_ZERO_ONE_FEW_OTHER = 16\nPLURAL_ZERO_ONE_TWO_FEW_OTHER = 17\nPLURAL_ZERO_ONE_TWO_OTHER = 18\nPLURAL_ZERO_ONE_FEW_MANY_OTHER = 19\nPLURAL_ONE_MANY_OTHER = 20\nPLURAL_UNKNOWN = 666\n\n# Extra zero plural handling for stringsdict\nZERO_PLURAL_TYPES = {\n PLURAL_ARABIC,\n PLURAL_ZERO_ONE_OTHER,\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER,\n}\n\nFORMULA_WITH_ZERO = {\n \"0\": \"n==0 ? 0 : 1\",\n \"n > 1\": \"n==0 ? 0 : n>1 ? 1 : 2\",\n \"n != 1\": \"n==0 ? 0 : n!=1 ? 1 : 2\",\n \"(n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2)\": \"n == 0 ? 0 : n == 1 ? 1 : ((n >= 2 && n <= 10) ? 2 : 3)\",\n \"n == 1 ? 0 : n == 2 ? 1 : 2\": \"n == 0 ? 0 : n == 1 ? 1 : n == 2 ? 2 : 3\",\n \"n==1 || n%10==1 ? 0 : 1\": \"n == 0 ? 0 : n==1 || n%10==1 ? 1 : 2\",\n \"n==1 ? 0 : n==2 ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : 3\",\n \"(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3\": \"n==0 ? 0 : (n==1 || n==11) ? 1 : (n==2 || n==12) ? 2 : (n > 2 && n < 20) ? 3 : 4\",\n \"n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9)\": \"n==0 ? 0 : n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9) ? 1: 2\",\n \"n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1 : 2)\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 2 : 3)\",\n \"n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\": \"n==0 ? 0 : (n==1) ? 1 : (n>=2 && n<=4) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : 2)\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : 3)\",\n \"n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3\": \"n==0 ? 0 : n==1 ? 1 : ( n%100>1 && n%100<11) ? 2 : (n%100>10 && n%100<20 ) ? 
3 : 4\",\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\": \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : 3))\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : ((n > 10 && n % 10 == 0) ? 3 : 4))\",\n \"n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4\": \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : (n>2 && n<7) ? 3 :(n>6 && n<11) ? 4 : 5\",\n \"(n % 100 == 1) ? 0 : ((n % 100 == 2) ? 1 : ((n % 100 == 3 || n % 100 == 4) ? 2 : 3))\": \"n==0 ? 0 : (n % 100 == 1) ? 1 : ((n % 100 == 2) ? 2 : ((n % 100 == 3 || n % 100 == 4) ? 3 : 4))\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3\": \"n==0 ? 0 : n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 4\",\n \"n % 10 != 1 || n % 100 == 11\": \"n==0 ? 0 : n % 10 != 1 || n % 100 == 11 ? 1 :2\",\n \"(n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 0 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 1 : 2)\": \"n==0 ? 0 : (n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 1 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 2 : 3)\",\n \"n >= 2 && (n < 11 || n > 99)\": \"n==0 ? 0 : n >= 2 && (n < 11 || n > 99) ? 1 : 2\",\n \"(n % 10 == 1) ? 0 : ((n % 10 == 2) ? 1 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 2 : 3))\": \"n==0 ? 0 : (n % 10 == 1) ? 1 : ((n % 10 == 2) ? 2 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 3 : 4))\",\n \"(n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 0 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 1 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 2 : ((n != 0 && n % 1000000 == 0) ? 3 : 4)))\": \"n==0 ? 0 : (n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 1 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 2 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 3 : ((n != 0 && n % 1000000 == 0) ? 4 : 5)))\",\n \"n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2\": \"n==0 ? 0 : n%10==1 && n%100!=11 ? 
1 : 2\",\n}\n\n\ndef nospace_set(source):\n return {item.replace(\" \", \"\") for item in source}\n\n\n# Plural formula - type mappings\nPLURAL_MAPPINGS = (\n (nospace_set(ONE_OTHER_PLURALS), PLURAL_ONE_OTHER),\n (nospace_set(ONE_FEW_OTHER_PLURALS), PLURAL_ONE_FEW_OTHER),\n (nospace_set(ZERO_ONE_FEW_OTHER_PLURALS), PLURAL_ZERO_ONE_FEW_OTHER),\n (nospace_set(ONE_TWO_OTHER_PLURALS), PLURAL_ONE_TWO_OTHER),\n (nospace_set(ZERO_ONE_TWO_OTHER_PLURALS), PLURAL_ZERO_ONE_TWO_OTHER),\n (nospace_set(ONE_OTHER_TWO_PLURALS), PLURAL_ONE_OTHER_TWO),\n (nospace_set(ZERO_ONE_OTHER_PLURALS), PLURAL_ZERO_ONE_OTHER),\n (nospace_set(ONE_TWO_FEW_OTHER_PLURALS), PLURAL_ONE_TWO_FEW_OTHER),\n (nospace_set(ZERO_ONE_TWO_FEW_OTHER_PLURALS), PLURAL_ZERO_ONE_TWO_FEW_OTHER),\n (nospace_set(OTHER_ONE_TWO_FEW_PLURALS), PLURAL_OTHER_ONE_TWO_FEW),\n (nospace_set(ONE_TWO_THREE_OTHER_PLURALS), PLURAL_ONE_TWO_THREE_OTHER),\n (nospace_set(ONE_OTHER_ZERO_PLURALS), PLURAL_ONE_OTHER_ZERO),\n (nospace_set(ONE_FEW_MANY_OTHER_PLURALS), PLURAL_ONE_FEW_MANY_OTHER),\n (nospace_set(ZERO_ONE_FEW_MANY_OTHER_PLURALS), PLURAL_ZERO_ONE_FEW_MANY_OTHER),\n (nospace_set(TWO_OTHER_PLURALS), PLURAL_TWO_OTHER),\n (nospace_set(ONE_TWO_FEW_MANY_OTHER_PLURALS), PLURAL_ONE_TWO_FEW_MANY_OTHER),\n (\n nospace_set(ZERO_ONE_TWO_FEW_MANY_OTHER_PLURALS),\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER,\n ),\n (nospace_set(ZERO_OTHER_PLURALS), PLURAL_ZERO_OTHER),\n (nospace_set(ONE_MANY_OTHER_PLURALS), PLURAL_ONE_MANY_OTHER),\n)\n\n# Plural names mapping\nPLURAL_NAMES = {\n PLURAL_NONE: (\"\",),\n PLURAL_ONE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Singular\"),\n pgettext_lazy(\"Plural form description\", \"Plural\"),\n ),\n PLURAL_ONE_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ARABIC: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_OTHER_TWO: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n ),\n PLURAL_ONE_TWO_THREE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", 
\"Three\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_OTHER_ONE_TWO_FEW: (\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n ),\n PLURAL_ONE_OTHER_ZERO: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n ),\n PLURAL_ONE_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n}\n", "path": "weblate/lang/data.py" } ]
[ { "content": "#\n# Copyright © 2012–2022 Michal Čihař <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n# pylint: disable=line-too-long\n\nfrom django.utils.translation import pgettext_lazy\nfrom weblate_language_data import languages\nfrom weblate_language_data.ambiguous import AMBIGUOUS\n\nNO_CODE_LANGUAGES = {lang[0] for lang in languages.LANGUAGES}\n\nUNDERSCORE_EXCEPTIONS = {\n \"nb_NO\",\n \"zh_Hant\",\n \"zh_Hans\",\n \"be_Latn\",\n \"ro_MD\",\n \"pt_BR\",\n \"pa_PK\",\n}\nAT_EXCEPTIONS = {\"ca@valencia\"}\n\n\ndef is_basic(code):\n if code in AMBIGUOUS:\n return False\n if \"_\" in code:\n return code in UNDERSCORE_EXCEPTIONS\n return \"@\" not in code or code in AT_EXCEPTIONS\n\n\nBASIC_LANGUAGES = {lang for lang in NO_CODE_LANGUAGES if is_basic(lang)}\n\n# Following variables are used to map Gettext plural formulas\n# to one/few/may/other like rules\n\nONE_OTHER_PLURALS = (\n \"n==1 || n%10==1 ? 0 : 1\",\n \"n != 1\",\n \"(n != 1)\",\n \"n > 1\",\n \"(n > 1)\",\n \"n >= 2 && (n < 11 || n > 99)\",\n \"n % 10 != 1 || n % 100 == 11\",\n \"(n % 10 == 1 && n % 100 != 11) ? 0 : 1\",\n \"n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9)\",\n \"(n==0 || n==1)\",\n)\n\nTWO_OTHER_PLURALS = (\"(n==2) ? 1 : 0\",)\n\nONE_FEW_OTHER_PLURALS = (\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\",\n \"n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2\",\n \"n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2\",\n \"(n == 1) ? 0 : ((n == 0 || n != 1 && n % 100 >= 1 && n % 100 <= 19) ? 1 : 2)\",\n \"(n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2)\",\n \"(n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 0 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 1 : 2)\",\n \"(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)\",\n \"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1 : 2)\",\n \"(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\",\n)\nZERO_ONE_FEW_OTHER_PLURALS = (\n \"n == 0 ? 0 : n == 1 ? 1 : ((n >= 2 && n <= 10) ? 2 : 3)\",\n \"n==0 ? 0 : n==1 ? 1 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 2 : 3\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 2 : 3)\",\n \"n==0 ? 0 : n==1 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"n==0 ? 0 : (n==1) ? 1 : (n>=2 && n<=4) ? 2 : 3\",\n \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"n==0 ? 0 : (n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 1 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 2 : 3)\",\n)\n\nZERO_ONE_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : 2\",\n \"(n == 0) ? 
0 : ((n == 1) ? 1 : 2)\",\n \"(n % 10 == 0 || n % 100 >= 11 && n % 100 <= 19) ? 0 : ((n % 10 == 1 && n % 100 != 11) ? 1 : 2)\",\n \"n==0 ? 0 : n>1 ? 1 : 2\",\n \"n==0 ? 0 : n!=1 ? 1 : 2\",\n \"n == 0 ? 0 : n==1 || n%10==1 ? 1 : 2\",\n \"n==0 ? 0 : n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9) ? 1: 2\",\n \"n==0 ? 0 : n % 10 != 1 || n % 100 == 11 ? 1 :2\",\n \"n==0 ? 0 : n >= 2 && (n < 11 || n > 99) ? 1 : 2\",\n \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : 2\",\n)\n\nONE_TWO_OTHER_PLURALS = (\n \"n==1 ? 0 : n==2 ? 1 : 2\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : 2)\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : 2\",\n)\nZERO_ONE_TWO_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : 3\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : 3)\",\n)\n\nONE_OTHER_TWO_PLURALS = (\"n==1 ? 0 : n==2 ? 2 : 1\",)\n\nONE_TWO_THREE_OTHER_PLURALS = (\"(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3\",)\n\nONE_TWO_FEW_OTHER_PLURALS = (\n \"(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3\",\n \"(n % 10 == 1) ? 0 : ((n % 10 == 2) ? 1 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 2 : 3))\",\n \"(n % 100 == 1) ? 0 : ((n % 100 == 2) ? 1 : ((n % 100 == 3 || n % 100 == 4) ? 2 : 3))\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : 3))\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n == 10) ? 2 : 3))\",\n \"(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != 11) ? 2 : 3\",\n)\nZERO_ONE_TWO_FEW_OTHER_PLURALS = (\n \"n==0 ? 0 : (n==1 || n==11) ? 1 : (n==2 || n==12) ? 2 : (n > 2 && n < 20) ? 3 : 4\",\n \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : ((n > 10 && n % 10 == 0) ? 3 : 4))\",\n \"n==0 ? 0 : (n % 100 == 1) ? 1 : ((n % 100 == 2) ? 2 : ((n % 100 == 3 || n % 100 == 4) ? 3 : 4))\",\n \"n==0 ? 0 : n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 4\",\n \"n==0 ? 0 : (n % 10 == 1) ? 1 : ((n % 10 == 2) ? 2 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 3 : 4))\",\n)\n\nOTHER_ONE_TWO_FEW_PLURALS = (\n \"(n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 0)\",\n)\n\nONE_TWO_FEW_MANY_OTHER_PLURALS = (\n \"n==1 ? 0 : n==2 ? 1 : n<7 ? 2 : n<11 ? 3 : 4\",\n \"n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4\",\n \"(n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 0 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 1 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 2 : ((n != 0 && n % 1000000 == 0) ? 3 : 4)))\",\n)\n\nONE_FEW_MANY_OTHER_PLURALS = (\n \"n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3\",\n \"n==1 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : n%10==0 || (n%100>10 && n%100<20) ? 2 : 3\",\n \"n==1 ? 3 : n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\",\n)\nZERO_ONE_FEW_MANY_OTHER_PLURALS = (\n \"n==0 ? 0 : n==1 ? 1 : ( n%100>1 && n%100<11) ? 2 : (n%100>10 && n%100<20 ) ? 3 : 4\",\n)\n\nONE_OTHER_ZERO_PLURALS = (\"n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2\",)\n\nZERO_ONE_TWO_FEW_MANY_OTHER_PLURALS = (\n \"(n==0) ? 0 : (n==1) ? 1 : (n==2) ? 2 : (n==3) ? 3 :(n==6) ? 4 : 5\",\n \"(n == 0) ? 0 : ((n == 1) ? 1 : ((n == 2) ? 2 : ((n % 100 >= 3 && n % 100 <= 10) ? 3 : ((n % 100 >= 11 && n % 100 <= 99) ? 4 : 5))))\",\n \"(n == 0) ? 0 : ((n == 1) ? 
1 : (((n % 100 == 2 || n % 100 == 22 || n % 100 == 42 || n % 100 == 62 || n % 100 == 82) || n % 1000 == 0 && (n % 100000 >= 1000 && n % 100000 <= 20000 || n % 100000 == 40000 || n % 100000 == 60000 || n % 100000 == 80000) || n != 0 && n % 1000000 == 100000) ? 2 : ((n % 100 == 3 || n % 100 == 23 || n % 100 == 43 || n % 100 == 63 || n % 100 == 83) ? 3 : ((n != 1 && (n % 100 == 1 || n % 100 == 21 || n % 100 == 41 || n % 100 == 61 || n % 100 == 81)) ? 4 : 5))))\",\n \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : (n>2 && n<7) ? 3 :(n>6 && n<11) ? 4 : 5\",\n \"n==0 ? 0 : (n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 1 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 2 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 3 : ((n != 0 && n % 1000000 == 0) ? 4 : 5)))\",\n)\nONE_MANY_OTHER_PLURALS = (\n \"(n == 1) ? 0 : ((n != 0 && n % 1000000 == 0) ? 1 : 2)\",\n \"(n == 0 || n == 1) ? 0 : ((n != 0 && n % 1000000 == 0) ? 1 : 2)\",\n)\n\nZERO_OTHER_PLURALS = (\"n==0 ? 0 : 1\",)\n\n# Plural types definition\nPLURAL_NONE = 0\nPLURAL_ONE_OTHER = 1\nPLURAL_ONE_FEW_OTHER = 2\nPLURAL_ARABIC = 3\nPLURAL_ONE_TWO_OTHER = 4\nPLURAL_ONE_TWO_THREE_OTHER = 5\nPLURAL_ONE_TWO_FEW_OTHER = 6\nPLURAL_ONE_OTHER_ZERO = 7\nPLURAL_ONE_FEW_MANY_OTHER = 8\nPLURAL_TWO_OTHER = 9\nPLURAL_ONE_TWO_FEW_MANY_OTHER = 10\nPLURAL_ZERO_ONE_OTHER = 11\nPLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER = 12\nPLURAL_OTHER_ONE_TWO_FEW = 13\nPLURAL_ONE_OTHER_TWO = 14\nPLURAL_ZERO_OTHER = 15\nPLURAL_ZERO_ONE_FEW_OTHER = 16\nPLURAL_ZERO_ONE_TWO_FEW_OTHER = 17\nPLURAL_ZERO_ONE_TWO_OTHER = 18\nPLURAL_ZERO_ONE_FEW_MANY_OTHER = 19\nPLURAL_ONE_MANY_OTHER = 20\nPLURAL_UNKNOWN = 666\n\n# Extra zero plural handling for stringsdict\nZERO_PLURAL_TYPES = {\n PLURAL_ARABIC,\n PLURAL_ZERO_ONE_OTHER,\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER,\n}\n\nFORMULA_WITH_ZERO = {\n \"0\": \"n==0 ? 0 : 1\",\n \"n > 1\": \"n==0 ? 0 : n>1 ? 1 : 2\",\n \"n != 1\": \"n==0 ? 0 : n!=1 ? 1 : 2\",\n \"(n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2)\": \"n == 0 ? 0 : n == 1 ? 1 : ((n >= 2 && n <= 10) ? 2 : 3)\",\n \"n == 1 ? 0 : n == 2 ? 1 : 2\": \"n == 0 ? 0 : n == 1 ? 1 : n == 2 ? 2 : 3\",\n \"n==1 || n%10==1 ? 0 : 1\": \"n == 0 ? 0 : n==1 || n%10==1 ? 1 : 2\",\n \"n==1 ? 0 : n==2 ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : 3\",\n \"(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3\": \"n==0 ? 0 : (n==1 || n==11) ? 1 : (n==2 || n==12) ? 2 : (n > 2 && n < 20) ? 3 : 4\",\n \"n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9)\": \"n==0 ? 0 : n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || n % 10 == 9) ? 1: 2\",\n \"n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1 : 2)\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 2 : 3)\",\n \"n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\": \"n==0 ? 0 : n==1 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\": \"n==0 ? 0 : (n==1) ? 1 : (n>=2 && n<=4) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : 2)\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : 3)\",\n \"n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3\": \"n==0 ? 0 : n==1 ? 1 : ( n%100>1 && n%100<11) ? 2 : (n%100>10 && n%100<20 ) ? 
3 : 4\",\n \"n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\": \"n==0 ? 0 : n%10==1 && n%100!=11 ? 1 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 2 : 3\",\n \"(n == 1) ? 0 : ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : 3))\": \"n==0 ? 0 : (n == 1) ? 1 : ((n == 2) ? 2 : ((n > 10 && n % 10 == 0) ? 3 : 4))\",\n \"n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4\": \"n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : (n>2 && n<7) ? 3 :(n>6 && n<11) ? 4 : 5\",\n \"(n % 100 == 1) ? 0 : ((n % 100 == 2) ? 1 : ((n % 100 == 3 || n % 100 == 4) ? 2 : 3))\": \"n==0 ? 0 : (n % 100 == 1) ? 1 : ((n % 100 == 2) ? 2 : ((n % 100 == 3 || n % 100 == 4) ? 3 : 4))\",\n \"n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3\": \"n==0 ? 0 : n%100==1 ? 1 : n%100==2 ? 2 : n%100==3 || n%100==4 ? 3 : 4\",\n \"n % 10 != 1 || n % 100 == 11\": \"n==0 ? 0 : n % 10 != 1 || n % 100 == 11 ? 1 :2\",\n \"(n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 0 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 1 : 2)\": \"n==0 ? 0 : (n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 1 : ((n % 10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 2 : 3)\",\n \"n >= 2 && (n < 11 || n > 99)\": \"n==0 ? 0 : n >= 2 && (n < 11 || n > 99) ? 1 : 2\",\n \"(n % 10 == 1) ? 0 : ((n % 10 == 2) ? 1 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 2 : 3))\": \"n==0 ? 0 : (n % 10 == 1) ? 1 : ((n % 10 == 2) ? 2 : ((n % 100 == 0 || n % 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == 80) ? 3 : 4))\",\n \"(n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 0 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 1 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 2 : ((n != 0 && n % 1000000 == 0) ? 3 : 4)))\": \"n==0 ? 0 : (n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != 91) ? 1 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && n % 100 != 92) ? 2 : ((((n % 10 == 3 || n % 10 == 4) || n % 10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 || n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 3 : ((n != 0 && n % 1000000 == 0) ? 4 : 5)))\",\n \"n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2\": \"n==0 ? 0 : n%10==1 && n%100!=11 ? 
1 : 2\",\n}\n\n\ndef nospace_set(source):\n return {item.replace(\" \", \"\") for item in source}\n\n\n# Plural formula - type mappings\nPLURAL_MAPPINGS = (\n (nospace_set(ONE_OTHER_PLURALS), PLURAL_ONE_OTHER),\n (nospace_set(ONE_FEW_OTHER_PLURALS), PLURAL_ONE_FEW_OTHER),\n (nospace_set(ZERO_ONE_FEW_OTHER_PLURALS), PLURAL_ZERO_ONE_FEW_OTHER),\n (nospace_set(ONE_TWO_OTHER_PLURALS), PLURAL_ONE_TWO_OTHER),\n (nospace_set(ZERO_ONE_TWO_OTHER_PLURALS), PLURAL_ZERO_ONE_TWO_OTHER),\n (nospace_set(ONE_OTHER_TWO_PLURALS), PLURAL_ONE_OTHER_TWO),\n (nospace_set(ZERO_ONE_OTHER_PLURALS), PLURAL_ZERO_ONE_OTHER),\n (nospace_set(ONE_TWO_FEW_OTHER_PLURALS), PLURAL_ONE_TWO_FEW_OTHER),\n (nospace_set(ZERO_ONE_TWO_FEW_OTHER_PLURALS), PLURAL_ZERO_ONE_TWO_FEW_OTHER),\n (nospace_set(OTHER_ONE_TWO_FEW_PLURALS), PLURAL_OTHER_ONE_TWO_FEW),\n (nospace_set(ONE_TWO_THREE_OTHER_PLURALS), PLURAL_ONE_TWO_THREE_OTHER),\n (nospace_set(ONE_OTHER_ZERO_PLURALS), PLURAL_ONE_OTHER_ZERO),\n (nospace_set(ONE_FEW_MANY_OTHER_PLURALS), PLURAL_ONE_FEW_MANY_OTHER),\n (nospace_set(ZERO_ONE_FEW_MANY_OTHER_PLURALS), PLURAL_ZERO_ONE_FEW_MANY_OTHER),\n (nospace_set(TWO_OTHER_PLURALS), PLURAL_TWO_OTHER),\n (nospace_set(ONE_TWO_FEW_MANY_OTHER_PLURALS), PLURAL_ONE_TWO_FEW_MANY_OTHER),\n (\n nospace_set(ZERO_ONE_TWO_FEW_MANY_OTHER_PLURALS),\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER,\n ),\n (nospace_set(ZERO_OTHER_PLURALS), PLURAL_ZERO_OTHER),\n (nospace_set(ONE_MANY_OTHER_PLURALS), PLURAL_ONE_MANY_OTHER),\n)\n\n# Plural names mapping\nPLURAL_NAMES = {\n PLURAL_NONE: (\"\",),\n PLURAL_ONE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Singular\"),\n pgettext_lazy(\"Plural form description\", \"Plural\"),\n ),\n PLURAL_ONE_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ARABIC: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_OTHER_TWO: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n ),\n PLURAL_ONE_TWO_THREE_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", 
\"Three\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_FEW_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_OTHER_ONE_TWO_FEW: (\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n ),\n PLURAL_ONE_OTHER_ZERO: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n ),\n PLURAL_ONE_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_TWO_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_TWO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_ONE_TWO_FEW_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Two\"),\n pgettext_lazy(\"Plural form description\", \"Few\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ZERO_OTHER: (\n pgettext_lazy(\"Plural form description\", \"Zero\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n PLURAL_ONE_MANY_OTHER: (\n pgettext_lazy(\"Plural form description\", \"One\"),\n pgettext_lazy(\"Plural form description\", \"Many\"),\n pgettext_lazy(\"Plural form description\", \"Other\"),\n ),\n}\n", "path": "weblate/lang/data.py" } ]
diff --git a/weblate/lang/data.py b/weblate/lang/data.py index 88e50e6e92fb..35d7ee7d8c73 100644 --- a/weblate/lang/data.py +++ b/weblate/lang/data.py @@ -24,7 +24,15 @@ NO_CODE_LANGUAGES = {lang[0] for lang in languages.LANGUAGES} -UNDERSCORE_EXCEPTIONS = {"nb_NO", "zh_Hant", "zh_Hans", "be_Latn", "ro_MD", "pt_BR"} +UNDERSCORE_EXCEPTIONS = { + "nb_NO", + "zh_Hant", + "zh_Hans", + "be_Latn", + "ro_MD", + "pt_BR", + "pa_PK", +} AT_EXCEPTIONS = {"ca@valencia"}
bookwyrm-social__bookwyrm-2501
AASIN and isfdb not editable. Somehow during the merge some code must have gotten lost, because... <img width="640" alt="Bildschirmfoto 2022-12-11 um 21 29 47" src="https://user-images.githubusercontent.com/2017105/206927195-f9b27bcc-2f3a-46eb-ab1d-84340e5fa061.png">
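For context on why the two inputs never appear: a Django `ModelForm` only renders the model fields listed in `Meta.fields`, so having widgets configured for `aasin`/`isfdb` is not enough on its own. Below is a minimal sketch of the idea, using a plain `ModelForm` and a deliberately trimmed field list purely for illustration (the real form in the file that follows is bookwyrm's `CustomForm` with many more fields):

```python
from django import forms

from bookwyrm import models


class EditionForm(forms.ModelForm):
    class Meta:
        model = models.Edition
        # A field only becomes an editable input when it is named here;
        # "aasin" and "isfdb" are the entries that went missing in the merge.
        fields = ["title", "isbn_13", "asin", "aasin", "isfdb"]
        widgets = {
            # Widget entries only customise how fields already listed in
            # `fields` are rendered; they never add a field by themselves.
            "aasin": forms.TextInput(attrs={"aria-describedby": "desc_AASIN"}),
            "isfdb": forms.TextInput(attrs={"aria-describedby": "desc_isfdb"}),
        }
```

The accompanying diff further down makes exactly this change, appending `"aasin"` and `"isfdb"` to the existing `fields` list while leaving the widget definitions as they were.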
[ { "content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\n\nfrom bookwyrm import models\nfrom bookwyrm.models.fields import ClearableFileInputWithWarning\nfrom .custom_form import CustomForm\nfrom .widgets import ArrayWidget, SelectDateWidget, Select\n\n\n# pylint: disable=missing-class-docstring\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n fields = [\n \"title\",\n \"subtitle\",\n \"description\",\n \"series\",\n \"series_number\",\n \"languages\",\n \"subjects\",\n \"publishers\",\n \"first_published_date\",\n \"published_date\",\n \"cover\",\n \"physical_format\",\n \"physical_format_detail\",\n \"pages\",\n \"isbn_13\",\n \"isbn_10\",\n \"openlibrary_key\",\n \"inventaire_id\",\n \"goodreads_key\",\n \"oclc_number\",\n \"asin\",\n ]\n widgets = {\n \"title\": forms.TextInput(attrs={\"aria-describedby\": \"desc_title\"}),\n \"subtitle\": forms.TextInput(attrs={\"aria-describedby\": \"desc_subtitle\"}),\n \"description\": forms.Textarea(\n attrs={\"aria-describedby\": \"desc_description\"}\n ),\n \"series\": forms.TextInput(attrs={\"aria-describedby\": \"desc_series\"}),\n \"series_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_series_number\"}\n ),\n \"subjects\": ArrayWidget(),\n \"languages\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_languages_help desc_languages\"}\n ),\n \"publishers\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_publishers_help desc_publishers\"}\n ),\n \"first_published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_first_published_date\"}\n ),\n \"published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_published_date\"}\n ),\n \"cover\": ClearableFileInputWithWarning(\n attrs={\"aria-describedby\": \"desc_cover\"}\n ),\n \"physical_format\": Select(\n attrs={\"aria-describedby\": \"desc_physical_format\"}\n ),\n \"physical_format_detail\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_physical_format_detail\"}\n ),\n \"pages\": forms.NumberInput(attrs={\"aria-describedby\": \"desc_pages\"}),\n \"isbn_13\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_13\"}),\n \"isbn_10\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_10\"}),\n \"openlibrary_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_openlibrary_key\"}\n ),\n \"inventaire_id\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_inventaire_id\"}\n ),\n \"goodreads_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_goodreads_key\"}\n ),\n \"oclc_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_oclc_number\"}\n ),\n \"ASIN\": forms.TextInput(attrs={\"aria-describedby\": \"desc_ASIN\"}),\n \"AASIN\": forms.TextInput(attrs={\"aria-describedby\": \"desc_AASIN\"}),\n \"isfdb\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isfdb\"}),\n }\n\n\nclass EditionFromWorkForm(CustomForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # make all fields hidden\n for visible in self.visible_fields():\n visible.field.widget = forms.HiddenInput()\n\n class Meta:\n model = models.Work\n fields = [\n \"title\",\n \"subtitle\",\n \"authors\",\n \"description\",\n \"languages\",\n \"series\",\n \"series_number\",\n \"subjects\",\n \"subject_places\",\n \"cover\",\n \"first_published_date\",\n ]\n", "path": "bookwyrm/forms/books.py" } ]
[ { "content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\n\nfrom bookwyrm import models\nfrom bookwyrm.models.fields import ClearableFileInputWithWarning\nfrom .custom_form import CustomForm\nfrom .widgets import ArrayWidget, SelectDateWidget, Select\n\n\n# pylint: disable=missing-class-docstring\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n fields = [\n \"title\",\n \"subtitle\",\n \"description\",\n \"series\",\n \"series_number\",\n \"languages\",\n \"subjects\",\n \"publishers\",\n \"first_published_date\",\n \"published_date\",\n \"cover\",\n \"physical_format\",\n \"physical_format_detail\",\n \"pages\",\n \"isbn_13\",\n \"isbn_10\",\n \"openlibrary_key\",\n \"inventaire_id\",\n \"goodreads_key\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n widgets = {\n \"title\": forms.TextInput(attrs={\"aria-describedby\": \"desc_title\"}),\n \"subtitle\": forms.TextInput(attrs={\"aria-describedby\": \"desc_subtitle\"}),\n \"description\": forms.Textarea(\n attrs={\"aria-describedby\": \"desc_description\"}\n ),\n \"series\": forms.TextInput(attrs={\"aria-describedby\": \"desc_series\"}),\n \"series_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_series_number\"}\n ),\n \"subjects\": ArrayWidget(),\n \"languages\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_languages_help desc_languages\"}\n ),\n \"publishers\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_publishers_help desc_publishers\"}\n ),\n \"first_published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_first_published_date\"}\n ),\n \"published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_published_date\"}\n ),\n \"cover\": ClearableFileInputWithWarning(\n attrs={\"aria-describedby\": \"desc_cover\"}\n ),\n \"physical_format\": Select(\n attrs={\"aria-describedby\": \"desc_physical_format\"}\n ),\n \"physical_format_detail\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_physical_format_detail\"}\n ),\n \"pages\": forms.NumberInput(attrs={\"aria-describedby\": \"desc_pages\"}),\n \"isbn_13\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_13\"}),\n \"isbn_10\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_10\"}),\n \"openlibrary_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_openlibrary_key\"}\n ),\n \"inventaire_id\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_inventaire_id\"}\n ),\n \"goodreads_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_goodreads_key\"}\n ),\n \"oclc_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_oclc_number\"}\n ),\n \"ASIN\": forms.TextInput(attrs={\"aria-describedby\": \"desc_ASIN\"}),\n \"AASIN\": forms.TextInput(attrs={\"aria-describedby\": \"desc_AASIN\"}),\n \"isfdb\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isfdb\"}),\n }\n\n\nclass EditionFromWorkForm(CustomForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # make all fields hidden\n for visible in self.visible_fields():\n visible.field.widget = forms.HiddenInput()\n\n class Meta:\n model = models.Work\n fields = [\n \"title\",\n \"subtitle\",\n \"authors\",\n \"description\",\n \"languages\",\n \"series\",\n \"series_number\",\n \"subjects\",\n \"subject_places\",\n \"cover\",\n \"first_published_date\",\n ]\n", "path": "bookwyrm/forms/books.py" } ]
diff --git a/bookwyrm/forms/books.py b/bookwyrm/forms/books.py index 67b044f05d..623beaa042 100644 --- a/bookwyrm/forms/books.py +++ b/bookwyrm/forms/books.py @@ -40,6 +40,8 @@ class Meta: "goodreads_key", "oclc_number", "asin", + "aasin", + "isfdb", ] widgets = { "title": forms.TextInput(attrs={"aria-describedby": "desc_title"}),
bookwyrm-social__bookwyrm-1809
Unread notifications are no longer visually distinguished from read ones. I just forgot to consider this when I re-wrote the notifications page.
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.1.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"2d3181e1\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nEMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\nEMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_NAME\", DOMAIN)\nEMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n 
\"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Search configuration\n# total time in seconds that the instance will spend searching connectors\nSEARCH_TIMEOUT = int(env(\"SEARCH_TIMEOUT\", 15))\n# timeout for a query to an individual connector\nQUERY_TIMEOUT = int(env(\"QUERY_TIMEOUT\", 5))\n\n# Redis cache backend\nif env(\"USE_DUMMY_CACHE\", False):\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.dummy.DummyCache\",\n }\n }\nelse:\n # pylint: disable=line-too-long\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/0\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n }\n\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Español (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"it-it\", _(\"Italiano (Italian)\")),\n (\"fr-fr\", _(\"Français (French)\")),\n (\"lt-lt\", _(\"Lietuvių (Lithuanian)\")),\n (\"no-no\", _(\"Norsk (Norwegian)\")),\n (\"pt-br\", _(\"Português do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Português Europeu (European Portuguese)\")),\n (\"zh-hans\", _(\"简体中文 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"繁體中文 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\nIMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = 
\"bookwyrm.thumbnail_generation.Strategy\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py" } ]
[ { "content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nenv.read_env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.1.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"9b4cc1f7\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nEMAIL_SENDER_NAME = env(\"EMAIL_SENDER_NAME\", \"admin\")\nEMAIL_SENDER_DOMAIN = env(\"EMAIL_SENDER_NAME\", DOMAIN)\nEMAIL_SENDER = f\"{EMAIL_SENDER_NAME}@{EMAIL_SENDER_DOMAIN}\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\nLANGUAGE_COOKIE_NAME = env.str(\"LANGUAGE_COOKIE_NAME\", \"django_language\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.middleware.TimezoneMiddleware\",\n \"bookwyrm.middleware.IPBlocklistMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n 
\"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Search configuration\n# total time in seconds that the instance will spend searching connectors\nSEARCH_TIMEOUT = int(env(\"SEARCH_TIMEOUT\", 15))\n# timeout for a query to an individual connector\nQUERY_TIMEOUT = int(env(\"QUERY_TIMEOUT\", 5))\n\n# Redis cache backend\nif env(\"USE_DUMMY_CACHE\", False):\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.dummy.DummyCache\",\n }\n }\nelse:\n # pylint: disable=line-too-long\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://:{REDIS_ACTIVITY_PASSWORD}@{REDIS_ACTIVITY_HOST}:{REDIS_ACTIVITY_PORT}/0\",\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n }\n }\n\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"bookwyrm\"),\n \"USER\": env(\"POSTGRES_USER\", \"bookwyrm\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"bookwyrm\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"PGPORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"Deutsch (German)\")),\n (\"es-es\", _(\"Español (Spanish)\")),\n (\"gl-es\", _(\"Galego (Galician)\")),\n (\"it-it\", _(\"Italiano (Italian)\")),\n (\"fr-fr\", _(\"Français (French)\")),\n (\"lt-lt\", _(\"Lietuvių (Lithuanian)\")),\n (\"no-no\", _(\"Norsk (Norwegian)\")),\n (\"pt-br\", _(\"Português do Brasil (Brazilian Portuguese)\")),\n (\"pt-pt\", _(\"Português Europeu (European Portuguese)\")),\n (\"zh-hans\", _(\"简体中文 (Simplified Chinese)\")),\n (\"zh-hant\", _(\"繁體中文 (Traditional Chinese)\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nagent = requests.utils.default_user_agent()\nUSER_AGENT = f\"{agent} (BookWyrm/{VERSION}; +https://{DOMAIN}/)\"\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\nIMAGEKIT_DEFAULT_CACHEFILE_STRATEGY = 
\"bookwyrm.thumbnail_generation.Strategy\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/\"\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = f\"{PROTOCOL}://{AWS_S3_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/\"\n MEDIA_FULL_URL = MEDIA_URL\n STATIC_FULL_URL = STATIC_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{MEDIA_URL}\"\n STATIC_FULL_URL = f\"{PROTOCOL}://{DOMAIN}{STATIC_URL}\"\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py" } ]
diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py index fe2f7467ab..92ff7ecdd5 100644 --- a/bookwyrm/settings.py +++ b/bookwyrm/settings.py @@ -14,7 +14,7 @@ PAGE_LENGTH = env("PAGE_LENGTH", 15) DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English") -JS_CACHE = "2d3181e1" +JS_CACHE = "9b4cc1f7" # email EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend") diff --git a/bookwyrm/static/css/bookwyrm.css b/bookwyrm/static/css/bookwyrm.css index 4d960734e1..cc87e5b813 100644 --- a/bookwyrm/static/css/bookwyrm.css +++ b/bookwyrm/static/css/bookwyrm.css @@ -751,6 +751,13 @@ ol.ordered-list li::before { padding: 0 0.75em; } +/* Notifications page + ******************************************************************************/ + +.notification a.icon { + text-decoration: none !important; +} + /* Breadcrumbs ******************************************************************************/ diff --git a/bookwyrm/templates/notifications/items/accept.html b/bookwyrm/templates/notifications/items/accept.html index 045e232666..5f26008f47 100644 --- a/bookwyrm/templates/notifications/items/accept.html +++ b/bookwyrm/templates/notifications/items/accept.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/add.html b/bookwyrm/templates/notifications/items/add.html index 0e653aeb8d..6a0183ebe9 100644 --- a/bookwyrm/templates/notifications/items/add.html +++ b/bookwyrm/templates/notifications/items/add.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/boost.html b/bookwyrm/templates/notifications/items/boost.html index 5f8962b382..6bb373ef63 100644 --- a/bookwyrm/templates/notifications/items/boost.html +++ b/bookwyrm/templates/notifications/items/boost.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/fav.html b/bookwyrm/templates/notifications/items/fav.html index fbb865e4f9..58964d0331 100644 --- a/bookwyrm/templates/notifications/items/fav.html +++ b/bookwyrm/templates/notifications/items/fav.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/follow.html b/bookwyrm/templates/notifications/items/follow.html index 7220d5d174..3518e7b1b7 100644 --- a/bookwyrm/templates/notifications/items/follow.html +++ b/bookwyrm/templates/notifications/items/follow.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/follow_request.html b/bookwyrm/templates/notifications/items/follow_request.html index febb0a50e2..9cec8116aa 100644 --- a/bookwyrm/templates/notifications/items/follow_request.html +++ b/bookwyrm/templates/notifications/items/follow_request.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/import.html 
b/bookwyrm/templates/notifications/items/import.html index f3c8b5c099..7f59948117 100644 --- a/bookwyrm/templates/notifications/items/import.html +++ b/bookwyrm/templates/notifications/items/import.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% block primary_link %}{% spaceless %} diff --git a/bookwyrm/templates/notifications/items/invite.html b/bookwyrm/templates/notifications/items/invite.html index abb8cd02fe..aff416b07c 100644 --- a/bookwyrm/templates/notifications/items/invite.html +++ b/bookwyrm/templates/notifications/items/invite.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/join.html b/bookwyrm/templates/notifications/items/join.html index c10def4565..82f8a8c505 100644 --- a/bookwyrm/templates/notifications/items/join.html +++ b/bookwyrm/templates/notifications/items/join.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/item_layout.html b/bookwyrm/templates/notifications/items/layout.html similarity index 70% rename from bookwyrm/templates/notifications/items/item_layout.html rename to bookwyrm/templates/notifications/items/layout.html index 506bda8dd4..6ddbdcc314 100644 --- a/bookwyrm/templates/notifications/items/item_layout.html +++ b/bookwyrm/templates/notifications/items/layout.html @@ -1,9 +1,9 @@ {% load bookwyrm_tags %} {% related_status notification as related_status %} -<div class="box is-shadowless has-background-white-ter {% if notification.id in unread %} is-primary{% endif %}"> - <div class="columns is-mobile"> - <div class="column is-narrow is-size-3 {% if notification.id in unread%}has-text-white{% else %}has-text-grey{% endif %}"> - <a class="has-text-dark" href="{% block primary_link %}{% endblock %}"> +<div class="notification {% if notification.id in unread %}has-background-primary{% endif %}"> + <div class="columns is-mobile {% if notification.id in unread %}has-text-white{% else %}has-text-grey{% endif %}"> + <div class="column is-narrow is-size-3"> + <a class="icon" href="{% block primary_link %}{% endblock %}"> {% block icon %}{% endblock %} </a> </div> diff --git a/bookwyrm/templates/notifications/items/leave.html b/bookwyrm/templates/notifications/items/leave.html index 422a31dead..c17a1986ea 100644 --- a/bookwyrm/templates/notifications/items/leave.html +++ b/bookwyrm/templates/notifications/items/leave.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/mention.html b/bookwyrm/templates/notifications/items/mention.html index cda77163e7..ead3c8a6cb 100644 --- a/bookwyrm/templates/notifications/items/mention.html +++ b/bookwyrm/templates/notifications/items/mention.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/remove.html b/bookwyrm/templates/notifications/items/remove.html index eba18fd899..84160c7bd3 100644 --- a/bookwyrm/templates/notifications/items/remove.html +++ 
b/bookwyrm/templates/notifications/items/remove.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/reply.html b/bookwyrm/templates/notifications/items/reply.html index 883bbbb5bf..0aa664ce40 100644 --- a/bookwyrm/templates/notifications/items/reply.html +++ b/bookwyrm/templates/notifications/items/reply.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %} diff --git a/bookwyrm/templates/notifications/items/report.html b/bookwyrm/templates/notifications/items/report.html index f537b52556..fdd5f00946 100644 --- a/bookwyrm/templates/notifications/items/report.html +++ b/bookwyrm/templates/notifications/items/report.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} diff --git a/bookwyrm/templates/notifications/items/update.html b/bookwyrm/templates/notifications/items/update.html index be796b7856..7fc52cef10 100644 --- a/bookwyrm/templates/notifications/items/update.html +++ b/bookwyrm/templates/notifications/items/update.html @@ -1,4 +1,4 @@ -{% extends 'notifications/items/item_layout.html' %} +{% extends 'notifications/items/layout.html' %} {% load i18n %} {% load utilities %}
locustio__locust-1573
Misleading log message in distributed mode ### Describe the bug When running locust in distributed mode you can still get this warning message on the master node: ``` /WARNING/locust.main: System open file limit setting is not high enough for load testing, and the OS didn't allow locust to increase it by itself. See https://github.com/locustio/locust/wiki/Installation#increasing-maximum-number-of-open-files-limit for more info. ``` Since the master won't execute any of the tests this warning is misleading. ### Expected behavior No log message about open file limit on master when running distributed ### Actual behavior Log message about low open file limit on instance running master mode ### Steps to reproduce Run `locust --master` on a host with low open file limit (e.g 2048). ### Environment - OS: locust docker image running on Linux host - Python version: Embedded version in docker image with tag `1.2.3` - Locust version: 1.2.3 - Locust command line that you ran: `locust --master` - Locust file contents (anonymized if necessary): N/A
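The warning comes from the `RLIMIT_NOFILE` check near the top of `main()` in `locust/main.py` (included below), which runs on every POSIX node regardless of its role. A minimal sketch of the guard the report is asking for, pulled out into a hypothetical helper so the intent is easier to see; the helper name is made up, while the `options.master` flag and the `resource` calls are taken from the file below:

```python
import logging
import os

logger = logging.getLogger(__name__)

MINIMUM_OPEN_FILE_LIMIT = 10000


def check_open_file_limit(options):
    """Warn about a low RLIMIT_NOFILE only on nodes that will spawn users.

    The master only coordinates workers and opens almost no sockets itself,
    so the check (and its warning) is skipped when --master is given.
    """
    if os.name == "nt" or options.master:
        return
    import resource  # POSIX-only module, hence the late import

    current = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
    if current >= MINIMUM_OPEN_FILE_LIMIT:
        return
    try:
        # Raising the soft limit from inside the process works on some OSes
        # (e.g. macOS); if the OS refuses, fall back to a warning.
        resource.setrlimit(
            resource.RLIMIT_NOFILE, (MINIMUM_OPEN_FILE_LIMIT, resource.RLIM_INFINITY)
        )
    except (ValueError, OSError):
        logger.warning(
            "System open file limit %s is below %s and could not be raised; "
            "this is too low for load testing.",
            current,
            MINIMUM_OPEN_FILE_LIMIT,
        )
```

The fixed file further down takes the simpler route of adding `and not options.master` to the existing `if os.name != "nt"` condition, which has the same effect: the master node never reaches the limit check or its warning.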
[ { "content": "import inspect\nimport logging\nimport os\nimport importlib\nimport signal\nimport socket\nimport sys\nimport time\n\nimport gevent\n\nimport locust\n\nfrom . import log\nfrom .argument_parser import parse_locustfile_option, parse_options\nfrom .env import Environment\nfrom .log import setup_logging, greenlet_exception_logger\nfrom . import stats\nfrom .stats import print_error_report, print_percentile_stats, print_stats, stats_printer, stats_history\nfrom .stats import StatsCSV, StatsCSVFileWriter\nfrom .user import User\nfrom .user.inspectuser import get_task_ratio_dict, print_task_ratio\nfrom .util.timespan import parse_timespan\nfrom .exception import AuthCredentialsError\nfrom .shape import LoadTestShape\n\n\nversion = locust.__version__\n\n\ndef is_user_class(item):\n \"\"\"\n Check if a variable is a runnable (non-abstract) User class\n \"\"\"\n return bool(inspect.isclass(item) and issubclass(item, User) and item.abstract is False)\n\n\ndef is_shape_class(item):\n \"\"\"\n Check if a class is a LoadTestShape\n \"\"\"\n return bool(\n inspect.isclass(item) and issubclass(item, LoadTestShape) and item.__dict__[\"__module__\"] != \"locust.shape\"\n )\n\n\ndef load_locustfile(path):\n \"\"\"\n Import given locustfile path and return (docstring, callables).\n\n Specifically, the locustfile's ``__doc__`` attribute (a string) and a\n dictionary of ``{'name': callable}`` containing all callables which pass\n the \"is a Locust\" test.\n \"\"\"\n\n # Start with making sure the current working dir is in the sys.path\n sys.path.insert(0, os.getcwd())\n # Get directory and locustfile name\n directory, locustfile = os.path.split(path)\n # If the directory isn't in the PYTHONPATH, add it so our import will work\n added_to_path = False\n index = None\n if directory not in sys.path:\n sys.path.insert(0, directory)\n added_to_path = True\n # If the directory IS in the PYTHONPATH, move it to the front temporarily,\n # otherwise other locustfiles -- like Locusts's own -- may scoop the intended\n # one.\n else:\n i = sys.path.index(directory)\n if i != 0:\n # Store index for later restoration\n index = i\n # Add to front, then remove from original position\n sys.path.insert(0, directory)\n del sys.path[i + 1]\n # Perform the import\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n # Remove directory from path if we added it ourselves (just to be neat)\n if added_to_path:\n del sys.path[0]\n # Put back in original index if we moved it\n if index is not None:\n sys.path.insert(index + 1, directory)\n del sys.path[0]\n # Return our two-tuple\n user_classes = {name: value for name, value in vars(imported).items() if is_user_class(value)}\n\n # Find shape class, if any, return it\n shape_classes = [value for name, value in vars(imported).items() if is_shape_class(value)]\n if shape_classes:\n shape_class = shape_classes[0]()\n else:\n shape_class = None\n\n return imported.__doc__, user_classes, shape_class\n\n\ndef create_environment(user_classes, options, events=None, shape_class=None):\n \"\"\"\n Create an Environment instance from options\n \"\"\"\n return Environment(\n user_classes=user_classes,\n shape_class=shape_class,\n tags=options.tags,\n exclude_tags=options.exclude_tags,\n events=events,\n host=options.host,\n reset_stats=options.reset_stats,\n step_load=options.step_load,\n stop_timeout=options.stop_timeout,\n parsed_options=options,\n )\n\n\ndef main():\n # find specified locustfile and make sure it 
exists, using a very simplified\n # command line parser that is only used to parse the -f option\n locustfile = parse_locustfile_option()\n\n # import the locustfile\n docstring, user_classes, shape_class = load_locustfile(locustfile)\n\n # parse all command line options\n options = parse_options()\n\n if options.slave or options.expect_slaves:\n sys.stderr.write(\"The --slave/--expect-slaves parameters have been renamed --worker/--expect-workers\\n\")\n sys.exit(1)\n\n if options.hatch_rate:\n sys.stderr.write(\"[DEPRECATED] The --hatch-rate parameter has been renamed --spawn-rate\\n\")\n options.spawn_rate = options.hatch_rate\n\n # setup logging\n if not options.skip_log_setup:\n if options.loglevel.upper() in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]:\n setup_logging(options.loglevel, options.logfile)\n else:\n sys.stderr.write(\"Invalid --loglevel. Valid values are: DEBUG/INFO/WARNING/ERROR/CRITICAL\\n\")\n sys.exit(1)\n\n logger = logging.getLogger(__name__)\n greenlet_exception_handler = greenlet_exception_logger(logger)\n\n if options.list_commands:\n print(\"Available Users:\")\n for name in user_classes:\n print(\" \" + name)\n sys.exit(0)\n\n if not user_classes:\n logger.error(\"No User class found!\")\n sys.exit(1)\n\n # make sure specified User exists\n if options.user_classes:\n missing = set(options.user_classes) - set(user_classes.keys())\n if missing:\n logger.error(\"Unknown User(s): %s\\n\" % (\", \".join(missing)))\n sys.exit(1)\n else:\n names = set(options.user_classes) & set(user_classes.keys())\n user_classes = [user_classes[n] for n in names]\n else:\n # list() call is needed to consume the dict_view object in Python 3\n user_classes = list(user_classes.values())\n\n if os.name != \"nt\":\n try:\n import resource\n\n minimum_open_file_limit = 10000\n current_open_file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]\n\n if current_open_file_limit < minimum_open_file_limit:\n # Increasing the limit to 10000 within a running process should work on at least MacOS.\n # It does not work on all OS:es, but we should be no worse off for trying.\n resource.setrlimit(resource.RLIMIT_NOFILE, [minimum_open_file_limit, resource.RLIM_INFINITY])\n except BaseException:\n logger.warning(\n (\n f\"System open file limit '{current_open_file_limit}' is below minimum setting '{minimum_open_file_limit}'. \"\n \"It's not high enough for load testing, and the OS didn't allow locust to increase it by itself. 
\"\n \"See https://github.com/locustio/locust/wiki/Installation#increasing-maximum-number-of-open-files-limit for more info.\"\n )\n )\n\n # create locust Environment\n environment = create_environment(user_classes, options, events=locust.events, shape_class=shape_class)\n\n if shape_class and (options.num_users or options.spawn_rate or options.step_load):\n logger.error(\n \"The specified locustfile contains a shape class but a conflicting argument was specified: users, spawn-rate or step-load\"\n )\n sys.exit(1)\n\n if options.show_task_ratio:\n print(\"\\n Task ratio per User class\")\n print(\"-\" * 80)\n print_task_ratio(user_classes)\n print(\"\\n Total task ratio\")\n print(\"-\" * 80)\n print_task_ratio(user_classes, total=True)\n sys.exit(0)\n if options.show_task_ratio_json:\n from json import dumps\n\n task_data = {\n \"per_class\": get_task_ratio_dict(user_classes),\n \"total\": get_task_ratio_dict(user_classes, total=True),\n }\n print(dumps(task_data))\n sys.exit(0)\n\n if options.step_time:\n if not options.step_load:\n logger.error(\"The --step-time argument can only be used together with --step-load\")\n sys.exit(1)\n if options.worker:\n logger.error(\"--step-time should be specified on the master node, and not on worker nodes\")\n sys.exit(1)\n try:\n options.step_time = parse_timespan(options.step_time)\n except ValueError:\n logger.error(\"Valid --step-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.\")\n sys.exit(1)\n\n if options.master:\n runner = environment.create_master_runner(\n master_bind_host=options.master_bind_host,\n master_bind_port=options.master_bind_port,\n )\n elif options.worker:\n try:\n runner = environment.create_worker_runner(options.master_host, options.master_port)\n except socket.error as e:\n logger.error(\"Failed to connect to the Locust master: %s\", e)\n sys.exit(-1)\n else:\n runner = environment.create_local_runner()\n\n # main_greenlet is pointing to runners.greenlet by default, it will point the web greenlet later if in web mode\n main_greenlet = runner.greenlet\n\n if options.run_time:\n if not options.headless:\n logger.error(\"The --run-time argument can only be used together with --headless\")\n sys.exit(1)\n if options.worker:\n logger.error(\"--run-time should be specified on the master node, and not on worker nodes\")\n sys.exit(1)\n try:\n options.run_time = parse_timespan(options.run_time)\n except ValueError:\n logger.error(\"Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.\")\n sys.exit(1)\n\n def spawn_run_time_limit_greenlet():\n logger.info(\"Run time limit set to %s seconds\" % options.run_time)\n\n def timelimit_stop():\n logger.info(\"Time limit reached. 
Stopping Locust.\")\n runner.quit()\n\n gevent.spawn_later(options.run_time, timelimit_stop).link_exception(greenlet_exception_handler)\n\n if options.csv_prefix:\n stats_csv_writer = StatsCSVFileWriter(\n environment, stats.PERCENTILES_TO_REPORT, options.csv_prefix, options.stats_history_enabled\n )\n else:\n stats_csv_writer = StatsCSV(environment, stats.PERCENTILES_TO_REPORT)\n\n # start Web UI\n if not options.headless and not options.worker:\n # spawn web greenlet\n protocol = \"https\" if options.tls_cert and options.tls_key else \"http\"\n try:\n if options.web_host == \"*\":\n # special check for \"*\" so that we're consistent with --master-bind-host\n web_host = \"\"\n else:\n web_host = options.web_host\n if web_host:\n logger.info(\"Starting web interface at %s://%s:%s\" % (protocol, web_host, options.web_port))\n else:\n logger.info(\n \"Starting web interface at %s://0.0.0.0:%s (accepting connections from all network interfaces)\"\n % (protocol, options.web_port)\n )\n web_ui = environment.create_web_ui(\n host=web_host,\n port=options.web_port,\n auth_credentials=options.web_auth,\n tls_cert=options.tls_cert,\n tls_key=options.tls_key,\n stats_csv_writer=stats_csv_writer,\n )\n except AuthCredentialsError:\n logger.error(\"Credentials supplied with --web-auth should have the format: username:password\")\n sys.exit(1)\n else:\n main_greenlet = web_ui.greenlet\n else:\n web_ui = None\n\n # Fire locust init event which can be used by end-users' code to run setup code that\n # need access to the Environment, Runner or WebUI\n environment.events.init.fire(environment=environment, runner=runner, web_ui=web_ui)\n\n if options.headless:\n # headless mode\n if options.master:\n # wait for worker nodes to connect\n while len(runner.clients.ready) < options.expect_workers:\n logging.info(\n \"Waiting for workers to be ready, %s of %s connected\",\n len(runner.clients.ready),\n options.expect_workers,\n )\n time.sleep(1)\n if not options.worker:\n # apply headless mode defaults\n if options.num_users is None:\n options.num_users = 1\n if options.spawn_rate is None:\n options.spawn_rate = 1\n if options.step_users is None:\n options.step_users = 1\n\n # start the test\n if options.step_time:\n runner.start_stepload(options.num_users, options.spawn_rate, options.step_users, options.step_time)\n if environment.shape_class:\n environment.runner.start_shape()\n else:\n runner.start(options.num_users, options.spawn_rate)\n\n if options.run_time:\n spawn_run_time_limit_greenlet()\n\n stats_printer_greenlet = None\n if not options.only_summary and (options.print_stats or (options.headless and not options.worker)):\n # spawn stats printing greenlet\n stats_printer_greenlet = gevent.spawn(stats_printer(runner.stats))\n stats_printer_greenlet.link_exception(greenlet_exception_handler)\n\n if options.csv_prefix:\n gevent.spawn(stats_csv_writer.stats_writer).link_exception(greenlet_exception_handler)\n\n gevent.spawn(stats_history, runner)\n\n def shutdown():\n \"\"\"\n Shut down locust by firing quitting event, printing/writing stats and exiting\n \"\"\"\n logger.info(\"Running teardowns...\")\n environment.events.quitting.fire(environment=environment, reverse=True)\n\n # determine the process exit code\n if log.unhandled_greenlet_exception:\n code = 2\n elif environment.process_exit_code is not None:\n code = environment.process_exit_code\n elif len(runner.errors) or len(runner.exceptions):\n code = options.exit_code_on_error\n else:\n code = 0\n\n logger.info(\"Shutting down (exit code %s), bye.\" 
% code)\n if stats_printer_greenlet is not None:\n stats_printer_greenlet.kill(block=False)\n logger.info(\"Cleaning up runner...\")\n if runner is not None:\n runner.quit()\n\n print_stats(runner.stats, current=False)\n print_percentile_stats(runner.stats)\n\n print_error_report(runner.stats)\n\n sys.exit(code)\n\n # install SIGTERM handler\n def sig_term_handler():\n logger.info(\"Got SIGTERM signal\")\n shutdown()\n\n gevent.signal_handler(signal.SIGTERM, sig_term_handler)\n\n try:\n logger.info(\"Starting Locust %s\" % version)\n main_greenlet.join()\n shutdown()\n except KeyboardInterrupt:\n shutdown()\n", "path": "locust/main.py" } ]
[ { "content": "import inspect\nimport logging\nimport os\nimport importlib\nimport signal\nimport socket\nimport sys\nimport time\n\nimport gevent\n\nimport locust\n\nfrom . import log\nfrom .argument_parser import parse_locustfile_option, parse_options\nfrom .env import Environment\nfrom .log import setup_logging, greenlet_exception_logger\nfrom . import stats\nfrom .stats import print_error_report, print_percentile_stats, print_stats, stats_printer, stats_history\nfrom .stats import StatsCSV, StatsCSVFileWriter\nfrom .user import User\nfrom .user.inspectuser import get_task_ratio_dict, print_task_ratio\nfrom .util.timespan import parse_timespan\nfrom .exception import AuthCredentialsError\nfrom .shape import LoadTestShape\n\n\nversion = locust.__version__\n\n\ndef is_user_class(item):\n \"\"\"\n Check if a variable is a runnable (non-abstract) User class\n \"\"\"\n return bool(inspect.isclass(item) and issubclass(item, User) and item.abstract is False)\n\n\ndef is_shape_class(item):\n \"\"\"\n Check if a class is a LoadTestShape\n \"\"\"\n return bool(\n inspect.isclass(item) and issubclass(item, LoadTestShape) and item.__dict__[\"__module__\"] != \"locust.shape\"\n )\n\n\ndef load_locustfile(path):\n \"\"\"\n Import given locustfile path and return (docstring, callables).\n\n Specifically, the locustfile's ``__doc__`` attribute (a string) and a\n dictionary of ``{'name': callable}`` containing all callables which pass\n the \"is a Locust\" test.\n \"\"\"\n\n # Start with making sure the current working dir is in the sys.path\n sys.path.insert(0, os.getcwd())\n # Get directory and locustfile name\n directory, locustfile = os.path.split(path)\n # If the directory isn't in the PYTHONPATH, add it so our import will work\n added_to_path = False\n index = None\n if directory not in sys.path:\n sys.path.insert(0, directory)\n added_to_path = True\n # If the directory IS in the PYTHONPATH, move it to the front temporarily,\n # otherwise other locustfiles -- like Locusts's own -- may scoop the intended\n # one.\n else:\n i = sys.path.index(directory)\n if i != 0:\n # Store index for later restoration\n index = i\n # Add to front, then remove from original position\n sys.path.insert(0, directory)\n del sys.path[i + 1]\n # Perform the import\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n # Remove directory from path if we added it ourselves (just to be neat)\n if added_to_path:\n del sys.path[0]\n # Put back in original index if we moved it\n if index is not None:\n sys.path.insert(index + 1, directory)\n del sys.path[0]\n # Return our two-tuple\n user_classes = {name: value for name, value in vars(imported).items() if is_user_class(value)}\n\n # Find shape class, if any, return it\n shape_classes = [value for name, value in vars(imported).items() if is_shape_class(value)]\n if shape_classes:\n shape_class = shape_classes[0]()\n else:\n shape_class = None\n\n return imported.__doc__, user_classes, shape_class\n\n\ndef create_environment(user_classes, options, events=None, shape_class=None):\n \"\"\"\n Create an Environment instance from options\n \"\"\"\n return Environment(\n user_classes=user_classes,\n shape_class=shape_class,\n tags=options.tags,\n exclude_tags=options.exclude_tags,\n events=events,\n host=options.host,\n reset_stats=options.reset_stats,\n step_load=options.step_load,\n stop_timeout=options.stop_timeout,\n parsed_options=options,\n )\n\n\ndef main():\n # find specified locustfile and make sure it 
exists, using a very simplified\n # command line parser that is only used to parse the -f option\n locustfile = parse_locustfile_option()\n\n # import the locustfile\n docstring, user_classes, shape_class = load_locustfile(locustfile)\n\n # parse all command line options\n options = parse_options()\n\n if options.slave or options.expect_slaves:\n sys.stderr.write(\"The --slave/--expect-slaves parameters have been renamed --worker/--expect-workers\\n\")\n sys.exit(1)\n\n if options.hatch_rate:\n sys.stderr.write(\"[DEPRECATED] The --hatch-rate parameter has been renamed --spawn-rate\\n\")\n options.spawn_rate = options.hatch_rate\n\n # setup logging\n if not options.skip_log_setup:\n if options.loglevel.upper() in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]:\n setup_logging(options.loglevel, options.logfile)\n else:\n sys.stderr.write(\"Invalid --loglevel. Valid values are: DEBUG/INFO/WARNING/ERROR/CRITICAL\\n\")\n sys.exit(1)\n\n logger = logging.getLogger(__name__)\n greenlet_exception_handler = greenlet_exception_logger(logger)\n\n if options.list_commands:\n print(\"Available Users:\")\n for name in user_classes:\n print(\" \" + name)\n sys.exit(0)\n\n if not user_classes:\n logger.error(\"No User class found!\")\n sys.exit(1)\n\n # make sure specified User exists\n if options.user_classes:\n missing = set(options.user_classes) - set(user_classes.keys())\n if missing:\n logger.error(\"Unknown User(s): %s\\n\" % (\", \".join(missing)))\n sys.exit(1)\n else:\n names = set(options.user_classes) & set(user_classes.keys())\n user_classes = [user_classes[n] for n in names]\n else:\n # list() call is needed to consume the dict_view object in Python 3\n user_classes = list(user_classes.values())\n\n if os.name != \"nt\" and not options.master:\n\n try:\n import resource\n\n minimum_open_file_limit = 10000\n current_open_file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]\n\n if current_open_file_limit < minimum_open_file_limit:\n # Increasing the limit to 10000 within a running process should work on at least MacOS.\n # It does not work on all OS:es, but we should be no worse off for trying.\n resource.setrlimit(resource.RLIMIT_NOFILE, [minimum_open_file_limit, resource.RLIM_INFINITY])\n except BaseException:\n logger.warning(\n (\n f\"System open file limit '{current_open_file_limit}' is below minimum setting '{minimum_open_file_limit}'. \"\n \"It's not high enough for load testing, and the OS didn't allow locust to increase it by itself. 
\"\n \"See https://github.com/locustio/locust/wiki/Installation#increasing-maximum-number-of-open-files-limit for more info.\"\n )\n )\n\n # create locust Environment\n environment = create_environment(user_classes, options, events=locust.events, shape_class=shape_class)\n\n if shape_class and (options.num_users or options.spawn_rate or options.step_load):\n logger.error(\n \"The specified locustfile contains a shape class but a conflicting argument was specified: users, spawn-rate or step-load\"\n )\n sys.exit(1)\n\n if options.show_task_ratio:\n print(\"\\n Task ratio per User class\")\n print(\"-\" * 80)\n print_task_ratio(user_classes)\n print(\"\\n Total task ratio\")\n print(\"-\" * 80)\n print_task_ratio(user_classes, total=True)\n sys.exit(0)\n if options.show_task_ratio_json:\n from json import dumps\n\n task_data = {\n \"per_class\": get_task_ratio_dict(user_classes),\n \"total\": get_task_ratio_dict(user_classes, total=True),\n }\n print(dumps(task_data))\n sys.exit(0)\n\n if options.step_time:\n if not options.step_load:\n logger.error(\"The --step-time argument can only be used together with --step-load\")\n sys.exit(1)\n if options.worker:\n logger.error(\"--step-time should be specified on the master node, and not on worker nodes\")\n sys.exit(1)\n try:\n options.step_time = parse_timespan(options.step_time)\n except ValueError:\n logger.error(\"Valid --step-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.\")\n sys.exit(1)\n\n if options.master:\n runner = environment.create_master_runner(\n master_bind_host=options.master_bind_host,\n master_bind_port=options.master_bind_port,\n )\n elif options.worker:\n try:\n runner = environment.create_worker_runner(options.master_host, options.master_port)\n except socket.error as e:\n logger.error(\"Failed to connect to the Locust master: %s\", e)\n sys.exit(-1)\n else:\n runner = environment.create_local_runner()\n\n # main_greenlet is pointing to runners.greenlet by default, it will point the web greenlet later if in web mode\n main_greenlet = runner.greenlet\n\n if options.run_time:\n if not options.headless:\n logger.error(\"The --run-time argument can only be used together with --headless\")\n sys.exit(1)\n if options.worker:\n logger.error(\"--run-time should be specified on the master node, and not on worker nodes\")\n sys.exit(1)\n try:\n options.run_time = parse_timespan(options.run_time)\n except ValueError:\n logger.error(\"Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.\")\n sys.exit(1)\n\n def spawn_run_time_limit_greenlet():\n logger.info(\"Run time limit set to %s seconds\" % options.run_time)\n\n def timelimit_stop():\n logger.info(\"Time limit reached. 
Stopping Locust.\")\n runner.quit()\n\n gevent.spawn_later(options.run_time, timelimit_stop).link_exception(greenlet_exception_handler)\n\n if options.csv_prefix:\n stats_csv_writer = StatsCSVFileWriter(\n environment, stats.PERCENTILES_TO_REPORT, options.csv_prefix, options.stats_history_enabled\n )\n else:\n stats_csv_writer = StatsCSV(environment, stats.PERCENTILES_TO_REPORT)\n\n # start Web UI\n if not options.headless and not options.worker:\n # spawn web greenlet\n protocol = \"https\" if options.tls_cert and options.tls_key else \"http\"\n try:\n if options.web_host == \"*\":\n # special check for \"*\" so that we're consistent with --master-bind-host\n web_host = \"\"\n else:\n web_host = options.web_host\n if web_host:\n logger.info(\"Starting web interface at %s://%s:%s\" % (protocol, web_host, options.web_port))\n else:\n logger.info(\n \"Starting web interface at %s://0.0.0.0:%s (accepting connections from all network interfaces)\"\n % (protocol, options.web_port)\n )\n web_ui = environment.create_web_ui(\n host=web_host,\n port=options.web_port,\n auth_credentials=options.web_auth,\n tls_cert=options.tls_cert,\n tls_key=options.tls_key,\n stats_csv_writer=stats_csv_writer,\n )\n except AuthCredentialsError:\n logger.error(\"Credentials supplied with --web-auth should have the format: username:password\")\n sys.exit(1)\n else:\n main_greenlet = web_ui.greenlet\n else:\n web_ui = None\n\n # Fire locust init event which can be used by end-users' code to run setup code that\n # need access to the Environment, Runner or WebUI\n environment.events.init.fire(environment=environment, runner=runner, web_ui=web_ui)\n\n if options.headless:\n # headless mode\n if options.master:\n # wait for worker nodes to connect\n while len(runner.clients.ready) < options.expect_workers:\n logging.info(\n \"Waiting for workers to be ready, %s of %s connected\",\n len(runner.clients.ready),\n options.expect_workers,\n )\n time.sleep(1)\n if not options.worker:\n # apply headless mode defaults\n if options.num_users is None:\n options.num_users = 1\n if options.spawn_rate is None:\n options.spawn_rate = 1\n if options.step_users is None:\n options.step_users = 1\n\n # start the test\n if options.step_time:\n runner.start_stepload(options.num_users, options.spawn_rate, options.step_users, options.step_time)\n if environment.shape_class:\n environment.runner.start_shape()\n else:\n runner.start(options.num_users, options.spawn_rate)\n\n if options.run_time:\n spawn_run_time_limit_greenlet()\n\n stats_printer_greenlet = None\n if not options.only_summary and (options.print_stats or (options.headless and not options.worker)):\n # spawn stats printing greenlet\n stats_printer_greenlet = gevent.spawn(stats_printer(runner.stats))\n stats_printer_greenlet.link_exception(greenlet_exception_handler)\n\n if options.csv_prefix:\n gevent.spawn(stats_csv_writer.stats_writer).link_exception(greenlet_exception_handler)\n\n gevent.spawn(stats_history, runner)\n\n def shutdown():\n \"\"\"\n Shut down locust by firing quitting event, printing/writing stats and exiting\n \"\"\"\n logger.info(\"Running teardowns...\")\n environment.events.quitting.fire(environment=environment, reverse=True)\n\n # determine the process exit code\n if log.unhandled_greenlet_exception:\n code = 2\n elif environment.process_exit_code is not None:\n code = environment.process_exit_code\n elif len(runner.errors) or len(runner.exceptions):\n code = options.exit_code_on_error\n else:\n code = 0\n\n logger.info(\"Shutting down (exit code %s), bye.\" 
% code)\n if stats_printer_greenlet is not None:\n stats_printer_greenlet.kill(block=False)\n logger.info(\"Cleaning up runner...\")\n if runner is not None:\n runner.quit()\n\n print_stats(runner.stats, current=False)\n print_percentile_stats(runner.stats)\n\n print_error_report(runner.stats)\n\n sys.exit(code)\n\n # install SIGTERM handler\n def sig_term_handler():\n logger.info(\"Got SIGTERM signal\")\n shutdown()\n\n gevent.signal_handler(signal.SIGTERM, sig_term_handler)\n\n try:\n logger.info(\"Starting Locust %s\" % version)\n main_greenlet.join()\n shutdown()\n except KeyboardInterrupt:\n shutdown()\n", "path": "locust/main.py" } ]
diff --git a/locust/main.py b/locust/main.py index ece30d3763..801ae69878 100644 --- a/locust/main.py +++ b/locust/main.py @@ -168,7 +168,8 @@ def main(): # list() call is needed to consume the dict_view object in Python 3 user_classes = list(user_classes.values()) - if os.name != "nt": + if os.name != "nt" and not options.master: + try: import resource
python-pillow__Pillow-6481
PSD incorrectly loaded ### What did you do? I opened the TIFF in Pillow and converted it to JPG. ### What did you expect to happen? The JPG image to look the same as the original TIFF. ### What actually happened? The converted JPG looks malformed and has messed up colors. ### What are your OS, Python and Pillow versions? * OS: Linux * Python: 3.10.5 * Pillow: 9.1.1 (also tested -git) ```python >>> img = Image.open("3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif") >>> out_img = img.convert("RGB") >>> out_img.save("converted.jpg", quality=95) ``` [original image](https://api.collectie.gent/storage/v1/download/3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif) (beware, 274MB) [converted image](https://api.collectie.gent/storage/v1/download/3a029a4f48b480211286486a6a1f0f0b-transcode-OA_535_161_17_F_TE.jpg) Is it okay to report this here or should I report this to the appropriate library (libtiff, jpeg-turbo, ?)
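A quick way to narrow down whether the colour corruption comes from Pillow's channel interpretation rather than from libtiff/libjpeg is to check what Pillow reports for the source file before the JPEG step. The snippet below is a hypothetical diagnostic, not part of the original report; it uses only standard Pillow calls (`Image.open`, `mode`, `getbands`, `convert`), the filename is the one linked above, and the output filename is a placeholder.

```python
from PIL import Image

# Hypothetical diagnostic: see how Pillow interprets the source before converting.
with Image.open("3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif") as img:
    print(img.format, img.mode, img.size, img.getbands())

    # If an extra (e.g. alpha/extra-sample) channel is present, dropping it
    # explicitly before the RGB conversion helps rule out channel-mapping
    # problems in the conversion step itself.
    if "A" in img.getbands():
        rgb = img.convert("RGBA").convert("RGB")
    else:
        rgb = img.convert("RGB")
    rgb.save("converted-diagnostic.jpg", quality=95)
```

If this already prints an unexpected mode or band count, the problem lies in how the source is decoded rather than in the JPEG encoder.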
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . import Image, ImageFile, ImagePalette\nfrom ._binary import i8\nfrom ._binary import i16be as i16\nfrom ._binary import i32be as i32\nfrom ._binary import si16be as si16\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s, 4) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s, 22)\n psd_channels = i16(s, 12)\n psd_mode = i16(s, 24)\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n\n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))\n self.layers = _layerinfo(_layer_data, size)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self._fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self._fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n\ndef _layerinfo(fp, ct_bytes):\n # read layerinfo block\n layers = []\n\n def read(size):\n return ImageFile._safe_read(fp, size)\n\n ct = si16(read(2))\n\n # sanity check\n if ct_bytes < (abs(ct) * 20):\n raise SyntaxError(\"Layer block too short for 
number of layers requested\")\n\n for _ in range(abs(ct)):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n mode = []\n ct_types = i16(read(2))\n types = list(range(ct_types))\n if len(types) > 4:\n continue\n\n for _ in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n read(4) # size\n\n # figure out the image mode\n mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n if size:\n data_end = fp.tell() + size\n\n length = i32(read(4))\n if length:\n fp.seek(length - 16, io.SEEK_CUR)\n\n length = i32(read(4))\n if length:\n fp.seek(length, io.SEEK_CUR)\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n\n fp.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(fp, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount, i)\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py" } ]
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . import Image, ImageFile, ImagePalette\nfrom ._binary import i8\nfrom ._binary import i16be as i16\nfrom ._binary import i32be as i32\nfrom ._binary import si16be as si16\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s, 4) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s, 22)\n psd_channels = i16(s, 12)\n psd_mode = i16(s, 24)\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n if mode == \"RGB\" and psd_channels == 4:\n mode = \"RGBA\"\n channels = 4\n\n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))\n self.layers = _layerinfo(_layer_data, size)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self._fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self._fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n\ndef _layerinfo(fp, ct_bytes):\n # read layerinfo block\n layers = []\n\n def read(size):\n return ImageFile._safe_read(fp, size)\n\n ct = si16(read(2))\n\n # sanity check\n 
if ct_bytes < (abs(ct) * 20):\n raise SyntaxError(\"Layer block too short for number of layers requested\")\n\n for _ in range(abs(ct)):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n mode = []\n ct_types = i16(read(2))\n types = list(range(ct_types))\n if len(types) > 4:\n continue\n\n for _ in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n read(4) # size\n\n # figure out the image mode\n mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n if size:\n data_end = fp.tell() + size\n\n length = i32(read(4))\n if length:\n fp.seek(length - 16, io.SEEK_CUR)\n\n length = i32(read(4))\n if length:\n fp.seek(length, io.SEEK_CUR)\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n\n fp.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(fp, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount, i)\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py" } ]
diff --git a/Tests/images/rgba.psd b/Tests/images/rgba.psd new file mode 100644 index 00000000000..45fb7c3cca0 Binary files /dev/null and b/Tests/images/rgba.psd differ diff --git a/Tests/test_file_psd.py b/Tests/test_file_psd.py index b4b5b7a0c65..4f934375c7c 100644 --- a/Tests/test_file_psd.py +++ b/Tests/test_file_psd.py @@ -4,7 +4,7 @@ from PIL import Image, PsdImagePlugin -from .helper import assert_image_similar, hopper, is_pypy +from .helper import assert_image_equal_tofile, assert_image_similar, hopper, is_pypy test_file = "Tests/images/hopper.psd" @@ -107,6 +107,11 @@ def test_open_after_exclusive_load(): im.load() +def test_rgba(): + with Image.open("Tests/images/rgba.psd") as im: + assert_image_equal_tofile(im, "Tests/images/imagedraw_square.png") + + def test_icc_profile(): with Image.open(test_file) as im: assert "icc_profile" in im.info diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py index 04c2e4fe379..bd10e3b95dd 100644 --- a/src/PIL/PsdImagePlugin.py +++ b/src/PIL/PsdImagePlugin.py @@ -75,6 +75,9 @@ def _open(self): if channels > psd_channels: raise OSError("not enough channels") + if mode == "RGB" and psd_channels == 4: + mode = "RGBA" + channels = 4 self.mode = mode self._size = i32(s, 18), i32(s, 14)
kivy__python-for-android-2447
tarfile failure with long user ID Note this is a follow-on from the same effort which produced #1012, but I think the bugs are independent. I switched to Python 3 and the CrystaxNDK in an attempt to avoid NDK build errors, and got far enough to run into this. While the dist's build.py is building its tar archive, I get `ValueError("overflow in number field")`: ``` Traceback (most recent call last): File "/usr/local/bin/p4a", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python3.5/site-packages/pythonforandroid/toolchain.py", line 754, in main ToolchainCL() File "/usr/local/lib/python3.5/site-packages/pythonforandroid/toolchain.py", line 343, in __init__ getattr(self, command_method_name)(unknown) File "/usr/local/lib/python3.5/site-packages/pythonforandroid/toolchain.py", line 94, in wrapper_func func(self, args) File "/usr/local/lib/python3.5/site-packages/pythonforandroid/toolchain.py", line 589, in apk build_args = build.parse_args(args) File "/Users/testuser/.python-for-android/dists/unnamed_dist_1/build.py", line 488, in parse_args make_package(args) File "/Users/testuser/.python-for-android/dists/unnamed_dist_1/build.py", line 248, in make_package make_tar('assets/private.mp3', tar_dirs, args.ignore_path) File "/Users/testuser/.python-for-android/dists/unnamed_dist_1/build.py", line 197, in make_tar tf.add(fn, afn) File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 1938, in add self.addfile(tarinfo, f) File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 1960, in addfile buf = tarinfo.tobuf(self.format, self.encoding, self.errors) File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 802, in tobuf return self.create_ustar_header(info, encoding, errors) File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 821, in create_ustar_header return self._create_header(info, USTAR_FORMAT, encoding, errors) File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 916, in _create_header itn(info.get("uid", 0), 8, format), File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/tarfile.py", line 211, in itn raise ValueError("overflow in number field") ValueError: overflow in number field ``` It seems the overflow is in the "uid" field, and I notice that my UID on my Mac (part of a corporate domain) is pretty high at 1730938684. Looking at the package in [`tarfile.py`](https://github.com/python/cpython/blob/master/Lib/tarfile.py#L218) it seems this is a limitation of the USTAR format which the python-4-android builder is selecting specifically. The default GNU format can support much larger numbers, though I presume the unpacking code used in the Android package might not support it? I was able to work around this problem by running as a different local user account, so long as that account owns both the .python-for-android directory and the source input directory. Since UID information isn't relevant when unpacking on Android it would be nice to discard it and make this not matter, if using the GNU tar format isn't an option. It doesn't look like the tarfile package gives the ability to do this directly, but you could chmod all the files to root (in a temporary location) before compressing them.
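For reference, the standard library does offer a hook for exactly this: `TarFile.add()` accepts a `filter` callable that can rewrite the ownership fields of each member before its header is written, which sidesteps the USTAR uid/gid limit without having to chown files on disk first. A minimal sketch under those assumptions (the archive name and source directory below are placeholders, not paths from this project):

```python
import tarfile

def strip_owner(tinfo):
    # Zero out ownership so the USTAR header never sees a large host UID/GID;
    # ownership is irrelevant when the archive is unpacked on Android.
    tinfo.uid = tinfo.gid = 0
    tinfo.uname = tinfo.gname = ""
    return tinfo

# Placeholder paths for illustration only.
with tarfile.open("private.tar.gz", "w:gz", format=tarfile.USTAR_FORMAT) as tf:
    tf.add("app_source_dir", arcname=".", filter=strip_owner)
```

Keeping USTAR format preserves compatibility with the unpacker on the device, while the filter keeps the numeric header fields within range: the USTAR uid field holds only seven octal digits (values up to 2097151), which is why a large corporate UID such as 1730938684 overflows it.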
[ { "content": "#!/usr/bin/env python3\n\nfrom gzip import GzipFile\nimport hashlib\nimport json\nfrom os.path import (\n dirname, join, isfile, realpath,\n relpath, split, exists, basename\n)\nfrom os import environ, listdir, makedirs, remove\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport time\n\nfrom distutils.version import LooseVersion\nfrom fnmatch import fnmatch\nimport jinja2\n\n\ndef get_dist_info_for(key, error_if_missing=True):\n try:\n with open(join(dirname(__file__), 'dist_info.json'), 'r') as fileh:\n info = json.load(fileh)\n value = info[key]\n except (OSError, KeyError) as e:\n if not error_if_missing:\n return None\n print(\"BUILD FAILURE: Couldn't extract the key `\" + key + \"` \" +\n \"from dist_info.json: \" + str(e))\n sys.exit(1)\n return value\n\n\ndef get_hostpython():\n return get_dist_info_for('hostpython')\n\n\ndef get_python_version():\n return get_dist_info_for('python_version')\n\n\ndef get_bootstrap_name():\n return get_dist_info_for('bootstrap')\n\n\nif os.name == 'nt':\n ANDROID = 'android.bat'\n ANT = 'ant.bat'\nelse:\n ANDROID = 'android'\n ANT = 'ant'\n\ncurdir = dirname(__file__)\n\nPYTHON = get_hostpython()\nPYTHON_VERSION = get_python_version()\nif PYTHON is not None and not exists(PYTHON):\n PYTHON = None\n\nBLACKLIST_PATTERNS = [\n # code versionning\n '^*.hg/*',\n '^*.git/*',\n '^*.bzr/*',\n '^*.svn/*',\n\n # temp files\n '~',\n '*.bak',\n '*.swp',\n]\n# pyc/py\nif PYTHON is not None:\n BLACKLIST_PATTERNS.append('*.py')\n\nWHITELIST_PATTERNS = []\nif get_bootstrap_name() in ('sdl2', 'webview', 'service_only'):\n WHITELIST_PATTERNS.append('pyconfig.h')\n\npython_files = []\n\n\nenvironment = jinja2.Environment(loader=jinja2.FileSystemLoader(\n join(curdir, 'templates')))\n\n\nDEFAULT_PYTHON_ACTIVITY_JAVA_CLASS = 'org.kivy.android.PythonActivity'\nDEFAULT_PYTHON_SERVICE_JAVA_CLASS = 'org.kivy.android.PythonService'\n\n\ndef ensure_dir(path):\n if not exists(path):\n makedirs(path)\n\n\ndef render(template, dest, **kwargs):\n '''Using jinja2, render `template` to the filename `dest`, supplying the\n\n keyword arguments as template parameters.\n '''\n\n dest_dir = dirname(dest)\n if dest_dir and not exists(dest_dir):\n makedirs(dest_dir)\n\n template = environment.get_template(template)\n text = template.render(**kwargs)\n\n f = open(dest, 'wb')\n f.write(text.encode('utf-8'))\n f.close()\n\n\ndef is_whitelist(name):\n return match_filename(WHITELIST_PATTERNS, name)\n\n\ndef is_blacklist(name):\n if is_whitelist(name):\n return False\n return match_filename(BLACKLIST_PATTERNS, name)\n\n\ndef match_filename(pattern_list, name):\n for pattern in pattern_list:\n if pattern.startswith('^'):\n pattern = pattern[1:]\n else:\n pattern = '*/' + pattern\n if fnmatch(name, pattern):\n return True\n\n\ndef listfiles(d):\n basedir = d\n subdirlist = []\n for item in os.listdir(d):\n fn = join(d, item)\n if isfile(fn):\n yield fn\n else:\n subdirlist.append(join(basedir, item))\n for subdir in subdirlist:\n for fn in listfiles(subdir):\n yield fn\n\n\ndef make_tar(tfn, source_dirs, ignore_path=[], optimize_python=True):\n '''\n Make a zip file `fn` from the contents of source_dis.\n '''\n\n # selector function\n def select(fn):\n rfn = realpath(fn)\n for p in ignore_path:\n if p.endswith('/'):\n p = p[:-1]\n if rfn.startswith(p):\n return False\n if rfn in python_files:\n return False\n return not is_blacklist(fn)\n\n def clean(tinfo):\n \"\"\"cleaning function (for reproducible builds)\"\"\"\n 
tinfo.uid = tinfo.gid = 0\n tinfo.uname = tinfo.gname = ''\n tinfo.mtime = 0\n return tinfo\n\n # get the files and relpath file of all the directory we asked for\n files = []\n for sd in source_dirs:\n sd = realpath(sd)\n compile_dir(sd, optimize_python=optimize_python)\n files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd)\n if select(x)]\n files.sort() # deterministic\n\n # create tar.gz of thoses files\n gf = GzipFile(tfn, 'wb', mtime=0) # deterministic\n tf = tarfile.open(None, 'w', gf, format=tarfile.USTAR_FORMAT)\n dirs = []\n for fn, afn in files:\n dn = dirname(afn)\n if dn not in dirs:\n # create every dirs first if not exist yet\n d = ''\n for component in split(dn):\n d = join(d, component)\n if d.startswith('/'):\n d = d[1:]\n if d == '' or d in dirs:\n continue\n dirs.append(d)\n tinfo = tarfile.TarInfo(d)\n tinfo.type = tarfile.DIRTYPE\n tf.addfile(tinfo)\n\n # put the file\n tf.add(fn, afn, filter=clean)\n tf.close()\n gf.close()\n\n\ndef compile_dir(dfn, optimize_python=True):\n '''\n Compile *.py in directory `dfn` to *.pyo\n '''\n\n if PYTHON is None:\n return\n\n if int(PYTHON_VERSION[0]) >= 3:\n args = [PYTHON, '-m', 'compileall', '-b', '-f', dfn]\n else:\n args = [PYTHON, '-m', 'compileall', '-f', dfn]\n if optimize_python:\n # -OO = strip docstrings\n args.insert(1, '-OO')\n return_code = subprocess.call(args)\n\n if return_code != 0:\n print('Error while running \"{}\"'.format(' '.join(args)))\n print('This probably means one of your Python files has a syntax '\n 'error, see logs above')\n exit(1)\n\n\ndef make_package(args):\n # If no launcher is specified, require a main.py/main.pyo:\n if (get_bootstrap_name() != \"sdl\" or args.launcher is None) and \\\n get_bootstrap_name() not in [\"webview\", \"service_library\"]:\n # (webview doesn't need an entrypoint, apparently)\n if args.private is None or (\n not exists(join(realpath(args.private), 'main.py')) and\n not exists(join(realpath(args.private), 'main.pyo'))):\n print('''BUILD FAILURE: No main.py(o) found in your app directory. This\nfile must exist to act as the entry point for you app. 
If your app is\nstarted by a file with a different name, rename it to main.py or add a\nmain.py that loads it.''')\n sys.exit(1)\n\n assets_dir = \"src/main/assets\"\n\n # Delete the old assets.\n shutil.rmtree(assets_dir, ignore_errors=True)\n ensure_dir(assets_dir)\n\n # Add extra environment variable file into tar-able directory:\n env_vars_tarpath = tempfile.mkdtemp(prefix=\"p4a-extra-env-\")\n with open(os.path.join(env_vars_tarpath, \"p4a_env_vars.txt\"), \"w\") as f:\n if hasattr(args, \"window\"):\n f.write(\"P4A_IS_WINDOWED=\" + str(args.window) + \"\\n\")\n if hasattr(args, \"orientation\"):\n f.write(\"P4A_ORIENTATION=\" + str(args.orientation) + \"\\n\")\n f.write(\"P4A_NUMERIC_VERSION=\" + str(args.numeric_version) + \"\\n\")\n f.write(\"P4A_MINSDK=\" + str(args.min_sdk_version) + \"\\n\")\n\n # Package up the private data (public not supported).\n use_setup_py = get_dist_info_for(\"use_setup_py\",\n error_if_missing=False) is True\n tar_dirs = [env_vars_tarpath]\n _temp_dirs_to_clean = []\n try:\n if args.private:\n if not use_setup_py or (\n not exists(join(args.private, \"setup.py\")) and\n not exists(join(args.private, \"pyproject.toml\"))\n ):\n print('No setup.py/pyproject.toml used, copying '\n 'full private data into .apk.')\n tar_dirs.append(args.private)\n else:\n print(\"Copying main.py's ONLY, since other app data is \"\n \"expected in site-packages.\")\n main_py_only_dir = tempfile.mkdtemp()\n _temp_dirs_to_clean.append(main_py_only_dir)\n\n # Check all main.py files we need to copy:\n copy_paths = [\"main.py\", join(\"service\", \"main.py\")]\n for copy_path in copy_paths:\n variants = [\n copy_path,\n copy_path.partition(\".\")[0] + \".pyc\",\n copy_path.partition(\".\")[0] + \".pyo\",\n ]\n # Check in all variants with all possible endings:\n for variant in variants:\n if exists(join(args.private, variant)):\n # Make sure surrounding directly exists:\n dir_path = os.path.dirname(variant)\n if (len(dir_path) > 0 and\n not exists(\n join(main_py_only_dir, dir_path)\n )):\n os.mkdir(join(main_py_only_dir, dir_path))\n # Copy actual file:\n shutil.copyfile(\n join(args.private, variant),\n join(main_py_only_dir, variant),\n )\n\n # Append directory with all main.py's to result apk paths:\n tar_dirs.append(main_py_only_dir)\n for python_bundle_dir in ('private', '_python_bundle'):\n if exists(python_bundle_dir):\n tar_dirs.append(python_bundle_dir)\n if get_bootstrap_name() == \"webview\":\n tar_dirs.append('webview_includes')\n\n for asset in args.assets:\n asset_src, asset_dest = asset.split(\":\")\n if isfile(realpath(asset_src)):\n ensure_dir(dirname(join(assets_dir, asset_dest)))\n shutil.copy(realpath(asset_src), join(assets_dir, asset_dest))\n else:\n shutil.copytree(realpath(asset_src), join(assets_dir, asset_dest))\n\n if args.private or args.launcher:\n make_tar(\n join(assets_dir, 'private.mp3'), tar_dirs, args.ignore_path,\n optimize_python=args.optimize_python)\n finally:\n for directory in _temp_dirs_to_clean:\n shutil.rmtree(directory)\n\n # Remove extra env vars tar-able directory:\n shutil.rmtree(env_vars_tarpath)\n\n # Prepare some variables for templating process\n res_dir = \"src/main/res\"\n default_icon = 'templates/kivy-icon.png'\n default_presplash = 'templates/kivy-presplash.jpg'\n shutil.copy(\n args.icon or default_icon,\n join(res_dir, 'drawable/icon.png')\n )\n\n if args.enable_androidx:\n shutil.copy('templates/gradle.properties', 'gradle.properties')\n\n if get_bootstrap_name() != \"service_only\":\n lottie_splashscreen = join(res_dir, 
'raw/splashscreen.json')\n if args.presplash_lottie:\n shutil.copy(\n 'templates/lottie.xml',\n join(res_dir, 'layout/lottie.xml')\n )\n ensure_dir(join(res_dir, 'raw'))\n shutil.copy(\n args.presplash_lottie,\n join(res_dir, 'raw/splashscreen.json')\n )\n else:\n if exists(lottie_splashscreen):\n remove(lottie_splashscreen)\n remove(join(res_dir, 'layout/lottie.xml'))\n\n shutil.copy(\n args.presplash or default_presplash,\n join(res_dir, 'drawable/presplash.jpg')\n )\n\n # If extra Java jars were requested, copy them into the libs directory\n jars = []\n if args.add_jar:\n for jarname in args.add_jar:\n if not exists(jarname):\n print('Requested jar does not exist: {}'.format(jarname))\n sys.exit(-1)\n shutil.copy(jarname, 'src/main/libs')\n jars.append(basename(jarname))\n\n # If extra aar were requested, copy them into the libs directory\n aars = []\n if args.add_aar:\n ensure_dir(\"libs\")\n for aarname in args.add_aar:\n if not exists(aarname):\n print('Requested aar does not exists: {}'.format(aarname))\n sys.exit(-1)\n shutil.copy(aarname, 'libs')\n aars.append(basename(aarname).rsplit('.', 1)[0])\n\n versioned_name = (args.name.replace(' ', '').replace('\\'', '') +\n '-' + args.version)\n\n version_code = 0\n if not args.numeric_version:\n # Set version code in format (arch-minsdk-app_version)\n arch = get_dist_info_for(\"archs\")[0]\n arch_dict = {\"x86_64\": \"9\", \"arm64-v8a\": \"8\", \"armeabi-v7a\": \"7\", \"x86\": \"6\"}\n arch_code = arch_dict.get(arch, '1')\n min_sdk = args.min_sdk_version\n for i in args.version.split('.'):\n version_code *= 100\n version_code += int(i)\n args.numeric_version = \"{}{}{}\".format(arch_code, min_sdk, version_code)\n\n if args.intent_filters:\n with open(args.intent_filters) as fd:\n args.intent_filters = fd.read()\n\n if not args.add_activity:\n args.add_activity = []\n\n if not args.activity_launch_mode:\n args.activity_launch_mode = ''\n\n if args.extra_source_dirs:\n esd = []\n for spec in args.extra_source_dirs:\n if ':' in spec:\n specdir, specincludes = spec.split(':')\n print('WARNING: Currently gradle builds only support including source '\n 'directories, so when building using gradle all files in '\n '{} will be included.'.format(specdir))\n else:\n specdir = spec\n specincludes = '**'\n esd.append((realpath(specdir), specincludes))\n args.extra_source_dirs = esd\n else:\n args.extra_source_dirs = []\n\n service = False\n if args.private:\n service_main = join(realpath(args.private), 'service', 'main.py')\n if exists(service_main) or exists(service_main + 'o'):\n service = True\n\n service_names = []\n base_service_class = args.service_class_name.split('.')[-1]\n for sid, spec in enumerate(args.services):\n spec = spec.split(':')\n name = spec[0]\n entrypoint = spec[1]\n options = spec[2:]\n\n foreground = 'foreground' in options\n sticky = 'sticky' in options\n\n service_names.append(name)\n service_target_path =\\\n 'src/main/java/{}/Service{}.java'.format(\n args.package.replace(\".\", \"/\"),\n name.capitalize()\n )\n render(\n 'Service.tmpl.java',\n service_target_path,\n name=name,\n entrypoint=entrypoint,\n args=args,\n foreground=foreground,\n sticky=sticky,\n service_id=sid + 1,\n base_service_class=base_service_class,\n )\n\n # Find the SDK directory and target API\n with open('project.properties', 'r') as fileh:\n target = fileh.read().strip()\n android_api = target.split('-')[1]\n try:\n int(android_api)\n except (ValueError, TypeError):\n raise ValueError(\n \"failed to extract the Android API level from \" +\n 
\"build.properties. expected int, got: '\" +\n str(android_api) + \"'\"\n )\n with open('local.properties', 'r') as fileh:\n sdk_dir = fileh.read().strip()\n sdk_dir = sdk_dir[8:]\n\n # Try to build with the newest available build tools\n ignored = {\".DS_Store\", \".ds_store\"}\n build_tools_versions = [x for x in listdir(join(sdk_dir, 'build-tools')) if x not in ignored]\n build_tools_versions = sorted(build_tools_versions,\n key=LooseVersion)\n build_tools_version = build_tools_versions[-1]\n\n # Folder name for launcher (used by SDL2 bootstrap)\n url_scheme = 'kivy'\n\n # Copy backup rules file if specified and update the argument\n if args.backup_rules:\n res_xml_dir = join(res_dir, 'xml')\n ensure_dir(res_xml_dir)\n shutil.copy(join(args.private, args.backup_rules), res_xml_dir)\n args.backup_rules = split(args.backup_rules)[1][:-4]\n\n # Render out android manifest:\n manifest_path = \"src/main/AndroidManifest.xml\"\n render_args = {\n \"args\": args,\n \"service\": service,\n \"service_names\": service_names,\n \"android_api\": android_api,\n \"debug\": \"debug\" in args.build_mode,\n \"native_services\": args.native_services\n }\n if get_bootstrap_name() == \"sdl2\":\n render_args[\"url_scheme\"] = url_scheme\n render(\n 'AndroidManifest.tmpl.xml',\n manifest_path,\n **render_args)\n\n # Copy the AndroidManifest.xml to the dist root dir so that ant\n # can also use it\n if exists('AndroidManifest.xml'):\n remove('AndroidManifest.xml')\n shutil.copy(manifest_path, 'AndroidManifest.xml')\n\n # gradle build templates\n render(\n 'build.tmpl.gradle',\n 'build.gradle',\n args=args,\n aars=aars,\n jars=jars,\n android_api=android_api,\n build_tools_version=build_tools_version,\n debug_build=\"debug\" in args.build_mode,\n is_library=(get_bootstrap_name() == 'service_library'),\n )\n\n # ant build templates\n render(\n 'build.tmpl.xml',\n 'build.xml',\n args=args,\n versioned_name=versioned_name)\n\n # String resources:\n timestamp = time.time()\n if 'SOURCE_DATE_EPOCH' in environ:\n # for reproducible builds\n timestamp = int(environ['SOURCE_DATE_EPOCH'])\n private_version = \"{} {} {}\".format(\n args.version,\n args.numeric_version,\n timestamp\n )\n render_args = {\n \"args\": args,\n \"private_version\": hashlib.sha1(private_version.encode()).hexdigest()\n }\n if get_bootstrap_name() == \"sdl2\":\n render_args[\"url_scheme\"] = url_scheme\n render(\n 'strings.tmpl.xml',\n join(res_dir, 'values/strings.xml'),\n **render_args)\n\n if exists(join(\"templates\", \"custom_rules.tmpl.xml\")):\n render(\n 'custom_rules.tmpl.xml',\n 'custom_rules.xml',\n args=args)\n\n if get_bootstrap_name() == \"webview\":\n render('WebViewLoader.tmpl.java',\n 'src/main/java/org/kivy/android/WebViewLoader.java',\n args=args)\n\n if args.sign:\n render('build.properties', 'build.properties')\n else:\n if exists('build.properties'):\n os.remove('build.properties')\n\n # Apply java source patches if any are present:\n if exists(join('src', 'patches')):\n print(\"Applying Java source code patches...\")\n for patch_name in os.listdir(join('src', 'patches')):\n patch_path = join('src', 'patches', patch_name)\n print(\"Applying patch: \" + str(patch_path))\n\n # -N: insist this is FORWARD patch, don't reverse apply\n # -p1: strip first path component\n # -t: batch mode, don't ask questions\n patch_command = [\"patch\", \"-N\", \"-p1\", \"-t\", \"-i\", patch_path]\n\n try:\n # Use a dry run to establish whether the patch is already applied.\n # If we don't check this, the patch may be partially applied (which is 
bad!)\n subprocess.check_output(patch_command + [\"--dry-run\"])\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n # Return code 1 means not all hunks could be applied, this usually\n # means the patch is already applied.\n print(\"Warning: failed to apply patch (exit code 1), \"\n \"assuming it is already applied: \",\n str(patch_path))\n else:\n raise e\n else:\n # The dry run worked, so do the real thing\n subprocess.check_output(patch_command)\n\n\ndef parse_args_and_make_package(args=None):\n global BLACKLIST_PATTERNS, WHITELIST_PATTERNS, PYTHON\n\n # Get the default minsdk, equal to the NDK API that this dist is built against\n try:\n with open('dist_info.json', 'r') as fileh:\n info = json.load(fileh)\n default_min_api = int(info['ndk_api'])\n ndk_api = default_min_api\n except (OSError, KeyError, ValueError, TypeError):\n print('WARNING: Failed to read ndk_api from dist info, defaulting to 12')\n default_min_api = 12 # The old default before ndk_api was introduced\n ndk_api = 12\n\n import argparse\n ap = argparse.ArgumentParser(description='''\\\nPackage a Python application for Android (using\nbootstrap ''' + get_bootstrap_name() + ''').\n\nFor this to work, Java and Ant need to be in your path, as does the\ntools directory of the Android SDK.\n''')\n\n # --private is required unless for sdl2, where there's also --launcher\n ap.add_argument('--private', dest='private',\n help='the directory with the app source code files' +\n ' (containing your main.py entrypoint)',\n required=(get_bootstrap_name() != \"sdl2\"))\n ap.add_argument('--package', dest='package',\n help=('The name of the java package the project will be'\n ' packaged under.'),\n required=True)\n ap.add_argument('--name', dest='name',\n help=('The human-readable name of the project.'),\n required=True)\n ap.add_argument('--numeric-version', dest='numeric_version',\n help=('The numeric version number of the project. If not '\n 'given, this is automatically computed from the '\n 'version.'))\n ap.add_argument('--version', dest='version',\n help=('The version number of the project. 
This should '\n 'consist of numbers and dots, and should have the '\n 'same number of groups of numbers as previous '\n 'versions.'),\n required=True)\n if get_bootstrap_name() == \"sdl2\":\n ap.add_argument('--launcher', dest='launcher', action='store_true',\n help=('Provide this argument to build a multi-app '\n 'launcher, rather than a single app.'))\n ap.add_argument('--permission', dest='permissions', action='append', default=[],\n help='The permissions to give this app.', nargs='+')\n ap.add_argument('--meta-data', dest='meta_data', action='append', default=[],\n help='Custom key=value to add in application metadata')\n ap.add_argument('--uses-library', dest='android_used_libs', action='append', default=[],\n help='Used shared libraries included using <uses-library> tag in AndroidManifest.xml')\n ap.add_argument('--asset', dest='assets',\n action=\"append\", default=[],\n metavar=\"/path/to/source:dest\",\n help='Put this in the assets folder at assets/dest')\n ap.add_argument('--icon', dest='icon',\n help=('A png file to use as the icon for '\n 'the application.'))\n ap.add_argument('--service', dest='services', action='append', default=[],\n help='Declare a new service entrypoint: '\n 'NAME:PATH_TO_PY[:foreground]')\n ap.add_argument('--native-service', dest='native_services', action='append', default=[],\n help='Declare a new native service: '\n 'package.name.service')\n if get_bootstrap_name() != \"service_only\":\n ap.add_argument('--presplash', dest='presplash',\n help=('A jpeg file to use as a screen while the '\n 'application is loading.'))\n ap.add_argument('--presplash-lottie', dest='presplash_lottie',\n help=('A lottie (json) file to use as an animation while the '\n 'application is loading.'))\n ap.add_argument('--presplash-color',\n dest='presplash_color',\n default='#000000',\n help=('A string to set the loading screen '\n 'background color. '\n 'Supported formats are: '\n '#RRGGBB #AARRGGBB or color names '\n 'like red, green, blue, etc.'))\n ap.add_argument('--window', dest='window', action='store_true',\n default=False,\n help='Indicate if the application will be windowed')\n ap.add_argument('--orientation', dest='orientation',\n default='portrait',\n help=('The orientation that the game will '\n 'display in. '\n 'Usually one of \"landscape\", \"portrait\", '\n '\"sensor\", or \"user\" (the same as \"sensor\" '\n 'but obeying the '\n 'user\\'s Android rotation setting). 
'\n 'The full list of options is given under '\n 'android_screenOrientation at '\n 'https://developer.android.com/guide/'\n 'topics/manifest/'\n 'activity-element.html'))\n\n ap.add_argument('--enable-androidx', dest='enable_androidx',\n action='store_true',\n help=('Enable the AndroidX support library, '\n 'requires api = 28 or greater'))\n ap.add_argument('--android-entrypoint', dest='android_entrypoint',\n default=DEFAULT_PYTHON_ACTIVITY_JAVA_CLASS,\n help='Defines which java class will be used for startup, usually a subclass of PythonActivity')\n ap.add_argument('--android-apptheme', dest='android_apptheme',\n default='@android:style/Theme.NoTitleBar',\n help='Defines which app theme should be selected for the main activity')\n ap.add_argument('--add-compile-option', dest='compile_options', default=[],\n action='append', help='add compile options to gradle.build')\n ap.add_argument('--add-gradle-repository', dest='gradle_repositories',\n default=[],\n action='append',\n help='Ddd a repository for gradle')\n ap.add_argument('--add-packaging-option', dest='packaging_options',\n default=[],\n action='append',\n help='Dndroid packaging options')\n\n ap.add_argument('--wakelock', dest='wakelock', action='store_true',\n help=('Indicate if the application needs the device '\n 'to stay on'))\n ap.add_argument('--blacklist', dest='blacklist',\n default=join(curdir, 'blacklist.txt'),\n help=('Use a blacklist file to match unwanted file in '\n 'the final APK'))\n ap.add_argument('--whitelist', dest='whitelist',\n default=join(curdir, 'whitelist.txt'),\n help=('Use a whitelist file to prevent blacklisting of '\n 'file in the final APK'))\n ap.add_argument('--release', dest='build_mode', action='store_const',\n const='release', default='debug',\n help='Build your app as a non-debug release build. '\n '(Disables gdb debugging among other things)')\n ap.add_argument('--with-debug-symbols', dest='with_debug_symbols',\n action='store_const', const=True, default=False,\n help='Will keep debug symbols from `.so` files.')\n ap.add_argument('--add-jar', dest='add_jar', action='append',\n help=('Add a Java .jar to the libs, so you can access its '\n 'classes with pyjnius. You can specify this '\n 'argument more than once to include multiple jars'))\n ap.add_argument('--add-aar', dest='add_aar', action='append',\n help=('Add an aar dependency manually'))\n ap.add_argument('--depend', dest='depends', action='append',\n help=('Add a external dependency '\n '(eg: com.android.support:appcompat-v7:19.0.1)'))\n # The --sdk option has been removed, it is ignored in favour of\n # --android-api handled by toolchain.py\n ap.add_argument('--sdk', dest='sdk_version', default=-1,\n type=int, help=('Deprecated argument, does nothing'))\n ap.add_argument('--minsdk', dest='min_sdk_version',\n default=default_min_api, type=int,\n help=('Minimum Android SDK version that the app supports. '\n 'Defaults to {}.'.format(default_min_api)))\n ap.add_argument('--allow-minsdk-ndkapi-mismatch', default=False,\n action='store_true',\n help=('Allow the --minsdk argument to be different from '\n 'the discovered ndk_api in the dist'))\n ap.add_argument('--intent-filters', dest='intent_filters',\n help=('Add intent-filters xml rules to the '\n 'AndroidManifest.xml file. The argument is a '\n 'filename containing xml. 
The filename should be '\n 'located relative to the python-for-android '\n 'directory'))\n ap.add_argument('--with-billing', dest='billing_pubkey',\n help='If set, the billing service will be added (not implemented)')\n ap.add_argument('--add-source', dest='extra_source_dirs', action='append',\n help='Include additional source dirs in Java build')\n if get_bootstrap_name() == \"webview\":\n ap.add_argument('--port',\n help='The port on localhost that the WebView will access',\n default='5000')\n ap.add_argument('--try-system-python-compile', dest='try_system_python_compile',\n action='store_true',\n help='Use the system python during compileall if possible.')\n ap.add_argument('--no-compile-pyo', dest='no_compile_pyo', action='store_true',\n help='Do not optimise .py files to .pyo.')\n ap.add_argument('--sign', action='store_true',\n help=('Try to sign the APK with your credentials. You must set '\n 'the appropriate environment variables.'))\n ap.add_argument('--add-activity', dest='add_activity', action='append',\n help='Add this Java class as an Activity to the manifest.')\n ap.add_argument('--activity-launch-mode',\n dest='activity_launch_mode',\n default='singleTask',\n help='Set the launch mode of the main activity in the manifest.')\n ap.add_argument('--allow-backup', dest='allow_backup', default='true',\n help=\"if set to 'false', then android won't backup the application.\")\n ap.add_argument('--backup-rules', dest='backup_rules', default='',\n help=('Backup rules for Android Auto Backup. Argument is a '\n 'filename containing xml. The filename should be '\n 'located relative to the private directory containing your source code '\n 'files (containing your main.py entrypoint). '\n 'See https://developer.android.com/guide/topics/data/'\n 'autobackup#IncludingFiles for more information'))\n ap.add_argument('--no-optimize-python', dest='optimize_python',\n action='store_false', default=True,\n help=('Whether to compile to optimised .pyo files, using -OO '\n '(strips docstrings and asserts)'))\n ap.add_argument('--extra-manifest-xml', default='',\n help=('Extra xml to write directly inside the <manifest> element of'\n 'AndroidManifest.xml'))\n ap.add_argument('--extra-manifest-application-arguments', default='',\n help='Extra arguments to be added to the <manifest><application> tag of'\n 'AndroidManifest.xml')\n ap.add_argument('--manifest-placeholders', dest='manifest_placeholders',\n default='[:]', help=('Inject build variables into the manifest '\n 'via the manifestPlaceholders property'))\n ap.add_argument('--service-class-name', dest='service_class_name', default=DEFAULT_PYTHON_SERVICE_JAVA_CLASS,\n help='Use that parameter if you need to implement your own PythonServive Java class')\n ap.add_argument('--activity-class-name', dest='activity_class_name', default=DEFAULT_PYTHON_ACTIVITY_JAVA_CLASS,\n help='The full java class name of the main activity')\n\n # Put together arguments, and add those from .p4a config file:\n if args is None:\n args = sys.argv[1:]\n\n def _read_configuration():\n if not exists(\".p4a\"):\n return\n print(\"Reading .p4a configuration\")\n with open(\".p4a\") as fd:\n lines = fd.readlines()\n lines = [shlex.split(line)\n for line in lines if not line.startswith(\"#\")]\n for line in lines:\n for arg in line:\n args.append(arg)\n _read_configuration()\n\n args = ap.parse_args(args)\n\n args.ignore_path = []\n\n if args.name and args.name[0] == '\"' and args.name[-1] == '\"':\n args.name = args.name[1:-1]\n\n if ndk_api != args.min_sdk_version:\n 
print(('WARNING: --minsdk argument does not match the api that is '\n 'compiled against. Only proceed if you know what you are '\n 'doing, otherwise use --minsdk={} or recompile against api '\n '{}').format(ndk_api, args.min_sdk_version))\n if not args.allow_minsdk_ndkapi_mismatch:\n print('You must pass --allow-minsdk-ndkapi-mismatch to build '\n 'with --minsdk different to the target NDK api from the '\n 'build step')\n sys.exit(1)\n else:\n print('Proceeding with --minsdk not matching build target api')\n\n if args.billing_pubkey:\n print('Billing not yet supported!')\n sys.exit(1)\n\n if args.sdk_version == -1:\n print('WARNING: Received a --sdk argument, but this argument is '\n 'deprecated and does nothing.')\n args.sdk_version = -1 # ensure it is not used\n\n if args.permissions and isinstance(args.permissions[0], list):\n args.permissions = [p for perm in args.permissions for p in perm]\n\n if args.try_system_python_compile:\n # Hardcoding python2.7 is okay for now, as python3 skips the\n # compilation anyway\n python_executable = 'python2.7'\n try:\n subprocess.call([python_executable, '--version'])\n except (OSError, subprocess.CalledProcessError):\n pass\n else:\n PYTHON = python_executable\n\n if args.no_compile_pyo:\n PYTHON = None\n BLACKLIST_PATTERNS.remove('*.py')\n\n if args.blacklist:\n with open(args.blacklist) as fd:\n patterns = [x.strip() for x in fd.read().splitlines()\n if x.strip() and not x.strip().startswith('#')]\n BLACKLIST_PATTERNS += patterns\n\n if args.whitelist:\n with open(args.whitelist) as fd:\n patterns = [x.strip() for x in fd.read().splitlines()\n if x.strip() and not x.strip().startswith('#')]\n WHITELIST_PATTERNS += patterns\n\n if args.private is None and \\\n get_bootstrap_name() == 'sdl2' and args.launcher is None:\n print('Need --private directory or ' +\n '--launcher (SDL2 bootstrap only)' +\n 'to have something to launch inside the .apk!')\n sys.exit(1)\n make_package(args)\n\n return args\n\n\nif __name__ == \"__main__\":\n parse_args_and_make_package()\n", "path": "pythonforandroid/bootstraps/common/build/build.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nfrom gzip import GzipFile\nimport hashlib\nimport json\nfrom os.path import (\n dirname, join, isfile, realpath,\n relpath, split, exists, basename\n)\nfrom os import environ, listdir, makedirs, remove\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport time\n\nfrom distutils.version import LooseVersion\nfrom fnmatch import fnmatch\nimport jinja2\n\n\ndef get_dist_info_for(key, error_if_missing=True):\n try:\n with open(join(dirname(__file__), 'dist_info.json'), 'r') as fileh:\n info = json.load(fileh)\n value = info[key]\n except (OSError, KeyError) as e:\n if not error_if_missing:\n return None\n print(\"BUILD FAILURE: Couldn't extract the key `\" + key + \"` \" +\n \"from dist_info.json: \" + str(e))\n sys.exit(1)\n return value\n\n\ndef get_hostpython():\n return get_dist_info_for('hostpython')\n\n\ndef get_python_version():\n return get_dist_info_for('python_version')\n\n\ndef get_bootstrap_name():\n return get_dist_info_for('bootstrap')\n\n\nif os.name == 'nt':\n ANDROID = 'android.bat'\n ANT = 'ant.bat'\nelse:\n ANDROID = 'android'\n ANT = 'ant'\n\ncurdir = dirname(__file__)\n\nPYTHON = get_hostpython()\nPYTHON_VERSION = get_python_version()\nif PYTHON is not None and not exists(PYTHON):\n PYTHON = None\n\nBLACKLIST_PATTERNS = [\n # code versionning\n '^*.hg/*',\n '^*.git/*',\n '^*.bzr/*',\n '^*.svn/*',\n\n # temp files\n '~',\n '*.bak',\n '*.swp',\n]\n# pyc/py\nif PYTHON is not None:\n BLACKLIST_PATTERNS.append('*.py')\n\nWHITELIST_PATTERNS = []\nif get_bootstrap_name() in ('sdl2', 'webview', 'service_only'):\n WHITELIST_PATTERNS.append('pyconfig.h')\n\npython_files = []\n\n\nenvironment = jinja2.Environment(loader=jinja2.FileSystemLoader(\n join(curdir, 'templates')))\n\n\nDEFAULT_PYTHON_ACTIVITY_JAVA_CLASS = 'org.kivy.android.PythonActivity'\nDEFAULT_PYTHON_SERVICE_JAVA_CLASS = 'org.kivy.android.PythonService'\n\n\ndef ensure_dir(path):\n if not exists(path):\n makedirs(path)\n\n\ndef render(template, dest, **kwargs):\n '''Using jinja2, render `template` to the filename `dest`, supplying the\n\n keyword arguments as template parameters.\n '''\n\n dest_dir = dirname(dest)\n if dest_dir and not exists(dest_dir):\n makedirs(dest_dir)\n\n template = environment.get_template(template)\n text = template.render(**kwargs)\n\n f = open(dest, 'wb')\n f.write(text.encode('utf-8'))\n f.close()\n\n\ndef is_whitelist(name):\n return match_filename(WHITELIST_PATTERNS, name)\n\n\ndef is_blacklist(name):\n if is_whitelist(name):\n return False\n return match_filename(BLACKLIST_PATTERNS, name)\n\n\ndef match_filename(pattern_list, name):\n for pattern in pattern_list:\n if pattern.startswith('^'):\n pattern = pattern[1:]\n else:\n pattern = '*/' + pattern\n if fnmatch(name, pattern):\n return True\n\n\ndef listfiles(d):\n basedir = d\n subdirlist = []\n for item in os.listdir(d):\n fn = join(d, item)\n if isfile(fn):\n yield fn\n else:\n subdirlist.append(join(basedir, item))\n for subdir in subdirlist:\n for fn in listfiles(subdir):\n yield fn\n\n\ndef make_tar(tfn, source_dirs, ignore_path=[], optimize_python=True):\n '''\n Make a zip file `fn` from the contents of source_dis.\n '''\n\n # selector function\n def select(fn):\n rfn = realpath(fn)\n for p in ignore_path:\n if p.endswith('/'):\n p = p[:-1]\n if rfn.startswith(p):\n return False\n if rfn in python_files:\n return False\n return not is_blacklist(fn)\n\n def clean(tinfo):\n \"\"\"cleaning function (for reproducible builds)\"\"\"\n 
tinfo.uid = tinfo.gid = 0\n tinfo.uname = tinfo.gname = ''\n tinfo.mtime = 0\n return tinfo\n\n # get the files and relpath file of all the directory we asked for\n files = []\n for sd in source_dirs:\n sd = realpath(sd)\n compile_dir(sd, optimize_python=optimize_python)\n files += [(x, relpath(realpath(x), sd)) for x in listfiles(sd)\n if select(x)]\n files.sort() # deterministic\n\n # create tar.gz of thoses files\n gf = GzipFile(tfn, 'wb', mtime=0) # deterministic\n tf = tarfile.open(None, 'w', gf, format=tarfile.USTAR_FORMAT)\n dirs = []\n for fn, afn in files:\n dn = dirname(afn)\n if dn not in dirs:\n # create every dirs first if not exist yet\n d = ''\n for component in split(dn):\n d = join(d, component)\n if d.startswith('/'):\n d = d[1:]\n if d == '' or d in dirs:\n continue\n dirs.append(d)\n tinfo = tarfile.TarInfo(d)\n tinfo.type = tarfile.DIRTYPE\n clean(tinfo)\n tf.addfile(tinfo)\n\n # put the file\n tf.add(fn, afn, filter=clean)\n tf.close()\n gf.close()\n\n\ndef compile_dir(dfn, optimize_python=True):\n '''\n Compile *.py in directory `dfn` to *.pyo\n '''\n\n if PYTHON is None:\n return\n\n if int(PYTHON_VERSION[0]) >= 3:\n args = [PYTHON, '-m', 'compileall', '-b', '-f', dfn]\n else:\n args = [PYTHON, '-m', 'compileall', '-f', dfn]\n if optimize_python:\n # -OO = strip docstrings\n args.insert(1, '-OO')\n return_code = subprocess.call(args)\n\n if return_code != 0:\n print('Error while running \"{}\"'.format(' '.join(args)))\n print('This probably means one of your Python files has a syntax '\n 'error, see logs above')\n exit(1)\n\n\ndef make_package(args):\n # If no launcher is specified, require a main.py/main.pyo:\n if (get_bootstrap_name() != \"sdl\" or args.launcher is None) and \\\n get_bootstrap_name() not in [\"webview\", \"service_library\"]:\n # (webview doesn't need an entrypoint, apparently)\n if args.private is None or (\n not exists(join(realpath(args.private), 'main.py')) and\n not exists(join(realpath(args.private), 'main.pyo'))):\n print('''BUILD FAILURE: No main.py(o) found in your app directory. This\nfile must exist to act as the entry point for you app. 
If your app is\nstarted by a file with a different name, rename it to main.py or add a\nmain.py that loads it.''')\n sys.exit(1)\n\n assets_dir = \"src/main/assets\"\n\n # Delete the old assets.\n shutil.rmtree(assets_dir, ignore_errors=True)\n ensure_dir(assets_dir)\n\n # Add extra environment variable file into tar-able directory:\n env_vars_tarpath = tempfile.mkdtemp(prefix=\"p4a-extra-env-\")\n with open(os.path.join(env_vars_tarpath, \"p4a_env_vars.txt\"), \"w\") as f:\n if hasattr(args, \"window\"):\n f.write(\"P4A_IS_WINDOWED=\" + str(args.window) + \"\\n\")\n if hasattr(args, \"orientation\"):\n f.write(\"P4A_ORIENTATION=\" + str(args.orientation) + \"\\n\")\n f.write(\"P4A_NUMERIC_VERSION=\" + str(args.numeric_version) + \"\\n\")\n f.write(\"P4A_MINSDK=\" + str(args.min_sdk_version) + \"\\n\")\n\n # Package up the private data (public not supported).\n use_setup_py = get_dist_info_for(\"use_setup_py\",\n error_if_missing=False) is True\n tar_dirs = [env_vars_tarpath]\n _temp_dirs_to_clean = []\n try:\n if args.private:\n if not use_setup_py or (\n not exists(join(args.private, \"setup.py\")) and\n not exists(join(args.private, \"pyproject.toml\"))\n ):\n print('No setup.py/pyproject.toml used, copying '\n 'full private data into .apk.')\n tar_dirs.append(args.private)\n else:\n print(\"Copying main.py's ONLY, since other app data is \"\n \"expected in site-packages.\")\n main_py_only_dir = tempfile.mkdtemp()\n _temp_dirs_to_clean.append(main_py_only_dir)\n\n # Check all main.py files we need to copy:\n copy_paths = [\"main.py\", join(\"service\", \"main.py\")]\n for copy_path in copy_paths:\n variants = [\n copy_path,\n copy_path.partition(\".\")[0] + \".pyc\",\n copy_path.partition(\".\")[0] + \".pyo\",\n ]\n # Check in all variants with all possible endings:\n for variant in variants:\n if exists(join(args.private, variant)):\n # Make sure surrounding directly exists:\n dir_path = os.path.dirname(variant)\n if (len(dir_path) > 0 and\n not exists(\n join(main_py_only_dir, dir_path)\n )):\n os.mkdir(join(main_py_only_dir, dir_path))\n # Copy actual file:\n shutil.copyfile(\n join(args.private, variant),\n join(main_py_only_dir, variant),\n )\n\n # Append directory with all main.py's to result apk paths:\n tar_dirs.append(main_py_only_dir)\n for python_bundle_dir in ('private', '_python_bundle'):\n if exists(python_bundle_dir):\n tar_dirs.append(python_bundle_dir)\n if get_bootstrap_name() == \"webview\":\n tar_dirs.append('webview_includes')\n\n for asset in args.assets:\n asset_src, asset_dest = asset.split(\":\")\n if isfile(realpath(asset_src)):\n ensure_dir(dirname(join(assets_dir, asset_dest)))\n shutil.copy(realpath(asset_src), join(assets_dir, asset_dest))\n else:\n shutil.copytree(realpath(asset_src), join(assets_dir, asset_dest))\n\n if args.private or args.launcher:\n make_tar(\n join(assets_dir, 'private.mp3'), tar_dirs, args.ignore_path,\n optimize_python=args.optimize_python)\n finally:\n for directory in _temp_dirs_to_clean:\n shutil.rmtree(directory)\n\n # Remove extra env vars tar-able directory:\n shutil.rmtree(env_vars_tarpath)\n\n # Prepare some variables for templating process\n res_dir = \"src/main/res\"\n default_icon = 'templates/kivy-icon.png'\n default_presplash = 'templates/kivy-presplash.jpg'\n shutil.copy(\n args.icon or default_icon,\n join(res_dir, 'drawable/icon.png')\n )\n\n if args.enable_androidx:\n shutil.copy('templates/gradle.properties', 'gradle.properties')\n\n if get_bootstrap_name() != \"service_only\":\n lottie_splashscreen = join(res_dir, 
'raw/splashscreen.json')\n if args.presplash_lottie:\n shutil.copy(\n 'templates/lottie.xml',\n join(res_dir, 'layout/lottie.xml')\n )\n ensure_dir(join(res_dir, 'raw'))\n shutil.copy(\n args.presplash_lottie,\n join(res_dir, 'raw/splashscreen.json')\n )\n else:\n if exists(lottie_splashscreen):\n remove(lottie_splashscreen)\n remove(join(res_dir, 'layout/lottie.xml'))\n\n shutil.copy(\n args.presplash or default_presplash,\n join(res_dir, 'drawable/presplash.jpg')\n )\n\n # If extra Java jars were requested, copy them into the libs directory\n jars = []\n if args.add_jar:\n for jarname in args.add_jar:\n if not exists(jarname):\n print('Requested jar does not exist: {}'.format(jarname))\n sys.exit(-1)\n shutil.copy(jarname, 'src/main/libs')\n jars.append(basename(jarname))\n\n # If extra aar were requested, copy them into the libs directory\n aars = []\n if args.add_aar:\n ensure_dir(\"libs\")\n for aarname in args.add_aar:\n if not exists(aarname):\n print('Requested aar does not exists: {}'.format(aarname))\n sys.exit(-1)\n shutil.copy(aarname, 'libs')\n aars.append(basename(aarname).rsplit('.', 1)[0])\n\n versioned_name = (args.name.replace(' ', '').replace('\\'', '') +\n '-' + args.version)\n\n version_code = 0\n if not args.numeric_version:\n # Set version code in format (arch-minsdk-app_version)\n arch = get_dist_info_for(\"archs\")[0]\n arch_dict = {\"x86_64\": \"9\", \"arm64-v8a\": \"8\", \"armeabi-v7a\": \"7\", \"x86\": \"6\"}\n arch_code = arch_dict.get(arch, '1')\n min_sdk = args.min_sdk_version\n for i in args.version.split('.'):\n version_code *= 100\n version_code += int(i)\n args.numeric_version = \"{}{}{}\".format(arch_code, min_sdk, version_code)\n\n if args.intent_filters:\n with open(args.intent_filters) as fd:\n args.intent_filters = fd.read()\n\n if not args.add_activity:\n args.add_activity = []\n\n if not args.activity_launch_mode:\n args.activity_launch_mode = ''\n\n if args.extra_source_dirs:\n esd = []\n for spec in args.extra_source_dirs:\n if ':' in spec:\n specdir, specincludes = spec.split(':')\n print('WARNING: Currently gradle builds only support including source '\n 'directories, so when building using gradle all files in '\n '{} will be included.'.format(specdir))\n else:\n specdir = spec\n specincludes = '**'\n esd.append((realpath(specdir), specincludes))\n args.extra_source_dirs = esd\n else:\n args.extra_source_dirs = []\n\n service = False\n if args.private:\n service_main = join(realpath(args.private), 'service', 'main.py')\n if exists(service_main) or exists(service_main + 'o'):\n service = True\n\n service_names = []\n base_service_class = args.service_class_name.split('.')[-1]\n for sid, spec in enumerate(args.services):\n spec = spec.split(':')\n name = spec[0]\n entrypoint = spec[1]\n options = spec[2:]\n\n foreground = 'foreground' in options\n sticky = 'sticky' in options\n\n service_names.append(name)\n service_target_path =\\\n 'src/main/java/{}/Service{}.java'.format(\n args.package.replace(\".\", \"/\"),\n name.capitalize()\n )\n render(\n 'Service.tmpl.java',\n service_target_path,\n name=name,\n entrypoint=entrypoint,\n args=args,\n foreground=foreground,\n sticky=sticky,\n service_id=sid + 1,\n base_service_class=base_service_class,\n )\n\n # Find the SDK directory and target API\n with open('project.properties', 'r') as fileh:\n target = fileh.read().strip()\n android_api = target.split('-')[1]\n try:\n int(android_api)\n except (ValueError, TypeError):\n raise ValueError(\n \"failed to extract the Android API level from \" +\n 
\"build.properties. expected int, got: '\" +\n str(android_api) + \"'\"\n )\n with open('local.properties', 'r') as fileh:\n sdk_dir = fileh.read().strip()\n sdk_dir = sdk_dir[8:]\n\n # Try to build with the newest available build tools\n ignored = {\".DS_Store\", \".ds_store\"}\n build_tools_versions = [x for x in listdir(join(sdk_dir, 'build-tools')) if x not in ignored]\n build_tools_versions = sorted(build_tools_versions,\n key=LooseVersion)\n build_tools_version = build_tools_versions[-1]\n\n # Folder name for launcher (used by SDL2 bootstrap)\n url_scheme = 'kivy'\n\n # Copy backup rules file if specified and update the argument\n if args.backup_rules:\n res_xml_dir = join(res_dir, 'xml')\n ensure_dir(res_xml_dir)\n shutil.copy(join(args.private, args.backup_rules), res_xml_dir)\n args.backup_rules = split(args.backup_rules)[1][:-4]\n\n # Render out android manifest:\n manifest_path = \"src/main/AndroidManifest.xml\"\n render_args = {\n \"args\": args,\n \"service\": service,\n \"service_names\": service_names,\n \"android_api\": android_api,\n \"debug\": \"debug\" in args.build_mode,\n \"native_services\": args.native_services\n }\n if get_bootstrap_name() == \"sdl2\":\n render_args[\"url_scheme\"] = url_scheme\n render(\n 'AndroidManifest.tmpl.xml',\n manifest_path,\n **render_args)\n\n # Copy the AndroidManifest.xml to the dist root dir so that ant\n # can also use it\n if exists('AndroidManifest.xml'):\n remove('AndroidManifest.xml')\n shutil.copy(manifest_path, 'AndroidManifest.xml')\n\n # gradle build templates\n render(\n 'build.tmpl.gradle',\n 'build.gradle',\n args=args,\n aars=aars,\n jars=jars,\n android_api=android_api,\n build_tools_version=build_tools_version,\n debug_build=\"debug\" in args.build_mode,\n is_library=(get_bootstrap_name() == 'service_library'),\n )\n\n # ant build templates\n render(\n 'build.tmpl.xml',\n 'build.xml',\n args=args,\n versioned_name=versioned_name)\n\n # String resources:\n timestamp = time.time()\n if 'SOURCE_DATE_EPOCH' in environ:\n # for reproducible builds\n timestamp = int(environ['SOURCE_DATE_EPOCH'])\n private_version = \"{} {} {}\".format(\n args.version,\n args.numeric_version,\n timestamp\n )\n render_args = {\n \"args\": args,\n \"private_version\": hashlib.sha1(private_version.encode()).hexdigest()\n }\n if get_bootstrap_name() == \"sdl2\":\n render_args[\"url_scheme\"] = url_scheme\n render(\n 'strings.tmpl.xml',\n join(res_dir, 'values/strings.xml'),\n **render_args)\n\n if exists(join(\"templates\", \"custom_rules.tmpl.xml\")):\n render(\n 'custom_rules.tmpl.xml',\n 'custom_rules.xml',\n args=args)\n\n if get_bootstrap_name() == \"webview\":\n render('WebViewLoader.tmpl.java',\n 'src/main/java/org/kivy/android/WebViewLoader.java',\n args=args)\n\n if args.sign:\n render('build.properties', 'build.properties')\n else:\n if exists('build.properties'):\n os.remove('build.properties')\n\n # Apply java source patches if any are present:\n if exists(join('src', 'patches')):\n print(\"Applying Java source code patches...\")\n for patch_name in os.listdir(join('src', 'patches')):\n patch_path = join('src', 'patches', patch_name)\n print(\"Applying patch: \" + str(patch_path))\n\n # -N: insist this is FORWARD patch, don't reverse apply\n # -p1: strip first path component\n # -t: batch mode, don't ask questions\n patch_command = [\"patch\", \"-N\", \"-p1\", \"-t\", \"-i\", patch_path]\n\n try:\n # Use a dry run to establish whether the patch is already applied.\n # If we don't check this, the patch may be partially applied (which is 
bad!)\n subprocess.check_output(patch_command + [\"--dry-run\"])\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n # Return code 1 means not all hunks could be applied, this usually\n # means the patch is already applied.\n print(\"Warning: failed to apply patch (exit code 1), \"\n \"assuming it is already applied: \",\n str(patch_path))\n else:\n raise e\n else:\n # The dry run worked, so do the real thing\n subprocess.check_output(patch_command)\n\n\ndef parse_args_and_make_package(args=None):\n global BLACKLIST_PATTERNS, WHITELIST_PATTERNS, PYTHON\n\n # Get the default minsdk, equal to the NDK API that this dist is built against\n try:\n with open('dist_info.json', 'r') as fileh:\n info = json.load(fileh)\n default_min_api = int(info['ndk_api'])\n ndk_api = default_min_api\n except (OSError, KeyError, ValueError, TypeError):\n print('WARNING: Failed to read ndk_api from dist info, defaulting to 12')\n default_min_api = 12 # The old default before ndk_api was introduced\n ndk_api = 12\n\n import argparse\n ap = argparse.ArgumentParser(description='''\\\nPackage a Python application for Android (using\nbootstrap ''' + get_bootstrap_name() + ''').\n\nFor this to work, Java and Ant need to be in your path, as does the\ntools directory of the Android SDK.\n''')\n\n # --private is required unless for sdl2, where there's also --launcher\n ap.add_argument('--private', dest='private',\n help='the directory with the app source code files' +\n ' (containing your main.py entrypoint)',\n required=(get_bootstrap_name() != \"sdl2\"))\n ap.add_argument('--package', dest='package',\n help=('The name of the java package the project will be'\n ' packaged under.'),\n required=True)\n ap.add_argument('--name', dest='name',\n help=('The human-readable name of the project.'),\n required=True)\n ap.add_argument('--numeric-version', dest='numeric_version',\n help=('The numeric version number of the project. If not '\n 'given, this is automatically computed from the '\n 'version.'))\n ap.add_argument('--version', dest='version',\n help=('The version number of the project. 
This should '\n 'consist of numbers and dots, and should have the '\n 'same number of groups of numbers as previous '\n 'versions.'),\n required=True)\n if get_bootstrap_name() == \"sdl2\":\n ap.add_argument('--launcher', dest='launcher', action='store_true',\n help=('Provide this argument to build a multi-app '\n 'launcher, rather than a single app.'))\n ap.add_argument('--permission', dest='permissions', action='append', default=[],\n help='The permissions to give this app.', nargs='+')\n ap.add_argument('--meta-data', dest='meta_data', action='append', default=[],\n help='Custom key=value to add in application metadata')\n ap.add_argument('--uses-library', dest='android_used_libs', action='append', default=[],\n help='Used shared libraries included using <uses-library> tag in AndroidManifest.xml')\n ap.add_argument('--asset', dest='assets',\n action=\"append\", default=[],\n metavar=\"/path/to/source:dest\",\n help='Put this in the assets folder at assets/dest')\n ap.add_argument('--icon', dest='icon',\n help=('A png file to use as the icon for '\n 'the application.'))\n ap.add_argument('--service', dest='services', action='append', default=[],\n help='Declare a new service entrypoint: '\n 'NAME:PATH_TO_PY[:foreground]')\n ap.add_argument('--native-service', dest='native_services', action='append', default=[],\n help='Declare a new native service: '\n 'package.name.service')\n if get_bootstrap_name() != \"service_only\":\n ap.add_argument('--presplash', dest='presplash',\n help=('A jpeg file to use as a screen while the '\n 'application is loading.'))\n ap.add_argument('--presplash-lottie', dest='presplash_lottie',\n help=('A lottie (json) file to use as an animation while the '\n 'application is loading.'))\n ap.add_argument('--presplash-color',\n dest='presplash_color',\n default='#000000',\n help=('A string to set the loading screen '\n 'background color. '\n 'Supported formats are: '\n '#RRGGBB #AARRGGBB or color names '\n 'like red, green, blue, etc.'))\n ap.add_argument('--window', dest='window', action='store_true',\n default=False,\n help='Indicate if the application will be windowed')\n ap.add_argument('--orientation', dest='orientation',\n default='portrait',\n help=('The orientation that the game will '\n 'display in. '\n 'Usually one of \"landscape\", \"portrait\", '\n '\"sensor\", or \"user\" (the same as \"sensor\" '\n 'but obeying the '\n 'user\\'s Android rotation setting). 
'\n 'The full list of options is given under '\n 'android_screenOrientation at '\n 'https://developer.android.com/guide/'\n 'topics/manifest/'\n 'activity-element.html'))\n\n ap.add_argument('--enable-androidx', dest='enable_androidx',\n action='store_true',\n help=('Enable the AndroidX support library, '\n 'requires api = 28 or greater'))\n ap.add_argument('--android-entrypoint', dest='android_entrypoint',\n default=DEFAULT_PYTHON_ACTIVITY_JAVA_CLASS,\n help='Defines which java class will be used for startup, usually a subclass of PythonActivity')\n ap.add_argument('--android-apptheme', dest='android_apptheme',\n default='@android:style/Theme.NoTitleBar',\n help='Defines which app theme should be selected for the main activity')\n ap.add_argument('--add-compile-option', dest='compile_options', default=[],\n action='append', help='add compile options to gradle.build')\n ap.add_argument('--add-gradle-repository', dest='gradle_repositories',\n default=[],\n action='append',\n help='Ddd a repository for gradle')\n ap.add_argument('--add-packaging-option', dest='packaging_options',\n default=[],\n action='append',\n help='Dndroid packaging options')\n\n ap.add_argument('--wakelock', dest='wakelock', action='store_true',\n help=('Indicate if the application needs the device '\n 'to stay on'))\n ap.add_argument('--blacklist', dest='blacklist',\n default=join(curdir, 'blacklist.txt'),\n help=('Use a blacklist file to match unwanted file in '\n 'the final APK'))\n ap.add_argument('--whitelist', dest='whitelist',\n default=join(curdir, 'whitelist.txt'),\n help=('Use a whitelist file to prevent blacklisting of '\n 'file in the final APK'))\n ap.add_argument('--release', dest='build_mode', action='store_const',\n const='release', default='debug',\n help='Build your app as a non-debug release build. '\n '(Disables gdb debugging among other things)')\n ap.add_argument('--with-debug-symbols', dest='with_debug_symbols',\n action='store_const', const=True, default=False,\n help='Will keep debug symbols from `.so` files.')\n ap.add_argument('--add-jar', dest='add_jar', action='append',\n help=('Add a Java .jar to the libs, so you can access its '\n 'classes with pyjnius. You can specify this '\n 'argument more than once to include multiple jars'))\n ap.add_argument('--add-aar', dest='add_aar', action='append',\n help=('Add an aar dependency manually'))\n ap.add_argument('--depend', dest='depends', action='append',\n help=('Add a external dependency '\n '(eg: com.android.support:appcompat-v7:19.0.1)'))\n # The --sdk option has been removed, it is ignored in favour of\n # --android-api handled by toolchain.py\n ap.add_argument('--sdk', dest='sdk_version', default=-1,\n type=int, help=('Deprecated argument, does nothing'))\n ap.add_argument('--minsdk', dest='min_sdk_version',\n default=default_min_api, type=int,\n help=('Minimum Android SDK version that the app supports. '\n 'Defaults to {}.'.format(default_min_api)))\n ap.add_argument('--allow-minsdk-ndkapi-mismatch', default=False,\n action='store_true',\n help=('Allow the --minsdk argument to be different from '\n 'the discovered ndk_api in the dist'))\n ap.add_argument('--intent-filters', dest='intent_filters',\n help=('Add intent-filters xml rules to the '\n 'AndroidManifest.xml file. The argument is a '\n 'filename containing xml. 
The filename should be '\n 'located relative to the python-for-android '\n 'directory'))\n ap.add_argument('--with-billing', dest='billing_pubkey',\n help='If set, the billing service will be added (not implemented)')\n ap.add_argument('--add-source', dest='extra_source_dirs', action='append',\n help='Include additional source dirs in Java build')\n if get_bootstrap_name() == \"webview\":\n ap.add_argument('--port',\n help='The port on localhost that the WebView will access',\n default='5000')\n ap.add_argument('--try-system-python-compile', dest='try_system_python_compile',\n action='store_true',\n help='Use the system python during compileall if possible.')\n ap.add_argument('--no-compile-pyo', dest='no_compile_pyo', action='store_true',\n help='Do not optimise .py files to .pyo.')\n ap.add_argument('--sign', action='store_true',\n help=('Try to sign the APK with your credentials. You must set '\n 'the appropriate environment variables.'))\n ap.add_argument('--add-activity', dest='add_activity', action='append',\n help='Add this Java class as an Activity to the manifest.')\n ap.add_argument('--activity-launch-mode',\n dest='activity_launch_mode',\n default='singleTask',\n help='Set the launch mode of the main activity in the manifest.')\n ap.add_argument('--allow-backup', dest='allow_backup', default='true',\n help=\"if set to 'false', then android won't backup the application.\")\n ap.add_argument('--backup-rules', dest='backup_rules', default='',\n help=('Backup rules for Android Auto Backup. Argument is a '\n 'filename containing xml. The filename should be '\n 'located relative to the private directory containing your source code '\n 'files (containing your main.py entrypoint). '\n 'See https://developer.android.com/guide/topics/data/'\n 'autobackup#IncludingFiles for more information'))\n ap.add_argument('--no-optimize-python', dest='optimize_python',\n action='store_false', default=True,\n help=('Whether to compile to optimised .pyo files, using -OO '\n '(strips docstrings and asserts)'))\n ap.add_argument('--extra-manifest-xml', default='',\n help=('Extra xml to write directly inside the <manifest> element of'\n 'AndroidManifest.xml'))\n ap.add_argument('--extra-manifest-application-arguments', default='',\n help='Extra arguments to be added to the <manifest><application> tag of'\n 'AndroidManifest.xml')\n ap.add_argument('--manifest-placeholders', dest='manifest_placeholders',\n default='[:]', help=('Inject build variables into the manifest '\n 'via the manifestPlaceholders property'))\n ap.add_argument('--service-class-name', dest='service_class_name', default=DEFAULT_PYTHON_SERVICE_JAVA_CLASS,\n help='Use that parameter if you need to implement your own PythonServive Java class')\n ap.add_argument('--activity-class-name', dest='activity_class_name', default=DEFAULT_PYTHON_ACTIVITY_JAVA_CLASS,\n help='The full java class name of the main activity')\n\n # Put together arguments, and add those from .p4a config file:\n if args is None:\n args = sys.argv[1:]\n\n def _read_configuration():\n if not exists(\".p4a\"):\n return\n print(\"Reading .p4a configuration\")\n with open(\".p4a\") as fd:\n lines = fd.readlines()\n lines = [shlex.split(line)\n for line in lines if not line.startswith(\"#\")]\n for line in lines:\n for arg in line:\n args.append(arg)\n _read_configuration()\n\n args = ap.parse_args(args)\n\n args.ignore_path = []\n\n if args.name and args.name[0] == '\"' and args.name[-1] == '\"':\n args.name = args.name[1:-1]\n\n if ndk_api != args.min_sdk_version:\n 
print(('WARNING: --minsdk argument does not match the api that is '\n 'compiled against. Only proceed if you know what you are '\n 'doing, otherwise use --minsdk={} or recompile against api '\n '{}').format(ndk_api, args.min_sdk_version))\n if not args.allow_minsdk_ndkapi_mismatch:\n print('You must pass --allow-minsdk-ndkapi-mismatch to build '\n 'with --minsdk different to the target NDK api from the '\n 'build step')\n sys.exit(1)\n else:\n print('Proceeding with --minsdk not matching build target api')\n\n if args.billing_pubkey:\n print('Billing not yet supported!')\n sys.exit(1)\n\n if args.sdk_version == -1:\n print('WARNING: Received a --sdk argument, but this argument is '\n 'deprecated and does nothing.')\n args.sdk_version = -1 # ensure it is not used\n\n if args.permissions and isinstance(args.permissions[0], list):\n args.permissions = [p for perm in args.permissions for p in perm]\n\n if args.try_system_python_compile:\n # Hardcoding python2.7 is okay for now, as python3 skips the\n # compilation anyway\n python_executable = 'python2.7'\n try:\n subprocess.call([python_executable, '--version'])\n except (OSError, subprocess.CalledProcessError):\n pass\n else:\n PYTHON = python_executable\n\n if args.no_compile_pyo:\n PYTHON = None\n BLACKLIST_PATTERNS.remove('*.py')\n\n if args.blacklist:\n with open(args.blacklist) as fd:\n patterns = [x.strip() for x in fd.read().splitlines()\n if x.strip() and not x.strip().startswith('#')]\n BLACKLIST_PATTERNS += patterns\n\n if args.whitelist:\n with open(args.whitelist) as fd:\n patterns = [x.strip() for x in fd.read().splitlines()\n if x.strip() and not x.strip().startswith('#')]\n WHITELIST_PATTERNS += patterns\n\n if args.private is None and \\\n get_bootstrap_name() == 'sdl2' and args.launcher is None:\n print('Need --private directory or ' +\n '--launcher (SDL2 bootstrap only)' +\n 'to have something to launch inside the .apk!')\n sys.exit(1)\n make_package(args)\n\n return args\n\n\nif __name__ == \"__main__\":\n parse_args_and_make_package()\n", "path": "pythonforandroid/bootstraps/common/build/build.py" } ]
diff --git a/pythonforandroid/bootstraps/common/build/build.py b/pythonforandroid/bootstraps/common/build/build.py index 895e599f6c..aa0574072a 100644 --- a/pythonforandroid/bootstraps/common/build/build.py +++ b/pythonforandroid/bootstraps/common/build/build.py @@ -201,6 +201,7 @@ def clean(tinfo): dirs.append(d) tinfo = tarfile.TarInfo(d) tinfo.type = tarfile.DIRTYPE + clean(tinfo) tf.addfile(tinfo) # put the file
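The one-line diff above routes the synthetic directory entries through the same `clean()` filter that regular files already get via `tf.add(..., filter=clean)`, so uid/gid/uname/gname/mtime no longer leak host- and build-time metadata into the archive. A minimal, self-contained sketch of that reproducible-tar pattern (hypothetical helper names, simplified to a single level of parent directories — this is not python-for-android's actual `make_tar`):

```
# Sketch only: illustrates why directory TarInfo entries must be normalised
# with the same filter as regular files to get byte-identical archives.
import tarfile
from gzip import GzipFile


def clean(tinfo):
    """Strip user/group/time metadata so archive bytes do not depend on the host."""
    tinfo.uid = tinfo.gid = 0
    tinfo.uname = tinfo.gname = ''
    tinfo.mtime = 0
    return tinfo


def make_deterministic_tar(out_path, files):
    """files: pre-sorted list of (source_path, archive_name) pairs."""
    gf = GzipFile(out_path, 'wb', mtime=0)  # fixed gzip header timestamp
    tf = tarfile.open(None, 'w', gf, format=tarfile.USTAR_FORMAT)
    seen_dirs = set()
    for src, arcname in files:
        parent = '/'.join(arcname.split('/')[:-1])
        if parent and parent not in seen_dirs:
            seen_dirs.add(parent)
            dinfo = tarfile.TarInfo(parent)
            dinfo.type = tarfile.DIRTYPE
            tf.addfile(clean(dinfo))  # directory entries need the filter too
        tf.add(src, arcname, filter=clean)  # regular files already used it
    tf.close()
    gf.close()
```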
svthalia__concrexit-1844
Event (registration) status message in the API
### Is your feature request related to a problem? Please describe.
Currently, the event status messages (like 'you cannot cancel your registration without having to pay a fine') are hardcoded, so whenever we update them, we must also update the app.
### Describe the solution you'd like
Put the message in the API.
### Additional context
Also check out #1381.
[ { "content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organiser\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organiser = MemberGroupSerializer()\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = serializers.DecimalField(max_digits=5, decimal_places=2)\n fine = serializers.DecimalField(max_digits=5, decimal_places=2)\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.eventregistration_set.get(\n member=self.context[\"request\"].member, date_cancelled=None\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\"pk\", \"present\", \"queue_position\", \"date\", \"payment\"),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py" } ]
[ { "content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organiser\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organiser = MemberGroupSerializer()\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = serializers.DecimalField(max_digits=5, decimal_places=2)\n fine = serializers.DecimalField(max_digits=5, decimal_places=2)\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.eventregistration_set.get(\n member=self.context[\"request\"].member, date_cancelled=None\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\"pk\", \"present\", \"queue_position\", \"date\", \"payment\"),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py" } ]
diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py index ccfa35ae3..d3c60ef9a 100644 --- a/website/events/api/v2/serializers/event.py +++ b/website/events/api/v2/serializers/event.py @@ -32,6 +32,7 @@ class Meta: "num_participants", "max_participants", "no_registration_message", + "cancel_too_late_message", "has_fields", "food_event", "maps_url",
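For context, a hypothetical consumer-side sketch of what the change above enables: once `cancel_too_late_message` is part of the event payload, a client such as the app can render whatever text the server sends instead of hardcoding it. The base URL, auth header and event id below are assumptions for illustration only; the field name comes from the diff.

```
# Hypothetical client-side sketch; endpoint URL, auth scheme and event id are
# made up for illustration.
import requests

API_BASE = "https://example.org/api/v2"  # assumed base URL
headers = {"Authorization": "Bearer <access-token>"}  # assumed auth scheme

event = requests.get(f"{API_BASE}/events/1/", headers=headers, timeout=10).json()

# Prefer the server-provided message; fall back only if the API omits it.
message = event.get("cancel_too_late_message") or "Cancelling now may incur costs."
print(message)
```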
google__clusterfuzz-2784
Tolerate empty primary_contact in project setup ``` Traceback (most recent call last): File "/srv/handlers/base_handler.py", line 277, in dispatch_request return super(Handler, self).dispatch_request(*args, **kwargs) File "/layers/google.python.pip/pip/lib/python3.7/site-packages/flask/views.py", line 163, in dispatch_request return meth(*args, **kwargs) File "/srv/libs/handler.py", line 100, in wrapper result = func(self) File "/srv/handlers/cron/project_setup.py", line 1052, in get result = config.set_up(projects) File "/srv/handlers/cron/project_setup.py", line 942, in set_up self.sync_user_permissions(project, info) File "/srv/handlers/cron/project_setup.py", line 883, in sync_user_permissions ccs = ccs_from_info(info) File "/srv/handlers/cron/project_setup.py", line 445, in ccs_from_info ccs.extend(_get_ccs('primary_contact', allow_list=False)) File "/srv/handlers/cron/project_setup.py", line 442, in _get_ccs raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.') handlers.cron.project_setup.ProjectSetupError: Bad value for field primary_contact: None. ```
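The traceback comes from `ccs_from_info` (visible in the file below): `project.yaml` may contain `primary_contact:` with no value, so the key exists but maps to `None`, which falls through to the `ProjectSetupError`. A minimal sketch of the tolerant behaviour the title asks for (standalone, with `info` passed as a parameter instead of closed over; not necessarily the exact fix that landed upstream):

```
# Standalone sketch of tolerating an empty/None field; not necessarily the
# exact upstream fix.
class ProjectSetupError(Exception):
  """Raised when a project.yaml field cannot be interpreted."""


def _get_ccs(info, field_name, allow_list=True):
  """Return the list of emails to CC for a project.yaml field."""
  field_value = info.get(field_name)
  if not field_value:
    # Treat a missing key, None, '' and [] the same way: nobody to CC.
    # Previously a present-but-empty `primary_contact:` raised here.
    return []
  if allow_list and isinstance(field_value, list):
    return field_value
  if isinstance(field_value, str):
    return [field_value]
  raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.')


assert _get_ccs({'primary_contact': None}, 'primary_contact', allow_list=False) == []
assert _get_ccs({'auto_ccs': ['dev@example.com']}, 'auto_ccs') == ['dev@example.com']
```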
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Handler used for setting up oss-fuzz jobs.\"\"\"\n\nimport base64\nimport collections\nimport copy\nimport json\nimport re\n\nfrom google.cloud import ndb\nimport requests\nimport six\nimport yaml\n\nfrom clusterfuzz._internal.base import tasks\nfrom clusterfuzz._internal.base import untrusted\nfrom clusterfuzz._internal.base import utils\nfrom clusterfuzz._internal.config import db_config\nfrom clusterfuzz._internal.config import local_config\nfrom clusterfuzz._internal.datastore import data_handler\nfrom clusterfuzz._internal.datastore import data_types\nfrom clusterfuzz._internal.datastore import ndb_utils\nfrom clusterfuzz._internal.fuzzing import fuzzer_selection\nfrom clusterfuzz._internal.google_cloud_utils import pubsub\nfrom clusterfuzz._internal.google_cloud_utils import storage\nfrom clusterfuzz._internal.metrics import logs\nfrom clusterfuzz._internal.system import environment\nfrom handlers import base_handler\nfrom libs import handler\n\nfrom . import service_accounts\n\nBUILD_BUCKET_PATH_TEMPLATE = (\n 'gs://%BUCKET%/%PROJECT%/%PROJECT%-%SANITIZER%-([0-9]+).zip')\n\nBACKUPS_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=100)\nLOGS_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=14)\nQUARANTINE_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=90)\n\nJOB_TEMPLATE = ('{build_type} = {build_bucket_path}\\n'\n 'PROJECT_NAME = {project}\\n'\n 'SUMMARY_PREFIX = {project}\\n'\n 'MANAGED = True\\n')\n\nOBJECT_VIEWER_IAM_ROLE = 'roles/storage.objectViewer'\nOBJECT_ADMIN_IAM_ROLE = 'roles/storage.objectAdmin'\n\nVALID_PROJECT_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$')\n\nREVISION_URL = ('https://commondatastorage.googleapis.com/'\n '{bucket}/{project}/{project}-{sanitizer}-%s.srcmap.json')\n\nREQUEST_TIMEOUT = 60\n\nALLOWED_VIEW_RESTRICTIONS = ['none', 'security', 'all']\n\nPUBSUB_PLATFORMS = ['linux']\n\nMEMORY_SAFE_LANGUAGES = {'go', 'java', 'python', 'rust'}\nOSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT = 1.0\nOSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT = 0.2\n\nSetupResult = collections.namedtuple('SetupResult', 'project_names job_names')\n\n\nclass ProjectSetupError(Exception):\n \"\"\"Exception.\"\"\"\n\n\nclass JobInfo(object):\n \"\"\"Job information.\"\"\"\n\n def __init__(self,\n prefix,\n engine,\n memory_tool,\n cf_job_templates,\n architecture='x86_64',\n experimental=False,\n minimize_job_override=None):\n self.prefix = prefix\n self.engine = engine\n self.memory_tool = memory_tool\n self.architecture = architecture\n self.cf_job_templates = cf_job_templates\n self.experimental = experimental\n self.minimize_job_override = minimize_job_override\n\n def job_name(self, project_name, config_suffix):\n return (\n self.prefix + data_types.normalized_name(project_name) + config_suffix)\n\n\n# The order of templates is important here. Later templates override settings in\n# the earlier ones. 
An engine template may override vars set for a sanitizer.\nLIBFUZZER_ASAN_JOB = JobInfo('libfuzzer_asan_', 'libfuzzer', 'address',\n ['libfuzzer', 'engine_asan', 'prune'])\nLIBFUZZER_MSAN_JOB = JobInfo('libfuzzer_msan_', 'libfuzzer', 'memory',\n ['libfuzzer', 'engine_msan'])\nLIBFUZZER_UBSAN_JOB = JobInfo('libfuzzer_ubsan_', 'libfuzzer', 'undefined',\n ['libfuzzer', 'engine_ubsan'])\nLIBFUZZER_ASAN_I386_JOB = JobInfo(\n 'libfuzzer_asan_i386_',\n 'libfuzzer',\n 'address', ['libfuzzer', 'engine_asan'],\n architecture='i386')\n\nAFL_ASAN_JOB = JobInfo(\n 'afl_asan_',\n 'afl',\n 'address', ['afl', 'engine_asan'],\n minimize_job_override=LIBFUZZER_ASAN_JOB)\nNO_ENGINE_ASAN_JOB = JobInfo('asan_', 'none', 'address', [])\n\nHONGGFUZZ_ASAN_JOB = JobInfo(\n 'honggfuzz_asan_',\n 'honggfuzz',\n 'address', ['honggfuzz', 'engine_asan'],\n minimize_job_override=LIBFUZZER_ASAN_JOB)\n\nGFT_ASAN_JOB = JobInfo('googlefuzztest_asan_', 'googlefuzztest', 'address',\n ['googlefuzztest', 'engine_asan'])\nGFT_MSAN_JOB = JobInfo('googlefuzztest_msan_', 'googlefuzztest', 'memory',\n ['googlefuzztest', 'engine_msan'])\nGFT_UBSAN_JOB = JobInfo('googlefuzztest_ubsan_', 'googlefuzztest', 'undefined',\n ['googlefuzztest', 'engine_ubsan'])\n\nLIBFUZZER_NONE_JOB = JobInfo('libfuzzer_nosanitizer_', 'libfuzzer', 'none',\n ['libfuzzer'])\nLIBFUZZER_NONE_I386_JOB = JobInfo(\n 'libfuzzer_nosanitizer_i386_',\n 'libfuzzer',\n 'none', ['libfuzzer'],\n architecture='i386')\n\nJOB_MAP = {\n 'libfuzzer': {\n 'x86_64': {\n 'address': LIBFUZZER_ASAN_JOB,\n 'memory': LIBFUZZER_MSAN_JOB,\n 'undefined': LIBFUZZER_UBSAN_JOB,\n 'none': LIBFUZZER_NONE_JOB,\n },\n 'i386': {\n 'address': LIBFUZZER_ASAN_I386_JOB,\n 'none': LIBFUZZER_NONE_I386_JOB,\n },\n },\n 'afl': {\n 'x86_64': {\n 'address': AFL_ASAN_JOB,\n }\n },\n 'honggfuzz': {\n 'x86_64': {\n 'address': HONGGFUZZ_ASAN_JOB,\n },\n },\n 'googlefuzztest': {\n 'x86_64': {\n 'address': GFT_ASAN_JOB,\n 'memory': GFT_MSAN_JOB,\n 'undefined': GFT_UBSAN_JOB,\n },\n },\n 'none': {\n 'x86_64': {\n 'address': NO_ENGINE_ASAN_JOB,\n }\n }\n}\n\nDEFAULT_ARCHITECTURES = ['x86_64']\nDEFAULT_SANITIZERS = ['address', 'undefined']\nDEFAULT_ENGINES = ['libfuzzer', 'afl', 'honggfuzz']\n\n\ndef _to_experimental_job(job_info):\n job_info = copy.copy(job_info)\n job_info.experimental = True\n return job_info\n\n\ndef get_github_url(url):\n \"\"\"Return contents of URL.\"\"\"\n github_credentials = db_config.get_value('github_credentials')\n if not github_credentials:\n raise ProjectSetupError('No github credentials.')\n\n client_id, client_secret = github_credentials.strip().split(';')\n response = requests.get(url, auth=(client_id, client_secret))\n if response.status_code != 200:\n logs.log_error(\n f'Failed to get github url: {url}.', status_code=response.status_code)\n response.raise_for_status()\n\n return json.loads(response.text)\n\n\ndef find_github_item_url(github_json, name):\n \"\"\"Get url of a blob/tree from a github json response.\"\"\"\n for item in github_json['tree']:\n if item['path'] == name:\n return item['url']\n\n return None\n\n\ndef get_oss_fuzz_projects():\n \"\"\"Return list of projects for oss-fuzz.\"\"\"\n ossfuzz_tree_url = ('https://api.github.com/repos/google/oss-fuzz/'\n 'git/trees/master')\n tree = get_github_url(ossfuzz_tree_url)\n projects = []\n\n projects_url = find_github_item_url(tree, 'projects')\n if not projects_url:\n logs.log_error('No projects found.')\n return []\n\n tree = get_github_url(projects_url)\n for item in tree['tree']:\n if item['type'] != 'tree':\n 
continue\n\n item_json = get_github_url(item['url'])\n project_yaml_url = find_github_item_url(item_json, 'project.yaml')\n if not project_yaml_url:\n continue\n\n projects_yaml = get_github_url(project_yaml_url)\n info = yaml.safe_load(base64.b64decode(projects_yaml['content']))\n\n has_dockerfile = (\n find_github_item_url(item_json, 'Dockerfile') or 'dockerfile' in info)\n if not has_dockerfile:\n continue\n\n projects.append((item['path'], info))\n\n return projects\n\n\ndef get_projects_from_gcs(gcs_url):\n \"\"\"Get projects from GCS path.\"\"\"\n data = json.loads(storage.read_data(gcs_url))\n return [(project['name'], project) for project in data['projects']]\n\n\ndef _process_sanitizers_field(sanitizers):\n \"\"\"Pre-process sanitizers field into a map from sanitizer name -> dict of\n options.\"\"\"\n processed_sanitizers = {}\n if not isinstance(sanitizers, list):\n return None\n\n # each field can either be a Map or a String:\n # sanitizers:\n # - undefined:\n # experimental: true\n # - address\n # - memory\n for sanitizer in sanitizers:\n if isinstance(sanitizer, str):\n processed_sanitizers[sanitizer] = {}\n elif isinstance(sanitizer, dict):\n for key, value in six.iteritems(sanitizer):\n processed_sanitizers[key] = value\n else:\n return None\n\n return processed_sanitizers\n\n\ndef get_jobs_for_project(project, info):\n \"\"\"Return jobs for the project.\"\"\"\n sanitizers = _process_sanitizers_field(\n info.get('sanitizers', DEFAULT_SANITIZERS))\n if not sanitizers:\n logs.log_error(f'Invalid sanitizers field for {project}.')\n return []\n\n engines = info.get('fuzzing_engines', DEFAULT_ENGINES)\n architectures = info.get('architectures', DEFAULT_ARCHITECTURES)\n\n jobs = []\n for engine in engines:\n if engine not in JOB_MAP:\n continue\n\n for architecture in architectures:\n if architecture not in JOB_MAP[engine]:\n continue\n\n for sanitizer, options in six.iteritems(sanitizers):\n experimental = (\n options.get('experimental', False) or\n info.get('experimental', False))\n if sanitizer in JOB_MAP[engine][architecture]:\n job = JOB_MAP[engine][architecture][sanitizer]\n if experimental:\n job = _to_experimental_job(job)\n\n jobs.append(job)\n\n return jobs\n\n\ndef convert_googlemail_to_gmail(email):\n \"\"\"Convert @googlemail.com to @gmail.com.\"\"\"\n # TODO(ochang): Investiate if we can/need to do this in general, and not just\n # for cloud storage bucket IAMs.\n if email.endswith('@googlemail.com'):\n return email.split('@')[0] + '@gmail.com'\n\n return email\n\n\ndef _add_users_to_bucket(info, client, bucket_name, iam_policy):\n \"\"\"Add user account to bucket.\"\"\"\n ccs = sorted(\n ['user:' + convert_googlemail_to_gmail(cc) for cc in ccs_from_info(info)])\n binding = storage.get_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)\n\n if binding:\n # buckets.getIamPolicy can return duplicate members when we add a @gmail.com\n # as well as @googlemail.com address for the same account.\n binding['members'] = sorted(list(set(binding['members'])))\n if binding['members'] == ccs:\n return iam_policy\n\n filtered_members = [\n member for member in binding['members'] if member in ccs\n ]\n\n if len(filtered_members) != len(binding['members']):\n # Remove old members.\n binding['members'] = filtered_members\n iam_policy = storage.set_bucket_iam_policy(client, bucket_name,\n iam_policy)\n\n # We might have no binding either from start or after filtering members above.\n # Create a new one in those cases.\n binding = 
storage.get_or_create_bucket_iam_binding(iam_policy,\n OBJECT_VIEWER_IAM_ROLE)\n\n for cc in ccs:\n if cc in binding['members']:\n continue\n\n logs.log(f'Adding {cc} to bucket IAM for {bucket_name}.')\n # Add CCs one at a time since the API does not work with invalid or\n # non-Google emails.\n modified_iam_policy = storage.add_single_bucket_iam(\n client, iam_policy, OBJECT_VIEWER_IAM_ROLE, bucket_name, cc)\n if modified_iam_policy:\n iam_policy = modified_iam_policy\n binding = storage.get_bucket_iam_binding(iam_policy,\n OBJECT_VIEWER_IAM_ROLE)\n\n if not binding['members']:\n # Check that the final binding has members. Empty bindings are not valid.\n storage.remove_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)\n\n return iam_policy\n\n\ndef _set_bucket_service_account(service_account, client, bucket_name,\n iam_policy):\n \"\"\"Set service account for a bucket.\"\"\"\n # Add service account as objectAdmin.\n binding = storage.get_or_create_bucket_iam_binding(iam_policy,\n OBJECT_ADMIN_IAM_ROLE)\n\n members = ['serviceAccount:' + service_account['email']]\n if members == binding['members']:\n # No changes required.\n return iam_policy\n\n binding['members'] = members\n return storage.set_bucket_iam_policy(client, bucket_name, iam_policy)\n\n\ndef add_bucket_iams(info, client, bucket_name, service_account):\n \"\"\"Add CC'ed users to storage bucket IAM.\"\"\"\n iam_policy = storage.get_bucket_iam_policy(client, bucket_name)\n if not iam_policy:\n return\n\n iam_policy = _add_users_to_bucket(info, client, bucket_name, iam_policy)\n _set_bucket_service_account(service_account, client, bucket_name, iam_policy)\n\n\ndef add_service_account_to_bucket(client, bucket_name, service_account, role):\n \"\"\"Add service account to the gcr.io images bucket.\"\"\"\n iam_policy = storage.get_bucket_iam_policy(client, bucket_name)\n if not iam_policy:\n return\n\n binding = storage.get_or_create_bucket_iam_binding(iam_policy, role)\n\n member = 'serviceAccount:' + service_account['email']\n if member in binding['members']:\n # No changes required.\n return\n\n binding['members'].append(member)\n storage.set_bucket_iam_policy(client, bucket_name, iam_policy)\n\n\ndef has_maintainer(info):\n \"\"\"Return whether or not a project has at least one maintainer.\"\"\"\n return info.get('primary_contact') or info.get('auto_ccs')\n\n\ndef ccs_from_info(info):\n \"\"\"Get list of CC's from project info.\"\"\"\n\n def _get_ccs(field_name, allow_list=True):\n \"\"\"Return list of emails to cc given a field name.\"\"\"\n if field_name not in info:\n return []\n\n field_value = info.get(field_name)\n if allow_list and isinstance(field_value, list):\n return field_value\n if isinstance(field_value, str):\n return [field_value]\n\n raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.')\n\n ccs = []\n ccs.extend(_get_ccs('primary_contact', allow_list=False))\n ccs.extend(_get_ccs('auto_ccs'))\n ccs.extend(_get_ccs('vendor_ccs'))\n\n return [utils.normalize_email(cc) for cc in ccs]\n\n\ndef update_fuzzer_jobs(fuzzer_entities, job_names):\n \"\"\"Update fuzzer job mappings.\"\"\"\n to_delete = {}\n\n for fuzzer_entity_key in fuzzer_entities:\n fuzzer_entity = fuzzer_entity_key.get()\n\n for job in data_types.Job.query():\n if not job.environment_string:\n continue\n\n job_environment = job.get_environment()\n if not utils.string_is_true(job_environment.get('MANAGED', 'False')):\n continue\n\n if job.name in job_names:\n continue\n\n logs.log(f'Deleting job {job.name}')\n 
to_delete[job.name] = job.key\n\n try:\n fuzzer_entity.jobs.remove(job.name)\n except ValueError:\n pass\n\n fuzzer_entity.put()\n fuzzer_selection.update_mappings_for_fuzzer(fuzzer_entity)\n\n if to_delete:\n ndb_utils.delete_multi(to_delete.values())\n\n\ndef cleanup_old_projects_settings(project_names):\n \"\"\"Delete old projects that are no longer used or disabled.\"\"\"\n to_delete = []\n\n for project in data_types.OssFuzzProject.query():\n if project.name not in project_names:\n logs.log(f'Deleting project {project.name}.')\n to_delete.append(project.key)\n\n if to_delete:\n ndb_utils.delete_multi(to_delete)\n\n\ndef create_project_settings(project, info, service_account):\n \"\"\"Setup settings for ClusterFuzz (such as CPU distribution).\"\"\"\n key = ndb.Key(data_types.OssFuzzProject, project)\n oss_fuzz_project = key.get()\n\n # Expecting to run a blackbox fuzzer, so use high end hosts.\n is_high_end = info.get('blackbox', False)\n\n ccs = ccs_from_info(info)\n language = info.get('language')\n\n if oss_fuzz_project:\n if oss_fuzz_project.service_account != service_account['email']:\n oss_fuzz_project.service_account = service_account['email']\n oss_fuzz_project.put()\n\n if oss_fuzz_project.high_end != is_high_end:\n oss_fuzz_project.high_end = is_high_end\n oss_fuzz_project.put()\n\n if oss_fuzz_project.ccs != ccs:\n oss_fuzz_project.ccs = ccs\n oss_fuzz_project.put()\n else:\n if language in MEMORY_SAFE_LANGUAGES:\n cpu_weight = OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT\n else:\n cpu_weight = OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT\n\n data_types.OssFuzzProject(\n id=project,\n name=project,\n high_end=is_high_end,\n cpu_weight=cpu_weight,\n service_account=service_account['email'],\n ccs=ccs).put()\n\n\ndef create_pubsub_topics(project):\n \"\"\"Create pubsub topics for tasks.\"\"\"\n for platform in PUBSUB_PLATFORMS:\n name = untrusted.queue_name(project, platform)\n client = pubsub.PubSubClient()\n application_id = utils.get_application_id()\n\n topic_name = pubsub.topic_name(application_id, name)\n if client.get_topic(topic_name) is None:\n client.create_topic(topic_name)\n\n subscription_name = pubsub.subscription_name(application_id, name)\n if client.get_subscription(subscription_name) is None:\n client.create_subscription(subscription_name, topic_name)\n\n\ndef cleanup_pubsub_topics(project_names):\n \"\"\"Delete old pubsub topics and subscriptions.\"\"\"\n client = pubsub.PubSubClient()\n application_id = utils.get_application_id()\n\n expected_topics = set()\n for platform in PUBSUB_PLATFORMS:\n expected_topics.update(\n [untrusted.queue_name(project, platform) for project in project_names])\n\n pubsub_config = local_config.Config('pubsub.queues')\n unmanaged_queues = [queue['name'] for queue in pubsub_config.get('resources')]\n\n for topic in client.list_topics(pubsub.project_name(application_id)):\n _, name = pubsub.parse_name(topic)\n\n if (not name.startswith(tasks.JOBS_PREFIX) and\n not name.startswith(tasks.HIGH_END_JOBS_PREFIX)):\n # Some topic created by another service, ignore.\n continue\n\n if name in unmanaged_queues:\n continue\n\n if name in expected_topics:\n continue\n\n for subscription in client.list_topic_subscriptions(topic):\n client.delete_subscription(subscription)\n\n client.delete_topic(topic)\n\n\nclass ProjectSetup(object):\n \"\"\"Project setup.\"\"\"\n\n def __init__(self,\n build_bucket_path_template,\n revision_url_template,\n build_type,\n config_suffix='',\n external_config=None,\n segregate_projects=False,\n 
experimental_sanitizers=None,\n engine_build_buckets=None,\n fuzzer_entities=None,\n add_info_labels=False,\n add_revision_mappings=False,\n additional_vars=None):\n self._build_type = build_type\n self._config_suffix = config_suffix\n self._external_config = external_config\n self._build_bucket_path_template = build_bucket_path_template\n self._revision_url_template = revision_url_template\n self._segregate_projects = segregate_projects\n self._experimental_sanitizers = experimental_sanitizers\n self._engine_build_buckets = engine_build_buckets\n self._fuzzer_entities = fuzzer_entities\n self._add_info_labels = add_info_labels\n self._add_revision_mappings = add_revision_mappings\n self._additional_vars = additional_vars\n\n def _get_build_bucket(self, engine, architecture):\n \"\"\"Return the bucket for the given |engine| and |architecture|.\"\"\"\n if architecture != 'x86_64':\n engine += '-' + architecture\n\n bucket = self._engine_build_buckets.get(engine)\n if not bucket:\n raise ProjectSetupError('Invalid fuzzing engine ' + engine)\n\n return bucket\n\n def _deployment_bucket_name(self):\n \"\"\"Deployment bucket name.\"\"\"\n return f'{utils.get_application_id()}-deployment'\n\n def _shared_corpus_bucket_name(self):\n \"\"\"Shared corpus bucket name.\"\"\"\n return environment.get_value('SHARED_CORPUS_BUCKET')\n\n def _mutator_plugins_bucket_name(self):\n \"\"\"Mutator plugins bucket name.\"\"\"\n return environment.get_value('MUTATOR_PLUGINS_BUCKET')\n\n def _backup_bucket_name(self, project_name):\n \"\"\"Return the backup_bucket_name.\"\"\"\n return project_name + '-backup.' + data_handler.bucket_domain_suffix()\n\n def _corpus_bucket_name(self, project_name):\n \"\"\"Return the corpus_bucket_name.\"\"\"\n return project_name + '-corpus.' + data_handler.bucket_domain_suffix()\n\n def _quarantine_bucket_name(self, project_name):\n \"\"\"Return the quarantine_bucket_name.\"\"\"\n return project_name + '-quarantine.' + data_handler.bucket_domain_suffix()\n\n def _logs_bucket_name(self, project_name):\n \"\"\"Return the logs bucket name.\"\"\"\n return project_name + '-logs.' 
+ data_handler.bucket_domain_suffix()\n\n def _create_service_accounts_and_buckets(self, project, info):\n \"\"\"Create per-project service account and buckets.\"\"\"\n service_account, exists = service_accounts.get_or_create_service_account(\n project)\n if not exists:\n # TODO(ochang): Temporary hack to get around\n # https://github.com/google/clusterfuzz/issues/2775.\n service_accounts.set_service_account_roles(service_account)\n\n # Create GCS buckets.\n backup_bucket_name = self._backup_bucket_name(project)\n corpus_bucket_name = self._corpus_bucket_name(project)\n logs_bucket_name = self._logs_bucket_name(project)\n quarantine_bucket_name = self._quarantine_bucket_name(project)\n\n storage.create_bucket_if_needed(backup_bucket_name, BACKUPS_LIFECYCLE)\n storage.create_bucket_if_needed(corpus_bucket_name)\n storage.create_bucket_if_needed(quarantine_bucket_name,\n QUARANTINE_LIFECYCLE)\n storage.create_bucket_if_needed(logs_bucket_name, LOGS_LIFECYCLE)\n\n client = storage.create_discovery_storage_client()\n try:\n add_bucket_iams(info, client, backup_bucket_name, service_account)\n add_bucket_iams(info, client, corpus_bucket_name, service_account)\n add_bucket_iams(info, client, logs_bucket_name, service_account)\n add_bucket_iams(info, client, quarantine_bucket_name, service_account)\n except Exception as e:\n logs.log_error(f'Failed to add bucket IAMs for {project}: {e}.')\n\n # Grant the service account read access to deployment, shared corpus and\n # mutator plugin buckets.\n add_service_account_to_bucket(client, self._deployment_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n add_service_account_to_bucket(client, self._shared_corpus_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n add_service_account_to_bucket(client, self._mutator_plugins_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n\n data_bundles = {\n fuzzer_entity.get().data_bundle_name\n for fuzzer_entity in six.itervalues(self._fuzzer_entities)\n }\n for data_bundle in data_bundles:\n if not data_bundle:\n continue\n\n # Workers also need to be able to set up these global bundles.\n data_bundle_bucket_name = data_handler.get_data_bundle_bucket_name(\n data_bundle)\n add_service_account_to_bucket(client, data_bundle_bucket_name,\n service_account, OBJECT_VIEWER_IAM_ROLE)\n\n return (service_account, backup_bucket_name, corpus_bucket_name,\n logs_bucket_name, quarantine_bucket_name)\n\n def _get_build_bucket_path(self, project_name, info, engine, memory_tool,\n architecture):\n \"\"\"Returns the build bucket path for the |project|, |engine|, |memory_tool|,\n and |architecture|.\"\"\"\n build_path = info.get('build_path')\n if not build_path:\n build_path = self._build_bucket_path_template\n\n build_path = build_path.replace(\n '%BUCKET%', self._get_build_bucket(engine, architecture))\n build_path = build_path.replace('%PROJECT%', project_name)\n build_path = build_path.replace('%ENGINE%', engine)\n build_path = build_path.replace('%SANITIZER%', memory_tool)\n return build_path\n\n def _sync_job(self, project, info, corpus_bucket_name, quarantine_bucket_name,\n logs_bucket_name, backup_bucket_name):\n \"\"\"Sync the config with ClusterFuzz.\"\"\"\n # Create/update ClusterFuzz jobs.\n job_names = []\n\n for template in get_jobs_for_project(project, info):\n if template.engine == 'none':\n # Engine-less jobs are not automatically managed.\n continue\n\n fuzzer_entity = self._fuzzer_entities.get(template.engine).get()\n if not fuzzer_entity:\n raise ProjectSetupError('Invalid fuzzing engine 
' + template.engine)\n\n job_name = template.job_name(project, self._config_suffix)\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n job = data_types.Job()\n\n if self._external_config:\n if ('reproduction_topic' not in self._external_config or\n 'updates_subscription' not in self._external_config):\n raise ProjectSetupError('Invalid external_config.')\n\n job.external_reproduction_topic = self._external_config[\n 'reproduction_topic']\n job.external_updates_subscription = self._external_config[\n 'updates_subscription']\n else:\n job.external_reproduction_topic = None\n job.external_updates_subscription = None\n\n if not info.get('disabled', False):\n job_names.append(job_name)\n if job_name not in fuzzer_entity.jobs and not job.is_external():\n # Enable new job.\n fuzzer_entity.jobs.append(job_name)\n fuzzer_entity.put()\n\n job.name = job_name\n if self._segregate_projects:\n job.platform = untrusted.platform_name(project, 'linux')\n else:\n # TODO(ochang): Support other platforms?\n job.platform = 'LINUX'\n\n job.templates = template.cf_job_templates\n\n job.environment_string = JOB_TEMPLATE.format(\n build_type=self._build_type,\n build_bucket_path=self._get_build_bucket_path(\n project, info, template.engine, template.memory_tool,\n template.architecture),\n engine=template.engine,\n project=project)\n\n if self._add_revision_mappings:\n revision_vars_url = self._revision_url_template.format(\n project=project,\n bucket=self._get_build_bucket(template.engine,\n template.architecture),\n sanitizer=template.memory_tool)\n\n job.environment_string += f'REVISION_VARS_URL = {revision_vars_url}\\n'\n\n if logs_bucket_name:\n job.environment_string += f'FUZZ_LOGS_BUCKET = {logs_bucket_name}\\n'\n\n if corpus_bucket_name:\n job.environment_string += f'CORPUS_BUCKET = {corpus_bucket_name}\\n'\n\n if quarantine_bucket_name:\n job.environment_string += (\n f'QUARANTINE_BUCKET = {quarantine_bucket_name}\\n')\n\n if backup_bucket_name:\n job.environment_string += f'BACKUP_BUCKET = {backup_bucket_name}\\n'\n\n if self._add_info_labels:\n automatic_labels = [f'Proj-{project}', f'Engine-{template.engine}']\n labels = info.get('labels')\n if labels and '*' in labels:\n automatic_labels.extend(labels['*'])\n automatic_labels = ','.join(automatic_labels)\n job.environment_string += f'AUTOMATIC_LABELS = {automatic_labels}\\n'\n\n help_url = info.get('help_url')\n if help_url:\n job.environment_string += f'HELP_URL = {help_url}\\n'\n\n if (template.experimental or\n (self._experimental_sanitizers and\n template.memory_tool in self._experimental_sanitizers)):\n job.environment_string += 'EXPERIMENTAL = True\\n'\n\n if template.minimize_job_override:\n minimize_job_override = template.minimize_job_override.job_name(\n project, self._config_suffix)\n job.environment_string += (\n f'MINIMIZE_JOB_OVERRIDE = {minimize_job_override}\\n')\n\n view_restrictions = info.get('view_restrictions')\n if view_restrictions:\n if view_restrictions in ALLOWED_VIEW_RESTRICTIONS:\n job.environment_string += (\n f'ISSUE_VIEW_RESTRICTIONS = {view_restrictions}\\n')\n else:\n logs.log_error(\n f'Invalid view restriction setting {view_restrictions} '\n f'for project {project}.')\n\n if not has_maintainer(info):\n job.environment_string += 'DISABLE_DISCLOSURE = True\\n'\n\n selective_unpack = info.get('selective_unpack')\n if selective_unpack:\n job.environment_string += 'UNPACK_ALL_FUZZ_TARGETS_AND_FILES = False\\n'\n\n main_repo = info.get('main_repo')\n if main_repo:\n job.environment_string 
+= f'MAIN_REPO = {main_repo}\\n'\n\n file_github_issue = info.get('file_github_issue', False)\n job.environment_string += f'FILE_GITHUB_ISSUE = {file_github_issue}\\n'\n\n if (template.engine == 'libfuzzer' and\n template.architecture == 'x86_64' and\n 'dataflow' in info.get('fuzzing_engines', DEFAULT_ENGINES)):\n # Dataflow binaries are built with dataflow sanitizer, but can be used\n # as an auxiliary build with libFuzzer builds (e.g. with ASan or UBSan).\n dataflow_build_bucket_path = self._get_build_bucket_path(\n project_name=project,\n info=info,\n engine='dataflow',\n memory_tool='dataflow',\n architecture=template.architecture)\n job.environment_string += (\n f'DATAFLOW_BUILD_BUCKET_PATH = {dataflow_build_bucket_path}\\n')\n\n if self._additional_vars:\n additional_vars = {}\n additional_vars.update(self._additional_vars.get('all', {}))\n\n engine_vars = self._additional_vars.get(template.engine, {})\n engine_sanitizer_vars = engine_vars.get(template.memory_tool, {})\n additional_vars.update(engine_sanitizer_vars)\n\n for key, value in sorted(six.iteritems(additional_vars)):\n job.environment_string += (\n f'{key} = {str(value).encode(\"unicode-escape\").decode(\"utf-8\")}\\n'\n )\n\n job.put()\n\n return job_names\n\n def sync_user_permissions(self, project, info):\n \"\"\"Sync permissions of project based on project.yaml.\"\"\"\n ccs = ccs_from_info(info)\n\n for template in get_jobs_for_project(project, info):\n job_name = template.job_name(project, self._config_suffix)\n\n # Delete removed CCs.\n existing_ccs = data_types.ExternalUserPermission.query(\n data_types.ExternalUserPermission.entity_kind ==\n data_types.PermissionEntityKind.JOB,\n data_types.ExternalUserPermission.entity_name == job_name)\n ndb_utils.delete_multi([\n permission.key\n for permission in existing_ccs\n if permission.email not in ccs\n ])\n\n for cc in ccs:\n query = data_types.ExternalUserPermission.query(\n data_types.ExternalUserPermission.email == cc,\n data_types.ExternalUserPermission.entity_kind ==\n data_types.PermissionEntityKind.JOB,\n data_types.ExternalUserPermission.entity_name == job_name)\n\n existing_permission = query.get()\n if existing_permission:\n continue\n\n data_types.ExternalUserPermission(\n email=cc,\n entity_kind=data_types.PermissionEntityKind.JOB,\n entity_name=job_name,\n is_prefix=False,\n auto_cc=data_types.AutoCCType.ALL).put()\n\n def set_up(self, projects):\n \"\"\"Do project setup. 
Return a list of all the project names that were set\n up.\"\"\"\n job_names = []\n for project, info in projects:\n logs.log(f'Syncing configs for {project}.')\n\n backup_bucket_name = None\n corpus_bucket_name = None\n logs_bucket_name = None\n quarantine_bucket_name = None\n\n if self._segregate_projects:\n # Create per project service account and GCS buckets.\n (service_account, backup_bucket_name, corpus_bucket_name,\n logs_bucket_name, quarantine_bucket_name) = (\n self._create_service_accounts_and_buckets(project, info))\n\n # Create CF jobs for project.\n current_job_names = self._sync_job(project, info, corpus_bucket_name,\n quarantine_bucket_name,\n logs_bucket_name, backup_bucket_name)\n job_names.extend(current_job_names)\n\n if self._segregate_projects:\n self.sync_user_permissions(project, info)\n\n # Create Pub/Sub topics for tasks.\n create_pubsub_topics(project)\n\n # Set up projects settings (such as CPU distribution settings).\n if not info.get('disabled', False):\n create_project_settings(project, info, service_account)\n\n # Delete old/disabled project settings.\n enabled_projects = [\n project for project, info in projects\n if not info.get('disabled', False)\n ]\n return SetupResult(enabled_projects, job_names)\n\n\ndef cleanup_stale_projects(fuzzer_entities, project_names, job_names,\n segregate_projects):\n \"\"\"Clean up stale projects.\"\"\"\n update_fuzzer_jobs(fuzzer_entities, job_names)\n cleanup_old_projects_settings(project_names)\n\n if segregate_projects:\n cleanup_pubsub_topics(project_names)\n\n\nclass Handler(base_handler.Handler):\n \"\"\"Setup ClusterFuzz jobs for projects.\"\"\"\n\n @handler.cron()\n def get(self):\n \"\"\"Handles a GET request.\"\"\"\n libfuzzer = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'libFuzzer').get()\n if not libfuzzer:\n logs.log_error('Failed to get libFuzzer Fuzzer entity.')\n return\n\n afl = data_types.Fuzzer.query(data_types.Fuzzer.name == 'afl').get()\n if not afl:\n logs.log_error('Failed to get AFL Fuzzer entity.')\n return\n\n honggfuzz = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'honggfuzz').get()\n if not honggfuzz:\n logs.log_error('Failed to get honggfuzz Fuzzer entity.')\n return\n\n gft = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'googlefuzztest').get()\n if not gft:\n logs.log_error('Failed to get googlefuzztest Fuzzer entity.')\n return\n\n project_config = local_config.ProjectConfig()\n segregate_projects = project_config.get('segregate_projects')\n project_setup_configs = project_config.get('project_setup')\n project_names = set()\n job_names = set()\n\n fuzzer_entities = {\n 'afl': afl.key,\n 'honggfuzz': honggfuzz.key,\n 'googlefuzztest': gft.key,\n 'libfuzzer': libfuzzer.key,\n }\n\n for setup_config in project_setup_configs:\n bucket_config = setup_config.get('build_buckets')\n\n if not bucket_config:\n raise ProjectSetupError('Project setup buckets not specified.')\n\n config = ProjectSetup(\n BUILD_BUCKET_PATH_TEMPLATE,\n REVISION_URL,\n setup_config.get('build_type'),\n config_suffix=setup_config.get('job_suffix', ''),\n external_config=setup_config.get('external_config', ''),\n segregate_projects=segregate_projects,\n experimental_sanitizers=setup_config.get('experimental_sanitizers',\n []),\n engine_build_buckets={\n 'libfuzzer': bucket_config.get('libfuzzer'),\n 'libfuzzer-i386': bucket_config.get('libfuzzer_i386'),\n 'afl': bucket_config.get('afl'),\n 'honggfuzz': bucket_config.get('honggfuzz'),\n 'googlefuzztest': bucket_config.get('googlefuzztest'),\n 'none': 
bucket_config.get('no_engine'),\n 'dataflow': bucket_config.get('dataflow'),\n },\n fuzzer_entities=fuzzer_entities,\n add_info_labels=setup_config.get('add_info_labels', False),\n add_revision_mappings=setup_config.get('add_revision_mappings',\n False),\n additional_vars=setup_config.get('additional_vars'))\n\n projects_source = setup_config.get('source')\n if projects_source == 'oss-fuzz':\n projects = get_oss_fuzz_projects()\n elif projects_source.startswith(storage.GS_PREFIX):\n projects = get_projects_from_gcs(projects_source)\n else:\n raise ProjectSetupError('Invalid projects source: ' + projects_source)\n\n if not projects:\n raise ProjectSetupError('Missing projects list.')\n\n result = config.set_up(projects)\n project_names.update(result.project_names)\n job_names.update(result.job_names)\n\n cleanup_stale_projects(\n list(fuzzer_entities.values()), project_names, job_names,\n segregate_projects)\n", "path": "src/appengine/handlers/cron/project_setup.py" } ]
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Handler used for setting up oss-fuzz jobs.\"\"\"\n\nimport base64\nimport collections\nimport copy\nimport json\nimport re\n\nfrom google.cloud import ndb\nimport requests\nimport six\nimport yaml\n\nfrom clusterfuzz._internal.base import tasks\nfrom clusterfuzz._internal.base import untrusted\nfrom clusterfuzz._internal.base import utils\nfrom clusterfuzz._internal.config import db_config\nfrom clusterfuzz._internal.config import local_config\nfrom clusterfuzz._internal.datastore import data_handler\nfrom clusterfuzz._internal.datastore import data_types\nfrom clusterfuzz._internal.datastore import ndb_utils\nfrom clusterfuzz._internal.fuzzing import fuzzer_selection\nfrom clusterfuzz._internal.google_cloud_utils import pubsub\nfrom clusterfuzz._internal.google_cloud_utils import storage\nfrom clusterfuzz._internal.metrics import logs\nfrom clusterfuzz._internal.system import environment\nfrom handlers import base_handler\nfrom libs import handler\n\nfrom . import service_accounts\n\nBUILD_BUCKET_PATH_TEMPLATE = (\n 'gs://%BUCKET%/%PROJECT%/%PROJECT%-%SANITIZER%-([0-9]+).zip')\n\nBACKUPS_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=100)\nLOGS_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=14)\nQUARANTINE_LIFECYCLE = storage.generate_life_cycle_config('Delete', age=90)\n\nJOB_TEMPLATE = ('{build_type} = {build_bucket_path}\\n'\n 'PROJECT_NAME = {project}\\n'\n 'SUMMARY_PREFIX = {project}\\n'\n 'MANAGED = True\\n')\n\nOBJECT_VIEWER_IAM_ROLE = 'roles/storage.objectViewer'\nOBJECT_ADMIN_IAM_ROLE = 'roles/storage.objectAdmin'\n\nVALID_PROJECT_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$')\n\nREVISION_URL = ('https://commondatastorage.googleapis.com/'\n '{bucket}/{project}/{project}-{sanitizer}-%s.srcmap.json')\n\nREQUEST_TIMEOUT = 60\n\nALLOWED_VIEW_RESTRICTIONS = ['none', 'security', 'all']\n\nPUBSUB_PLATFORMS = ['linux']\n\nMEMORY_SAFE_LANGUAGES = {'go', 'java', 'python', 'rust'}\nOSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT = 1.0\nOSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT = 0.2\n\nSetupResult = collections.namedtuple('SetupResult', 'project_names job_names')\n\n\nclass ProjectSetupError(Exception):\n \"\"\"Exception.\"\"\"\n\n\nclass JobInfo(object):\n \"\"\"Job information.\"\"\"\n\n def __init__(self,\n prefix,\n engine,\n memory_tool,\n cf_job_templates,\n architecture='x86_64',\n experimental=False,\n minimize_job_override=None):\n self.prefix = prefix\n self.engine = engine\n self.memory_tool = memory_tool\n self.architecture = architecture\n self.cf_job_templates = cf_job_templates\n self.experimental = experimental\n self.minimize_job_override = minimize_job_override\n\n def job_name(self, project_name, config_suffix):\n return (\n self.prefix + data_types.normalized_name(project_name) + config_suffix)\n\n\n# The order of templates is important here. Later templates override settings in\n# the earlier ones. 
An engine template may override vars set for a sanitizer.\nLIBFUZZER_ASAN_JOB = JobInfo('libfuzzer_asan_', 'libfuzzer', 'address',\n ['libfuzzer', 'engine_asan', 'prune'])\nLIBFUZZER_MSAN_JOB = JobInfo('libfuzzer_msan_', 'libfuzzer', 'memory',\n ['libfuzzer', 'engine_msan'])\nLIBFUZZER_UBSAN_JOB = JobInfo('libfuzzer_ubsan_', 'libfuzzer', 'undefined',\n ['libfuzzer', 'engine_ubsan'])\nLIBFUZZER_ASAN_I386_JOB = JobInfo(\n 'libfuzzer_asan_i386_',\n 'libfuzzer',\n 'address', ['libfuzzer', 'engine_asan'],\n architecture='i386')\n\nAFL_ASAN_JOB = JobInfo(\n 'afl_asan_',\n 'afl',\n 'address', ['afl', 'engine_asan'],\n minimize_job_override=LIBFUZZER_ASAN_JOB)\nNO_ENGINE_ASAN_JOB = JobInfo('asan_', 'none', 'address', [])\n\nHONGGFUZZ_ASAN_JOB = JobInfo(\n 'honggfuzz_asan_',\n 'honggfuzz',\n 'address', ['honggfuzz', 'engine_asan'],\n minimize_job_override=LIBFUZZER_ASAN_JOB)\n\nGFT_ASAN_JOB = JobInfo('googlefuzztest_asan_', 'googlefuzztest', 'address',\n ['googlefuzztest', 'engine_asan'])\nGFT_MSAN_JOB = JobInfo('googlefuzztest_msan_', 'googlefuzztest', 'memory',\n ['googlefuzztest', 'engine_msan'])\nGFT_UBSAN_JOB = JobInfo('googlefuzztest_ubsan_', 'googlefuzztest', 'undefined',\n ['googlefuzztest', 'engine_ubsan'])\n\nLIBFUZZER_NONE_JOB = JobInfo('libfuzzer_nosanitizer_', 'libfuzzer', 'none',\n ['libfuzzer'])\nLIBFUZZER_NONE_I386_JOB = JobInfo(\n 'libfuzzer_nosanitizer_i386_',\n 'libfuzzer',\n 'none', ['libfuzzer'],\n architecture='i386')\n\nJOB_MAP = {\n 'libfuzzer': {\n 'x86_64': {\n 'address': LIBFUZZER_ASAN_JOB,\n 'memory': LIBFUZZER_MSAN_JOB,\n 'undefined': LIBFUZZER_UBSAN_JOB,\n 'none': LIBFUZZER_NONE_JOB,\n },\n 'i386': {\n 'address': LIBFUZZER_ASAN_I386_JOB,\n 'none': LIBFUZZER_NONE_I386_JOB,\n },\n },\n 'afl': {\n 'x86_64': {\n 'address': AFL_ASAN_JOB,\n }\n },\n 'honggfuzz': {\n 'x86_64': {\n 'address': HONGGFUZZ_ASAN_JOB,\n },\n },\n 'googlefuzztest': {\n 'x86_64': {\n 'address': GFT_ASAN_JOB,\n 'memory': GFT_MSAN_JOB,\n 'undefined': GFT_UBSAN_JOB,\n },\n },\n 'none': {\n 'x86_64': {\n 'address': NO_ENGINE_ASAN_JOB,\n }\n }\n}\n\nDEFAULT_ARCHITECTURES = ['x86_64']\nDEFAULT_SANITIZERS = ['address', 'undefined']\nDEFAULT_ENGINES = ['libfuzzer', 'afl', 'honggfuzz']\n\n\ndef _to_experimental_job(job_info):\n job_info = copy.copy(job_info)\n job_info.experimental = True\n return job_info\n\n\ndef get_github_url(url):\n \"\"\"Return contents of URL.\"\"\"\n github_credentials = db_config.get_value('github_credentials')\n if not github_credentials:\n raise ProjectSetupError('No github credentials.')\n\n client_id, client_secret = github_credentials.strip().split(';')\n response = requests.get(url, auth=(client_id, client_secret))\n if response.status_code != 200:\n logs.log_error(\n f'Failed to get github url: {url}.', status_code=response.status_code)\n response.raise_for_status()\n\n return json.loads(response.text)\n\n\ndef find_github_item_url(github_json, name):\n \"\"\"Get url of a blob/tree from a github json response.\"\"\"\n for item in github_json['tree']:\n if item['path'] == name:\n return item['url']\n\n return None\n\n\ndef get_oss_fuzz_projects():\n \"\"\"Return list of projects for oss-fuzz.\"\"\"\n ossfuzz_tree_url = ('https://api.github.com/repos/google/oss-fuzz/'\n 'git/trees/master')\n tree = get_github_url(ossfuzz_tree_url)\n projects = []\n\n projects_url = find_github_item_url(tree, 'projects')\n if not projects_url:\n logs.log_error('No projects found.')\n return []\n\n tree = get_github_url(projects_url)\n for item in tree['tree']:\n if item['type'] != 'tree':\n 
continue\n\n item_json = get_github_url(item['url'])\n project_yaml_url = find_github_item_url(item_json, 'project.yaml')\n if not project_yaml_url:\n continue\n\n projects_yaml = get_github_url(project_yaml_url)\n info = yaml.safe_load(base64.b64decode(projects_yaml['content']))\n\n has_dockerfile = (\n find_github_item_url(item_json, 'Dockerfile') or 'dockerfile' in info)\n if not has_dockerfile:\n continue\n\n projects.append((item['path'], info))\n\n return projects\n\n\ndef get_projects_from_gcs(gcs_url):\n \"\"\"Get projects from GCS path.\"\"\"\n data = json.loads(storage.read_data(gcs_url))\n return [(project['name'], project) for project in data['projects']]\n\n\ndef _process_sanitizers_field(sanitizers):\n \"\"\"Pre-process sanitizers field into a map from sanitizer name -> dict of\n options.\"\"\"\n processed_sanitizers = {}\n if not isinstance(sanitizers, list):\n return None\n\n # each field can either be a Map or a String:\n # sanitizers:\n # - undefined:\n # experimental: true\n # - address\n # - memory\n for sanitizer in sanitizers:\n if isinstance(sanitizer, str):\n processed_sanitizers[sanitizer] = {}\n elif isinstance(sanitizer, dict):\n for key, value in six.iteritems(sanitizer):\n processed_sanitizers[key] = value\n else:\n return None\n\n return processed_sanitizers\n\n\ndef get_jobs_for_project(project, info):\n \"\"\"Return jobs for the project.\"\"\"\n sanitizers = _process_sanitizers_field(\n info.get('sanitizers', DEFAULT_SANITIZERS))\n if not sanitizers:\n logs.log_error(f'Invalid sanitizers field for {project}.')\n return []\n\n engines = info.get('fuzzing_engines', DEFAULT_ENGINES)\n architectures = info.get('architectures', DEFAULT_ARCHITECTURES)\n\n jobs = []\n for engine in engines:\n if engine not in JOB_MAP:\n continue\n\n for architecture in architectures:\n if architecture not in JOB_MAP[engine]:\n continue\n\n for sanitizer, options in six.iteritems(sanitizers):\n experimental = (\n options.get('experimental', False) or\n info.get('experimental', False))\n if sanitizer in JOB_MAP[engine][architecture]:\n job = JOB_MAP[engine][architecture][sanitizer]\n if experimental:\n job = _to_experimental_job(job)\n\n jobs.append(job)\n\n return jobs\n\n\ndef convert_googlemail_to_gmail(email):\n \"\"\"Convert @googlemail.com to @gmail.com.\"\"\"\n # TODO(ochang): Investiate if we can/need to do this in general, and not just\n # for cloud storage bucket IAMs.\n if email.endswith('@googlemail.com'):\n return email.split('@')[0] + '@gmail.com'\n\n return email\n\n\ndef _add_users_to_bucket(info, client, bucket_name, iam_policy):\n \"\"\"Add user account to bucket.\"\"\"\n ccs = sorted(\n ['user:' + convert_googlemail_to_gmail(cc) for cc in ccs_from_info(info)])\n binding = storage.get_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)\n\n if binding:\n # buckets.getIamPolicy can return duplicate members when we add a @gmail.com\n # as well as @googlemail.com address for the same account.\n binding['members'] = sorted(list(set(binding['members'])))\n if binding['members'] == ccs:\n return iam_policy\n\n filtered_members = [\n member for member in binding['members'] if member in ccs\n ]\n\n if len(filtered_members) != len(binding['members']):\n # Remove old members.\n binding['members'] = filtered_members\n iam_policy = storage.set_bucket_iam_policy(client, bucket_name,\n iam_policy)\n\n # We might have no binding either from start or after filtering members above.\n # Create a new one in those cases.\n binding = 
storage.get_or_create_bucket_iam_binding(iam_policy,\n OBJECT_VIEWER_IAM_ROLE)\n\n for cc in ccs:\n if cc in binding['members']:\n continue\n\n logs.log(f'Adding {cc} to bucket IAM for {bucket_name}.')\n # Add CCs one at a time since the API does not work with invalid or\n # non-Google emails.\n modified_iam_policy = storage.add_single_bucket_iam(\n client, iam_policy, OBJECT_VIEWER_IAM_ROLE, bucket_name, cc)\n if modified_iam_policy:\n iam_policy = modified_iam_policy\n binding = storage.get_bucket_iam_binding(iam_policy,\n OBJECT_VIEWER_IAM_ROLE)\n\n if not binding['members']:\n # Check that the final binding has members. Empty bindings are not valid.\n storage.remove_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)\n\n return iam_policy\n\n\ndef _set_bucket_service_account(service_account, client, bucket_name,\n iam_policy):\n \"\"\"Set service account for a bucket.\"\"\"\n # Add service account as objectAdmin.\n binding = storage.get_or_create_bucket_iam_binding(iam_policy,\n OBJECT_ADMIN_IAM_ROLE)\n\n members = ['serviceAccount:' + service_account['email']]\n if members == binding['members']:\n # No changes required.\n return iam_policy\n\n binding['members'] = members\n return storage.set_bucket_iam_policy(client, bucket_name, iam_policy)\n\n\ndef add_bucket_iams(info, client, bucket_name, service_account):\n \"\"\"Add CC'ed users to storage bucket IAM.\"\"\"\n iam_policy = storage.get_bucket_iam_policy(client, bucket_name)\n if not iam_policy:\n return\n\n iam_policy = _add_users_to_bucket(info, client, bucket_name, iam_policy)\n _set_bucket_service_account(service_account, client, bucket_name, iam_policy)\n\n\ndef add_service_account_to_bucket(client, bucket_name, service_account, role):\n \"\"\"Add service account to the gcr.io images bucket.\"\"\"\n iam_policy = storage.get_bucket_iam_policy(client, bucket_name)\n if not iam_policy:\n return\n\n binding = storage.get_or_create_bucket_iam_binding(iam_policy, role)\n\n member = 'serviceAccount:' + service_account['email']\n if member in binding['members']:\n # No changes required.\n return\n\n binding['members'].append(member)\n storage.set_bucket_iam_policy(client, bucket_name, iam_policy)\n\n\ndef has_maintainer(info):\n \"\"\"Return whether or not a project has at least one maintainer.\"\"\"\n return info.get('primary_contact') or info.get('auto_ccs')\n\n\ndef ccs_from_info(info):\n \"\"\"Get list of CC's from project info.\"\"\"\n\n def _get_ccs(field_name, allow_list=True):\n \"\"\"Return list of emails to cc given a field name.\"\"\"\n if field_name not in info:\n return []\n\n field_value = info.get(field_name)\n if allow_list and isinstance(field_value, list):\n return field_value\n if isinstance(field_value, str):\n return [field_value]\n if field_value is None:\n return []\n\n raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.')\n\n ccs = []\n ccs.extend(_get_ccs('primary_contact', allow_list=False))\n ccs.extend(_get_ccs('auto_ccs'))\n ccs.extend(_get_ccs('vendor_ccs'))\n\n return [utils.normalize_email(cc) for cc in ccs]\n\n\ndef update_fuzzer_jobs(fuzzer_entities, job_names):\n \"\"\"Update fuzzer job mappings.\"\"\"\n to_delete = {}\n\n for fuzzer_entity_key in fuzzer_entities:\n fuzzer_entity = fuzzer_entity_key.get()\n\n for job in data_types.Job.query():\n if not job.environment_string:\n continue\n\n job_environment = job.get_environment()\n if not utils.string_is_true(job_environment.get('MANAGED', 'False')):\n continue\n\n if job.name in job_names:\n continue\n\n logs.log(f'Deleting 
job {job.name}')\n to_delete[job.name] = job.key\n\n try:\n fuzzer_entity.jobs.remove(job.name)\n except ValueError:\n pass\n\n fuzzer_entity.put()\n fuzzer_selection.update_mappings_for_fuzzer(fuzzer_entity)\n\n if to_delete:\n ndb_utils.delete_multi(to_delete.values())\n\n\ndef cleanup_old_projects_settings(project_names):\n \"\"\"Delete old projects that are no longer used or disabled.\"\"\"\n to_delete = []\n\n for project in data_types.OssFuzzProject.query():\n if project.name not in project_names:\n logs.log(f'Deleting project {project.name}.')\n to_delete.append(project.key)\n\n if to_delete:\n ndb_utils.delete_multi(to_delete)\n\n\ndef create_project_settings(project, info, service_account):\n \"\"\"Setup settings for ClusterFuzz (such as CPU distribution).\"\"\"\n key = ndb.Key(data_types.OssFuzzProject, project)\n oss_fuzz_project = key.get()\n\n # Expecting to run a blackbox fuzzer, so use high end hosts.\n is_high_end = info.get('blackbox', False)\n\n ccs = ccs_from_info(info)\n language = info.get('language')\n\n if oss_fuzz_project:\n if oss_fuzz_project.service_account != service_account['email']:\n oss_fuzz_project.service_account = service_account['email']\n oss_fuzz_project.put()\n\n if oss_fuzz_project.high_end != is_high_end:\n oss_fuzz_project.high_end = is_high_end\n oss_fuzz_project.put()\n\n if oss_fuzz_project.ccs != ccs:\n oss_fuzz_project.ccs = ccs\n oss_fuzz_project.put()\n else:\n if language in MEMORY_SAFE_LANGUAGES:\n cpu_weight = OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT\n else:\n cpu_weight = OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT\n\n data_types.OssFuzzProject(\n id=project,\n name=project,\n high_end=is_high_end,\n cpu_weight=cpu_weight,\n service_account=service_account['email'],\n ccs=ccs).put()\n\n\ndef create_pubsub_topics(project):\n \"\"\"Create pubsub topics for tasks.\"\"\"\n for platform in PUBSUB_PLATFORMS:\n name = untrusted.queue_name(project, platform)\n client = pubsub.PubSubClient()\n application_id = utils.get_application_id()\n\n topic_name = pubsub.topic_name(application_id, name)\n if client.get_topic(topic_name) is None:\n client.create_topic(topic_name)\n\n subscription_name = pubsub.subscription_name(application_id, name)\n if client.get_subscription(subscription_name) is None:\n client.create_subscription(subscription_name, topic_name)\n\n\ndef cleanup_pubsub_topics(project_names):\n \"\"\"Delete old pubsub topics and subscriptions.\"\"\"\n client = pubsub.PubSubClient()\n application_id = utils.get_application_id()\n\n expected_topics = set()\n for platform in PUBSUB_PLATFORMS:\n expected_topics.update(\n [untrusted.queue_name(project, platform) for project in project_names])\n\n pubsub_config = local_config.Config('pubsub.queues')\n unmanaged_queues = [queue['name'] for queue in pubsub_config.get('resources')]\n\n for topic in client.list_topics(pubsub.project_name(application_id)):\n _, name = pubsub.parse_name(topic)\n\n if (not name.startswith(tasks.JOBS_PREFIX) and\n not name.startswith(tasks.HIGH_END_JOBS_PREFIX)):\n # Some topic created by another service, ignore.\n continue\n\n if name in unmanaged_queues:\n continue\n\n if name in expected_topics:\n continue\n\n for subscription in client.list_topic_subscriptions(topic):\n client.delete_subscription(subscription)\n\n client.delete_topic(topic)\n\n\nclass ProjectSetup(object):\n \"\"\"Project setup.\"\"\"\n\n def __init__(self,\n build_bucket_path_template,\n revision_url_template,\n build_type,\n config_suffix='',\n external_config=None,\n segregate_projects=False,\n 
experimental_sanitizers=None,\n engine_build_buckets=None,\n fuzzer_entities=None,\n add_info_labels=False,\n add_revision_mappings=False,\n additional_vars=None):\n self._build_type = build_type\n self._config_suffix = config_suffix\n self._external_config = external_config\n self._build_bucket_path_template = build_bucket_path_template\n self._revision_url_template = revision_url_template\n self._segregate_projects = segregate_projects\n self._experimental_sanitizers = experimental_sanitizers\n self._engine_build_buckets = engine_build_buckets\n self._fuzzer_entities = fuzzer_entities\n self._add_info_labels = add_info_labels\n self._add_revision_mappings = add_revision_mappings\n self._additional_vars = additional_vars\n\n def _get_build_bucket(self, engine, architecture):\n \"\"\"Return the bucket for the given |engine| and |architecture|.\"\"\"\n if architecture != 'x86_64':\n engine += '-' + architecture\n\n bucket = self._engine_build_buckets.get(engine)\n if not bucket:\n raise ProjectSetupError('Invalid fuzzing engine ' + engine)\n\n return bucket\n\n def _deployment_bucket_name(self):\n \"\"\"Deployment bucket name.\"\"\"\n return f'{utils.get_application_id()}-deployment'\n\n def _shared_corpus_bucket_name(self):\n \"\"\"Shared corpus bucket name.\"\"\"\n return environment.get_value('SHARED_CORPUS_BUCKET')\n\n def _mutator_plugins_bucket_name(self):\n \"\"\"Mutator plugins bucket name.\"\"\"\n return environment.get_value('MUTATOR_PLUGINS_BUCKET')\n\n def _backup_bucket_name(self, project_name):\n \"\"\"Return the backup_bucket_name.\"\"\"\n return project_name + '-backup.' + data_handler.bucket_domain_suffix()\n\n def _corpus_bucket_name(self, project_name):\n \"\"\"Return the corpus_bucket_name.\"\"\"\n return project_name + '-corpus.' + data_handler.bucket_domain_suffix()\n\n def _quarantine_bucket_name(self, project_name):\n \"\"\"Return the quarantine_bucket_name.\"\"\"\n return project_name + '-quarantine.' + data_handler.bucket_domain_suffix()\n\n def _logs_bucket_name(self, project_name):\n \"\"\"Return the logs bucket name.\"\"\"\n return project_name + '-logs.' 
+ data_handler.bucket_domain_suffix()\n\n def _create_service_accounts_and_buckets(self, project, info):\n \"\"\"Create per-project service account and buckets.\"\"\"\n service_account, exists = service_accounts.get_or_create_service_account(\n project)\n if not exists:\n # TODO(ochang): Temporary hack to get around\n # https://github.com/google/clusterfuzz/issues/2775.\n service_accounts.set_service_account_roles(service_account)\n\n # Create GCS buckets.\n backup_bucket_name = self._backup_bucket_name(project)\n corpus_bucket_name = self._corpus_bucket_name(project)\n logs_bucket_name = self._logs_bucket_name(project)\n quarantine_bucket_name = self._quarantine_bucket_name(project)\n\n storage.create_bucket_if_needed(backup_bucket_name, BACKUPS_LIFECYCLE)\n storage.create_bucket_if_needed(corpus_bucket_name)\n storage.create_bucket_if_needed(quarantine_bucket_name,\n QUARANTINE_LIFECYCLE)\n storage.create_bucket_if_needed(logs_bucket_name, LOGS_LIFECYCLE)\n\n client = storage.create_discovery_storage_client()\n try:\n add_bucket_iams(info, client, backup_bucket_name, service_account)\n add_bucket_iams(info, client, corpus_bucket_name, service_account)\n add_bucket_iams(info, client, logs_bucket_name, service_account)\n add_bucket_iams(info, client, quarantine_bucket_name, service_account)\n except Exception as e:\n logs.log_error(f'Failed to add bucket IAMs for {project}: {e}.')\n\n # Grant the service account read access to deployment, shared corpus and\n # mutator plugin buckets.\n add_service_account_to_bucket(client, self._deployment_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n add_service_account_to_bucket(client, self._shared_corpus_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n add_service_account_to_bucket(client, self._mutator_plugins_bucket_name(),\n service_account, OBJECT_VIEWER_IAM_ROLE)\n\n data_bundles = {\n fuzzer_entity.get().data_bundle_name\n for fuzzer_entity in six.itervalues(self._fuzzer_entities)\n }\n for data_bundle in data_bundles:\n if not data_bundle:\n continue\n\n # Workers also need to be able to set up these global bundles.\n data_bundle_bucket_name = data_handler.get_data_bundle_bucket_name(\n data_bundle)\n add_service_account_to_bucket(client, data_bundle_bucket_name,\n service_account, OBJECT_VIEWER_IAM_ROLE)\n\n return (service_account, backup_bucket_name, corpus_bucket_name,\n logs_bucket_name, quarantine_bucket_name)\n\n def _get_build_bucket_path(self, project_name, info, engine, memory_tool,\n architecture):\n \"\"\"Returns the build bucket path for the |project|, |engine|, |memory_tool|,\n and |architecture|.\"\"\"\n build_path = info.get('build_path')\n if not build_path:\n build_path = self._build_bucket_path_template\n\n build_path = build_path.replace(\n '%BUCKET%', self._get_build_bucket(engine, architecture))\n build_path = build_path.replace('%PROJECT%', project_name)\n build_path = build_path.replace('%ENGINE%', engine)\n build_path = build_path.replace('%SANITIZER%', memory_tool)\n return build_path\n\n def _sync_job(self, project, info, corpus_bucket_name, quarantine_bucket_name,\n logs_bucket_name, backup_bucket_name):\n \"\"\"Sync the config with ClusterFuzz.\"\"\"\n # Create/update ClusterFuzz jobs.\n job_names = []\n\n for template in get_jobs_for_project(project, info):\n if template.engine == 'none':\n # Engine-less jobs are not automatically managed.\n continue\n\n fuzzer_entity = self._fuzzer_entities.get(template.engine).get()\n if not fuzzer_entity:\n raise ProjectSetupError('Invalid fuzzing engine 
' + template.engine)\n\n job_name = template.job_name(project, self._config_suffix)\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n job = data_types.Job()\n\n if self._external_config:\n if ('reproduction_topic' not in self._external_config or\n 'updates_subscription' not in self._external_config):\n raise ProjectSetupError('Invalid external_config.')\n\n job.external_reproduction_topic = self._external_config[\n 'reproduction_topic']\n job.external_updates_subscription = self._external_config[\n 'updates_subscription']\n else:\n job.external_reproduction_topic = None\n job.external_updates_subscription = None\n\n if not info.get('disabled', False):\n job_names.append(job_name)\n if job_name not in fuzzer_entity.jobs and not job.is_external():\n # Enable new job.\n fuzzer_entity.jobs.append(job_name)\n fuzzer_entity.put()\n\n job.name = job_name\n if self._segregate_projects:\n job.platform = untrusted.platform_name(project, 'linux')\n else:\n # TODO(ochang): Support other platforms?\n job.platform = 'LINUX'\n\n job.templates = template.cf_job_templates\n\n job.environment_string = JOB_TEMPLATE.format(\n build_type=self._build_type,\n build_bucket_path=self._get_build_bucket_path(\n project, info, template.engine, template.memory_tool,\n template.architecture),\n engine=template.engine,\n project=project)\n\n if self._add_revision_mappings:\n revision_vars_url = self._revision_url_template.format(\n project=project,\n bucket=self._get_build_bucket(template.engine,\n template.architecture),\n sanitizer=template.memory_tool)\n\n job.environment_string += f'REVISION_VARS_URL = {revision_vars_url}\\n'\n\n if logs_bucket_name:\n job.environment_string += f'FUZZ_LOGS_BUCKET = {logs_bucket_name}\\n'\n\n if corpus_bucket_name:\n job.environment_string += f'CORPUS_BUCKET = {corpus_bucket_name}\\n'\n\n if quarantine_bucket_name:\n job.environment_string += (\n f'QUARANTINE_BUCKET = {quarantine_bucket_name}\\n')\n\n if backup_bucket_name:\n job.environment_string += f'BACKUP_BUCKET = {backup_bucket_name}\\n'\n\n if self._add_info_labels:\n automatic_labels = [f'Proj-{project}', f'Engine-{template.engine}']\n labels = info.get('labels')\n if labels and '*' in labels:\n automatic_labels.extend(labels['*'])\n automatic_labels = ','.join(automatic_labels)\n job.environment_string += f'AUTOMATIC_LABELS = {automatic_labels}\\n'\n\n help_url = info.get('help_url')\n if help_url:\n job.environment_string += f'HELP_URL = {help_url}\\n'\n\n if (template.experimental or\n (self._experimental_sanitizers and\n template.memory_tool in self._experimental_sanitizers)):\n job.environment_string += 'EXPERIMENTAL = True\\n'\n\n if template.minimize_job_override:\n minimize_job_override = template.minimize_job_override.job_name(\n project, self._config_suffix)\n job.environment_string += (\n f'MINIMIZE_JOB_OVERRIDE = {minimize_job_override}\\n')\n\n view_restrictions = info.get('view_restrictions')\n if view_restrictions:\n if view_restrictions in ALLOWED_VIEW_RESTRICTIONS:\n job.environment_string += (\n f'ISSUE_VIEW_RESTRICTIONS = {view_restrictions}\\n')\n else:\n logs.log_error(\n f'Invalid view restriction setting {view_restrictions} '\n f'for project {project}.')\n\n if not has_maintainer(info):\n job.environment_string += 'DISABLE_DISCLOSURE = True\\n'\n\n selective_unpack = info.get('selective_unpack')\n if selective_unpack:\n job.environment_string += 'UNPACK_ALL_FUZZ_TARGETS_AND_FILES = False\\n'\n\n main_repo = info.get('main_repo')\n if main_repo:\n job.environment_string 
+= f'MAIN_REPO = {main_repo}\\n'\n\n file_github_issue = info.get('file_github_issue', False)\n job.environment_string += f'FILE_GITHUB_ISSUE = {file_github_issue}\\n'\n\n if (template.engine == 'libfuzzer' and\n template.architecture == 'x86_64' and\n 'dataflow' in info.get('fuzzing_engines', DEFAULT_ENGINES)):\n # Dataflow binaries are built with dataflow sanitizer, but can be used\n # as an auxiliary build with libFuzzer builds (e.g. with ASan or UBSan).\n dataflow_build_bucket_path = self._get_build_bucket_path(\n project_name=project,\n info=info,\n engine='dataflow',\n memory_tool='dataflow',\n architecture=template.architecture)\n job.environment_string += (\n f'DATAFLOW_BUILD_BUCKET_PATH = {dataflow_build_bucket_path}\\n')\n\n if self._additional_vars:\n additional_vars = {}\n additional_vars.update(self._additional_vars.get('all', {}))\n\n engine_vars = self._additional_vars.get(template.engine, {})\n engine_sanitizer_vars = engine_vars.get(template.memory_tool, {})\n additional_vars.update(engine_sanitizer_vars)\n\n for key, value in sorted(six.iteritems(additional_vars)):\n job.environment_string += (\n f'{key} = {str(value).encode(\"unicode-escape\").decode(\"utf-8\")}\\n'\n )\n\n job.put()\n\n return job_names\n\n def sync_user_permissions(self, project, info):\n \"\"\"Sync permissions of project based on project.yaml.\"\"\"\n ccs = ccs_from_info(info)\n\n for template in get_jobs_for_project(project, info):\n job_name = template.job_name(project, self._config_suffix)\n\n # Delete removed CCs.\n existing_ccs = data_types.ExternalUserPermission.query(\n data_types.ExternalUserPermission.entity_kind ==\n data_types.PermissionEntityKind.JOB,\n data_types.ExternalUserPermission.entity_name == job_name)\n ndb_utils.delete_multi([\n permission.key\n for permission in existing_ccs\n if permission.email not in ccs\n ])\n\n for cc in ccs:\n query = data_types.ExternalUserPermission.query(\n data_types.ExternalUserPermission.email == cc,\n data_types.ExternalUserPermission.entity_kind ==\n data_types.PermissionEntityKind.JOB,\n data_types.ExternalUserPermission.entity_name == job_name)\n\n existing_permission = query.get()\n if existing_permission:\n continue\n\n data_types.ExternalUserPermission(\n email=cc,\n entity_kind=data_types.PermissionEntityKind.JOB,\n entity_name=job_name,\n is_prefix=False,\n auto_cc=data_types.AutoCCType.ALL).put()\n\n def set_up(self, projects):\n \"\"\"Do project setup. 
Return a list of all the project names that were set\n up.\"\"\"\n job_names = []\n for project, info in projects:\n logs.log(f'Syncing configs for {project}.')\n\n backup_bucket_name = None\n corpus_bucket_name = None\n logs_bucket_name = None\n quarantine_bucket_name = None\n\n if self._segregate_projects:\n # Create per project service account and GCS buckets.\n (service_account, backup_bucket_name, corpus_bucket_name,\n logs_bucket_name, quarantine_bucket_name) = (\n self._create_service_accounts_and_buckets(project, info))\n\n # Create CF jobs for project.\n current_job_names = self._sync_job(project, info, corpus_bucket_name,\n quarantine_bucket_name,\n logs_bucket_name, backup_bucket_name)\n job_names.extend(current_job_names)\n\n if self._segregate_projects:\n self.sync_user_permissions(project, info)\n\n # Create Pub/Sub topics for tasks.\n create_pubsub_topics(project)\n\n # Set up projects settings (such as CPU distribution settings).\n if not info.get('disabled', False):\n create_project_settings(project, info, service_account)\n\n # Delete old/disabled project settings.\n enabled_projects = [\n project for project, info in projects\n if not info.get('disabled', False)\n ]\n return SetupResult(enabled_projects, job_names)\n\n\ndef cleanup_stale_projects(fuzzer_entities, project_names, job_names,\n segregate_projects):\n \"\"\"Clean up stale projects.\"\"\"\n update_fuzzer_jobs(fuzzer_entities, job_names)\n cleanup_old_projects_settings(project_names)\n\n if segregate_projects:\n cleanup_pubsub_topics(project_names)\n\n\nclass Handler(base_handler.Handler):\n \"\"\"Setup ClusterFuzz jobs for projects.\"\"\"\n\n @handler.cron()\n def get(self):\n \"\"\"Handles a GET request.\"\"\"\n libfuzzer = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'libFuzzer').get()\n if not libfuzzer:\n logs.log_error('Failed to get libFuzzer Fuzzer entity.')\n return\n\n afl = data_types.Fuzzer.query(data_types.Fuzzer.name == 'afl').get()\n if not afl:\n logs.log_error('Failed to get AFL Fuzzer entity.')\n return\n\n honggfuzz = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'honggfuzz').get()\n if not honggfuzz:\n logs.log_error('Failed to get honggfuzz Fuzzer entity.')\n return\n\n gft = data_types.Fuzzer.query(\n data_types.Fuzzer.name == 'googlefuzztest').get()\n if not gft:\n logs.log_error('Failed to get googlefuzztest Fuzzer entity.')\n return\n\n project_config = local_config.ProjectConfig()\n segregate_projects = project_config.get('segregate_projects')\n project_setup_configs = project_config.get('project_setup')\n project_names = set()\n job_names = set()\n\n fuzzer_entities = {\n 'afl': afl.key,\n 'honggfuzz': honggfuzz.key,\n 'googlefuzztest': gft.key,\n 'libfuzzer': libfuzzer.key,\n }\n\n for setup_config in project_setup_configs:\n bucket_config = setup_config.get('build_buckets')\n\n if not bucket_config:\n raise ProjectSetupError('Project setup buckets not specified.')\n\n config = ProjectSetup(\n BUILD_BUCKET_PATH_TEMPLATE,\n REVISION_URL,\n setup_config.get('build_type'),\n config_suffix=setup_config.get('job_suffix', ''),\n external_config=setup_config.get('external_config', ''),\n segregate_projects=segregate_projects,\n experimental_sanitizers=setup_config.get('experimental_sanitizers',\n []),\n engine_build_buckets={\n 'libfuzzer': bucket_config.get('libfuzzer'),\n 'libfuzzer-i386': bucket_config.get('libfuzzer_i386'),\n 'afl': bucket_config.get('afl'),\n 'honggfuzz': bucket_config.get('honggfuzz'),\n 'googlefuzztest': bucket_config.get('googlefuzztest'),\n 'none': 
bucket_config.get('no_engine'),\n 'dataflow': bucket_config.get('dataflow'),\n },\n fuzzer_entities=fuzzer_entities,\n add_info_labels=setup_config.get('add_info_labels', False),\n add_revision_mappings=setup_config.get('add_revision_mappings',\n False),\n additional_vars=setup_config.get('additional_vars'))\n\n projects_source = setup_config.get('source')\n if projects_source == 'oss-fuzz':\n projects = get_oss_fuzz_projects()\n elif projects_source.startswith(storage.GS_PREFIX):\n projects = get_projects_from_gcs(projects_source)\n else:\n raise ProjectSetupError('Invalid projects source: ' + projects_source)\n\n if not projects:\n raise ProjectSetupError('Missing projects list.')\n\n result = config.set_up(projects)\n project_names.update(result.project_names)\n job_names.update(result.job_names)\n\n cleanup_stale_projects(\n list(fuzzer_entities.values()), project_names, job_names,\n segregate_projects)\n", "path": "src/appengine/handlers/cron/project_setup.py" } ]
diff --git a/src/appengine/handlers/cron/project_setup.py b/src/appengine/handlers/cron/project_setup.py index 69f3479b16..b075a98aa2 100644 --- a/src/appengine/handlers/cron/project_setup.py +++ b/src/appengine/handlers/cron/project_setup.py @@ -438,6 +438,8 @@ def _get_ccs(field_name, allow_list=True): return field_value if isinstance(field_value, str): return [field_value] + if field_value is None: + return [] raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.') diff --git a/src/clusterfuzz/_internal/tests/appengine/handlers/cron/project_setup_test.py b/src/clusterfuzz/_internal/tests/appengine/handlers/cron/project_setup_test.py index e884e345d4..62f086467b 100644 --- a/src/clusterfuzz/_internal/tests/appengine/handlers/cron/project_setup_test.py +++ b/src/clusterfuzz/_internal/tests/appengine/handlers/cron/project_setup_test.py @@ -244,6 +244,7 @@ def test_execute(self): '[email protected]', '[email protected]', ], + 'vendor_ccs': None, }), ('lib2', { 'homepage': 'http://example2.com',
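The diff above makes `_get_ccs` treat an explicitly empty YAML field (which PyYAML loads as `None`, e.g. a bare `vendor_ccs:` key in project.yaml) as "no CCs" instead of raising `ProjectSetupError`. Below is a minimal standalone sketch of that behavior; the helper is copied in simplified form and the sample `info` dict is hypothetical, so this illustrates the patched logic rather than the ClusterFuzz module itself.

```python
class ProjectSetupError(Exception):
    """Stand-in for the real exception raised by project_setup.py."""


def get_ccs(info, field_name, allow_list=True):
    """Simplified copy of the patched _get_ccs: return emails to CC for a field."""
    if field_name not in info:
        return []
    field_value = info.get(field_name)
    if allow_list and isinstance(field_value, list):
        return field_value
    if isinstance(field_value, str):
        return [field_value]
    if field_value is None:
        # Behavior added by the diff above: an empty YAML key parses to None
        # and now simply means "no CCs" instead of being a hard error.
        return []
    raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.')


info = {'primary_contact': 'dev@example.com', 'vendor_ccs': None}  # hypothetical sample
assert get_ccs(info, 'primary_contact', allow_list=False) == ['dev@example.com']
assert get_ccs(info, 'vendor_ccs') == []  # raised ProjectSetupError before the patch
```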
cloudtools__troposphere-1740
SageMaker Model ContainerDefinition object does not support attribute Mode
Setting a `Mode` attribute in the ContainerDefinition used for either the `PrimaryContainer` or the `Containers` attribute when creating a Model resource keeps throwing the error `AttributeError: ContainerDefinition object does not support attribute Mode`. The latest CloudFormation docs at https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition.html list the `Mode` attribute as supported. Without this support, creates and updates of multi-model containers cannot be configured. Would you prefer I open a PR, or can I wait if it won't take much? Thanks.
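For context, here is a minimal sketch of the kind of template code that hits the reported error against the `ContainerDefinition` props shown in the files below (which have no `Mode` entry). The resource title, role ARN, image URI, and S3 path are placeholders, not values from the issue.

```python
from troposphere.sagemaker import ContainerDefinition, Model

# Fails at construction time before the fix:
# AttributeError: ContainerDefinition object does not support attribute Mode
container = ContainerDefinition(
    Image='123456789012.dkr.ecr.us-east-1.amazonaws.com/my-model:latest',  # placeholder
    ModelDataUrl='s3://my-model-bucket/models/',                           # placeholder
    Mode='MultiModel',
)

model = Model(
    'MultiModelSageMakerModel',                                            # placeholder
    ExecutionRoleArn='arn:aws:iam::123456789012:role/SageMakerRole',       # placeholder
    PrimaryContainer=container,
)
```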
[ { "content": "# Copyright (c) 2012-2018, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import integer\n\n\nclass GitConfig(AWSProperty):\n props = {\n 'Branch': (basestring, False),\n 'RepositoryUrl': (basestring, True),\n 'SecretArn': (basestring, False),\n }\n\n\nclass CodeRepository(AWSObject):\n resource_type = \"AWS::SageMaker::CodeRepository\"\n\n props = {\n 'CodeRepositoryName': (basestring, False),\n 'GitConfig': (GitConfig, True)\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::SageMaker::Endpoint\"\n\n props = {\n 'EndpointName': (basestring, False),\n 'EndpointConfigName': (basestring, True),\n 'Tags': (Tags, True)\n }\n\n\nclass ProductionVariant(AWSProperty):\n props = {\n 'ModelName': (basestring, True),\n 'VariantName': (basestring, True),\n 'InitialInstanceCount': (integer, True),\n 'InstanceType': (basestring, True),\n 'InitialVariantWeight': (float, True)\n }\n\n\nclass EndpointConfig(AWSObject):\n resource_type = \"AWS::SageMaker::EndpointConfig\"\n\n props = {\n 'EndpointConfigName': (basestring, False),\n 'ProductionVariants': ([ProductionVariant], True),\n 'KmsKeyId': (basestring, False),\n 'Tags': (Tags, True)\n }\n\n\nclass ContainerDefinition(AWSProperty):\n props = {\n 'ContainerHostname': (basestring, False),\n 'Environment': (dict, False),\n 'ModelDataUrl': (basestring, False),\n 'Image': (basestring, True)\n }\n\n\nclass VpcConfig(AWSProperty):\n props = {\n 'Subnets': ([basestring], True),\n 'SecurityGroupIds': ([basestring], True)\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::SageMaker::Model\"\n\n props = {\n 'Containers': ([ContainerDefinition], False),\n 'ExecutionRoleArn': (basestring, True),\n 'ModelName': (basestring, False),\n 'PrimaryContainer': (ContainerDefinition, False),\n 'Tags': (Tags, False),\n 'VpcConfig': (VpcConfig, False),\n }\n\n\nclass NotebookInstanceLifecycleHook(AWSProperty):\n props = {\n 'Content': (basestring, False)\n }\n\n\nclass NotebookInstanceLifecycleConfig(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstanceLifecycleConfig\"\n\n props = {\n 'NotebookInstanceLifecycleConfigName': (basestring, False),\n 'OnCreate': ([NotebookInstanceLifecycleHook], False),\n 'OnStart': ([NotebookInstanceLifecycleHook], False)\n }\n\n\nclass NotebookInstance(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstance\"\n\n props = {\n 'AcceleratorTypes': ([basestring], False),\n 'AdditionalCodeRepositories': ([basestring], False),\n 'DefaultCodeRepository': (basestring, False),\n 'DirectInternetAccess': (basestring, False),\n 'InstanceType': (basestring, True),\n 'KmsKeyId': (basestring, False),\n 'LifecycleConfigName': (basestring, False),\n 'NotebookInstanceName': (basestring, False),\n 'RoleArn': (basestring, True),\n 'RootAccess': (basestring, False),\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetId': (basestring, False),\n 'Tags': (Tags, False),\n 'VolumeSizeInGB': (integer, False),\n }\n\n\nclass CognitoMemberDefinition(AWSProperty):\n props = {\n 'CognitoClientId': (basestring, True),\n 'CognitoUserGroup': (basestring, True),\n 'CognitoUserPool': (basestring, True),\n }\n\n\nclass MemberDefinition(AWSProperty):\n props = {\n 'CognitoMemberDefinition': (CognitoMemberDefinition, True),\n }\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'NotificationTopicArn': (basestring, True),\n }\n\n\nclass Workteam(AWSObject):\n resource_type = 
\"AWS::SageMaker::Workteam\"\n\n props = {\n 'Description': (basestring, False),\n 'MemberDefinitions': ([MemberDefinition], False),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'Tags': (Tags, False),\n 'WorkteamName': (basestring, False),\n }\n", "path": "troposphere/sagemaker.py" } ]
[ { "content": "# Copyright (c) 2012-2018, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import integer\n\n\nclass GitConfig(AWSProperty):\n props = {\n 'Branch': (basestring, False),\n 'RepositoryUrl': (basestring, True),\n 'SecretArn': (basestring, False),\n }\n\n\nclass CodeRepository(AWSObject):\n resource_type = \"AWS::SageMaker::CodeRepository\"\n\n props = {\n 'CodeRepositoryName': (basestring, False),\n 'GitConfig': (GitConfig, True)\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::SageMaker::Endpoint\"\n\n props = {\n 'EndpointName': (basestring, False),\n 'EndpointConfigName': (basestring, True),\n 'Tags': (Tags, True)\n }\n\n\nclass ProductionVariant(AWSProperty):\n props = {\n 'ModelName': (basestring, True),\n 'VariantName': (basestring, True),\n 'InitialInstanceCount': (integer, True),\n 'InstanceType': (basestring, True),\n 'InitialVariantWeight': (float, True)\n }\n\n\nclass EndpointConfig(AWSObject):\n resource_type = \"AWS::SageMaker::EndpointConfig\"\n\n props = {\n 'EndpointConfigName': (basestring, False),\n 'ProductionVariants': ([ProductionVariant], True),\n 'KmsKeyId': (basestring, False),\n 'Tags': (Tags, True)\n }\n\n\nclass ContainerDefinition(AWSProperty):\n props = {\n 'ContainerHostname': (basestring, False),\n 'Environment': (dict, False),\n 'Mode': (basestring, False),\n 'ModelDataUrl': (basestring, False),\n 'Image': (basestring, True)\n }\n\n\nclass VpcConfig(AWSProperty):\n props = {\n 'Subnets': ([basestring], True),\n 'SecurityGroupIds': ([basestring], True)\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::SageMaker::Model\"\n\n props = {\n 'Containers': ([ContainerDefinition], False),\n 'ExecutionRoleArn': (basestring, True),\n 'ModelName': (basestring, False),\n 'PrimaryContainer': (ContainerDefinition, False),\n 'Tags': (Tags, False),\n 'VpcConfig': (VpcConfig, False),\n }\n\n\nclass NotebookInstanceLifecycleHook(AWSProperty):\n props = {\n 'Content': (basestring, False)\n }\n\n\nclass NotebookInstanceLifecycleConfig(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstanceLifecycleConfig\"\n\n props = {\n 'NotebookInstanceLifecycleConfigName': (basestring, False),\n 'OnCreate': ([NotebookInstanceLifecycleHook], False),\n 'OnStart': ([NotebookInstanceLifecycleHook], False)\n }\n\n\nclass NotebookInstance(AWSObject):\n resource_type = \"AWS::SageMaker::NotebookInstance\"\n\n props = {\n 'AcceleratorTypes': ([basestring], False),\n 'AdditionalCodeRepositories': ([basestring], False),\n 'DefaultCodeRepository': (basestring, False),\n 'DirectInternetAccess': (basestring, False),\n 'InstanceType': (basestring, True),\n 'KmsKeyId': (basestring, False),\n 'LifecycleConfigName': (basestring, False),\n 'NotebookInstanceName': (basestring, False),\n 'RoleArn': (basestring, True),\n 'RootAccess': (basestring, False),\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetId': (basestring, False),\n 'Tags': (Tags, False),\n 'VolumeSizeInGB': (integer, False),\n }\n\n\nclass CognitoMemberDefinition(AWSProperty):\n props = {\n 'CognitoClientId': (basestring, True),\n 'CognitoUserGroup': (basestring, True),\n 'CognitoUserPool': (basestring, True),\n }\n\n\nclass MemberDefinition(AWSProperty):\n props = {\n 'CognitoMemberDefinition': (CognitoMemberDefinition, True),\n }\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'NotificationTopicArn': (basestring, True),\n }\n\n\nclass Workteam(AWSObject):\n resource_type 
= \"AWS::SageMaker::Workteam\"\n\n props = {\n 'Description': (basestring, False),\n 'MemberDefinitions': ([MemberDefinition], False),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'Tags': (Tags, False),\n 'WorkteamName': (basestring, False),\n }\n", "path": "troposphere/sagemaker.py" } ]
diff --git a/troposphere/sagemaker.py b/troposphere/sagemaker.py index 6bd90952d..a172945cf 100644 --- a/troposphere/sagemaker.py +++ b/troposphere/sagemaker.py @@ -59,6 +59,7 @@ class ContainerDefinition(AWSProperty): props = { 'ContainerHostname': (basestring, False), 'Environment': (dict, False), + 'Mode': (basestring, False), 'ModelDataUrl': (basestring, False), 'Image': (basestring, True) }
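With the one-line `props` addition above, the same kind of definition validates. A hedged usage sketch for a model with multiple containers follows; the names, ARNs, and URIs are placeholders, and the `Mode` values shown follow the SingleModel/MultiModel options described in the CloudFormation documentation linked in the issue.

```python
from troposphere.sagemaker import ContainerDefinition, Model

model = Model(
    'InferencePipelineModel',                                              # placeholder
    ExecutionRoleArn='arn:aws:iam::123456789012:role/SageMakerRole',       # placeholder
    Containers=[
        ContainerDefinition(
            Image='123456789012.dkr.ecr.us-east-1.amazonaws.com/model-a:latest',  # placeholder
            ModelDataUrl='s3://my-model-bucket/model-a/',                          # placeholder
            Mode='SingleModel',
        ),
        ContainerDefinition(
            Image='123456789012.dkr.ecr.us-east-1.amazonaws.com/model-b:latest',  # placeholder
            ModelDataUrl='s3://my-model-bucket/model-b/',                          # placeholder
            Mode='SingleModel',
        ),
    ],
)

print(model.to_dict())  # serializes cleanly now that 'Mode' is a known prop
```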